patch (string, 17 to 31.2k chars) | y (int64, constant 1) | oldf (string, 0 to 2.21M chars) | idx (int64, constant 1) | id (int64, 4.29k to 68.4k) | msg (string, 8 to 843 chars) | proj (212 distinct values) | lang (9 distinct values)
---|---|---|---|---|---|---|---|
@@ -185,6 +185,7 @@ namespace MvvmCross.Platforms.Ios.Views
}
else
{
+ keyboardFrame.Height -= scrollView.SafeAreaInsets.Bottom;
scrollView.CenterView(activeView, keyboardFrame);
}
} | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MS-PL license.
// See the LICENSE file in the project root for more information.
using System;
using CoreGraphics;
using Foundation;
using MvvmCross.ViewModels;
using UIKit;
namespace MvvmCross.Platforms.Ios.Views
{
/// <summary>
/// Mvx base view controller that provides a few extra bits of implementation over the standard View Controllers.
/// </summary>
public abstract class MvxBaseViewController<TViewModel> : MvxViewController where TViewModel : IMvxViewModel
{
public MvxBaseViewController()
{
}
public MvxBaseViewController(NSCoder coder) : base(coder)
{
}
protected MvxBaseViewController(NSObjectFlag t) : base(t)
{
}
protected internal MvxBaseViewController(IntPtr handle) : base(handle)
{
}
public MvxBaseViewController(string nibName, NSBundle bundle) : base(nibName, bundle)
{
}
/// <summary>
/// Gets or sets the view model.
/// </summary>
/// <value>
/// The view model.
/// </value>
protected new TViewModel ViewModel
{
get { return (TViewModel)base.ViewModel; }
set { base.ViewModel = value; }
}
/// <summary>
/// The view to center on keyboard shown
/// </summary>
protected UIView ViewToCenterOnKeyboardShown;
/// <summary>
/// The scroll to center on keyboard shown
/// </summary>
protected UIScrollView ScrollToCenterOnKeyboardShown;
/// <summary>
/// Initialises the keyboard handling. The view must also contain a UIScrollView for this to work. You must also override HandlesKeyboardNotifications() and return true from that method.
/// </summary>
/// <param name="enableAutoDismiss">If set to <c>true</c> enable auto dismiss.</param>
protected virtual void InitKeyboardHandling(bool enableAutoDismiss = true)
{
//Only do this if required
if (HandlesKeyboardNotifications())
{
RegisterForKeyboardNotifications();
}
if (enableAutoDismiss)
{
DismissKeyboardOnBackgroundTap();
}
}
/// <summary>
/// Override this in derived Views in order to handle the keyboard.
/// </summary>
/// <returns></returns>
public virtual bool HandlesKeyboardNotifications()
{
return false;
}
private NSObject _keyboardShowObserver;
private NSObject _keyboardHideObserver;
protected virtual void RegisterForKeyboardNotifications()
{
if (_keyboardShowObserver == null)
{
_keyboardShowObserver = NSNotificationCenter.DefaultCenter.AddObserver(UIKeyboard.WillShowNotification, OnKeyboardNotification);
}
if (_keyboardHideObserver == null)
{
_keyboardHideObserver = NSNotificationCenter.DefaultCenter.AddObserver(UIKeyboard.WillHideNotification, OnKeyboardNotification);
}
}
protected virtual void UnregisterForKeyboardNotifications()
{
if (_keyboardShowObserver != null)
{
NSNotificationCenter.DefaultCenter.RemoveObserver(_keyboardShowObserver);
_keyboardShowObserver.Dispose();
_keyboardShowObserver = null;
}
if (_keyboardHideObserver != null)
{
NSNotificationCenter.DefaultCenter.RemoveObserver(_keyboardHideObserver);
_keyboardHideObserver.Dispose();
_keyboardHideObserver = null;
}
}
/// <summary>
/// Gets the UIView that represents the "active" user input control (e.g. textfield, or button under a text field)
/// </summary>
/// <returns>
/// A <see cref="UIView"/>
/// </returns>
protected virtual UIView KeyboardGetActiveView()
{
return View.FindFirstResponder();
}
/// <summary>
/// Called when keyboard notifications are produced.
/// </summary>
/// <param name="notification">The notification.</param>
private void OnKeyboardNotification(NSNotification notification)
{
if (!IsViewLoaded) return;
//Check if the keyboard is becoming visible
var visible = notification.Name == UIKeyboard.WillShowNotification;
//Start an animation, using values from the keyboard
UIView.BeginAnimations("AnimateForKeyboard");
UIView.SetAnimationBeginsFromCurrentState(true);
UIView.SetAnimationDuration(UIKeyboard.AnimationDurationFromNotification(notification));
UIView.SetAnimationCurve((UIViewAnimationCurve)UIKeyboard.AnimationCurveFromNotification(notification));
//Pass the notification, calculating keyboard height, etc.
var keyboardFrame = visible
? UIKeyboard.FrameEndFromNotification(notification)
: UIKeyboard.FrameBeginFromNotification(notification);
OnKeyboardChanged(visible, keyboardFrame);
//Commit the animation
UIView.CommitAnimations();
}
/// <summary>
/// Override this method to apply custom logic when the keyboard is shown/hidden
/// </summary>
/// <param name='visible'>
/// If the keyboard is visible
/// </param>
/// <param name='keyboardFrame'>
/// Frame of the keyboard
/// </param>
protected virtual void OnKeyboardChanged(bool visible, CGRect keyboardFrame)
{
var activeView = ViewToCenterOnKeyboardShown ?? KeyboardGetActiveView();
if (activeView == null)
{
return;
}
var scrollView = ScrollToCenterOnKeyboardShown ?? activeView.FindTopSuperviewOfType(View, typeof(UIScrollView)) as UIScrollView;
if (scrollView == null)
{
return;
}
if (!visible)
{
scrollView.RestoreScrollPosition();
}
else
{
scrollView.CenterView(activeView, keyboardFrame);
}
}
/// <summary>
/// Call it to force dismiss keyboard when background is tapped
/// </summary>
protected void DismissKeyboardOnBackgroundTap()
{
// Add gesture recognizer to hide keyboard
var tap = new UITapGestureRecognizer { CancelsTouchesInView = false };
tap.AddTarget(() => View.EndEditing(true));
tap.ShouldReceiveTouch = (recognizer, touch) => !(touch.View is UIControl || touch.View.FindSuperviewOfType(View, typeof(UITableViewCell)) != null);
View.AddGestureRecognizer(tap);
}
/// <summary>
/// Selects next TextField to become FirstResponder.
/// Usage: textField.ShouldReturn += TextFieldShouldReturn;
/// </summary>
/// <returns></returns>
/// <param name="textField">The TextField</param>
public bool TextFieldShouldReturn(UITextField textField)
{
var nextTag = textField.Tag + 1;
UIResponder nextResponder = View.ViewWithTag(nextTag);
if (nextResponder != null)
{
nextResponder.BecomeFirstResponder();
}
else {
// Not found, so remove keyboard.
textField.ResignFirstResponder();
}
return false; // We do not want UITextField to insert line-breaks.
}
}
}
| 1 | 15,254 | Please add a check to see if SafeAreaInsets is available. It isn't on all iOS versions. | MvvmCross-MvvmCross | .cs |
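A minimal sketch of the requested guard, assuming the usual Xamarin.iOS availability check (`UIDevice.CurrentDevice.CheckSystemVersion`); `SafeAreaInsets` only exists on iOS 11 and later, so older versions would fall back to the unadjusted keyboard frame:

```csharp
else
{
    // SafeAreaInsets was introduced in iOS 11; guard it so the code
    // still runs on older versions, just without the inset adjustment.
    if (UIDevice.CurrentDevice.CheckSystemVersion(11, 0))
    {
        keyboardFrame.Height -= scrollView.SafeAreaInsets.Bottom;
    }
    scrollView.CenterView(activeView, keyboardFrame);
}
```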
@@ -77,7 +77,7 @@ class RolesController < ApplicationController
# rubocop:disable LineLength
render json: {
code: 1,
- msg: _("Successfully changed the permissions for #{@role.user.email}. They have been notified via email.")
+ msg: _("Successfully changed the permissions for %{email}. They have been notified via email.") % { email: @role.user.email }
}
# rubocop:enable LineLength
else | 1 | # frozen_string_literal: true
class RolesController < ApplicationController
include ConditionalUserMailer
respond_to :html
after_action :verify_authorized
def create
registered = true
@role = Role.new(role_params)
authorize @role
plan = Plan.find(role_params[:plan_id])
message = ""
if params[:user].present? && plan.present?
if @role.plan.owner.present? && @role.plan.owner.email == params[:user]
# rubocop:disable LineLength
flash[:notice] = _("Cannot share plan with %{email} since that email matches with the owner of the plan.") % {
email: params[:user]
}
# rubocop:enable LineLength
else
user = User.where_case_insensitive("email", params[:user]).first
if Role.find_by(plan: @role.plan, user: user) # role already exists
flash[:notice] = _("Plan is already shared with %{email}.") % {
email: params[:user]
}
else
if user.nil?
registered = false
User.invite!({email: params[:user],
firstname: _("First Name"),
surname: _("Surname"),
org: current_user.org },
current_user )
message = _("Invitation to %{email} issued successfully.") % {
email: params[:user]
}
user = User.where_case_insensitive("email", params[:user]).first
end
message += _("Plan shared with %{email}.") % {
email: user.email
}
@role.user = user
if @role.save
if registered
deliver_if(recipients: user, key: "users.added_as_coowner") do |r|
UserMailer.sharing_notification(@role, r, inviter: current_user)
.deliver_now
end
end
flash[:notice] = message
else
# rubocop:disable LineLength
flash[:alert] = _("You must provide a valid email address and select a permission level.")
# rubocop:enable LineLength
end
end
end
else
flash[:alert] = _("Please enter an email address")
end
redirect_to controller: "plans", action: "share", id: @role.plan.id
end
def update
@role = Role.find(params[:id])
authorize @role
if @role.update_attributes(access: role_params[:access])
deliver_if(recipients: @role.user, key: "users.added_as_coowner") do |r|
UserMailer.permissions_change_notification(@role, current_user).deliver_now
end
# rubocop:disable LineLength
render json: {
code: 1,
msg: _("Successfully changed the permissions for #{@role.user.email}. They have been notified via email.")
}
# rubocop:enable LineLength
else
render json: { code: 0, msg: flash[:alert] }
end
end
def destroy
@role = Role.find(params[:id])
authorize @role
user = @role.user
plan = @role.plan
@role.destroy
flash[:notice] = _("Access removed")
deliver_if(recipients: user, key: "users.added_as_coowner") do |r|
UserMailer.plan_access_removed(user, plan, current_user).deliver_now
end
redirect_to controller: "plans", action: "share", id: @role.plan.id
end
# This function makes user's role on a plan inactive
# i.e. "removes" this from their plans
def deactivate
role = Role.find(params[:id])
authorize role
if role.deactivate!
flash[:notice] = _("Plan removed")
else
flash[:alert] = _("Unable to remove the plan")
end
redirect_to(plans_path)
end
private
def role_params
params.require(:role).permit(:plan_id, :access)
end
end
| 1 | 18,438 | String interpolation here was causing the translation to not get picked up. | DMPRoadmap-roadmap | rb |
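The failure mode behind this comment: gettext extracts msgids from the source literally, but Ruby `#{}` interpolation runs before `_()` ever sees the string, so the runtime msgid never matches the extracted catalog entry. A minimal illustration, assuming the GetText-style `_()` helper used in this controller:

```ruby
email = "user@example.org"

# Broken: the string passed to _() already contains the runtime email,
# so it cannot match any msgid extracted from the source.
_("Successfully changed the permissions for #{email}.")

# Fixed: a stable msgid with a named placeholder, filled in after lookup.
_("Successfully changed the permissions for %{email}.") % { email: email }
```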
@@ -69,8 +69,13 @@ StatelessWriter::StatelessWriter(
StatelessWriter::~StatelessWriter()
{
- AsyncWriterThread::removeWriter(*this);
logInfo(RTPS_WRITER,"StatelessWriter destructor";);
+
+ mp_RTPSParticipant->async_thread().unregister_writer(this);
+
+ // After unregistering writer from AsyncWriterThread, delete all flow_controllers because they register the writer in
+ // the AsyncWriterThread.
+ flow_controllers_.clear();
}
void StatelessWriter::get_builtin_guid() | 1 | // Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* @file StatelessWriter.cpp
*
*/
#include <fastrtps/rtps/writer/StatelessWriter.h>
#include <fastrtps/rtps/writer/WriterListener.h>
#include <fastrtps/rtps/history/WriterHistory.h>
#include <fastrtps/rtps/resources/AsyncWriterThread.h>
#include "../participant/RTPSParticipantImpl.h"
#include "../flowcontrol/FlowController.h"
#include "../history/HistoryAttributesExtension.hpp"
#include "RTPSWriterCollector.h"
#include <fastrtps/rtps/builtin/BuiltinProtocols.h>
#include <fastrtps/rtps/builtin/liveliness/WLP.h>
#include <algorithm>
#include <mutex>
#include <set>
#include <vector>
#include <fastrtps/log/Log.h>
namespace eprosima {
namespace fastrtps{
namespace rtps {
StatelessWriter::StatelessWriter(
RTPSParticipantImpl* participant,
GUID_t& guid,
WriterAttributes& attributes,
WriterHistory* history,
WriterListener* listener)
: RTPSWriter(
participant,
guid,
attributes,
history,
listener)
, matched_readers_(attributes.matched_readers_allocation)
, unsent_changes_(resource_limits_from_history(history->m_att))
{
get_builtin_guid();
const RemoteLocatorsAllocationAttributes& loc_alloc =
participant->getRTPSParticipantAttributes().allocation.locators;
for (size_t i = 0; i < attributes.matched_readers_allocation.initial; ++i)
{
matched_readers_.emplace_back(
mp_RTPSParticipant,
loc_alloc.max_unicast_locators,
loc_alloc.max_multicast_locators);
}
}
StatelessWriter::~StatelessWriter()
{
AsyncWriterThread::removeWriter(*this);
logInfo(RTPS_WRITER,"StatelessWriter destructor";);
}
void StatelessWriter::get_builtin_guid()
{
if (m_guid.entityId == ENTITYID_SPDP_BUILTIN_RTPSParticipant_WRITER)
{
add_guid(GUID_t{ GuidPrefix_t(), c_EntityId_SPDPReader });
}
#if HAVE_SECURITY
else if (m_guid.entityId == ENTITYID_P2P_BUILTIN_PARTICIPANT_STATELESS_WRITER)
{
add_guid(GUID_t{ GuidPrefix_t(), participant_stateless_message_reader_entity_id });
}
#endif
}
bool StatelessWriter::has_builtin_guid()
{
if (m_guid.entityId == ENTITYID_SPDP_BUILTIN_RTPSParticipant_WRITER)
{
return true;
}
#if HAVE_SECURITY
if (m_guid.entityId == ENTITYID_P2P_BUILTIN_PARTICIPANT_STATELESS_WRITER)
{
return true;
}
#endif
return false;
}
void StatelessWriter::update_reader_info(bool create_sender_resources)
{
bool addGuid = !has_builtin_guid();
is_inline_qos_expected_ = false;
for (const ReaderLocator& reader : matched_readers_)
{
is_inline_qos_expected_ |= reader.expects_inline_qos();
}
update_cached_info_nts();
if (addGuid)
{
compute_selected_guids();
}
if (create_sender_resources)
{
RTPSParticipantImpl* part = mp_RTPSParticipant;
locator_selector_.for_each([part](const Locator_t& loc)
{
part->createSenderResources(loc);
});
}
}
/*
* CHANGE-RELATED METHODS
*/
// TODO(Ricardo) This function only can be used by history. Private it and frined History.
// TODO(Ricardo) Look for other functions
void StatelessWriter::unsent_change_added_to_history(
CacheChange_t* change,
const std::chrono::time_point<std::chrono::steady_clock>& max_blocking_time)
{
std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
if (!fixed_locators_.empty() || locator_selector_.selected_size() > 0)
{
#if HAVE_SECURITY
encrypt_cachechange(change);
#endif
if (!isAsync())
{
try
{
if(m_separateSendingEnabled)
{
std::vector<GUID_t> guids(1);
for (const ReaderLocator& it : matched_readers_)
{
RTPSMessageGroup group(mp_RTPSParticipant, this, m_cdrmessages, it, max_blocking_time);
if (!group.add_data(*change, it.expects_inline_qos()))
{
logError(RTPS_WRITER, "Error sending change " << change->sequenceNumber);
}
}
}
else
{
RTPSMessageGroup group(mp_RTPSParticipant, this, m_cdrmessages, *this, max_blocking_time);
if (!group.add_data(*change, is_inline_qos_expected_))
{
logError(RTPS_WRITER, "Error sending change " << change->sequenceNumber);
}
}
if (mp_listener != nullptr)
{
mp_listener->onWriterChangeReceivedByAll(this, change);
}
}
catch(const RTPSMessageGroup::timeout&)
{
logError(RTPS_WRITER, "Max blocking time reached");
}
}
else
{
unsent_changes_.push_back(ChangeForReader_t(change));
AsyncWriterThread::wakeUp(this);
}
if (liveliness_lease_duration_ < c_TimeInfinite)
{
mp_RTPSParticipant->wlp()->assert_liveliness(
getGuid(),
liveliness_kind_,
liveliness_lease_duration_);
}
}
else
{
logInfo(RTPS_WRITER, "No reader to add change.");
if (mp_listener != nullptr)
{
mp_listener->onWriterChangeReceivedByAll(this, change);
}
}
}
bool StatelessWriter::change_removed_by_history(CacheChange_t* change)
{
std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
unsent_changes_.remove_if(
[change](ChangeForReader_t& cptr)
{
return cptr.getChange() == change ||
cptr.getChange()->sequenceNumber == change->sequenceNumber;
});
return true;
}
bool StatelessWriter::is_acked_by_all(const CacheChange_t* change) const
{
// Only asynchronous writers may have unacked (i.e. unsent changes)
if (isAsync())
{
std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
// Return false if change is pending to be sent
auto it = std::find_if(unsent_changes_.begin(),
unsent_changes_.end(),
[change](const ChangeForReader_t& unsent_change)
{
return change == unsent_change.getChange();
});
return it == unsent_changes_.end();
}
return true;
}
void StatelessWriter::update_unsent_changes(
const SequenceNumber_t& seq_num,
const FragmentNumber_t& frag_num)
{
auto find_by_seq_num = [seq_num](const ChangeForReader_t& unsent_change)
{
return seq_num == unsent_change.getSequenceNumber();
};
auto it = std::find_if(unsent_changes_.begin(), unsent_changes_.end(), find_by_seq_num);
if(it != unsent_changes_.end())
{
bool should_remove = (frag_num == 0);
if (!should_remove)
{
it->markFragmentsAsSent(frag_num);
FragmentNumberSet_t fragment_sns = it->getUnsentFragments();
should_remove = fragment_sns.empty();
}
if(should_remove)
{
unsent_changes_.remove_if(find_by_seq_num);
}
}
}
void StatelessWriter::send_any_unsent_changes()
{
//TODO(Mcc) Separate sending for asynchronous writers
std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
RTPSWriterCollector<ReaderLocator*> changesToSend;
for (const ChangeForReader_t& unsentChange : unsent_changes_)
{
changesToSend.add_change(unsentChange.getChange(), nullptr, unsentChange.getUnsentFragments());
}
// Clear through local controllers
for (auto& controller : flow_controllers_)
{
(*controller)(changesToSend);
}
// Clear through parent controllers
for (auto& controller : mp_RTPSParticipant->getFlowControllers())
{
(*controller)(changesToSend);
}
try
{
RTPSMessageGroup group(mp_RTPSParticipant, this, m_cdrmessages, *this);
bool bHasListener = mp_listener != nullptr;
while(!changesToSend.empty())
{
RTPSWriterCollector<ReaderLocator*>::Item changeToSend = changesToSend.pop();
// Remove the messages selected for sending from the original list,
// and update those that were fragmented with the new sent index
update_unsent_changes(changeToSend.sequenceNumber, changeToSend.fragmentNumber);
// Notify the controllers
FlowController::NotifyControllersChangeSent(changeToSend.cacheChange);
if(changeToSend.fragmentNumber != 0)
{
if(!group.add_data_frag(*changeToSend.cacheChange, changeToSend.fragmentNumber,
is_inline_qos_expected_))
{
logError(RTPS_WRITER, "Error sending fragment (" << changeToSend.sequenceNumber <<
", " << changeToSend.fragmentNumber << ")");
}
}
else
{
if(!group.add_data(*changeToSend.cacheChange, is_inline_qos_expected_))
{
logError(RTPS_WRITER, "Error sending change " << changeToSend.sequenceNumber);
}
}
if (bHasListener && is_acked_by_all(changeToSend.cacheChange))
{
mp_listener->onWriterChangeReceivedByAll(this, changeToSend.cacheChange);
}
}
}
catch(const RTPSMessageGroup::timeout&)
{
logError(RTPS_WRITER, "Max blocking time reached");
}
logInfo(RTPS_WRITER, "Finish sending unsent changes";);
}
/*
* MATCHED_READER-RELATED METHODS
*/
bool StatelessWriter::matched_reader_add(const ReaderProxyData& data)
{
std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
assert(data.guid() != c_Guid_Unknown);
for(ReaderLocator& reader : matched_readers_)
{
if(reader.remote_guid() == data.guid())
{
logWarning(RTPS_WRITER, "Attempting to add existing reader, updating information.");
if (reader.update(data.remote_locators().unicast,
data.remote_locators().multicast,
data.m_expectsInlineQos))
{
update_reader_info(true);
}
return false;
}
}
// Try to add entry on matched_readers_
ReaderLocator* new_reader = nullptr;
for (ReaderLocator& reader : matched_readers_)
{
if (reader.start(data.guid(),
data.remote_locators().unicast,
data.remote_locators().multicast,
data.m_expectsInlineQos))
{
new_reader = &reader;
break;
}
}
if (new_reader == nullptr)
{
const RemoteLocatorsAllocationAttributes& loc_alloc =
mp_RTPSParticipant->getRTPSParticipantAttributes().allocation.locators;
new_reader = matched_readers_.emplace_back(
mp_RTPSParticipant,
loc_alloc.max_unicast_locators,
loc_alloc.max_multicast_locators);
if (new_reader != nullptr)
{
new_reader->start(data.guid(),
data.remote_locators().unicast,
data.remote_locators().multicast,
data.m_expectsInlineQos);
}
else
{
logWarning(RTPS_WRITER, "Couldn't add matched reader due to resource limits");
return false;
}
}
// Add info of new datareader.
locator_selector_.clear();
for (ReaderLocator& reader : matched_readers_)
{
locator_selector_.add_entry(reader.locator_selector_entry());
}
update_reader_info(true);
if (data.m_qos.m_durability.kind >= TRANSIENT_LOCAL_DURABILITY_QOS)
{
unsent_changes_.assign(mp_history->changesBegin(), mp_history->changesEnd());
AsyncWriterThread::wakeUp(this);
}
logInfo(RTPS_READER,"Reader " << data.guid() << " added to "<<m_guid.entityId);
return true;
}
bool StatelessWriter::set_fixed_locators(const LocatorList_t& locator_list)
{
#if HAVE_SECURITY
if (getAttributes().security_attributes().is_submessage_protected ||
getAttributes().security_attributes().is_payload_protected)
{
logError(RTPS_WRITER, "A secure besteffort writer cannot add a lonely locator");
return false;
}
#endif
std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
fixed_locators_.push_back(locator_list);
mp_RTPSParticipant->createSenderResources(fixed_locators_);
return true;
}
bool StatelessWriter::matched_reader_remove(const GUID_t& reader_guid)
{
std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
bool found = locator_selector_.remove_entry(reader_guid);
if(found)
{
found = false;
for (ReaderLocator& reader : matched_readers_)
{
if (reader.stop(reader_guid))
{
found = true;
break;
}
}
// guid should be both on locator_selector_ and matched_readers_
assert(found);
update_reader_info(false);
}
return found;
}
bool StatelessWriter::matched_reader_is_matched(const GUID_t& reader_guid)
{
std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
return std::any_of(matched_readers_.begin(), matched_readers_.end(),
[reader_guid](const ReaderLocator& item)
{
return item.remote_guid() == reader_guid;
});
}
void StatelessWriter::unsent_changes_reset()
{
std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
unsent_changes_.assign(mp_history->changesBegin(), mp_history->changesEnd());
AsyncWriterThread::wakeUp(this);
}
void StatelessWriter::add_flow_controller(std::unique_ptr<FlowController> controller)
{
flow_controllers_.push_back(std::move(controller));
}
bool StatelessWriter::send(
CDRMessage_t* message,
std::chrono::steady_clock::time_point& max_blocking_time_point) const
{
if (!RTPSWriter::send(message, max_blocking_time_point))
{
return false;
}
for (const Locator_t& locator : fixed_locators_)
{
if (!mp_RTPSParticipant->sendSync(message, locator, max_blocking_time_point))
{
return false;
}
}
return true;
}
} /* namespace rtps */
} /* namespace fastrtps */
} /* namespace eprosima */
| 1 | 14,644 | Don't we need to disable the flow controller here, as in `StatefulWriter`'s destructor? | eProsima-Fast-DDS | cpp |
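For context on the ordering being questioned, the patched destructor unregisters from the async thread before destroying the controllers, because a live flow controller can re-register the writer. A condensed view of that ordering, using only names from the diff:

```cpp
StatelessWriter::~StatelessWriter()
{
    logInfo(RTPS_WRITER, "StatelessWriter destructor");

    // Unregister first: a flow controller flushing queued changes could
    // otherwise re-register this writer with the async thread.
    mp_RTPSParticipant->async_thread().unregister_writer(this);

    // Only now is it safe to drop the controllers themselves.
    flow_controllers_.clear();
}
```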
@@ -24,7 +24,7 @@ class Service(service.ChromiumService):
"""
def __init__(self, executable_path, port=0, service_args=None,
- log_path=None, env=None):
+ log_path=None, env=None, create_no_window=False):
"""
Creates a new instance of the Service
| 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.webdriver.chromium import service
class Service(service.ChromiumService):
"""
Object that manages the starting and stopping of the ChromeDriver
"""
def __init__(self, executable_path, port=0, service_args=None,
log_path=None, env=None):
"""
Creates a new instance of the Service
:Args:
- executable_path : Path to the ChromeDriver
- port : Port the service is running on
- service_args : List of args to pass to the chromedriver service
- log_path : Path for the chromedriver service to log to"""
super(Service, self).__init__(
executable_path,
port,
service_args,
log_path,
env,
"Please see https://chromedriver.chromium.org/home")
| 1 | 17,876 | I would rather not have this as a `kwarg`, as it encourages "growth", which leads to an unwieldy constructor in other classes. Let's add a method or property to take care of this instead, as I think its usage is going to be quite low. | SeleniumHQ-selenium | java |
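A sketch of the property-based alternative the reviewer suggests, leaving the constructor signature untouched; the `create_no_window` name comes from the diff, while the backing attribute and default are hypothetical:

```python
class Service(service.ChromiumService):
    # ... __init__ as before, without the extra kwarg ...

    @property
    def create_no_window(self) -> bool:
        """Whether to start the driver process without a console window."""
        return getattr(self, "_create_no_window", False)

    @create_no_window.setter
    def create_no_window(self, value: bool) -> None:
        self._create_no_window = value
```

Callers would then opt in explicitly, e.g. `service.create_no_window = True`, instead of threading another argument through the constructor.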
@@ -313,11 +313,17 @@ func (r *DefaultRuleRenderer) filterOutputChain() *Chain {
// That decision is based on pragmatism; it's generally very useful to be able to contact
// any local workload from the host and policing the traffic doesn't really protect
// against host compromise. If a host is compromised, then the rules could be removed!
+ // However, we do apply policy to workload ingress traffic if it belongs to an IPVS connection.
for _, prefix := range r.WorkloadIfacePrefixes {
- // If the packet is going to a worklaod endpoint, RETURN.
+ // If the packet is going to a workload endpoint, apply workload ingress policy if traffic
+ // belongs to an IPVS connection and return at the end.
log.WithField("ifacePrefix", prefix).Debug("Adding workload match rules")
ifaceMatch := prefix + "+"
rules = append(rules,
+ Rule{
+ Match: Match().OutInterface(ifaceMatch).IPVSConnection(),
+ Action: JumpAction{Target: ChainToWorkloadDispatch},
+ },
Rule{
Match: Match().OutInterface(ifaceMatch),
Action: ReturnAction{}, | 1 | // Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
log "github.com/sirupsen/logrus"
. "github.com/projectcalico/felix/iptables"
)
func (r *DefaultRuleRenderer) StaticFilterTableChains(ipVersion uint8) (chains []*Chain) {
chains = append(chains, r.StaticFilterForwardChains()...)
chains = append(chains, r.StaticFilterInputChains(ipVersion)...)
chains = append(chains, r.StaticFilterOutputChains()...)
return
}
const (
ProtoIPIP = 4
ProtoICMPv6 = 58
)
func (r *DefaultRuleRenderer) StaticFilterInputChains(ipVersion uint8) []*Chain {
return []*Chain{
r.filterInputChain(ipVersion),
r.filterWorkloadToHostChain(ipVersion),
r.failsafeInChain(),
}
}
func (r *DefaultRuleRenderer) acceptAlreadyAccepted() []Rule {
return []Rule{
{
Match: Match().MarkSet(r.IptablesMarkAccept),
Action: AcceptAction{},
},
}
}
func (r *DefaultRuleRenderer) filterInputChain(ipVersion uint8) *Chain {
var inputRules []Rule
// Accept immediately if we've already accepted this packet in the raw or mangle table.
inputRules = append(inputRules, r.acceptAlreadyAccepted()...)
if ipVersion == 4 && r.IPIPEnabled {
// IPIP is enabled, filter incoming IPIP packets to ensure they come from a
// recognised host. We use the protocol number rather than its name because the
// name is not guaranteed to be known by the kernel.
match := Match().ProtocolNum(ProtoIPIP).
NotSourceIPSet(r.IPSetConfigV4.NameForMainIPSet(IPSetIDAllHostIPs))
inputRules = append(inputRules, Rule{
Match: match,
Action: DropAction{},
Comment: "Drop IPIP packets from non-Calico hosts",
})
}
// Apply our policy to packets coming from workload endpoints.
for _, prefix := range r.WorkloadIfacePrefixes {
log.WithField("ifacePrefix", prefix).Debug("Adding workload match rules")
ifaceMatch := prefix + "+"
inputRules = append(inputRules, Rule{
Match: Match().InInterface(ifaceMatch),
Action: GotoAction{Target: ChainWorkloadToHost},
})
}
// Apply host endpoint policy.
inputRules = append(inputRules,
Rule{
Action: ClearMarkAction{Mark: r.allCalicoMarkBits()},
},
Rule{
Action: JumpAction{Target: ChainDispatchFromHostEndpoint},
},
Rule{
Match: Match().MarkSet(r.IptablesMarkAccept),
Action: r.filterAllowAction,
Comment: "Host endpoint policy accepted packet.",
},
)
return &Chain{
Name: ChainFilterInput,
Rules: inputRules,
}
}
func (r *DefaultRuleRenderer) filterWorkloadToHostChain(ipVersion uint8) *Chain {
var rules []Rule
// For IPv6, we need to white-list certain ICMP traffic from workloads in order to to act
// as a router. Note: we do this before the policy chains, so we're bypassing the egress
// rules for this traffic. While that might be unexpected, it makes sure that the user
// doesn't cut off their own connectivity in subtle ways that they shouldn't have to worry
// about.
//
// - 130: multicast listener query.
// - 131: multicast listener report.
// - 132: multicast listener done.
// - 133: router solicitation, which an endpoint uses to request
// configuration information rather than waiting for an
// unsolicited router advertisement.
// - 135: neighbor solicitation.
// - 136: neighbor advertisement.
if ipVersion == 6 {
for _, icmpType := range []uint8{130, 131, 132, 133, 135, 136} {
rules = append(rules, Rule{
Match: Match().
ProtocolNum(ProtoICMPv6).
ICMPV6Type(icmpType),
Action: AcceptAction{},
})
}
}
if r.OpenStackSpecialCasesEnabled {
log.Info("Adding OpenStack special-case rules.")
if ipVersion == 4 && r.OpenStackMetadataIP != nil {
// For OpenStack compatibility, we support a special-case to allow incoming traffic
// to the OpenStack metadata IP/port.
// TODO(smc) Long-term, it'd be nice if the OpenStack plugin programmed a policy to
// do this instead.
log.WithField("ip", r.OpenStackMetadataIP).Info(
"OpenStack metadata IP specified, installing whitelist rule.")
rules = append(rules, Rule{
Match: Match().
Protocol("tcp").
DestNet(r.OpenStackMetadataIP.String()).
DestPorts(r.OpenStackMetadataPort),
Action: AcceptAction{},
})
}
// Again, for OpenStack compatibility, white-list certain protocols.
// TODO(smc) Long-term, it'd be nice if the OpenStack plugin programmed a policy to
// do this instead.
dhcpSrcPort := uint16(68)
dhcpDestPort := uint16(67)
if ipVersion == 6 {
dhcpSrcPort = uint16(546)
dhcpDestPort = uint16(547)
}
dnsDestPort := uint16(53)
rules = append(rules,
Rule{
Match: Match().
Protocol("udp").
SourcePorts(dhcpSrcPort).
DestPorts(dhcpDestPort),
Action: AcceptAction{},
},
Rule{
Match: Match().
Protocol("udp").
DestPorts(dnsDestPort),
Action: AcceptAction{},
},
)
}
// Now send traffic to the policy chains to apply the egress policy.
rules = append(rules, Rule{
Action: JumpAction{Target: ChainFromWorkloadDispatch},
})
// If the dispatch chain accepts the packet, it returns to us here. Apply the configured
// action. Note: we may have done work above to allow the packet and then end up dropping
// it here. We can't optimize that away because there may be other rules (such as log
// rules in the policy).
for _, action := range r.inputAcceptActions {
rules = append(rules, Rule{
Action: action,
Comment: "Configured DefaultEndpointToHostAction",
})
}
return &Chain{
Name: ChainWorkloadToHost,
Rules: rules,
}
}
func (r *DefaultRuleRenderer) failsafeInChain() *Chain {
rules := []Rule{}
for _, protoPort := range r.Config.FailsafeInboundHostPorts {
rules = append(rules, Rule{
Match: Match().
Protocol(protoPort.Protocol).
DestPorts(protoPort.Port),
Action: AcceptAction{},
})
}
return &Chain{
Name: ChainFailsafeIn,
Rules: rules,
}
}
func (r *DefaultRuleRenderer) failsafeOutChain() *Chain {
rules := []Rule{}
for _, protoPort := range r.Config.FailsafeOutboundHostPorts {
rules = append(rules, Rule{
Match: Match().
Protocol(protoPort.Protocol).
DestPorts(protoPort.Port),
Action: AcceptAction{},
})
}
return &Chain{
Name: ChainFailsafeOut,
Rules: rules,
}
}
func (r *DefaultRuleRenderer) StaticFilterForwardChains() []*Chain {
rules := []Rule{}
// Rules for filter forward chains dispatches the packet to our dispatch chains if it is going
// to/from an interface that we're responsible for. Note: the dispatch chains represent "allow"
// by returning to this chain for further processing; this is required to handle traffic that
// is going between endpoints on the same host. In that case we need to apply the egress policy
// for one endpoint and the ingress policy for the other.
//
// Packets will be accepted if they passed through both workload and host endpoint policy
// and were returned.
// Jump to from-host-endpoint dispatch chains.
rules = append(rules,
Rule{
// we're clearing all our mark bits to minimise non-determinism caused by rules in other chains.
// We exclude the accept bit because we use that to communicate from the raw/pre-dnat chains.
Action: ClearMarkAction{Mark: r.allCalicoMarkBits() &^ r.IptablesMarkAccept},
},
Rule{
// Apply forward policy for the incoming Host endpoint if accept bit is clear which means the packet
// was not accepted in a previous raw or pre-DNAT chain.
Match: Match().MarkClear(r.IptablesMarkAccept),
Action: JumpAction{Target: ChainDispatchFromHostEndPointForward},
},
)
// Jump to workload dispatch chains.
for _, prefix := range r.WorkloadIfacePrefixes {
log.WithField("ifacePrefix", prefix).Debug("Adding workload match rules")
ifaceMatch := prefix + "+"
rules = append(rules,
Rule{
Match: Match().InInterface(ifaceMatch),
Action: JumpAction{Target: ChainFromWorkloadDispatch},
},
Rule{
Match: Match().OutInterface(ifaceMatch),
Action: JumpAction{Target: ChainToWorkloadDispatch},
},
)
}
// Jump to to-host-endpoint dispatch chains.
rules = append(rules,
Rule{
// Apply forward policy for the outgoing host endpoint.
Action: JumpAction{Target: ChainDispatchToHostEndpointForward},
},
)
// Accept packet if policies above set ACCEPT mark.
rules = append(rules,
Rule{
Match: Match().MarkSet(r.IptablesMarkAccept),
Action: r.filterAllowAction,
Comment: "Policy explicitly accepted packet.",
},
)
return []*Chain{{
Name: ChainFilterForward,
Rules: rules,
}}
}
func (r *DefaultRuleRenderer) StaticFilterOutputChains() []*Chain {
return []*Chain{
r.filterOutputChain(),
r.failsafeOutChain(),
}
}
func (r *DefaultRuleRenderer) filterOutputChain() *Chain {
rules := []Rule{}
// Accept immediately if we've already accepted this packet in the raw or mangle table.
rules = append(rules, r.acceptAlreadyAccepted()...)
// We don't currently police host -> endpoint according to the endpoint's ingress policy.
// That decision is based on pragmatism; it's generally very useful to be able to contact
// any local workload from the host and policing the traffic doesn't really protect
// against host compromise. If a host is compromised, then the rules could be removed!
for _, prefix := range r.WorkloadIfacePrefixes {
// If the packet is going to a worklaod endpoint, RETURN.
log.WithField("ifacePrefix", prefix).Debug("Adding workload match rules")
ifaceMatch := prefix + "+"
rules = append(rules,
Rule{
Match: Match().OutInterface(ifaceMatch),
Action: ReturnAction{},
},
)
}
// If we reach here, the packet is not going to a workload so it must be going to a
// host endpoint.
// Apply host endpoint policy.
rules = append(rules,
Rule{
Action: ClearMarkAction{Mark: r.allCalicoMarkBits()},
},
Rule{
Action: JumpAction{Target: ChainDispatchToHostEndpoint},
},
Rule{
Match: Match().MarkSet(r.IptablesMarkAccept),
Action: r.filterAllowAction,
Comment: "Host endpoint policy accepted packet.",
},
)
return &Chain{
Name: ChainFilterOutput,
Rules: rules,
}
}
func (r *DefaultRuleRenderer) StaticNATTableChains(ipVersion uint8) (chains []*Chain) {
chains = append(chains, r.StaticNATPreroutingChains(ipVersion)...)
chains = append(chains, r.StaticNATPostroutingChains(ipVersion)...)
chains = append(chains, r.StaticNATOutputChains(ipVersion)...)
return
}
func (r *DefaultRuleRenderer) StaticNATPreroutingChains(ipVersion uint8) []*Chain {
rules := []Rule{
{
Action: JumpAction{Target: ChainFIPDnat},
},
}
if ipVersion == 4 && r.OpenStackSpecialCasesEnabled && r.OpenStackMetadataIP != nil {
rules = append(rules, Rule{
Match: Match().
Protocol("tcp").
DestPorts(80).
DestNet("169.254.169.254/32"),
Action: DNATAction{
DestAddr: r.OpenStackMetadataIP.String(),
DestPort: r.OpenStackMetadataPort,
},
})
}
return []*Chain{{
Name: ChainNATPrerouting,
Rules: rules,
}}
}
func (r *DefaultRuleRenderer) StaticNATPostroutingChains(ipVersion uint8) []*Chain {
rules := []Rule{
{
Action: JumpAction{Target: ChainFIPSnat},
},
{
Action: JumpAction{Target: ChainNATOutgoing},
},
}
if ipVersion == 4 && r.IPIPEnabled && len(r.IPIPTunnelAddress) > 0 {
// Add a rule to catch packets that are being sent down the IPIP tunnel from an
// incorrect local IP address of the host and NAT them to use the tunnel IP as its
// source. This happens if:
//
// - the user explicitly binds their socket to the wrong source IP accidentally
// - the user sends traffic to, for example, a Kubernetes service IP, which is
// implemented via NAT instead of routing, leading the kernel to choose the
// wrong source IP.
//
// We NAT the source of the packet to use the tunnel IP. We assume that
// non-local IPs have been correctly routed. Since Calico-assigned IPs are
// non-local (because they're down a veth), they won't get caught by the rule.
// Other remote sources will only reach the tunnel if they're being NATted
// already (for example, a Kubernetes "NodePort"). The kernel will then
// choose the correct source on its own.
rules = append(rules, Rule{
Match: Match().
// Only match packets going out the tunnel.
OutInterface("tunl0").
// Match packets that don't have the correct source address. This
// matches local addresses (i.e. ones assigned to this host)
// limiting the match to the output interface (which we matched
// above as the tunnel). Avoiding embedding the IP address lets
// us use a static rule, which is easier to manage.
NotSrcAddrType(AddrTypeLocal, true).
// Only match if the IP is also some local IP on the box. This
// prevents us from matching packets from workloads, which are
// remote as far as the routing table is concerned.
SrcAddrType(AddrTypeLocal, false),
Action: MasqAction{},
})
}
return []*Chain{{
Name: ChainNATPostrouting,
Rules: rules,
}}
}
func (r *DefaultRuleRenderer) StaticNATOutputChains(ipVersion uint8) []*Chain {
rules := []Rule{
{
Action: JumpAction{Target: ChainFIPDnat},
},
}
return []*Chain{{
Name: ChainNATOutput,
Rules: rules,
}}
}
func (r *DefaultRuleRenderer) StaticMangleTableChains(ipVersion uint8) (chains []*Chain) {
return []*Chain{
r.failsafeInChain(),
r.StaticManglePreroutingChain(ipVersion),
}
}
func (r *DefaultRuleRenderer) StaticManglePreroutingChain(ipVersion uint8) *Chain {
rules := []Rule{}
// ACCEPT or RETURN immediately if packet matches an existing connection. Note that we also
// have a rule like this at the start of each pre-endpoint chain; the functional difference
// with placing this rule here is that it will also apply to packets that may be unrelated
// to Calico (i.e. not to or from Calico workloads, and not via Calico host endpoints). We
// think this is appropriate in the mangle table here - whereas we don't have a rule like
// this in the filter table - because the mangle table is generally not used (except by us)
// for dropping packets, so it is very unlikely that we would be circumventing someone
// else's rule to drop a packet. (And in that case, the user can configure
// IptablesMangleAllowAction to be RETURN.)
rules = append(rules,
Rule{
Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: r.mangleAllowAction,
},
)
// Or if we've already accepted this packet in the raw table.
rules = append(rules,
Rule{
Match: Match().MarkSet(r.IptablesMarkAccept),
Action: r.mangleAllowAction,
},
)
// If packet is from a workload interface, ACCEPT or RETURN immediately according to
// IptablesMangleAllowAction (because pre-DNAT policy is only for host endpoints).
for _, ifacePrefix := range r.WorkloadIfacePrefixes {
rules = append(rules, Rule{
Match: Match().InInterface(ifacePrefix + "+"),
Action: r.mangleAllowAction,
})
}
// Now (=> not from a workload) dispatch to host endpoint chain for the incoming interface.
rules = append(rules,
Rule{
Action: JumpAction{Target: ChainDispatchFromHostEndpoint},
},
// Following that... If the packet was explicitly allowed by a pre-DNAT policy, it
// will have MarkAccept set. If the packet was denied, it will have been dropped
// already. If the incoming interface isn't one that we're policing, or the packet
// isn't governed by any pre-DNAT policy on that interface, it will fall through to
// here without any Calico bits set.
// In the MarkAccept case, we ACCEPT or RETURN according to
// IptablesMangleAllowAction.
Rule{
Match: Match().MarkSet(r.IptablesMarkAccept),
Action: r.mangleAllowAction,
Comment: "Host endpoint policy accepted packet.",
},
)
return &Chain{
Name: ChainManglePrerouting,
Rules: rules,
}
}
func (r *DefaultRuleRenderer) StaticRawTableChains(ipVersion uint8) []*Chain {
return []*Chain{
r.failsafeInChain(),
r.failsafeOutChain(),
r.StaticRawPreroutingChain(ipVersion),
r.StaticRawOutputChain(),
}
}
func (r *DefaultRuleRenderer) StaticRawPreroutingChain(ipVersion uint8) *Chain {
rules := []Rule{}
// For safety, clear all our mark bits before we start. (We could be in append mode and
// another process' rules could have left the mark bit set.)
rules = append(rules,
Rule{Action: ClearMarkAction{Mark: r.allCalicoMarkBits()}},
)
// Set a mark on the packet if it's from a workload interface.
markFromWorkload := r.IptablesMarkScratch0
for _, ifacePrefix := range r.WorkloadIfacePrefixes {
rules = append(rules, Rule{
Match: Match().InInterface(ifacePrefix + "+"),
Action: SetMarkAction{Mark: markFromWorkload},
})
}
if ipVersion == 6 {
// Apply strict RPF check to packets from workload interfaces. This prevents
// workloads from spoofing their IPs. Note: non-privileged containers can't
// usually spoof but privileged containers and VMs can.
//
// We only do this for IPv6 because the IPv4 RPF check is handled via a sysctl.
// In addition, the IPv4 check is complicated by the fact that we have special
// case handling for DHCP to the host, which would require an exclusion.
rules = append(rules, Rule{
Match: Match().MarkSet(markFromWorkload).RPFCheckFailed(),
Action: DropAction{},
})
}
rules = append(rules,
// Send non-workload traffic to the untracked policy chains.
Rule{Match: Match().MarkClear(markFromWorkload),
Action: JumpAction{Target: ChainDispatchFromHostEndpoint}},
// Then, if the packet was marked as allowed, accept it. Packets also return here
// without the mark bit set if the interface wasn't one that we're policing. We
// let those packets fall through to the user's policy.
Rule{Match: Match().MarkSet(r.IptablesMarkAccept),
Action: AcceptAction{}},
)
return &Chain{
Name: ChainRawPrerouting,
Rules: rules,
}
}
func (r *DefaultRuleRenderer) allCalicoMarkBits() uint32 {
return r.IptablesMarkAccept |
r.IptablesMarkPass |
r.IptablesMarkScratch0 |
r.IptablesMarkScratch1
}
func (r *DefaultRuleRenderer) StaticRawOutputChain() *Chain {
return &Chain{
Name: ChainRawOutput,
Rules: []Rule{
// For safety, clear all our mark bits before we start. (We could be in
// append mode and another process' rules could have left the mark bit set.)
{Action: ClearMarkAction{Mark: r.allCalicoMarkBits()}},
// Then, jump to the untracked policy chains.
{Action: JumpAction{Target: ChainDispatchToHostEndpoint}},
// Then, if the packet was marked as allowed, accept it. Packets also
// return here without the mark bit set if the interface wasn't one that
// we're policing.
{Match: Match().MarkSet(r.IptablesMarkAccept),
Action: AcceptAction{}},
},
}
}
| 1 | 15,812 | I just noticed that we use Return here, when we have logically allowed a packet, whereas in the forward chain we use AcceptAction. Do you know why that is? | projectcalico-felix | c |
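A side-by-side of the two actions the comment contrasts, restated from the chains above: `ReturnAction` hands the packet back to the parent chain, so the rest of the host's rules and the chain's default policy still apply, whereas the configured allow action ends filter-table processing for an explicitly accepted packet:

```go
// filter OUTPUT: traffic to a local workload is deliberately not policed,
// so the rule RETURNs and defers to whatever else is in the OUTPUT chain.
Rule{
	Match:  Match().OutInterface(ifaceMatch),
	Action: ReturnAction{},
}

// filter FORWARD: once policy has set the accept mark, the packet is
// explicitly allowed and processing stops here.
Rule{
	Match:   Match().MarkSet(r.IptablesMarkAccept),
	Action:  r.filterAllowAction,
	Comment: "Policy explicitly accepted packet.",
}
```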
@@ -60,3 +60,11 @@ func isABuildFile(name string, config *core.Configuration) bool {
}
return false
}
+
+// Max is an incredibly complicated function, which is presumably why it didn't make it into the Go stdlib
+func Max(x, y int) int {
+ if x < y {
+ return y
+ }
+ return x
+} | 1 | // Package utils contains various utility functions and whatnot.
package utils
import (
"path"
"path/filepath"
"strings"
"gopkg.in/op/go-logging.v1"
"cli"
"core"
"fs"
)
var log = logging.MustGetLogger("utils")
// FindAllSubpackages finds all packages under a particular path.
// Used to implement rules with ... where we need to know all possible packages
// under that location.
func FindAllSubpackages(config *core.Configuration, rootPath string, prefix string) <-chan string {
ch := make(chan string)
go func() {
if rootPath == "" {
rootPath = "."
}
if err := fs.Walk(rootPath, func(name string, isDir bool) error {
basename := path.Base(name)
if name == core.OutDir || (isDir && strings.HasPrefix(basename, ".") && name != ".") {
return filepath.SkipDir // Don't walk output or hidden directories
} else if isDir && !strings.HasPrefix(name, prefix) && !strings.HasPrefix(prefix, name) {
return filepath.SkipDir // Skip any directory without the prefix we're after (but not any directory beneath that)
} else if isABuildFile(basename, config) && !isDir {
dir, _ := path.Split(name)
ch <- strings.TrimRight(dir, "/")
} else if cli.ContainsString(name, config.Parse.ExperimentalDir) {
return filepath.SkipDir // Skip the experimental directory if it's set
}
// Check against blacklist
for _, dir := range config.Parse.BlacklistDirs {
if dir == basename || strings.HasPrefix(name, dir) {
return filepath.SkipDir
}
}
return nil
}); err != nil {
log.Fatalf("Failed to walk tree under %s; %s\n", rootPath, err)
}
close(ch)
}()
return ch
}
// isABuildFile returns true if given filename is a build file name.
func isABuildFile(name string, config *core.Configuration) bool {
for _, buildFileName := range config.Parse.BuildFileName {
if name == buildFileName {
return true
}
}
return false
}
| 1 | 8,226 | There's one for floats but not for ints. The float implementation is non-trivial, though, which I guess is the argument there. | thought-machine-please | go |
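For reference against this thread: the stdlib's `math.Max` only covers `float64` (the non-trivial parts are NaN and signed-zero handling), so an int version either converts or gets hand-rolled as in the patch. A self-contained comparison:

```go
package main

import (
	"fmt"
	"math"
)

// Max returns the larger of two ints; math.Max only exists for float64.
func Max(x, y int) int {
	if x < y {
		return y
	}
	return x
}

func main() {
	fmt.Println(Max(3, 7)) // 7
	// The float64 detour also works, but costs conversions and loses
	// precision for integers above 2^53.
	fmt.Println(int(math.Max(3, 7))) // 7
}
```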
@@ -51,6 +51,9 @@ const (
// 6. QueryWorkflow
// please also reference selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs
DCRedirectionPolicySelectedAPIsForwarding = "selected-apis-forwarding"
+
+ // DCRedirectionPolicyAllAPIsForwarding means forwarding all APIs based on namespace active cluster
+ DCRedirectionPolicyAllAPIsForwarding = "all-apis-forwarding"
)
type ( | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination dcRedirectionPolicy_mock.go
package frontend
import (
"context"
"fmt"
"go.temporal.io/api/serviceerror"
"go.temporal.io/server/common/cluster"
"go.temporal.io/server/common/config"
"go.temporal.io/server/common/namespace"
)
const (
// DCRedirectionPolicyDefault means no redirection
DCRedirectionPolicyDefault = ""
// DCRedirectionPolicyNoop means no redirection
DCRedirectionPolicyNoop = "noop"
// DCRedirectionPolicySelectedAPIsForwarding means forwarding the following APIs based namespace
// 1. StartWorkflowExecution
// 2. SignalWithStartWorkflowExecution
// 3. SignalWorkflowExecution
// 4. RequestCancelWorkflowExecution
// 5. TerminateWorkflowExecution
// 6. QueryWorkflow
// please also reference selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs
DCRedirectionPolicySelectedAPIsForwarding = "selected-apis-forwarding"
)
type (
// DCRedirectionPolicy is a DC redirection policy interface
DCRedirectionPolicy interface {
WithNamespaceIDRedirect(ctx context.Context, namespaceID namespace.ID, apiName string, call func(string) error) error
WithNamespaceRedirect(ctx context.Context, namespace namespace.Name, apiName string, call func(string) error) error
}
// NoopRedirectionPolicy is DC redirection policy which does nothing
NoopRedirectionPolicy struct {
currentClusterName string
}
// SelectedAPIsForwardingRedirectionPolicy is a DC redirection policy
// which (based on namespace) forwards selected APIs calls to active cluster
SelectedAPIsForwardingRedirectionPolicy struct {
currentClusterName string
config *Config
namespaceRegistry namespace.Registry
}
)
// selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs contains a list of APIs which can be redirected
var selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs = map[string]struct{}{
"StartWorkflowExecution": {},
"SignalWithStartWorkflowExecution": {},
"SignalWorkflowExecution": {},
"RequestCancelWorkflowExecution": {},
"TerminateWorkflowExecution": {},
"QueryWorkflow": {},
}
// RedirectionPolicyGenerator generate corresponding redirection policy
func RedirectionPolicyGenerator(clusterMetadata cluster.Metadata, config *Config,
namespaceRegistry namespace.Registry, policy config.DCRedirectionPolicy) DCRedirectionPolicy {
switch policy.Policy {
case DCRedirectionPolicyDefault:
// default policy, noop
return NewNoopRedirectionPolicy(clusterMetadata.GetCurrentClusterName())
case DCRedirectionPolicyNoop:
return NewNoopRedirectionPolicy(clusterMetadata.GetCurrentClusterName())
case DCRedirectionPolicySelectedAPIsForwarding:
currentClusterName := clusterMetadata.GetCurrentClusterName()
return NewSelectedAPIsForwardingPolicy(currentClusterName, config, namespaceRegistry)
default:
panic(fmt.Sprintf("Unknown DC redirection policy %v", policy.Policy))
}
}
// NewNoopRedirectionPolicy is DC redirection policy which does nothing
func NewNoopRedirectionPolicy(currentClusterName string) *NoopRedirectionPolicy {
return &NoopRedirectionPolicy{
currentClusterName: currentClusterName,
}
}
// WithNamespaceIDRedirect redirect the API call based on namespace ID
func (policy *NoopRedirectionPolicy) WithNamespaceIDRedirect(_ context.Context, _ namespace.ID, _ string, call func(string) error) error {
return call(policy.currentClusterName)
}
// WithNamespaceRedirect redirect the API call based on namespace name
func (policy *NoopRedirectionPolicy) WithNamespaceRedirect(_ context.Context, _ namespace.Name, _ string, call func(string) error) error {
return call(policy.currentClusterName)
}
// NewSelectedAPIsForwardingPolicy creates a forwarding policy for selected APIs based on namespace
func NewSelectedAPIsForwardingPolicy(currentClusterName string, config *Config, namespaceRegistry namespace.Registry) *SelectedAPIsForwardingRedirectionPolicy {
return &SelectedAPIsForwardingRedirectionPolicy{
currentClusterName: currentClusterName,
config: config,
namespaceRegistry: namespaceRegistry,
}
}
// WithNamespaceIDRedirect redirect the API call based on namespace ID
func (policy *SelectedAPIsForwardingRedirectionPolicy) WithNamespaceIDRedirect(ctx context.Context, namespaceID namespace.ID, apiName string, call func(string) error) error {
namespaceEntry, err := policy.namespaceRegistry.GetNamespaceByID(namespaceID)
if err != nil {
return err
}
return policy.withRedirect(ctx, namespaceEntry, apiName, call)
}
// WithNamespaceRedirect redirect the API call based on namespace name
func (policy *SelectedAPIsForwardingRedirectionPolicy) WithNamespaceRedirect(ctx context.Context, namespace namespace.Name, apiName string, call func(string) error) error {
namespaceEntry, err := policy.namespaceRegistry.GetNamespace(namespace)
if err != nil {
return err
}
return policy.withRedirect(ctx, namespaceEntry, apiName, call)
}
func (policy *SelectedAPIsForwardingRedirectionPolicy) withRedirect(ctx context.Context, namespaceEntry *namespace.Namespace, apiName string, call func(string) error) error {
targetDC, enableNamespaceNotActiveForwarding := policy.getTargetClusterAndIsNamespaceNotActiveAutoForwarding(ctx, namespaceEntry, apiName)
err := call(targetDC)
targetDC, ok := policy.isNamespaceNotActiveError(err)
if !ok || !enableNamespaceNotActiveForwarding {
return err
}
return call(targetDC)
}
func (policy *SelectedAPIsForwardingRedirectionPolicy) isNamespaceNotActiveError(err error) (string, bool) {
namespaceNotActiveErr, ok := err.(*serviceerror.NamespaceNotActive)
if !ok {
return "", false
}
return namespaceNotActiveErr.ActiveCluster, true
}
func (policy *SelectedAPIsForwardingRedirectionPolicy) getTargetClusterAndIsNamespaceNotActiveAutoForwarding(ctx context.Context, namespaceEntry *namespace.Namespace, apiName string) (string, bool) {
if !namespaceEntry.IsGlobalNamespace() {
return policy.currentClusterName, false
}
if len(namespaceEntry.ClusterNames()) == 1 {
// do not do dc redirection if namespace is only targeting at 1 dc (effectively local namespace)
return policy.currentClusterName, false
}
if !policy.config.EnableNamespaceNotActiveAutoForwarding(namespaceEntry.Name().String()) {
// do not do dc redirection if auto-forwarding dynamic config flag is not enabled
return policy.currentClusterName, false
}
_, ok := selectedAPIsForwardingRedirectionPolicyWhitelistedAPIs[apiName]
if !ok {
// do not do dc redirection if API is not whitelisted
return policy.currentClusterName, false
}
return namespaceEntry.ActiveClusterName(), true
}
| 1 | 13,788 | If we have a redirection policy for Selected API forwarding, why isn't "redirect all" just a special case where they are all Selected? | temporalio-temporal | go |
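One way to read this suggestion: instead of a separate policy type, make the allow-list optional inside `SelectedAPIsForwardingRedirectionPolicy`, with nil meaning "forward everything". A hypothetical sketch (the `allowedAPIs` field does not exist in the code above):

```go
// isAPIAllowed reports whether an API call may be forwarded. Interpreting a
// nil allow-list as "all APIs" makes all-apis-forwarding a degenerate case
// of selected-apis-forwarding rather than a separate policy.
func (policy *SelectedAPIsForwardingRedirectionPolicy) isAPIAllowed(apiName string) bool {
	if policy.allowedAPIs == nil {
		return true
	}
	_, ok := policy.allowedAPIs[apiName]
	return ok
}
```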
@@ -130,6 +130,8 @@ const (
FrameworkTally = "tally"
// FrameworkOpentelemetry OpenTelemetry framework id
FrameworkOpentelemetry = "opentelemetry"
+ // FrameworkCustom Custom framework id
+ FrameworkCustom = "custom"
)
// tally sanitizer options that satisfy both Prometheus and M3 restrictions. | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package metrics
import (
"fmt"
"time"
"github.com/cactus/go-statsd-client/statsd"
prom "github.com/m3db/prometheus_client_golang/prometheus"
"github.com/uber-go/tally"
"github.com/uber-go/tally/m3"
"github.com/uber-go/tally/prometheus"
tallystatsdreporter "github.com/uber-go/tally/statsd"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/tag"
statsdreporter "go.temporal.io/server/common/metrics/tally/statsd"
)
type (
// Config contains the config items for metrics subsystem
Config struct {
// M3 is the configuration for m3 metrics reporter
M3 *m3.Configuration `yaml:"m3"`
// Statsd is the configuration for statsd reporter
Statsd *StatsdConfig `yaml:"statsd"`
// Prometheus is the configuration for prometheus reporter
Prometheus *PrometheusConfig `yaml:"prometheus"`
		// PrometheusSDK is the optional config for the Prometheus metrics reporter for SDK-reported metrics.
PrometheusSDK *PrometheusConfig `yaml:"prometheusSDK"`
// Tags is the set of key-value pairs to be reported as part of every metric
Tags map[string]string `yaml:"tags"`
// Prefix sets the prefix to all outgoing metrics
Prefix string `yaml:"prefix"`
}
// StatsdConfig contains the config items for statsd metrics reporter
StatsdConfig struct {
// The host and port of the statsd server
HostPort string `yaml:"hostPort" validate:"nonzero"`
// The prefix to use in reporting to statsd
Prefix string `yaml:"prefix" validate:"nonzero"`
// FlushInterval is the maximum interval for sending packets.
// If it is not specified, it defaults to 1 second.
FlushInterval time.Duration `yaml:"flushInterval"`
// FlushBytes specifies the maximum udp packet size you wish to send.
// If FlushBytes is unspecified, it defaults to 1432 bytes, which is
// considered safe for local traffic.
FlushBytes int `yaml:"flushBytes"`
}
// PrometheusConfig is a new format for config for prometheus metrics.
PrometheusConfig struct {
// Metric framework: Tally/OpenTelemetry
		Framework string `yaml:"framework"`
// Address for prometheus to serve metrics from.
ListenAddress string `yaml:"listenAddress"`
// DefaultHistogramBoundaries defines the default histogram bucket
// boundaries.
DefaultHistogramBoundaries []float64 `yaml:"defaultHistogramBoundaries"`
// HandlerPath if specified will be used instead of using the default
// HTTP handler path "/metrics".
HandlerPath string `yaml:"handlerPath"`
// Configs below are kept for backwards compatibility with previously exposed tally prometheus.Configuration.
// Deprecated. ListenNetwork if specified will be used instead of using tcp network.
// Supported networks: tcp, tcp4, tcp6 and unix.
ListenNetwork string `yaml:"listenNetwork"`
// Deprecated. TimerType is the default Prometheus type to use for Tally timers.
TimerType string `yaml:"timerType"`
// Deprecated. DefaultHistogramBuckets if specified will set the default histogram
// buckets to be used by the reporter.
DefaultHistogramBuckets []HistogramObjective `yaml:"defaultHistogramBuckets"`
// Deprecated. DefaultSummaryObjectives if specified will set the default summary
// objectives to be used by the reporter.
DefaultSummaryObjectives []SummaryObjective `yaml:"defaultSummaryObjectives"`
// Deprecated. OnError specifies what to do when an error either with listening
// on the specified listen address or registering a metric with the
// Prometheus. By default the registerer will panic.
OnError string `yaml:"onError"`
}
)
// Deprecated. HistogramObjective is a Prometheus histogram bucket.
// Added for backwards compatibility.
type HistogramObjective struct {
Upper float64 `yaml:"upper"`
}
// Deprecated. SummaryObjective is a Prometheus summary objective.
// Added for backwards compatibility.
type SummaryObjective struct {
Percentile float64 `yaml:"percentile"`
AllowedError float64 `yaml:"allowedError"`
}
const (
ms = float64(time.Millisecond) / float64(time.Second)
// Supported framework types
// FrameworkTally tally framework id
FrameworkTally = "tally"
// FrameworkOpentelemetry OpenTelemetry framework id
FrameworkOpentelemetry = "opentelemetry"
)
// tally sanitizer options that satisfy both Prometheus and M3 restrictions.
// This will rename metrics at the tally emission level, so the metric names we
// use may be different from what gets emitted. In the current implementation
// it will replace - and . with _
// We should still ensure that the base metrics are prometheus compatible,
// but this is necessary as the same prom client initialization is used by
// our system workflows.
var (
safeCharacters = []rune{'_'}
sanitizeOptions = tally.SanitizeOptions{
NameCharacters: tally.ValidCharacters{
Ranges: tally.AlphanumericRange,
Characters: safeCharacters,
},
KeyCharacters: tally.ValidCharacters{
Ranges: tally.AlphanumericRange,
Characters: safeCharacters,
},
ValueCharacters: tally.ValidCharacters{
Ranges: tally.AlphanumericRange,
Characters: safeCharacters,
},
ReplacementCharacter: tally.DefaultReplacementCharacter,
}
defaultQuantiles = []float64{50, 75, 90, 95, 99}
defaultHistogramBoundaries = []float64{
1 * ms,
2 * ms,
5 * ms,
10 * ms,
20 * ms,
50 * ms,
100 * ms,
200 * ms,
500 * ms,
1000 * ms,
2000 * ms,
5000 * ms,
10000 * ms,
20000 * ms,
50000 * ms,
100000 * ms,
200000 * ms,
500000 * ms,
1000000 * ms,
}
)
// InitMetricReporters is a root function for initializing metrics clients.
//
// Usage pattern
// serverReporter, sdkReporter, err := c.InitMetricReporters(logger, customReporter)
// metricsClient := serverReporter.newClient(logger, serviceIdx)
//
// customReporter Provide this argument if you want to report metrics to a custom metric platform, otherwise use nil.
//
// returns ServerReporter, SDKReporter, error
func (c *Config) InitMetricReporters(logger log.Logger, customReporter interface{}) (Reporter, Reporter, error) {
if c.Prometheus != nil && len(c.Prometheus.Framework) > 0 {
return c.initReportersFromPrometheusConfig(logger, customReporter)
}
var scope tally.Scope
if customReporter != nil {
if tallyCustomReporter, ok := customReporter.(tally.BaseStatsReporter); ok {
scope = c.NewCustomReporterScope(logger, tallyCustomReporter)
} else {
return nil, nil, fmt.Errorf(
"specified customReporter is not of expected type tally.BaseStatsReporter "+
"as expected for metrics framework %q", FrameworkTally,
)
}
} else {
scope = c.NewScope(logger)
}
reporter := newTallyReporter(scope)
return reporter, reporter, nil
}
func (c *Config) initReportersFromPrometheusConfig(logger log.Logger, customReporter interface{}) (Reporter, Reporter, error) {
if customReporter != nil {
logger.Fatal("Metrics extension point is not implemented.")
}
serverReporter, err := c.initReporterFromPrometheusConfig(logger, c.Prometheus, customReporter)
if err != nil {
return nil, nil, err
}
sdkReporter := serverReporter
if c.PrometheusSDK != nil {
sdkReporter, err = c.initReporterFromPrometheusConfig(logger, c.PrometheusSDK, customReporter)
if err != nil {
return nil, nil, err
}
}
return serverReporter, sdkReporter, nil
}
func (c *Config) initReporterFromPrometheusConfig(logger log.Logger, config *PrometheusConfig, extensionPoint interface{}) (Reporter, error) {
switch config.Framework {
case FrameworkTally:
return c.newTallyReporterByPrometheusConfig(logger, config), nil
case FrameworkOpentelemetry:
return newOpentelemeteryReporter(logger, c.Tags, c.Prefix, config)
default:
err := fmt.Errorf("unsupported framework type specified in config: %q", config.Framework)
logger.Error(err.Error())
return nil, err
}
}
func (c *Config) newTallyReporterByPrometheusConfig(logger log.Logger, config *PrometheusConfig) Reporter {
tallyConfig := c.convertPrometheusConfigToTally(config)
tallyScope := c.newPrometheusScope(logger, tallyConfig)
return newTallyReporter(tallyScope)
}
// NewScope builds a new tally scope for this metrics configuration
//
// If the underlying configuration is valid for multiple reporter types,
// only one of them will be used for reporting.
//
// Current priority order is:
// m3 > statsd > prometheus
func (c *Config) NewScope(logger log.Logger) tally.Scope {
if c.M3 != nil {
return c.newM3Scope(logger)
}
if c.Statsd != nil {
return c.newStatsdScope(logger)
}
if c.Prometheus != nil {
return c.newPrometheusScope(logger, c.convertPrometheusConfigToTally(c.Prometheus))
}
return tally.NoopScope
}
func (c *Config) buildHistogramBuckets(config *PrometheusConfig) []prometheus.HistogramObjective {
var result []prometheus.HistogramObjective
if len(config.DefaultHistogramBuckets) > 0 {
result = make([]prometheus.HistogramObjective, len(config.DefaultHistogramBuckets))
for i, item := range config.DefaultHistogramBuckets {
result[i].Upper = item.Upper
}
} else if len(config.DefaultHistogramBoundaries) > 0 {
result = histogramBoundariesToHistogramObjectives(config.DefaultHistogramBoundaries)
}
return result
}
func (c *Config) convertPrometheusConfigToTally(config *PrometheusConfig) *prometheus.Configuration {
defaultObjectives := make([]prometheus.SummaryObjective, len(config.DefaultSummaryObjectives))
for i, item := range config.DefaultSummaryObjectives {
defaultObjectives[i].AllowedError = item.AllowedError
defaultObjectives[i].Percentile = item.Percentile
}
return &prometheus.Configuration{
HandlerPath: config.HandlerPath,
ListenNetwork: config.ListenNetwork,
ListenAddress: config.ListenAddress,
TimerType: "histogram",
DefaultHistogramBuckets: c.buildHistogramBuckets(config),
DefaultSummaryObjectives: defaultObjectives,
OnError: config.OnError,
}
}
func (c *Config) NewCustomReporterScope(logger log.Logger, customReporter tally.BaseStatsReporter) tally.Scope {
options := tally.ScopeOptions{
DefaultBuckets: histogramBoundariesToValueBuckets(defaultHistogramBoundaries),
}
if c != nil {
options.Tags = c.Tags
options.Prefix = c.Prefix
}
switch reporter := customReporter.(type) {
case tally.StatsReporter:
options.Reporter = reporter
case tally.CachedStatsReporter:
options.CachedReporter = reporter
default:
logger.Error("Unsupported metrics reporter type.", tag.ValueType(customReporter))
return tally.NoopScope
}
scope, _ := tally.NewRootScope(options, time.Second)
return scope
}
// newM3Scope returns a new m3 scope with
// a default reporting interval of a second
func (c *Config) newM3Scope(logger log.Logger) tally.Scope {
reporter, err := c.M3.NewReporter()
if err != nil {
logger.Fatal("error creating m3 reporter", tag.Error(err))
}
scopeOpts := tally.ScopeOptions{
Tags: c.Tags,
CachedReporter: reporter,
Prefix: c.Prefix,
DefaultBuckets: histogramBoundariesToValueBuckets(defaultHistogramBoundaries),
}
scope, _ := tally.NewRootScope(scopeOpts, time.Second)
return scope
}
// newStatsdScope returns a new statsd scope with
// a default reporting interval of a second
func (c *Config) newStatsdScope(logger log.Logger) tally.Scope {
config := c.Statsd
if len(config.HostPort) == 0 {
return tally.NoopScope
}
statter, err := statsd.NewBufferedClient(config.HostPort, config.Prefix, config.FlushInterval, config.FlushBytes)
if err != nil {
logger.Fatal("error creating statsd client", tag.Error(err))
}
	// NOTE: according to https://github.com/uber-go/tally, Tally's statsd
	// implementation doesn't support tagging. Therefore, we implement the
	// Tally interface to have a statsd reporter that can support tagging.
reporter := statsdreporter.NewReporter(statter, tallystatsdreporter.Options{})
scopeOpts := tally.ScopeOptions{
Tags: c.Tags,
Reporter: reporter,
Prefix: c.Prefix,
DefaultBuckets: histogramBoundariesToValueBuckets(defaultHistogramBoundaries),
}
scope, _ := tally.NewRootScope(scopeOpts, time.Second)
return scope
}
// newPrometheusScope returns a new prometheus scope with
// a default reporting interval of a second
func (c *Config) newPrometheusScope(logger log.Logger, config *prometheus.Configuration) tally.Scope {
if len(config.DefaultHistogramBuckets) == 0 {
config.DefaultHistogramBuckets = histogramBoundariesToHistogramObjectives(defaultHistogramBoundaries)
}
reporter, err := config.NewReporter(
prometheus.ConfigurationOptions{
Registry: prom.NewRegistry(),
OnError: func(err error) {
logger.Warn("error in prometheus reporter", tag.Error(err))
},
},
)
if err != nil {
logger.Fatal("error creating prometheus reporter", tag.Error(err))
}
scopeOpts := tally.ScopeOptions{
Tags: c.Tags,
CachedReporter: reporter,
Separator: prometheus.DefaultSeparator,
SanitizeOptions: &sanitizeOptions,
Prefix: c.Prefix,
DefaultBuckets: histogramBoundariesToValueBuckets(defaultHistogramBoundaries),
}
scope, _ := tally.NewRootScope(scopeOpts, time.Second)
return scope
}
func histogramBoundariesToHistogramObjectives(boundaries []float64) []prometheus.HistogramObjective {
var result []prometheus.HistogramObjective
for _, value := range boundaries {
result = append(
result,
prometheus.HistogramObjective{
Upper: value,
},
)
}
return result
}
func histogramBoundariesToValueBuckets(buckets []float64) tally.ValueBuckets {
return tally.ValueBuckets(buckets)
}
| 1 | 11,754 | I wouldn't add this. Just completely ignore config if custom reporter is not `nil` in server options. | temporalio-temporal | go |
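A rough sketch of the alternative the reviewer suggests, in Go with hypothetical types (Reporter, Config, and initReporter are illustrative stand-ins, not the repository's API): when a custom reporter arrives via server options, the metrics config is ignored outright instead of being validated against a framework id.

package main

import "fmt"

type Reporter interface{ Name() string }

type noopReporter struct{}

func (noopReporter) Name() string { return "noop" }

type externalReporter struct{}

func (externalReporter) Name() string { return "custom" }

type Config struct{ Framework string }

// initReporter skips cfg entirely whenever a custom reporter is supplied.
func initReporter(cfg *Config, custom Reporter) Reporter {
	if custom != nil {
		return custom // cfg is never consulted
	}
	fmt.Println("building reporter from config:", cfg.Framework)
	return noopReporter{}
}

func main() {
	fmt.Println(initReporter(&Config{Framework: "tally"}, nil).Name())                // "noop", config-driven
	fmt.Println(initReporter(&Config{Framework: "tally"}, externalReporter{}).Name()) // "custom", config ignored
}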
@@ -159,10 +159,19 @@ int main(int argc, char *argv[]) {
auto ioThreadPool = std::make_shared<folly::IOThreadPoolExecutor>(FLAGS_num_io_threads);
+ std::string clusteridFile =
+ folly::stringPrintf("%s/%s", paths[0].c_str(), "/storage.cluster.id");
+ auto clusterMan
+ = std::make_unique<nebula::meta::ClusterManager>("", clusteridFile);
+ if (!clusterMan->loadClusterId()) {
+ LOG(INFO) << "storaged misses clusterId";
+ }
+
// Meta client
auto metaClient = std::make_unique<nebula::meta::MetaClient>(ioThreadPool,
std::move(metaAddrsRet.value()),
localhost,
+ clusterMan.get(),
true);
if (!metaClient->waitForMetadReady()) {
LOG(ERROR) << "waitForMetadReady error!"; | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include "common/base/SignalHandler.h"
#include <thrift/lib/cpp2/server/ThriftServer.h>
#include "network/NetworkUtils.h"
#include "thread/GenericThreadPool.h"
#include "storage/StorageServiceHandler.h"
#include "storage/StorageHttpStatusHandler.h"
#include "storage/StorageHttpDownloadHandler.h"
#include "storage/StorageHttpAdminHandler.h"
#include "kvstore/NebulaStore.h"
#include "kvstore/PartManager.h"
#include "process/ProcessUtils.h"
#include "storage/test/TestUtils.h"
#include "webservice/WebService.h"
#include "meta/SchemaManager.h"
#include "meta/client/MetaClient.h"
#include "meta/ClientBasedGflagsManager.h"
#include "storage/CompactionFilter.h"
#include "hdfs/HdfsHelper.h"
#include "hdfs/HdfsCommandHelper.h"
DEFINE_int32(port, 44500, "Storage daemon listening port");
DEFINE_bool(reuse_port, true, "Whether to turn on the SO_REUSEPORT option");
DEFINE_string(data_path, "", "Root data path; multiple paths should be split by comma. "
"For rocksdb engine, one path one instance.");
DEFINE_string(local_ip, "", "IP address which is used to identify this server, "
"combined with the listen port");
DEFINE_bool(mock_server, true, "start mock server");
DEFINE_bool(daemonize, true, "Whether to run the process as a daemon");
DEFINE_string(pid_file, "pids/nebula-storaged.pid", "File to hold the process id");
DEFINE_string(meta_server_addrs, "", "list of meta server addresses,"
"the format looks like ip1:port1, ip2:port2, ip3:port3");
DEFINE_string(store_type, "nebula",
"Which type of KVStore to be used by the storage daemon."
" Options can be \"nebula\", \"hbase\", etc.");
DEFINE_int32(num_io_threads, 16, "Number of IO threads");
DEFINE_int32(num_worker_threads, 32, "Number of workers");
using nebula::operator<<;
using nebula::Status;
using nebula::HostAddr;
using nebula::storage::StorageServiceHandler;
using nebula::kvstore::KVStore;
using nebula::meta::SchemaManager;
using nebula::meta::MetaClient;
using nebula::network::NetworkUtils;
using nebula::ProcessUtils;
static std::unique_ptr<apache::thrift::ThriftServer> gServer;
static void signalHandler(int sig);
static Status setupSignalHandler();
std::unique_ptr<nebula::kvstore::KVStore> getStoreInstance(
HostAddr localhost,
std::vector<std::string> paths,
std::shared_ptr<folly::IOThreadPoolExecutor> ioPool,
nebula::meta::MetaClient* metaClient,
nebula::meta::SchemaManager* schemaMan) {
nebula::kvstore::KVOptions options;
options.dataPaths_ = std::move(paths);
options.partMan_ = std::make_unique<nebula::kvstore::MetaServerBasedPartManager>(
localhost,
metaClient);
options.cfFactory_ = std::shared_ptr<nebula::kvstore::KVCompactionFilterFactory>(
new nebula::storage::NebulaCompactionFilterFactory(schemaMan));
if (FLAGS_store_type == "nebula") {
auto nbStore = std::make_unique<nebula::kvstore::NebulaStore>(std::move(options),
ioPool,
localhost);
if (!(nbStore->init())) {
LOG(ERROR) << "nebula store init failed";
return nullptr;
}
return nbStore;
} else if (FLAGS_store_type == "hbase") {
LOG(FATAL) << "HBase store has not been implemented";
} else {
LOG(FATAL) << "Unknown store type \"" << FLAGS_store_type << "\"";
}
return nullptr;
}
int main(int argc, char *argv[]) {
google::SetVersionString(nebula::versionString());
folly::init(&argc, &argv, true);
if (FLAGS_daemonize) {
google::SetStderrLogging(google::FATAL);
} else {
google::SetStderrLogging(google::INFO);
}
// Detect if the server has already been started
auto pidPath = FLAGS_pid_file;
auto status = ProcessUtils::isPidAvailable(pidPath);
if (!status.ok()) {
LOG(ERROR) << status;
return EXIT_FAILURE;
}
if (FLAGS_daemonize) {
status = ProcessUtils::daemonize(pidPath);
if (!status.ok()) {
LOG(ERROR) << status;
return EXIT_FAILURE;
}
} else {
// Write the current pid into the pid file
status = ProcessUtils::makePidFile(pidPath);
if (!status.ok()) {
LOG(ERROR) << status;
return EXIT_FAILURE;
}
}
if (FLAGS_data_path.empty()) {
LOG(ERROR) << "Storage Data Path should not empty";
return EXIT_FAILURE;
}
auto result = nebula::network::NetworkUtils::getLocalIP(FLAGS_local_ip);
if (!result.ok()) {
LOG(ERROR) << "Get localIp failed, ip " << FLAGS_local_ip
<< ", status:" << result.status();
return EXIT_FAILURE;
}
auto hostRet = nebula::network::NetworkUtils::toHostAddr(result.value(), FLAGS_port);
if (!hostRet.ok()) {
LOG(ERROR) << "Bad local host addr, status:" << hostRet.status();
return EXIT_FAILURE;
}
auto& localhost = hostRet.value();
auto metaAddrsRet = nebula::network::NetworkUtils::toHosts(FLAGS_meta_server_addrs);
if (!metaAddrsRet.ok() || metaAddrsRet.value().empty()) {
LOG(ERROR) << "Can't get metaServer address, status:" << metaAddrsRet.status()
<< ", FLAGS_meta_server_addrs:" << FLAGS_meta_server_addrs;
return EXIT_FAILURE;
}
std::vector<std::string> paths;
folly::split(",", FLAGS_data_path, paths, true);
std::transform(paths.begin(), paths.end(), paths.begin(), [](auto& p) {
return folly::trimWhitespace(p).str();
});
if (paths.empty()) {
LOG(ERROR) << "Bad data_path format:" << FLAGS_data_path;
return EXIT_FAILURE;
}
auto ioThreadPool = std::make_shared<folly::IOThreadPoolExecutor>(FLAGS_num_io_threads);
// Meta client
auto metaClient = std::make_unique<nebula::meta::MetaClient>(ioThreadPool,
std::move(metaAddrsRet.value()),
localhost,
true);
if (!metaClient->waitForMetadReady()) {
LOG(ERROR) << "waitForMetadReady error!";
return EXIT_FAILURE;
}
auto gflagsManager = std::make_unique<nebula::meta::ClientBasedGflagsManager>(metaClient.get());
gflagsManager->init();
LOG(INFO) << "Init schema manager";
auto schemaMan = nebula::meta::SchemaManager::create();
schemaMan->init(metaClient.get());
LOG(INFO) << "Init kvstore";
std::unique_ptr<KVStore> kvstore = getStoreInstance(localhost,
std::move(paths),
ioThreadPool,
metaClient.get(),
schemaMan.get());
if (nullptr == kvstore) {
return EXIT_FAILURE;
}
std::unique_ptr<nebula::hdfs::HdfsHelper> helper =
std::make_unique<nebula::hdfs::HdfsCommandHelper>();
auto* helperPtr = helper.get();
LOG(INFO) << "Starting Storage HTTP Service";
nebula::WebService::registerHandler("/status", [] {
return new nebula::storage::StorageHttpStatusHandler();
});
nebula::WebService::registerHandler("/download", [helperPtr] {
auto* handler = new nebula::storage::StorageHttpDownloadHandler();
handler->init(helperPtr);
return handler;
});
nebula::WebService::registerHandler("/admin", [&] {
return new nebula::storage::StorageHttpAdminHandler(schemaMan.get(), kvstore.get());
});
status = nebula::WebService::start();
if (!status.ok()) {
return EXIT_FAILURE;
}
// Setup the signal handlers
status = setupSignalHandler();
if (!status.ok()) {
LOG(ERROR) << status;
nebula::WebService::stop();
return EXIT_FAILURE;
}
auto handler = std::make_shared<StorageServiceHandler>(kvstore.get(), schemaMan.get());
try {
LOG(INFO) << "The storage deamon start on " << localhost;
gServer = std::make_unique<apache::thrift::ThriftServer>();
gServer->setInterface(std::move(handler));
gServer->setPort(FLAGS_port);
gServer->setReusePort(FLAGS_reuse_port);
gServer->setIdleTimeout(std::chrono::seconds(0)); // No idle timeout on client connection
gServer->setIOThreadPool(ioThreadPool);
gServer->setNumCPUWorkerThreads(FLAGS_num_worker_threads);
gServer->setCPUWorkerThreadName("executor");
gServer->serve(); // Will wait until the server shuts down
} catch (const std::exception& e) {
nebula::WebService::stop();
LOG(ERROR) << "Start thrift server failed, error:" << e.what();
return EXIT_FAILURE;
}
nebula::WebService::stop();
LOG(INFO) << "The storage Daemon stopped";
return EXIT_SUCCESS;
}
Status setupSignalHandler() {
return nebula::SignalHandler::install(
{SIGINT, SIGTERM},
[](nebula::SignalHandler::GeneralSignalInfo *info) {
signalHandler(info->sig());
});
}
void signalHandler(int sig) {
switch (sig) {
case SIGINT:
case SIGTERM:
FLOG_INFO("Signal %d(%s) received, stopping this server", sig, ::strsignal(sig));
gServer->stop();
break;
default:
FLOG_ERROR("Signal %d(%s) received but ignored", sig, ::strsignal(sig));
}
}
| 1 | 20,422 | What's the meaning of this code? | vesoft-inc-nebula | cpp
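One plausible reading of the patch under review, sketched in Go with a hypothetical loadClusterID helper: read a cluster id persisted under the first data path, and treat its absence as non-fatal (the log line suggests the meta client can supply the id later). Note, too, that the patch's "%s/%s" format paired with a file name that already starts with '/' yields a doubled slash; path.Join is the normalizing join in Go, and the C++ code would need its own normalization.

package main

import (
	"fmt"
	"os"
	"path"
)

// loadClusterID mirrors what the patch appears to do: read a persisted
// cluster id from a well-known file under the data path.
func loadClusterID(dataPath string) (string, bool) {
	file := path.Join(dataPath, "storage.cluster.id")
	b, err := os.ReadFile(file)
	if err != nil {
		return "", false
	}
	return string(b), true
}

func main() {
	fmt.Printf("%s/%s\n", "/data/storage", "/storage.cluster.id") // /data/storage//storage.cluster.id — the doubled slash
	if id, ok := loadClusterID("/data/storage"); ok {
		fmt.Println("clusterId:", id)
	} else {
		fmt.Println("storaged misses clusterId") // the patch's non-fatal log line
	}
}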
@@ -172,7 +172,7 @@ public class WebViewLocalServer {
return null;
}
- if (isLocalFile(loadingUrl) || (bridge.getConfig().getString("server.url") == null && !bridge.getAppAllowNavigationMask().matches(loadingUrl.getHost()))) {
+ if (isLocalFile(loadingUrl) || loadingUrl.getHost().contains(bridge.getHost()) || (bridge.getServerUrl() == null && !bridge.getAppAllowNavigationMask().matches(loadingUrl.getHost()))) {
Logger.debug("Handling local request: " + request.getUrl().toString());
return handleLocalRequest(request, handler);
} else { | 1 | /*
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.getcapacitor;
import android.content.Context;
import android.net.Uri;
import android.webkit.CookieManager;
import android.webkit.WebResourceRequest;
import android.webkit.WebResourceResponse;
import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.SocketTimeoutException;
import java.net.URL;
import java.net.URLConnection;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
/**
* Helper class meant to be used with the android.webkit.WebView class to enable hosting assets,
* resources and other data on 'virtual' https:// URL.
* Hosting assets and resources on https:// URLs is desirable as it is compatible with the
* Same-Origin policy.
* <p>
* This class is intended to be used from within the
* {@link android.webkit.WebViewClient#shouldInterceptRequest(android.webkit.WebView, String)} and
* {@link android.webkit.WebViewClient#shouldInterceptRequest(android.webkit.WebView,
* android.webkit.WebResourceRequest)}
* methods.
*/
public class WebViewLocalServer {
private final static String capacitorFileStart = Bridge.CAPACITOR_FILE_START;
private final static String capacitorContentStart = Bridge.CAPACITOR_CONTENT_START;
private String basePath;
private final UriMatcher uriMatcher;
private final AndroidProtocolHandler protocolHandler;
private final ArrayList<String> authorities;
private boolean isAsset;
// Whether to route all requests to paths without extensions back to `index.html`
private final boolean html5mode;
private final JSInjector jsInjector;
private final Bridge bridge;
/**
* A handler that produces responses for paths on the virtual asset server.
* <p>
* Methods of this handler will be invoked on a background thread and care must be taken to
* correctly synchronize access to any shared state.
* <p>
* On Android KitKat and above these methods may be called on more than one thread. This thread
 * may be different from the thread on which the shouldInterceptRequest method was invoked.
* This means that on Android KitKat and above it is possible to block in this method without
* blocking other resources from loading. The number of threads used to parallelize loading
* is an internal implementation detail of the WebView and may change between updates which
 * means that the amount of time spent blocking in this method should be kept to an absolute
* minimum.
*/
public abstract static class PathHandler {
protected String mimeType;
private String encoding;
private String charset;
private int statusCode;
private String reasonPhrase;
private Map<String, String> responseHeaders;
public PathHandler() {
this(null, null, 200, "OK", null);
}
public PathHandler(String encoding, String charset, int statusCode,
String reasonPhrase, Map<String, String> responseHeaders) {
this.encoding = encoding;
this.charset = charset;
this.statusCode = statusCode;
this.reasonPhrase = reasonPhrase;
Map<String, String> tempResponseHeaders;
if (responseHeaders == null) {
tempResponseHeaders = new HashMap<>();
} else {
tempResponseHeaders = responseHeaders;
}
tempResponseHeaders.put("Cache-Control", "no-cache");
this.responseHeaders = tempResponseHeaders;
}
public InputStream handle(WebResourceRequest request) {
return handle(request.getUrl());
}
abstract public InputStream handle(Uri url);
public String getEncoding() {
return encoding;
}
public String getCharset() {
return charset;
}
public int getStatusCode() {
return statusCode;
}
public String getReasonPhrase() {
return reasonPhrase;
}
public Map<String, String> getResponseHeaders() {
return responseHeaders;
}
}
WebViewLocalServer(Context context, Bridge bridge, JSInjector jsInjector, ArrayList<String> authorities, boolean html5mode) {
uriMatcher = new UriMatcher(null);
this.html5mode = html5mode;
this.protocolHandler = new AndroidProtocolHandler(context.getApplicationContext());
this.authorities = authorities;
this.bridge = bridge;
this.jsInjector = jsInjector;
}
private static Uri parseAndVerifyUrl(String url) {
if (url == null) {
return null;
}
Uri uri = Uri.parse(url);
if (uri == null) {
Logger.error("Malformed URL: " + url);
return null;
}
String path = uri.getPath();
if (path == null || path.length() == 0) {
Logger.error("URL does not have a path: " + url);
return null;
}
return uri;
}
/**
* Attempt to retrieve the WebResourceResponse associated with the given <code>request</code>.
* This method should be invoked from within
* {@link android.webkit.WebViewClient#shouldInterceptRequest(android.webkit.WebView,
* android.webkit.WebResourceRequest)}.
*
* @param request the request to process.
* @return a response if the request URL had a matching handler, null if no handler was found.
*/
public WebResourceResponse shouldInterceptRequest(WebResourceRequest request) {
Uri loadingUrl = request.getUrl();
PathHandler handler;
synchronized (uriMatcher) {
handler = (PathHandler) uriMatcher.match(request.getUrl());
}
if (handler == null) {
return null;
}
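        // Serve the request locally when it targets a capacitor file/content
        // URL, or when no external server.url is configured and the host is
        // not covered by allowNavigation; otherwise proxy it to the remote server.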
if (isLocalFile(loadingUrl) || (bridge.getConfig().getString("server.url") == null && !bridge.getAppAllowNavigationMask().matches(loadingUrl.getHost()))) {
Logger.debug("Handling local request: " + request.getUrl().toString());
return handleLocalRequest(request, handler);
} else {
return handleProxyRequest(request, handler);
}
}
private boolean isLocalFile(Uri uri) {
String path = uri.getPath();
if (path.startsWith(capacitorContentStart) || path.startsWith(capacitorFileStart)) {
return true;
}
return false;
}
private WebResourceResponse handleLocalRequest(WebResourceRequest request, PathHandler handler) {
String path = request.getUrl().getPath();
if (request.getRequestHeaders().get("Range") != null) {
InputStream responseStream = new LollipopLazyInputStream(handler, request);
String mimeType = getMimeType(path, responseStream);
Map<String, String> tempResponseHeaders = handler.getResponseHeaders();
int statusCode = 206;
try {
int totalRange = responseStream.available();
String rangeString = request.getRequestHeaders().get("Range");
String[] parts = rangeString.split("=");
String[] streamParts = parts[1].split("-");
String fromRange = streamParts[0];
int range = totalRange-1;
if (streamParts.length > 1) {
range = Integer.parseInt(streamParts[1]);
}
tempResponseHeaders.put("Accept-Ranges", "bytes");
tempResponseHeaders.put("Content-Range", "bytes " + fromRange + "-" + range + "/" + totalRange);
} catch (IOException e) {
statusCode = 404;
}
return new WebResourceResponse(mimeType, handler.getEncoding(),
statusCode, handler.getReasonPhrase(), tempResponseHeaders, responseStream);
}
if (isLocalFile(request.getUrl())) {
InputStream responseStream = new LollipopLazyInputStream(handler, request);
String mimeType = getMimeType(request.getUrl().getPath(), responseStream);
int statusCode = getStatusCode(responseStream, handler.getStatusCode());
return new WebResourceResponse(mimeType, handler.getEncoding(),
statusCode, handler.getReasonPhrase(), handler.getResponseHeaders(), responseStream);
}
if (path.equals("/cordova.js")) {
return new WebResourceResponse("application/javascript", handler.getEncoding(),
handler.getStatusCode(), handler.getReasonPhrase(), handler.getResponseHeaders(), null);
}
if (path.equals("/") || (!request.getUrl().getLastPathSegment().contains(".") && html5mode)) {
InputStream responseStream;
try {
String startPath = this.basePath + "/index.html";
if (isAsset) {
responseStream = protocolHandler.openAsset(startPath);
} else {
responseStream = protocolHandler.openFile(startPath);
}
} catch (IOException e) {
Logger.error("Unable to open index.html", e);
return null;
}
responseStream = jsInjector.getInjectedStream(responseStream);
bridge.reset();
int statusCode = getStatusCode(responseStream, handler.getStatusCode());
return new WebResourceResponse("text/html", handler.getEncoding(),
statusCode, handler.getReasonPhrase(), handler.getResponseHeaders(), responseStream);
}
if ("/favicon.ico".equalsIgnoreCase(path)) {
try {
return new WebResourceResponse("image/png", null, null);
} catch (Exception e) {
Logger.error("favicon handling failed", e);
}
}
int periodIndex = path.lastIndexOf(".");
if (periodIndex >= 0) {
            String ext = path.substring(periodIndex);
InputStream responseStream = new LollipopLazyInputStream(handler, request);
// TODO: Conjure up a bit more subtlety than this
if (ext.equals(".html")) {
responseStream = jsInjector.getInjectedStream(responseStream);
bridge.reset();
}
String mimeType = getMimeType(path, responseStream);
int statusCode = getStatusCode(responseStream, handler.getStatusCode());
return new WebResourceResponse(mimeType, handler.getEncoding(),
statusCode, handler.getReasonPhrase(), handler.getResponseHeaders(), responseStream);
}
return null;
}
/**
* Instead of reading files from the filesystem/assets, proxy through to the URL
* and let an external server handle it.
* @param request
* @param handler
* @return
*/
private WebResourceResponse handleProxyRequest(WebResourceRequest request, PathHandler handler) {
final String method = request.getMethod();
if (method.equals("GET")) {
try {
String url = request.getUrl().toString();
Map<String, String> headers = request.getRequestHeaders();
boolean isHtmlText = false;
for (Map.Entry<String, String> header : headers.entrySet()) {
if (header.getKey().equalsIgnoreCase("Accept") && header.getValue().toLowerCase().contains("text/html")) {
isHtmlText = true;
break;
}
}
if (isHtmlText) {
HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
for (Map.Entry<String, String> header : headers.entrySet()) {
conn.setRequestProperty(header.getKey(), header.getValue());
}
String getCookie = CookieManager.getInstance().getCookie(url);
if (getCookie != null) {
conn.setRequestProperty("Cookie", getCookie);
}
conn.setRequestMethod(method);
conn.setReadTimeout(30 * 1000);
conn.setConnectTimeout(30 * 1000);
String cookie = conn.getHeaderField("Set-Cookie");
if (cookie != null) {
CookieManager.getInstance().setCookie(url, cookie);
}
InputStream responseStream = conn.getInputStream();
responseStream = jsInjector.getInjectedStream(responseStream);
bridge.reset();
return new WebResourceResponse("text/html", handler.getEncoding(),
handler.getStatusCode(), handler.getReasonPhrase(), handler.getResponseHeaders(), responseStream);
}
} catch (SocketTimeoutException ex) {
bridge.handleAppUrlLoadError(ex);
} catch (Exception ex) {
bridge.handleAppUrlLoadError(ex);
}
}
return null;
}
private String getMimeType(String path, InputStream stream) {
String mimeType = null;
try {
mimeType = URLConnection.guessContentTypeFromName(path); // Does not recognize *.js
if (mimeType != null && path.endsWith(".js") && mimeType.equals("image/x-icon")) {
Logger.debug("We shouldn't be here");
}
if (mimeType == null) {
if (path.endsWith(".js") || path.endsWith(".mjs")) {
// Make sure JS files get the proper mimetype to support ES modules
mimeType = "application/javascript";
} else if (path.endsWith(".wasm")) {
mimeType = "application/wasm";
} else {
mimeType = URLConnection.guessContentTypeFromStream(stream);
}
}
} catch (Exception ex) {
Logger.error("Unable to get mime type" + path, ex);
}
return mimeType;
}
private int getStatusCode(InputStream stream, int defaultCode) {
int finalStatusCode = defaultCode;
try {
if (stream.available() == -1) {
finalStatusCode = 404;
}
} catch (IOException e) {
finalStatusCode = 500;
}
return finalStatusCode;
}
/**
* Registers a handler for the given <code>uri</code>. The <code>handler</code> will be invoked
* every time the <code>shouldInterceptRequest</code> method of the instance is called with
* a matching <code>uri</code>.
*
* @param uri the uri to use the handler for. The scheme and authority (domain) will be matched
* exactly. The path may contain a '*' element which will match a single element of
* a path (so a handler registered for /a/* will be invoked for /a/b and /a/c.html
* but not for /a/b/b) or the '**' element which will match any number of path
* elements.
* @param handler the handler to use for the uri.
*/
void register(Uri uri, PathHandler handler) {
synchronized (uriMatcher) {
uriMatcher.addURI(uri.getScheme(), uri.getAuthority(), uri.getPath(), handler);
}
}
/**
* Hosts the application's assets on an https:// URL. Assets from the local path
* <code>assetPath/...</code> will be available under
* <code>https://{uuid}.androidplatform.net/assets/...</code>.
*
* @param assetPath the local path in the application's asset folder which will be made
* available by the server (for example "/www").
* @return prefixes under which the assets are hosted.
*/
public void hostAssets(String assetPath) {
this.isAsset = true;
this.basePath = assetPath;
createHostingDetails();
}
/**
* Hosts the application's files on an https:// URL. Files from the basePath
* <code>basePath/...</code> will be available under
* <code>https://{uuid}.androidplatform.net/...</code>.
*
* @param basePath the local path in the application's data folder which will be made
* available by the server (for example "/www").
* @return prefixes under which the assets are hosted.
*/
public void hostFiles(final String basePath) {
this.isAsset = false;
this.basePath = basePath;
createHostingDetails();
}
private void createHostingDetails() {
final String assetPath = this.basePath;
if (assetPath.indexOf('*') != -1) {
throw new IllegalArgumentException("assetPath cannot contain the '*' character.");
}
PathHandler handler = new PathHandler() {
@Override
public InputStream handle(Uri url) {
InputStream stream = null;
String path = url.getPath();
try {
if (path.startsWith(capacitorContentStart)) {
stream = protocolHandler.openContentUrl(url);
} else if (path.startsWith(capacitorFileStart) || !isAsset) {
if (!path.startsWith(capacitorFileStart)) {
path = basePath + url.getPath();
}
stream = protocolHandler.openFile(path);
} else {
stream = protocolHandler.openAsset(assetPath + path);
}
} catch (IOException e) {
Logger.error("Unable to open asset URL: " + url);
return null;
}
return stream;
}
};
for (String authority: authorities) {
registerUriForScheme(Bridge.CAPACITOR_HTTP_SCHEME, handler, authority);
registerUriForScheme(Bridge.CAPACITOR_HTTPS_SCHEME, handler, authority);
String customScheme = this.bridge.getScheme();
if (!customScheme.equals(Bridge.CAPACITOR_HTTP_SCHEME) && !customScheme.equals(Bridge.CAPACITOR_HTTPS_SCHEME)) {
registerUriForScheme(customScheme, handler, authority);
}
}
}
private void registerUriForScheme(String scheme, PathHandler handler, String authority) {
Uri.Builder uriBuilder = new Uri.Builder();
uriBuilder.scheme(scheme);
uriBuilder.authority(authority);
uriBuilder.path("");
Uri uriPrefix = uriBuilder.build();
register(Uri.withAppendedPath(uriPrefix, "/"), handler);
register(Uri.withAppendedPath(uriPrefix, "**"), handler);
}
/**
* The KitKat WebView reads the InputStream on a separate threadpool. We can use that to
* parallelize loading.
*/
private static abstract class LazyInputStream extends InputStream {
protected final PathHandler handler;
private InputStream is = null;
public LazyInputStream(PathHandler handler) {
this.handler = handler;
}
private InputStream getInputStream() {
if (is == null) {
is = handle();
}
return is;
}
protected abstract InputStream handle();
@Override
public int available() throws IOException {
InputStream is = getInputStream();
return (is != null) ? is.available() : -1;
}
@Override
public int read() throws IOException {
InputStream is = getInputStream();
return (is != null) ? is.read() : -1;
}
@Override
public int read(byte b[]) throws IOException {
InputStream is = getInputStream();
return (is != null) ? is.read(b) : -1;
}
@Override
public int read(byte b[], int off, int len) throws IOException {
InputStream is = getInputStream();
return (is != null) ? is.read(b, off, len) : -1;
}
@Override
public long skip(long n) throws IOException {
InputStream is = getInputStream();
return (is != null) ? is.skip(n) : 0;
}
}
// For L and above.
private static class LollipopLazyInputStream extends LazyInputStream {
private WebResourceRequest request;
private InputStream is;
public LollipopLazyInputStream(PathHandler handler, WebResourceRequest request) {
super(handler);
this.request = request;
}
@Override
protected InputStream handle() {
return handler.handle(request);
}
}
public String getBasePath(){
return this.basePath;
}
}
| 1 | 9,244 | Would this incorrectly trigger if `loadingUrl.getHost() = "something.app"` and `bridge.getHost() = "app"`? | ionic-team-capacitor | js |
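The reviewer's concern, demonstrated with a small Go sketch (names are hypothetical; containsMatch mirrors the patched loadingUrl.getHost().contains(bridge.getHost()) check): substring matching lets "something.app" match a configured host "app", while an exact comparison does not.

package main

import (
	"fmt"
	"strings"
)

// containsMatch mirrors the substring check from the patch.
func containsMatch(loadingHost, bridgeHost string) bool {
	return strings.Contains(loadingHost, bridgeHost)
}

// exactMatch only accepts the configured host itself.
func exactMatch(loadingHost, bridgeHost string) bool {
	return loadingHost == bridgeHost
}

func main() {
	fmt.Println(containsMatch("something.app", "app")) // true: the false positive the reviewer asks about
	fmt.Println(exactMatch("something.app", "app"))    // false
}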
@@ -23,7 +23,11 @@ class AnchorHead(BaseDenseHead, BBoxTestMixin):
anchor_generator (dict): Config dict for anchor generator
bbox_coder (dict): Config of bounding box coder.
reg_decoded_bbox (bool): If true, the regression loss would be
- applied on decoded bounding boxes. Default: False
+ applied directly on decoded bounding boxes, meaning both the
+            predicted boxes and regression targets are in absolute
+            coordinate format. Default: False. Note that it should
+            generally be set to `True` when using `IoULoss`,
+            `GIoULoss`, or `DIoULoss` in the bbox head.
loss_cls (dict): Config of classification loss.
loss_bbox (dict): Config of localization loss.
train_cfg (dict): Training config of anchor head. | 1 | import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmcv.runner import force_fp32
from mmdet.core import (anchor_inside_flags, build_anchor_generator,
build_assigner, build_bbox_coder, build_sampler,
images_to_levels, multi_apply, multiclass_nms, unmap)
from ..builder import HEADS, build_loss
from .base_dense_head import BaseDenseHead
from .dense_test_mixins import BBoxTestMixin
@HEADS.register_module()
class AnchorHead(BaseDenseHead, BBoxTestMixin):
"""Anchor-based head (RPN, RetinaNet, SSD, etc.).
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
feat_channels (int): Number of hidden channels. Used in child classes.
anchor_generator (dict): Config dict for anchor generator
bbox_coder (dict): Config of bounding box coder.
reg_decoded_bbox (bool): If true, the regression loss would be
applied on decoded bounding boxes. Default: False
loss_cls (dict): Config of classification loss.
loss_bbox (dict): Config of localization loss.
train_cfg (dict): Training config of anchor head.
test_cfg (dict): Testing config of anchor head.
""" # noqa: W605
def __init__(self,
num_classes,
in_channels,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
clip_border=True,
target_means=(.0, .0, .0, .0),
target_stds=(1.0, 1.0, 1.0, 1.0)),
reg_decoded_bbox=False,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
train_cfg=None,
test_cfg=None):
super(AnchorHead, self).__init__()
self.in_channels = in_channels
self.num_classes = num_classes
self.feat_channels = feat_channels
self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
# TODO better way to determine whether sample or not
self.sampling = loss_cls['type'] not in [
'FocalLoss', 'GHMC', 'QualityFocalLoss'
]
if self.use_sigmoid_cls:
self.cls_out_channels = num_classes
else:
self.cls_out_channels = num_classes + 1
if self.cls_out_channels <= 0:
raise ValueError(f'num_classes={num_classes} is too small')
self.reg_decoded_bbox = reg_decoded_bbox
self.bbox_coder = build_bbox_coder(bbox_coder)
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if self.train_cfg:
self.assigner = build_assigner(self.train_cfg.assigner)
# use PseudoSampler when sampling is False
if self.sampling and hasattr(self.train_cfg, 'sampler'):
sampler_cfg = self.train_cfg.sampler
else:
sampler_cfg = dict(type='PseudoSampler')
self.sampler = build_sampler(sampler_cfg, context=self)
self.fp16_enabled = False
self.anchor_generator = build_anchor_generator(anchor_generator)
# usually the numbers of anchors for each level are the same
# except SSD detectors
self.num_anchors = self.anchor_generator.num_base_anchors[0]
self._init_layers()
def _init_layers(self):
"""Initialize layers of the head."""
self.conv_cls = nn.Conv2d(self.in_channels,
self.num_anchors * self.cls_out_channels, 1)
self.conv_reg = nn.Conv2d(self.in_channels, self.num_anchors * 4, 1)
def init_weights(self):
"""Initialize weights of the head."""
normal_init(self.conv_cls, std=0.01)
normal_init(self.conv_reg, std=0.01)
def forward_single(self, x):
"""Forward feature of a single scale level.
Args:
x (Tensor): Features of a single scale level.
Returns:
tuple:
cls_score (Tensor): Cls scores for a single scale level \
the channels number is num_anchors * num_classes.
bbox_pred (Tensor): Box energies / deltas for a single scale \
level, the channels number is num_anchors * 4.
"""
cls_score = self.conv_cls(x)
bbox_pred = self.conv_reg(x)
return cls_score, bbox_pred
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: A tuple of classification scores and bbox prediction.
- cls_scores (list[Tensor]): Classification scores for all \
scale levels, each is a 4D-tensor, the channels number \
is num_anchors * num_classes.
- bbox_preds (list[Tensor]): Box energies / deltas for all \
scale levels, each is a 4D-tensor, the channels number \
is num_anchors * 4.
"""
return multi_apply(self.forward_single, feats)
def get_anchors(self, featmap_sizes, img_metas, device='cuda'):
"""Get anchors according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
img_metas (list[dict]): Image meta info.
device (torch.device | str): Device for returned tensors
Returns:
tuple:
anchor_list (list[Tensor]): Anchors of each image.
valid_flag_list (list[Tensor]): Valid flags of each image.
"""
num_imgs = len(img_metas)
# since feature map sizes of all images are the same, we only compute
# anchors for one time
multi_level_anchors = self.anchor_generator.grid_anchors(
featmap_sizes, device)
anchor_list = [multi_level_anchors for _ in range(num_imgs)]
# for each image, we compute valid flags of multi level anchors
valid_flag_list = []
for img_id, img_meta in enumerate(img_metas):
multi_level_flags = self.anchor_generator.valid_flags(
featmap_sizes, img_meta['pad_shape'], device)
valid_flag_list.append(multi_level_flags)
return anchor_list, valid_flag_list
def _get_targets_single(self,
flat_anchors,
valid_flags,
gt_bboxes,
gt_bboxes_ignore,
gt_labels,
img_meta,
label_channels=1,
unmap_outputs=True):
"""Compute regression and classification targets for anchors in a
single image.
Args:
flat_anchors (Tensor): Multi-level anchors of the image, which are
concatenated into a single tensor of shape (num_anchors ,4)
valid_flags (Tensor): Multi level valid flags of the image,
which are concatenated into a single tensor of
shape (num_anchors,).
gt_bboxes (Tensor): Ground truth bboxes of the image,
shape (num_gts, 4).
            gt_bboxes_ignore (Tensor): Ground truth bboxes to be
                ignored, shape (num_ignored_gts, 4).
            img_meta (dict): Meta info of the image.
gt_labels (Tensor): Ground truth labels of each box,
shape (num_gts,).
label_channels (int): Channel of label.
unmap_outputs (bool): Whether to map outputs back to the original
set of anchors.
Returns:
tuple:
                labels (Tensor): Labels of all anchors in the image.
                label_weights (Tensor): Label weights of all anchors.
                bbox_targets (Tensor): BBox targets of all anchors.
                bbox_weights (Tensor): BBox weights of all anchors.
                pos_inds (Tensor): Indices of positive anchors.
                neg_inds (Tensor): Indices of negative anchors.
                sampling_result (:obj:`SamplingResult`): Sampling result.
"""
inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
img_meta['img_shape'][:2],
self.train_cfg.allowed_border)
if not inside_flags.any():
return (None, ) * 7
# assign gt and sample anchors
anchors = flat_anchors[inside_flags, :]
assign_result = self.assigner.assign(
anchors, gt_bboxes, gt_bboxes_ignore,
None if self.sampling else gt_labels)
sampling_result = self.sampler.sample(assign_result, anchors,
gt_bboxes)
num_valid_anchors = anchors.shape[0]
bbox_targets = torch.zeros_like(anchors)
bbox_weights = torch.zeros_like(anchors)
labels = anchors.new_full((num_valid_anchors, ),
self.num_classes,
dtype=torch.long)
label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
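            # When reg_decoded_bbox is True, targets stay as absolute box
            # coordinates (suitable for IoU-style losses); otherwise they are
            # encoded into regression deltas by the bbox coder.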
if not self.reg_decoded_bbox:
pos_bbox_targets = self.bbox_coder.encode(
sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
else:
pos_bbox_targets = sampling_result.pos_gt_bboxes
bbox_targets[pos_inds, :] = pos_bbox_targets
bbox_weights[pos_inds, :] = 1.0
if gt_labels is None:
# Only rpn gives gt_labels as None
# Foreground is the first class since v2.5.0
labels[pos_inds] = 0
else:
labels[pos_inds] = gt_labels[
sampling_result.pos_assigned_gt_inds]
if self.train_cfg.pos_weight <= 0:
label_weights[pos_inds] = 1.0
else:
label_weights[pos_inds] = self.train_cfg.pos_weight
if len(neg_inds) > 0:
label_weights[neg_inds] = 1.0
# map up to original set of anchors
if unmap_outputs:
num_total_anchors = flat_anchors.size(0)
labels = unmap(
labels, num_total_anchors, inside_flags,
fill=self.num_classes) # fill bg label
label_weights = unmap(label_weights, num_total_anchors,
inside_flags)
bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
neg_inds, sampling_result)
def get_targets(self,
anchor_list,
valid_flag_list,
gt_bboxes_list,
img_metas,
gt_bboxes_ignore_list=None,
gt_labels_list=None,
label_channels=1,
unmap_outputs=True,
return_sampling_results=False):
"""Compute regression and classification targets for anchors in
multiple images.
Args:
anchor_list (list[list[Tensor]]): Multi level anchors of each
image. The outer list indicates images, and the inner list
corresponds to feature levels of the image. Each element of
the inner list is a tensor of shape (num_anchors, 4).
valid_flag_list (list[list[Tensor]]): Multi level valid flags of
each image. The outer list indicates images, and the inner list
corresponds to feature levels of the image. Each element of
the inner list is a tensor of shape (num_anchors, )
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
img_metas (list[dict]): Meta info of each image.
gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be
ignored.
gt_labels_list (list[Tensor]): Ground truth labels of each box.
label_channels (int): Channel of label.
unmap_outputs (bool): Whether to map outputs back to the original
set of anchors.
Returns:
tuple: Usually returns a tuple containing learning targets.
- labels_list (list[Tensor]): Labels of each level.
- label_weights_list (list[Tensor]): Label weights of each \
level.
- bbox_targets_list (list[Tensor]): BBox targets of each level.
- bbox_weights_list (list[Tensor]): BBox weights of each level.
- num_total_pos (int): Number of positive samples in all \
images.
- num_total_neg (int): Number of negative samples in all \
images.
additional_returns: This function enables user-defined returns from
`self._get_targets_single`. These returns are currently refined
to properties at each feature map (i.e. having HxW dimension).
The results will be concatenated after the end
"""
num_imgs = len(img_metas)
assert len(anchor_list) == len(valid_flag_list) == num_imgs
# anchor number of multi levels
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
# concat all level anchors to a single tensor
concat_anchor_list = []
concat_valid_flag_list = []
for i in range(num_imgs):
assert len(anchor_list[i]) == len(valid_flag_list[i])
concat_anchor_list.append(torch.cat(anchor_list[i]))
concat_valid_flag_list.append(torch.cat(valid_flag_list[i]))
# compute targets for each image
if gt_bboxes_ignore_list is None:
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
if gt_labels_list is None:
gt_labels_list = [None for _ in range(num_imgs)]
results = multi_apply(
self._get_targets_single,
concat_anchor_list,
concat_valid_flag_list,
gt_bboxes_list,
gt_bboxes_ignore_list,
gt_labels_list,
img_metas,
label_channels=label_channels,
unmap_outputs=unmap_outputs)
(all_labels, all_label_weights, all_bbox_targets, all_bbox_weights,
pos_inds_list, neg_inds_list, sampling_results_list) = results[:7]
rest_results = list(results[7:]) # user-added return values
# no valid anchors
if any([labels is None for labels in all_labels]):
return None
# sampled anchors of all images
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
# split targets to a list w.r.t. multiple levels
labels_list = images_to_levels(all_labels, num_level_anchors)
label_weights_list = images_to_levels(all_label_weights,
num_level_anchors)
bbox_targets_list = images_to_levels(all_bbox_targets,
num_level_anchors)
bbox_weights_list = images_to_levels(all_bbox_weights,
num_level_anchors)
res = (labels_list, label_weights_list, bbox_targets_list,
bbox_weights_list, num_total_pos, num_total_neg)
if return_sampling_results:
res = res + (sampling_results_list, )
for i, r in enumerate(rest_results): # user-added return values
rest_results[i] = images_to_levels(r, num_level_anchors)
return res + tuple(rest_results)
def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights,
bbox_targets, bbox_weights, num_total_samples):
"""Compute loss of a single scale level.
Args:
cls_score (Tensor): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W).
bbox_pred (Tensor): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W).
anchors (Tensor): Box reference for each scale level with shape
(N, num_total_anchors, 4).
labels (Tensor): Labels of each anchors with shape
(N, num_total_anchors).
label_weights (Tensor): Label weights of each anchor with shape
(N, num_total_anchors)
            bbox_targets (Tensor): BBox regression targets of each anchor with
shape (N, num_total_anchors, 4).
bbox_weights (Tensor): BBox regression loss weights of each anchor
with shape (N, num_total_anchors, 4).
num_total_samples (int): If sampling, num total samples equal to
the number of total anchors; Otherwise, it is the number of
positive anchors.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
# classification loss
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
cls_score = cls_score.permute(0, 2, 3,
1).reshape(-1, self.cls_out_channels)
loss_cls = self.loss_cls(
cls_score, labels, label_weights, avg_factor=num_total_samples)
# regression loss
bbox_targets = bbox_targets.reshape(-1, 4)
bbox_weights = bbox_weights.reshape(-1, 4)
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
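        # When reg_decoded_bbox is True, decode predictions back into absolute
        # boxes so the regression loss (e.g. GIoULoss) compares boxes rather
        # than encoded deltas.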
if self.reg_decoded_bbox:
anchors = anchors.reshape(-1, 4)
bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)
loss_bbox = self.loss_bbox(
bbox_pred,
bbox_targets,
bbox_weights,
avg_factor=num_total_samples)
return loss_cls, loss_bbox
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def loss(self,
cls_scores,
bbox_preds,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
"""Compute losses of the head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W)
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W)
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss. Default: None
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == self.anchor_generator.num_levels
device = cls_scores[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, img_metas, device=device)
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
cls_reg_targets = self.get_targets(
anchor_list,
valid_flag_list,
gt_bboxes,
img_metas,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
label_channels=label_channels)
if cls_reg_targets is None:
return None
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
num_total_pos, num_total_neg) = cls_reg_targets
num_total_samples = (
num_total_pos + num_total_neg if self.sampling else num_total_pos)
# anchor number of multi levels
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
# concat all level anchors and flags to a single tensor
concat_anchor_list = []
for i in range(len(anchor_list)):
concat_anchor_list.append(torch.cat(anchor_list[i]))
all_anchor_list = images_to_levels(concat_anchor_list,
num_level_anchors)
losses_cls, losses_bbox = multi_apply(
self.loss_single,
cls_scores,
bbox_preds,
all_anchor_list,
labels_list,
label_weights_list,
bbox_targets_list,
bbox_weights_list,
num_total_samples=num_total_samples)
return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def get_bboxes(self,
cls_scores,
bbox_preds,
img_metas,
cfg=None,
rescale=False,
with_nms=True):
"""Transform network output for a batch into bbox predictions.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W)
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W)
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
cfg (mmcv.Config | None): Test / postprocessing configuration,
if None, test_cfg would be used
rescale (bool): If True, return boxes in original image space.
Default: False.
with_nms (bool): If True, do nms before return boxes.
Default: True.
Returns:
list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
The first item is an (n, 5) tensor, where the first 4 columns
are bounding box positions (tl_x, tl_y, br_x, br_y) and the
5-th column is a score between 0 and 1. The second item is a
(n,) tensor where each item is the predicted class labelof the
corresponding box.
Example:
>>> import mmcv
>>> self = AnchorHead(
>>> num_classes=9,
>>> in_channels=1,
>>> anchor_generator=dict(
>>> type='AnchorGenerator',
>>> scales=[8],
>>> ratios=[0.5, 1.0, 2.0],
>>> strides=[4,]))
>>> img_metas = [{'img_shape': (32, 32, 3), 'scale_factor': 1}]
>>> cfg = mmcv.Config(dict(
>>> score_thr=0.00,
>>> nms=dict(type='nms', iou_thr=1.0),
>>> max_per_img=10))
>>> feat = torch.rand(1, 1, 3, 3)
>>> cls_score, bbox_pred = self.forward_single(feat)
>>> # note the input lists are over different levels, not images
>>> cls_scores, bbox_preds = [cls_score], [bbox_pred]
>>> result_list = self.get_bboxes(cls_scores, bbox_preds,
>>> img_metas, cfg)
>>> det_bboxes, det_labels = result_list[0]
>>> assert len(result_list) == 1
>>> assert det_bboxes.shape[1] == 5
>>> assert len(det_bboxes) == len(det_labels) == cfg.max_per_img
"""
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
device = cls_scores[0].device
featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
mlvl_anchors = self.anchor_generator.grid_anchors(
featmap_sizes, device=device)
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
if with_nms:
# some heads don't support with_nms argument
proposals = self._get_bboxes_single(cls_score_list,
bbox_pred_list,
mlvl_anchors, img_shape,
scale_factor, cfg, rescale)
else:
proposals = self._get_bboxes_single(cls_score_list,
bbox_pred_list,
mlvl_anchors, img_shape,
scale_factor, cfg, rescale,
with_nms)
result_list.append(proposals)
return result_list
def _get_bboxes_single(self,
cls_score_list,
bbox_pred_list,
mlvl_anchors,
img_shape,
scale_factor,
cfg,
rescale=False,
with_nms=True):
"""Transform outputs for a single batch item into bbox predictions.
Args:
            cls_score_list (list[Tensor]): Box scores for a single scale level.
Has shape (num_anchors * num_classes, H, W).
bbox_pred_list (list[Tensor]): Box energies / deltas for a single
scale level with shape (num_anchors * 4, H, W).
mlvl_anchors (list[Tensor]): Box reference for a single scale level
with shape (num_total_anchors, 4).
img_shape (tuple[int]): Shape of the input image,
(height, width, 3).
            scale_factor (ndarray): Scale factor of the image arranged as
(w_scale, h_scale, w_scale, h_scale).
cfg (mmcv.Config): Test / postprocessing configuration,
if None, test_cfg would be used.
rescale (bool): If True, return boxes in original image space.
Default: False.
with_nms (bool): If True, do nms before return boxes.
Default: True.
Returns:
Tensor: Labeled boxes in shape (n, 5), where the first 4 columns
are bounding box positions (tl_x, tl_y, br_x, br_y) and the
5-th column is a score between 0 and 1.
"""
cfg = self.test_cfg if cfg is None else cfg
assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors)
mlvl_bboxes = []
mlvl_scores = []
for cls_score, bbox_pred, anchors in zip(cls_score_list,
bbox_pred_list, mlvl_anchors):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
cls_score = cls_score.permute(1, 2,
0).reshape(-1, self.cls_out_channels)
if self.use_sigmoid_cls:
scores = cls_score.sigmoid()
else:
scores = cls_score.softmax(-1)
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
# Get maximum scores for foreground classes.
if self.use_sigmoid_cls:
max_scores, _ = scores.max(dim=1)
else:
                    # Remember that we set FG labels to [0, num_class-1]
# since mmdet v2.0
# BG cat_id: num_class
max_scores, _ = scores[:, :-1].max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
anchors = anchors[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
bboxes = self.bbox_coder.decode(
anchors, bbox_pred, max_shape=img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
if self.use_sigmoid_cls:
# Add a dummy background class to the backend when using sigmoid
            # Remember that we set FG labels to [0, num_class-1] since mmdet v2.0
# BG cat_id: num_class
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
if with_nms:
det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
cfg.score_thr, cfg.nms,
cfg.max_per_img)
return det_bboxes, det_labels
else:
return mlvl_bboxes, mlvl_scores
def aug_test(self, feats, img_metas, rescale=False):
"""Test function with test time augmentation.
Args:
feats (list[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains features for all images in the batch.
img_metas (list[list[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch. each dict has image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[ndarray]: bbox results of each class
"""
return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
| 1 | 22,289 | Note generally it -> It | open-mmlab-mmdetection | py |
@@ -81,8 +81,7 @@ public abstract class ConfigSetService {
) ? false: true;
SolrConfig solrConfig = createSolrConfig(dcore, coreLoader, trusted);
- IndexSchema indexSchema = createIndexSchema(dcore, solrConfig, false);
- return new ConfigSet(configSetName(dcore), solrConfig, force -> indexSchema, properties, trusted);
+ return new ConfigSet(configSetName(dcore), solrConfig, force -> createIndexSchema(dcore, solrConfig, force), properties, trusted);
} catch (Exception e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Could not load conf for core " + dcore.getName() + | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.core;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import org.apache.solr.cloud.CloudConfigSetService;
import org.apache.solr.cloud.ZkController;
import org.apache.solr.cloud.ZkSolrResourceLoader;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.schema.IndexSchemaFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Service class used by the CoreContainer to load ConfigSets for use in SolrCore
* creation.
*/
public abstract class ConfigSetService {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
public static ConfigSetService createConfigSetService(NodeConfig nodeConfig, SolrResourceLoader loader, ZkController zkController) {
if (zkController == null) {
return new Standalone(loader, nodeConfig.hasSchemaCache(), nodeConfig.getConfigSetBaseDirectory());
} else {
return new CloudConfigSetService(loader, nodeConfig.hasSchemaCache(), zkController);
}
}
protected final SolrResourceLoader parentLoader;
  /** Optional cache of schemas, keyed by a concatenation of identifying values */
private final Cache<String, IndexSchema> schemaCache;
/**
* Load the ConfigSet for a core
* @param dcore the core's CoreDescriptor
* @return a ConfigSet
*/
@SuppressWarnings({"rawtypes"})
public final ConfigSet loadConfigSet(CoreDescriptor dcore) {
SolrResourceLoader coreLoader = createCoreResourceLoader(dcore);
try {
// ConfigSet properties are loaded from ConfigSetProperties.DEFAULT_FILENAME file.
NamedList properties = loadConfigSetProperties(dcore, coreLoader);
// ConfigSet flags are loaded from the metadata of the ZK node of the configset.
NamedList flags = loadConfigSetFlags(dcore, coreLoader);
boolean trusted =
(coreLoader instanceof ZkSolrResourceLoader
&& flags != null
&& flags.get("trusted") != null
&& !flags.getBooleanArg("trusted")
) ? false: true;
SolrConfig solrConfig = createSolrConfig(dcore, coreLoader, trusted);
IndexSchema indexSchema = createIndexSchema(dcore, solrConfig, false);
return new ConfigSet(configSetName(dcore), solrConfig, force -> indexSchema, properties, trusted);
} catch (Exception e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Could not load conf for core " + dcore.getName() +
": " + e.getMessage(), e);
}
}
/**
* Create a new ConfigSetService
* @param loader the CoreContainer's resource loader
* @param shareSchema should we share the IndexSchema among cores of same config?
*/
public ConfigSetService(SolrResourceLoader loader, boolean shareSchema) {
this.parentLoader = loader;
this.schemaCache = shareSchema ? Caffeine.newBuilder().weakValues().build() : null;
}
/**
* Create a SolrConfig object for a core
* @param cd the core's CoreDescriptor
* @param loader the core's resource loader
* @param isTrusted is the configset trusted?
* @return a SolrConfig object
*/
protected SolrConfig createSolrConfig(CoreDescriptor cd, SolrResourceLoader loader, boolean isTrusted) {
return SolrConfig.readFromResourceLoader(loader, cd.getConfigName(), isTrusted, cd.getSubstitutableProperties());
}
/**
* Create an IndexSchema object for a core. It might be a cached lookup.
* @param cd the core's CoreDescriptor
* @param solrConfig the core's SolrConfig
* @return an IndexSchema
*/
protected IndexSchema createIndexSchema(CoreDescriptor cd, SolrConfig solrConfig, boolean forceFetch) {
// This is the schema name from the core descriptor. Sometimes users specify a custom schema file.
// Important: indexSchemaFactory.create wants this!
String cdSchemaName = cd.getSchemaName();
// This is the schema name that we think will actually be used. In the case of a managed schema,
    // we don't know for sure without examining what files exist in the configSet, and we don't
// want to pay the overhead of that at this juncture. If we guess wrong, no schema sharing.
// The fix is usually to name your schema managed-schema instead of schema.xml.
IndexSchemaFactory indexSchemaFactory = IndexSchemaFactory.newIndexSchemaFactory(solrConfig);
String configSet = cd.getConfigSet();
if (configSet != null && schemaCache != null) {
String guessSchemaName = indexSchemaFactory.getSchemaResourceName(cdSchemaName);
Long modVersion = getCurrentSchemaModificationVersion(configSet, solrConfig, guessSchemaName);
if (modVersion != null) {
// note: luceneMatchVersion influences the schema
String cacheKey = configSet + "/" + guessSchemaName + "/" + modVersion + "/" + solrConfig.luceneMatchVersion;
return schemaCache.get(cacheKey,
(key) -> indexSchemaFactory.create(cdSchemaName, solrConfig));
} else {
log.warn("Unable to get schema modification version, configSet={} schema={}", configSet, guessSchemaName);
// see explanation above; "guessSchema" is a guess
}
}
return indexSchemaFactory.create(cdSchemaName, solrConfig);
}
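  // Illustrative sketch, not part of the original file: with schema caching
  // enabled, the cache key built above concatenates the configset name, the
  // guessed schema resource name, the modification version and the
  // luceneMatchVersion, e.g. "myConf/managed-schema/1583932800000/9.0.0",
  // so any change to the schema file (new mod time) yields a fresh cache entry.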
/**
* Returns a modification version for the schema file.
* Null may be returned if not known, and if so it defeats schema caching.
*/
protected abstract Long getCurrentSchemaModificationVersion(String configSet, SolrConfig solrConfig, String schemaFile);
/**
* Return the ConfigSet properties or null if none.
* @see ConfigSetProperties
* @param cd the core's CoreDescriptor
* @param loader the core's resource loader
* @return the ConfigSet properties
*/
@SuppressWarnings({"rawtypes"})
protected NamedList loadConfigSetProperties(CoreDescriptor cd, SolrResourceLoader loader) {
return ConfigSetProperties.readFromResourceLoader(loader, cd.getConfigSetPropertiesName());
}
/**
* Return the ConfigSet flags or null if none.
*/
// TODO should fold into configSetProps -- SOLR-14059
@SuppressWarnings({"rawtypes"})
protected NamedList loadConfigSetFlags(CoreDescriptor cd, SolrResourceLoader loader) {
return null;
}
/**
* Create a SolrResourceLoader for a core
* @param cd the core's CoreDescriptor
* @return a SolrResourceLoader
*/
protected abstract SolrResourceLoader createCoreResourceLoader(CoreDescriptor cd);
/**
* Return a name for the ConfigSet for a core to be used for printing/diagnostic purposes.
* @param cd the core's CoreDescriptor
* @return a name for the core's ConfigSet
*/
public abstract String configSetName(CoreDescriptor cd);
/**
* The Solr standalone version of ConfigSetService.
*
* Loads a ConfigSet defined by the core's configSet property,
* looking for a directory named for the configSet property value underneath
* a base directory. If no configSet property is set, loads the ConfigSet
* instead from the core's instance directory.
*/
public static class Standalone extends ConfigSetService {
private final Path configSetBase;
public Standalone(SolrResourceLoader loader, boolean shareSchema, Path configSetBase) {
super(loader, shareSchema);
this.configSetBase = configSetBase;
}
@Override
public SolrResourceLoader createCoreResourceLoader(CoreDescriptor cd) {
Path instanceDir = locateInstanceDir(cd);
SolrResourceLoader solrResourceLoader = new SolrResourceLoader(instanceDir, parentLoader.getClassLoader());
return solrResourceLoader;
}
@Override
public String configSetName(CoreDescriptor cd) {
return (cd.getConfigSet() == null ? "instancedir " : "configset ") + locateInstanceDir(cd);
}
protected Path locateInstanceDir(CoreDescriptor cd) {
String configSet = cd.getConfigSet();
if (configSet == null)
return cd.getInstanceDir();
Path configSetDirectory = configSetBase.resolve(configSet);
if (!Files.isDirectory(configSetDirectory))
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Could not load configuration from directory " + configSetDirectory);
return configSetDirectory;
}
@Override
protected Long getCurrentSchemaModificationVersion(String configSet, SolrConfig solrConfig, String schemaFileName) {
Path schemaFile = Paths.get(solrConfig.getResourceLoader().getConfigDir()).resolve(schemaFileName);
try {
return Files.getLastModifiedTime(schemaFile).toMillis();
} catch (FileNotFoundException e) {
return null; // acceptable
} catch (IOException e) {
log.warn("Unexpected exception when getting modification time of {}", schemaFile, e);
return null; // debatable; we'll see an error soon if there's a real problem
}
}
}
}
| 1 | 36,690 | Does this mean that every call to `configSet.getIndexSchema()` will create a new schema object (if shared schema is not enabled)? | apache-lucene-solr | java |
@@ -1,12 +1,11 @@
/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
+ * Copyright 2017 Huawei Technologies Co., Ltd
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.servicecomb.common.rest.codec.param;
import org.junit.Assert;
import org.junit.Test;
import io.servicecomb.common.rest.codec.param.QueryProcessorCreator.QueryProcessor;
import io.swagger.models.parameters.Parameter;
import io.swagger.models.parameters.QueryParameter;
public class TestQueryProcessorCreator {
@Test
public void testCreate() {
ParamValueProcessorCreator creator =
ParamValueProcessorCreatorManager.INSTANCE.findValue(QueryProcessorCreator.PARAMTYPE);
Parameter parameter = new QueryParameter();
parameter.setName("query");
ParamValueProcessor processor = creator.create(parameter, String.class);
Assert.assertEquals(QueryProcessor.class, processor.getClass());
}
}
| 1 | 8,070 | License header should use the Apache one. | apache-servicecomb-java-chassis | java |
@@ -107,6 +107,8 @@ func (globber *Globber) Glob(rootPath string, includes, excludes []string, inclu
matches, err := globber.glob(rootPath, include, excludes, includeHidden, includeSymlinks)
if err != nil {
panic(fmt.Errorf("error globbing files with %v: %v", include, err))
+ } else if len(matches) == 0 {
+ panic(fmt.Errorf("No matches found for this glob"))
}
// Remove the root path from the returned files and add them to the output
for _, filename := range matches { | 1 | package fs
import (
"fmt"
"path"
"path/filepath"
"regexp"
"strings"
)
type matcher interface {
Match(name string) (bool, error)
}
type builtInGlob string
func (p builtInGlob) Match(name string) (bool, error) {
matched, err := filepath.Match(string(p), name)
if err != nil {
return false, fmt.Errorf("failed to glob, invalid patern: %v, %w", string(p), err)
}
return matched, nil
}
type regexGlob struct {
regex *regexp.Regexp
}
func (r regexGlob) Match(name string) (bool, error) {
return r.regex.Match([]byte(name)), nil
}
// This converts the string pattern into a matcher. A matcher can either be one of our homebrew compiled
// regexes that support ** or a matcher that uses the built-in filepath.Match functionality.
func patternToMatcher(root, pattern string) (matcher, error) {
fullPattern := filepath.Join(root, pattern)
// Use the built in filesystem.Match globs when not using double star as it's far more efficient
if !strings.Contains(pattern, "**") {
return builtInGlob(fullPattern), nil
}
regex, err := regexp.Compile(toRegexString(fullPattern))
if err != nil {
return nil, fmt.Errorf("failed to compile glob pattern %s, %w", pattern, err)
}
return regexGlob{regex: regex}, nil
}
func toRegexString(pattern string) string {
pattern = "^" + pattern + "$"
pattern = strings.ReplaceAll(pattern, "+", "\\+") // escape +
pattern = strings.ReplaceAll(pattern, ".", "\\.") // escape .
pattern = strings.ReplaceAll(pattern, "?", ".") // match ? as any single char
pattern = strings.ReplaceAll(pattern, "*", "[^/]*") // handle single (all) * components
pattern = strings.ReplaceAll(pattern, "[^/]*[^/]*", ".*") // handle ** components
pattern = strings.ReplaceAll(pattern, "/.*/", "/(.*/)?") // Allow ** to match zero directories
return pattern
}
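// Illustrative only, not in the original source: the rewrite steps above turn
// a pattern such as "src/**/*.go" into the regex
//   ^src/(.*/)?[^/]*\.go$
// so "src/a/b/c.go" and "src/c.go" both match, while "src/c.gox" does not.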
// IsGlob returns true if the given pattern requires globbing (i.e. contains characters that would be expanded by it)
func IsGlob(pattern string) bool {
return strings.ContainsAny(pattern, "*?[")
}
// Glob implements matching using Go's built-in filepath.Glob, but extends it to support
// Ant-style patterns using **.
func Glob(buildFileNames []string, rootPath string, includes, excludes []string, includeHidden bool) []string {
return NewGlobber(buildFileNames).Glob(rootPath, includes, excludes, includeHidden, true)
}
// A Globber is used to implement Glob. You can persist one to save repeated filesystem calls, but
// it isn't safe for use in concurrent goroutines.
type Globber struct {
buildFileNames []string
walkedDirs map[string]walkedDir
}
type walkedDir struct {
fileNames, symlinks, subPackages []string
}
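// Match reports whether path matches the given glob pattern.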
func Match(glob, path string) (bool, error) {
matcher, err := patternToMatcher(".", glob)
if err != nil {
return false, err
}
return matcher.Match(path)
}
// NewGlobber creates a new Globber. You should call this rather than creating one directly (or use Glob() if you don't care).
func NewGlobber(buildFileNames []string) *Globber {
return &Globber{
buildFileNames: buildFileNames,
walkedDirs: map[string]walkedDir{},
}
}
func (globber *Globber) Glob(rootPath string, includes, excludes []string, includeHidden, includeSymlinks bool) []string {
if rootPath == "" {
rootPath = "."
}
var filenames []string
for _, include := range includes {
mustBeValidGlobString(include)
matches, err := globber.glob(rootPath, include, excludes, includeHidden, includeSymlinks)
if err != nil {
panic(fmt.Errorf("error globbing files with %v: %v", include, err))
}
// Remove the root path from the returned files and add them to the output
for _, filename := range matches {
filenames = append(filenames, strings.TrimPrefix(filename, rootPath+"/"))
}
}
return filenames
}
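// Illustrative only, not in the original source: a typical call looks like
//   NewGlobber([]string{"BUILD"}).Glob("src/core", []string{"**/*.go"},
//       []string{"*_test.go"}, false, true)
// which returns the non-test .go files under src/core, with paths relative to it.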
func (globber *Globber) glob(rootPath string, glob string, excludes []string, includeHidden, includeSymlinks bool) ([]string, error) {
p, err := patternToMatcher(rootPath, glob)
if err != nil {
return nil, err
}
walkedDir, err := globber.walkDir(rootPath)
if err != nil {
return nil, err
}
var globMatches []string
fileNames := walkedDir.fileNames
if includeSymlinks {
fileNames = append(fileNames, walkedDir.symlinks...)
}
for _, name := range fileNames {
if match, err := p.Match(name); err != nil {
return nil, err
} else if match {
globMatches = append(globMatches, name)
}
}
matches := make([]string, 0, len(globMatches))
for _, m := range globMatches {
if isInDirectories(m, walkedDir.subPackages) {
continue
}
if !includeHidden && isHidden(m) {
continue
}
shouldExclude, err := shouldExcludeMatch(rootPath, m, excludes)
if err != nil {
return nil, err
}
if shouldExclude {
continue
}
matches = append(matches, m)
}
return matches, nil
}
func (globber *Globber) walkDir(rootPath string) (walkedDir, error) {
if dir, present := globber.walkedDirs[rootPath]; present {
return dir, nil
}
dir := walkedDir{}
if err := WalkMode(rootPath, func(name string, mode Mode) error {
if isBuildFile(globber.buildFileNames, name) {
packageName := filepath.Dir(name)
if packageName != rootPath {
dir.subPackages = append(dir.subPackages, packageName)
return filepath.SkipDir
}
}
// Exclude plz-out
if name == "plz-out" && rootPath == "." {
return filepath.SkipDir
}
if mode.IsSymlink() {
dir.symlinks = append(dir.symlinks, name)
} else {
dir.fileNames = append(dir.fileNames, name)
}
return nil
}); err != nil {
return dir, err
}
globber.walkedDirs[rootPath] = dir
return dir, nil
}
func mustBeValidGlobString(glob string) {
if glob == "" {
panic("cannot use an empty string as a glob")
}
}
func isBasePathOf(path string, base string) bool {
if !strings.HasPrefix(path, base) {
return false
}
rest := strings.TrimPrefix(path, base)
return rest == "" || rest[0] == filepath.Separator
}
// shouldExcludeMatch checks if the match also matches any of the exclude patterns. If the exclude pattern is a relative
// pattern i.e. doesn't contain any /'s, then the pattern is checked against the file name part only. Otherwise the
// pattern is checked against the whole path. This is so `glob(["**/*.go"], exclude = ["*_test.go"])` will match as
// you'd expect.
func shouldExcludeMatch(root, match string, excludes []string) (bool, error) {
for _, excl := range excludes {
mustBeValidGlobString(excl)
rootPath := root
m := match
		if isBasePathOf(match, filepath.Join(root, excl)) {
return true, nil
}
// If the exclude pattern doesn't contain any slashes and the match does, we only match against the base of the
// match path.
if strings.ContainsRune(match, '/') && !strings.ContainsRune(excl, '/') {
m = path.Base(match)
rootPath = ""
}
matcher, err := patternToMatcher(rootPath, excl)
if err != nil {
return false, err
}
match, err := matcher.Match(m)
if err != nil {
return false, err
}
if match {
return true, nil
}
}
return false, nil
}
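// Illustrative only, not in the original source: for root "src" and match
// "src/pkg/foo_test.go", the exclude "*_test.go" contains no slash, so it is
// checked against the base name "foo_test.go" and the match is excluded;
// an exclude like "pkg/**" would instead be matched against the full path.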
// isBuildFile checks if the filename is considered a build filename
func isBuildFile(buildFileNames []string, name string) bool {
fileName := filepath.Base(name)
for _, buildFileName := range buildFileNames {
if fileName == buildFileName {
return true
}
}
return false
}
// isInDirectories checks to see if the file is in any of the provided directories
func isInDirectories(name string, directories []string) bool {
for _, dir := range directories {
if strings.HasPrefix(name, dir+"/") || name == dir {
return true
}
}
return false
}
// isHidden checks if the file is a hidden file, i.e. it starts with '.', or starts and ends with '#'.
func isHidden(name string) bool {
file := filepath.Base(name)
return strings.HasPrefix(file, ".") || (strings.HasPrefix(file, "#") && strings.HasSuffix(file, "#"))
}
| 1 | 10,268 | I think this makes more sense to be applied within the parser code where we evaluate the `glob` builtin? This function looks generic in the sense of where it can be used, and perhaps no matches is fine in other cases? | thought-machine-please | go |
@@ -226,6 +226,14 @@ func NewConfig(ec2client ec2.EC2MetadataClient) (*Config, error) {
}
config := &envConfig
+ if config.OnPrem.Enabled() {
+ if config.AWSRegion == "" {
+ return nil, errors.New("AWS_DEFAULT_REGION has to be set when running on-premises")
+ }
+ // Use fake ec2 metadata client if on prem config is set.
+ ec2client = ec2.NewBlackholeEC2MetadataClient()
+ }
+
if config.complete() {
// No need to do file / network IO
return config, nil | 1 | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package config
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"reflect"
"strings"
"time"
apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors"
"github.com/aws/amazon-ecs-agent/agent/dockerclient"
"github.com/aws/amazon-ecs-agent/agent/ec2"
"github.com/aws/amazon-ecs-agent/agent/utils"
"github.com/cihub/seelog"
)
const (
// http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
DockerReservedPort = 2375
DockerReservedSSLPort = 2376
	// DockerTagSeparator is the character used to separate an image name and tag in docker
DockerTagSeparator = ":"
// DefaultDockerTag is the default tag used by docker
DefaultDockerTag = "latest"
SSHPort = 22
// AgentIntrospectionPort is used to serve the metadata about the agent and to query the tasks being managed by the agent.
AgentIntrospectionPort = 51678
// AgentCredentialsPort is used to serve the credentials for tasks.
AgentCredentialsPort = 51679
// AgentPrometheusExpositionPort is used to expose Prometheus metrics that can be scraped by a Prometheus server
AgentPrometheusExpositionPort = 51680
// defaultConfigFileName is the default (json-formatted) config file
defaultConfigFileName = "/etc/ecs_container_agent/config.json"
// DefaultClusterName is the name of the default cluster.
DefaultClusterName = "default"
// DefaultTaskCleanupWaitDuration specifies the default value for task cleanup duration. It is used to
// clean up task's containers.
DefaultTaskCleanupWaitDuration = 3 * time.Hour
// DefaultPollingMetricsWaitDuration specifies the default value for polling metrics wait duration
// This is only used when PollMetrics is set to true
DefaultPollingMetricsWaitDuration = DefaultContainerMetricsPublishInterval / 2
// defaultDockerStopTimeout specifies the value for container stop timeout duration
defaultDockerStopTimeout = 30 * time.Second
// DefaultImageCleanupTimeInterval specifies the default value for image cleanup duration. It is used to
// remove the images pulled by agent.
DefaultImageCleanupTimeInterval = 30 * time.Minute
// DefaultNumImagesToDeletePerCycle specifies the default number of images to delete when agent performs
// image cleanup.
DefaultNumImagesToDeletePerCycle = 5
// DefaultNumNonECSContainersToDeletePerCycle specifies the default number of nonecs containers to delete when agent performs
// nonecs containers cleanup.
DefaultNumNonECSContainersToDeletePerCycle = 5
// DefaultImageDeletionAge specifies the default value for minimum amount of elapsed time after an image
// has been pulled before it can be deleted.
DefaultImageDeletionAge = 1 * time.Hour
// DefaultNonECSImageDeletionAge specifies the default value for minimum amount of elapsed time after an image
// has been created before it can be deleted
DefaultNonECSImageDeletionAge = 1 * time.Hour
//DefaultImagePullTimeout specifies the timeout for PullImage API.
DefaultImagePullTimeout = 2 * time.Hour
// minimumTaskCleanupWaitDuration specifies the minimum duration to wait before cleaning up
// a task's container. This is used to enforce sane values for the config.TaskCleanupWaitDuration field.
minimumTaskCleanupWaitDuration = 1 * time.Minute
	// minimumImagePullInactivityTimeout specifies the minimum amount of time that an image can be
// 'stuck' in the pull / unpack step. Very small values are unsafe and lead to high failure rate.
minimumImagePullInactivityTimeout = 1 * time.Minute
// minimumPollingMetricsWaitDuration specifies the minimum duration to wait before polling for new stats
// from docker. This is only used when PollMetrics is set to true
minimumPollingMetricsWaitDuration = 5 * time.Second
// maximumPollingMetricsWaitDuration specifies the maximum duration to wait before polling for new stats
// from docker. This is only used when PollMetrics is set to true
maximumPollingMetricsWaitDuration = DefaultContainerMetricsPublishInterval
// minimumDockerStopTimeout specifies the minimum value for docker StopContainer API
minimumDockerStopTimeout = 1 * time.Second
// minimumImageCleanupInterval specifies the minimum time for agent to wait before performing
// image cleanup.
minimumImageCleanupInterval = 10 * time.Minute
	// minimumNumImagesToDeletePerCycle specifies the minimum number of images to be deleted when
// performing image cleanup.
minimumNumImagesToDeletePerCycle = 1
// defaultCNIPluginsPath is the default path where cni binaries are located
defaultCNIPluginsPath = "/amazon-ecs-cni-plugins"
// DefaultMinSupportedCNIVersion denotes the minimum version of cni spec required
DefaultMinSupportedCNIVersion = "0.3.0"
// pauseContainerTarball is the path to the pause container tarball
pauseContainerTarballPath = "/images/amazon-ecs-pause.tar"
	// DefaultTaskMetadataSteadyStateRate is set as 40. This is derived from our benchmarking
// results where task endpoint can handle 4000 rps effectively. Here, 100 containers
// will be able to send out 40 rps.
DefaultTaskMetadataSteadyStateRate = 40
// DefaultTaskMetadataBurstRate is set to handle 60 burst requests at once
DefaultTaskMetadataBurstRate = 60
//Known cached image names
CachedImageNameAgentContainer = "amazon/amazon-ecs-agent:latest"
// DefaultNvidiaRuntime is the name of the runtime to pass Nvidia GPUs to containers
DefaultNvidiaRuntime = "nvidia"
	// defaultCgroupCPUPeriod is set to 100 ms to set the CFS period and quota for task limits
defaultCgroupCPUPeriod = 100 * time.Millisecond
maximumCgroupCPUPeriod = 100 * time.Millisecond
minimumCgroupCPUPeriod = 8 * time.Millisecond
// DefaultContainerMetricsPublishInterval is the default interval that we publish
// metrics to the ECS telemetry backend (TACS)
DefaultContainerMetricsPublishInterval = 20 * time.Second
)
const (
// ImagePullDefaultBehavior specifies the behavior that if an image pull API call fails,
// agent tries to start from the Docker image cache anyway, assuming that the image has not changed.
ImagePullDefaultBehavior ImagePullBehaviorType = iota
// ImagePullAlwaysBehavior specifies the behavior that if an image pull API call fails,
// the task fails instead of using cached image.
ImagePullAlwaysBehavior
// ImagePullOnceBehavior specifies the behavior that agent will only attempt to pull
// the same image once, once an image is pulled, local image cache will be used
// for all the containers.
ImagePullOnceBehavior
// ImagePullPreferCachedBehavior specifies the behavior that agent will only attempt to pull
// the image if there is no cached image.
ImagePullPreferCachedBehavior
)
const (
// When ContainerInstancePropagateTagsFromNoneType is specified, no DescribeTags
// API call will be made.
ContainerInstancePropagateTagsFromNoneType ContainerInstancePropagateTagsFromType = iota
// When ContainerInstancePropagateTagsFromEC2InstanceType is specified, agent will
// make DescribeTags API call to get tags remotely.
ContainerInstancePropagateTagsFromEC2InstanceType
)
var (
// DefaultPauseContainerImageName is the name of the pause container image. The linker's
// load flags are used to populate this value from the Makefile
DefaultPauseContainerImageName = ""
// DefaultPauseContainerTag is the tag for the pause container image. The linker's load
// flags are used to populate this value from the Makefile
DefaultPauseContainerTag = ""
)
// Merge merges two configs, preferring the values on the left. Any nil or
// zero values on the left that are set on the right will be overridden.
func (cfg *Config) Merge(rhs Config) *Config {
left := reflect.ValueOf(cfg).Elem()
right := reflect.ValueOf(&rhs).Elem()
for i := 0; i < left.NumField(); i++ {
leftField := left.Field(i)
switch leftField.Interface().(type) {
case BooleanDefaultFalse, BooleanDefaultTrue:
str, _ := json.Marshal(reflect.ValueOf(leftField.Interface()).Interface())
if string(str) == "null" {
leftField.Set(reflect.ValueOf(right.Field(i).Interface()))
}
default:
if utils.ZeroOrNil(leftField.Interface()) {
leftField.Set(reflect.ValueOf(right.Field(i).Interface()))
}
}
}
return cfg //make it chainable
}
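// Illustrative only, not in the original source: given
//   lhs := Config{Cluster: "prod"}
//   rhs := Config{Cluster: "default", AWSRegion: "us-west-2"}
// lhs.Merge(rhs) keeps Cluster ("prod" is non-zero on the left), copies
// AWSRegion from the right, and returns &lhs so calls can be chained.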
// NewConfig returns a config struct created by merging environment variables,
// a config file, and EC2 Metadata info.
// The 'config' struct it returns can be used, even if an error is returned. An
// error is returned, however, if the config is incomplete in some way that is
// considered fatal.
func NewConfig(ec2client ec2.EC2MetadataClient) (*Config, error) {
var errs []error
envConfig, err := environmentConfig() //Environment overrides all else
if err != nil {
errs = append(errs, err)
}
config := &envConfig
if config.complete() {
// No need to do file / network IO
return config, nil
}
fcfg, err := fileConfig()
if err != nil {
errs = append(errs, err)
}
config.Merge(fcfg)
config.Merge(userDataConfig(ec2client))
if config.AWSRegion == "" {
if config.NoIID {
// get it from AWS SDK if we don't have instance identity document
awsRegion, err := ec2client.Region()
if err != nil {
errs = append(errs, err)
}
config.AWSRegion = awsRegion
} else {
// Get it from metadata only if we need to (network io)
config.Merge(ec2MetadataConfig(ec2client))
}
}
return config, config.mergeDefaultConfig(errs)
}
func (config *Config) mergeDefaultConfig(errs []error) error {
config.trimWhitespace()
config.Merge(DefaultConfig())
err := config.validateAndOverrideBounds()
if err != nil {
errs = append(errs, err)
}
if len(errs) != 0 {
return apierrors.NewMultiError(errs...)
}
return nil
}
// trimWhitespace trims whitespace from all string cfg values with the
// `trim` tag
func (cfg *Config) trimWhitespace() {
cfgElem := reflect.ValueOf(cfg).Elem()
cfgStructField := reflect.Indirect(reflect.ValueOf(cfg)).Type()
for i := 0; i < cfgElem.NumField(); i++ {
cfgField := cfgElem.Field(i)
if !cfgField.CanInterface() {
continue
}
trimTag := cfgStructField.Field(i).Tag.Get("trim")
if len(trimTag) == 0 {
continue
}
if cfgField.Kind() != reflect.String {
seelog.Warnf("Cannot trim non-string field type %v index %v", cfgField.Kind().String(), i)
continue
}
str := cfgField.Interface().(string)
cfgField.SetString(strings.TrimSpace(str))
}
}
// validateAndOverrideBounds performs validation over members of the Config struct
// and check the value against the minimum required value.
func (cfg *Config) validateAndOverrideBounds() error {
	err := cfg.checkMissingAndDeprecated()
if err != nil {
return err
}
if cfg.DockerStopTimeout < minimumDockerStopTimeout {
return fmt.Errorf("config: invalid value for docker container stop timeout: %v", cfg.DockerStopTimeout.String())
}
if cfg.ContainerStartTimeout < minimumContainerStartTimeout {
return fmt.Errorf("config: invalid value for docker container start timeout: %v", cfg.ContainerStartTimeout.String())
}
var badDrivers []string
for _, driver := range cfg.AvailableLoggingDrivers {
_, ok := dockerclient.LoggingDriverMinimumVersion[driver]
if !ok {
badDrivers = append(badDrivers, string(driver))
}
}
if len(badDrivers) > 0 {
return errors.New("Invalid logging drivers: " + strings.Join(badDrivers, ", "))
}
// If a value has been set for taskCleanupWaitDuration and the value is less than the minimum allowed cleanup duration,
// print a warning and override it
if cfg.TaskCleanupWaitDuration < minimumTaskCleanupWaitDuration {
seelog.Warnf("Invalid value for ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION, will be overridden with the default value: %s. Parsed value: %v, minimum value: %v.", DefaultTaskCleanupWaitDuration.String(), cfg.TaskCleanupWaitDuration, minimumTaskCleanupWaitDuration)
cfg.TaskCleanupWaitDuration = DefaultTaskCleanupWaitDuration
}
if cfg.ImagePullInactivityTimeout < minimumImagePullInactivityTimeout {
seelog.Warnf("Invalid value for image pull inactivity timeout duration, will be overridden with the default value: %s. Parsed value: %v, minimum value: %v.", defaultImagePullInactivityTimeout.String(), cfg.ImagePullInactivityTimeout, minimumImagePullInactivityTimeout)
cfg.ImagePullInactivityTimeout = defaultImagePullInactivityTimeout
}
if cfg.ImageCleanupInterval < minimumImageCleanupInterval {
seelog.Warnf("Invalid value for ECS_IMAGE_CLEANUP_INTERVAL, will be overridden with the default value: %s. Parsed value: %v, minimum value: %v.", DefaultImageCleanupTimeInterval.String(), cfg.ImageCleanupInterval, minimumImageCleanupInterval)
cfg.ImageCleanupInterval = DefaultImageCleanupTimeInterval
}
if cfg.NumImagesToDeletePerCycle < minimumNumImagesToDeletePerCycle {
seelog.Warnf("Invalid value for number of images to delete for image cleanup, will be overridden with the default value: %d. Parsed value: %d, minimum value: %d.", DefaultImageDeletionAge, cfg.NumImagesToDeletePerCycle, minimumNumImagesToDeletePerCycle)
cfg.NumImagesToDeletePerCycle = DefaultNumImagesToDeletePerCycle
}
if cfg.TaskMetadataSteadyStateRate <= 0 || cfg.TaskMetadataBurstRate <= 0 {
seelog.Warnf("Invalid values for rate limits, will be overridden with default values: %d,%d.", DefaultTaskMetadataSteadyStateRate, DefaultTaskMetadataBurstRate)
cfg.TaskMetadataSteadyStateRate = DefaultTaskMetadataSteadyStateRate
cfg.TaskMetadataBurstRate = DefaultTaskMetadataBurstRate
}
// check the PollMetrics specific configurations
cfg.pollMetricsOverrides()
cfg.platformOverrides()
return nil
}
func (cfg *Config) pollMetricsOverrides() {
if cfg.PollMetrics.Enabled() {
if cfg.PollingMetricsWaitDuration < minimumPollingMetricsWaitDuration {
seelog.Warnf("ECS_POLLING_METRICS_WAIT_DURATION parsed value (%s) is less than the minimum of %s. Setting polling interval to minimum.",
cfg.PollingMetricsWaitDuration, minimumPollingMetricsWaitDuration)
cfg.PollingMetricsWaitDuration = minimumPollingMetricsWaitDuration
}
if cfg.PollingMetricsWaitDuration > maximumPollingMetricsWaitDuration {
seelog.Warnf("ECS_POLLING_METRICS_WAIT_DURATION parsed value (%s) is greater than the maximum of %s. Setting polling interval to maximum.",
cfg.PollingMetricsWaitDuration, maximumPollingMetricsWaitDuration)
cfg.PollingMetricsWaitDuration = maximumPollingMetricsWaitDuration
}
}
}
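// Illustrative only, not in the original source: with polling enabled, an
// ECS_POLLING_METRICS_WAIT_DURATION of 1s is clamped up to the 5s minimum and
// a value of 30s is clamped down to the 20s publish interval, per the bounds above.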
// checkMissingAndDeprecated checks all zero-valued fields for tags of the form
// missing:STRING and acts based on that string. Current options are: fatal,
// warn. Fatal will result in an error being returned, warn will result in a
// warning that the field is missing being logged.
func (cfg *Config) checkMissingAndDeprecated() error {
cfgElem := reflect.ValueOf(cfg).Elem()
cfgStructField := reflect.Indirect(reflect.ValueOf(cfg)).Type()
fatalFields := []string{}
for i := 0; i < cfgElem.NumField(); i++ {
cfgField := cfgElem.Field(i)
if utils.ZeroOrNil(cfgField.Interface()) {
missingTag := cfgStructField.Field(i).Tag.Get("missing")
if len(missingTag) == 0 {
continue
}
switch missingTag {
case "warn":
seelog.Warnf("Configuration key not set, key: %v", cfgStructField.Field(i).Name)
case "fatal":
seelog.Criticalf("Configuration key not set, key: %v", cfgStructField.Field(i).Name)
fatalFields = append(fatalFields, cfgStructField.Field(i).Name)
default:
seelog.Warnf("Unexpected `missing` tag value, tag %v", missingTag)
}
} else {
// present
deprecatedTag := cfgStructField.Field(i).Tag.Get("deprecated")
if len(deprecatedTag) == 0 {
continue
}
seelog.Warnf("Use of deprecated configuration key, key: %v message: %v", cfgStructField.Field(i).Name, deprecatedTag)
}
}
if len(fatalFields) > 0 {
return errors.New("Missing required fields: " + strings.Join(fatalFields, ", "))
}
return nil
}
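// Illustrative only, not in the original source: a field declared as
//   Cluster string `missing:"warn"`
// only logs a warning when left empty, while a `missing:"fatal"` tag makes
// the check above return an error naming the field.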
// complete returns true if all fields of the config are populated / nonzero
func (cfg *Config) complete() bool {
cfgElem := reflect.ValueOf(cfg).Elem()
for i := 0; i < cfgElem.NumField(); i++ {
if utils.ZeroOrNil(cfgElem.Field(i).Interface()) {
return false
}
}
return true
}
func fileConfig() (Config, error) {
fileName := utils.DefaultIfBlank(os.Getenv("ECS_AGENT_CONFIG_FILE_PATH"), defaultConfigFileName)
cfg := Config{}
file, err := os.Open(fileName)
if err != nil {
return cfg, nil
}
data, err := ioutil.ReadAll(file)
if err != nil {
seelog.Errorf("Unable to read cfg file, err %v", err)
return cfg, err
}
if strings.TrimSpace(string(data)) == "" {
// empty file, not an error
return cfg, nil
}
err = json.Unmarshal(data, &cfg)
if err != nil {
seelog.Criticalf("Error reading cfg json data, err %v", err)
return cfg, err
}
// Handle any deprecated keys correctly here
if utils.ZeroOrNil(cfg.Cluster) && !utils.ZeroOrNil(cfg.ClusterArn) {
cfg.Cluster = cfg.ClusterArn
}
return cfg, nil
}
// userDataConfig reads configuration JSON from instance's userdata. It doesn't
// return any error as it's entirely optional to configure the ECS agent using
// this method.
// Example:
// {"ECSAgentConfiguration":{"Cluster":"default"}}
func userDataConfig(ec2Client ec2.EC2MetadataClient) Config {
type userDataParser struct {
Config Config `json:"ECSAgentConfiguration"`
}
parsedUserData := userDataParser{
Config: Config{},
}
userData, err := ec2Client.GetUserData()
if err != nil {
seelog.Warnf("Unable to fetch user data: %v", err)
// Unable to read userdata from instance metadata. Just
// return early
return parsedUserData.Config
}
// In the future, if we want to support base64 encoded config,
// we'd need to add logic to decode the string here.
err = json.Unmarshal([]byte(userData), &parsedUserData)
if err != nil {
seelog.Debugf("Non-json user data, skip merging into agent config: %v", err)
// Unable to parse userdata as a valid JSON. Return the
// empty config
return Config{}
}
return parsedUserData.Config
}
// environmentConfig reads the given configs from the environment and attempts
// to convert them to the given type
func environmentConfig() (Config, error) {
dataDir := os.Getenv("ECS_DATADIR")
steadyStateRate, burstRate := parseTaskMetadataThrottles()
var errs []error
instanceAttributes, errs := parseInstanceAttributes(errs)
containerInstanceTags, errs := parseContainerInstanceTags(errs)
additionalLocalRoutes, errs := parseAdditionalLocalRoutes(errs)
var err error
if len(errs) > 0 {
err = apierrors.NewMultiError(errs...)
}
return Config{
Cluster: os.Getenv("ECS_CLUSTER"),
APIEndpoint: os.Getenv("ECS_BACKEND_HOST"),
AWSRegion: os.Getenv("AWS_DEFAULT_REGION"),
DockerEndpoint: os.Getenv("DOCKER_HOST"),
ReservedPorts: parseReservedPorts("ECS_RESERVED_PORTS"),
ReservedPortsUDP: parseReservedPorts("ECS_RESERVED_PORTS_UDP"),
DataDir: dataDir,
Checkpoint: parseCheckpoint(dataDir),
EngineAuthType: os.Getenv("ECS_ENGINE_AUTH_TYPE"),
EngineAuthData: NewSensitiveRawMessage([]byte(os.Getenv("ECS_ENGINE_AUTH_DATA"))),
UpdatesEnabled: parseBooleanDefaultFalseConfig("ECS_UPDATES_ENABLED"),
UpdateDownloadDir: os.Getenv("ECS_UPDATE_DOWNLOAD_DIR"),
DisableMetrics: parseBooleanDefaultFalseConfig("ECS_DISABLE_METRICS"),
ReservedMemory: parseEnvVariableUint16("ECS_RESERVED_MEMORY"),
AvailableLoggingDrivers: parseAvailableLoggingDrivers(),
PrivilegedDisabled: parseBooleanDefaultFalseConfig("ECS_DISABLE_PRIVILEGED"),
SELinuxCapable: parseBooleanDefaultFalseConfig("ECS_SELINUX_CAPABLE"),
AppArmorCapable: parseBooleanDefaultFalseConfig("ECS_APPARMOR_CAPABLE"),
TaskCleanupWaitDuration: parseEnvVariableDuration("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION"),
TaskENIEnabled: parseBooleanDefaultFalseConfig("ECS_ENABLE_TASK_ENI"),
TaskIAMRoleEnabled: parseBooleanDefaultFalseConfig("ECS_ENABLE_TASK_IAM_ROLE"),
DeleteNonECSImagesEnabled: parseBooleanDefaultFalseConfig("ECS_ENABLE_UNTRACKED_IMAGE_CLEANUP"),
TaskCPUMemLimit: parseBooleanDefaultTrueConfig("ECS_ENABLE_TASK_CPU_MEM_LIMIT"),
DockerStopTimeout: parseDockerStopTimeout(),
ContainerStartTimeout: parseContainerStartTimeout(),
ImagePullInactivityTimeout: parseImagePullInactivityTimeout(),
ImagePullTimeout: parseEnvVariableDuration("ECS_IMAGE_PULL_TIMEOUT"),
CredentialsAuditLogFile: os.Getenv("ECS_AUDIT_LOGFILE"),
CredentialsAuditLogDisabled: utils.ParseBool(os.Getenv("ECS_AUDIT_LOGFILE_DISABLED"), false),
TaskIAMRoleEnabledForNetworkHost: utils.ParseBool(os.Getenv("ECS_ENABLE_TASK_IAM_ROLE_NETWORK_HOST"), false),
ImageCleanupDisabled: parseBooleanDefaultFalseConfig("ECS_DISABLE_IMAGE_CLEANUP"),
MinimumImageDeletionAge: parseEnvVariableDuration("ECS_IMAGE_MINIMUM_CLEANUP_AGE"),
NonECSMinimumImageDeletionAge: parseEnvVariableDuration("NON_ECS_IMAGE_MINIMUM_CLEANUP_AGE"),
ImageCleanupInterval: parseEnvVariableDuration("ECS_IMAGE_CLEANUP_INTERVAL"),
NumImagesToDeletePerCycle: parseNumImagesToDeletePerCycle(),
NumNonECSContainersToDeletePerCycle: parseNumNonECSContainersToDeletePerCycle(),
ImagePullBehavior: parseImagePullBehavior(),
ImageCleanupExclusionList: parseImageCleanupExclusionList("ECS_EXCLUDE_UNTRACKED_IMAGE"),
InstanceAttributes: instanceAttributes,
CNIPluginsPath: os.Getenv("ECS_CNI_PLUGINS_PATH"),
AWSVPCBlockInstanceMetdata: parseBooleanDefaultFalseConfig("ECS_AWSVPC_BLOCK_IMDS"),
AWSVPCAdditionalLocalRoutes: additionalLocalRoutes,
ContainerMetadataEnabled: parseBooleanDefaultFalseConfig("ECS_ENABLE_CONTAINER_METADATA"),
DataDirOnHost: os.Getenv("ECS_HOST_DATA_DIR"),
OverrideAWSLogsExecutionRole: parseBooleanDefaultFalseConfig("ECS_ENABLE_AWSLOGS_EXECUTIONROLE_OVERRIDE"),
CgroupPath: os.Getenv("ECS_CGROUP_PATH"),
TaskMetadataSteadyStateRate: steadyStateRate,
TaskMetadataBurstRate: burstRate,
SharedVolumeMatchFullConfig: parseBooleanDefaultFalseConfig("ECS_SHARED_VOLUME_MATCH_FULL_CONFIG"),
ContainerInstanceTags: containerInstanceTags,
ContainerInstancePropagateTagsFrom: parseContainerInstancePropagateTagsFrom(),
PollMetrics: parseBooleanDefaultTrueConfig("ECS_POLL_METRICS"),
PollingMetricsWaitDuration: parseEnvVariableDuration("ECS_POLLING_METRICS_WAIT_DURATION"),
DisableDockerHealthCheck: parseBooleanDefaultFalseConfig("ECS_DISABLE_DOCKER_HEALTH_CHECK"),
GPUSupportEnabled: utils.ParseBool(os.Getenv("ECS_ENABLE_GPU_SUPPORT"), false),
InferentiaSupportEnabled: utils.ParseBool(os.Getenv("ECS_ENABLE_INF_SUPPORT"), false),
NvidiaRuntime: os.Getenv("ECS_NVIDIA_RUNTIME"),
TaskMetadataAZDisabled: utils.ParseBool(os.Getenv("ECS_DISABLE_TASK_METADATA_AZ"), false),
CgroupCPUPeriod: parseCgroupCPUPeriod(),
SpotInstanceDrainingEnabled: parseBooleanDefaultFalseConfig("ECS_ENABLE_SPOT_INSTANCE_DRAINING"),
GMSACapable: parseGMSACapability(),
VolumePluginCapabilities: parseVolumePluginCapabilities(),
}, err
}
func ec2MetadataConfig(ec2client ec2.EC2MetadataClient) Config {
iid, err := ec2client.InstanceIdentityDocument()
if err != nil {
seelog.Criticalf("Unable to communicate with EC2 Metadata service to infer region: %v", err.Error())
return Config{}
}
return Config{AWSRegion: iid.Region}
}
// String returns a lossy string representation of the config suitable for human readable display.
// Consequently, it *should not* return any sensitive information.
func (cfg *Config) String() string {
return fmt.Sprintf(
"Cluster: %v, "+
" Region: %v, "+
" DataDir: %v,"+
" Checkpoint: %v, "+
"AuthType: %v, "+
"UpdatesEnabled: %v, "+
"DisableMetrics: %v, "+
"PollMetrics: %v, "+
"PollingMetricsWaitDuration: %v, "+
"ReservedMem: %v, "+
"TaskCleanupWaitDuration: %v, "+
"DockerStopTimeout: %v, "+
"ContainerStartTimeout: %v, "+
"TaskCPUMemLimit: %v, "+
"%s",
cfg.Cluster,
cfg.AWSRegion,
cfg.DataDir,
cfg.Checkpoint,
cfg.EngineAuthType,
cfg.UpdatesEnabled,
cfg.DisableMetrics,
cfg.PollMetrics,
cfg.PollingMetricsWaitDuration,
cfg.ReservedMemory,
cfg.TaskCleanupWaitDuration,
cfg.DockerStopTimeout,
cfg.ContainerStartTimeout,
cfg.TaskCPUMemLimit,
cfg.platformString(),
)
}
| 1 | 25,346 | should we validate that AWSRegion has also been set here so that we don't get into the if-block that relies on the ec2 metadata client below? | aws-amazon-ecs-agent | go |
@@ -89,11 +89,13 @@ class ThriftRequestHandler():
checker_md_docs,
checker_md_docs_map,
suppress_handler,
- db_version_info):
+ db_version_info,
+ version):
self.__checker_md_docs = checker_md_docs
self.__checker_doc_map = checker_md_docs_map
self.__suppress_handler = suppress_handler
+ self.__version = version
self.__session = session
def __queryReport(self, reportId): | 1 | # -------------------------------------------------------------------------
# The CodeChecker Infrastructure
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
# -------------------------------------------------------------------------
"""
Handle thrift requests.
"""
from collections import defaultdict
import codecs
import ntpath
import os
import zlib
from sqlalchemy.sql.expression import false
from sqlalchemy.sql.expression import true
import sqlalchemy
import shared
from codeCheckerDBAccess import constants
from codeCheckerDBAccess.ttypes import *
from codechecker_lib.logger import LoggerFactory
from codechecker_lib.profiler import timeit
from db_model.orm_model import *
LOG = LoggerFactory.get_new_logger('ACCESS HANDLER')
def conv(text):
"""
    Convert the * wildcard received from clients to % for the database queries.
"""
if text is None:
return '%'
return text.replace('*', '%')
def construct_report_filter(report_filters):
"""
Construct the report filter for reports and suppressed reports.
"""
OR = []
if report_filters is None:
AND = [Report.checker_message.like('%'), Report.checker_id.like('%'),
File.filepath.like('%')]
OR.append(and_(*AND))
filter_expression = or_(*OR)
return filter_expression
for report_filter in report_filters:
AND = []
if report_filter.checkerMsg:
AND.append(Report.checker_message.ilike(
conv(report_filter.checkerMsg)))
if report_filter.checkerId:
AND.append(Report.checker_id.ilike(
conv(report_filter.checkerId)))
if report_filter.filepath:
AND.append(File.filepath.ilike(
conv(report_filter.filepath)))
if report_filter.severity is not None:
# severity value can be 0
AND.append(Report.severity == report_filter.severity)
if report_filter.suppressed:
AND.append(Report.suppressed == true())
else:
AND.append(Report.suppressed == false())
OR.append(and_(*AND))
filter_expression = or_(*OR)
return filter_expression
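# Illustrative sketch, not part of the original file: two report filters such as
#   [ReportFilter(checkerId='core.*', suppressed=False),
#    ReportFilter(severity=3, suppressed=True)]
# produce a filter expression roughly equivalent to
#   (report.checker_id ILIKE 'core.%' AND report.suppressed = false)
#   OR (report.severity = 3 AND report.suppressed = true)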
class ThriftRequestHandler():
"""
Connect to database and handle thrift client requests.
"""
def __init__(self,
session,
checker_md_docs,
checker_md_docs_map,
suppress_handler,
db_version_info):
self.__checker_md_docs = checker_md_docs
self.__checker_doc_map = checker_md_docs_map
self.__suppress_handler = suppress_handler
self.__session = session
def __queryReport(self, reportId):
session = self.__session
try:
q = session.query(Report,
File,
BugPathEvent,
SuppressBug) \
.filter(Report.id == reportId) \
.outerjoin(File,
Report.file_id == File.id) \
.outerjoin(BugPathEvent,
Report.end_bugevent == BugPathEvent.id) \
.outerjoin(SuppressBug,
SuppressBug.hash == Report.bug_id)
results = q.limit(1).all()
if len(results) < 1:
raise shared.ttypes.RequestFailed(
shared.ttypes.ErrorCode.DATABASE,
"Report " + reportId + " not found!")
report, source_file, lbpe, suppress_bug = results[0]
last_event_pos = \
shared.ttypes.BugPathEvent(
startLine=lbpe.line_begin,
startCol=lbpe.col_begin,
endLine=lbpe.line_end,
endCol=lbpe.col_end,
msg=lbpe.msg,
fileId=lbpe.file_id,
filePath=source_file.filepath)
if suppress_bug:
suppress_comment = suppress_bug.comment
else:
suppress_comment = None
return ReportData(
bugHash=report.bug_id,
checkedFile=source_file.filepath,
checkerMsg=report.checker_message,
suppressed=report.suppressed,
reportId=report.id,
fileId=source_file.id,
lastBugPosition=last_event_pos,
checkerId=report.checker_id,
severity=report.severity,
suppressComment=suppress_comment)
except sqlalchemy.exc.SQLAlchemyError as alchemy_ex:
msg = str(alchemy_ex)
LOG.error(msg)
raise shared.ttypes.RequestFailed(
shared.ttypes.ErrorCode.DATABASE,
msg)
def __sortResultsQuery(self, query, sort_types=None):
"""
Helper method for __queryDiffResults and __queryResults to apply
sorting.
"""
# Get a list of sort_types which will be a nested ORDER BY.
sort_type_map = {}
sort_type_map[SortType.FILENAME] = [File.filepath,
BugPathEvent.line_begin]
sort_type_map[SortType.CHECKER_NAME] = [Report.checker_id]
sort_type_map[SortType.SEVERITY] = [Report.severity]
# Mapping the SQLAlchemy functions.
order_type_map = {Order.ASC: asc, Order.DESC: desc}
if sort_types is None:
sort_types = [SortMode(SortType.FILENAME, Order.ASC)]
for sort in sort_types:
sorttypes = sort_type_map.get(sort.type)
for sorttype in sorttypes:
order_type = order_type_map.get(sort.ord)
query = query.order_by(order_type(sorttype))
return query
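    # Illustrative sketch, not part of the original file: a sort_types value of
    #   [SortMode(SortType.SEVERITY, Order.DESC)]
    # makes the loop above apply query.order_by(desc(Report.severity)), while
    # the default [SortMode(SortType.FILENAME, Order.ASC)] orders by file path
    # and then by the line where the report's last bug event begins.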
def __queryResults(self, run_id, limit, offset, sort_types,
report_filters):
max_query_limit = constants.MAX_QUERY_SIZE
if limit > max_query_limit:
LOG.debug('Query limit ' + str(limit) +
' was larger than max query limit ' +
str(max_query_limit) + ', setting limit to ' +
str(max_query_limit))
limit = max_query_limit
session = self.__session
filter_expression = construct_report_filter(report_filters)
try:
q = session.query(Report,
File,
BugPathEvent,
SuppressBug) \
.filter(Report.run_id == run_id) \
.outerjoin(File,
and_(Report.file_id == File.id,
File.run_id == run_id)) \
.outerjoin(BugPathEvent,
Report.end_bugevent == BugPathEvent.id) \
.outerjoin(SuppressBug,
and_(SuppressBug.hash == Report.bug_id,
SuppressBug.run_id == run_id)) \
.filter(filter_expression)
q = self.__sortResultsQuery(q, sort_types)
results = []
for report, source_file, lbpe, suppress_bug in \
q.limit(limit).offset(offset):
last_event_pos = \
shared.ttypes.BugPathEvent(startLine=lbpe.line_begin,
startCol=lbpe.col_begin,
endLine=lbpe.line_end,
endCol=lbpe.col_end,
msg=lbpe.msg,
fileId=lbpe.file_id,
filePath=source_file.filepath)
if suppress_bug:
suppress_comment = suppress_bug.comment
else:
suppress_comment = None
results.append(
ReportData(bugHash=report.bug_id,
checkedFile=source_file.filepath,
checkerMsg=report.checker_message,
suppressed=report.suppressed,
reportId=report.id,
fileId=source_file.id,
lastBugPosition=last_event_pos,
checkerId=report.checker_id,
severity=report.severity,
suppressComment=suppress_comment)
)
return results
except sqlalchemy.exc.SQLAlchemyError as alchemy_ex:
msg = str(alchemy_ex)
LOG.error(msg)
raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.DATABASE,
msg)
@timeit
def getRunData(self):
session = self.__session
results = []
try:
# Count the reports subquery.
stmt = session.query(Report.run_id,
func.count(literal_column('*')).label(
'report_count')) \
.filter(Report.suppressed == false()) \
.group_by(Report.run_id) \
.subquery()
q = session.query(Run, stmt.c.report_count) \
.outerjoin(stmt, Run.id == stmt.c.run_id) \
.order_by(Run.date)
for instance, reportCount in q:
if reportCount is None:
reportCount = 0
results.append(RunData(instance.id,
str(instance.date),
instance.name,
instance.duration,
reportCount,
instance.command
))
return results
except sqlalchemy.exc.SQLAlchemyError as alchemy_ex:
msg = str(alchemy_ex)
LOG.error(msg)
raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.DATABASE,
msg)
@timeit
def getReport(self, reportId):
return self.__queryReport(reportId)
@timeit
def getRunResults(self, run_id, limit, offset, sort_types, report_filters):
return self.__queryResults(run_id,
limit,
offset,
sort_types,
report_filters)
@timeit
def getRunResultCount(self, run_id, report_filters):
filter_expression = construct_report_filter(report_filters)
session = self.__session
try:
reportCount = session.query(Report) \
.filter(Report.run_id == run_id) \
.outerjoin(File,
and_(Report.file_id == File.id,
File.run_id == run_id)) \
.outerjoin(BugPathEvent,
Report.end_bugevent == BugPathEvent.id) \
.outerjoin(SuppressBug,
and_(SuppressBug.hash == Report.bug_id,
SuppressBug.run_id == run_id)) \
.filter(filter_expression) \
.count()
if reportCount is None:
reportCount = 0
return reportCount
except sqlalchemy.exc.SQLAlchemyError as alchemy_ex:
msg = str(alchemy_ex)
LOG.error(msg)
raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.DATABASE,
msg)
@timeit
def __construct_bug_event_list(self, session, start_bug_event):
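"""
Follow the linked list of bug path events from start_bug_event and
return (event, file_path) pairs, caching file paths by file id.
"""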
file_path_cache = {}
bug_events = []
event = session.query(BugPathEvent).get(start_bug_event)
file_path = file_path_cache.get(event.file_id)
if not file_path:
f = session.query(File).get(event.file_id)
file_path = f.filepath
file_path_cache[event.file_id] = file_path
bug_events.append((event, file_path))
while event.next is not None:
event = session.query(BugPathEvent).get(event.next)
file_path = file_path_cache.get(event.file_id)
if not file_path:
f = session.query(File).get(event.file_id)
file_path = f.filepath
file_path_cache[event.file_id] = file_path
bug_events.append((event, file_path))
return bug_events
@timeit
def __construct_bug_point_list(self, session, start_bug_point):
# Start_bug_point can be None.
file_path_cache = {}
bug_points = []
if start_bug_point:
bug_point = session.query(BugReportPoint).get(start_bug_point)
file_path = file_path_cache.get(bug_point.file_id)
if not file_path:
f = session.query(File).get(bug_point.file_id)
file_path = f.filepath
file_path_cache[bug_point.file_id] = file_path
bug_points.append((bug_point, file_path))
while bug_point.next is not None:
bug_point = session.query(BugReportPoint).get(bug_point.next)
file_path = file_path_cache.get(bug_point.file_id)
if not file_path:
f = session.query(File).get(bug_point.file_id)
file_path = f.filepath
file_path_cache[bug_point.file_id] = file_path
bug_points.append((bug_point, file_path))
return bug_points
@timeit
def getReportDetails(self, reportId):
"""
Parameters:
- reportId
"""
session = self.__session
try:
report = session.query(Report).get(reportId)
events = self.__construct_bug_event_list(session,
report.start_bugevent)
bug_events_list = []
for (event, file_path) in events:
bug_events_list.append(
shared.ttypes.BugPathEvent(
startLine=event.line_begin,
startCol=event.col_begin,
endLine=event.line_end,
endCol=event.col_end,
msg=event.msg,
fileId=event.file_id,
filePath=file_path))
points = self.__construct_bug_point_list(session,
report.start_bugpoint)
bug_point_list = []
for (bug_point, file_path) in points:
bug_point_list.append(
shared.ttypes.BugPathPos(
startLine=bug_point.line_begin,
startCol=bug_point.col_begin,
endLine=bug_point.line_end,
endCol=bug_point.col_end,
fileId=bug_point.file_id,
filePath=file_path))
return ReportDetails(bug_events_list, bug_point_list)
except sqlalchemy.exc.SQLAlchemyError as alchemy_ex:
msg = str(alchemy_ex)
LOG.error(msg)
raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.DATABASE,
msg)
def __set_report_suppress_flag(self,
session,
run_ids,
bug_id_hash,
source_file_name,
suppress_flag):
"""
Update the suppress flag for multiple report entries based on the
filter.
"""
if not run_ids:
# There are no run ids where the report should be suppressed.
return
def check_filename(data):
report, file_obj = data
source_file_path, f_name = ntpath.split(file_obj.filepath)
if f_name == source_file_name:
return True
else:
return False
reports = session.query(Report, File) \
.filter(and_(Report.bug_id == bug_id_hash,
Report.run_id.in_(run_ids))) \
.outerjoin(File, File.id == Report.file_id) \
.all()
reports = filter(check_filename, reports)
for report, file_obj in reports:
report.suppressed = suppress_flag
def __update_suppress_storage_data(self,
run_ids,
report,
suppress,
comment=u''):
"""
Update suppress information in the database and in the suppress file.
Can be used to suppress or unsuppress a report for multiple runs.
"""
session = self.__session
report_id = report.id
bug_id_hash = report.bug_id
source_file = session.query(File).get(report.file_id)
source_file_path, source_file_name = ntpath.split(source_file.filepath)
LOG.debug('Updating suppress data for: {0} bug id {1} '
'file name {2} suppressing {3}'.format(report_id,
bug_id_hash,
source_file_name,
suppress))
# Check if it is already suppressed for any run ids.
suppressed = session.query(SuppressBug) \
.filter(or_(
and_(SuppressBug.hash == bug_id_hash,
SuppressBug.file_name == source_file_name,
SuppressBug.run_id.in_(run_ids)),
and_(SuppressBug.hash == bug_id_hash,
SuppressBug.file_name == '',
SuppressBug.run_id.in_(run_ids))
)) \
.all()
if not suppressed and suppress:
# The bug is not suppressed for any run_id, suppressing it.
LOG.debug('Bug is not suppressed in any runs')
for rId in run_ids:
suppress_bug = SuppressBug(rId,
bug_id_hash,
source_file_name,
comment)
session.add(suppress_bug)
# Update report entries.
self.__set_report_suppress_flag(session,
run_ids,
bug_id_hash,
source_file_name,
suppress_flag=suppress)
elif suppressed and suppress:
# Already suppressed for some run ids; check if suppression
# is needed for any other run ids.
suppressed_runids = set([r.run_id for r in suppressed])
LOG.debug('Bug is suppressed in these runs:' +
' '.join([str(r) for r in suppressed_runids]))
suppress_in_these_runs = set(run_ids).difference(suppressed_runids)
for run_id in suppress_in_these_runs:
suppress_bug = SuppressBug(run_id,
bug_id_hash,
source_file_name,
comment)
session.add(suppress_bug)
self.__set_report_suppress_flag(session,
suppress_in_these_runs,
bug_id_hash,
source_file_name,
suppress_flag=suppress)
elif suppressed and not suppress:
# Already suppressed for some run ids;
# remove those entries.
already_suppressed_runids = \
filter(lambda bug: bug.run_id in run_ids, set(suppressed))
unsuppress_in_these_runs = \
{bug.run_id for bug in already_suppressed_runids}
LOG.debug('Already suppressed, unsuppressing now')
suppressed = session.query(SuppressBug) \
.filter(and_(SuppressBug.hash == bug_id_hash,
SuppressBug.file_name == source_file_name,
SuppressBug.run_id.in_(unsuppress_in_these_runs)))
# Delete suppress bug entries.
for sp in suppressed:
session.delete(sp)
# Update report entries.
self.__set_report_suppress_flag(session,
unsuppress_in_these_runs,
bug_id_hash,
source_file_name,
suppress_flag=suppress)
# elif suppressed is None and not suppress:
# # check only in the file if there is anything that should be
# # removed the database has no entries in the suppressBug table
if suppress:
# Store to suppress file.
ret = self.__suppress_handler \
.store_suppress_bug_id(bug_id_hash,
source_file_name,
comment)
else:
# Remove from suppress file.
ret = self.__suppress_handler \
.remove_suppress_bug_id(bug_id_hash,
source_file_name)
if not ret:
session.rollback()
raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.IOERROR,
'Failed to store suppress bugId')
else:
session.commit()
return True
@timeit
def suppressBug(self, run_ids, report_id, comment):
"""
Add suppress bug entry to the SuppressBug table.
Set the suppressed flag for the selected report.
"""
session = self.__session
try:
report = session.query(Report).get(report_id)
if report:
return self.__update_suppress_storage_data(run_ids,
report,
True,
comment)
else:
msg = 'Report id ' + str(report_id) + \
' was not found in the database.'
LOG.error(msg)
raise shared.ttypes.RequestFailed(
shared.ttypes.ErrorCode.DATABASE, msg)
except sqlalchemy.exc.SQLAlchemyError as alchemy_ex:
msg = str(alchemy_ex)
LOG.error(msg)
raise shared.ttypes.RequestFailed(
shared.ttypes.ErrorCode.DATABASE, msg)
except Exception as ex:
msg = str(ex)
LOG.error(msg)
raise shared.ttypes.RequestFailed(
shared.ttypes.ErrorCode.IOERROR, msg)
@timeit
def unSuppressBug(self, run_ids, report_id):
"""
Remove the suppress flag from the reports in multiple runs if given.
Cleanup the SuppressBug table to remove suppress entries.
"""
session = self.__session
try:
report = session.query(Report).get(report_id)
if report:
return self.__update_suppress_storage_data(run_ids,
report,
False)
else:
msg = 'Report id ' + str(report_id) + \
' was not found in the database.'
LOG.error(msg)
raise shared.ttypes.RequestFailed(
shared.ttypes.ErrorCode.DATABASE, msg)
except sqlalchemy.exc.SQLAlchemyError as alchemy_ex:
msg = str(alchemy_ex)
LOG.error(msg)
raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.DATABASE,
msg)
except Exception as ex:
msg = str(ex)
LOG.error(msg)
raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.IOERROR,
msg)
def getCheckerDoc(self, checkerId):
"""
Parameters:
- checkerId
"""
text = "No documentation found for checker: " + checkerId + \
"\n\nPlease refer to the documentation at the "
sa_link = "http://clang-analyzer.llvm.org/available_checks.html"
tidy_link = "http://clang.llvm.org/extra/clang-tidy/checks/list.html"
if "." in checkerId:
text += "[ClangSA](" + sa_link + ")"
elif "-" in checkerId:
text += "[ClangTidy](" + tidy_link + ")"
text += " homepage."
try:
md_file = self.__checker_doc_map.get(checkerId)
if md_file:
md_file = os.path.join(self.__checker_md_docs, md_file)
with open(md_file, 'r') as md_content:
text = md_content.read()
return text
except Exception as ex:
msg = str(ex)
LOG.error(msg)
raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.IOERROR,
msg)
def getCheckerConfigs(self, run_id):
"""
Parameters:
- run_id
"""
session = self.__session
try:
configs = session.query(Config) \
.filter(Config.run_id == run_id) \
.all()
configs = [(c.checker_name, c.attribute, c.value)
for c in configs]
res = []
for cName, attribute, value in configs:
res.append(shared.ttypes.ConfigValue(cName, attribute, value))
return res
except sqlalchemy.exc.SQLAlchemyError as alchemy_ex:
msg = str(alchemy_ex)
LOG.error(msg)
raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.DATABASE,
msg)
@timeit
def getSkipPaths(self, run_id):
session = self.__session
try:
suppressed_paths = session.query(SkipPath) \
.filter(SkipPath.run_id == run_id) \
.all()
results = []
for sp in suppressed_paths:
encoded_path = sp.path
encoded_comment = sp.comment
results.append(SkipPathData(encoded_path, encoded_comment))
return results
except sqlalchemy.exc.SQLAlchemyError as alchemy_ex:
msg = str(alchemy_ex)
LOG.error(msg)
raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.DATABASE,
msg)
@timeit
def getBuildActions(self, reportId):
session = self.__session
try:
build_actions = session.query(BuildAction) \
.outerjoin(ReportsToBuildActions) \
.filter(ReportsToBuildActions.report_id == reportId) \
.all()
return [BuildActionData(id=ba.id,
runId=ba.run_id,
buildCmd=ba.build_cmd_hash,
analyzerType=ba.analyzer_type,
file=ba.analyzed_source_file,
checkCmd=ba.check_cmd,
failure=ba.failure_txt,
date=str(ba.date),
duration=ba.duration) for ba in
build_actions]
except sqlalchemy.exc.SQLAlchemyError as alchemy_ex:
msg = str(alchemy_ex)
LOG.error(msg)
raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.DATABASE,
msg)
@timeit
def getFileId(self, run_id, path):
session = self.__session
try:
sourcefile = session.query(File) \
.filter(File.run_id == run_id,
File.filepath == path) \
.first()
if sourcefile is None:
return -1
return sourcefile.id
except sqlalchemy.exc.SQLAlchemyError as alchemy_ex:
msg = str(alchemy_ex)
LOG.error(msg)
raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.DATABASE,
msg)
@timeit
def getSourceFileData(self, fileId, fileContent):
"""
Parameters:
- fileId
- fileContent
"""
session = self.__session
try:
sourcefile = session.query(File) \
.filter(File.id == fileId).first()
if sourcefile is None:
return SourceFileData()
if fileContent and sourcefile.content:
source = zlib.decompress(sourcefile.content)
source = codecs.decode(source, 'utf-8', 'replace')
return SourceFileData(fileId=sourcefile.id,
filePath=sourcefile.filepath,
fileContent=source)
else:
return SourceFileData(fileId=sourcefile.id,
filePath=sourcefile.filepath)
except sqlalchemy.exc.SQLAlchemyError as alchemy_ex:
msg = str(alchemy_ex)
LOG.error(msg)
raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.DATABASE,
msg)
@timeit
def getRunResultTypes(self, run_id, report_filters):
session = self.__session
try:
filter_expression = construct_report_filter(report_filters)
q = session.query(Report) \
.filter(Report.run_id == run_id) \
.outerjoin(File,
Report.file_id == File.id) \
.outerjoin(BugPathEvent,
Report.end_bugevent == BugPathEvent.id) \
.outerjoin(SuppressBug,
and_(SuppressBug.hash == Report.bug_id,
SuppressBug.run_id == run_id)) \
.order_by(Report.checker_id) \
.filter(filter_expression) \
.all()
count_results = defaultdict(int)
result_reports = defaultdict()
# Count and filter out the results for the same checker_id.
for r in q:
count_results[r.checker_id] += 1
result_reports[r.checker_id] = r
results = []
for checker_id, res in result_reports.items():
results.append(ReportDataTypeCount(res.checker_id,
res.severity,
count_results[
res.checker_id]))
# Sort by result count, descending.
results = sorted(results, key=lambda rep: rep.count, reverse=True)
return results
except sqlalchemy.exc.SQLAlchemyError as alchemy_ex:
msg = str(alchemy_ex)
LOG.error(msg)
raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.DATABASE,
msg)
# -----------------------------------------------------------------------
@timeit
def __get_hashes_for_diff(self, session, base_run_id, new_run_id):
LOG.debug('query all baseline hashes')
# Keyed tuple list is returned.
base_line_hashes = session.query(Report.bug_id) \
.filter(Report.run_id == base_run_id) \
.all()
LOG.debug('query all new check hashes')
# Keyed tuple list is returned.
new_check_hashes = session.query(Report.bug_id) \
.filter(Report.run_id == new_run_id) \
.all()
base_line_hashes = set([t[0] for t in base_line_hashes])
new_check_hashes = set([t[0] for t in new_check_hashes])
return base_line_hashes, new_check_hashes
# -----------------------------------------------------------------------
@timeit
def __queryDiffResults(self,
session,
diff_hash_list,
run_id,
limit,
offset,
sort_types=None,
report_filters=None):
max_query_limit = constants.MAX_QUERY_SIZE
if limit > max_query_limit:
LOG.debug('Query limit ' + str(limit) +
' was larger than max query limit ' +
str(max_query_limit) + ', setting limit to ' +
str(max_query_limit))
limit = max_query_limit
filter_expression = construct_report_filter(report_filters)
try:
q = session.query(Report,
File,
BugPathEvent,
SuppressBug) \
.filter(Report.run_id == run_id) \
.outerjoin(File,
and_(Report.file_id == File.id,
File.run_id == run_id)) \
.outerjoin(BugPathEvent,
Report.end_bugevent == BugPathEvent.id) \
.outerjoin(SuppressBug,
and_(SuppressBug.hash == Report.bug_id,
SuppressBug.run_id == run_id)) \
.filter(Report.bug_id.in_(diff_hash_list)) \
.filter(filter_expression)
q = self.__sortResultsQuery(q, sort_types)
results = []
for report, source_file, lbpe, suppress_bug \
in q.limit(limit).offset(offset):
lastEventPos = \
shared.ttypes.BugPathEvent(startLine=lbpe.line_begin,
startCol=lbpe.col_begin,
endLine=lbpe.line_end,
endCol=lbpe.col_end,
msg=lbpe.msg,
fileId=lbpe.file_id)
if suppress_bug:
suppress_comment = suppress_bug.comment
else:
suppress_comment = None
results.append(ReportData(bugHash=report.bug_id,
checkedFile=source_file.filepath,
checkerMsg=report.checker_message,
suppressed=report.suppressed,
reportId=report.id,
fileId=source_file.id,
lastBugPosition=lastEventPos,
checkerId=report.checker_id,
severity=report.severity,
suppressComment=suppress_comment))
return results
except sqlalchemy.exc.SQLAlchemyError as alchemy_ex:
msg = str(alchemy_ex)
LOG.error(msg)
raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.DATABASE,
msg)
# -----------------------------------------------------------------------
@timeit
def getNewResults(self,
base_run_id,
new_run_id,
limit,
offset,
sort_types=None,
report_filters=None):
session = self.__session
base_line_hashes, new_check_hashes = \
self.__get_hashes_for_diff(session,
base_run_id,
new_run_id)
diff_hashes = list(new_check_hashes.difference(base_line_hashes))
LOG.debug(len(diff_hashes))
LOG.debug(diff_hashes)
if len(diff_hashes) == 0:
return []
return self.__queryDiffResults(session,
diff_hashes,
new_run_id,
limit,
offset,
sort_types,
report_filters)
# -----------------------------------------------------------------------
@timeit
def getResolvedResults(self,
base_run_id,
new_run_id,
limit,
offset,
sort_types=None,
report_filters=None):
session = self.__session
base_line_hashes, new_check_hashes = \
self.__get_hashes_for_diff(session,
base_run_id,
new_run_id)
diff_hashes = list(base_line_hashes.difference(new_check_hashes))
LOG.debug(len(diff_hashes))
LOG.debug(diff_hashes)
if len(diff_hashes) == 0:
return []
return self.__queryDiffResults(session,
diff_hashes,
base_run_id,
limit,
offset,
sort_types,
report_filters)
# -----------------------------------------------------------------------
@timeit
def getUnresolvedResults(self,
base_run_id,
new_run_id,
limit,
offset,
sort_types=None,
report_filters=None):
session = self.__session
base_line_hashes, new_check_hashes = \
self.__get_hashes_for_diff(session,
base_run_id,
new_run_id)
diff_hashes = list(base_line_hashes.intersection(new_check_hashes))
LOG.debug('diff hashes ' + str(len(diff_hashes)))
LOG.debug(diff_hashes)
if len(diff_hashes) == 0:
return []
return self.__queryDiffResults(session,
diff_hashes,
new_run_id,
limit,
offset,
sort_types,
report_filters)
# -----------------------------------------------------------------------
@timeit
def getAPIVersion(self):
# Returns the thrift api version.
return constants.API_VERSION
# -----------------------------------------------------------------------
@timeit
def removeRunResults(self, run_ids):
session = self.__session
runs_to_delete = []
for run_id in run_ids:
LOG.debug('run ids to delete')
LOG.debug(run_id)
run_to_delete = session.query(Run).get(run_id)
if not run_to_delete.can_delete:
LOG.debug("Can't delete " + str(run_id))
continue
run_to_delete.can_delete = False
session.commit()
runs_to_delete.append(run_to_delete)
for run_to_delete in runs_to_delete:
session.delete(run_to_delete)
session.commit()
return True
# -----------------------------------------------------------------------
def getSuppressFile(self):
"""
Return the suppress file path or empty string if not set.
"""
suppress_file = self.__suppress_handler.suppress_file
if suppress_file:
return suppress_file
return ''
# -----------------------------------------------------------------------
def __queryDiffResultsCount(self,
session,
diff_hash_list,
run_id,
report_filters=None):
"""
Count results for a hash list with filters.
"""
filter_expression = construct_report_filter(report_filters)
try:
report_count = session.query(Report) \
.filter(Report.run_id == run_id) \
.outerjoin(File,
and_(Report.file_id == File.id,
File.run_id == run_id)) \
.outerjoin(BugPathEvent,
Report.end_bugevent == BugPathEvent.id) \
.outerjoin(SuppressBug,
and_(SuppressBug.hash == Report.bug_id,
SuppressBug.run_id == run_id)) \
.filter(Report.bug_id.in_(diff_hash_list)) \
.filter(filter_expression) \
.count()
if report_count is None:
report_count = 0
return report_count
except sqlalchemy.exc.SQLAlchemyError as alchemy_ex:
msg = str(alchemy_ex)
LOG.error(msg)
raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.DATABASE,
msg)
# -----------------------------------------------------------------------
@timeit
def getDiffResultCount(self,
base_run_id,
new_run_id,
diff_type,
report_filters):
"""
Count the diff results.
"""
session = self.__session
base_line_hashes, new_check_hashes = \
self.__get_hashes_for_diff(session,
base_run_id,
new_run_id)
if diff_type == DiffType.NEW:
diff_hashes = list(new_check_hashes.difference(base_line_hashes))
if not diff_hashes:
return 0
run_id = new_run_id
elif diff_type == DiffType.RESOLVED:
diff_hashes = list(base_line_hashes.difference(new_check_hashes))
if not diff_hashes:
return 0
run_id = base_run_id
elif diff_type == DiffType.UNRESOLVED:
diff_hashes = list(base_line_hashes.intersection(new_check_hashes))
if not diff_hashes:
return 0
run_id = new_run_id
else:
msg = 'Unsupported diff type: ' + str(diff_type)
LOG.error(msg)
raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.DATABASE,
msg)
return self.__queryDiffResultsCount(session,
diff_hashes,
run_id,
report_filters)
# -----------------------------------------------------------------------
def __queryDiffResultTypes(self,
session,
diff_hash_list,
run_id,
report_filters):
"""
Query and count results for a hash list with filters.
"""
try:
filter_expression = construct_report_filter(report_filters)
q = session.query(Report) \
.filter(Report.run_id == run_id) \
.outerjoin(File,
Report.file_id == File.id) \
.outerjoin(BugPathEvent,
Report.end_bugevent == BugPathEvent.id) \
.outerjoin(SuppressBug,
and_(SuppressBug.hash == Report.bug_id,
SuppressBug.run_id == run_id)) \
.order_by(Report.checker_id) \
.filter(Report.bug_id.in_(diff_hash_list)) \
.filter(filter_expression) \
.all()
count_results = defaultdict(int)
result_reports = defaultdict()
# Count and filter out the results for the same checker_id.
for r in q:
count_results[r.checker_id] += 1
result_reports[r.checker_id] = r
results = []
for checker_id, res in result_reports.items():
results.append(ReportDataTypeCount(res.checker_id,
res.severity,
count_results[
res.checker_id]))
# Sort by result count, descending.
results = sorted(results, key=lambda rep: rep.count, reverse=True)
return results
except sqlalchemy.exc.SQLAlchemyError as alchemy_ex:
msg = str(alchemy_ex)
LOG.error(msg)
raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.DATABASE,
msg)
# -----------------------------------------------------------------------
@timeit
def getDiffResultTypes(self,
base_run_id,
new_run_id,
diff_type,
report_filters):
session = self.__session
base_line_hashes, new_check_hashes = \
self.__get_hashes_for_diff(session,
base_run_id,
new_run_id)
if diff_type == DiffType.NEW:
diff_hashes = list(new_check_hashes.difference(base_line_hashes))
if not diff_hashes:
return diff_hashes
run_id = new_run_id
elif diff_type == DiffType.RESOLVED:
diff_hashes = list(base_line_hashes.difference(new_check_hashes))
if not diff_hashes:
return diff_hashes
run_id = base_run_id
elif diff_type == DiffType.UNRESOLVED:
diff_hashes = list(base_line_hashes.intersection(new_check_hashes))
if not diff_hashes:
return diff_hashes
run_id = new_run_id
else:
msg = 'Unsupported diff type: ' + str(diff_type)
LOG.error(msg)
raise shared.ttypes.RequestFailed(shared.ttypes.ErrorCode.DATABASE,
msg)
return self.__queryDiffResultTypes(session,
diff_hashes,
run_id,
report_filters)
| 1 | 6,511 | maybe renaming `version` to `package_version` or something similar would be more descriptive | Ericsson-codechecker | c |
@@ -235,7 +235,7 @@ class MessageTypeToType extends ParquetTypeVisitor<Type> {
return current;
}
- private int getId(org.apache.parquet.schema.Type type) {
+ protected int getId(org.apache.parquet.schema.Type type) {
org.apache.parquet.schema.Type.ID id = type.getId();
if (id != null) {
return id.intValue();
| 1 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.parquet;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.types.Types.TimestampType;
import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.LogicalTypeAnnotation;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Type.Repetition;
import static org.apache.iceberg.types.Types.NestedField.optional;
import static org.apache.iceberg.types.Types.NestedField.required;
class MessageTypeToType extends ParquetTypeVisitor<Type> {
private static final Joiner DOT = Joiner.on(".");
private final Map<String, Integer> aliasToId = Maps.newHashMap();
private final GroupType root;
private int nextId = 1;
MessageTypeToType(GroupType root) {
this.root = root;
this.nextId = 1_000; // use ids that won't match other than for root
}
public Map<String, Integer> getAliases() {
return aliasToId;
}
@Override
public Type message(MessageType message, List<Type> fields) {
return struct(message, fields);
}
@Override
public Type struct(GroupType struct, List<Type> fieldTypes) {
if (struct == root) {
nextId = 1; // use the reserved IDs for the root struct
}
List<org.apache.parquet.schema.Type> parquetFields = struct.getFields();
List<Types.NestedField> fields = Lists.newArrayListWithExpectedSize(fieldTypes.size());
for (int i = 0; i < parquetFields.size(); i += 1) {
org.apache.parquet.schema.Type field = parquetFields.get(i);
Preconditions.checkArgument(
!field.isRepetition(Repetition.REPEATED),
"Fields cannot have repetition REPEATED: %s", field);
int fieldId = getId(field);
addAlias(field.getName(), fieldId);
if (parquetFields.get(i).isRepetition(Repetition.OPTIONAL)) {
fields.add(optional(fieldId, field.getName(), fieldTypes.get(i)));
} else {
fields.add(required(fieldId, field.getName(), fieldTypes.get(i)));
}
}
return Types.StructType.of(fields);
}
@Override
public Type list(GroupType array, Type elementType) {
GroupType repeated = array.getType(0).asGroupType();
org.apache.parquet.schema.Type element = repeated.getType(0);
Preconditions.checkArgument(
!element.isRepetition(Repetition.REPEATED),
"Elements cannot have repetition REPEATED: %s", element);
int elementFieldId = getId(element);
addAlias(element.getName(), elementFieldId);
if (element.isRepetition(Repetition.OPTIONAL)) {
return Types.ListType.ofOptional(elementFieldId, elementType);
} else {
return Types.ListType.ofRequired(elementFieldId, elementType);
}
}
@Override
public Type map(GroupType map, Type keyType, Type valueType) {
GroupType keyValue = map.getType(0).asGroupType();
org.apache.parquet.schema.Type key = keyValue.getType(0);
org.apache.parquet.schema.Type value = keyValue.getType(1);
Preconditions.checkArgument(
!value.isRepetition(Repetition.REPEATED),
"Values cannot have repetition REPEATED: %s", value);
int keyFieldId = getId(key);
int valueFieldId = getId(value);
addAlias(key.getName(), keyFieldId);
addAlias(value.getName(), valueFieldId);
if (value.isRepetition(Repetition.OPTIONAL)) {
return Types.MapType.ofOptional(keyFieldId, valueFieldId, keyType, valueType);
} else {
return Types.MapType.ofRequired(keyFieldId, valueFieldId, keyType, valueType);
}
}
@Override
public Type primitive(PrimitiveType primitive) {
// first, use the logical type annotation, if present
LogicalTypeAnnotation logicalType = primitive.getLogicalTypeAnnotation();
if (logicalType != null) {
Optional<Type> converted = logicalType.accept(ParquetLogicalTypeVisitor.get());
if (converted.isPresent()) {
return converted.get();
}
}
// last, use the primitive type
switch (primitive.getPrimitiveTypeName()) {
case BOOLEAN:
return Types.BooleanType.get();
case INT32:
return Types.IntegerType.get();
case INT64:
return Types.LongType.get();
case FLOAT:
return Types.FloatType.get();
case DOUBLE:
return Types.DoubleType.get();
case FIXED_LEN_BYTE_ARRAY:
return Types.FixedType.ofLength(primitive.getTypeLength());
case BINARY:
return Types.BinaryType.get();
}
throw new UnsupportedOperationException(
"Cannot convert unknown primitive type: " + primitive);
}
private static class ParquetLogicalTypeVisitor implements LogicalTypeAnnotation.LogicalTypeAnnotationVisitor<Type> {
private static final ParquetLogicalTypeVisitor INSTANCE = new ParquetLogicalTypeVisitor();
private static ParquetLogicalTypeVisitor get() {
return INSTANCE;
}
@Override
public Optional<Type> visit(LogicalTypeAnnotation.StringLogicalTypeAnnotation stringType) {
return Optional.of(Types.StringType.get());
}
@Override
public Optional<Type> visit(LogicalTypeAnnotation.EnumLogicalTypeAnnotation enumType) {
return Optional.of(Types.StringType.get());
}
@Override
public Optional<Type> visit(LogicalTypeAnnotation.DecimalLogicalTypeAnnotation decimalType) {
return Optional.of(Types.DecimalType.of(decimalType.getPrecision(), decimalType.getScale()));
}
@Override
public Optional<Type> visit(LogicalTypeAnnotation.DateLogicalTypeAnnotation dateType) {
return Optional.of(Types.DateType.get());
}
@Override
public Optional<Type> visit(LogicalTypeAnnotation.TimeLogicalTypeAnnotation timeType) {
return Optional.of(Types.TimeType.get());
}
@Override
public Optional<Type> visit(LogicalTypeAnnotation.TimestampLogicalTypeAnnotation timestampType) {
return Optional.of(timestampType.isAdjustedToUTC() ? TimestampType.withZone() : TimestampType.withoutZone());
}
@Override
public Optional<Type> visit(LogicalTypeAnnotation.IntLogicalTypeAnnotation intType) {
Preconditions.checkArgument(intType.isSigned() || intType.getBitWidth() < 64,
"Cannot use uint64: not a supported Java type");
if (intType.getBitWidth() < 32) {
return Optional.of(Types.IntegerType.get());
} else if (intType.getBitWidth() == 32 && intType.isSigned()) {
return Optional.of(Types.IntegerType.get());
} else {
return Optional.of(Types.LongType.get());
}
}
@Override
public Optional<Type> visit(LogicalTypeAnnotation.JsonLogicalTypeAnnotation jsonType) {
return Optional.of(Types.StringType.get());
}
@Override
public Optional<Type> visit(LogicalTypeAnnotation.BsonLogicalTypeAnnotation bsonType) {
return Optional.of(Types.BinaryType.get());
}
}
private void addAlias(String name, int fieldId) {
aliasToId.put(DOT.join(path(name)), fieldId);
}
protected int nextId() {
int current = nextId;
nextId += 1;
return current;
}
private int getId(org.apache.parquet.schema.Type type) {
org.apache.parquet.schema.Type.ID id = type.getId();
if (id != null) {
return id.intValue();
} else {
return nextId();
}
}
}
| 1 | 18,388 | Why was this changed? | apache-iceberg | java |
@@ -1,3 +1,4 @@
+//go:build !arm64
// +build !arm64
package nodeps
| 1 |
// +build !arm64
package nodeps
// MariaDBDefaultVersion is the default MariaDB version
const MariaDBDefaultVersion = MariaDB103
// ValidMariaDBVersions is the versions of MariaDB that are valid
var ValidMariaDBVersions = map[string]bool{
MariaDB55: true,
MariaDB100: true,
MariaDB101: true,
MariaDB102: true,
MariaDB103: true,
MariaDB104: true,
MariaDB105: true,
MariaDB106: true,
}
// MariaDB Versions
const (
MariaDB55 = "5.5"
MariaDB100 = "10.0"
MariaDB101 = "10.1"
MariaDB102 = "10.2"
MariaDB103 = "10.3"
MariaDB104 = "10.4"
MariaDB105 = "10.5"
MariaDB106 = "10.6"
)
| 1 | 15,789 | Should not have snuck in here, right? This is a golang 1.17 feature; we'll definitely want to update these | drud-ddev | php |
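A short sketch of the dual-constraint header this patch introduces. The //go:build line is the Go 1.17+ syntax, the legacy // +build line keeps older toolchains working, and gofmt keeps the pair in sync:

// Sketch: both lines must express the same condition. On Go 1.17+,
// gofmt maintains the pair automatically; the blank line before
// `package` keeps the constraints from being read as package docs.
//go:build !arm64
// +build !arm64

package nodeps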
@@ -1,10 +1,10 @@
module OrgAdmin
class PlansController < ApplicationController
- after_action :verify_authorized
-
+ # GET org_admin/plans
def index
- authorize Plan
-
+ # Test auth directly and throw Pundit error since Pundit is unaware of namespacing
+ raise Pundit::NotAuthorizedError unless current_user.present? && current_user.can_org_admin?
+
vals = Role.access_values_for(:reviewer)
@feedback_plans = Plan.joins(:roles).where('roles.user_id = ? and roles.access IN (?)', current_user.id, vals)
@plans = current_user.org.plans
| 1 |
module OrgAdmin
class PlansController < ApplicationController
after_action :verify_authorized
def index
authorize Plan
vals = Role.access_values_for(:reviewer)
@feedback_plans = Plan.joins(:roles).where('roles.user_id = ? and roles.access IN (?)', current_user.id, vals)
@plans = current_user.org.plans
end
# GET org_admin/plans/:id/feedback_complete
def feedback_complete
plan = Plan.find(params[:id])
authorize plan
if plan.complete_feedback(current_user)
redirect_to org_admin_plans_path, notice: _('%{plan_owner} has been notified that you have finished providing feedback') % { plan_owner: plan.owner.name(false) }
else
redirect_to org_admin_plans_path, alert: _('Unable to notify user that you have finished providing feedback.')
end
end
end
end
| 1 | 17,267 | Pundit is unaware of namespacing; however, you can still create the class under policies/org_admin/plans_policy.rb. That way, the condition in the unless can be reused in other places too. You would call the policy as OrgAdmin::PlansPolicy.new(current_user).index? in place of that unless | DMPRoadmap-roadmap | rb |
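A sketch of the policy class this comment describes. The file path, class name, and the index? call come from the comment itself; the can_org_admin? check mirrors the patch, and the rest is illustrative:

# app/policies/org_admin/plans_policy.rb
module OrgAdmin
  class PlansPolicy
    def initialize(user, record = nil)
      @user = user
      @record = record
    end

    # Reusable form of the guard from the patch.
    def index?
      @user.present? && @user.can_org_admin?
    end
  end
end

# The controller guard then becomes:
#   raise Pundit::NotAuthorizedError unless OrgAdmin::PlansPolicy.new(current_user).index?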
@@ -203,6 +203,13 @@ public class PMD {
return 0;
}
+ // Log warning only once, if not explicitly disabled
+ if (!configuration.isIgnoreIncrementalAnalysis() && LOG.isLoggable(Level.WARNING)) {
+ final String version = PMDVersion.isUnknown() || PMDVersion.isSnapshot() ? "latest" : "pmd-" + PMDVersion.VERSION;
+ LOG.warning("This analysis could be faster, please consider using Incremental Analysis: "
+ + "https://pmd.github.io/" + version + "/pmd_userdocs_getting_started.html#incremental-analysis");
+ }
+
Set<Language> languages = getApplicableLanguages(configuration, ruleSets);
List<DataSource> files = getApplicableFiles(configuration, languages);
| 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd;
import java.io.File;
import java.io.IOException;
import java.net.URISyntaxException;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Handler;
import java.util.logging.Level;
import java.util.logging.Logger;
import net.sourceforge.pmd.benchmark.Benchmark;
import net.sourceforge.pmd.benchmark.Benchmarker;
import net.sourceforge.pmd.benchmark.TextReport;
import net.sourceforge.pmd.cli.PMDCommandLineInterface;
import net.sourceforge.pmd.cli.PMDParameters;
import net.sourceforge.pmd.lang.Language;
import net.sourceforge.pmd.lang.LanguageFilenameFilter;
import net.sourceforge.pmd.lang.LanguageVersion;
import net.sourceforge.pmd.lang.LanguageVersionDiscoverer;
import net.sourceforge.pmd.lang.LanguageVersionHandler;
import net.sourceforge.pmd.lang.Parser;
import net.sourceforge.pmd.lang.ParserOptions;
import net.sourceforge.pmd.processor.MonoThreadProcessor;
import net.sourceforge.pmd.processor.MultiThreadProcessor;
import net.sourceforge.pmd.renderers.Renderer;
import net.sourceforge.pmd.stat.Metric;
import net.sourceforge.pmd.util.ClasspathClassLoader;
import net.sourceforge.pmd.util.FileUtil;
import net.sourceforge.pmd.util.IOUtil;
import net.sourceforge.pmd.util.ResourceLoader;
import net.sourceforge.pmd.util.database.DBMSMetadata;
import net.sourceforge.pmd.util.database.DBURI;
import net.sourceforge.pmd.util.database.SourceObject;
import net.sourceforge.pmd.util.datasource.DataSource;
import net.sourceforge.pmd.util.datasource.ReaderDataSource;
import net.sourceforge.pmd.util.log.ConsoleLogHandler;
import net.sourceforge.pmd.util.log.ScopedLogHandlersManager;
/**
* This is the main class for interacting with PMD. The primary flow of all Rule
* process is controlled via interactions with this class. A command line
* interface is supported, as well as a programmatic API for integrating PMD
* with other software such as IDEs and Ant.
*/
public class PMD {
private static final Logger LOG = Logger.getLogger(PMD.class.getName());
/**
* The line delimiter used by PMD in outputs. Usually the platform specific
* line separator.
*/
public static final String EOL = System.getProperty("line.separator", "\n");
/** The default suppress marker string. */
public static final String SUPPRESS_MARKER = "NOPMD";
/**
* Contains the configuration with which this PMD instance has been created.
*/
protected final PMDConfiguration configuration;
private final SourceCodeProcessor rulesetsFileProcessor;
/**
* Constant that contains always the current version of PMD.
* @deprecated Use {@link PMDVersion#VERSION} instead.
*/
@Deprecated // to be removed with PMD 7.0.0.
public static final String VERSION = PMDVersion.VERSION;
/**
* Create a PMD instance using a default Configuration. Changes to the
* configuration may be required.
*/
public PMD() {
this(new PMDConfiguration());
}
/**
* Create a PMD instance using the specified Configuration.
*
* @param configuration
* The runtime Configuration of PMD to use.
*/
public PMD(PMDConfiguration configuration) {
this.configuration = configuration;
this.rulesetsFileProcessor = new SourceCodeProcessor(configuration);
}
/**
* Parses the given string as a database uri and returns a list of
* datasources.
*
* @param uriString
* the URI to parse
* @return list of data sources
* @throws PMDException
* if the URI couldn't be parsed
* @see DBURI
*/
public static List<DataSource> getURIDataSources(String uriString) throws PMDException {
List<DataSource> dataSources = new ArrayList<>();
try {
DBURI dbUri = new DBURI(uriString);
DBMSMetadata dbmsMetadata = new DBMSMetadata(dbUri);
LOG.log(Level.FINE, "DBMSMetadata retrieved");
List<SourceObject> sourceObjectList = dbmsMetadata.getSourceObjectList();
LOG.log(Level.FINE, "Located {0} database source objects", sourceObjectList.size());
for (SourceObject sourceObject : sourceObjectList) {
String falseFilePath = sourceObject.getPseudoFileName();
LOG.log(Level.FINEST, "Adding database source object {0}", falseFilePath);
try {
dataSources.add(new ReaderDataSource(dbmsMetadata.getSourceCode(sourceObject), falseFilePath));
} catch (SQLException ex) {
if (LOG.isLoggable(Level.WARNING)) {
LOG.log(Level.WARNING, "Cannot get SourceCode for " + falseFilePath + " - skipping ...", ex);
}
}
}
} catch (URISyntaxException e) {
throw new PMDException("Cannot get DataSources from DBURI - \"" + uriString + "\"", e);
} catch (SQLException e) {
throw new PMDException(
"Cannot get DataSources from DBURI, couldn't access the database - \"" + uriString + "\"", e);
} catch (ClassNotFoundException e) {
throw new PMDException(
"Cannot get DataSources from DBURI, probably missing database jdbc driver - \"" + uriString + "\"",
e);
} catch (Exception e) {
throw new PMDException("Encountered unexpected problem with URI \"" + uriString + "\"", e);
}
return dataSources;
}
/**
* Helper method to get a configured parser for the requested language. The
* parser is configured based on the given {@link PMDConfiguration}.
*
* @param languageVersion
* the requested language
* @param configuration
* the given configuration
* @return the pre-configured parser
*/
public static Parser parserFor(LanguageVersion languageVersion, PMDConfiguration configuration) {
// TODO Handle Rules having different parser options.
LanguageVersionHandler languageVersionHandler = languageVersion.getLanguageVersionHandler();
ParserOptions options = languageVersionHandler.getDefaultParserOptions();
if (configuration != null) {
options.setSuppressMarker(configuration.getSuppressMarker());
}
return languageVersionHandler.getParser(options);
}
/**
* Get the runtime configuration. The configuration can be modified to
* affect how PMD behaves.
*
* @return The configuration.
* @see PMDConfiguration
*/
public PMDConfiguration getConfiguration() {
return configuration;
}
/**
* Gets the source code processor.
*
* @return SourceCodeProcessor
*/
public SourceCodeProcessor getSourceCodeProcessor() {
return rulesetsFileProcessor;
}
/**
* This method is the main entry point for command line usage.
*
* @param configuration
* the configure to use
* @return number of violations found.
*/
public static int doPMD(PMDConfiguration configuration) {
// Load the RuleSets
RuleSetFactory ruleSetFactory = RulesetsFactoryUtils.getRulesetFactory(configuration, new ResourceLoader());
RuleSets ruleSets = RulesetsFactoryUtils.getRuleSetsWithBenchmark(configuration.getRuleSets(), ruleSetFactory);
if (ruleSets == null) {
return 0;
}
Set<Language> languages = getApplicableLanguages(configuration, ruleSets);
List<DataSource> files = getApplicableFiles(configuration, languages);
long reportStart = System.nanoTime();
try {
Renderer renderer = configuration.createRenderer();
List<Renderer> renderers = Collections.singletonList(renderer);
renderer.setWriter(IOUtil.createWriter(configuration.getReportFile()));
renderer.start();
Benchmarker.mark(Benchmark.Reporting, System.nanoTime() - reportStart, 0);
RuleContext ctx = new RuleContext();
final AtomicInteger violations = new AtomicInteger(0);
ctx.getReport().addListener(new ThreadSafeReportListener() {
@Override
public void ruleViolationAdded(RuleViolation ruleViolation) {
violations.incrementAndGet();
}
@Override
public void metricAdded(Metric metric) {
}
});
processFiles(configuration, ruleSetFactory, files, ctx, renderers);
reportStart = System.nanoTime();
renderer.end();
renderer.flush();
return violations.get();
} catch (Exception e) {
String message = e.getMessage();
if (message != null) {
LOG.severe(message);
} else {
LOG.log(Level.SEVERE, "Exception during processing", e);
}
LOG.log(Level.FINE, "Exception during processing", e);
LOG.info(PMDCommandLineInterface.buildUsageText());
return 0;
} finally {
Benchmarker.mark(Benchmark.Reporting, System.nanoTime() - reportStart, 0);
/*
* Make sure it's our own classloader before attempting to close it....
* Maven + Jacoco provide us with a closeable classloader that, if closed,
* will throw a ClassNotFoundException.
*/
if (configuration.getClassLoader() instanceof ClasspathClassLoader) {
IOUtil.tryCloseClassLoader(configuration.getClassLoader());
}
}
}
/**
* Creates a new rule context, initialized with a new, empty report.
*
* @param sourceCodeFilename
* the source code filename
* @param sourceCodeFile
* the source code file
* @return the rule context
*/
public static RuleContext newRuleContext(String sourceCodeFilename, File sourceCodeFile) {
RuleContext context = new RuleContext();
context.setSourceCodeFile(sourceCodeFile);
context.setSourceCodeFilename(sourceCodeFilename);
context.setReport(new Report());
return context;
}
/**
* Run PMD on a list of files using multiple threads - if more than one is
* available
*
* @param configuration
* Configuration
* @param ruleSetFactory
* RuleSetFactory
* @param files
* List of {@link DataSource}s
* @param ctx
* RuleContext
* @param renderers
* List of {@link Renderer}s
*/
public static void processFiles(final PMDConfiguration configuration, final RuleSetFactory ruleSetFactory,
final List<DataSource> files, final RuleContext ctx, final List<Renderer> renderers) {
sortFiles(configuration, files);
// Make sure the cache is listening for analysis results
ctx.getReport().addListener(configuration.getAnalysisCache());
final RuleSetFactory silentFactoy = new RuleSetFactory(ruleSetFactory, false);
/*
* Check if multithreaded support is available. ExecutorService can also
* be disabled if threadCount is not positive, e.g. using the
* "-threads 0" command line option.
*/
if (configuration.getThreads() > 0) {
new MultiThreadProcessor(configuration).processFiles(silentFactoy, files, ctx, renderers);
} else {
new MonoThreadProcessor(configuration).processFiles(silentFactoy, files, ctx, renderers);
}
// Persist the analysis cache
configuration.getAnalysisCache().persist();
}
private static void sortFiles(final PMDConfiguration configuration, final List<DataSource> files) {
if (configuration.isStressTest()) {
// randomize processing order
Collections.shuffle(files);
} else {
final boolean useShortNames = configuration.isReportShortNames();
final String inputPaths = configuration.getInputPaths();
Collections.sort(files, new Comparator<DataSource>() {
@Override
public int compare(DataSource left, DataSource right) {
String leftString = left.getNiceFileName(useShortNames, inputPaths);
String rightString = right.getNiceFileName(useShortNames, inputPaths);
return leftString.compareTo(rightString);
}
});
}
}
/**
* Determines all the files, that should be analyzed by PMD.
*
* @param configuration
* contains either the file path or the DB URI, from where to
* load the files
* @param languages
* used to filter by file extension
* @return List of {@link DataSource} of files
*/
public static List<DataSource> getApplicableFiles(PMDConfiguration configuration, Set<Language> languages) {
long startFiles = System.nanoTime();
List<DataSource> files = internalGetApplicableFiles(configuration, languages);
long endFiles = System.nanoTime();
Benchmarker.mark(Benchmark.CollectFiles, endFiles - startFiles, 0);
return files;
}
private static List<DataSource> internalGetApplicableFiles(PMDConfiguration configuration,
Set<Language> languages) {
LanguageFilenameFilter fileSelector = new LanguageFilenameFilter(languages);
List<DataSource> files = new ArrayList<>();
if (null != configuration.getInputPaths()) {
files.addAll(FileUtil.collectFiles(configuration.getInputPaths(), fileSelector));
}
if (null != configuration.getInputUri()) {
String uriString = configuration.getInputUri();
try {
List<DataSource> dataSources = getURIDataSources(uriString);
files.addAll(dataSources);
} catch (PMDException ex) {
LOG.log(Level.SEVERE, "Problem with Input URI", ex);
throw new RuntimeException("Problem with DBURI: " + uriString, ex);
}
}
if (null != configuration.getInputFilePath()) {
String inputFilePath = configuration.getInputFilePath();
File file = new File(inputFilePath);
try {
if (!file.exists()) {
LOG.log(Level.SEVERE, "Problem with Input File Path", inputFilePath);
throw new RuntimeException("Problem with Input File Path: " + inputFilePath);
} else {
String filePaths = FileUtil.readFilelist(new File(inputFilePath));
files.addAll(FileUtil.collectFiles(filePaths, fileSelector));
}
} catch (IOException ex) {
LOG.log(Level.SEVERE, "Problem with Input File", ex);
throw new RuntimeException("Problem with Input File Path: " + inputFilePath, ex);
}
}
return files;
}
private static Set<Language> getApplicableLanguages(PMDConfiguration configuration, RuleSets ruleSets) {
Set<Language> languages = new HashSet<>();
LanguageVersionDiscoverer discoverer = configuration.getLanguageVersionDiscoverer();
for (Rule rule : ruleSets.getAllRules()) {
Language language = rule.getLanguage();
if (languages.contains(language)) {
continue;
}
LanguageVersion version = discoverer.getDefaultLanguageVersion(language);
if (RuleSet.applies(rule, version)) {
languages.add(language);
if (LOG.isLoggable(Level.FINE)) {
LOG.fine("Using " + language.getShortName() + " version: " + version.getShortName());
}
}
}
return languages;
}
/**
* Entry to invoke PMD as command line tool
*
* @param args
* command line arguments
*/
public static void main(String[] args) {
PMDCommandLineInterface.run(args);
}
/**
* Parses the command line arguments and executes PMD.
*
* @param args
* command line arguments
* @return the exit code, where <code>0</code> means successful execution,
* <code>1</code> means error, <code>4</code> means there have been
* violations found.
*/
public static int run(String[] args) {
int status = 0;
long start = System.nanoTime();
final PMDParameters params = PMDCommandLineInterface.extractParameters(new PMDParameters(), args, "pmd");
final PMDConfiguration configuration = PMDParameters.transformParametersIntoConfiguration(params);
final Level logLevel = params.isDebug() ? Level.FINER : Level.INFO;
final Handler logHandler = new ConsoleLogHandler();
final ScopedLogHandlersManager logHandlerManager = new ScopedLogHandlersManager(logLevel, logHandler);
final Level oldLogLevel = LOG.getLevel();
// Need to do this, since the static logger has already been initialized
// at this point
LOG.setLevel(logLevel);
try {
int violations = PMD.doPMD(configuration);
if (violations > 0 && configuration.isFailOnViolation()) {
status = PMDCommandLineInterface.VIOLATIONS_FOUND;
} else {
status = 0;
}
} catch (Exception e) {
System.out.println(PMDCommandLineInterface.buildUsageText());
System.out.println();
System.err.println(e.getMessage());
status = PMDCommandLineInterface.ERROR_STATUS;
} finally {
logHandlerManager.close();
LOG.setLevel(oldLogLevel);
if (params.isBenchmark()) {
long end = System.nanoTime();
Benchmarker.mark(Benchmark.TotalPMD, end - start, 0);
// TODO get specified report format from config
TextReport report = new TextReport();
report.generate(Benchmarker.values(), System.err);
}
}
return status;
}
}
| 1 | 13,698 | This warning shouldn't be produced if we configured a cache, either | pmd-pmd | java |
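A sketch of the amended guard. Treating a no-op cache (e.g. PMD's NoopAnalysisCache) as the marker for "no cache configured" is an assumption, not something this record confirms:

// Sketch: also skip the nag when the user already configured a cache.
// NoopAnalysisCache as the "no cache configured" marker is assumed.
final boolean cacheConfigured =
        !(configuration.getAnalysisCache() instanceof NoopAnalysisCache);
if (!configuration.isIgnoreIncrementalAnalysis() && !cacheConfigured
        && LOG.isLoggable(Level.WARNING)) {
    final String version = PMDVersion.isUnknown() || PMDVersion.isSnapshot()
            ? "latest" : "pmd-" + PMDVersion.VERSION;
    LOG.warning("This analysis could be faster, please consider using Incremental Analysis: "
            + "https://pmd.github.io/" + version + "/pmd_userdocs_getting_started.html#incremental-analysis");
}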
@@ -16,6 +16,7 @@ describe('examples(change-stream):', function() {
client = await this.configuration.newClient().connect();
db = client.db(this.configuration.db);
+ await db.createCollection('inventory');
await db.collection('inventory').deleteMany({});
});
| 1 | /* eslint no-unused-vars: 0 */
'use strict';
const setupDatabase = require('../functional/shared').setupDatabase;
const expect = require('chai').expect;
describe('examples(change-stream):', function() {
let client;
let db;
before(async function() {
await setupDatabase(this.configuration);
});
beforeEach(async function() {
client = await this.configuration.newClient().connect();
db = client.db(this.configuration.db);
await db.collection('inventory').deleteMany({});
});
afterEach(async function() {
await client.close();
client = undefined;
db = undefined;
});
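// Runs an async lambda repeatedly on an interval until stopped; the tests
// below use it to generate writes for the change streams to observe.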
class Looper {
constructor(lambda, interval) {
this._run = false;
this._lambda = lambda;
this._interval = interval || 50;
}
async _go() {
this._run = true;
while (this._run) {
await new Promise(r => setTimeout(r, this._interval));
await this._lambda();
}
}
run() {
this._p = this._go().catch(() => {});
}
stop() {
this._run = false;
return this._p;
}
}
it('Open A Change Stream', {
metadata: { requires: { topology: ['replicaset'], mongodb: '>=3.6.0' } },
test: async function() {
const looper = new Looper(() => db.collection('inventory').insertOne({ a: 1 }));
looper.run();
// Start Changestream Example 1
const collection = db.collection('inventory');
const changeStream = collection.watch();
changeStream.on('change', next => {
// process next document
});
// End Changestream Example 1
// Start Changestream Example 1 Alternative
const changeStreamIterator = collection.watch();
const next = await changeStreamIterator.next();
// End Changestream Example 1 Alternative
await changeStream.close();
await changeStreamIterator.close();
await looper.stop();
expect(next)
.to.have.property('operationType')
.that.equals('insert');
}
});
it('Lookup Full Document for Update Operations', {
metadata: { requires: { topology: ['replicaset'], mongodb: '>=3.6.0' } },
test: async function() {
await db.collection('inventory').insertOne({ a: 1, b: 2 });
const looper = new Looper(() =>
db.collection('inventory').updateOne({ a: 1 }, { $set: { a: 2 } })
);
looper.run();
// Start Changestream Example 2
const collection = db.collection('inventory');
const changeStream = collection.watch({ fullDocument: 'updateLookup' });
changeStream.on('change', next => {
// process next document
});
// End Changestream Example 2
// Start Changestream Example 2 Alternative
const changeStreamIterator = collection.watch({ fullDocument: 'updateLookup' });
const next = await changeStreamIterator.next();
// End Changestream Example 2 Alternative
await changeStream.close();
await changeStreamIterator.close();
await looper.stop();
expect(next)
.to.have.property('operationType')
.that.equals('update');
expect(next)
.to.have.property('fullDocument')
.that.has.all.keys(['_id', 'a', 'b']);
}
});
it('Resume a Change Stream', {
metadata: { requires: { topology: ['replicaset'], mongodb: '>=3.6.0' } },
test: async function() {
const looper = new Looper(async () => {
await db.collection('inventory').insertOne({ a: 1 });
await db.collection('inventory').insertOne({ b: 2 });
});
looper.run();
// Start Changestream Example 3
const collection = db.collection('inventory');
const changeStream = collection.watch();
let newChangeStream;
changeStream.on('change', next => {
const resumeToken = changeStream.resumeToken;
changeStream.close();
newChangeStream = collection.watch({ resumeAfter: resumeToken });
newChangeStream.on('change', next => {
// process next document
});
});
// End Changestream Example 3
// Start Changestream Example 3 Alternative
const changeStreamIterator = collection.watch();
const change1 = await changeStreamIterator.next();
const resumeToken = changeStreamIterator.resumeToken;
changeStreamIterator.close();
const newChangeStreamIterator = collection.watch({ resumeAfter: resumeToken });
const change2 = await newChangeStreamIterator.next();
// End Changestream Example 3 Alternative
await newChangeStreamIterator.close();
await newChangeStream.close();
await looper.stop();
expect(change1).to.have.nested.property('fullDocument.a', 1);
expect(change2).to.have.nested.property('fullDocument.b', 2);
}
});
it('Modify Change Stream Output', {
metadata: { requires: { topology: ['replicaset'], mongodb: '>=3.6.0' } },
test: async function() {
const looper = new Looper(async () => {
await db.collection('inventory').insertOne({ username: 'alice' });
});
looper.run();
// Start Changestream Example 4
const pipeline = [
{ $match: { 'fullDocument.username': 'alice' } },
{ $addFields: { newField: 'this is an added field!' } }
];
const collection = db.collection('inventory');
const changeStream = collection.watch(pipeline);
changeStream.on('change', next => {
// process next document
});
// End Changestream Example 4
// Start Changestream Example 4 Alternative
const changeStreamIterator = collection.watch(pipeline);
const next = await changeStreamIterator.next();
// End Changestream Example 4 Alternative
await changeStream.close();
await changeStreamIterator.close();
await looper.stop();
expect(next).to.have.nested.property('fullDocument.username', 'alice');
expect(next).to.have.property('newField', 'this is an added field!');
}
});
});
| 1 | 16,571 | what if the collection is already there? | mongodb-node-mongodb-native | js |
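One way to resolve this, sketched under the assumption that the server reports error code 48 (NamespaceExists) when the collection already exists:

// Sketch: make the beforeEach hook idempotent by tolerating an existing
// 'inventory' collection and rethrowing anything else.
try {
  await db.createCollection('inventory');
} catch (err) {
  if (err.code !== 48) {
    throw err; // 48 === NamespaceExists
  }
}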
@@ -24,8 +24,7 @@ import java.io.OutputStream;
import java.util.Base64;
/**
- * Defines the output type for a screenshot. See org.openqa.selenium.Screenshot for usage and
- * examples.
+ * Defines the output type for a screenshot.
*
* @see TakesScreenshot
* @param <T> Type for the screenshot output. | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Base64;
/**
* Defines the output type for a screenshot. See org.openqa.selenium.Screenshot for usage and
* examples.
*
* @see TakesScreenshot
* @param <T> Type for the screenshot output.
*/
public interface OutputType<T> {
/**
* Obtain the screenshot as base64 data.
*/
OutputType<String> BASE64 = new OutputType<String>() {
public String convertFromBase64Png(String base64Png) {
return base64Png;
}
public String convertFromPngBytes(byte[] png) {
return Base64.getEncoder().encodeToString(png);
}
public String toString() {
return "OutputType.BASE64";
}
};
/**
* Obtain the screenshot as raw bytes.
*/
OutputType<byte[]> BYTES = new OutputType<byte[]>() {
public byte[] convertFromBase64Png(String base64Png) {
return Base64.getDecoder().decode(base64Png);
}
public byte[] convertFromPngBytes(byte[] png) {
return png;
}
public String toString() {
return "OutputType.BYTES";
}
};
/**
* Obtain the screenshot into a temporary file that will be deleted once the JVM exits. It is up
* to users to make a copy of this file.
*/
OutputType<File> FILE = new OutputType<File>() {
public File convertFromBase64Png(String base64Png) {
return save(BYTES.convertFromBase64Png(base64Png));
}
public File convertFromPngBytes(byte[] data) {
return save(data);
}
private File save(byte[] data) {
OutputStream stream = null;
try {
File tmpFile = File.createTempFile("screenshot", ".png");
tmpFile.deleteOnExit();
stream = new FileOutputStream(tmpFile);
stream.write(data);
return tmpFile;
} catch (IOException e) {
throw new WebDriverException(e);
} finally {
if (stream != null) {
try {
stream.close();
} catch (IOException e) {
// Nothing sane to do
}
}
}
}
public String toString() {
return "OutputType.FILE";
}
};
/**
* Convert the given base64 png to a requested format.
*
* @param base64Png base64 encoded png.
* @return png encoded into requested format.
*/
T convertFromBase64Png(String base64Png);
/**
* Convert the given png to a requested format.
*
* @param png an array of bytes forming a png file.
* @return png encoded into requested format.
*/
T convertFromPngBytes(byte[] png);
}
| 1 | 13,489 | instead of removing can you reference org.openqa.selenium.TakesScreenshot ? | SeleniumHQ-selenium | java |
@@ -644,4 +644,6 @@ public class FeedItemlistFragment extends Fragment implements AdapterView.OnItem
}
}
}
+
+
} | 1 | package de.danoeh.antennapod.fragment;
import android.content.Context;
import android.content.Intent;
import android.content.res.Configuration;
import android.graphics.LightingColorFilter;
import android.os.Bundle;
import android.os.Handler;
import android.os.Looper;
import android.util.Log;
import android.view.ContextMenu;
import android.view.LayoutInflater;
import android.view.MenuItem;
import android.view.View;
import android.view.ViewGroup;
import android.widget.AdapterView;
import android.widget.ImageButton;
import android.widget.ImageView;
import android.widget.ProgressBar;
import android.widget.TextView;
import android.widget.Toast;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.appcompat.widget.AppCompatDrawableManager;
import androidx.appcompat.widget.Toolbar;
import androidx.fragment.app.Fragment;
import androidx.recyclerview.widget.RecyclerView;
import androidx.swiperefreshlayout.widget.SwipeRefreshLayout;
import com.bumptech.glide.Glide;
import com.bumptech.glide.request.RequestOptions;
import com.google.android.material.appbar.AppBarLayout;
import com.google.android.material.appbar.CollapsingToolbarLayout;
import com.google.android.material.snackbar.Snackbar;
import com.joanzapata.iconify.Iconify;
import com.joanzapata.iconify.widget.IconTextView;
import com.leinardi.android.speeddial.SpeedDialView;
import org.apache.commons.lang3.Validate;
import org.greenrobot.eventbus.EventBus;
import org.greenrobot.eventbus.Subscribe;
import org.greenrobot.eventbus.ThreadMode;
import java.util.List;
import java.util.Set;
import de.danoeh.antennapod.R;
import de.danoeh.antennapod.activity.MainActivity;
import de.danoeh.antennapod.adapter.EpisodeItemListAdapter;
import de.danoeh.antennapod.core.dialog.DownloadRequestErrorDialogCreator;
import de.danoeh.antennapod.core.event.DownloadEvent;
import de.danoeh.antennapod.core.event.DownloaderUpdate;
import de.danoeh.antennapod.core.event.FeedItemEvent;
import de.danoeh.antennapod.core.event.FeedListUpdateEvent;
import de.danoeh.antennapod.core.event.PlaybackPositionEvent;
import de.danoeh.antennapod.core.event.PlayerStatusEvent;
import de.danoeh.antennapod.core.event.UnreadItemsUpdateEvent;
import de.danoeh.antennapod.model.feed.Feed;
import de.danoeh.antennapod.core.feed.FeedEvent;
import de.danoeh.antennapod.model.feed.FeedItem;
import de.danoeh.antennapod.model.feed.FeedItemFilter;
import de.danoeh.antennapod.core.glide.ApGlideSettings;
import de.danoeh.antennapod.core.glide.FastBlurTransformation;
import de.danoeh.antennapod.core.service.download.DownloadService;
import de.danoeh.antennapod.core.storage.DBReader;
import de.danoeh.antennapod.core.storage.DBTasks;
import de.danoeh.antennapod.core.storage.DBWriter;
import de.danoeh.antennapod.core.storage.DownloadRequestException;
import de.danoeh.antennapod.core.storage.DownloadRequester;
import de.danoeh.antennapod.core.util.FeedItemPermutors;
import de.danoeh.antennapod.core.util.FeedItemUtil;
import de.danoeh.antennapod.core.util.gui.MoreContentListFooterUtil;
import de.danoeh.antennapod.dialog.FilterDialog;
import de.danoeh.antennapod.dialog.RemoveFeedDialog;
import de.danoeh.antennapod.dialog.RenameFeedDialog;
import de.danoeh.antennapod.fragment.actions.EpisodeMultiSelectActionHandler;
import de.danoeh.antennapod.menuhandler.FeedItemMenuHandler;
import de.danoeh.antennapod.menuhandler.FeedMenuHandler;
import de.danoeh.antennapod.menuhandler.MenuItemUtils;
import de.danoeh.antennapod.view.EpisodeItemListRecyclerView;
import de.danoeh.antennapod.view.ToolbarIconTintManager;
import de.danoeh.antennapod.view.viewholder.EpisodeItemViewHolder;
import io.reactivex.Observable;
import io.reactivex.android.schedulers.AndroidSchedulers;
import io.reactivex.disposables.Disposable;
import io.reactivex.schedulers.Schedulers;
/**
* Displays a list of FeedItems.
*/
public class FeedItemlistFragment extends Fragment implements AdapterView.OnItemClickListener,
Toolbar.OnMenuItemClickListener, EpisodeItemListAdapter.OnEndSelectModeListener {
private static final String TAG = "ItemlistFragment";
private static final String ARGUMENT_FEED_ID = "argument.de.danoeh.antennapod.feed_id";
private static final String KEY_UP_ARROW = "up_arrow";
private FeedItemListAdapter adapter;
private MoreContentListFooterUtil nextPageLoader;
private ProgressBar progressBar;
private EpisodeItemListRecyclerView recyclerView;
private TextView txtvTitle;
private IconTextView txtvFailure;
private ImageView imgvBackground;
private ImageView imgvCover;
private TextView txtvInformation;
private TextView txtvAuthor;
private TextView txtvUpdatesDisabled;
private ImageButton butShowInfo;
private ImageButton butShowSettings;
private View header;
private Toolbar toolbar;
private ToolbarIconTintManager iconTintManager;
private SpeedDialView speedDialView;
private boolean displayUpArrow;
private long feedID;
private Feed feed;
private boolean headerCreated = false;
private boolean isUpdatingFeed;
private Disposable disposable;
/**
     * Creates a new FeedItemlistFragment which shows the FeedItems of a
     * specific feed.
*
* @param feedId The id of the feed to show
* @return the newly created instance of an ItemlistFragment
*/
public static FeedItemlistFragment newInstance(long feedId) {
FeedItemlistFragment i = new FeedItemlistFragment();
Bundle b = new Bundle();
b.putLong(ARGUMENT_FEED_ID, feedId);
i.setArguments(b);
return i;
}
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setRetainInstance(true);
Bundle args = getArguments();
Validate.notNull(args);
feedID = args.getLong(ARGUMENT_FEED_ID);
}
@Nullable
@Override
public View onCreateView(@NonNull LayoutInflater inflater, @Nullable ViewGroup container,
@Nullable Bundle savedInstanceState) {
View root = inflater.inflate(R.layout.feed_item_list_fragment, container, false);
toolbar = root.findViewById(R.id.toolbar);
toolbar.inflateMenu(R.menu.feedlist);
toolbar.setOnMenuItemClickListener(this);
displayUpArrow = getParentFragmentManager().getBackStackEntryCount() != 0;
if (savedInstanceState != null) {
displayUpArrow = savedInstanceState.getBoolean(KEY_UP_ARROW);
}
((MainActivity) getActivity()).setupToolbarToggle(toolbar, displayUpArrow);
refreshToolbarState();
recyclerView = root.findViewById(R.id.recyclerView);
recyclerView.setRecycledViewPool(((MainActivity) getActivity()).getRecycledViewPool());
progressBar = root.findViewById(R.id.progLoading);
txtvTitle = root.findViewById(R.id.txtvTitle);
txtvAuthor = root.findViewById(R.id.txtvAuthor);
imgvBackground = root.findViewById(R.id.imgvBackground);
imgvCover = root.findViewById(R.id.imgvCover);
butShowInfo = root.findViewById(R.id.butShowInfo);
butShowSettings = root.findViewById(R.id.butShowSettings);
txtvInformation = root.findViewById(R.id.txtvInformation);
txtvFailure = root.findViewById(R.id.txtvFailure);
txtvUpdatesDisabled = root.findViewById(R.id.txtvUpdatesDisabled);
header = root.findViewById(R.id.headerContainer);
AppBarLayout appBar = root.findViewById(R.id.appBar);
CollapsingToolbarLayout collapsingToolbar = root.findViewById(R.id.collapsing_toolbar);
iconTintManager = new ToolbarIconTintManager(getContext(), toolbar, collapsingToolbar) {
@Override
protected void doTint(Context themedContext) {
toolbar.getMenu().findItem(R.id.sort_items)
.setIcon(AppCompatDrawableManager.get().getDrawable(themedContext, R.drawable.ic_sort));
toolbar.getMenu().findItem(R.id.filter_items)
.setIcon(AppCompatDrawableManager.get().getDrawable(themedContext, R.drawable.ic_filter));
toolbar.getMenu().findItem(R.id.refresh_item)
.setIcon(AppCompatDrawableManager.get().getDrawable(themedContext, R.drawable.ic_refresh));
toolbar.getMenu().findItem(R.id.action_search)
.setIcon(AppCompatDrawableManager.get().getDrawable(themedContext, R.drawable.ic_search));
}
};
iconTintManager.updateTint();
appBar.addOnOffsetChangedListener(iconTintManager);
nextPageLoader = new MoreContentListFooterUtil(root.findViewById(R.id.more_content_list_footer));
nextPageLoader.setClickListener(() -> {
if (feed != null) {
try {
DBTasks.loadNextPageOfFeed(getActivity(), feed, false);
} catch (DownloadRequestException e) {
e.printStackTrace();
DownloadRequestErrorDialogCreator.newRequestErrorDialog(getActivity(), e.getMessage());
}
}
});
recyclerView.addOnScrollListener(new RecyclerView.OnScrollListener() {
@Override
public void onScrolled(@NonNull RecyclerView view, int deltaX, int deltaY) {
super.onScrolled(view, deltaX, deltaY);
boolean hasMorePages = feed != null && feed.isPaged() && feed.getNextPageLink() != null;
nextPageLoader.getRoot().setVisibility(
(recyclerView.isScrolledToBottom() && hasMorePages) ? View.VISIBLE : View.GONE);
}
});
EventBus.getDefault().register(this);
SwipeRefreshLayout swipeRefreshLayout = root.findViewById(R.id.swipeRefresh);
swipeRefreshLayout.setOnRefreshListener(() -> {
try {
DBTasks.forceRefreshFeed(requireContext(), feed, true);
} catch (DownloadRequestException e) {
e.printStackTrace();
}
new Handler(Looper.getMainLooper()).postDelayed(() -> swipeRefreshLayout.setRefreshing(false),
getResources().getInteger(R.integer.swipe_to_refresh_duration_in_ms));
});
loadItems();
// Init action UI (via a FAB Speed Dial)
speedDialView = root.findViewById(R.id.fabSD);
speedDialView.inflate(R.menu.episodes_apply_action_speeddial);
speedDialView.setOnChangeListener(new SpeedDialView.OnChangeListener() {
@Override
public boolean onMainActionSelected() {
return false;
}
@Override
public void onToggleChanged(boolean open) {
if (open && adapter.getSelectedCount() == 0) {
((MainActivity) getActivity()).showSnackbarAbovePlayer(R.string.no_items_selected,
Snackbar.LENGTH_SHORT);
speedDialView.close();
}
}
});
speedDialView.setOnActionSelectedListener(actionItem -> {
new EpisodeMultiSelectActionHandler(((MainActivity) getActivity()), adapter.getSelectedItems())
.handleAction(actionItem.getId());
onEndSelectMode();
adapter.endSelectMode();
return true;
});
return root;
}
@Override
public void onDestroyView() {
super.onDestroyView();
EventBus.getDefault().unregister(this);
if (disposable != null) {
disposable.dispose();
}
adapter = null;
}
@Override
public void onSaveInstanceState(@NonNull Bundle outState) {
outState.putBoolean(KEY_UP_ARROW, displayUpArrow);
super.onSaveInstanceState(outState);
}
private final MenuItemUtils.UpdateRefreshMenuItemChecker updateRefreshMenuItemChecker = new MenuItemUtils.UpdateRefreshMenuItemChecker() {
@Override
public boolean isRefreshing() {
return feed != null && DownloadService.isRunning && DownloadRequester.getInstance().isDownloadingFile(feed);
}
};
private void refreshToolbarState() {
if (feed == null) {
return;
}
MenuItemUtils.setupSearchItem(toolbar.getMenu(), (MainActivity) getActivity(), feedID, feed.getTitle());
toolbar.getMenu().findItem(R.id.share_link_item).setVisible(feed.getLink() != null);
toolbar.getMenu().findItem(R.id.visit_website_item).setVisible(feed.getLink() != null);
isUpdatingFeed = MenuItemUtils.updateRefreshMenuItem(toolbar.getMenu(),
R.id.refresh_item, updateRefreshMenuItemChecker);
FeedMenuHandler.onPrepareOptionsMenu(toolbar.getMenu(), feed);
}
@Override
public void onConfigurationChanged(@NonNull Configuration newConfig) {
super.onConfigurationChanged(newConfig);
int horizontalSpacing = (int) getResources().getDimension(R.dimen.additional_horizontal_spacing);
header.setPadding(horizontalSpacing, header.getPaddingTop(), horizontalSpacing, header.getPaddingBottom());
}
@Override
public boolean onMenuItemClick(MenuItem item) {
if (item.getItemId() == R.id.action_search) {
item.getActionView().post(() -> iconTintManager.updateTint());
}
if (feed == null) {
((MainActivity) getActivity()).showSnackbarAbovePlayer(
R.string.please_wait_for_data, Toast.LENGTH_LONG);
return true;
}
boolean feedMenuHandled;
try {
feedMenuHandled = FeedMenuHandler.onOptionsItemClicked(getActivity(), item, feed);
} catch (DownloadRequestException e) {
e.printStackTrace();
DownloadRequestErrorDialogCreator.newRequestErrorDialog(getActivity(), e.getMessage());
return true;
}
if (feedMenuHandled) {
return true;
}
switch (item.getItemId()) {
case R.id.rename_item:
new RenameFeedDialog(getActivity(), feed).show();
return true;
case R.id.remove_item:
RemoveFeedDialog.show(getContext(), feed, () ->
((MainActivity) getActivity()).loadFragment(EpisodesFragment.TAG, null));
return true;
default:
return false;
}
}
@Override
public boolean onContextItemSelected(@NonNull MenuItem item) {
FeedItem selectedItem = adapter.getLongPressedItem();
if (selectedItem == null) {
Log.i(TAG, "Selected item at current position was null, ignoring selection");
return super.onContextItemSelected(item);
}
if (item.getItemId() == R.id.multi_select) {
if (feed.isLocalFeed()) {
speedDialView.removeActionItemById(R.id.download_batch);
speedDialView.removeActionItemById(R.id.delete_batch);
}
speedDialView.setVisibility(View.VISIBLE);
refreshToolbarState();
// Do not return: Let adapter handle its actions, too.
}
if (adapter.onContextItemSelected(item)) {
return true;
}
return FeedItemMenuHandler.onMenuItemClicked(this, item.getItemId(), selectedItem);
}
@Override
public void onItemClick(AdapterView<?> parent, View view, int position, long id) {
if (adapter == null) {
return;
}
MainActivity activity = (MainActivity) getActivity();
long[] ids = FeedItemUtil.getIds(feed.getItems());
activity.loadChildFragment(ItemPagerFragment.newInstance(ids, position));
}
@Subscribe(threadMode = ThreadMode.MAIN)
public void onEvent(FeedEvent event) {
Log.d(TAG, "onEvent() called with: " + "event = [" + event + "]");
if (event.feedId == feedID) {
loadItems();
}
}
@Subscribe(threadMode = ThreadMode.MAIN)
public void onEventMainThread(FeedItemEvent event) {
Log.d(TAG, "onEventMainThread() called with: " + "event = [" + event + "]");
if (feed == null || feed.getItems() == null) {
return;
} else if (adapter == null) {
loadItems();
return;
}
for (int i = 0, size = event.items.size(); i < size; i++) {
FeedItem item = event.items.get(i);
int pos = FeedItemUtil.indexOfItemWithId(feed.getItems(), item.getId());
if (pos >= 0) {
feed.getItems().remove(pos);
feed.getItems().add(pos, item);
adapter.notifyItemChangedCompat(pos);
}
}
}
@Subscribe(sticky = true, threadMode = ThreadMode.MAIN)
public void onEventMainThread(DownloadEvent event) {
Log.d(TAG, "onEventMainThread() called with: " + "event = [" + event + "]");
DownloaderUpdate update = event.update;
if (event.hasChangedFeedUpdateStatus(isUpdatingFeed)) {
updateSyncProgressBarVisibility();
}
if (adapter != null && update.mediaIds.length > 0 && feed != null) {
for (long mediaId : update.mediaIds) {
int pos = FeedItemUtil.indexOfItemWithMediaId(feed.getItems(), mediaId);
if (pos >= 0) {
adapter.notifyItemChangedCompat(pos);
}
}
}
}
@Subscribe(threadMode = ThreadMode.MAIN)
public void onEventMainThread(PlaybackPositionEvent event) {
if (adapter != null) {
for (int i = 0; i < adapter.getItemCount(); i++) {
EpisodeItemViewHolder holder = (EpisodeItemViewHolder) recyclerView.findViewHolderForAdapterPosition(i);
if (holder != null && holder.isCurrentlyPlayingItem()) {
holder.notifyPlaybackPositionUpdated(event);
break;
}
}
}
}
@Override
public void onEndSelectMode() {
speedDialView.close();
speedDialView.setVisibility(View.GONE);
}
private void updateUi() {
loadItems();
updateSyncProgressBarVisibility();
}
@Subscribe(threadMode = ThreadMode.MAIN)
public void onPlayerStatusChanged(PlayerStatusEvent event) {
updateUi();
}
@Subscribe(threadMode = ThreadMode.MAIN)
public void onUnreadItemsChanged(UnreadItemsUpdateEvent event) {
updateUi();
}
@Subscribe(threadMode = ThreadMode.MAIN)
public void onFeedListChanged(FeedListUpdateEvent event) {
if (feed != null && event.contains(feed)) {
updateUi();
}
}
private void updateSyncProgressBarVisibility() {
if (isUpdatingFeed != updateRefreshMenuItemChecker.isRefreshing()) {
refreshToolbarState();
}
if (!DownloadRequester.getInstance().isDownloadingFeeds()) {
nextPageLoader.getRoot().setVisibility(View.GONE);
}
nextPageLoader.setLoadingState(DownloadRequester.getInstance().isDownloadingFeeds());
}
private void displayList() {
if (getView() == null) {
Log.e(TAG, "Required root view is not yet created. Stop binding data to UI.");
return;
}
if (adapter == null) {
recyclerView.setAdapter(null);
adapter = new FeedItemListAdapter((MainActivity) getActivity());
adapter.setOnEndSelectModeListener(this);
recyclerView.setAdapter(adapter);
}
progressBar.setVisibility(View.GONE);
if (feed != null) {
adapter.updateItems(feed.getItems());
}
refreshToolbarState();
updateSyncProgressBarVisibility();
}
private void refreshHeaderView() {
setupHeaderView();
if (recyclerView == null || feed == null) {
Log.e(TAG, "Unable to refresh header view");
return;
}
loadFeedImage();
if (feed.hasLastUpdateFailed()) {
txtvFailure.setVisibility(View.VISIBLE);
} else {
txtvFailure.setVisibility(View.GONE);
}
if (!feed.getPreferences().getKeepUpdated()) {
txtvUpdatesDisabled.setText("{md-pause-circle-outline} " + this.getString(R.string.updates_disabled_label));
Iconify.addIcons(txtvUpdatesDisabled);
txtvUpdatesDisabled.setVisibility(View.VISIBLE);
} else {
txtvUpdatesDisabled.setVisibility(View.GONE);
}
txtvTitle.setText(feed.getTitle());
txtvAuthor.setText(feed.getAuthor());
if (feed.getItemFilter() != null) {
FeedItemFilter filter = feed.getItemFilter();
if (filter.getValues().length > 0) {
txtvInformation.setText("{md-info-outline} " + this.getString(R.string.filtered_label));
Iconify.addIcons(txtvInformation);
txtvInformation.setOnClickListener((l) -> {
FilterDialog filterDialog = new FilterDialog(requireContext(), feed.getItemFilter()) {
@Override
protected void updateFilter(Set<String> filterValues) {
feed.setItemFilter(filterValues.toArray(new String[0]));
DBWriter.setFeedItemsFilter(feed.getId(), filterValues);
}
};
filterDialog.openDialog();
});
txtvInformation.setVisibility(View.VISIBLE);
} else {
txtvInformation.setVisibility(View.GONE);
}
} else {
txtvInformation.setVisibility(View.GONE);
}
}
private void setupHeaderView() {
if (feed == null || headerCreated) {
return;
}
// https://github.com/bumptech/glide/issues/529
imgvBackground.setColorFilter(new LightingColorFilter(0xff666666, 0x000000));
butShowInfo.setVisibility(View.VISIBLE);
butShowInfo.setOnClickListener(v -> showFeedInfo());
imgvCover.setOnClickListener(v -> showFeedInfo());
butShowSettings.setVisibility(View.VISIBLE);
butShowSettings.setOnClickListener(v -> {
if (feed != null) {
FeedSettingsFragment fragment = FeedSettingsFragment.newInstance(feed);
((MainActivity) getActivity()).loadChildFragment(fragment, TransitionEffect.SLIDE);
}
});
txtvFailure.setOnClickListener(v -> {
Intent intent = new Intent(getContext(), MainActivity.class);
intent.putExtra(MainActivity.EXTRA_FRAGMENT_TAG, DownloadsFragment.TAG);
Bundle args = new Bundle();
args.putInt(DownloadsFragment.ARG_SELECTED_TAB, DownloadsFragment.POS_LOG);
intent.putExtra(MainActivity.EXTRA_FRAGMENT_ARGS, args);
startActivity(intent);
});
headerCreated = true;
}
private void showFeedInfo() {
if (feed != null) {
FeedInfoFragment fragment = FeedInfoFragment.newInstance(feed);
((MainActivity) getActivity()).loadChildFragment(fragment, TransitionEffect.SLIDE);
}
}
private void loadFeedImage() {
Glide.with(getActivity())
.load(feed.getImageUrl())
.apply(new RequestOptions()
.placeholder(R.color.image_readability_tint)
.error(R.color.image_readability_tint)
.diskCacheStrategy(ApGlideSettings.AP_DISK_CACHE_STRATEGY)
.transform(new FastBlurTransformation())
.dontAnimate())
.into(imgvBackground);
Glide.with(getActivity())
.load(feed.getImageUrl())
.apply(new RequestOptions()
.placeholder(R.color.light_gray)
.error(R.color.light_gray)
.diskCacheStrategy(ApGlideSettings.AP_DISK_CACHE_STRATEGY)
.fitCenter()
.dontAnimate())
.into(imgvCover);
}
private void loadItems() {
if (disposable != null) {
disposable.dispose();
}
progressBar.setVisibility(View.VISIBLE);
disposable = Observable.fromCallable(this::loadData)
.subscribeOn(Schedulers.io())
.observeOn(AndroidSchedulers.mainThread())
.subscribe(
result -> {
feed = result;
refreshHeaderView();
displayList();
}, error -> {
feed = null;
refreshHeaderView();
displayList();
Log.e(TAG, Log.getStackTraceString(error));
});
}
@Nullable
private Feed loadData() {
Feed feed = DBReader.getFeed(feedID, true);
if (feed == null) {
return null;
}
DBReader.loadAdditionalFeedItemListData(feed.getItems());
if (feed.getSortOrder() != null) {
List<FeedItem> feedItems = feed.getItems();
FeedItemPermutors.getPermutor(feed.getSortOrder()).reorder(feedItems);
feed.setItems(feedItems);
}
return feed;
}
private static class FeedItemListAdapter extends EpisodeItemListAdapter {
public FeedItemListAdapter(MainActivity mainActivity) {
super(mainActivity);
}
@Override
protected void beforeBindViewHolder(EpisodeItemViewHolder holder, int pos) {
holder.coverHolder.setVisibility(View.GONE);
}
@Override
public void onCreateContextMenu(ContextMenu menu, View v, ContextMenu.ContextMenuInfo menuInfo) {
super.onCreateContextMenu(menu, v, menuInfo);
if (!inActionMode()) {
menu.findItem(R.id.multi_select).setVisible(true);
}
}
}
}
| 1 | 20,120 | Please revert unrelated changes | AntennaPod-AntennaPod | java |
@@ -1224,6 +1224,11 @@ var AppRouter = Backbone.Router.extend({
path: [countlyGlobal.cdn + 'localization/min/'],
mode: 'map',
callback: function() {
+ if (countlyGlobal.company) {
+ for (var key in jQuery.i18n.map) {
+ jQuery.i18n.map[key] = jQuery.i18n.map[key].replace("Countly", countlyGlobal.company);
+ }
+ }
self.origLang = JSON.stringify(jQuery.i18n.map);
}
}); | 1 | /* global Backbone, Handlebars, countlyEvent, countlyCommon, countlyGlobal, CountlyHelpers, countlySession, moment, Drop, _, store, countlyLocation, jQuery, $*/
/**
* Default Backbone View template from which all countly views should inherit.
* A countly view is defined as a page corresponding to a url fragment such
* as #/manage/apps. This interface defines common functions or properties
* the view object has. A view may override any function or property.
* @name countlyView
* @global
* @namespace countlyView
* @example <caption>Extending default view and overwriting its methods</caption>
* window.DashboardView = countlyView.extend({
* renderCommon:function () {
* if(countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID]){
* var type = countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID].type;
* type = jQuery.i18n.map["management-applications.types."+type] || type;
* $(this.el).html("<div id='no-app-type'><h1>"+jQuery.i18n.map["common.missing-type"]+": "+type+"</h1></div>");
* }
* else{
* $(this.el).html("<div id='no-app-type'><h1>"+jQuery.i18n.map["management-applications.no-app-warning"]+"</h1></div>");
* }
* }
* });
*/
var countlyView = Backbone.View.extend({
/**
* Checking state of view, if it is loaded
* @type {boolean}
* @instance
* @memberof countlyView
*/
isLoaded: false,
/**
* Handlebar template
* @type {object}
* @instance
* @memberof countlyView
*/
template: null, //handlebars template of the view
/**
* Data to pass to Handlebar template when building it
* @type {object}
* @instance
* @memberof countlyView
*/
templateData: {}, //data to be used while rendering the template
/**
* Main container which contents to replace by compiled template
* @type {jquery_object}
* @instance
* @memberof countlyView
*/
el: $('#content'), //jquery element to render view into
_myRequests: {}, //save requests called for this view
/**
* Initialize view, overwrite it with at least empty function if you are using some custom remote template
* @memberof countlyView
* @instance
*/
initialize: function() { //compile view template
this.template = Handlebars.compile($("#template-analytics-common").html());
},
_removeMyRequests: function() {
for (var url in this._myRequests) {
for (var data in this._myRequests[url]) {
                //readyState 4 means the request is done, lower values mean it is still in progress
if (parseInt(this._myRequests[url][data].readyState) !== 4) {
this._myRequests[url][data].abort();
}
}
}
this._myRequests = {};
},
/**
* This method is called when date is changed, default behavior is to call refresh method of the view
* @memberof countlyView
* @instance
*/
dateChanged: function() { //called when user changes the date selected
if (Backbone.history.fragment === "/") {
this.refresh(true);
}
else {
this.refresh();
}
},
/**
* This method is called when app is changed, default behavior is to reset preloaded data as events
* @param {function=} callback - callback function
* @memberof countlyView
* @instance
*/
appChanged: function(callback) { //called when user changes selected app from the sidebar
countlyEvent.reset();
$.when(countlyEvent.initialize()).always(function() {
if (callback) {
callback();
}
});
},
/**
* This method is called before calling render, load your data and remote template if needed here
* @returns {boolean} true
* @memberof countlyView
* @instance
* @example
*beforeRender: function() {
* if(this.template)
* return $.when(countlyDeviceDetails.initialize(), countlyTotalUsers.initialize("densities"), countlyDensity.initialize()).then(function () {});
* else{
* var self = this;
* return $.when($.get(countlyGlobal["path"]+'/density/templates/density.html', function(src){
* self.template = Handlebars.compile(src);
* }), countlyDeviceDetails.initialize(), countlyTotalUsers.initialize("densities"), countlyDensity.initialize()).then(function () {});
* }
*}
*/
beforeRender: function() {
return true;
},
/**
* This method is called after calling render method
* @memberof countlyView
* @instance
*/
afterRender: function() {
CountlyHelpers.makeSelectNative();
},
/**
* Main render method, better not to over write it, but use {@link countlyView.renderCommon} instead
* @returns {object} this
* @memberof countlyView
* @instance
*/
render: function() { //backbone.js view render function
var currLink = Backbone.history.fragment;
// Reset any active views and dropdowns
$("#main-views-container").find(".main-view").removeClass("active");
$("#top-bar").find(".dropdown.active").removeClass("active");
// Activate the main view and dropdown based on the active view
if (/^\/custom/.test(currLink) === true) {
$("#dashboards-main-view").addClass("active");
$("#dashboard-selection").addClass("active");
}
else {
$("#analytics-main-view").addClass("active");
$("#app-navigation").addClass("active");
}
$("#content-top").html("");
this.el.html('');
if (countlyCommon.ACTIVE_APP_ID) {
var self = this;
$.when(this.beforeRender(), initializeOnce()).always(function() {
if (app.activeView === self) {
self.isLoaded = true;
self.renderCommon();
self.afterRender();
app.pageScript();
}
});
}
else {
if (app.activeView === this) {
this.isLoaded = true;
this.renderCommon();
this.afterRender();
app.pageScript();
}
}
// Top bar dropdowns are hidden by default, fade them in when view render is complete
$("#top-bar").find(".dropdown").fadeIn(2000);
return this;
},
/**
* Do all your rendering in this method
* @param {boolean} isRefresh - render is called from refresh method, so do not need to do initialization
* @memberof countlyView
* @instance
* @example
*renderCommon:function (isRefresh) {
* //set initial data for template
* this.templateData = {
* "page-title":jQuery.i18n.map["density.title"],
* "logo-class":"densities",
* "chartHTML": chartHTML,
* };
*
* if (!isRefresh) {
* //populate template with data and add to html
* $(this.el).html(this.template(this.templateData));
* }
*}
*/
renderCommon: function(/* isRefresh*/) {}, // common render function of the view
/**
* Called when view is refreshed, you can reload data here or call {@link countlyView.renderCommon} with parameter true for code reusability
* @returns {boolean} true
* @memberof countlyView
* @instance
* @example
* refresh:function () {
* var self = this;
* //reload data from beforeRender method
* $.when(this.beforeRender()).then(function () {
* if (app.activeView != self) {
* return false;
* }
* //re render data again
* self.renderCommon(true);
*
* //replace some parts manually from templateData
* var newPage = $("<div>" + self.template(self.templateData) + "</div>");
* $(self.el).find(".widget-content").replaceWith(newPage.find(".widget-content"));
* $(self.el).find(".dashboard-summary").replaceWith(newPage.find(".dashboard-summary"));
* $(self.el).find(".density-widget").replaceWith(newPage.find(".density-widget"));
* });
*}
*/
    refresh: function() { // refresh function for the view, called periodically (every 10 seconds by default)
return true;
},
/**
* This method is called when user is active after idle period
* @memberof countlyView
* @instance
*/
restart: function() { // triggered when user is active after idle period
this.refresh();
},
/**
* This method is called when view is destroyed (user entered inactive state or switched to other view) you can clean up here if there is anything to be cleaned
* @memberof countlyView
* @instance
*/
destroy: function() { }
});
/**
 * View class for plugins to extend when they need a configuration tab under Management->Applications.
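 * @example <caption>A minimal sketch of a plugin configuration tab. The plugin name, template path and field names are illustrative, and registration through app.addAppManagementView is assumed.</caption>
 * app.addAppManagementView("my-plugin", jQuery.i18n.map["my-plugin.title"], countlyManagementView.extend({
 *     initialize: function() {
 *         this.plugin = "my-plugin";
 *         this.templatePath = "/my-plugin/templates/config.html";
 *     },
 *     titleString: function() {
 *         return jQuery.i18n.map["my-plugin.title"] || "My Plugin";
 *     },
 *     validate: function() {
 *         return this.templateData.endpoint ? null : "Endpoint is required";
 *     }
 * }));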
*/
window.countlyManagementView = countlyView.extend({
/**
* Handy function which returns currently saved configuration of this plugin or empty object.
*
* @return {Object} app object
*/
config: function() {
return countlyGlobal.apps[this.appId] &&
countlyGlobal.apps[this.appId].plugins &&
countlyGlobal.apps[this.appId].plugins[this.plugin] || {};
},
/**
* Set current app id
* @param {string} appId - app Id to set
*/
setAppId: function(appId) {
if (appId !== this.appId) {
this.appId = appId;
this.resetTemplateData();
this.savedTemplateData = JSON.stringify(this.templateData);
}
},
/**
* Reset template data when changing app
*/
resetTemplateData: function() {
this.templateData = {};
},
/**
* Title of plugin configuration tab, override with your own title.
*
* @return {String} tab title
*/
titleString: function() {
return 'Default plugin configuration';
},
/**
* Saving string displayed when request takes more than 0.3 seconds, override if needed.
*
* @return {String} saving string
*/
savingString: function() {
return 'Saving...';
},
/**
* Callback function called before tab is expanded. Override if needed.
*/
beforeExpand: function() {},
/**
* Callback function called after tab is collapsed. Override if needed.
*/
afterCollapse: function() {},
/**
* Function used to determine whether save button should be visible. Used whenever UI is redrawn or some value changed. Override if needed.
*
* @return {Boolean} true if enabled
*/
isSaveAvailable: function() {
return JSON.stringify(this.templateData) !== this.savedTemplateData.toString();
},
/**
* Callback function called to apply changes. Override if validation is needed.
*
* @return {String} error to display to user if validation didn't pass
*/
validate: function() {
return null;
},
/**
* Function which prepares data to the format required by the server, must return a Promise.
*
* @return {Promise} which resolves to object of {plugin-name: {config: true, options: true}} format or rejects with error string otherwise
*/
prepare: function() {
var o = {}; o[this.plugin] = this.templateData; return $.when(o);
},
/**
* Show error message returned by server or by validate function. Override if needed.
* @param {string} error - error message to show
*/
showError: function(error) {
CountlyHelpers.alert(error, 'popStyleGreen', {title: jQuery.i18n.map['management-applications.plugins.smth'], image: 'empty-icon', button_title: jQuery.i18n.map['management-applications.plugins.ok']});
},
/**
     * Called whenever the value of an element with the given name has been changed. Override if needed.
*/
onChange: function(/* name */) { },
/**
     * Called whenever the value of an element with the given name has been changed; updates templateData, toggles the save button visibility and calls onChange.
* @param {string} name - key
* @param {string} value - value to set
*/
doOnChange: function(name, value) {
if (name && countlyCommon.dot(this.templateData, name) !== value) {
countlyCommon.dot(this.templateData, name, value);
}
if (this.isSaveAvailable()) {
this.el.find('.icon-button').show();
}
else {
this.el.find('.icon-button').hide();
}
if (name) {
this.onChange(name, value);
}
},
/**
* Save logic: validate, disable save button, submit to the server,
* show loading dialog if it takes long enough, hide it when done, show error if any, enable save button.
* @param {event} ev - event
* @returns {object} error
*/
save: function(ev) {
ev.preventDefault();
if (this.el.find('.icon-button').hasClass('disabled') || !this.isSaveAvailable()) {
return;
}
var error = this.validate(), self = this;
if (error) {
return this.showError(error === true ? jQuery.i18n.map['management-applications.plugins.save.nothing'] : error);
}
this.el.find('.icon-button').addClass('disabled');
this.prepare().then(function(data) {
var dialog, timeout = setTimeout(function() {
dialog = CountlyHelpers.loading(jQuery.i18n.map['management-applications.plugins.saving']);
}, 300);
$.ajax({
type: "POST",
url: countlyCommon.API_PARTS.apps.w + '/update/plugins',
data: {
app_id: self.appId,
args: JSON.stringify(data)
},
dataType: "json",
success: function(result) {
self.el.find('.icon-button').removeClass('disabled');
clearTimeout(timeout);
if (dialog) {
CountlyHelpers.removeDialog(dialog);
}
if (result.result === 'Nothing changed') {
CountlyHelpers.notify({type: 'warning', message: jQuery.i18n.map['management-applications.plugins.saved.nothing']});
}
else {
CountlyHelpers.notify({title: jQuery.i18n.map['management-applications.plugins.saved.title'], message: jQuery.i18n.map['management-applications.plugins.saved']});
if (!countlyGlobal.apps[result._id].plugins) {
countlyGlobal.apps[result._id].plugins = {};
}
self.savedTemplateData = JSON.stringify(self.templateData);
for (var k in result.plugins) {
countlyGlobal.apps[result._id].plugins[k] = result.plugins[k];
}
self.resetTemplateData();
self.render();
}
self.doOnChange();
},
error: function(resp) {
try {
resp = JSON.parse(resp.responseText);
}
catch (ignored) {
                        //response body was not valid JSON, ignore the parse error and keep the raw response
}
self.el.find('.icon-button').removeClass('disabled');
clearTimeout(timeout);
if (dialog) {
CountlyHelpers.removeDialog(dialog);
}
self.showError(resp.result || jQuery.i18n.map['management-applications.plugins.error.server']);
}
});
}, function(error1) {
self.el.find('.icon-button').removeClass('disabled');
self.showError(error1);
});
},
beforeRender: function() {
if (this.template) {
return $.when();
}
else {
var self = this;
return $.when($.get(countlyGlobal.path + this.templatePath, function(src) {
self.template = Handlebars.compile(src);
}));
}
},
render: function() { //backbone.js view render function
if (!this.savedTemplateData) {
this.savedTemplateData = JSON.stringify(this.templateData);
}
this.el.html(this.template(this.templateData));
if (!this.el.find('.icon-button').length) {
$('<a class="icon-button green" data-localize="management-applications.plugins.save" href="#"></a>').hide().appendTo(this.el);
}
var self = this;
this.el.find('.cly-select').each(function(i, select) {
$(select).off('click', '.item').on('click', '.item', function() {
self.doOnChange($(select).data('name') || $(select).attr('id'), $(this).data('value'));
});
});
this.el.find('input[type=text], input[type=password], input[type=number]').off('input').on('input', function() {
self.doOnChange($(this).attr('name') || $(this).attr('id'), $(this).val());
});
this.el.find('input[type=file]').off('change').on('change', function() {
self.doOnChange($(this).attr('name') || $(this).attr('id'), $(this).val());
});
this.el.find('.on-off-switch input').on("change", function() {
var isChecked = $(this).is(":checked"),
attrID = $(this).attr("id");
self.doOnChange(attrID, isChecked);
});
this.el.find('.icon-button').off('click').on('click', this.save.bind(this));
if (this.isSaveAvailable()) {
this.el.find('.icon-button').show();
}
else {
this.el.find('.icon-button').hide();
}
app.localize();
this.afterRender();
return this;
},
});
/**
 * Drop class with embedded Countly theme; use it as you would any Drop class/instance
* @name CountlyDrop
* @global
*/
var CountlyDrop = Drop.createContext({
classPrefix: 'countly-drop',
});
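// A minimal usage sketch (the option names come from the underlying Drop library; the target element is illustrative):
// new CountlyDrop({
//     target: document.querySelector("#help-icon"),
//     content: "Some helpful tooltip",
//     position: "bottom center",
//     openOn: "hover"
// });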
var initializeOnce = _.once(function() {
return $.when(countlyEvent.initialize()).then(function() { });
});
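/**
 * Minimal remote template loader that fetches templates over HTTP and caches them compiled with Handlebars; used through the global singleton T
 */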
var Template = function() {
this.cached = {};
};
var T = new Template();
$.extend(Template.prototype, {
render: function(name, callback) {
if (T.isCached(name)) {
callback(T.cached[name]);
}
else {
$.get(T.urlFor(name), function(raw) {
T.store(name, raw);
T.render(name, callback);
});
}
},
renderSync: function(name, callback) {
if (!T.isCached(name)) {
T.fetch(name);
}
T.render(name, callback);
},
prefetch: function(name) {
$.get(T.urlFor(name), function(raw) {
T.store(name, raw);
});
},
fetch: function(name) {
// synchronous, for those times when you need it.
if (!T.isCached(name)) {
var raw = $.ajax({ 'url': T.urlFor(name), 'async': false }).responseText;
T.store(name, raw);
}
},
isCached: function(name) {
return !!T.cached[name];
},
store: function(name, raw) {
T.cached[name] = Handlebars.compile(raw);
},
urlFor: function(name) {
//return "/resources/templates/"+ name + ".handlebars";
return name + ".html";
}
});
//redefine contains selector for jquery to be case insensitive
$.expr[":"].contains = $.expr.createPseudo(function(arg) {
return function(elem) {
return $(elem).text().toUpperCase().indexOf(arg.toUpperCase()) >= 0;
};
});
/**
* Main app instance of Backbone AppRouter used to control views and view change flow
* @name app
* @global
* @instance
* @namespace app
*/
var AppRouter = Backbone.Router.extend({
routes: {
"/": "dashboard",
"*path": "main"
},
/**
* View that is currently being displayed
* @type {countlyView}
* @instance
* @memberof app
*/
activeView: null, //current view
    dateToSelected: null, //"date to" value selected in the date picker
    dateFromSelected: null, //"date from" value selected in the date picker
activeAppName: '',
activeAppKey: '',
_isFirstLoad: false, //to know if we are switching between two apps or just loading page
refreshActiveView: 0, //refresh interval function reference
_myRequests: {}, //save requests not connected with view to prevent calling the same if previous not finished yet.
/**
     * Navigate to another view programmatically. If you need to change the view without the user clicking anything (e.g. a redirect), you can do it with this method. This method is not defined by Countly but is a direct method of the AppRouter object in Backbone.js
* @name app#navigate
* @function
* @instance
* @param {string} fragment - url path (hash part) where to redirect user
     * @param {boolean=} triggerRoute - whether to trigger the route handler (initialize the new view, etc.). Default is false; keep it false when redirecting to a URL handled by the view you are already on, so it is not reloaded
* @memberof app
* @example <caption>Redirect to url of the same view</caption>
* //you are at #/manage/systemlogs
* app.navigate("#/manage/systemlogs/query/{}");
*
* @example <caption>Redirect to url of other view</caption>
* //you are at #/manage/systemlogs
* app.navigate("#/crashes", true);
*/
_removeUnfinishedRequests: function() {
for (var url in this._myRequests) {
for (var data in this._myRequests[url]) {
                //readyState 4 means the request is done, lower values mean it is still in progress
if (parseInt(this._myRequests[url][data].readyState) !== 4) {
this._myRequests[url][data].abort();
}
}
}
this._myRequests = {};
},
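    /**
     * Switch the currently active app programmatically: updates the app selector UI, aborts unfinished requests and reloads the active view's data for the new app
     * @param {string} app_id - id of the app to switch to
     * @param {function=} callback - called when the active view has finished reloading data for the new app
     */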
switchApp: function(app_id, callback) {
countlyCommon.setActiveApp(app_id);
$("#active-app-name").text(countlyGlobal.apps[app_id].name);
$("#active-app-name").attr('title', countlyGlobal.apps[app_id].name);
$("#active-app-icon").css("background-image", "url('" + countlyGlobal.path + "appimages/" + app_id + ".png')");
app.onAppSwitch(app_id, true);
//removing requests saved in app
app._removeUnfinishedRequests();
if (app && app.activeView) {
            app.activeView._removeMyRequests(); //abort requests of the current view that have not finished yet
app.activeView.appChanged(callback);
}
},
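    /**
     * Default route handler: detects #/app/{id}/... style app-switch links and otherwise redirects the current fragment to the appropriate view for the active app type
     */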
main: function(/*forced*/) {
var change = true,
redirect = false;
// detect app switch like
//#/app/586e32ddc32cb30a01558cc1/analytics/events
if (Backbone.history.fragment.indexOf("/app/") === 0) {
var app_id = Backbone.history.fragment.replace("/app/", "");
redirect = "#/";
if (app_id && app_id.length) {
if (app_id.indexOf("/") !== -1) {
var parts = app_id.split("/");
app_id = parts.shift();
redirect = "#/" + parts.join("/");
}
if (app_id !== countlyCommon.ACTIVE_APP_ID && countlyGlobal.apps[app_id]) {
countlyCommon.setActiveApp(app_id);
$("#active-app-name").text(countlyGlobal.apps[app_id].name);
$("#active-app-name").attr('title', countlyGlobal.apps[app_id].name);
$("#active-app-icon").css("background-image", "url('" + countlyGlobal.path + "appimages/" + app_id + ".png')");
app.onAppSwitch(app_id);
app.activeView.appChanged(function() {
app.navigate(redirect, true);
});
return;
}
}
}
else if (Backbone.history.fragment !== "/" && countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID]) {
$("#" + countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].type + "-type a").each(function() {
if (this.hash !== "#/" && this.hash !== "") {
if ("#" + Backbone.history.fragment === this.hash && $(this).css('display') !== 'none') {
change = false;
return false;
}
else if (("#" + Backbone.history.fragment).indexOf(this.hash) === 0 && $(this).css('display') !== 'none') {
redirect = this.hash;
return false;
}
}
});
}
if (redirect) {
app.navigate(redirect, true);
}
else if (change) {
if (Backbone.history.fragment !== "/") {
this.navigate("#/", true);
}
else if (countlyCommon.APP_NAMESPACE !== false) {
this.navigate("#/" + countlyCommon.ACTIVE_APP_ID + Backbone.history.fragment, true);
}
else {
this.dashboard();
}
}
else {
if (countlyCommon.APP_NAMESPACE !== false) {
this.navigate("#/" + countlyCommon.ACTIVE_APP_ID + Backbone.history.fragment, true);
}
else {
this.activeView.render();
}
}
},
dashboard: function() {
if (countlyGlobal.member.restrict && countlyGlobal.member.restrict.indexOf("#/") !== -1) {
return;
}
if (_.isEmpty(countlyGlobal.apps)) {
this.renderWhenReady(this.manageAppsView);
}
else if (typeof this.appTypes[countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].type] !== "undefined") {
this.renderWhenReady(this.appTypes[countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].type]);
}
else {
this.renderWhenReady(this.dashboardView);
}
},
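    /**
     * Run refresh scripts registered for the current route: scripts for the exact fragment, scripts whose "#" wildcard pattern matches the fragment, and global "#" scripts
     */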
runRefreshScripts: function() {
var i = 0;
var l = 0;
if (this.refreshScripts[Backbone.history.fragment]) {
for (i = 0, l = this.refreshScripts[Backbone.history.fragment].length; i < l; i++) {
this.refreshScripts[Backbone.history.fragment][i]();
}
}
for (var k in this.refreshScripts) {
if (k !== '#' && k.indexOf('#') !== -1 && Backbone.history.fragment.match("^" + k.replace(/#/g, '.*'))) {
for (i = 0, l = this.refreshScripts[k].length; i < l; i++) {
this.refreshScripts[k][i]();
}
}
}
if (this.refreshScripts["#"]) {
for (i = 0, l = this.refreshScripts["#"].length; i < l; i++) {
this.refreshScripts["#"][i]();
}
}
},
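    /**
     * Refresh the active view, but only when the selected period contains today and the previous refresh has finished
     * @param {AppRouter} self - reference to the router instance
     */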
performRefresh: function(self) {
//refresh only if we are on current period
if (countlyCommon.periodObj.periodContainsToday && self.activeView.isLoaded) {
self.activeView.isLoaded = false;
$.when(self.activeView.refresh()).always(function() {
self.activeView.isLoaded = true;
self.runRefreshScripts();
});
}
},
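    /**
     * Render the given view: destroys the previous view, schedules the periodic refresh and handles pre-render redirects (app creation when there are no apps, password reset when the password expired)
     * @param {countlyView} viewName - view instance to render
     */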
renderWhenReady: function(viewName) { //all view renders end up here
// If there is an active view call its destroy function to perform cleanups before a new view renders
if (this.activeView) {
this.activeView._removeMyRequests();
this.activeView.destroy();
}
if (window.components && window.components.slider && window.components.slider.instance) {
window.components.slider.instance.close();
}
this.activeView = viewName;
clearInterval(this.refreshActiveView);
if (typeof countlyGlobal.member.password_changed === "undefined") {
countlyGlobal.member.password_changed = Math.round(new Date().getTime() / 1000);
}
this.routesHit++;
if (_.isEmpty(countlyGlobal.apps)) {
if (Backbone.history.fragment !== "/manage/apps") {
this.navigate("/manage/apps", true);
}
else {
viewName.render();
}
return false;
}
else if ((countlyGlobal.security.password_expiration > 0) &&
(countlyGlobal.member.password_changed + countlyGlobal.security.password_expiration * 24 * 60 * 60 < new Date().getTime() / 1000) &&
(!countlyGlobal.ssr)) {
if (Backbone.history.fragment !== "/manage/user-settings/reset") {
this.navigate("/manage/user-settings/reset", true);
}
else {
viewName.render();
}
return false;
}
viewName.render();
var self = this;
this.refreshActiveView = setInterval(function() {
self.performRefresh(self);
}, countlyCommon.DASHBOARD_REFRESH_MS);
if (countlyGlobal && countlyGlobal.message) {
CountlyHelpers.parseAndShowMsg(countlyGlobal.message);
}
// Init sidebar based on the current url
self.sidebar.init();
},
sidebar: {
init: function() {
setTimeout(function() {
$("#sidebar-menu").find(".item").removeClass("active menu-active");
var selectedMenu = $($("#sidebar-menu").find("a[href='#" + Backbone.history.fragment + "']"));
if (!selectedMenu.length) {
var parts = Backbone.history.fragment.split("/");
selectedMenu = $($("#sidebar-menu").find("a[href='#/" + (parts[1] || "") + "']"));
if (!selectedMenu.length) {
selectedMenu = $($("#sidebar-menu").find("a[href='#/" + (parts[1] + "/" + parts[2] || "") + "']"));
}
}
var selectedSubmenu = selectedMenu.parents(".sidebar-submenu");
if (selectedSubmenu.length) {
selectedMenu.addClass("active");
selectedSubmenu.prev().addClass("active menu-active");
app.sidebar.submenu.toggle(selectedSubmenu);
}
else {
selectedMenu.addClass("active");
app.sidebar.submenu.toggle();
}
}, 1000);
},
submenu: {
toggle: function(el) {
$(".sidebar-submenu").removeClass("half-visible");
if (!el) {
$(".sidebar-submenu:visible").animate({ "right": "-170px" }, {
duration: 300,
easing: 'easeOutExpo',
complete: function() {
$(this).hide();
}
});
return true;
}
if (!el.is(":visible")) {
if ($(".sidebar-submenu").is(":visible")) {
$(".sidebar-submenu").hide();
el.css({ "right": "-110px" }).show().animate({ "right": "0" }, { duration: 300, easing: 'easeOutExpo' });
addText();
}
else {
el.css({ "right": "-170px" }).show().animate({ "right": "0" }, { duration: 300, easing: 'easeOutExpo' });
addText();
}
}
                /** Adds the parent menu item's text as the submenu title, together with a close icon */
function addText() {
var mainMenuText = $(el.prev()[0]).find(".text").text();
$(".menu-title").remove();
var menuTitle = $("<div class='menu-title'></div>").text(mainMenuText).prepend("<i class='submenu-close ion-close'></i>");
el.prepend(menuTitle);
// Try setting submenu title once again if it was empty
// during previous try
if (!mainMenuText) {
setTimeout(function() {
$(".menu-title").text($(el.prev()[0]).find(".text").text());
$(".menu-title").prepend("<i class='submenu-close ion-close'></i>");
}, 1000);
}
}
}
}
},
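    /**
     * Check if the dashboard itself produced browser history, i.e. more than one route was handled in this session
     * @returns {boolean} true if there is in-app routing history
     */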
hasRoutingHistory: function() {
if (this.routesHit > 1) {
return true;
}
return false;
},
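    /**
     * Go back in browser history if the dashboard produced it; otherwise navigate to the provided fallback route, a route guessed from the current fragment, or "/"
     * @param {string=} fallback_route - route to use when there is no in-app history
     */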
back: function(fallback_route) {
if (this.routesHit > 1) {
window.history.back();
}
else {
var fragment = Backbone.history.getFragment();
//route not passed, try to guess from current location
if (typeof fallback_route === "undefined" || fallback_route === "") {
if (fragment) {
var parts = fragment.split("/");
if (parts.length > 1) {
fallback_route = "/" + parts[1];
}
}
}
if (fallback_route === fragment) {
fallback_route = '/';
}
this.navigate(fallback_route || '/', {trigger: true, replace: true});
}
},
initialize: function() { //initialize the dashboard, register helpers etc.
this.bind("route", function(name/*, args*/) {
$('#content').removeClass(function(index, className) {
return (className.match(/(^|\s)routename-\S*/g) || []).join(' ');
}).addClass("routename-" + name);
});
this.appTypes = {};
this.pageScripts = {};
this.dataExports = {};
this.appSwitchCallbacks = [];
this.appManagementSwitchCallbacks = [];
this.appObjectModificators = [];
this.appManagementViews = {};
this.appAddTypeCallbacks = [];
this.userEditCallbacks = [];
this.refreshScripts = {};
this.appSettings = {};
this.widgetCallbacks = {};
this.routesHit = 0; //keep count of number of routes handled by your application
/**
        * When rendering data from the server using templates from frontend/express/views, we use EJS as the templating engine. But when rendering remotely loaded templates on the browser side through AJAX, we use the Handlebars templating engine. In EJS everything is simple: your templating code is basically JavaScript code between <% %> tags. With Handlebars it is not that straightforward, so we need helper functions to express common templating logic
* @name Handlebars
* @global
* @instance
* @namespace Handlebars
*/
/**
* Display common date selecting UI elements
* @name date-selector
* @memberof Handlebars
* @example
* {{> date-selector }}
*/
Handlebars.registerPartial("date-selector", $("#template-date-selector").html());
/**
* Display common timezone selecting UI element
* @name timezones
* @memberof Handlebars
* @example
* {{> timezones }}
*/
Handlebars.registerPartial("timezones", $("#template-timezones").html());
/**
* Display common app category selecting UI element
* @name app-categories
* @memberof Handlebars
* @example
* {{> app-categories }}
*/
Handlebars.registerPartial("app-categories", $("#template-app-categories").html());
/**
* Iterate object with keys and values, creating variable "property" for object key and variable "value" for object value
* @name eachOfObject
* @memberof Handlebars
* @example
* {{#eachOfObject app_types}}
* <div data-value="{{property}}" class="item">{{value}}</div>
* {{/eachOfObject}}
*/
Handlebars.registerHelper('eachOfObject', function(context, options) {
var ret = "";
for (var prop in context) {
ret = ret + options.fn({ property: prop, value: context[prop] });
}
return ret;
});
/**
* Iterate only values of object, this will reference the value of current object
* @name eachOfObjectValue
* @memberof Handlebars
* @example
* {{#eachOfObjectValue apps}}
* <div class="app searchable">
* <div class="image" style="background-image: url('/appimages/{{this._id}}.png');"></div>
* <div class="name">{{this.name}}</div>
* <input class="app_id" type="hidden" value="{{this._id}}"/>
* </div>
* {{/eachOfObjectValue}}
*/
Handlebars.registerHelper('eachOfObjectValue', function(context, options) {
var ret = "";
for (var prop in context) {
ret = ret + options.fn(context[prop]);
}
return ret;
});
/**
* Iterate through array, creating variable "index" for element index and variable "value" for value at that index
* @name eachOfArray
* @memberof Handlebars
* @example
* {{#eachOfArray events}}
* <div class="searchable event-container {{#if value.is_active}}active{{/if}}" data-key="{{value.key}}">
* <div class="name">{{value.name}}</div>
* </div>
* {{/eachOfArray}}
*/
Handlebars.registerHelper('eachOfArray', function(context, options) {
var ret = "";
for (var i = 0; i < context.length; i++) {
ret = ret + options.fn({ index: i, value: context[i] });
}
return ret;
});
/**
* Print out json in pretty indented way
* @name prettyJSON
* @memberof Handlebars
* @example
* <td class="jh-value jh-object-value">{{prettyJSON value}}</td>
*/
Handlebars.registerHelper('prettyJSON', function(context) {
return JSON.stringify(context, undefined, 4);
});
/**
* Shorten number, Handlebar binding to {@link countlyCommon.getShortNumber}
* @name getShortNumber
* @memberof Handlebars
* @example
* <span class="value">{{getShortNumber this.data.total}}</span>
*/
Handlebars.registerHelper('getShortNumber', function(context) {
return countlyCommon.getShortNumber(context);
});
/**
* Format float number up to 2 values after dot
* @name getFormattedNumber
* @memberof Handlebars
* @example
* <div class="number">{{getFormattedNumber this.total}}</div>
*/
Handlebars.registerHelper('getFormattedNumber', function(context) {
if (isNaN(context)) {
return context;
}
var ret = parseFloat((parseFloat(context).toFixed(2)).toString()).toString();
return ret.replace(/(\d)(?=(\d\d\d)+(?!\d))/g, "$1,");
});
/**
* Convert text to upper case
* @name toUpperCase
* @memberof Handlebars
* @example
* <div class="title">{{toUpperCase page-title}}</div>
*/
Handlebars.registerHelper('toUpperCase', function(context) {
return context.toUpperCase();
});
/**
* Convert array of app ids to comma separate string of app names. Handlebar binding to {@link CountlyHelpers.appIdsToNames}
* @name appIdsToNames
* @memberof Handlebars
* @example
* <div class="apps">{{appIdsToNames appIds}}</div>
*/
Handlebars.registerHelper('appIdsToNames', function(context) {
return CountlyHelpers.appIdsToNames(context);
});
/**
* Loop for specified amount of times. Creating variable "count" as current index from 1 to provided value
* @name forNumberOfTimes
* @memberof Handlebars
* @example
* <ul>
* {{#forNumberOfTimes 10}}
* <li>{{count}}</li>
* {{/forNumberOfTimes}}
* </ul>
*/
Handlebars.registerHelper('forNumberOfTimes', function(context, options) {
var ret = "";
for (var i = 0; i < context; i++) {
ret = ret + options.fn({ count: i + 1 });
}
return ret;
});
        /**
        * Loop for a computed number of times: given variables "need" and "now", the block is repeated ${need} - ${now} times, creating variable "count" as the current index from 1 upwards
        * @name forNumberOfTimesCalc
        * @memberof Handlebars
        * @example
        * <ul>
        *    {{#forNumberOfTimesCalc 10 3}} // will loop 7 times
        *    <li>{{count}}</li>
        *    {{/forNumberOfTimesCalc}}
        * </ul>
        */
Handlebars.registerHelper('forNumberOfTimesCalc', function(need, now, options) {
var ret = "";
            var context = parseInt(need) - parseInt(now);
for (var i = 0; i < context; i++) {
ret = ret + options.fn({ count: i + 1 });
}
return ret;
});
/**
* Replaces part of a string with a string.
* @name replace
* @memberof Handlebars
* @example
* <span>{{#replace value "(" " ("}}{{/replace}}</span>
*/
Handlebars.registerHelper('replace', function(string, to_replace, replacement) {
return (string || '').replace(to_replace, replacement);
});
/**
* Limit string length.
* @name limitString
* @memberof Handlebars
* @example
* <span>{{#limitString value 15}}{{/limitString}}</span>
*/
Handlebars.registerHelper('limitString', function(string, limit) {
if (string.length > limit) {
return (string || '').substr(0, limit) + "..";
}
else {
return string;
}
});
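        /**
        * Render a registered partial by name, merging the current context with any values passed in the hash
        * @name include
        * @memberof Handlebars
        * @example <caption>The hash values (here "selected") are illustrative</caption>
        * {{include "timezones" selected=this.timezone}}
        */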
Handlebars.registerHelper('include', function(templatename, options) {
var partial = Handlebars.partials[templatename];
var context = $.extend({}, this, options.hash);
return partial(context);
});
/**
* For loop in template providing start count, end count and increment
* @name for
* @memberof Handlebars
* @example
* {{#for start end 1}}
* {{#ifCond this "==" ../data.curPage}}
* <a href='#/manage/db/{{../../db}}/{{../../collection}}/page/{{this}}' class="current">{{this}}</a>
* {{else}}
* <a href='#/manage/db/{{../../db}}/{{../../collection}}/page/{{this}}'>{{this}}</a>
* {{/ifCond}}
* {{/for}}
*/
Handlebars.registerHelper('for', function(from, to, incr, block) {
var accum = '';
for (var i = from; i < to; i += incr) {
accum += block.fn(i);
}
return accum;
});
/**
* If condition with different operators, accepting first value, operator and second value.
* Accepted operators are ==, !=, ===, <, <=, >, >=, &&, ||
* @name ifCond
* @memberof Handlebars
* @example
* {{#ifCond this.data.trend "==" "u"}}
* <i class="material-icons">trending_up</i>
* {{else}}
* <i class="material-icons">trending_down</i>
* {{/ifCond}}
*/
Handlebars.registerHelper('ifCond', function(v1, operator, v2, options) {
switch (operator) {
case '==':
return (v1 == v2) ? options.fn(this) : options.inverse(this); // eslint-disable-line
case '!=':
return (v1 != v2) ? options.fn(this) : options.inverse(this); // eslint-disable-line
case '!==':
return (v1 !== v2) ? options.fn(this) : options.inverse(this);
case '===':
return (v1 === v2) ? options.fn(this) : options.inverse(this);
case '<':
return (v1 < v2) ? options.fn(this) : options.inverse(this);
case '<=':
return (v1 <= v2) ? options.fn(this) : options.inverse(this);
case '>':
return (v1 > v2) ? options.fn(this) : options.inverse(this);
case '>=':
return (v1 >= v2) ? options.fn(this) : options.inverse(this);
case '&&':
return (v1 && v2) ? options.fn(this) : options.inverse(this);
case '||':
return (v1 || v2) ? options.fn(this) : options.inverse(this);
default:
return options.inverse(this);
}
});
/**
* Format timestamp to Twitter-like time ago format, Handlebar binding to {@link countlyCommon.formatTimeAgo}
* @name formatTimeAgo
* @memberof Handlebars
* @example
* <div class="time">{{{formatTimeAgo value.time}}}</div>
*/
Handlebars.registerHelper('formatTimeAgo', function(context) {
return countlyCommon.formatTimeAgo(parseInt(context) / 1000);
});
/**
* Get a value from an object by a specific key; the block is rendered with that value as its context
* @name withItem
* @memberof Handlebars
* @example
* <p>{{#withItem ../apps key=app_id}}{{this}}{{/withItem}}</p>
*/
Handlebars.registerHelper('withItem', function(object, options) {
return options.fn(object[options.hash.key]);
});
var self = this;
$("body").addClass("lang-" + countlyCommon.BROWSER_LANG_SHORT);
jQuery.i18n.properties({
name: 'locale',
cache: true,
language: countlyCommon.BROWSER_LANG_SHORT,
countlyVersion: countlyGlobal.countlyVersion + "&" + countlyGlobal.pluginsSHA,
path: [countlyGlobal.cdn + 'localization/min/'],
mode: 'map',
callback: function() {
self.origLang = JSON.stringify(jQuery.i18n.map);
}
});
$(document).ready(function() {
CountlyHelpers.initializeSelect();
CountlyHelpers.initializeTextSelect();
CountlyHelpers.initializeMultiSelect();
$(document).on('DOMNodeInserted', '.cly-select', function() {
CountlyHelpers.makeSelectNative();
});
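// Cache-bust template requests by appending the current Countly version to .html urls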
$.ajaxPrefilter(function(options) {
var last5char = options.url.substring(options.url.length - 5, options.url.length);
if (last5char === ".html") {
var version = countlyGlobal.countlyVersion || "";
options.url = options.url + "?v=" + version;
}
});
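// Periodically check with the server that the dashboard session is still valid and force logout/login when it is not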
var validateSession = function() {
$.ajax({
url: countlyGlobal.path + "/session",
data: {check_session: true},
success: function(result) {
if (result === "logout") {
$("#user-logout").click();
}
if (result === "login") {
$("#user-logout").click();
window.location = "/login";
}
setTimeout(function() {
validateSession();
}, countlyCommon.DASHBOARD_VALIDATE_SESSION || 30000);
}
});
};
setTimeout(function() {
validateSession();
}, countlyCommon.DASHBOARD_VALIDATE_SESSION || 30000); // validates session every 30 seconds
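// If a session timeout is configured, keep extending the session on user activity and warn the user shortly before it expires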
if (parseInt(countlyGlobal.config.session_timeout)) {
var minTimeout, tenSecondTimeout, logoutTimeout;
var shouldRecordAction = false;
var extendSession = function() {
shouldRecordAction = false;
$.ajax({
url: countlyGlobal.path + "/session",
success: function(result) {
if (result === "logout") {
$("#user-logout").click();
}
if (result === "login") {
$("#user-logout").click();
window.location = "/login";
}
else if (result === "success") {
shouldRecordAction = false;
var myTimeoutValue = parseInt(countlyGlobal.config.session_timeout) * 1000 * 60;
if (myTimeoutValue > 2147483647) { //max value used by set timeout function
myTimeoutValue = 1800000;//30 minutes
}
setTimeout(function() {
shouldRecordAction = true;
}, Math.round(myTimeoutValue / 2));
resetSessionTimeouts(myTimeoutValue);
}
},
error: function() {
shouldRecordAction = true;
}
});
};
var resetSessionTimeouts = function(timeout) {
var minute = timeout - 60 * 1000;
if (minTimeout) {
clearTimeout(minTimeout);
minTimeout = null;
}
if (minute > 0) {
minTimeout = setTimeout(function() {
CountlyHelpers.notify({ title: jQuery.i18n.map["common.session-expiration"], message: jQuery.i18n.map["common.expire-minute"], info: jQuery.i18n.map["common.click-to-login"] });
}, minute);
}
var tenSeconds = timeout - 10 * 1000;
if (tenSecondTimeout) {
clearTimeout(tenSecondTimeout);
tenSecondTimeout = null;
}
if (tenSeconds > 0) {
tenSecondTimeout = setTimeout(function() {
CountlyHelpers.notify({ title: jQuery.i18n.map["common.session-expiration"], message: jQuery.i18n.map["common.expire-seconds"], info: jQuery.i18n.map["common.click-to-login"] });
}, tenSeconds);
}
if (logoutTimeout) {
clearTimeout(logoutTimeout);
logoutTimeout = null;
}
logoutTimeout = setTimeout(function() {
extendSession();
}, timeout + 1000);
};
var myTimeoutValue = parseInt(countlyGlobal.config.session_timeout) * 1000 * 60;
//max value used by set timeout function
if (myTimeoutValue > 2147483647) {
myTimeoutValue = 1800000;
}//30 minutes
resetSessionTimeouts(myTimeoutValue);
$(document).on("click mousemove extend-dashboard-user-session", function() {
if (shouldRecordAction) {
extendSession();
}
});
extendSession();
}
// If a date range is selected, initialize the calendar with these dates
var periodObj = countlyCommon.getPeriod();
if (Object.prototype.toString.call(periodObj) === '[object Array]' && periodObj.length === 2) {
self.dateFromSelected = countlyCommon.getPeriod()[0];
self.dateToSelected = countlyCommon.getPeriod()[1];
}
// Initialize localization related stuff
// Localization test
/*
$.each(jQuery.i18n.map, function (key, value) {
jQuery.i18n.map[key] = key;
});
*/
try {
moment.locale(countlyCommon.BROWSER_LANG_SHORT);
}
catch (e) {
moment.locale("en");
}
$(".reveal-language-menu").text(countlyCommon.BROWSER_LANG_SHORT.toUpperCase());
$("#sidebar-events").click(function(e) {
$.when(countlyEvent.refreshEvents()).then(function() {
if (countlyEvent.getEvents().length === 0) {
CountlyHelpers.alert(jQuery.i18n.map["events.no-event"], "black");
e.stopImmediatePropagation();
e.preventDefault();
}
});
});
// SIDEBAR
$("#sidebar-menu").on("click", ".submenu-close", function() {
$(this).parents(".sidebar-submenu").animate({ "right": "-170px" }, {
duration: 200,
easing: 'easeInExpo',
complete: function() {
$(".sidebar-submenu").hide();
$("#sidebar-menu>.sidebar-menu>.item").removeClass("menu-active");
}
});
});
$("#sidebar-menu").on("click", ".item", function() {
if ($(this).hasClass("menu-active")) {
return true;
}
$("#sidebar-menu>.sidebar-menu>.item").removeClass("menu-active");
var elNext = $(this).next();
if (elNext.hasClass("sidebar-submenu")) {
$(this).addClass("menu-active");
self.sidebar.submenu.toggle(elNext);
}
else {
$("#sidebar-menu").find(".item").removeClass("active");
$(this).addClass("active");
var mainMenuItem = $(this).parent(".sidebar-submenu").prev(".item");
if (mainMenuItem.length) {
mainMenuItem.addClass("active menu-active");
}
else {
self.sidebar.submenu.toggle();
}
}
});
$("#sidebar-menu").hoverIntent({
over: function() {
var visibleSubmenu = $(".sidebar-submenu:visible");
if (!$(this).hasClass("menu-active") && $(".sidebar-submenu").is(":visible") && !visibleSubmenu.hasClass("half-visible")) {
visibleSubmenu.addClass("half-visible");
visibleSubmenu.animate({ "right": "-110px" }, { duration: 300, easing: 'easeOutExpo' });
}
},
out: function() { },
selector: ".sidebar-menu>.item"
});
$("#sidebar-menu").hoverIntent({
over: function() { },
out: function() {
var visibleSubmenu = $(".sidebar-submenu:visible");
if ($(".sidebar-submenu").is(":visible") && visibleSubmenu.hasClass("half-visible")) {
visibleSubmenu.removeClass("half-visible");
visibleSubmenu.animate({ "right": "0" }, { duration: 300, easing: 'easeOutExpo' });
}
},
selector: ""
});
$("#sidebar-menu").hoverIntent({
over: function() {
var visibleSubmenu = $(".sidebar-submenu:visible");
if (visibleSubmenu.hasClass("half-visible")) {
visibleSubmenu.removeClass("half-visible");
visibleSubmenu.animate({ "right": "0" }, { duration: 300, easing: 'easeOutExpo' });
}
},
out: function() { },
selector: ".sidebar-submenu:visible"
});
$('#sidebar-menu').slimScroll({
height: ($(window).height()) + 'px',
railVisible: true,
railColor: '#4CC04F',
railOpacity: 0.2,
color: '#4CC04F',
disableFadeOut: false
});
$(window).resize(function() {
$('#sidebar-menu').slimScroll({
height: ($(window).height()) + 'px'
});
});
$(".sidebar-submenu").on("click", ".item", function() {
if ($(this).hasClass("disabled")) {
return true;
}
$(".sidebar-submenu .item").removeClass("active");
$(this).addClass("active");
$(this).parent().prev(".item").addClass("active");
});
$("#language-menu .item").click(function() {
var langCode = $(this).data("language-code"),
langCodeUpper = langCode.toUpperCase();
store.set("countly_lang", langCode);
$(".reveal-language-menu").text(langCodeUpper);
countlyCommon.BROWSER_LANG_SHORT = langCode;
countlyCommon.BROWSER_LANG = langCode;
$("body").removeClass(function(index, className) {
return (className.match(/(^|\s)lang-\S*/g) || []).join(' ');
}).addClass("lang-" + langCode);
try {
moment.locale(countlyCommon.BROWSER_LANG_SHORT);
}
catch (e) {
moment.locale("en");
}
countlyCommon.getMonths(true);
$("#date-to").datepicker("option", $.datepicker.regional[countlyCommon.BROWSER_LANG]);
$("#date-from").datepicker("option", $.datepicker.regional[countlyCommon.BROWSER_LANG]);
$.ajax({
type: "POST",
url: countlyGlobal.path + "/user/settings/lang",
data: {
"username": countlyGlobal.member.username,
"lang": countlyCommon.BROWSER_LANG_SHORT,
_csrf: countlyGlobal.csrf_token
},
success: function() { }
});
jQuery.i18n.properties({
name: 'locale',
cache: true,
language: countlyCommon.BROWSER_LANG_SHORT,
countlyVersion: countlyGlobal.countlyVersion + "&" + countlyGlobal.pluginsSHA,
path: [countlyGlobal.cdn + 'localization/min/'],
mode: 'map',
callback: function() {
self.origLang = JSON.stringify(jQuery.i18n.map);
$.when(countlyLocation.changeLanguage()).then(function() {
self.activeView.render();
});
}
});
});
$("#save-account-details:not(.disabled)").live('click', function() {
var username = $(".dialog #username").val(),
old_pwd = $(".dialog #old_pwd").val(),
new_pwd = $(".dialog #new_pwd").val(),
re_new_pwd = $(".dialog #re_new_pwd").val(),
api_key = $(".dialog #api-key").val();
if (new_pwd !== re_new_pwd) {
$(".dialog #settings-save-result").addClass("red").text(jQuery.i18n.map["user-settings.password-match"]);
return true;
}
$(this).addClass("disabled");
$.ajax({
type: "POST",
url: countlyGlobal.path + "/user/settings",
data: {
"username": username,
"old_pwd": old_pwd,
"new_pwd": new_pwd,
_csrf: countlyGlobal.csrf_token
},
success: function(result) {
var saveResult = $(".dialog #settings-save-result");
if (result === "username-exists") {
saveResult.removeClass("green").addClass("red").text(jQuery.i18n.map["management-users.username.exists"]);
}
else if (!result) {
saveResult.removeClass("green").addClass("red").text(jQuery.i18n.map["user-settings.alert"]);
}
else {
saveResult.removeClass("red").addClass("green").text(jQuery.i18n.map["user-settings.success"]);
$(".dialog #old_pwd").val("");
$(".dialog #new_pwd").val("");
$(".dialog #re_new_pwd").val("");
$("#menu-username").text(username);
$("#user-api-key").val(api_key);
countlyGlobal.member.username = username;
countlyGlobal.member.api_key = api_key;
}
$(".dialog #save-account-details").removeClass("disabled");
}
});
});
var help = _.once(function() {
CountlyHelpers.alert(jQuery.i18n.map["help.help-mode-welcome"], "popStyleGreen popStyleGreenWide", {button_title: jQuery.i18n.map["common.okay"] + "!", title: jQuery.i18n.map["help.help-mode-welcome-title"], image: "welcome-to-help-mode"});
});
$(".help-toggle, #help-toggle").click(function(e) {
e.stopPropagation();
$(".help-toggle #help-toggle").toggleClass("active");
app.tipsify($(".help-toggle #help-toggle").hasClass("active"));
if ($(".help-toggle #help-toggle").hasClass("active")) {
help();
$.idleTimer('destroy');
clearInterval(self.refreshActiveView);
}
else {
self.refreshActiveView = setInterval(function() {
self.performRefresh(self);
}, countlyCommon.DASHBOARD_REFRESH_MS);
$.idleTimer(countlyCommon.DASHBOARD_IDLE_MS);
}
});
var logoutRequest = function() {
var logoutForm = document.createElement("form");
logoutForm.action = countlyGlobal.path + '/logout';
logoutForm.method = "post";
logoutForm.style.display = "none";
logoutForm.type = "submit";
var logoutForm_csrf = document.createElement("input");
logoutForm_csrf.name = '_csrf';
logoutForm_csrf.value = countlyGlobal.csrf_token;
logoutForm.appendChild(logoutForm_csrf);
document.body.appendChild(logoutForm);
logoutForm.submit();
document.body.removeChild(logoutForm);
};
$("#user-logout").click(function(e) {
e.preventDefault();
store.remove('countly_active_app');
store.remove('countly_date');
store.remove('countly_location_city');
logoutRequest();
});
$(".beta-button").click(function() {
CountlyHelpers.alert("This feature is currently in beta so the data you see in this view might change or disappear into thin air.<br/><br/>If you find any bugs or have suggestions please let us know!<br/><br/><a style='font-weight:500;'>Captain Obvious:</a> You can use the message box that appears when you click the question mark on the bottom right corner of this page.", "black");
});
$("#content").on("click", "#graph-note", function() {
CountlyHelpers.popup("#graph-note-popup");
$(".note-date:visible").datepicker({
numberOfMonths: 1,
showOtherMonths: true,
onSelect: function() {
dateText();
}
});
$.datepicker.setDefaults($.datepicker.regional[""]);
$(".note-date:visible").datepicker("option", $.datepicker.regional[countlyCommon.BROWSER_LANG]);
$('.note-popup:visible .time-picker, .note-popup:visible .note-list').slimScroll({
height: '100%',
start: 'top',
wheelStep: 10,
position: 'right',
disableFadeOut: true
});
$(".note-popup:visible .time-picker span").on("click", function() {
$(".note-popup:visible .time-picker span").removeClass("selected");
$(this).addClass("selected");
dateText();
});
$(".note-popup:visible .manage-notes-button").on("click", function() {
$(".note-popup:visible .note-create").hide();
$(".note-popup:visible .note-manage").show();
$(".note-popup:visible .create-note-button").show();
$(this).hide();
$(".note-popup:visible .create-note").hide();
});
$(".note-popup:visible .create-note-button").on("click", function() {
$(".note-popup:visible .note-create").show();
$(".note-popup:visible .note-manage").hide();
$(".note-popup:visible .manage-notes-button").show();
$(this).hide();
$(".note-popup:visible .create-note").show();
});
dateText();
/** sets selected date text */
function dateText() {
var selectedDate = $(".note-date:visible").val(),
instance = $(".note-date:visible").data("datepicker"),
date = $.datepicker.parseDate(instance.settings.dateFormat || $.datepicker._defaults.dateFormat, selectedDate, instance.settings);
$(".selected-date:visible").text(moment(date).format("D MMM YYYY") + ", " + $(".time-picker:visible span.selected").text());
}
if (countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID] && countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].notes) {
var noteDateIds = _.sortBy(_.keys(countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].notes), function(el) {
return -parseInt(el);
});
for (var i = 0; i < noteDateIds.length; i++) {
var currNotes = countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].notes[noteDateIds[i]];
for (var j = 0; j < currNotes.length; j++) {
$(".note-popup:visible .note-list").append(
'<div class="note">' +
'<div class="date" data-dateid="' + noteDateIds[i] + '">' + moment(noteDateIds[i], "YYYYMMDDHH").format("D MMM YYYY, HH:mm") + '</div>' +
'<div class="content">' + currNotes[j] + '</div>' +
'<div class="delete-note"><i class="fa fa-trash"></i></div>' +
'</div>'
);
}
}
}
if (!$(".note-popup:visible .note").length) {
$(".note-popup:visible .manage-notes-button").hide();
}
$('.note-popup:visible .note-content').textcounter({
max: 50,
countDown: true,
countDownText: "remaining "
});
$(".note-popup:visible .note .delete-note").on("click", function() {
var dateId = $(this).siblings(".date").data("dateid"),
note = $(this).siblings(".content").text();
$(this).parents(".note").fadeOut().remove();
$.ajax({
type: "POST",
url: countlyGlobal.path + '/graphnotes/delete',
data: {
"app_id": countlyCommon.ACTIVE_APP_ID,
"date_id": dateId,
"note": note,
_csrf: countlyGlobal.csrf_token
},
success: function(result) {
if (result === false) {
return false;
}
else {
updateGlobalNotes({ date_id: dateId, note: note }, "delete");
app.activeView.refresh();
}
}
});
if (!$(".note-popup:visible .note").length) {
$(".note-popup:visible .create-note-button").trigger("click");
$(".note-popup:visible .manage-notes-button").hide();
}
});
$(".note-popup:visible .create-note").on("click", function() {
if ($(this).hasClass("disabled")) {
return true;
}
$(this).addClass("disabled");
var selectedDate = $(".note-date:visible").val(),
instance = $(".note-date:visible").data("datepicker"),
date = $.datepicker.parseDate(instance.settings.dateFormat || $.datepicker._defaults.dateFormat, selectedDate, instance.settings),
dateId = moment(moment(date).format("D MMM YYYY") + ", " + $(".time-picker:visible span.selected").text(), "D MMM YYYY, HH:mm").format("YYYYMMDDHH"),
note = $(".note-popup:visible .note-content").val();
if (!note.length) {
$(".note-popup:visible .note-content").addClass("required-border");
$(this).removeClass("disabled");
return true;
}
else {
$(".note-popup:visible .note-content").removeClass("required-border");
}
$.ajax({
type: "POST",
url: countlyGlobal.path + '/graphnotes/create',
data: {
"app_id": countlyCommon.ACTIVE_APP_ID,
"date_id": dateId,
"note": note,
_csrf: countlyGlobal.csrf_token
},
success: function(result) {
if (result === false) {
return false;
}
else {
updateGlobalNotes({ date_id: dateId, note: result }, "create");
app.activeView.refresh();
app.recordEvent({
"key": "graph-note",
"count": 1,
"segmentation": {}
});
}
}
});
$("#overlay").trigger("click");
});
/** function updates global notes
* @param {object} noteObj - note object
* @param {string} operation - create or delete
*/
function updateGlobalNotes(noteObj, operation) {
var globalNotes = countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].notes;
if (operation === "create") {
if (globalNotes) {
if (globalNotes[noteObj.date_id]) {
countlyCommon.arrayAddUniq(globalNotes[noteObj.date_id], noteObj.note);
}
else {
globalNotes[noteObj.date_id] = [noteObj.note];
}
}
else {
var tmpNote = {};
tmpNote[noteObj.date_id] = [noteObj.note];
countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].notes = tmpNote;
}
}
else if (operation === "delete") {
if (globalNotes) {
if (globalNotes[noteObj.date_id]) {
globalNotes[noteObj.date_id] = _.without(globalNotes[noteObj.date_id], noteObj.note);
}
}
}
}
});
// TOPBAR
var $topbar = $("#top-bar"),
$appNavigation = $("#app-navigation");
$topbar.on("click", ".dropdown", function(e) {
var wasActive = $(this).hasClass("clicked");
$topbar.find(".dropdown").removeClass("clicked");
if (wasActive) {
$(this).removeClass("clicked");
}
else {
$(this).find(".nav-search input").val("");
$(this).find(".list").scrollTop(0);
$(this).addClass("clicked");
var _this = $(this);
setTimeout(function() {
_this.find(".nav-search input").focus();
}, 50);
}
e.stopPropagation();
});
$topbar.on("click", ".dropdown .nav-search", function(e) {
e.stopPropagation();
});
$topbar.on("click", ".dropdown .item", function(e) {
$topbar.find(".dropdown").removeClass("clicked");
e.stopPropagation();
});
$("body").on("click", function() {
$topbar.find(".dropdown").removeClass("clicked");
});
$("#user_api_key_item").click(function() {
$(this).find('input').first().select();
});
$topbar.on("click", "#hide-sidebar-button", function() {
var $analyticsMainView = $("#analytics-main-view");
$analyticsMainView.find("#sidebar").toggleClass("hidden");
$analyticsMainView.find("#content-container").toggleClass("cover-left");
});
// Prevent body scroll after list inside dropdown is scrolled till the end
// Applies to any element that has prevent-body-scroll class as well
$("body").on('DOMMouseScroll mousewheel', ".dropdown .list, .prevent-body-scroll", function(ev) {
var $this = $(this),
scrollTop = this.scrollTop,
scrollHeight = this.scrollHeight,
height = $this.innerHeight(),
delta = ev.originalEvent.wheelDelta,
up = delta > 0;
if (ev.target.className === 'item scrollable') {
return true;
}
var prevent = function() {
ev.stopPropagation();
ev.preventDefault();
ev.returnValue = false;
return false;
};
if (!up && -delta > scrollHeight - height - scrollTop) {
// Scrolling down, but this will take us past the bottom.
$this.scrollTop(scrollHeight);
return prevent();
}
else if (up && delta > scrollTop) {
// Scrolling up, but this will take us past the top.
$this.scrollTop(0);
return prevent();
}
});
$appNavigation.on("click", ".item", function() {
var appKey = $(this).data("key"),
appId = $(this).data("id"),
appName = $(this).find(".name").text(),
appImage = $(this).find(".app-icon").css("background-image");
$("#active-app-icon").css("background-image", appImage);
$("#active-app-name").text(appName);
$("#active-app-name").attr('title', appName);
if (self.activeAppKey !== appKey) {
self.activeAppName = appName;
self.activeAppKey = appKey;
countlyCommon.setActiveApp(appId);
self.activeView.appChanged(function() {
app.onAppSwitch(appId);
});
}
});
$appNavigation.on("click", function() {
var appList = $(this).find(".list"),
apps = _.sortBy(countlyGlobal.apps, function(app) {
return app.name.toLowerCase();
});
appList.html("");
for (var i = 0; i < apps.length; i++) {
var currApp = apps[i];
var app = $("<div></div>");
app.addClass("item searchable");
app.data("key", currApp.key);
app.data("id", currApp._id);
var appIcon = $("<div></div>");
appIcon.addClass("app-icon");
appIcon.css("background-image", "url(" + countlyGlobal.cdn + "appimages/" + currApp._id + ".png)");
var appName = $("<div></div>");
appName.addClass("name");
appName.attr("title", currApp.name);
appName.text(currApp.name);
app.append(appIcon);
app.append(appName);
appList.append(app);
}
});
});
if (!_.isEmpty(countlyGlobal.apps)) {
if (!countlyCommon.ACTIVE_APP_ID) {
var activeApp = (countlyGlobal.member && countlyGlobal.member.active_app_id && countlyGlobal.apps[countlyGlobal.member.active_app_id])
? countlyGlobal.apps[countlyGlobal.member.active_app_id]
: countlyGlobal.defaultApp;
countlyCommon.setActiveApp(activeApp._id);
self.activeAppName = activeApp.name;
$('#active-app-name').html(activeApp.name);
$('#active-app-name').attr('title', activeApp.name);
$("#active-app-icon").css("background-image", "url('" + countlyGlobal.cdn + "appimages/" + countlyCommon.ACTIVE_APP_ID + ".png')");
}
else {
$("#active-app-icon").css("background-image", "url('" + countlyGlobal.cdn + "appimages/" + countlyCommon.ACTIVE_APP_ID + ".png')");
$("#active-app-name").text(countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].name);
$('#active-app-name').attr('title', countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].name);
self.activeAppName = countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].name;
}
}
else {
$("#new-install-overlay").show();
}
$.idleTimer(countlyCommon.DASHBOARD_IDLE_MS);
$(document).bind("idle.idleTimer", function() {
clearInterval(self.refreshActiveView);
});
$(document).bind("active.idleTimer", function() {
self.activeView.restart();
self.refreshActiveView = setInterval(function() {
self.performRefresh(self);
}, countlyCommon.DASHBOARD_REFRESH_MS);
});
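// Custom DataTables pagination renderer with first/previous/next/last buttons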
$.fn.dataTableExt.oPagination.four_button = {
"fnInit": function(oSettings, nPaging, fnCallbackDraw) {
var nFirst = document.createElement('span');
var nPrevious = document.createElement('span');
var nNext = document.createElement('span');
var nLast = document.createElement('span');
nFirst.innerHTML = "<i class='fa fa-angle-double-left'></i>";
nPrevious.innerHTML = "<i class='fa fa-angle-left'></i>";
nNext.innerHTML = "<i class='fa fa-angle-right'></i>";
nLast.innerHTML = "<i class='fa fa-angle-double-right'></i>";
nFirst.className = "paginate_button first";
nPrevious.className = "paginate_button previous";
nNext.className = "paginate_button next";
nLast.className = "paginate_button last";
nPaging.appendChild(nFirst);
nPaging.appendChild(nPrevious);
nPaging.appendChild(nNext);
nPaging.appendChild(nLast);
$(nFirst).click(function() {
oSettings.oApi._fnPageChange(oSettings, "first");
fnCallbackDraw(oSettings);
});
$(nPrevious).click(function() {
oSettings.oApi._fnPageChange(oSettings, "previous");
fnCallbackDraw(oSettings);
});
$(nNext).click(function() {
oSettings.oApi._fnPageChange(oSettings, "next");
fnCallbackDraw(oSettings);
});
$(nLast).click(function() {
oSettings.oApi._fnPageChange(oSettings, "last");
fnCallbackDraw(oSettings);
});
$(nFirst).bind('selectstart', function() {
return false;
});
$(nPrevious).bind('selectstart', function() {
return false;
});
$(nNext).bind('selectstart', function() {
return false;
});
$(nLast).bind('selectstart', function() {
return false;
});
},
"fnUpdate": function(oSettings /*,fnCallbackDraw*/) {
if (!oSettings.aanFeatures.p) {
return;
}
var an = oSettings.aanFeatures.p;
for (var i = 0, iLen = an.length; i < iLen; i++) {
var buttons = an[i].getElementsByTagName('span');
if (oSettings._iDisplayStart === 0) {
buttons[0].className = "paginate_disabled_previous";
buttons[1].className = "paginate_disabled_previous";
}
else {
buttons[0].className = "paginate_enabled_previous";
buttons[1].className = "paginate_enabled_previous";
}
if (oSettings.fnDisplayEnd() === oSettings.fnRecordsDisplay()) {
buttons[2].className = "paginate_disabled_next";
buttons[3].className = "paginate_disabled_next";
}
else {
buttons[2].className = "paginate_enabled_next";
buttons[3].className = "paginate_enabled_next";
}
}
}
};
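// Redraw the table while keeping the current paging position.
// Usage sketch (table selector is illustrative): var oTable = $('#example').dataTable(); oTable.fnStandingRedraw();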
$.fn.dataTableExt.oApi.fnStandingRedraw = function(oSettings) {
if (oSettings.oFeatures.bServerSide === false) {
var before = oSettings._iDisplayStart;
oSettings.oApi._fnReDraw(oSettings);
// iDisplayStart has been reset to zero - so lets change it back
oSettings._iDisplayStart = before;
oSettings.oApi._fnCalculateEnd(oSettings);
}
// draw the 'current' page
oSettings.oApi._fnDraw(oSettings);
};
/** getCustomDateInt
* @param {string} s - date string
* @returns {number} number representing date
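* @example
* // packs a date into a sortable integer: year * 10000 + month * 100 + day,
* // e.g. "23 Dec, 2019" -> 20191223 (assuming English month names and a "D MMM, YYYY" period format)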
*/
function getCustomDateInt(s) {
s = moment(s, countlyCommon.getDateFormat(countlyCommon.periodObj.dateString)).format(countlyCommon.periodObj.dateString);
var dateParts = "";
if (s.indexOf(":") !== -1) {
if (s.indexOf(",") !== -1) {
s = s.replace(/,|:/g, "");
dateParts = s.split(" ");
return parseInt((countlyCommon.getMonths().indexOf(dateParts[1]) + 1) * 1000000) +
parseInt(dateParts[0]) * 10000 +
parseInt(dateParts[2]);
}
else {
return parseInt(s.replace(':', ''));
}
}
else if (s.length === 3) {
return countlyCommon.getMonths().indexOf(s) + 1;
}
else if (s.indexOf("W") === 0) {
s = s.replace(",", "");
s = s.replace("W", "");
dateParts = s.split(" ");
return (parseInt(dateParts[0])) + parseInt(dateParts.pop() * 10000);
}
else {
s = s.replace(",", "");
dateParts = s.split(" ");
if (dateParts.length === 3) {
return (parseInt(dateParts[2]) * 10000) + parseInt((countlyCommon.getMonths().indexOf(dateParts[1]) + 1) * 100) + parseInt(dateParts[0]);
}
else {
if (dateParts[0].length === 3) {
return parseInt((countlyCommon.getMonths().indexOf(dateParts[0]) + 1) * 100) + parseInt(dateParts[1] * 10000);
}
else {
return parseInt((countlyCommon.getMonths().indexOf(dateParts[1]) + 1) * 100) + parseInt(dateParts[0]);
}
}
}
}
jQuery.fn.dataTableExt.oSort['customDate-asc'] = function(x, y) {
x = getCustomDateInt(x);
y = getCustomDateInt(y);
return ((x < y) ? -1 : ((x > y) ? 1 : 0));
};
jQuery.fn.dataTableExt.oSort['customDate-desc'] = function(x, y) {
x = getCustomDateInt(x);
y = getCustomDateInt(y);
return ((x < y) ? 1 : ((x > y) ? -1 : 0));
};
/** getDateRangeInt
* @param {string} s - range string
* @returns {number} number representing range
*/
function getDateRangeInt(s) {
s = s.split("-")[0];
var mEnglish = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"];
if (s.indexOf(":") !== -1) {
var mName = (s.split(" ")[1]).split(",")[0];
return s.replace(mName, parseInt(mEnglish.indexOf(mName))).replace(/[:, ]/g, "");
}
else {
var parts = s.split(" ");
if (parts.length > 1) {
return parseInt(mEnglish.indexOf(parts[1]) * 100) + parseInt(parts[0]);
}
else {
return parts[0].replace(/[><]/g, "");
}
}
}
jQuery.fn.dataTableExt.oSort['dateRange-asc'] = function(x, y) {
x = getDateRangeInt(x);
y = getDateRangeInt(y);
return ((x < y) ? -1 : ((x > y) ? 1 : 0));
};
jQuery.fn.dataTableExt.oSort['dateRange-desc'] = function(x, y) {
x = getDateRangeInt(x);
y = getDateRangeInt(y);
return ((x < y) ? 1 : ((x > y) ? -1 : 0));
};
jQuery.fn.dataTableExt.oSort['percent-asc'] = function(x, y) {
x = parseFloat($("<a></a>").html(x).text().replace("%", ""));
y = parseFloat($("<a></a>").html(y).text().replace("%", ""));
return ((x < y) ? -1 : ((x > y) ? 1 : 0));
};
jQuery.fn.dataTableExt.oSort['percent-desc'] = function(x, y) {
x = parseFloat($("<a></a>").html(x).text().replace("%", ""));
y = parseFloat($("<a></a>").html(y).text().replace("%", ""));
return ((x < y) ? 1 : ((x > y) ? -1 : 0));
};
jQuery.fn.dataTableExt.oSort['formatted-num-asc'] = function(x, y) {
'use strict';
// Define vars
var a = [], b = [];
// Match any character except: digits (0-9), dash (-), period (.), or forward slash (/) and replace those characters with an empty string.
x = x.replace(/[^\d\-\.\/]/g, ''); // eslint-disable-line
y = y.replace(/[^\d\-\.\/]/g, ''); // eslint-disable-line
// Handle simple fractions
if (x.indexOf('/') >= 0) {
a = x.split("/");
x = parseInt(a[0], 10) / parseInt(a[1], 10);
}
if (y.indexOf('/') >= 0) {
b = y.split("/");
y = parseInt(b[0], 10) / parseInt(b[1], 10);
}
return x - y;
};
jQuery.fn.dataTableExt.oSort['formatted-num-desc'] = function(x, y) {
'use strict';
// Define vars
var a = [], b = [];
// Match any character except: digits (0-9), dash (-), period (.), or forward slash (/) and replace those characters with an empty string.
x = x.replace(/[^\d\-\.\/]/g, ''); // eslint-disable-line
y = y.replace(/[^\d\-\.\/]/g, ''); // eslint-disable-line
// Handle simple fractions
if (x.indexOf('/') >= 0) {
a = x.split("/");
x = parseInt(a[0], 10) / parseInt(a[1], 10);
}
if (y.indexOf('/') >= 0) {
b = y.split("/");
y = parseInt(b[0], 10) / parseInt(b[1], 10);
}
return y - x;
};
jQuery.fn.dataTableExt.oSort['loyalty-asc'] = function(x, y) {
x = countlySession.getLoyaltyIndex(x);
y = countlySession.getLoyaltyIndex(y);
return ((x < y) ? -1 : ((x > y) ? 1 : 0));
};
jQuery.fn.dataTableExt.oSort['loyalty-desc'] = function(x, y) {
x = countlySession.getLoyaltyIndex(x);
y = countlySession.getLoyaltyIndex(y);
return ((x < y) ? 1 : ((x > y) ? -1 : 0));
};
jQuery.fn.dataTableExt.oSort['frequency-asc'] = function(x, y) {
x = countlySession.getFrequencyIndex(x);
y = countlySession.getFrequencyIndex(y);
return ((x < y) ? -1 : ((x > y) ? 1 : 0));
};
jQuery.fn.dataTableExt.oSort['frequency-desc'] = function(x, y) {
x = countlySession.getFrequencyIndex(x);
y = countlySession.getFrequencyIndex(y);
return ((x < y) ? 1 : ((x > y) ? -1 : 0));
};
jQuery.fn.dataTableExt.oSort['session-duration-asc'] = function(x, y) {
x = countlySession.getDurationIndex(x);
y = countlySession.getDurationIndex(y);
return ((x < y) ? -1 : ((x > y) ? 1 : 0));
};
jQuery.fn.dataTableExt.oSort['session-duration-desc'] = function(x, y) {
x = countlySession.getDurationIndex(x);
y = countlySession.getDurationIndex(y);
return ((x < y) ? 1 : ((x > y) ? -1 : 0));
};
jQuery.fn.dataTableExt.oSort['format-ago-asc'] = function(x, y) {
return x - y;
};
jQuery.fn.dataTableExt.oSort['format-ago-desc'] = function(x, y) {
return y - x;
};
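// The custom sorters above are opted into per column via the legacy DataTables "sType" option.
// A minimal sketch (table id and column layout are illustrative):
// $('#my-table').dataTable({
//     "aoColumns": [
//         { "sType": "customDate" },
//         { "sType": "percent" },
//         { "sType": "formatted-num" }
//     ]
// });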
/** saves current page
* @param {object} dtable - data table
* @param {object} settings - data table settings
*/
function saveCurrentPage(dtable, settings) {
var data = dtable.fnGetData();
countlyCommon.dtSettings = countlyCommon.dtSettings || [];
var previousTableStatus = countlyCommon.dtSettings.filter(function(item) {
    return (item.viewId === app.activeView.cid && item.selector === settings.sTableId);
})[0];
if (previousTableStatus) {
    previousTableStatus.dataLength = data.length;
    previousTableStatus.page = settings._iDisplayStart / settings._iDisplayLength;
}
else {
countlyCommon.dtSettings.push({
viewId: app.activeView.cid,
selector: settings.sTableId,
dataLength: data.length,
page: settings._iDisplayStart / settings._iDisplayLength
});
}
}
/** sets current page
* @param {object} dtable - data table
* @param {object} settings - data table settings
*/
function setCurrentPage(dtable, settings) {
var tablePersistSettings = countlyCommon.dtSettings.filter(function(item) {
return (item.viewId === app.activeView.cid && item.selector === settings.sTableId);
})[0];
if (tablePersistSettings && tablePersistSettings.dataLength === dtable.fnGetData().length) {
dtable.fnPageChange(tablePersistSettings.page);
}
}
/** gets page size
* @param {object} dtable - data table
* @param {object} settings - data table settings
* @returns {number} page size to use for the table
*/
function getPageSize(dtable, settings) {
var pageSizeSettings = countlyCommon.getPersistentSettings().pageSizeSettings;
if (!pageSizeSettings) {
pageSizeSettings = [];
}
var tablePersistSettings = pageSizeSettings.filter(function(item) {
return (item.viewId === app.activeView.cid && item.selector === settings.sTableId);
})[0];
var pageSize;
if (tablePersistSettings && tablePersistSettings.pageSize) {
pageSize = tablePersistSettings.pageSize;
}
else if (settings.oInit && settings.oInit.iDisplayLength) {
pageSize = settings.oInit.iDisplayLength;
}
else {
pageSize = settings.iDisplayLength || settings._iDisplayLength || 50;
}
return pageSize;
}
$.extend(true, $.fn.dataTable.defaults, {
"sDom": '<"dataTable-top"lfpT>t<"dataTable-bottom"i>',
"bAutoWidth": false,
"bLengthChange": true,
"bPaginate": true,
"sPaginationType": "four_button",
"iDisplayLength": 50,
"bDestroy": true,
"bDeferRender": true,
"oLanguage": {
"sZeroRecords": jQuery.i18n.map["common.table.no-data"],
"sInfoEmpty": jQuery.i18n.map["common.table.no-data"],
"sEmptyTable": jQuery.i18n.map["common.table.no-data"],
"sInfo": jQuery.i18n.map["common.showing"],
"sInfoFiltered": jQuery.i18n.map["common.filtered"],
"sSearch": jQuery.i18n.map["common.search"],
"sLengthMenu": jQuery.i18n.map["common.show-items"] + "<input type='number' id='dataTables_length_input'/>"
},
"fnInitComplete": function(oSettings) {
var dtable = this;
var saveHTML = "<div class='save-table-data' data-help='help.datatables-export'><i class='fa fa-download'></i></div>",
searchHTML = "<div class='search-table-data'><i class='fa fa-search'></i></div>",
tableWrapper = $("#" + oSettings.sTableId + "_wrapper");
countlyCommon.dtSettings = countlyCommon.dtSettings || [];
tableWrapper.bind('page', function(e, _oSettings) {
var dataTable = $(e.target).dataTable();
saveCurrentPage(dataTable, _oSettings);
});
tableWrapper.bind('init', function(e, _oSettings) {
var dataTable = $(e.target).dataTable();
if (_oSettings.oFeatures.bServerSide) {
setTimeout(function() {
setCurrentPage(dataTable, _oSettings);
oSettings.isInitFinished = true;
tableWrapper.show();
}, 0);
}
else {
setCurrentPage(dataTable, _oSettings);
oSettings.isInitFinished = true;
tableWrapper.show();
}
});
var selectButton = "<div class='select-column-table-data' style='display:none;'><p class='ion-gear-a'></p></div>";
$(selectButton).insertBefore(tableWrapper.find(".dataTables_filter"));
$(saveHTML).insertBefore(tableWrapper.find(".DTTT_container"));
$(searchHTML).insertBefore(tableWrapper.find(".dataTables_filter"));
tableWrapper.find(".dataTables_filter").html(tableWrapper.find(".dataTables_filter").find("input").attr("Placeholder", jQuery.i18n.map["common.search"]).clone(true));
tableWrapper.find(".search-table-data").on("click", function() {
$(this).next(".dataTables_filter").toggle();
$(this).next(".dataTables_filter").find("input").focus();
});
var exportDrop;
if (oSettings.oFeatures.bServerSide) {
tableWrapper.find(".dataTables_length").show();
tableWrapper.find('#dataTables_length_input').bind('change.DT', function(/*e, _oSettings*/) {
//store.set("iDisplayLength", $(this).val());
if ($(this).val() && $(this).val().length > 0) {
var pageSizeSettings = countlyCommon.getPersistentSettings().pageSizeSettings;
if (!pageSizeSettings) {
pageSizeSettings = [];
}
var previousTableStatus = pageSizeSettings.filter(function(item) {
    return (item.viewId === app.activeView.cid && item.selector === oSettings.sTableId);
})[0];
if (previousTableStatus) {
    previousTableStatus.pageSize = parseInt($(this).val());
}
else {
pageSizeSettings.push({
viewId: app.activeView.cid,
selector: oSettings.sTableId,
pageSize: parseInt($(this).val())
});
}
countlyCommon.setPersistentSettings({ pageSizeSettings: pageSizeSettings });
}
});
//debounce the search input to slow down serverside filtering
tableWrapper.find('.dataTables_filter input').unbind();
var timeout = null;
tableWrapper.find('.dataTables_filter input').bind('keyup', function() {
var $this = this;
if (timeout) {
clearTimeout(timeout);
timeout = null;
}
timeout = setTimeout(function() {
oSettings.oInstance.fnFilter($this.value);
}, 1000);
});
var exportView = $(dtable).data("view") || "activeView";
var exportAPIData = app[exportView].getExportAPI ? app[exportView].getExportAPI(oSettings.sTableId) : null;
var exportQueryData = app[exportView].getExportQuery ? app[exportView].getExportQuery(oSettings.sTableId) : null;
if (exportAPIData || exportQueryData) {
//create export dialog
exportDrop = new CountlyDrop({
target: tableWrapper.find('.save-table-data')[0],
content: "",
position: 'right middle',
classes: "server-export",
constrainToScrollParent: false,
remove: true,
openOn: "click"
});
exportDrop.on("open", function() {
if (exportAPIData) {
$(".server-export .countly-drop-content").empty().append(CountlyHelpers.export(oSettings._iRecordsDisplay, app[exportView].getExportAPI(oSettings.sTableId), null, true).removeClass("dialog"));
}
else if (exportQueryData) {
$(".server-export .countly-drop-content").empty().append(CountlyHelpers.export(oSettings._iRecordsDisplay, app[exportView].getExportQuery(oSettings.sTableId)).removeClass("dialog"));
}
exportDrop.position();
});
}
else {
tableWrapper.find(".dataTables_length").hide();
//create export dialog
var item = tableWrapper.find('.save-table-data')[0];
if (item) {
exportDrop = new CountlyDrop({
target: tableWrapper.find('.save-table-data')[0],
content: "",
position: 'right middle',
classes: "server-export",
constrainToScrollParent: false,
remove: true,
openOn: "click"
});
exportDrop.on("open", function() {
$(".server-export .countly-drop-content").empty().append(CountlyHelpers.tableExport(dtable, { api_key: countlyGlobal.member.api_key }, null, oSettings).removeClass("dialog"));
exportDrop.position();
});
}
}
}
else {
tableWrapper.find(".dataTables_length").hide();
//create export dialog
var item2 = tableWrapper.find('.save-table-data')[0];
if (item2) {
exportDrop = new CountlyDrop({
target: tableWrapper.find('.save-table-data')[0],
content: "",
position: 'right middle',
classes: "server-export",
constrainToScrollParent: false,
remove: true,
openOn: "click"
});
exportDrop.on("open", function() {
$(".server-export .countly-drop-content").empty().append(CountlyHelpers.tableExport(dtable, { api_key: countlyGlobal.member.api_key }).removeClass("dialog"));
exportDrop.position();
});
}
}
//tableWrapper.css({"min-height": tableWrapper.height()});
},
fnPreDrawCallback: function(oSettings) {
var tableWrapper = $("#" + oSettings.sTableId + "_wrapper");
if (oSettings.isInitFinished) {
tableWrapper.show();
}
else {
var dtable = $(oSettings.nTable).dataTable();
oSettings._iDisplayLength = getPageSize(dtable, oSettings);
$('.dataTables_length').find('input[type=number]').val(oSettings._iDisplayLength);
tableWrapper.hide();
}
if (tableWrapper.find(".table-placeholder").length === 0) {
var $placeholder = $('<div class="table-placeholder"><div class="top"></div><div class="header"></div></div>');
tableWrapper.append($placeholder);
}
if (tableWrapper.find(".table-loader").length === 0) {
tableWrapper.append("<div class='table-loader'></div>");
}
},
fnDrawCallback: function(oSettings) {
var tableWrapper = $("#" + oSettings.sTableId + "_wrapper");
tableWrapper.find(".dataTable-bottom").show();
tableWrapper.find(".table-placeholder").remove();
tableWrapper.find(".table-loader").remove();
}
});
$.fn.dataTableExt.sErrMode = 'throw';
$(document).ready(function() {
setTimeout(function() {
self.onAppSwitch(countlyCommon.ACTIVE_APP_ID, true, true);
}, 1);
});
},
/**
* Localize all found html elements with data-localize and data-help-localize attributes
* @param {jquery_object} el - jquery reference to parent element which contents to localize, by default all document is localized if not provided
* @memberof app
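* @instance
* @example
* // <span data-localize="common.okay"></span>       -> replaced with the localized string
* // <span data-localize="upper!common.okay"></span> -> same string passed through the "upper" helper
* app.localize($("#my-container")); // "#my-container" is an illustrative container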
*/
localize: function(el) {
var helpers = {
onlyFirstUpper: function(str) {
return str.charAt(0).toUpperCase() + str.slice(1).toLowerCase();
},
upper: function(str) {
return str.toUpperCase();
}
};
// translate help module
(el ? el.find('[data-help-localize]') : $("[data-help-localize]")).each(function() {
var elem = $(this);
if (typeof elem.data("help-localize") !== "undefined") {
elem.data("help", jQuery.i18n.map[elem.data("help-localize")]);
}
});
// translate dashboard
(el ? el.find('[data-localize]') : $("[data-localize]")).each(function() {
var elem = $(this),
toLocal = elem.data("localize").split("!"),
localizedValue = "";
if (toLocal.length === 2) {
if (helpers[toLocal[0]]) {
localizedValue = helpers[toLocal[0]](jQuery.i18n.map[toLocal[1]]);
}
else {
localizedValue = jQuery.i18n.prop(toLocal[0], (toLocal[1]) ? jQuery.i18n.map[toLocal[1]] : "");
}
}
else {
localizedValue = jQuery.i18n.map[elem.data("localize")];
}
if (elem.is("input[type=text]") || elem.is("input[type=password]") || elem.is("textarea")) {
elem.attr("placeholder", localizedValue);
}
else if (elem.is("input[type=button]") || elem.is("input[type=submit]")) {
elem.attr("value", localizedValue);
}
else {
elem.html(localizedValue);
}
});
},
/**
* Toggle showing tooltips, which are usually used in help mode for all elements containing css class help-zone-vs or help-zone-vb and having data-help attributes (which are generated automatically from data-help-localize attributes upon localization)
* @param {boolean} enable - if true tooltips will be shown on hover, if false tooltips will be disabled
* @param {jquery_object} el - jquery reference to parent element which contents to check for tooltips, by default all document is checked if not provided
* @memberof app
* @instance
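* @example
* app.tipsify(true);  // show tooltips on hover for all help zones
* app.tipsify(false); // disable them again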
*/
tipsify: function(enable, el) {
var vs = el ? el.find('.help-zone-vs') : $('.help-zone-vs'),
vb = el ? el.find('.help-zone-vb') : $('.help-zone-vb'),
both = el ? el.find('.help-zone-vs, .help-zone-vb') : $(".help-zone-vs, .help-zone-vb");
vb.tipsy({
gravity: $.fn.tipsy.autoNS,
trigger: 'manual',
title: function() {
return $(this).data("help") || "";
},
fade: true,
offset: 5,
cssClass: 'yellow',
opacity: 1,
html: true
});
vs.tipsy({
gravity: $.fn.tipsy.autoNS,
trigger: 'manual',
title: function() {
return $(this).data("help") || "";
},
fade: true,
offset: 5,
cssClass: 'yellow narrow',
opacity: 1,
html: true
});
if (enable) {
both.off('mouseenter mouseleave')
.on('mouseenter', function() {
$(this).tipsy("show");
})
.on('mouseleave', function() {
$(this).tipsy("hide");
});
}
else {
both.off('mouseenter mouseleave');
}
},
/**
* Register a new app type such as mobile, web, desktop, etc. You can create a new plugin to add a new app type with its own dashboard
* @param {string} name - name of the app type as mobile, web, desktop etc
* @param {countlyView} view - instance of the countlyView to show as main dashboard for provided app type
* @memberof app
* @instance
* @example
* app.addAppType("mobile", MobileDashboardView);
*/
addAppType: function(name, view) {
this.appTypes[name] = new view();
var menu = $("#default-type").clone();
menu.attr("id", name + "-type");
$("#sidebar-menu").append(menu);
},
/**
* Add callback to be called when user changes app in dashboard, which can be used globally, outside of the view
* @param {function} callback - function receives app_id param which is app id of the new app to which user switched
* @memberof app
* @instance
* @example
* app.addAppSwitchCallback(function(appId){
* countlyCrashes.loadList(appId);
* });
*/
addAppSwitchCallback: function(callback) {
this.appSwitchCallbacks.push(callback);
},
/**
* Add callback to be called when user changes app in Management -> Applications section, useful when providing custom input additions to app editing for different app types
* @param {function} callback - function receives app_id param which is app id and type which is app type
* @memberof app
* @instance
* @example
* app.addAppManagementSwitchCallback(function(appId, type){
* if (type == "mobile") {
* addPushHTMLIfNeeded(type);
* $("#view-app .appmng-push").show();
* } else {
* $("#view-app .appmng-push").hide();
* }
* });
*/
addAppManagementSwitchCallback: function(callback) {
this.appManagementSwitchCallbacks.push(callback);
},
/**
* Modify app object on app create/update before submitting it to server
* @param {function} callback - function args object with all data that will be submitted to server on app create/update
* @memberof app
* @instance
* @example
* app.addAppObjectModificator(function(args){
* if (args.type === "mobile") {
* //do something for mobile
* }
* });
*/
addAppObjectModificator: function(callback) {
this.appObjectModificators.push(callback);
},
/**
* Add a countlyManagementView-extending view which will be displayed in accordion tabs on Management->Applications screen
* @param {string} plugin - plugin name
* @param {string} title - plugin title
* @param {object} View - plugin view
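* @memberof app
* @instance
* @example
* // "myplugin", the title key and MyPluginManagementView are illustrative names
* app.addAppManagementView("myplugin", jQuery.i18n.map["myplugin.title"], MyPluginManagementView);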
*/
addAppManagementView: function(plugin, title, View) {
this.appManagementViews[plugin] = {title: title, view: View};
},
/**
* Add additional settings to app management. Allows you to inject html with css classes app-read-settings and app-write-settings, using the data-id attribute as the key to store in the app collection. If your value or input needs additional processing, you may add the callbacks here
* @param {string} id - the same value on your input data-id attributes
* @param {object} options - different callbacks for data modification
* @param {function} options.toDisplay - function to be called when data is prepared for displaying, passes reference to html element with app-read-settings css class in which value should be displayed
* @param {function} options.toInput - function to be called when data is prepared for input, passes reference to html input element with app-write-settings css class in which value should be placed for editing
* @param {function} options.toSave - function to be called when data is prepared for saving, passes reference to object args that will be sent to server and html input element with app-write-settings css class from which value should be taken and placed in args
* @param {function} options.toInject - function to be called when to inject HTML into app management view
* @memberof app
* @instance
* @example
* app.addAppSetting("my_setting", {
* toDisplay: function(appId, elem){$(elem).text(process(countlyGlobal['apps'][appId]["my_setting"]));},
* toInput: function(appId, elem){$(elem).val(process(countlyGlobal['apps'][appId]["my_setting"]));},
* toSave: function(appId, args, elem){
* args.my_setting = process($(elem).val());
* },
* toInject: function(){
* var addApp = '<tr class="help-zone-vs" data-help-localize="manage-apps.app-my_setting">'+
* '<td>'+
* '<span data-localize="management-applications.my_setting"></span>'+
* '</td>'+
* '<td>'+
* '<input type="text" value="" class="app-write-settings" data-localize="placeholder.my_setting" data-id="my_setting">'+
* '</td>'+
* '</tr>';
*
* $("#add-new-app table .table-add").before(addApp);
*
* var editApp = '<tr class="help-zone-vs" data-help-localize="manage-apps.app-my_settingt">'+
* '<td>'+
* '<span data-localize="management-applications.my_setting"></span>'+
* '</td>'+
* '<td>'+
* '<div class="read app-read-settings" data-id="my_setting"></div>'+
* '<div class="edit">'+
* '<input type="text" value="" class="app-write-settings" data-id="my_setting" data-localize="placeholder.my_setting">'+
* '</div>'+
* '</td>'+
* '</tr>';
*
* $(".app-details table .table-edit").before(editApp);
* }
* });
*/
addAppSetting: function(id, options) {
this.appSettings[id] = options;
},
/**
* Add callback to be called when user changes app type in UI in Management -> Applications section (even without saving app type, just changing in UI), useful when providing custom input additions to app editing for different app types
* @param {function} callback - function receives type which is app type
* @memberof app
* @instance
* @example
* app.addAppAddTypeCallback(function(type){
* if (type == "mobile") {
* $("#view-app .appmng-push").show();
* } else {
* $("#view-app .appmng-push").hide();
* }
* });
*/
addAppAddTypeCallback: function(callback) {
this.appAddTypeCallbacks.push(callback);
},
/**
* Add callback to be called when user opens the user edit UI in Management -> Users section (even without saving, just opening), useful when providing custom input additions to user editing
* @param {function} callback - function receives the user object and param, which can be true if saving data, false if opening data, or a string to modify data
* @memberof app
* @instance
*/
addUserEditCallback: function(callback) {
this.userEditCallbacks.push(callback);
},
/**
* Add custom data export handler from datatables to csv/xls exporter. Provide exporter name and callback function.
* Then add the same name as sExport attribute to the first datatables column.
* Then when the user wants to export data from this table, your callback function will be called to get the data.
* You must prepare an array of objects, all with the same keys, where keys are columns and values are table data, and return it from the callback
* to be processed by the exporter.
* @param {string} name - name of the export to expect in datatables sExport attribute
* @param {function} callback - callback to call when getting data
* @memberof app
* @instance
* @example
* app.addDataExport("userinfo", function(){
* var ret = [];
* var elem;
* for(var i = 0; i < tableData.length; i++){
* //use same keys for each array element with different user data
* elem ={
* "fullname": tableData[i].firstname + " " + tableData[i].lastname,
* "job": tableData[i].company + ", " + tableData[i].jobtitle,
* "email": tableData[i].email
* };
* ret.push(elem);
* }
* //return array
* return ret;
* });
*/
addDataExport: function(name, callback) {
this.dataExports[name] = callback;
},
/**
* Add callback to be called every time a new view/page is loaded, so you can modify the view with javascript after it has been loaded
* @param {string} view - view url/hash or with possible # as wildcard or simply providing # for any view
* @param {function} callback - function to be called when view loaded
* @memberof app
* @instance
* @example <caption>Adding to single specific view with specific url</caption>
* //this will work only for view bind to #/analytics/events
* app.addPageScript("/analytics/events", function(){
* $("#event-nav-head").after(
* "<a href='#/analytics/events/compare'>" +
* "<div id='compare-events' class='event-container'>" +
* "<div class='icon'></div>" +
* "<div class='name'>" + jQuery.i18n.map["compare.button"] + "</div>" +
* "</div>" +
* "</a>"
* );
* });
* @example <caption>Add to all view subpages</caption>
* //this will work /users/ and users/1 and users/abs etc
* app.addPageScript("/users#", modifyUserDetailsForPush);
* @example <caption>Adding script to any view</caption>
* //this will work for any view
* app.addPageScript("#", function(){
* alert("I am an annoying popup appearing on each view");
* });
*/
addPageScript: function(view, callback) {
if (!this.pageScripts[view]) {
this.pageScripts[view] = [];
}
this.pageScripts[view].push(callback);
},
/**
* Add callback to be called every time a view is refreshed, because the view may reset some html and we may want to modify it again. By default this happens every 10 seconds, so avoid cpu-intensive tasks
* @param {string} view - view url/hash or with possible # as wildcard or simply providing # for any view
* @param {function} callback - function to be called when view refreshed
* @memberof app
* @instance
* @example <caption>Adding to single specific view with specific url</caption>
* //this will work only for view bind to #/analytics/events
* app.addPageScript("/analytics/events", function(){
* $("#event-nav-head").after(
* "<a href='#/analytics/events/compare'>" +
* "<div id='compare-events' class='event-container'>" +
* "<div class='icon'></div>" +
* "<div class='name'>" + jQuery.i18n.map["compare.button"] + "</div>" +
* "</div>" +
* "</a>"
* );
* });
* @example <caption>Add to all view subpage refreshed</caption>
* //this will work /users/ and users/1 and users/abs etc
* app.addRefreshScript("/users#", modifyUserDetailsForPush);
* @example <caption>Adding script to any view</caption>
* //this will work for any view
* app.addRefreshScript("#", function(){
* alert("I am an annoying popup appearing on each refresh of any view");
* });
*/
addRefreshScript: function(view, callback) {
if (!this.refreshScripts[view]) {
this.refreshScripts[view] = [];
}
this.refreshScripts[view].push(callback);
},
onAppSwitch: function(appId, refresh, firstLoad) {
if (appId !== 0) {
this._isFirstLoad = firstLoad;
jQuery.i18n.map = JSON.parse(app.origLang);
if (!refresh) {
app.main(true);
if (window.components && window.components.slider && window.components.slider.instance) {
window.components.slider.instance.close();
}
}
$("#sidebar-menu .sidebar-menu").hide();
var type = countlyGlobal.apps[appId].type;
if ($("#sidebar-menu #" + type + "-type").length) {
$("#sidebar-menu #" + type + "-type").show();
}
else {
$("#sidebar-menu #default-type").show();
}
for (var i = 0; i < this.appSwitchCallbacks.length; i++) {
this.appSwitchCallbacks[i](appId);
}
app.localize();
}
},
onAppManagementSwitch: function(appId, type) {
for (var i = 0; i < this.appManagementSwitchCallbacks.length; i++) {
this.appManagementSwitchCallbacks[i](appId, type || countlyGlobal.apps[appId].type);
}
if ($("#app-add-name").length) {
var newAppName = $("#app-add-name").val();
$("#app-container-new .name").text(newAppName);
$(".new-app-name").text(newAppName);
}
},
onAppAddTypeSwitch: function(type) {
for (var i = 0; i < this.appAddTypeCallbacks.length; i++) {
this.appAddTypeCallbacks[i](type);
}
},
onUserEdit: function(user, param) {
for (var i = 0; i < this.userEditCallbacks.length; i++) {
param = this.userEditCallbacks[i](user, param);
}
return param;
},
pageScript: function() { //scripts to be executed on each view change
$("#month").text(moment().year());
$("#day").text(moment().format("MMM"));
$("#yesterday").text(moment().subtract(1, "days").format("Do"));
var self = this;
$(document).ready(function() {
var selectedDateID = countlyCommon.getPeriod();
if (Object.prototype.toString.call(selectedDateID) !== '[object Array]') {
$("#" + selectedDateID).addClass("active");
}
var i = 0;
var l = 0;
if (self.pageScripts[Backbone.history.fragment]) {
for (i = 0, l = self.pageScripts[Backbone.history.fragment].length; i < l; i++) {
self.pageScripts[Backbone.history.fragment][i]();
}
}
for (var k in self.pageScripts) {
if (k !== '#' && k.indexOf('#') !== -1 && Backbone.history.fragment.match("^" + k.replace(/#/g, '.*'))) {
for (i = 0, l = self.pageScripts[k].length; i < l; i++) {
self.pageScripts[k][i]();
}
}
}
if (self.pageScripts["#"]) {
for (i = 0, l = self.pageScripts["#"].length; i < l; i++) {
self.pageScripts["#"][i]();
}
}
// Translate all elements with a data-help-localize or data-localize attribute
self.localize();
if ($("#help-toggle").hasClass("active")) {
$('.help-zone-vb').tipsy({
gravity: $.fn.tipsy.autoNS,
trigger: 'manual',
title: function() {
return ($(this).data("help")) ? $(this).data("help") : "";
},
fade: true,
offset: 5,
cssClass: 'yellow',
opacity: 1,
html: true
});
$('.help-zone-vs').tipsy({
gravity: $.fn.tipsy.autoNS,
trigger: 'manual',
title: function() {
return ($(this).data("help")) ? $(this).data("help") : "";
},
fade: true,
offset: 5,
cssClass: 'yellow narrow',
opacity: 1,
html: true
});
$.idleTimer('destroy');
clearInterval(self.refreshActiveView);
$(".help-zone-vs, .help-zone-vb").hover(
function() {
$(this).tipsy("show");
},
function() {
$(this).tipsy("hide");
}
);
}
$(".usparkline").peity("bar", { width: "100%", height: "30", colour: "#83C986", strokeColour: "#83C986", strokeWidth: 2 });
$(".dsparkline").peity("bar", { width: "100%", height: "30", colour: "#DB6E6E", strokeColour: "#DB6E6E", strokeWidth: 2 });
CountlyHelpers.setUpDateSelectors(self.activeView);
$(window).click(function() {
$("#date-picker").hide();
$(".cly-select").removeClass("active");
});
$("#date-picker").click(function(e) {
e.stopPropagation();
});
$("#date-picker-button").click(function(e) {
$("#date-picker").toggle();
if (self.dateToSelected) {
dateTo.datepicker("setDate", moment(self.dateToSelected).toDate());
dateFrom.datepicker("option", "maxDate", moment(self.dateToSelected).toDate());
}
else {
self.dateToSelected = moment().toDate().getTime();
dateTo.datepicker("setDate", moment().toDate());
dateFrom.datepicker("option", "maxDate", moment(self.dateToSelected).toDate());
}
if (self.dateFromSelected) {
dateFrom.datepicker("setDate", moment(self.dateFromSelected).toDate());
dateTo.datepicker("option", "minDate", moment(self.dateFromSelected).toDate());
}
else {
var extendDate = moment(dateTo.datepicker("getDate")).subtract(30, 'days').toDate();
dateFrom.datepicker("setDate", extendDate);
self.dateFromSelected = moment(dateTo.datepicker("getDate")).subtract(30, 'days').toDate().getTime();
dateTo.datepicker("option", "minDate", moment(self.dateFromSelected).toDate());
}
setSelectedDate();
e.stopPropagation();
});
var dateTo = $("#date-to").datepicker({
numberOfMonths: 1,
showOtherMonths: true,
maxDate: moment().toDate(),
onSelect: function(selectedDate) {
var instance = $(this).data("datepicker"),
date = $.datepicker.parseDate(instance.settings.dateFormat || $.datepicker._defaults.dateFormat, selectedDate, instance.settings);
if (date.getTime() < self.dateFromSelected) {
self.dateFromSelected = date.getTime();
}
dateFrom.datepicker("option", "maxDate", date);
self.dateToSelected = date.getTime();
setSelectedDate();
}
});
var dateFrom = $("#date-from").datepicker({
numberOfMonths: 1,
showOtherMonths: true,
maxDate: moment().subtract(1, 'days').toDate(),
onSelect: function(selectedDate) {
var instance = $(this).data("datepicker"),
date = $.datepicker.parseDate(instance.settings.dateFormat || $.datepicker._defaults.dateFormat, selectedDate, instance.settings);
if (date.getTime() > self.dateToSelected) {
self.dateToSelected = date.getTime();
}
dateTo.datepicker("option", "minDate", date);
self.dateFromSelected = date.getTime();
setSelectedDate();
}
});
/** function sets selected date */
function setSelectedDate() {
var from = moment(dateFrom.datepicker("getDate")).format("D MMM, YYYY"),
to = moment(dateTo.datepicker("getDate")).format("D MMM, YYYY");
$("#selected-date").text(from + " - " + to);
}
$.datepicker.setDefaults($.datepicker.regional[""]);
$("#date-to").datepicker("option", $.datepicker.regional[countlyCommon.BROWSER_LANG]);
$("#date-from").datepicker("option", $.datepicker.regional[countlyCommon.BROWSER_LANG]);
$("#date-submit").click(function() {
if (!self.dateFromSelected && !self.dateToSelected) {
return false;
}
var tzCorr = countlyCommon.getOffsetCorrectionForTimestamp(self.dateFromSelected);
countlyCommon.setPeriod([self.dateFromSelected - tzCorr, self.dateToSelected - tzCorr]);
self.activeView.dateChanged();
app.runRefreshScripts();
$(".date-selector").removeClass("selected").removeClass("active");
});
$('.scrollable').slimScroll({
height: '100%',
start: 'top',
wheelStep: 10,
position: 'right',
disableFadeOut: true
});
$(".checkbox").on('click', function() {
$(this).toggleClass("checked");
});
$(".resource-link").on('click', function() {
if ($(this).data("link")) {
CountlyHelpers.openResource($(this).data("link"));
}
});
$("#sidebar-menu").find(".item").each(function() {
if ($(this).next().hasClass("sidebar-submenu") && $(this).find(".ion-chevron-right").length === 0) {
$(this).append("<span class='ion-chevron-right'></span>");
}
});
$('.nav-search').on('input', "input", function() {
var searchText = new RegExp($(this).val().toLowerCase().replace(/[-[\]{}()*+?.,\\^$|#\s]/g, '\\$&')),
searchInside = $(this).parent().next().find(".searchable");
searchInside.filter(function() {
return !(searchText.test($(this).text().toLowerCase()));
}).css('display', 'none');
searchInside.filter(function() {
return searchText.test($(this).text().toLowerCase());
}).css('display', 'block');
});
$(document).on('input', "#listof-apps .search input", function() {
var searchText = new RegExp($(this).val().toLowerCase()),
searchInside = $(this).parent().next().find(".searchable");
searchInside.filter(function() {
return !(searchText.test($(this).text().toLowerCase()));
}).css('display', 'none');
searchInside.filter(function() {
return searchText.test($(this).text().toLowerCase());
}).css('display', 'block');
});
$(document).on('mouseenter', ".bar-inner", function() {
var number = $(this).parent().next();
number.text($(this).data("item"));
number.css({ "color": $(this).css("background-color") });
});
$(document).on('mouseleave', ".bar-inner", function() {
var number = $(this).parent().next();
number.text(number.data("item"));
number.css({ "color": $(this).parent().find(".bar-inner:first-child").css("background-color") });
});
/*
Auto expand left navigation (events, management > apps etc)
if ellipsis is applied to children
*/
var closeLeftNavExpand;
var leftNavSelector = "#event-nav, #app-management-bar, #configs-title-bar";
var $leftNav = $(leftNavSelector);
$leftNav.hoverIntent({
over: function() {
var parentLeftNav = $(this).parents(leftNavSelector);
if (leftNavNeedsExpand(parentLeftNav)) {
parentLeftNav.addClass("expand");
}
},
out: function() {
                // Delay shrinking and allow movement towards the top section to cancel it
closeLeftNavExpand = setTimeout(function() {
$(this).parents(leftNavSelector).removeClass("expand");
}, 500);
},
selector: ".slimScrollDiv"
});
$leftNav.on("mousemove", function() {
if ($(this).hasClass("expand")) {
clearTimeout(closeLeftNavExpand);
}
});
$leftNav.on("mouseleave", function() {
$(this).removeClass("expand");
});
            /** Checks if the left nav needs to expand
            @param {object} $nav html element
            @returns {boolean} true if any child name is truncated with an ellipsis
            */
function leftNavNeedsExpand($nav) {
var makeExpandable = false;
$nav.find(".event-container:not(#compare-events) .name, .app-container .name, .config-container .name").each(function(z, el) {
if (el.offsetWidth < el.scrollWidth) {
makeExpandable = true;
return false;
}
});
return makeExpandable;
}
/* End of auto expand code */
});
}
});
Backbone.history || (Backbone.history = new Backbone.History);
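    //keep a reference to the original checkUrl and let registered urlChecks veto navigation before it runs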
Backbone.history._checkUrl = Backbone.history.checkUrl;
Backbone.history.urlChecks = [];
Backbone.history.checkOthers = function() {
var proceed = true;
for (var i = 0; i < Backbone.history.urlChecks.length; i++) {
if (!Backbone.history.urlChecks[i]()) {
proceed = false;
}
}
return proceed;
};
Backbone.history.checkUrl = function() {
if (Backbone.history.checkOthers()) {
Backbone.history._checkUrl();
}
};
Backbone.history.noHistory = function(hash) {
if (history && history.replaceState) {
history.replaceState(undefined, undefined, hash);
}
else {
location.replace(hash);
}
};
Backbone.history.__checkUrl = Backbone.history.checkUrl;
Backbone.history._getFragment = Backbone.history.getFragment;
Backbone.history.appIds = [];
for (var i in countlyGlobal.apps) {
Backbone.history.appIds.push(i);
}
Backbone.history.getFragment = function() {
var fragment = Backbone.history._getFragment();
if (fragment.indexOf("/" + countlyCommon.ACTIVE_APP_ID) === 0) {
fragment = fragment.replace("/" + countlyCommon.ACTIVE_APP_ID, "");
}
return fragment;
};
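    //wrap checkUrl so that, when the hash points at a different known app id, the app is switched before routing continues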
Backbone.history.checkUrl = function() {
var app_id = Backbone.history._getFragment().split("/")[1] || "";
if (countlyCommon.APP_NAMESPACE !== false && countlyCommon.ACTIVE_APP_ID !== 0 && countlyCommon.ACTIVE_APP_ID !== app_id && Backbone.history.appIds.indexOf(app_id) === -1) {
Backbone.history.noHistory("#/" + countlyCommon.ACTIVE_APP_ID + Backbone.history._getFragment());
app_id = countlyCommon.ACTIVE_APP_ID;
}
if (countlyCommon.ACTIVE_APP_ID !== 0 && countlyCommon.ACTIVE_APP_ID !== app_id && Backbone.history.appIds.indexOf(app_id) !== -1) {
app.switchApp(app_id, function() {
if (Backbone.history.checkOthers()) {
Backbone.history.__checkUrl();
}
});
}
else {
if (Backbone.history.checkOthers()) {
Backbone.history.__checkUrl();
}
}
};
//initial hash check
(function() {
var app_id = Backbone.history._getFragment().split("/")[1] || "";
if (countlyCommon.ACTIVE_APP_ID === app_id || Backbone.history.appIds.indexOf(app_id) !== -1) {
//we have app id
if (app_id !== countlyCommon.ACTIVE_APP_ID) {
            // but it is not the currently selected app, so let's switch
countlyCommon.setActiveApp(app_id);
$("#active-app-name").text(countlyGlobal.apps[app_id].name);
$('#active-app-name').attr('title', countlyGlobal.apps[app_id].name);
$("#active-app-icon").css("background-image", "url('" + countlyGlobal.path + "appimages/" + app_id + ".png')");
}
}
else if (countlyCommon.APP_NAMESPACE !== false) {
//add current app id
Backbone.history.noHistory("#/" + countlyCommon.ACTIVE_APP_ID + Backbone.history._getFragment());
}
})();
var app = new AppRouter();
/**
 * Navigate to another hash address programmatically, without triggering the view route and without leaving a trace in history, if possible
* @param {string} hash - url path (hash part) to change
* @memberof app
* @example
* //you are at #/manage/systemlogs
* app.noHistory("#/manage/systemlogs/query/{}");
* //now pressing back would not go to #/manage/systemlogs
*/
app.noHistory = function(hash) {
if (countlyCommon.APP_NAMESPACE !== false) {
hash = "#/" + countlyCommon.ACTIVE_APP_ID + hash.substr(1);
}
if (history && history.replaceState) {
history.replaceState(undefined, undefined, hash);
}
else {
location.replace(hash);
}
};
//collects requests for active views to discard them if the view changes
$.ajaxPrefilter(function(options, originalOptions, jqXHR) {
//add to options for independent!!!
if (originalOptions && (originalOptions.type === 'GET' || originalOptions.type === 'get') && originalOptions.url.substr(0, 2) === '/o') {
if (originalOptions.data && originalOptions.data.preventGlobalAbort && originalOptions.data.preventGlobalAbort === true) {
return true;
}
var myurl = "";
var mydata = "";
if (originalOptions && originalOptions.url) {
myurl = originalOptions.url;
}
if (originalOptions && originalOptions.data) {
mydata = JSON.stringify(originalOptions.data);
}
        //request which is not killed on view change (only on app change)
jqXHR.my_set_url = myurl;
jqXHR.my_set_data = mydata;
if (originalOptions.data && originalOptions.data.preventRequestAbort && originalOptions.data.preventRequestAbort === true) {
if (app._myRequests[myurl] && app._myRequests[myurl][mydata]) {
jqXHR.abort(); //we already have same working request
}
else {
jqXHR.always(function(data, textStatus, jqXHR1) {
                //on success the jqXHR object is the third parameter; on error it is the first parameter.
if (jqXHR1 && jqXHR1.my_set_url && jqXHR1.my_set_data) {
if (app._myRequests[jqXHR1.my_set_url] && app._myRequests[jqXHR1.my_set_url][jqXHR1.my_set_data]) {
delete app._myRequests[jqXHR1.my_set_url][jqXHR1.my_set_data];
}
}
else if (data && data.my_set_url && data.my_set_data) {
if (app._myRequests[data.my_set_url] && app._myRequests[data.my_set_url][data.my_set_data]) {
delete app._myRequests[data.my_set_url][data.my_set_data];
}
}
});
//save request in our object
if (!app._myRequests[myurl]) {
app._myRequests[myurl] = {};
}
app._myRequests[myurl][mydata] = jqXHR;
}
}
else {
if (app.activeView) {
if (app.activeView._myRequests[myurl] && app.activeView._myRequests[myurl][mydata]) {
jqXHR.abort(); //we already have same working request
}
else {
jqXHR.always(function(data, textStatus, jqXHR1) {
                    //on success the jqXHR object is the third parameter; on error it is the first parameter.
if (jqXHR1 && jqXHR1.my_set_url && jqXHR1.my_set_data) {
if (app.activeView._myRequests[jqXHR1.my_set_url] && app.activeView._myRequests[jqXHR1.my_set_url][jqXHR1.my_set_data]) {
delete app.activeView._myRequests[jqXHR1.my_set_url][jqXHR1.my_set_data];
}
}
else if (data && data.my_set_url && data.my_set_data) {
if (app.activeView._myRequests[data.my_set_url] && app.activeView._myRequests[data.my_set_url][data.my_set_data]) {
delete app.activeView._myRequests[data.my_set_url][data.my_set_data];
}
}
});
//save request in our object
if (!app.activeView._myRequests[myurl]) {
app.activeView._myRequests[myurl] = {};
}
app.activeView._myRequests[myurl][mydata] = jqXHR;
}
}
}
}
}); | 1 | 13,300 | As far as I know, this will replace only the first occurrence of Countly, not the others, if there is more than one Countly word in the localized string | Countly-countly-server | js |
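A minimal TypeScript sketch of the pitfall the reviewer points out (the string values are made up for illustration): `String.prototype.replace` with a plain string pattern substitutes only the first match, so a localized string that mentions Countly more than once keeps the later occurrences; a regex with the `g` flag (or `replaceAll`) rewrites them all.

```typescript
// Hypothetical localized string that mentions the product name twice.
const localized = "Countly dashboard, powered by Countly";

// A plain string pattern replaces only the FIRST occurrence.
const first = localized.replace("Countly", "MyBrand");
// -> "MyBrand dashboard, powered by Countly"

// A global regex (or String.prototype.replaceAll) replaces every occurrence.
const all = localized.replace(/Countly/g, "MyBrand");
// -> "MyBrand dashboard, powered by MyBrand"

console.log(first);
console.log(all);
```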
@@ -111,9 +111,9 @@ public class LocalRepository implements Repository {
if (StringUtils.isBlank(jsonTypeDTO.getId())) {
if (!StringUtils.isBlank(jsonTypeDTO.getName())) {
- typeDTOBuilder.withId(jsonTypeDTO.getName().replaceAll("[^a-zA-Z0-9]", ""));
+ typeDTOBuilder.withId(jsonTypeDTO.getName().replaceAll("[^a-zA-Z0-9_]", ""));
} else {
- typeDTOBuilder.withId(jsonTypeDTO.getName().replaceAll("[^a-zA-Z0-9]", ""));
+ typeDTOBuilder.withId(jsonTypeDTO.getName().replaceAll("[^a-zA-Z0-9_]", ""));
}
}
| 1 | /*
* Copyright (C) 2015-2017 PÂRIS Quentin
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
package org.phoenicis.repository.types;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.commons.lang.builder.ToStringBuilder;
import org.phoenicis.configuration.localisation.Localisation;
import org.phoenicis.configuration.localisation.PropertiesResourceBundle;
import org.phoenicis.repository.RepositoryException;
import org.phoenicis.repository.dto.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.net.URI;
import java.util.*;
import java.util.stream.Collectors;
public class LocalRepository implements Repository {
private final static Logger LOGGER = LoggerFactory.getLogger(LocalRepository.class);
// file name of the icon for a type or category
private static final String ICON_NAME = "icon.png";
private final File repositoryDirectory;
private final ObjectMapper objectMapper;
private final URI repositorySource;
private LocalRepository(File repositoryDirectory, URI repositorySource, ObjectMapper objectMapper) {
this.repositoryDirectory = repositoryDirectory;
this.objectMapper = objectMapper;
this.repositorySource = repositorySource;
}
private LocalRepository(File repositoryDirectory, ObjectMapper objectMapper) {
this(repositoryDirectory, repositoryDirectory.toURI(), objectMapper);
}
@Override
public RepositoryDTO fetchInstallableApplications() {
if (!repositoryDirectory.exists()) {
throw new RepositoryException(
String.format("Repository %s directory does not exist.", repositoryDirectory));
}
final File[] typeDirectories = repositoryDirectory.listFiles();
if (typeDirectories == null) {
return new RepositoryDTO.Builder().build();
}
LOGGER.info("Reading directory : " + repositoryDirectory);
final RepositoryDTO.Builder repositoryDTOBuilder = new RepositoryDTO.Builder()
.withName(repositoryDirectory.getName()).withTypes(fetchTypes(typeDirectories));
final File i18nDirectory = new File(repositoryDirectory, "i18n");
if (i18nDirectory.exists()) {
final File[] translationFiles = i18nDirectory
.listFiles((dir, name) -> name.endsWith(Locale.getDefault().getLanguage() + ".properties"));
Properties mergedProperties = new Properties();
for (File translationFile : translationFiles) {
try {
Properties langProperties = new Properties();
langProperties.load(new FileInputStream(translationFile));
mergedProperties.putAll(langProperties);
} catch (IOException e) {
LOGGER.error("Could not read translation properties", e);
}
}
repositoryDTOBuilder.withTranslations(new TranslationDTO.Builder()
.withLanguage(Locale.getDefault().getLanguage()).withProperties(mergedProperties).build());
Localisation.setAdditionalTranslations(new PropertiesResourceBundle(mergedProperties));
}
return repositoryDTOBuilder.build();
}
private List<TypeDTO> fetchTypes(File[] typeDirectories) {
final List<TypeDTO> results = new ArrayList<>();
for (File typeDirectory : typeDirectories) {
if (typeDirectory.isDirectory() && !typeDirectory.getName().startsWith(".")) {
final File typeJson = new File(typeDirectory, "type.json");
if (typeJson.exists()) {
final TypeDTO jsonTypeDTO = unSerializeType(typeJson);
final TypeDTO.Builder typeDTOBuilder = new TypeDTO.Builder(jsonTypeDTO);
if (StringUtils.isBlank(jsonTypeDTO.getId())) {
if (!StringUtils.isBlank(jsonTypeDTO.getName())) {
typeDTOBuilder.withId(jsonTypeDTO.getName().replaceAll("[^a-zA-Z0-9]", ""));
} else {
typeDTOBuilder.withId(jsonTypeDTO.getName().replaceAll("[^a-zA-Z0-9]", ""));
}
}
final File typeIconFile = new File(typeDirectory, ICON_NAME);
if (typeIconFile.exists()) {
typeDTOBuilder.withIcon(typeIconFile.toURI());
}
typeDTOBuilder.withCategories(fetchCategories(typeDTOBuilder.getId(), typeDirectory));
final TypeDTO type = typeDTOBuilder.build();
results.add(type);
}
}
}
results.sort(Comparator.comparing(TypeDTO::getName));
return results;
}
private List<CategoryDTO> fetchCategories(String typeId, File typeDirectory) {
final File[] categoryDirectories = typeDirectory.listFiles();
if (categoryDirectories == null) {
return Collections.emptyList();
}
final List<CategoryDTO> results = new ArrayList<>();
for (File categoryDirectory : categoryDirectories) {
if (categoryDirectory.isDirectory() && !categoryDirectory.getName().startsWith(".")) {
final File categoryJson = new File(categoryDirectory, "category.json");
if (categoryJson.exists()) {
final CategoryDTO jsonCategoryDTO = unSerializeCategory(categoryJson);
final CategoryDTO.Builder categoryDTOBuilder = new CategoryDTO.Builder(jsonCategoryDTO);
categoryDTOBuilder.withTypeId(typeId);
if (StringUtils.isBlank(jsonCategoryDTO.getId())) {
if (!StringUtils.isBlank(jsonCategoryDTO.getName())) {
categoryDTOBuilder.withId(jsonCategoryDTO.getName().replaceAll("[^a-zA-Z0-9]", ""));
} else {
categoryDTOBuilder.withId(jsonCategoryDTO.getName().replaceAll("[^a-zA-Z0-9]", ""));
}
}
final File categoryIconFile = new File(categoryDirectory, ICON_NAME);
if (categoryIconFile.exists()) {
categoryDTOBuilder.withIcon(categoryIconFile.toURI());
}
categoryDTOBuilder.withApplications(fetchApplications(categoryDTOBuilder.getTypeId(),
categoryDTOBuilder.getId(), categoryDirectory));
final CategoryDTO category = categoryDTOBuilder.build();
results.add(category);
}
}
}
results.sort(Comparator.comparing(CategoryDTO::getName));
return results;
}
private List<ApplicationDTO> fetchApplications(String typeId, String categoryId, File categoryDirectory) {
final File[] applicationDirectories = categoryDirectory.listFiles();
if (applicationDirectories == null) {
return Collections.emptyList();
}
final List<ApplicationDTO> results = new ArrayList<>();
for (File applicationDirectory : applicationDirectories) {
if (applicationDirectory.isDirectory()) {
final ApplicationDTO.Builder applicationDTOBuilder;
final File applicationJson = new File(applicationDirectory, "application.json");
if (applicationJson.exists()) {
applicationDTOBuilder = new ApplicationDTO.Builder(
unSerializeApplication(applicationJson));
} else {
applicationDTOBuilder = new ApplicationDTO.Builder();
}
applicationDTOBuilder.withTypeId(typeId)
.withCategoryId(categoryId);
if (StringUtils.isBlank(applicationDTOBuilder.getId())) {
if (!StringUtils.isBlank(applicationDTOBuilder.getName())) {
applicationDTOBuilder.withId(applicationDTOBuilder.getName().replaceAll("[^a-zA-Z0-9]", ""));
} else {
applicationDTOBuilder.withId(applicationDirectory.getName().replaceAll("[^a-zA-Z0-9]", ""));
}
}
final File miniaturesDirectory = new File(applicationDirectory, "miniatures");
if (miniaturesDirectory.exists() && miniaturesDirectory.isDirectory()) {
try {
applicationDTOBuilder.withMiniatures(fetchMiniatures(miniaturesDirectory));
} catch (IOException e) {
LOGGER.warn("Unable to read miniatures", e);
}
}
applicationDTOBuilder.withScripts(fetchScripts(applicationDTOBuilder.getTypeId(),
applicationDTOBuilder.getCategoryId(), applicationDTOBuilder.getId(), applicationDirectory))
.withResources(fetchResources(applicationDirectory));
ApplicationDTO app = applicationDTOBuilder.build();
results.add(app);
}
}
Collections.sort(results, Comparator.comparing(ApplicationDTO::getName));
return results;
}
private List<URI> fetchMiniatures(File miniaturesDirectory) throws IOException {
final File[] miniatureFiles = miniaturesDirectory.listFiles();
return Arrays.stream(miniatureFiles)
.filter(miniatureFile -> !miniatureFile.isDirectory() && !miniatureFile.getName().startsWith("."))
.map(File::toURI).collect(Collectors.toList());
}
private List<ResourceDTO> fetchResources(File applicationDirectory) {
final File[] resources = new File(applicationDirectory, "resources").listFiles();
if (resources == null) {
return Collections.emptyList();
}
final List<ResourceDTO> results = new ArrayList<>();
for (File resourceFile : resources) {
if (!resourceFile.isDirectory() && !resourceFile.getName().startsWith(".")) {
try {
results.add(new ResourceDTO(resourceFile.getName(),
IOUtils.toByteArray(new FileInputStream(resourceFile))));
} catch (IOException ignored) {
}
}
}
return results;
}
private List<ScriptDTO> fetchScripts(String typeId, String categoryId, String applicationId,
File applicationDirectory) {
final File[] scriptDirectories = applicationDirectory.listFiles();
if (scriptDirectories == null) {
return Collections.emptyList();
}
final List<ScriptDTO> results = new ArrayList<>();
for (File scriptDirectory : scriptDirectories) {
if (scriptDirectory.isDirectory() && !"miniatures".equals(scriptDirectory.getName())
&& !"resources".equals(scriptDirectory.getName())) {
final ScriptDTO.Builder scriptDTOBuilder;
final File scriptJson = new File(scriptDirectory, "script.json");
if (scriptJson.exists()) {
final ScriptDTO scriptDTOFromJsonFile = unSerializeScript(scriptJson);
scriptDTOBuilder = new ScriptDTO.Builder(scriptDTOFromJsonFile);
if (StringUtils.isBlank(scriptDTOFromJsonFile.getScriptName())) {
scriptDTOBuilder.withScriptName(scriptDirectory.getName());
}
} else {
scriptDTOBuilder = new ScriptDTO.Builder();
scriptDTOBuilder.withScriptName(scriptDirectory.getName());
}
scriptDTOBuilder.withTypeId(typeId)
.withCategoryId(categoryId)
.withApplicationId(applicationId)
.withId(scriptDirectory.getName())
.withScriptSource(repositorySource);
final File scriptFile = new File(scriptDirectory, "script.js");
if (scriptFile.exists()) {
try {
scriptDTOBuilder.withScript(new String(IOUtils.toByteArray(new FileInputStream(scriptFile))));
} catch (IOException e) {
LOGGER.warn("Script not found", e);
}
}
final File iconFile = new File(scriptDirectory, "icon.png");
if (iconFile.exists()) {
scriptDTOBuilder.withIcon(iconFile.toURI());
} else {
LOGGER.debug("Icon not found");
}
results.add(scriptDTOBuilder.build());
}
}
return results;
}
private TypeDTO unSerializeType(File jsonFile) {
try {
return objectMapper.readValue(jsonFile, TypeDTO.class);
} catch (IOException e) {
LOGGER.debug("JSON file not found", e);
return new TypeDTO.Builder().build();
}
}
private CategoryDTO unSerializeCategory(File jsonFile) {
try {
return objectMapper.readValue(jsonFile, CategoryDTO.class);
} catch (IOException e) {
LOGGER.debug("JSON file not found", e);
return new CategoryDTO.Builder().build();
}
}
private ScriptDTO unSerializeScript(File jsonFile) {
try {
return objectMapper.readValue(jsonFile, ScriptDTO.class);
} catch (IOException e) {
LOGGER.debug("JSON file not found");
return new ScriptDTO.Builder().build();
}
}
private ApplicationDTO unSerializeApplication(File jsonFile) {
try {
return objectMapper.readValue(jsonFile, ApplicationDTO.class);
} catch (IOException e) {
LOGGER.debug("JSON file not found", e);
return new ApplicationDTO.Builder().build();
}
}
@Override
public String toString() {
return new ToStringBuilder(this).append("repositorySource", repositorySource)
.append("repositoryDirectory", repositoryDirectory).toString();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
LocalRepository that = (LocalRepository) o;
return new EqualsBuilder()
.append(repositoryDirectory, that.repositoryDirectory)
.isEquals();
}
@Override
public int hashCode() {
return new HashCodeBuilder()
.append(repositoryDirectory)
.toHashCode();
}
public static class Factory {
private final ObjectMapper objectMapper;
public Factory(ObjectMapper objectMapper) {
this.objectMapper = objectMapper;
}
public LocalRepository createInstance(File path) {
return new LocalRepository(path, objectMapper);
}
public LocalRepository createInstance(File path, URI source) {
return new LocalRepository(path, source, objectMapper);
}
}
}
 | 1 | 11,664 | I suggest that we move the regex (`[^a-zA-Z0-9_]`) to a separate constant field, because we're using it in multiple places and I think it's quite prone to misspellings. | PhoenicisOrg-phoenicis | java |
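A sketch of the reviewer's suggestion, written in TypeScript rather than the project's Java for consistency with the other examples here (the constant and helper names are invented): the character class lives in one named constant, so the several `withId(...)`-style call sites cannot drift apart through a typo. The Java analogue would be a `private static final` field, e.g. a precompiled `Pattern`.

```typescript
// Single authoritative definition of the id sanitizer, instead of
// repeating the character class inline at every call site.
const ID_SANITIZER = /[^a-zA-Z0-9_]/g;

// Hypothetical helper mirroring the withId(name.replaceAll(...)) calls.
function toId(name: string): string {
  return name.replace(ID_SANITIZER, "");
}

console.log(toId("My App (v2)!")); // -> "MyAppv2"
console.log(toId("wine_tricks")); // -> "wine_tricks" (underscore now survives)
```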
@@ -1,7 +1,7 @@
import logging
import time
from collections import OrderedDict, defaultdict
-from typing import Iterable, Optional
+from typing import Dict, Iterable, List, Optional, Union
from dagster import check
from dagster.core.definitions.events import AssetKey | 1 | import logging
import time
from collections import OrderedDict, defaultdict
from typing import Iterable, Optional
from dagster import check
from dagster.core.definitions.events import AssetKey
from dagster.core.events import DagsterEventType
from dagster.core.events.log import EventLogEntry
from dagster.serdes import ConfigurableClass
from .base import (
EventLogRecord,
EventLogStorage,
EventRecordsFilter,
RunShardedEventsCursor,
extract_asset_events_cursor,
)
class InMemoryEventLogStorage(EventLogStorage, ConfigurableClass):
"""
In memory only event log storage. Used by ephemeral DagsterInstance or for testing purposes.
WARNING: Dagit and other core functionality will not work if this is used on a real DagsterInstance
"""
def __init__(self, inst_data=None, preload=None):
self._logs = defaultdict(list)
self._handlers = defaultdict(set)
self._inst_data = inst_data
self._asset_tags = defaultdict(dict)
self._wiped_asset_keys = defaultdict(float)
if preload:
for payload in preload:
self._logs[payload.pipeline_run.run_id] = payload.event_list
super().__init__()
@property
def inst_data(self):
return self._inst_data
@classmethod
def config_type(cls):
return {}
@classmethod
def from_config_value(cls, inst_data, config_value):
return cls(inst_data)
def get_logs_for_run(
self,
run_id,
cursor=-1,
of_type=None,
limit=None,
):
check.str_param(run_id, "run_id")
check.int_param(cursor, "cursor")
check.invariant(
cursor >= -1,
"Don't know what to do with negative cursor {cursor}".format(cursor=cursor),
)
check.opt_inst_param(of_type, "of_type", DagsterEventType)
cursor = cursor + 1
if of_type:
events = list(
filter(
lambda r: r.is_dagster_event
and r.dagster_event.event_type_value == of_type.value,
self._logs[run_id][cursor:],
)
)
else:
events = self._logs[run_id][cursor:]
if limit:
events = events[:limit]
return events
def store_event(self, event):
check.inst_param(event, "event", EventLogEntry)
run_id = event.run_id
self._logs[run_id].append(event)
if event.is_dagster_event and event.dagster_event.asset_key:
materialization = event.dagster_event.step_materialization_data.materialization
self._asset_tags[event.dagster_event.asset_key] = materialization.tags or {}
# snapshot handlers
handlers = list(self._handlers[run_id])
for handler in handlers:
try:
handler(event)
except Exception:
logging.exception("Exception in callback for event watch on run %s.", run_id)
def delete_events(self, run_id):
del self._logs[run_id]
def upgrade(self):
pass
def reindex_events(self, print_fn=None, force=False):
pass
def reindex_assets(self, print_fn=None, force=False):
pass
def wipe(self):
self._logs = defaultdict(list)
def watch(self, run_id, _start_cursor, callback):
self._handlers[run_id].add(callback)
def end_watch(self, run_id, handler):
if handler in self._handlers[run_id]:
self._handlers[run_id].remove(handler)
@property
def is_persistent(self):
return False
def get_event_records(
self,
event_records_filter: Optional[EventRecordsFilter] = None,
limit: Optional[int] = None,
ascending: bool = False,
) -> Iterable[EventLogRecord]:
after_id = (
(
event_records_filter.after_cursor.id
if isinstance(event_records_filter.after_cursor, RunShardedEventsCursor)
else event_records_filter.after_cursor
)
if event_records_filter
else None
)
before_id = (
(
event_records_filter.before_cursor.id
if isinstance(event_records_filter.before_cursor, RunShardedEventsCursor)
else event_records_filter.before_cursor
)
if event_records_filter
else None
)
filtered_events = []
def _apply_filters(record):
if not event_records_filter:
return True
if (
event_records_filter.event_type
and record.dagster_event.event_type_value != event_records_filter.event_type.value
):
return False
if (
event_records_filter.asset_key
and record.dagster_event.asset_key != event_records_filter.asset_key
):
return False
if (
event_records_filter.asset_key
and self._wiped_asset_keys[event_records_filter.asset_key] > record.timestamp
):
return False
if (
event_records_filter.asset_partitions
and record.dagster_event.partition not in event_records_filter.asset_partitions
):
return False
if (
event_records_filter.after_timestamp
and record.timestamp >= event_records_filter.after_timestamp
):
return False
if (
event_records_filter.before_timestamp
and record.timestamp >= event_records_filter.before_timestamp
):
return False
return True
for records in self._logs.values():
filtered_events += list(filter(_apply_filters, records))
event_records = [
EventLogRecord(storage_id=event_id, event_log_entry=event)
for event_id, event in enumerate(filtered_events)
if (after_id is None or event_id > after_id)
and (before_id is None or event_id < before_id)
]
event_records = sorted(event_records, key=lambda x: x.storage_id, reverse=not ascending)
if limit:
event_records = event_records[:limit]
return event_records
def has_asset_key(self, asset_key: AssetKey) -> bool:
for records in self._logs.values():
for record in records:
if (
record.is_dagster_event
and record.dagster_event.asset_key
and record.dagster_event.asset_key == asset_key
and self._wiped_asset_keys[record.dagster_event.asset_key] < record.timestamp
):
return True
return False
def all_asset_keys(self):
asset_records = []
for records in self._logs.values():
asset_records += [
record
for record in records
if record.is_dagster_event and record.dagster_event.asset_key
]
asset_events = [
record.dagster_event
for record in sorted(asset_records, key=lambda x: x.timestamp, reverse=True)
if self._wiped_asset_keys[record.dagster_event.asset_key] < record.timestamp
]
asset_keys = OrderedDict()
for event in asset_events:
asset_keys["/".join(event.asset_key.path)] = event.asset_key
return list(asset_keys.values())
def get_asset_events(
self,
asset_key,
partitions=None,
before_cursor=None,
after_cursor=None,
limit=None,
ascending=False,
include_cursor=False,
before_timestamp=None,
cursor=None,
):
before_cursor, after_cursor = extract_asset_events_cursor(
cursor, before_cursor, after_cursor, ascending
)
event_records = self.get_event_records(
EventRecordsFilter(
asset_key=asset_key,
asset_partitions=partitions,
before_cursor=before_cursor,
after_cursor=after_cursor,
before_timestamp=before_timestamp,
),
limit=limit,
ascending=ascending,
)
if include_cursor:
return [tuple([record.storage_id, record.event_log_entry]) for record in event_records]
else:
return [record.event_log_entry for record in event_records]
def get_asset_run_ids(self, asset_key):
asset_run_ids = set()
for run_id, records in self._logs.items():
for record in records:
if (
record.is_dagster_event
and record.dagster_event.asset_key == asset_key
and self._wiped_asset_keys[record.dagster_event.asset_key] < record.timestamp
):
asset_run_ids.add(run_id)
break
return list(asset_run_ids)
def wipe_asset(self, asset_key):
check.inst_param(asset_key, "asset_key", AssetKey)
self._wiped_asset_keys[asset_key] = time.time()
if asset_key in self._asset_tags:
del self._asset_tags[asset_key]
| 1 | 17,749 | Feel free to disregard, but I've been trying to use `Mapping` and `Sequence` instead of `Dict` and `List` when possible, because they communicate that the type is immutable, and also are covariant. | dagster-io-dagster | py |
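The idea the reviewer describes, sketched in TypeScript for consistency with the other examples here (the types are invented): a `readonly` array plays the role of Python's `Sequence`, documenting that the callee never mutates its argument, which in turn makes it safe to accept the more specific element type, mirroring covariance.

```typescript
interface Animal { name: string }
interface Cat extends Animal { meows: boolean }

// Accepting a read-only view documents that the input is never mutated.
function names(animals: readonly Animal[]): string[] {
  // animals.push({ name: "stray" }); // compile error: readonly arrays have no push
  return animals.map(a => a.name);
}

const cats: Cat[] = [{ name: "Felix", meows: true }];
console.log(names(cats)); // -> ["Felix"]
```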
@@ -27,6 +27,9 @@ import (
"testing"
"time"
+ "github.com/nats-io/jwt/v2"
+ "github.com/nats-io/nkeys"
+
"github.com/nats-io/nats.go"
)
| 1 | // Copyright 2012-2020 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"crypto/tls"
"flag"
"fmt"
"io/ioutil"
"net/url"
"os"
"reflect"
"runtime"
"strings"
"testing"
"time"
"github.com/nats-io/nats.go"
)
func checkOptionsEqual(t *testing.T, golden, opts *Options) {
t.Helper()
// Clone them so we can remove private fields that we don't
// want to be compared.
goldenClone := golden.Clone()
goldenClone.inConfig, goldenClone.inCmdLine = nil, nil
optsClone := opts.Clone()
optsClone.inConfig, optsClone.inCmdLine = nil, nil
if !reflect.DeepEqual(goldenClone, optsClone) {
t.Fatalf("Options are incorrect.\nexpected: %+v\ngot: %+v", goldenClone, optsClone)
}
}
func TestDefaultOptions(t *testing.T) {
golden := &Options{
Host: DEFAULT_HOST,
Port: DEFAULT_PORT,
MaxConn: DEFAULT_MAX_CONNECTIONS,
HTTPHost: DEFAULT_HOST,
PingInterval: DEFAULT_PING_INTERVAL,
MaxPingsOut: DEFAULT_PING_MAX_OUT,
TLSTimeout: float64(TLS_TIMEOUT) / float64(time.Second),
AuthTimeout: float64(AUTH_TIMEOUT) / float64(time.Second),
MaxControlLine: MAX_CONTROL_LINE_SIZE,
MaxPayload: MAX_PAYLOAD_SIZE,
MaxPending: MAX_PENDING_SIZE,
WriteDeadline: DEFAULT_FLUSH_DEADLINE,
MaxClosedClients: DEFAULT_MAX_CLOSED_CLIENTS,
LameDuckDuration: DEFAULT_LAME_DUCK_DURATION,
LameDuckGracePeriod: DEFAULT_LAME_DUCK_GRACE_PERIOD,
LeafNode: LeafNodeOpts{
ReconnectInterval: DEFAULT_LEAF_NODE_RECONNECT,
},
ConnectErrorReports: DEFAULT_CONNECT_ERROR_REPORTS,
ReconnectErrorReports: DEFAULT_RECONNECT_ERROR_REPORTS,
MaxTracedMsgLen: 0,
JetStreamMaxMemory: -1,
JetStreamMaxStore: -1,
}
opts := &Options{}
setBaselineOptions(opts)
checkOptionsEqual(t, golden, opts)
}
func TestOptions_RandomPort(t *testing.T) {
opts := &Options{Port: RANDOM_PORT}
setBaselineOptions(opts)
if opts.Port != 0 {
t.Fatalf("Process of options should have resolved random port to "+
"zero.\nexpected: %d\ngot: %d", 0, opts.Port)
}
}
func TestConfigFile(t *testing.T) {
golden := &Options{
ConfigFile: "./configs/test.conf",
ServerName: "testing_server",
Host: "127.0.0.1",
Port: 4242,
Username: "derek",
Password: "porkchop",
AuthTimeout: 1.0,
Debug: false,
Trace: true,
Logtime: false,
HTTPPort: 8222,
HTTPBasePath: "/nats",
PidFile: "/tmp/nats-server.pid",
ProfPort: 6543,
Syslog: true,
RemoteSyslog: "udp://foo.com:33",
MaxControlLine: 2048,
MaxPayload: 65536,
MaxConn: 100,
MaxSubs: 1000,
MaxPending: 10000000,
PingInterval: 60 * time.Second,
MaxPingsOut: 3,
WriteDeadline: 3 * time.Second,
LameDuckDuration: 4 * time.Minute,
ConnectErrorReports: 86400,
ReconnectErrorReports: 5,
}
opts, err := ProcessConfigFile("./configs/test.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
checkOptionsEqual(t, golden, opts)
}
func TestTLSConfigFile(t *testing.T) {
golden := &Options{
ConfigFile: "./configs/tls.conf",
Host: "127.0.0.1",
Port: 4443,
Username: "derek",
Password: "foo",
AuthTimeout: 1.0,
TLSTimeout: 2.0,
}
opts, err := ProcessConfigFile("./configs/tls.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
tlsConfig := opts.TLSConfig
if tlsConfig == nil {
t.Fatal("Expected opts.TLSConfig to be non-nil")
}
opts.TLSConfig = nil
checkOptionsEqual(t, golden, opts)
// Now check TLSConfig a bit more closely
// CipherSuites
ciphers := defaultCipherSuites()
if !reflect.DeepEqual(tlsConfig.CipherSuites, ciphers) {
t.Fatalf("Got incorrect cipher suite list: [%+v]", tlsConfig.CipherSuites)
}
if tlsConfig.MinVersion != tls.VersionTLS12 {
t.Fatalf("Expected MinVersion of 1.2 [%v], got [%v]", tls.VersionTLS12, tlsConfig.MinVersion)
}
if !tlsConfig.PreferServerCipherSuites {
t.Fatal("Expected PreferServerCipherSuites to be true")
}
// Verify hostname is correct in certificate
if len(tlsConfig.Certificates) != 1 {
t.Fatal("Expected 1 certificate")
}
cert := tlsConfig.Certificates[0].Leaf
if err := cert.VerifyHostname("127.0.0.1"); err != nil {
t.Fatalf("Could not verify hostname in certificate: %v", err)
}
// Now test adding cipher suites.
opts, err = ProcessConfigFile("./configs/tls_ciphers.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
tlsConfig = opts.TLSConfig
if tlsConfig == nil {
t.Fatal("Expected opts.TLSConfig to be non-nil")
}
// CipherSuites listed in the config - test all of them.
ciphers = []uint16{
tls.TLS_RSA_WITH_RC4_128_SHA,
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
}
if !reflect.DeepEqual(tlsConfig.CipherSuites, ciphers) {
t.Fatalf("Got incorrect cipher suite list: [%+v]", tlsConfig.CipherSuites)
}
// Test an unrecognized/bad cipher
if _, err := ProcessConfigFile("./configs/tls_bad_cipher.conf"); err == nil {
t.Fatal("Did not receive an error from a unrecognized cipher")
}
// Test an empty cipher entry in a config file.
if _, err := ProcessConfigFile("./configs/tls_empty_cipher.conf"); err == nil {
t.Fatal("Did not receive an error from empty cipher_suites")
}
// Test a curve preference from the config.
curves := []tls.CurveID{
tls.CurveP256,
}
// test on a file that will load the curve preference defaults
opts, err = ProcessConfigFile("./configs/tls_ciphers.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
if !reflect.DeepEqual(opts.TLSConfig.CurvePreferences, defaultCurvePreferences()) {
t.Fatalf("Got incorrect curve preference list: [%+v]", tlsConfig.CurvePreferences)
}
// Test specifying a single curve preference
opts, err = ProcessConfigFile("./configs/tls_curve_prefs.conf")
if err != nil {
t.Fatal("Did not receive an error from a unrecognized cipher.")
}
if !reflect.DeepEqual(opts.TLSConfig.CurvePreferences, curves) {
t.Fatalf("Got incorrect cipher suite list: [%+v]", tlsConfig.CurvePreferences)
}
// Test an unrecognized/bad curve preference
if _, err := ProcessConfigFile("./configs/tls_bad_curve_prefs.conf"); err == nil {
t.Fatal("Did not receive an error from a unrecognized curve preference")
}
// Test an empty curve preference
if _, err := ProcessConfigFile("./configs/tls_empty_curve_prefs.conf"); err == nil {
t.Fatal("Did not receive an error from empty curve preferences")
}
}
func TestMergeOverrides(t *testing.T) {
golden := &Options{
ConfigFile: "./configs/test.conf",
ServerName: "testing_server",
Host: "127.0.0.1",
Port: 2222,
Username: "derek",
Password: "porkchop",
AuthTimeout: 1.0,
Debug: true,
Trace: true,
Logtime: false,
HTTPPort: DEFAULT_HTTP_PORT,
HTTPBasePath: DEFAULT_HTTP_BASE_PATH,
PidFile: "/tmp/nats-server.pid",
ProfPort: 6789,
Syslog: true,
RemoteSyslog: "udp://foo.com:33",
MaxControlLine: 2048,
MaxPayload: 65536,
MaxConn: 100,
MaxSubs: 1000,
MaxPending: 10000000,
PingInterval: 60 * time.Second,
MaxPingsOut: 3,
Cluster: ClusterOpts{
NoAdvertise: true,
ConnectRetries: 2,
},
WriteDeadline: 3 * time.Second,
LameDuckDuration: 4 * time.Minute,
ConnectErrorReports: 86400,
ReconnectErrorReports: 5,
}
fopts, err := ProcessConfigFile("./configs/test.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
// Overrides via flags
opts := &Options{
Port: 2222,
Password: "porkchop",
Debug: true,
HTTPPort: DEFAULT_HTTP_PORT,
HTTPBasePath: DEFAULT_HTTP_BASE_PATH,
ProfPort: 6789,
Cluster: ClusterOpts{
NoAdvertise: true,
ConnectRetries: 2,
},
}
merged := MergeOptions(fopts, opts)
checkOptionsEqual(t, golden, merged)
}
func TestRemoveSelfReference(t *testing.T) {
url1, _ := url.Parse("nats-route://user:[email protected]:4223")
url2, _ := url.Parse("nats-route://user:[email protected]:4223")
url3, _ := url.Parse("nats-route://user:[email protected]:4223")
routes := []*url.URL{url1, url2, url3}
newroutes, err := RemoveSelfReference(4223, routes)
if err != nil {
t.Fatalf("Error during RemoveSelfReference: %v", err)
}
if len(newroutes) != 1 {
t.Fatalf("Wrong number of routes: %d", len(newroutes))
}
if newroutes[0] != routes[0] {
t.Fatalf("Self reference IP address %s in Routes", routes[0])
}
}
func TestAllowRouteWithDifferentPort(t *testing.T) {
url1, _ := url.Parse("nats-route://user:[email protected]:4224")
routes := []*url.URL{url1}
newroutes, err := RemoveSelfReference(4223, routes)
if err != nil {
t.Fatalf("Error during RemoveSelfReference: %v", err)
}
if len(newroutes) != 1 {
t.Fatalf("Wrong number of routes: %d", len(newroutes))
}
}
func TestRouteFlagOverride(t *testing.T) {
routeFlag := "nats-route://ruser:[email protected]:8246"
rurl, _ := url.Parse(routeFlag)
golden := &Options{
ConfigFile: "./configs/srv_a.conf",
Host: "127.0.0.1",
Port: 7222,
Cluster: ClusterOpts{
Name: "abc",
Host: "127.0.0.1",
Port: 7244,
Username: "ruser",
Password: "top_secret",
AuthTimeout: 0.5,
},
Routes: []*url.URL{rurl},
RoutesStr: routeFlag,
}
fopts, err := ProcessConfigFile("./configs/srv_a.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
// Overrides via flags
opts := &Options{
RoutesStr: routeFlag,
}
merged := MergeOptions(fopts, opts)
checkOptionsEqual(t, golden, merged)
}
func TestClusterFlagsOverride(t *testing.T) {
routeFlag := "nats-route://ruser:[email protected]:7246"
rurl, _ := url.Parse(routeFlag)
    // In this test, we override the cluster listen string. Note that in
    // the golden options, the other cluster fields correspond to what
    // is recovered from the configuration file, which explains the
    // discrepancy between ClusterListenStr and the rest.
    // The server would then process the ClusterListenStr override and
    // correctly override ClusterHost/ClusterPort/etc.
golden := &Options{
ConfigFile: "./configs/srv_a.conf",
Host: "127.0.0.1",
Port: 7222,
Cluster: ClusterOpts{
Name: "abc",
Host: "127.0.0.1",
Port: 7244,
ListenStr: "nats://127.0.0.1:8224",
Username: "ruser",
Password: "top_secret",
AuthTimeout: 0.5,
},
Routes: []*url.URL{rurl},
}
fopts, err := ProcessConfigFile("./configs/srv_a.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
// Overrides via flags
opts := &Options{
Cluster: ClusterOpts{
ListenStr: "nats://127.0.0.1:8224",
},
}
merged := MergeOptions(fopts, opts)
checkOptionsEqual(t, golden, merged)
}
func TestRouteFlagOverrideWithMultiple(t *testing.T) {
routeFlag := "nats-route://ruser:[email protected]:8246, nats-route://ruser:[email protected]:8266"
rurls := RoutesFromStr(routeFlag)
golden := &Options{
ConfigFile: "./configs/srv_a.conf",
Host: "127.0.0.1",
Port: 7222,
Cluster: ClusterOpts{
Host: "127.0.0.1",
Name: "abc",
Port: 7244,
Username: "ruser",
Password: "top_secret",
AuthTimeout: 0.5,
},
Routes: rurls,
RoutesStr: routeFlag,
}
fopts, err := ProcessConfigFile("./configs/srv_a.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
// Overrides via flags
opts := &Options{
RoutesStr: routeFlag,
}
merged := MergeOptions(fopts, opts)
checkOptionsEqual(t, golden, merged)
}
func TestDynamicPortOnListen(t *testing.T) {
opts, err := ProcessConfigFile("./configs/listen-1.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
if opts.Port != -1 {
t.Fatalf("Received incorrect port %v, expected -1", opts.Port)
}
if opts.HTTPPort != -1 {
t.Fatalf("Received incorrect monitoring port %v, expected -1", opts.HTTPPort)
}
if opts.HTTPSPort != -1 {
t.Fatalf("Received incorrect secure monitoring port %v, expected -1", opts.HTTPSPort)
}
}
func TestListenConfig(t *testing.T) {
opts, err := ProcessConfigFile("./configs/listen.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
setBaselineOptions(opts)
// Normal clients
host := "10.0.1.22"
port := 4422
monHost := "127.0.0.1"
if opts.Host != host {
t.Fatalf("Received incorrect host %q, expected %q", opts.Host, host)
}
if opts.HTTPHost != monHost {
t.Fatalf("Received incorrect host %q, expected %q", opts.HTTPHost, monHost)
}
if opts.Port != port {
t.Fatalf("Received incorrect port %v, expected %v", opts.Port, port)
}
// Clustering
clusterHost := "127.0.0.1"
clusterPort := 4244
if opts.Cluster.Host != clusterHost {
t.Fatalf("Received incorrect cluster host %q, expected %q", opts.Cluster.Host, clusterHost)
}
if opts.Cluster.Port != clusterPort {
t.Fatalf("Received incorrect cluster port %v, expected %v", opts.Cluster.Port, clusterPort)
}
// HTTP
httpHost := "127.0.0.1"
httpPort := 8422
if opts.HTTPHost != httpHost {
t.Fatalf("Received incorrect http host %q, expected %q", opts.HTTPHost, httpHost)
}
if opts.HTTPPort != httpPort {
t.Fatalf("Received incorrect http port %v, expected %v", opts.HTTPPort, httpPort)
}
// HTTPS
httpsPort := 9443
if opts.HTTPSPort != httpsPort {
t.Fatalf("Received incorrect https port %v, expected %v", opts.HTTPSPort, httpsPort)
}
}
func TestListenPortOnlyConfig(t *testing.T) {
opts, err := ProcessConfigFile("./configs/listen_port.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
setBaselineOptions(opts)
port := 8922
if opts.Host != DEFAULT_HOST {
t.Fatalf("Received incorrect host %q, expected %q", opts.Host, DEFAULT_HOST)
}
if opts.HTTPHost != DEFAULT_HOST {
t.Fatalf("Received incorrect host %q, expected %q", opts.Host, DEFAULT_HOST)
}
if opts.Port != port {
t.Fatalf("Received incorrect port %v, expected %v", opts.Port, port)
}
}
func TestListenPortWithColonConfig(t *testing.T) {
opts, err := ProcessConfigFile("./configs/listen_port_with_colon.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
setBaselineOptions(opts)
port := 8922
if opts.Host != DEFAULT_HOST {
t.Fatalf("Received incorrect host %q, expected %q", opts.Host, DEFAULT_HOST)
}
if opts.HTTPHost != DEFAULT_HOST {
t.Fatalf("Received incorrect host %q, expected %q", opts.Host, DEFAULT_HOST)
}
if opts.Port != port {
t.Fatalf("Received incorrect port %v, expected %v", opts.Port, port)
}
}
func TestListenMonitoringDefault(t *testing.T) {
opts := &Options{
Host: "10.0.1.22",
}
setBaselineOptions(opts)
host := "10.0.1.22"
if opts.Host != host {
t.Fatalf("Received incorrect host %q, expected %q", opts.Host, host)
}
if opts.HTTPHost != host {
t.Fatalf("Received incorrect host %q, expected %q", opts.Host, host)
}
if opts.Port != DEFAULT_PORT {
t.Fatalf("Received incorrect port %v, expected %v", opts.Port, DEFAULT_PORT)
}
}
func TestMultipleUsersConfig(t *testing.T) {
opts, err := ProcessConfigFile("./configs/multiple_users.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
setBaselineOptions(opts)
}
// This test depends heavily on the contents of the config file listed below. Any changes to that file
// may very well break this test.
func TestAuthorizationConfig(t *testing.T) {
opts, err := ProcessConfigFile("./configs/authorization.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
setBaselineOptions(opts)
lu := len(opts.Users)
if lu != 5 {
t.Fatalf("Expected 5 users, got %d", lu)
}
// Build a map
mu := make(map[string]*User)
for _, u := range opts.Users {
mu[u.Username] = u
}
// Alice
alice, ok := mu["alice"]
if !ok {
t.Fatalf("Expected to see user Alice")
}
// Check for permissions details
if alice.Permissions == nil {
t.Fatalf("Expected Alice's permissions to be non-nil")
}
if alice.Permissions.Publish == nil {
t.Fatalf("Expected Alice's publish permissions to be non-nil")
}
if len(alice.Permissions.Publish.Allow) != 1 {
t.Fatalf("Expected Alice's publish permissions to have 1 element, got %d",
len(alice.Permissions.Publish.Allow))
}
pubPerm := alice.Permissions.Publish.Allow[0]
if pubPerm != "*" {
t.Fatalf("Expected Alice's publish permissions to be '*', got %q", pubPerm)
}
if alice.Permissions.Subscribe == nil {
t.Fatalf("Expected Alice's subscribe permissions to be non-nil")
}
if len(alice.Permissions.Subscribe.Allow) != 1 {
t.Fatalf("Expected Alice's subscribe permissions to have 1 element, got %d",
len(alice.Permissions.Subscribe.Allow))
}
subPerm := alice.Permissions.Subscribe.Allow[0]
if subPerm != ">" {
t.Fatalf("Expected Alice's subscribe permissions to be '>', got %q", subPerm)
}
// Bob
bob, ok := mu["bob"]
if !ok {
t.Fatalf("Expected to see user Bob")
}
if bob.Permissions == nil {
t.Fatalf("Expected Bob's permissions to be non-nil")
}
// Susan
susan, ok := mu["susan"]
if !ok {
t.Fatalf("Expected to see user Susan")
}
if susan.Permissions == nil {
t.Fatalf("Expected Susan's permissions to be non-nil")
}
// Check susan closely since she inherited the default permissions.
if susan.Permissions.Publish != nil {
t.Fatalf("Expected Susan's publish permissions to be nil")
}
if susan.Permissions.Subscribe == nil {
t.Fatalf("Expected Susan's subscribe permissions to be non-nil")
}
if len(susan.Permissions.Subscribe.Allow) != 1 {
t.Fatalf("Expected Susan's subscribe permissions to have 1 element, got %d",
len(susan.Permissions.Subscribe.Allow))
}
subPerm = susan.Permissions.Subscribe.Allow[0]
if subPerm != "PUBLIC.>" {
t.Fatalf("Expected Susan's subscribe permissions to be 'PUBLIC.>', got %q", subPerm)
}
// Service A
svca, ok := mu["svca"]
if !ok {
t.Fatalf("Expected to see user Service A")
}
if svca.Permissions == nil {
t.Fatalf("Expected Service A's permissions to be non-nil")
}
if svca.Permissions.Subscribe == nil {
t.Fatalf("Expected Service A's subscribe permissions to be non-nil")
}
if len(svca.Permissions.Subscribe.Allow) != 1 {
t.Fatalf("Expected Service A's subscribe permissions to have 1 element, got %d",
len(svca.Permissions.Subscribe.Allow))
}
subPerm = svca.Permissions.Subscribe.Allow[0]
if subPerm != "my.service.req" {
t.Fatalf("Expected Service A's subscribe permissions to be 'my.service.req', got %q", subPerm)
}
// We want allow_responses to essentially set deny all, or allow none in this case.
if svca.Permissions.Publish == nil {
t.Fatalf("Expected Service A's publish permissions to be non-nil")
}
if len(svca.Permissions.Publish.Allow) != 0 {
t.Fatalf("Expected Service A's publish permissions to have no elements, got %d",
len(svca.Permissions.Publish.Allow))
}
// We should have a ResponsePermission present with default values.
if svca.Permissions.Response == nil {
t.Fatalf("Expected Service A's response permissions to be non-nil")
}
if svca.Permissions.Response.MaxMsgs != DEFAULT_ALLOW_RESPONSE_MAX_MSGS {
t.Fatalf("Expected Service A's response permissions of max msgs to be %d, got %d",
DEFAULT_ALLOW_RESPONSE_MAX_MSGS, svca.Permissions.Response.MaxMsgs,
)
}
if svca.Permissions.Response.Expires != DEFAULT_ALLOW_RESPONSE_EXPIRATION {
t.Fatalf("Expected Service A's response permissions of expiration to be %v, got %v",
DEFAULT_ALLOW_RESPONSE_EXPIRATION, svca.Permissions.Response.Expires,
)
}
// Service B
svcb, ok := mu["svcb"]
if !ok {
t.Fatalf("Expected to see user Service B")
}
if svcb.Permissions == nil {
t.Fatalf("Expected Service B's permissions to be non-nil")
}
if svcb.Permissions.Subscribe == nil {
t.Fatalf("Expected Service B's subscribe permissions to be non-nil")
}
if len(svcb.Permissions.Subscribe.Allow) != 1 {
t.Fatalf("Expected Service B's subscribe permissions to have 1 element, got %d",
len(svcb.Permissions.Subscribe.Allow))
}
subPerm = svcb.Permissions.Subscribe.Allow[0]
if subPerm != "my.service.req" {
t.Fatalf("Expected Service B's subscribe permissions to be 'my.service.req', got %q", subPerm)
}
// We want allow_responses to essentially set deny all, or allow none in this case.
if svcb.Permissions.Publish == nil {
t.Fatalf("Expected Service B's publish permissions to be non-nil")
}
if len(svcb.Permissions.Publish.Allow) != 0 {
t.Fatalf("Expected Service B's publish permissions to have no elements, got %d",
len(svcb.Permissions.Publish.Allow))
}
// We should have a ResponsePermission present with default values.
if svcb.Permissions.Response == nil {
t.Fatalf("Expected Service B's response permissions to be non-nil")
}
if svcb.Permissions.Response.MaxMsgs != 10 {
t.Fatalf("Expected Service B's response permissions of max msgs to be %d, got %d",
10, svcb.Permissions.Response.MaxMsgs,
)
}
if svcb.Permissions.Response.Expires != time.Minute {
t.Fatalf("Expected Service B's response permissions of expiration to be %v, got %v",
time.Minute, svcb.Permissions.Response.Expires,
)
}
}
// This test depends heavily on the contents of the config file listed below. Any changes to that file
// may very well break this test.
func TestNewStyleAuthorizationConfig(t *testing.T) {
opts, err := ProcessConfigFile("./configs/new_style_authorization.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
setBaselineOptions(opts)
lu := len(opts.Users)
if lu != 2 {
t.Fatalf("Expected 2 users, got %d", lu)
}
// Build a map
mu := make(map[string]*User)
for _, u := range opts.Users {
mu[u.Username] = u
}
// Alice
alice, ok := mu["alice"]
if !ok {
t.Fatalf("Expected to see user Alice")
}
if alice.Permissions == nil {
t.Fatalf("Expected Alice's permissions to be non-nil")
}
if alice.Permissions.Publish == nil {
t.Fatalf("Expected Alice's publish permissions to be non-nil")
}
if len(alice.Permissions.Publish.Allow) != 3 {
t.Fatalf("Expected Alice's allowed publish permissions to have 3 elements, got %d",
len(alice.Permissions.Publish.Allow))
}
pubPerm := alice.Permissions.Publish.Allow[0]
if pubPerm != "foo" {
t.Fatalf("Expected Alice's first allowed publish permission to be 'foo', got %q", pubPerm)
}
pubPerm = alice.Permissions.Publish.Allow[1]
if pubPerm != "bar" {
t.Fatalf("Expected Alice's second allowed publish permission to be 'bar', got %q", pubPerm)
}
pubPerm = alice.Permissions.Publish.Allow[2]
if pubPerm != "baz" {
t.Fatalf("Expected Alice's third allowed publish permission to be 'baz', got %q", pubPerm)
}
if len(alice.Permissions.Publish.Deny) != 0 {
t.Fatalf("Expected Alice's denied publish permissions to have 0 elements, got %d",
len(alice.Permissions.Publish.Deny))
}
if alice.Permissions.Subscribe == nil {
t.Fatalf("Expected Alice's subscribe permissions to be non-nil")
}
if len(alice.Permissions.Subscribe.Allow) != 0 {
t.Fatalf("Expected Alice's allowed subscribe permissions to have 0 elements, got %d",
len(alice.Permissions.Subscribe.Allow))
}
if len(alice.Permissions.Subscribe.Deny) != 1 {
t.Fatalf("Expected Alice's denied subscribe permissions to have 1 element, got %d",
len(alice.Permissions.Subscribe.Deny))
}
subPerm := alice.Permissions.Subscribe.Deny[0]
if subPerm != "$SYS.>" {
t.Fatalf("Expected Alice's only denied subscribe permission to be '$SYS.>', got %q", subPerm)
}
// Bob
bob, ok := mu["bob"]
if !ok {
t.Fatalf("Expected to see user Bob")
}
if bob.Permissions == nil {
t.Fatalf("Expected Bob's permissions to be non-nil")
}
if bob.Permissions.Publish == nil {
t.Fatalf("Expected Bobs's publish permissions to be non-nil")
}
if len(bob.Permissions.Publish.Allow) != 1 {
t.Fatalf("Expected Bob's allowed publish permissions to have 1 element, got %d",
len(bob.Permissions.Publish.Allow))
}
pubPerm = bob.Permissions.Publish.Allow[0]
if pubPerm != "$SYS.>" {
t.Fatalf("Expected Bob's first allowed publish permission to be '$SYS.>', got %q", pubPerm)
}
if len(bob.Permissions.Publish.Deny) != 0 {
t.Fatalf("Expected Bob's denied publish permissions to have 0 elements, got %d",
len(bob.Permissions.Publish.Deny))
}
if bob.Permissions.Subscribe == nil {
t.Fatalf("Expected Bob's subscribe permissions to be non-nil")
}
if len(bob.Permissions.Subscribe.Allow) != 0 {
t.Fatalf("Expected Bob's allowed subscribe permissions to have 0 elements, got %d",
len(bob.Permissions.Subscribe.Allow))
}
if len(bob.Permissions.Subscribe.Deny) != 3 {
t.Fatalf("Expected Bobs's denied subscribe permissions to have 3 elements, got %d",
len(bob.Permissions.Subscribe.Deny))
}
subPerm = bob.Permissions.Subscribe.Deny[0]
if subPerm != "foo" {
t.Fatalf("Expected Bobs's first denied subscribe permission to be 'foo', got %q", subPerm)
}
subPerm = bob.Permissions.Subscribe.Deny[1]
if subPerm != "bar" {
t.Fatalf("Expected Bobs's second denied subscribe permission to be 'bar', got %q", subPerm)
}
subPerm = bob.Permissions.Subscribe.Deny[2]
if subPerm != "baz" {
t.Fatalf("Expected Bobs's third denied subscribe permission to be 'baz', got %q", subPerm)
}
}
// Test new nkey users
func TestNkeyUsersConfig(t *testing.T) {
confFileName := createConfFile(t, []byte(`
authorization {
users = [
{nkey: "UDKTV7HZVYJFJN64LLMYQBUR6MTNNYCDC3LAZH4VHURW3GZLL3FULBXV"}
{nkey: "UA3C5TBZYK5GJQJRWPMU6NFY5JNAEVQB2V2TUZFZDHFJFUYVKTTUOFKZ"}
]
}`))
defer os.Remove(confFileName)
opts, err := ProcessConfigFile(confFileName)
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
lu := len(opts.Nkeys)
if lu != 2 {
t.Fatalf("Expected 2 nkey users, got %d", lu)
}
}
func TestNkeyUsersDefaultPermissionsConfig(t *testing.T) {
confFileName := createConfFile(t, []byte(`
authorization {
default_permissions = {
publish = "foo"
}
users = [
{ user: "user", password: "pwd"}
{ user: "other", password: "pwd",
permissions = {
subscribe = "bar"
}
}
{ nkey: "UDKTV7HZVYJFJN64LLMYQBUR6MTNNYCDC3LAZH4VHURW3GZLL3FULBXV" }
{ nkey: "UA3C5TBZYK5GJQJRWPMU6NFY5JNAEVQB2V2TUZFZDHFJFUYVKTTUOFKZ",
permissions = {
subscribe = "bar"
}
}
]
}
accounts {
A {
default_permissions = {
publish = "foo"
}
users = [
{ user: "accuser", password: "pwd"}
{ user: "accother", password: "pwd",
permissions = {
subscribe = "bar"
}
}
{ nkey: "UC4YEYJHYKTU4LHROX7UEKEIO5RP5OUWDYXELHWXZOQHZYXHUD44LCRS" }
{ nkey: "UDLSDF4UY3YW7JJQCYE6T2D4KFDCH6RGF3R65KHK247G3POJPI27VMQ3",
permissions = {
subscribe = "bar"
}
}
]
}
}
`))
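// checkPerms verifies that the user relying on default permissions only has
// the publish allow, while the user with explicit permissions only has the
// subscribe allow.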
checkPerms := func(permsDef *Permissions, permsNonDef *Permissions) {
if permsDef.Publish.Allow[0] != "foo" {
t.Fatal("Publish allow foo missing")
} else if permsDef.Subscribe != nil {
t.Fatal("Has unexpected Subscribe permission")
} else if permsNonDef.Subscribe.Allow[0] != "bar" {
t.Fatal("Subscribe allow bar missing")
} else if permsNonDef.Publish != nil {
t.Fatal("Has unexpected Publish permission")
}
}
defer os.Remove(confFileName)
opts, err := ProcessConfigFile(confFileName)
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
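// findUsers returns the opts.Users entries matching the given usernames, in order.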
findUsers := func(u1, u2 string) (found []*User) {
find := []string{u1, u2}
for _, f := range find {
for _, u := range opts.Users {
if u.Username == f {
found = append(found, u)
break
}
}
}
return
}
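// findNkeyUsers returns the opts.Nkeys entries whose keys start with the given prefixes, in order.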
findNkeyUsers := func(nk1, nk2 string) (found []*NkeyUser) {
find := []string{nk1, nk2}
for _, f := range find {
for _, u := range opts.Nkeys {
if strings.HasPrefix(u.Nkey, f) {
found = append(found, u)
break
}
}
}
return
}
if lu := len(opts.Users); lu != 4 {
t.Fatalf("Expected 4 nkey users, got %d", lu)
}
foundU := findUsers("user", "other")
checkPerms(foundU[0].Permissions, foundU[1].Permissions)
foundU = findUsers("accuser", "accother")
checkPerms(foundU[0].Permissions, foundU[1].Permissions)
if lu := len(opts.Nkeys); lu != 4 {
t.Fatalf("Expected 4 nkey users, got %d", lu)
}
foundNk := findNkeyUsers("UDK", "UA3")
checkPerms(foundNk[0].Permissions, foundNk[1].Permissions)
foundNk = findNkeyUsers("UC4", "UDL")
checkPerms(foundNk[0].Permissions, foundNk[1].Permissions)
}
func TestNkeyUsersWithPermsConfig(t *testing.T) {
confFileName := createConfFile(t, []byte(`
authorization {
users = [
{nkey: "UDKTV7HZVYJFJN64LLMYQBUR6MTNNYCDC3LAZH4VHURW3GZLL3FULBXV",
permissions = {
publish = "$SYS.>"
subscribe = { deny = ["foo", "bar", "baz"] }
}
}
]
}`))
defer os.Remove(confFileName)
opts, err := ProcessConfigFile(confFileName)
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
lu := len(opts.Nkeys)
if lu != 1 {
t.Fatalf("Expected 1 nkey user, got %d", lu)
}
nk := opts.Nkeys[0]
if nk.Permissions == nil {
t.Fatal("Expected to have permissions")
}
if nk.Permissions.Publish == nil {
t.Fatal("Expected to have publish permissions")
}
if nk.Permissions.Publish.Allow[0] != "$SYS.>" {
t.Fatalf("Expected publish to allow \"$SYS.>\", but got %v", nk.Permissions.Publish.Allow[0])
}
if nk.Permissions.Subscribe == nil {
t.Fatal("Expected to have subscribe permissions")
}
if nk.Permissions.Subscribe.Allow != nil {
t.Fatal("Expected to have no subscribe allow permissions")
}
deny := nk.Permissions.Subscribe.Deny
if deny == nil || len(deny) != 3 ||
deny[0] != "foo" || deny[1] != "bar" || deny[2] != "baz" {
t.Fatalf("Expected to have subscribe deny permissions, got %v", deny)
}
}
func TestBadNkeyConfig(t *testing.T) {
confFileName := "nkeys_bad.conf"
defer os.Remove(confFileName)
content := `
authorization {
users = [ {nkey: "Ufoo"}]
}`
if err := ioutil.WriteFile(confFileName, []byte(content), 0666); err != nil {
t.Fatalf("Error writing config file: %v", err)
}
if _, err := ProcessConfigFile(confFileName); err == nil {
t.Fatalf("Expected an error from nkey entry with password")
}
}
func TestNkeyWithPassConfig(t *testing.T) {
confFileName := "nkeys_pass.conf"
defer os.Remove(confFileName)
content := `
authorization {
users = [
{nkey: "UDKTV7HZVYJFJN64LLMYQBUR6MTNNYCDC3LAZH4VHURW3GZLL3FULBXV", pass: "foo"}
]
}`
if err := ioutil.WriteFile(confFileName, []byte(content), 0666); err != nil {
t.Fatalf("Error writing config file: %v", err)
}
if _, err := ProcessConfigFile(confFileName); err == nil {
t.Fatalf("Expected an error from bad nkey entry")
}
}
func TestTokenWithUserPass(t *testing.T) {
confFileName := "test.conf"
defer os.Remove(confFileName)
content := `
authorization={
user: user
pass: password
token: $2a$11$whatever
}`
if err := ioutil.WriteFile(confFileName, []byte(content), 0666); err != nil {
t.Fatalf("Error writing config file: %v", err)
}
_, err := ProcessConfigFile(confFileName)
if err == nil {
t.Fatal("Expected error, got none")
}
if !strings.Contains(err.Error(), "token") {
t.Fatalf("Expected error related to token, got %v", err)
}
}
func TestTokenWithUsers(t *testing.T) {
confFileName := "test.conf"
defer os.Remove(confFileName)
content := `
authorization={
token: $2a$11$whatever
users: [
{user: test, password: test}
]
}`
if err := ioutil.WriteFile(confFileName, []byte(content), 0666); err != nil {
t.Fatalf("Error writing config file: %v", err)
}
_, err := ProcessConfigFile(confFileName)
if err == nil {
t.Fatal("Expected error, got none")
}
if !strings.Contains(err.Error(), "token") {
t.Fatalf("Expected error related to token, got %v", err)
}
}
func TestParseWriteDeadline(t *testing.T) {
confFile := "test.conf"
defer os.Remove(confFile)
if err := ioutil.WriteFile(confFile, []byte("write_deadline: \"1x\""), 0666); err != nil {
t.Fatalf("Error writing config file: %v", err)
}
_, err := ProcessConfigFile(confFile)
if err == nil {
t.Fatal("Expected error, got none")
}
if !strings.Contains(err.Error(), "parsing") {
t.Fatalf("Expected error related to parsing, got %v", err)
}
os.Remove(confFile)
if err := ioutil.WriteFile(confFile, []byte("write_deadline: \"1s\""), 0666); err != nil {
t.Fatalf("Error writing config file: %v", err)
}
opts, err := ProcessConfigFile(confFile)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if opts.WriteDeadline != time.Second {
t.Fatalf("Expected write_deadline to be 1s, got %v", opts.WriteDeadline)
}
os.Remove(confFile)
oldStdout := os.Stdout
_, w, _ := os.Pipe()
defer func() {
w.Close()
os.Stdout = oldStdout
}()
os.Stdout = w
if err := ioutil.WriteFile(confFile, []byte("write_deadline: 2"), 0666); err != nil {
t.Fatalf("Error writing config file: %v", err)
}
opts, err = ProcessConfigFile(confFile)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if opts.WriteDeadline != 2*time.Second {
t.Fatalf("Expected write_deadline to be 2s, got %v", opts.WriteDeadline)
}
}
func TestOptionsClone(t *testing.T) {
opts := &Options{
ConfigFile: "./configs/test.conf",
Host: "127.0.0.1",
Port: 2222,
Username: "derek",
Password: "porkchop",
AuthTimeout: 1.0,
Debug: true,
Trace: true,
Logtime: false,
HTTPPort: DEFAULT_HTTP_PORT,
HTTPBasePath: DEFAULT_HTTP_BASE_PATH,
PidFile: "/tmp/nats-server.pid",
ProfPort: 6789,
Syslog: true,
RemoteSyslog: "udp://foo.com:33",
MaxControlLine: 2048,
MaxPayload: 65536,
MaxConn: 100,
PingInterval: 60 * time.Second,
MaxPingsOut: 3,
Cluster: ClusterOpts{
NoAdvertise: true,
ConnectRetries: 2,
},
Gateway: GatewayOpts{
Name: "A",
Gateways: []*RemoteGatewayOpts{
{Name: "B", URLs: []*url.URL{{Scheme: "nats", Host: "host:5222"}}},
{Name: "C"},
},
},
WriteDeadline: 3 * time.Second,
Routes: []*url.URL{{}},
Users: []*User{{Username: "foo", Password: "bar"}},
}
clone := opts.Clone()
if !reflect.DeepEqual(opts, clone) {
t.Fatalf("Cloned Options are incorrect.\nexpected: %+v\ngot: %+v",
opts, clone)
}
clone.Users[0].Password = "baz"
if reflect.DeepEqual(opts, clone) {
t.Fatal("Expected Options to be different")
}
opts.Gateway.Gateways[0].URLs[0] = nil
if reflect.DeepEqual(opts.Gateway.Gateways[0], clone.Gateway.Gateways[0]) {
t.Fatal("Expected Options to be different")
}
if clone.Gateway.Gateways[0].URLs[0].Host != "host:5222" {
t.Fatalf("Unexpected URL: %v", clone.Gateway.Gateways[0].URLs[0])
}
}
func TestOptionsCloneNilLists(t *testing.T) {
opts := &Options{}
clone := opts.Clone()
if clone.Routes != nil {
t.Fatalf("Expected Routes to be nil, got: %v", clone.Routes)
}
if clone.Users != nil {
t.Fatalf("Expected Users to be nil, got: %v", clone.Users)
}
}
func TestOptionsCloneNil(t *testing.T) {
opts := (*Options)(nil)
clone := opts.Clone()
if clone != nil {
t.Fatalf("Expected nil, got: %+v", clone)
}
}
func TestEmptyConfig(t *testing.T) {
opts, err := ProcessConfigFile("")
if err != nil {
t.Fatalf("Expected no error from empty config, got: %+v", err)
}
if opts.ConfigFile != "" {
t.Fatalf("Expected empty config, got: %+v", opts)
}
}
func TestMalformedListenAddress(t *testing.T) {
opts, err := ProcessConfigFile("./configs/malformed_listen_address.conf")
if err == nil {
t.Fatalf("Expected an error reading config file: got %+v", opts)
}
}
func TestMalformedClusterAddress(t *testing.T) {
opts, err := ProcessConfigFile("./configs/malformed_cluster_address.conf")
if err == nil {
t.Fatalf("Expected an error reading config file: got %+v", opts)
}
}
func TestPanic(t *testing.T) {
conf := createConfFile(t, []byte(`port: "this_string_trips_a_panic"`))
defer os.Remove(conf)
opts, err := ProcessConfigFile(conf)
if err == nil {
t.Fatalf("Expected an error reading config file: got %+v", opts)
} else {
if !strings.Contains(err.Error(), ":1:0: interface conversion:") {
t.Fatalf("This was supposed to trip a panic on interface conversion right at the beginning")
}
}
}
func TestPingIntervalOld(t *testing.T) {
conf := createConfFile(t, []byte(`ping_interval: 5`))
defer os.Remove(conf)
opts := &Options{}
err := opts.ProcessConfigFile(conf)
if err == nil {
t.Fatalf("expected an error")
}
errTyped, ok := err.(*processConfigErr)
if !ok {
t.Fatalf("expected an error of type processConfigErr")
}
if len(errTyped.warnings) != 1 {
t.Fatalf("expected processConfigErr to have one warning")
}
if len(errTyped.errors) != 0 {
t.Fatalf("expected processConfigErr to have no error")
}
if opts.PingInterval != 5*time.Second {
t.Fatalf("expected ping interval to be 5 seconds")
}
}
func TestPingIntervalNew(t *testing.T) {
conf := createConfFile(t, []byte(`ping_interval: "5m"`))
defer os.Remove(conf)
opts := &Options{}
if err := opts.ProcessConfigFile(conf); err != nil {
t.Fatalf("expected no error")
}
if opts.PingInterval != 5*time.Minute {
t.Fatalf("expected ping interval to be 5 minutes")
}
}
func TestOptionsProcessConfigFile(t *testing.T) {
// Create options with default values of Debug and Trace
// that are the opposite of what is in the config file.
// Set another option that is not present in the config file.
logFileName := "test.log"
opts := &Options{
Debug: true,
Trace: false,
LogFile: logFileName,
}
configFileName := "./configs/test.conf"
if err := opts.ProcessConfigFile(configFileName); err != nil {
t.Fatalf("Error processing config file: %v", err)
}
// Verify that values are as expected
if opts.ConfigFile != configFileName {
t.Fatalf("Expected ConfigFile to be set to %q, got %v", configFileName, opts.ConfigFile)
}
if opts.Debug {
t.Fatal("Debug option should have been set to false from config file")
}
if !opts.Trace {
t.Fatal("Trace option should have been set to true from config file")
}
if opts.LogFile != logFileName {
t.Fatalf("Expected LogFile to be %q, got %q", logFileName, opts.LogFile)
}
}
func TestConfigureOptions(t *testing.T) {
// Options.Configure() will snapshot the flags. This is used by the reload code.
// We need to set it back to nil, otherwise it will impact reload tests.
defer func() { FlagSnapshot = nil }()
ch := make(chan bool, 1)
checkPrintInvoked := func() {
ch <- true
}
usage := func() { panic("should not get there") }
var fs *flag.FlagSet
type testPrint struct {
args []string
version, help, tlsHelp func()
}
testFuncs := []testPrint{
{[]string{"-v"}, checkPrintInvoked, usage, PrintTLSHelpAndDie},
{[]string{"version"}, checkPrintInvoked, usage, PrintTLSHelpAndDie},
{[]string{"-h"}, PrintServerAndExit, checkPrintInvoked, PrintTLSHelpAndDie},
{[]string{"help"}, PrintServerAndExit, checkPrintInvoked, PrintTLSHelpAndDie},
{[]string{"-help_tls"}, PrintServerAndExit, usage, checkPrintInvoked},
}
for _, tf := range testFuncs {
fs = flag.NewFlagSet("test", flag.ContinueOnError)
opts, err := ConfigureOptions(fs, tf.args, tf.version, tf.help, tf.tlsHelp)
if err != nil {
t.Fatalf("Error on configure: %v", err)
}
if opts != nil {
t.Fatalf("Expected options to be nil, got %v", opts)
}
select {
case <-ch:
case <-time.After(time.Second):
t.Fatalf("Should have invoked print function for args=%v", tf.args)
}
}
// Helper function that expects parsing with the given args to not produce an error.
mustNotFail := func(args []string) *Options {
fs := flag.NewFlagSet("test", flag.ContinueOnError)
opts, err := ConfigureOptions(fs, args, PrintServerAndExit, fs.Usage, PrintTLSHelpAndDie)
if err != nil {
stackFatalf(t, "Error on configure: %v", err)
}
return opts
}
// Helper function that expects configuration to fail.
expectToFail := func(args []string, errContent ...string) {
fs := flag.NewFlagSet("test", flag.ContinueOnError)
// Silence the flagSet so that on failure nothing is printed.
// (flagSet would print error messages about unknown flags, etc.)
silencedOutput := &bytes.Buffer{}
fs.SetOutput(silencedOutput)
opts, err := ConfigureOptions(fs, args, PrintServerAndExit, fs.Usage, PrintTLSHelpAndDie)
if opts != nil || err == nil {
stackFatalf(t, "Expected no option and an error, got opts=%v and err=%v", opts, err)
}
for _, testErr := range errContent {
if strings.Contains(err.Error(), testErr) {
// We got the error we wanted.
return
}
}
stackFatalf(t, "Expected errors containing any of those %v, got %v", errContent, err)
}
// Basic test with port number
opts := mustNotFail([]string{"-p", "1234"})
if opts.Port != 1234 {
t.Fatalf("Expected port to be 1234, got %v", opts.Port)
}
// Should fail because of unknown parameter
expectToFail([]string{"foo"}, "command")
// Should fail because unknown flag
expectToFail([]string{"-xxx", "foo"}, "flag")
// Should fail because of config file missing
expectToFail([]string{"-c", "xxx.cfg"}, "file")
// Should fail because of too many args for signal command
expectToFail([]string{"-sl", "quit=pid=foo"}, "signal")
// Should fail because of invalid pid
// On windows, if not running with admin privileges, you would get access denied.
expectToFail([]string{"-sl", "quit=pid"}, "pid", "denied")
// The config file set Trace to true.
opts = mustNotFail([]string{"-c", "./configs/test.conf"})
if !opts.Trace {
t.Fatal("Trace should have been set to true")
}
// The config file set Trace to true, but was overridden by param -V=false
opts = mustNotFail([]string{"-c", "./configs/test.conf", "-V=false"})
if opts.Trace {
t.Fatal("Trace should have been set to false")
}
// The config file set Trace to true, but was overridden by param -DV=false
opts = mustNotFail([]string{"-c", "./configs/test.conf", "-DV=false"})
if opts.Debug || opts.Trace {
t.Fatal("Debug and Trace should have been set to false")
}
// The config file set Trace to true, but was overridden by param -DV
opts = mustNotFail([]string{"-c", "./configs/test.conf", "-DV"})
if !opts.Debug || !opts.Trace {
t.Fatal("Debug and Trace should have been set to true")
}
// This should fail since -cluster is missing
expectedURL, _ := url.Parse("nats://127.0.0.1:6223")
expectToFail([]string{"-routes", expectedURL.String()}, "solicited routes")
// Ensure that we can set cluster and routes from command line
opts = mustNotFail([]string{"-cluster", "nats://127.0.0.1:6222", "-routes", expectedURL.String()})
if opts.Cluster.ListenStr != "nats://127.0.0.1:6222" {
t.Fatalf("Unexpected Cluster.ListenStr=%q", opts.Cluster.ListenStr)
}
if opts.RoutesStr != "nats://127.0.0.1:6223" || len(opts.Routes) != 1 || opts.Routes[0].String() != expectedURL.String() {
t.Fatalf("Unexpected RoutesStr: %q and Routes: %v", opts.RoutesStr, opts.Routes)
}
// Use a config with cluster configuration and explicit route defined.
// Override with empty routes string.
opts = mustNotFail([]string{"-c", "./configs/srv_a.conf", "-routes", ""})
if opts.RoutesStr != "" || len(opts.Routes) != 0 {
t.Fatalf("Unexpected RoutesStr: %q and Routes: %v", opts.RoutesStr, opts.Routes)
}
// Use a config with cluster configuration and override cluster listen string
expectedURL, _ = url.Parse("nats-route://ruser:[email protected]:7246")
opts = mustNotFail([]string{"-c", "./configs/srv_a.conf", "-cluster", "nats://ivan:[email protected]:6222"})
if opts.Cluster.Username != "ivan" || opts.Cluster.Password != "pwd" || opts.Cluster.Port != 6222 ||
len(opts.Routes) != 1 || opts.Routes[0].String() != expectedURL.String() {
t.Fatalf("Unexpected Cluster and/or Routes: %#v - %v", opts.Cluster, opts.Routes)
}
// Disable clustering from command line
opts = mustNotFail([]string{"-c", "./configs/srv_a.conf", "-cluster", ""})
if opts.Cluster.Port != 0 {
t.Fatalf("Unexpected Cluster: %v", opts.Cluster)
}
// Various errors due to malformed cluster listen string.
// (adding -routes to have more than 1 set flag to check
// that Visit() stops when an error is found).
expectToFail([]string{"-cluster", ":", "-routes", ""}, "protocol")
expectToFail([]string{"-cluster", "nats://127.0.0.1", "-routes", ""}, "port")
expectToFail([]string{"-cluster", "nats://127.0.0.1:xxx", "-routes", ""}, "invalid port")
expectToFail([]string{"-cluster", "nats://ivan:127.0.0.1:6222", "-routes", ""}, "colons")
expectToFail([]string{"-cluster", "nats://[email protected]:6222", "-routes", ""}, "password")
// Override config file's TLS configuration from command line, and completely disable TLS
opts = mustNotFail([]string{"-c", "./configs/tls.conf", "-tls=false"})
if opts.TLSConfig != nil || opts.TLS {
t.Fatal("Expected TLS to be disabled")
}
// Override config file's TLS configuration from command line, and force TLS verification.
// However, since the TLS config has to be regenerated, the user needs to provide -tlscert and -tlskey too.
// So this should fail.
expectToFail([]string{"-c", "./configs/tls.conf", "-tlsverify"}, "valid")
// Now the same as above, but with all valid params.
opts = mustNotFail([]string{"-c", "./configs/tls.conf", "-tlsverify", "-tlscert", "./configs/certs/server.pem", "-tlskey", "./configs/certs/key.pem"})
if opts.TLSConfig == nil || !opts.TLSVerify {
t.Fatal("Expected TLS to be configured and force verification")
}
// Configure TLS, but some TLS params missing
expectToFail([]string{"-tls"}, "valid")
expectToFail([]string{"-tls", "-tlscert", "./configs/certs/server.pem"}, "valid")
// One of the file does not exist
expectToFail([]string{"-tls", "-tlscert", "./configs/certs/server.pem", "-tlskey", "./configs/certs/notfound.pem"}, "file")
// Configure TLS and check that this results in a TLSConfig option.
opts = mustNotFail([]string{"-tls", "-tlscert", "./configs/certs/server.pem", "-tlskey", "./configs/certs/key.pem"})
if opts.TLSConfig == nil || !opts.TLS {
t.Fatal("Expected TLSConfig to be set")
}
}
func TestClusterPermissionsConfig(t *testing.T) {
template := `
cluster {
port: 1234
%s
authorization {
user: ivan
password: pwd
permissions {
import {
allow: "foo"
}
export {
allow: "bar"
}
}
}
}
`
conf := createConfFile(t, []byte(fmt.Sprintf(template, "")))
defer os.Remove(conf)
opts, err := ProcessConfigFile(conf)
if err != nil {
if cerr, ok := err.(*processConfigErr); ok && len(cerr.Errors()) > 0 {
t.Fatalf("Error processing config file: %v", err)
}
}
if opts.Cluster.Permissions == nil {
t.Fatal("Expected cluster permissions to be set")
}
if opts.Cluster.Permissions.Import == nil {
t.Fatal("Expected cluster import permissions to be set")
}
if len(opts.Cluster.Permissions.Import.Allow) != 1 || opts.Cluster.Permissions.Import.Allow[0] != "foo" {
t.Fatalf("Expected cluster import permissions to have %q, got %v", "foo", opts.Cluster.Permissions.Import.Allow)
}
if opts.Cluster.Permissions.Export == nil {
t.Fatal("Expected cluster export permissions to be set")
}
if len(opts.Cluster.Permissions.Export.Allow) != 1 || opts.Cluster.Permissions.Export.Allow[0] != "bar" {
t.Fatalf("Expected cluster export permissions to have %q, got %v", "bar", opts.Cluster.Permissions.Export.Allow)
}
// Now add permissions in top level cluster and check
// that this is the one that is being used.
conf = createConfFile(t, []byte(fmt.Sprintf(template, `
permissions {
import {
allow: "baz"
}
export {
allow: "bat"
}
}
`)))
defer os.Remove(conf)
opts, err = ProcessConfigFile(conf)
if err != nil {
t.Fatalf("Error processing config file: %v", err)
}
if opts.Cluster.Permissions == nil {
t.Fatal("Expected cluster permissions to be set")
}
if opts.Cluster.Permissions.Import == nil {
t.Fatal("Expected cluster import permissions to be set")
}
if len(opts.Cluster.Permissions.Import.Allow) != 1 || opts.Cluster.Permissions.Import.Allow[0] != "baz" {
t.Fatalf("Expected cluster import permissions to have %q, got %v", "baz", opts.Cluster.Permissions.Import.Allow)
}
if opts.Cluster.Permissions.Export == nil {
t.Fatal("Expected cluster export permissions to be set")
}
if len(opts.Cluster.Permissions.Export.Allow) != 1 || opts.Cluster.Permissions.Export.Allow[0] != "bat" {
t.Fatalf("Expected cluster export permissions to have %q, got %v", "bat", opts.Cluster.Permissions.Export.Allow)
}
// Tests with invalid permissions
invalidPerms := []string{
`permissions: foo`,
`permissions {
unknown_field: "foo"
}`,
`permissions {
import: [1, 2, 3]
}`,
`permissions {
import {
unknown_field: "foo"
}
}`,
`permissions {
import {
allow {
x: y
}
}
}`,
`permissions {
import {
deny {
x: y
}
}
}`,
`permissions {
export: [1, 2, 3]
}`,
`permissions {
export {
unknown_field: "foo"
}
}`,
`permissions {
export {
allow {
x: y
}
}
}`,
`permissions {
export {
deny {
x: y
}
}
}`,
}
for _, perms := range invalidPerms {
conf = createConfFile(t, []byte(fmt.Sprintf(`
cluster {
port: 1234
%s
}
`, perms)))
_, err := ProcessConfigFile(conf)
os.Remove(conf)
if err == nil {
t.Fatalf("Expected failure for permissions %s", perms)
}
}
for _, perms := range invalidPerms {
conf = createConfFile(t, []byte(fmt.Sprintf(`
cluster {
port: 1234
authorization {
user: ivan
password: pwd
%s
}
}
`, perms)))
_, err := ProcessConfigFile(conf)
os.Remove(conf)
if err == nil {
t.Fatalf("Expected failure for permissions %s", perms)
}
}
}
func TestParseServiceLatency(t *testing.T) {
cases := []struct {
name string
conf string
want *serviceLatency
wantErr bool
}{
{
name: "block with percent sample default value",
conf: `system_account = nats.io
accounts {
nats.io {
exports [{
service: nats.add
latency: {
sampling: 100%
subject: latency.tracking.add
}
}]
}
}`,
want: &serviceLatency{
subject: "latency.tracking.add",
sampling: 100,
},
},
{
name: "block with percent sample nondefault value",
conf: `system_account = nats.io
accounts {
nats.io {
exports [{
service: nats.add
latency: {
sampling: 33%
subject: latency.tracking.add
}
}]
}
}`,
want: &serviceLatency{
subject: "latency.tracking.add",
sampling: 33,
},
},
{
name: "block with number sample nondefault value",
conf: `system_account = nats.io
accounts {
nats.io {
exports [{
service: nats.add
latency: {
sampling: 87
subject: latency.tracking.add
}
}]
}
}`,
want: &serviceLatency{
subject: "latency.tracking.add",
sampling: 87,
},
},
{
name: "field with subject",
conf: `system_account = nats.io
accounts {
nats.io {
exports [{
service: nats.add
latency: latency.tracking.add
}]
}
}`,
want: &serviceLatency{
subject: "latency.tracking.add",
sampling: 100,
},
},
{
name: "block with missing subject",
conf: `system_account = nats.io
accounts {
nats.io {
exports [{
service: nats.add
latency: {
sampling: 87
}
}]
}
}`,
wantErr: true,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
f := createConfFile(t, []byte(c.conf))
opts, err := ProcessConfigFile(f)
os.Remove(f)
switch {
case c.wantErr && err == nil:
t.Fatalf("Expected ProcessConfigFile to fail, but didn't")
case c.wantErr && err != nil:
// We wanted an error and got one, test passed.
return
case !c.wantErr && err == nil:
// We didn't want an error and didn't get one, keep going.
break
case !c.wantErr && err != nil:
t.Fatalf("Failed to process config: %v", err)
}
if len(opts.Accounts) != 1 {
t.Fatalf("Expected accounts to have len %d, got %d", 1, len(opts.Accounts))
}
if len(opts.Accounts[0].exports.services) != 1 {
t.Fatalf("Expected export services to have len %d, got %d", 1, len(opts.Accounts[0].exports.services))
}
s, ok := opts.Accounts[0].exports.services["nats.add"]
if !ok {
t.Fatalf("Expected export service nats.add, missing")
}
if !reflect.DeepEqual(s.latency, c.want) {
t.Fatalf("Expected latency to be %#v, got %#v", c.want, s.latency)
}
})
}
}
func TestAccountUsersLoadedProperly(t *testing.T) {
conf := createConfFile(t, []byte(`
listen: "127.0.0.1:-1"
authorization {
users [
{user: ivan, password: bar}
{nkey : UC6NLCN7AS34YOJVCYD4PJ3QB7QGLYG5B5IMBT25VW5K4TNUJODM7BOX}
]
}
accounts {
synadia {
users [
{user: derek, password: foo}
{nkey : UBAAQWTW6CG2G6ANGNKB5U2B7HRWHSGMZEZX3AQSAJOQDAUGJD46LD2E}
]
}
}
`))
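// check runs a server with the config above and verifies that users and
// nkey users from both the authorization and accounts blocks were loaded.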
check := func(t *testing.T) {
t.Helper()
s, _ := RunServerWithConfig(conf)
defer s.Shutdown()
opts := s.getOpts()
if n := len(opts.Users); n != 2 {
t.Fatalf("Should have 2 users, got %v", n)
}
if n := len(opts.Nkeys); n != 2 {
t.Fatalf("Should have 2 nkeys, got %v", n)
}
}
// Repeat the test since the issue was with the ordering of processing
// authorization vs accounts, which depends on ranging over a map (after actual parsing).
for i := 0; i < 20; i++ {
check(t)
}
}
func TestParsingGateways(t *testing.T) {
content := `
gateway {
name: "A"
listen: "127.0.0.1:4444"
host: "127.0.0.1"
port: 4444
authorization {
user: "ivan"
password: "pwd"
timeout: 2.0
}
tls {
cert_file: "./configs/certs/server.pem"
key_file: "./configs/certs/key.pem"
timeout: 3.0
}
advertise: "me:1"
connect_retries: 10
gateways: [
{
name: "B"
urls: ["nats://user1:pwd1@host2:5222", "nats://user1:pwd1@host3:6222"]
}
{
name: "C"
url: "nats://host4:7222"
}
]
}
`
file := "server_config_gateways.conf"
defer os.Remove(file)
if err := ioutil.WriteFile(file, []byte(content), 0600); err != nil {
t.Fatalf("Error writing config file: %v", err)
}
opts, err := ProcessConfigFile(file)
if err != nil {
t.Fatalf("Error processing file: %v", err)
}
expected := &GatewayOpts{
Name: "A",
Host: "127.0.0.1",
Port: 4444,
Username: "ivan",
Password: "pwd",
AuthTimeout: 2.0,
Advertise: "me:1",
ConnectRetries: 10,
TLSTimeout: 3.0,
}
u1, _ := url.Parse("nats://user1:pwd1@host2:5222")
u2, _ := url.Parse("nats://user1:pwd1@host3:6222")
urls := []*url.URL{u1, u2}
gw := &RemoteGatewayOpts{
Name: "B",
URLs: urls,
}
expected.Gateways = append(expected.Gateways, gw)
u1, _ = url.Parse("nats://host4:7222")
urls = []*url.URL{u1}
gw = &RemoteGatewayOpts{
Name: "C",
URLs: urls,
}
expected.Gateways = append(expected.Gateways, gw)
// Just make sure that TLSConfig is set... we have another test
// to check proper generation of the TLSConfig from the config file.
if opts.Gateway.TLSConfig == nil {
t.Fatalf("Expected TLSConfig, got none")
}
opts.Gateway.TLSConfig = nil
if !reflect.DeepEqual(&opts.Gateway, expected) {
t.Fatalf("Expected %v, got %v", expected, opts.Gateway)
}
}
func TestParsingGatewaysErrors(t *testing.T) {
for _, test := range []struct {
name string
content string
expectedErr string
}{
{
"bad_type",
`gateway: "bad_type"`,
"Expected gateway to be a map",
},
{
"bad_listen",
`gateway {
name: "A"
port: -1
listen: "bad::address"
}`,
"parse address",
},
{
"bad_auth",
`gateway {
name: "A"
port: -1
authorization {
users {
}
}
}`,
"be an array",
},
{
"unknown_field",
`gateway {
name: "A"
port: -1
reject_unknown: true
unknown_field: 1
}`,
"unknown field",
},
{
"users_not_supported",
`gateway {
name: "A"
port: -1
authorization {
users [
{user: alice, password: foo}
{user: bob, password: bar}
]
}
}`,
"does not allow multiple users",
},
{
"tls_error",
`gateway {
name: "A"
port: -1
tls {
cert_file: 123
}
}`,
"to be filename",
},
{
"tls_gen_error_cert_file_not_found",
`gateway {
name: "A"
port: -1
tls {
cert_file: "./configs/certs/missing.pem"
key_file: "./configs/certs/server-key.pem"
}
}`,
"certificate/key pair",
},
{
"tls_gen_error_key_file_not_found",
`gateway {
name: "A"
port: -1
tls {
cert_file: "./configs/certs/server.pem"
key_file: "./configs/certs/missing.pem"
}
}`,
"certificate/key pair",
},
{
"tls_gen_error_key_file_missing",
`gateway {
name: "A"
port: -1
tls {
cert_file: "./configs/certs/server.pem"
}
}`,
`missing 'key_file' in TLS configuration`,
},
{
"tls_gen_error_cert_file_missing",
`gateway {
name: "A"
port: -1
tls {
key_file: "./configs/certs/server-key.pem"
}
}`,
`missing 'cert_file' in TLS configuration`,
},
{
"gateways_needs_to_be_an_array",
`gateway {
name: "A"
gateways {
name: "B"
}
}`,
"Expected gateways field to be an array",
},
{
"gateways_entry_needs_to_be_a_map",
`gateway {
name: "A"
gateways [
"g1", "g2"
]
}`,
"Expected gateway entry to be a map",
},
{
"bad_url",
`gateway {
name: "A"
gateways [
{
name: "B"
url: "nats://wrong url"
}
]
}`,
"error parsing gateway url",
},
{
"bad_urls",
`gateway {
name: "A"
gateways [
{
name: "B"
urls: ["nats://wrong url", "nats://host:5222"]
}
]
}`,
"error parsing gateway url",
},
{
"gateway_tls_error",
`gateway {
name: "A"
port: -1
gateways [
{
name: "B"
tls {
cert_file: 123
}
}
]
}`,
"to be filename",
},
{
"gateway_unknown_field",
`gateway {
name: "A"
port: -1
gateways [
{
name: "B"
unknown_field: 1
}
]
}`,
"unknown field",
},
} {
t.Run(test.name, func(t *testing.T) {
file := fmt.Sprintf("server_config_gateways_%s.conf", test.name)
defer os.Remove(file)
if err := ioutil.WriteFile(file, []byte(test.content), 0600); err != nil {
t.Fatalf("Error writing config file: %v", err)
}
_, err := ProcessConfigFile(file)
if err == nil {
t.Fatalf("Expected to fail, did not. Content:\n%s", test.content)
} else if !strings.Contains(err.Error(), test.expectedErr) {
t.Fatalf("Expected error containing %q, got %q, for content:\n%s", test.expectedErr, err, test.content)
}
})
}
}
func TestParsingLeafNodesListener(t *testing.T) {
content := `
leafnodes {
listen: "127.0.0.1:3333"
host: "127.0.0.1"
port: 3333
advertise: "me:22"
authorization {
user: "derek"
password: "s3cr3t!"
timeout: 2.2
}
tls {
cert_file: "./configs/certs/server.pem"
key_file: "./configs/certs/key.pem"
timeout: 3.3
}
}
`
conf := createConfFile(t, []byte(content))
defer os.Remove(conf)
opts, err := ProcessConfigFile(conf)
if err != nil {
t.Fatalf("Error processing file: %v", err)
}
expected := &LeafNodeOpts{
Host: "127.0.0.1",
Port: 3333,
Username: "derek",
Password: "s3cr3t!",
AuthTimeout: 2.2,
Advertise: "me:22",
TLSTimeout: 3.3,
}
if opts.LeafNode.TLSConfig == nil {
t.Fatalf("Expected TLSConfig, got none")
}
opts.LeafNode.TLSConfig = nil
if !reflect.DeepEqual(&opts.LeafNode, expected) {
t.Fatalf("Expected %v, got %v", expected, opts.LeafNode)
}
}
func TestParsingLeafNodeRemotes(t *testing.T) {
t.Run("parse config file with relative path", func(t *testing.T) {
content := `
leafnodes {
remotes = [
{
url: nats-leaf://127.0.0.1:2222
account: foobar // Local Account to bind to..
credentials: "./my.creds"
}
]
}
`
conf := createConfFile(t, []byte(content))
defer os.Remove(conf)
opts, err := ProcessConfigFile(conf)
if err != nil {
t.Fatalf("Error processing file: %v", err)
}
if len(opts.LeafNode.Remotes) != 1 {
t.Fatalf("Expected 1 remote, got %d", len(opts.LeafNode.Remotes))
}
expected := &RemoteLeafOpts{
LocalAccount: "foobar",
Credentials: "./my.creds",
}
u, _ := url.Parse("nats-leaf://127.0.0.1:2222")
expected.URLs = append(expected.URLs, u)
if !reflect.DeepEqual(opts.LeafNode.Remotes[0], expected) {
t.Fatalf("Expected %v, got %v", expected, opts.LeafNode.Remotes[0])
}
})
t.Run("parse config file with tilde path", func(t *testing.T) {
if runtime.GOOS == "windows" {
t.SkipNow()
}
origHome := os.Getenv("HOME")
defer os.Setenv("HOME", origHome)
os.Setenv("HOME", "/home/foo")
content := `
leafnodes {
remotes = [
{
url: nats-leaf://127.0.0.1:2222
account: foobar // Local Account to bind to..
credentials: "~/my.creds"
}
]
}
`
conf := createConfFile(t, []byte(content))
defer os.Remove(conf)
opts, err := ProcessConfigFile(conf)
if err != nil {
t.Fatalf("Error processing file: %v", err)
}
expected := &RemoteLeafOpts{
LocalAccount: "foobar",
Credentials: "/home/foo/my.creds",
}
u, _ := url.Parse("nats-leaf://127.0.0.1:2222")
expected.URLs = append(expected.URLs, u)
if !reflect.DeepEqual(opts.LeafNode.Remotes[0], expected) {
t.Fatalf("Expected %v, got %v", expected, opts.LeafNode.Remotes[0])
}
})
}
func TestLargeMaxControlLine(t *testing.T) {
confFileName := "big_mcl.conf"
defer os.Remove(confFileName)
content := `
max_control_line = 3000000000
`
if err := ioutil.WriteFile(confFileName, []byte(content), 0666); err != nil {
t.Fatalf("Error writing config file: %v", err)
}
if _, err := ProcessConfigFile(confFileName); err == nil {
t.Fatalf("Expected an error from too large of a max_control_line entry")
}
}
func TestLargeMaxPayload(t *testing.T) {
confFileName := "big_mp.conf"
defer os.Remove(confFileName)
content := `
max_payload = 3000000000
`
if err := ioutil.WriteFile(confFileName, []byte(content), 0666); err != nil {
t.Fatalf("Error writing config file: %v", err)
}
if _, err := ProcessConfigFile(confFileName); err == nil {
t.Fatalf("Expected an error from too large of a max_payload entry")
}
}
func TestHandleUnknownTopLevelConfigurationField(t *testing.T) {
conf := createConfFile(t, []byte(`
port: 1234
streaming {
id: "me"
}
`))
defer os.Remove(conf)
// Verify that we get an error because of unknown "streaming" field.
opts := &Options{}
if err := opts.ProcessConfigFile(conf); err == nil || !strings.Contains(err.Error(), "streaming") {
t.Fatal("Expected error, got none")
}
// Verify that if that is set, we get no error
NoErrOnUnknownFields(true)
defer NoErrOnUnknownFields(false)
if err := opts.ProcessConfigFile(conf); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if opts.Port != 1234 {
t.Fatalf("Port was not parsed correctly: %v", opts.Port)
}
// Verify that ignore works only on top level fields.
changeCurrentConfigContentWithNewContent(t, conf, []byte(`
port: 1234
cluster {
non_top_level_unknown_field: 123
}
streaming {
id: "me"
}
`))
if err := opts.ProcessConfigFile(conf); err == nil || !strings.Contains(err.Error(), "non_top_level") {
t.Fatal("Expected error, got none")
}
}
func TestSublistNoCacheConfig(t *testing.T) {
confFileName := createConfFile(t, []byte(`
disable_sublist_cache: true
`))
defer os.Remove(confFileName)
opts, err := ProcessConfigFile(confFileName)
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
if !opts.NoSublistCache {
t.Fatalf("Expected sublist cache to be disabled")
}
}
func TestSublistNoCacheConfigOnAccounts(t *testing.T) {
confFileName := createConfFile(t, []byte(`
listen: "127.0.0.1:-1"
disable_sublist_cache: true
accounts {
synadia {
users [ {nkey : UBAAQWTW6CG2G6ANGNKB5U2B7HRWHSGMZEZX3AQSAJOQDAUGJD46LD2E} ]
}
nats.io {
users [ {nkey : UC6NLCN7AS34YOJVCYD4PJ3QB7QGLYG5B5IMBT25VW5K4TNUJODM7BOX} ]
}
}
no_sys_acc = true
`))
defer os.Remove(confFileName)
s, _ := RunServerWithConfig(confFileName)
defer s.Shutdown()
// Check that all account sublists do not have caching enabled.
ta := s.numReservedAccounts() + 2
if la := s.numAccounts(); la != ta {
t.Fatalf("Expected to have a server with %d active accounts, got %v", ta, la)
}
s.accounts.Range(func(k, v interface{}) bool {
acc := v.(*Account)
if acc == nil {
t.Fatalf("Expected non-nil sublist for account")
}
if acc.sl.CacheEnabled() {
t.Fatalf("Expected the account sublist to not have caching enabled")
}
return true
})
}
func TestParsingResponsePermissions(t *testing.T) {
template := `
listen: "127.0.0.1:-1"
authorization {
users [
{
user: ivan
password: pwd
permissions {
allow_responses {
%s
%s
}
}
}
]
}
`
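// check processes the given config file and verifies either the expected
// parsing error, or the resulting response-permission max msgs and TTL.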
check := func(t *testing.T, conf string, expectedError string, expectedMaxMsgs int, expectedTTL time.Duration) {
t.Helper()
opts, err := ProcessConfigFile(conf)
if expectedError != "" {
if err == nil || !strings.Contains(err.Error(), expectedError) {
t.Fatalf("Expected error about %q, got %q", expectedError, err)
}
// OK!
return
}
if err != nil {
t.Fatalf("Error on process: %v", err)
}
u := opts.Users[0]
p := u.Permissions.Response
if p == nil {
t.Fatalf("Expected response permissions to be set, it was not")
}
if n := p.MaxMsgs; n != expectedMaxMsgs {
t.Fatalf("Expected response max msgs to be %v, got %v", expectedMaxMsgs, n)
}
if ttl := p.Expires; ttl != expectedTTL {
t.Fatalf("Expected response ttl to be %v, got %v", expectedTTL, ttl)
}
}
// Check defaults
conf := createConfFile(t, []byte(fmt.Sprintf(template, "", "")))
defer os.Remove(conf)
check(t, conf, "", DEFAULT_ALLOW_RESPONSE_MAX_MSGS, DEFAULT_ALLOW_RESPONSE_EXPIRATION)
conf = createConfFile(t, []byte(fmt.Sprintf(template, "max: 10", "")))
defer os.Remove(conf)
check(t, conf, "", 10, DEFAULT_ALLOW_RESPONSE_EXPIRATION)
conf = createConfFile(t, []byte(fmt.Sprintf(template, "", "ttl: 5s")))
defer os.Remove(conf)
check(t, conf, "", DEFAULT_ALLOW_RESPONSE_MAX_MSGS, 5*time.Second)
conf = createConfFile(t, []byte(fmt.Sprintf(template, "max: 0", "")))
defer os.Remove(conf)
check(t, conf, "", DEFAULT_ALLOW_RESPONSE_MAX_MSGS, DEFAULT_ALLOW_RESPONSE_EXPIRATION)
conf = createConfFile(t, []byte(fmt.Sprintf(template, "", `ttl: "0s"`)))
defer os.Remove(conf)
check(t, conf, "", DEFAULT_ALLOW_RESPONSE_MAX_MSGS, DEFAULT_ALLOW_RESPONSE_EXPIRATION)
// Check normal values
conf = createConfFile(t, []byte(fmt.Sprintf(template, "max: 10", `ttl: "5s"`)))
defer os.Remove(conf)
check(t, conf, "", 10, 5*time.Second)
// Check negative values ok
conf = createConfFile(t, []byte(fmt.Sprintf(template, "max: -1", `ttl: "5s"`)))
defer os.Remove(conf)
check(t, conf, "", -1, 5*time.Second)
conf = createConfFile(t, []byte(fmt.Sprintf(template, "max: 10", `ttl: "-1s"`)))
defer os.Remove(conf)
check(t, conf, "", 10, -1*time.Second)
conf = createConfFile(t, []byte(fmt.Sprintf(template, "max: -1", `ttl: "-1s"`)))
defer os.Remove(conf)
check(t, conf, "", -1, -1*time.Second)
// Check parsing errors
conf = createConfFile(t, []byte(fmt.Sprintf(template, "unknown_field: 123", "")))
defer os.Remove(conf)
check(t, conf, "Unknown field", 0, 0)
conf = createConfFile(t, []byte(fmt.Sprintf(template, "max: 10", "ttl: 123")))
defer os.Remove(conf)
check(t, conf, "not a duration string", 0, 0)
conf = createConfFile(t, []byte(fmt.Sprintf(template, "max: 10", "ttl: xyz")))
defer os.Remove(conf)
check(t, conf, "error parsing expires", 0, 0)
}
func TestExpandPath(t *testing.T) {
if runtime.GOOS == "windows" {
origUserProfile := os.Getenv("USERPROFILE")
origHomeDrive, origHomePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH")
defer func() {
os.Setenv("USERPROFILE", origUserProfile)
os.Setenv("HOMEDRIVE", origHomeDrive)
os.Setenv("HOMEPATH", origHomePath)
}()
cases := []struct {
path string
userProfile string
homeDrive string
homePath string
wantPath string
wantErr bool
}{
// Missing HOMEDRIVE and HOMEPATH.
{path: "/Foo/Bar", userProfile: `C:\Foo\Bar`, wantPath: "/Foo/Bar"},
{path: "Foo/Bar", userProfile: `C:\Foo\Bar`, wantPath: "Foo/Bar"},
{path: "~/Fizz", userProfile: `C:\Foo\Bar`, wantPath: `C:\Foo\Bar\Fizz`},
{path: `${HOMEDRIVE}${HOMEPATH}\Fizz`, homeDrive: `C:`, homePath: `\Foo\Bar`, wantPath: `C:\Foo\Bar\Fizz`},
// Missing USERPROFILE.
{path: "~/Fizz", homeDrive: "X:", homePath: `\Foo\Bar`, wantPath: `X:\Foo\Bar\Fizz`},
// Set all environment variables. HOMEDRIVE and HOMEPATH take
// precedence.
{path: "~/Fizz", userProfile: `C:\Foo\Bar`,
homeDrive: "X:", homePath: `\Foo\Bar`, wantPath: `X:\Foo\Bar\Fizz`},
// Missing all environment variables.
{path: "~/Fizz", wantErr: true},
}
for i, c := range cases {
t.Run(fmt.Sprintf("windows case %d", i), func(t *testing.T) {
os.Setenv("USERPROFILE", c.userProfile)
os.Setenv("HOMEDRIVE", c.homeDrive)
os.Setenv("HOMEPATH", c.homePath)
gotPath, err := expandPath(c.path)
if !c.wantErr && err != nil {
t.Fatalf("unexpected error: got=%v; want=%v", err, nil)
} else if c.wantErr && err == nil {
t.Fatalf("unexpected success: got=%v; want=%v", nil, "err")
}
if gotPath != c.wantPath {
t.Fatalf("unexpected path: got=%v; want=%v", gotPath, c.wantPath)
}
})
}
return
}
// Unix tests
origHome := os.Getenv("HOME")
defer os.Setenv("HOME", origHome)
cases := []struct {
path string
home string
wantPath string
wantErr bool
}{
{path: "/foo/bar", home: "/fizz/buzz", wantPath: "/foo/bar"},
{path: "foo/bar", home: "/fizz/buzz", wantPath: "foo/bar"},
{path: "~/fizz", home: "/foo/bar", wantPath: "/foo/bar/fizz"},
{path: "$HOME/fizz", home: "/foo/bar", wantPath: "/foo/bar/fizz"},
// missing HOME env var
{path: "~/fizz", wantErr: true},
}
for i, c := range cases {
t.Run(fmt.Sprintf("unix case %d", i), func(t *testing.T) {
os.Setenv("HOME", c.home)
gotPath, err := expandPath(c.path)
if !c.wantErr && err != nil {
t.Fatalf("unexpected error: got=%v; want=%v", err, nil)
} else if c.wantErr && err == nil {
t.Fatalf("unexpected success: got=%v; want=%v", nil, "err")
}
if gotPath != c.wantPath {
t.Fatalf("unexpected path: got=%v; want=%v", gotPath, c.wantPath)
}
})
}
}
func TestNoAuthUserCode(t *testing.T) {
confFileName := createConfFile(t, []byte(`
listen: "127.0.0.1:-1"
no_auth_user: $NO_AUTH_USER
accounts {
synadia {
users [
{user: "a", password: "a"},
{nkey : UBAAQWTW6CG2G6ANGNKB5U2B7HRWHSGMZEZX3AQSAJOQDAUGJD46LD2E},
]
}
acc {
users [
{user: "c", password: "c"}
]
}
}
# config for $G
authorization {
users [
{user: "b", password: "b"}
]
}
`))
defer os.Remove(confFileName)
defer os.Unsetenv("NO_AUTH_USER")
for _, user := range []string{"a", "b", "b"} {
t.Run(user, func(t *testing.T) {
os.Setenv("NO_AUTH_USER", user)
opts, err := ProcessConfigFile(confFileName)
if err != nil {
t.Fatalf("Received unexpected error %s", err)
} else {
opts.NoLog = true
srv := RunServer(opts)
nc, err := nats.Connect(fmt.Sprintf("nats://127.0.0.1:%d", opts.Port))
if err != nil {
t.Fatalf("couldn't connect %s", err)
}
nc.Close()
srv.Shutdown()
}
})
}
for _, badUser := range []string{"notthere", "UBAAQWTW6CG2G6ANGNKB5U2B7HRWHSGMZEZX3AQSAJOQDAUGJD46LD2E"} {
t.Run(badUser, func(t *testing.T) {
os.Setenv("NO_AUTH_USER", badUser)
opts, err := ProcessConfigFile(confFileName)
if err != nil {
t.Fatalf("Received unexpected error %s", err)
}
s, err := NewServer(opts)
if err != nil {
if !strings.HasPrefix(err.Error(), "no_auth_user") {
t.Fatalf("Received unexpected error %s", err)
}
return // error looks as expected
}
s.Shutdown()
t.Fatalf("Received no error, where no_auth_user error was expected")
})
}
}
const operatorJwtWithSysAccAndUrlResolver = `
listen: "127.0.0.1:-1"
operator: eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiJJVEdJNjNCUUszM1VNN1pBSzZWT1RXNUZEU01ESlNQU1pRQ0RMNUlLUzZQTVhBU0ROQ01RIiwiaWF0IjoxNTg5ODM5MjA1LCJpc3MiOiJPQ1k2REUyRVRTTjNVT0RGVFlFWEJaTFFMSTdYNEdTWFI1NE5aQzRCQkxJNlFDVFpVVDY1T0lWTiIsIm5hbWUiOiJPUCIsInN1YiI6Ik9DWTZERTJFVFNOM1VPREZUWUVYQlpMUUxJN1g0R1NYUjU0TlpDNEJCTEk2UUNUWlVUNjVPSVZOIiwidHlwZSI6Im9wZXJhdG9yIiwibmF0cyI6eyJhY2NvdW50X3NlcnZlcl91cmwiOiJodHRwOi8vbG9jYWxob3N0OjgwMDAvand0L3YxIiwib3BlcmF0b3Jfc2VydmljZV91cmxzIjpbIm5hdHM6Ly9sb2NhbGhvc3Q6NDIyMiJdLCJzeXN0ZW1fYWNjb3VudCI6IkFEWjU0N0IyNFdIUExXT0s3VE1MTkJTQTdGUUZYUjZVTTJOWjRISE5JQjdSREZWWlFGT1o0R1FRIn19.3u710KqMLwgXwsMvhxfEp9xzK84XyAZ-4dd6QY0T6hGj8Bw9mS-HcQ7HbvDDNU01S61tNFfpma_JR6LtB3ixBg
`
func TestReadOperatorJWT(t *testing.T) {
confFileName := createConfFile(t, []byte(operatorJwtWithSysAccAndUrlResolver))
defer os.Remove(confFileName)
opts, err := ProcessConfigFile(confFileName)
if err != nil {
t.Fatalf("Received unexpected error %s", err)
}
if opts.SystemAccount != "ADZ547B24WHPLWOK7TMLNBSA7FQFXR6UM2NZ4HHNIB7RDFVZQFOZ4GQQ" {
t.Fatalf("Expected different SystemAccount: %s", opts.SystemAccount)
}
if r, ok := opts.AccountResolver.(*URLAccResolver); !ok {
t.Fatalf("Expected different SystemAccount: %s", opts.SystemAccount)
} else if r.url != "http://localhost:8000/jwt/v1/accounts/" {
t.Fatalf("Expected different SystemAccount: %s", r.url)
}
}
// Using a memory resolver so this test does not have to start an external account resolver.
const operatorJwtWithSysAccAndMemResolver = `
listen: "127.0.0.1:-1"
// Operator "TESTOP"
operator: eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiJLRTZRU0tWTU1VWFFKNFZCTDNSNDdGRFlIWElaTDRZSE1INjVIT0k1UjZCNUpPUkxVQlZBIiwiaWF0IjoxNTg5OTE2MzgyLCJpc3MiOiJPQVRUVkJYTElVTVRRT1FXVUEySU0zRkdUQlFRSEFHUEZaQTVET05NTlFSUlRQUjYzTERBTDM1WiIsIm5hbWUiOiJURVNUT1AiLCJzdWIiOiJPQVRUVkJYTElVTVRRT1FXVUEySU0zRkdUQlFRSEFHUEZaQTVET05NTlFSUlRQUjYzTERBTDM1WiIsInR5cGUiOiJvcGVyYXRvciIsIm5hdHMiOnsic3lzdGVtX2FjY291bnQiOiJBRFNQT1lNSFhKTjZKVllRQ0xSWjVYUTVJVU42QTNTMzNYQTROVjRWSDc0NDIzVTdVN1lSNFlWVyJ9fQ.HiyUtlk8kectKHeQHtuqFcjFt0RbYZE_WAqPCcoWlV2IFVdXuOTzShYEMgDmtgvsFG_zxNQOj08Gr6a06ovwBA
resolver: MEMORY
resolver_preload: {
// Account "TESTSYS"
ADSPOYMHXJN6JVYQCLRZ5XQ5IUN6A3S33XA4NV4VH74423U7U7YR4YVW: eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiI2WEtYUFZNTjdEVFlBSUE0R1JDWUxXUElSM1ZEM1Q2UVk2RFg3NURHTVFVWkdVWTJSRFNRIiwiaWF0IjoxNTg5OTE2MzIzLCJpc3MiOiJPQVRUVkJYTElVTVRRT1FXVUEySU0zRkdUQlFRSEFHUEZaQTVET05NTlFSUlRQUjYzTERBTDM1WiIsIm5hbWUiOiJURVNUU1lTIiwic3ViIjoiQURTUE9ZTUhYSk42SlZZUUNMUlo1WFE1SVVONkEzUzMzWEE0TlY0Vkg3NDQyM1U3VTdZUjRZVlciLCJ0eXBlIjoiYWNjb3VudCIsIm5hdHMiOnsibGltaXRzIjp7InN1YnMiOi0xLCJjb25uIjotMSwibGVhZiI6LTEsImltcG9ydHMiOi0xLCJleHBvcnRzIjotMSwiZGF0YSI6LTEsInBheWxvYWQiOi0xLCJ3aWxkY2FyZHMiOnRydWV9fX0.vhtWanIrOncdNfg-yO-7L61ccc-yRacvVtEsaIgWBEmW4czlEPhsiF1MkUKG91rtgcbwUf73ZIFEfja5MgFBAQ
}
`
func TestReadOperatorJWTSystemAccountMatch(t *testing.T) {
confFileName := createConfFile(t, []byte(operatorJwtWithSysAccAndMemResolver+`
system_account: ADSPOYMHXJN6JVYQCLRZ5XQ5IUN6A3S33XA4NV4VH74423U7U7YR4YVW
`))
defer os.Remove(confFileName)
opts, err := ProcessConfigFile(confFileName)
if err != nil {
t.Fatalf("Received unexpected error %s", err)
}
s, err := NewServer(opts)
if err != nil {
t.Fatalf("Received unexpected error %s", err)
}
s.Shutdown()
}
func TestReadOperatorJWTSystemAccountMismatch(t *testing.T) {
confFileName := createConfFile(t, []byte(operatorJwtWithSysAccAndMemResolver+`
system_account: ADXJJCDCSRSMCOV25FXQW7R4QOG7R763TVEXBNWJHLBMBGWOJYG5XZBG
`))
defer os.Remove(confFileName)
opts, err := ProcessConfigFile(confFileName)
if err != nil {
t.Fatalf("Received unexpected error %s", err)
}
s, err := NewServer(opts)
if err == nil {
s.Shutdown()
t.Fatalf("Received no error")
} else if !strings.Contains(err.Error(), "system_account in config and operator JWT must be identical") {
t.Fatalf("Received unexpected error %s", err)
}
}
const operatorJwtAssertVersion_1_2_3 = `
listen: "127.0.0.1:-1"
// Operator "TESTOP"
operator: eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5LW5rZXkifQ.eyJqdGkiOiJYWFhGNEREQTdRSEtCSU5MUlBSNTZPUFFTUjc0RFdLSjZCQkZWN1BSVEpMNUtDUUdCUFhBIiwiaWF0IjoxNTkwNTI4NTI1LCJpc3MiOiJPRDRYSkwyM0haSlozTzZKQ0FGUTZSVk9ZR0JZWUtGU0tIRlJFWkRaUEJGN1I0SlpQSTVWNzNLTSIsInN1YiI6Ik9ENFhKTDIzSFpKWjNPNkpDQUZRNlJWT1lHQllZS0ZTS0hGUkVaRFpQQkY3UjRKWlBJNVY3M0tNIiwibmF0cyI6eyJhc3NlcnRfc2VydmVyX3ZlcnNpb24iOiIxLjIuMyIsInR5cGUiOiJvcGVyYXRvciIsInZlcnNpb24iOjJ9fQ.ERRsFUekK7W5tZbeYlkLlwU3AGMpZTtlh5jKIj2rWoLnoWLlWcjXYD4uKdaT-tNGbsahiQZV82C5_K89BCWODA
resolver: MEMORY
resolver_preload: {
}
`
func TestReadOperatorAssertVersion(t *testing.T) {
confFileName := createConfFile(t, []byte(operatorJwtAssertVersion_1_2_3))
defer os.Remove(confFileName)
opts, err := ProcessConfigFile(confFileName)
if err != nil {
t.Fatalf("Received unexpected error %s", err)
}
s, err := NewServer(opts)
if err != nil {
t.Fatalf("Received unexpected error %s", err)
}
s.Shutdown()
}
const operatorJwtAssertVersion_10_20_30 = `
listen: "127.0.0.1:-1"
// Operator "TESTOP"
operator: eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5LW5rZXkifQ.eyJqdGkiOiJJRjNXQkFENk9JWE5HTzRWUFkzQ0hTS1ZMUDJXT0ZLQkZPR0RMUFRHSERVR0hRV1BUVlNRIiwiaWF0IjoxNTkwNTI4NTI1LCJpc3MiOiJPQ0laQUFVN0dDNkREU0QyTk1VQU9VQ0JRS09SQlNEQUxPSVBNRDRSVEM0SUhCRUZQRVE0TjZERSIsInN1YiI6Ik9DSVpBQVU3R0M2RERTRDJOTVVBT1VDQlFLT1JCU0RBTE9JUE1ENFJUQzRJSEJFRlBFUTRONkRFIiwibmF0cyI6eyJhc3NlcnRfc2VydmVyX3ZlcnNpb24iOiIxMC4yMC4zMCIsInR5cGUiOiJvcGVyYXRvciIsInZlcnNpb24iOjJ9fQ.bGFUCQIa2D5GjluEbXYJQGZnsM_O1r46b_xq2AUp4cEYGqCqvBAJZp9coBTgXL-4MPjyoPZSj2RglC1yUy9aDQ
resolver: MEMORY
resolver_preload: {
}
`
func TestReadOperatorAssertVersionFail(t *testing.T) {
confFileName := createConfFile(t, []byte(operatorJwtAssertVersion_10_20_30))
defer os.Remove(confFileName)
opts, err := ProcessConfigFile(confFileName)
if err != nil {
t.Fatalf("Received unexpected error %s", err)
}
s, err := NewServer(opts)
if err == nil {
s.Shutdown()
t.Fatalf("Received no error")
} else if !strings.Contains(err.Error(), "expected major version 10 > server major version") {
t.Fatal("expected different error got: ", err)
}
}
func TestClusterNameAndGatewayNameConflict(t *testing.T) {
conf := createConfFile(t, []byte(`
listen: 127.0.0.1:-1
cluster {
name: A
listen: 127.0.0.1:-1
}
gateway {
name: B
listen: 127.0.0.1:-1
}
`))
defer os.Remove(conf)
opts, err := ProcessConfigFile(conf)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if err := validateOptions(opts); err != ErrClusterNameConfigConflict {
t.Fatalf("Expected ErrClusterNameConfigConflict got %v", err)
}
}
| 1 | 11,319 | No need for blank line here. This block will then be reordered alphabetically. | nats-io-nats-server | go |
@@ -5,9 +5,14 @@ import (
"fmt"
k8s "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
+const (
+ localCluster = "local"
+)
+
type ClientsetManager interface {
Clientsets() map[string]ContextClientset
GetK8sClientset(clientset, cluster, namespace string) (ContextClientset, error) | 1 | package k8s
import (
"errors"
"fmt"
k8s "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
type ClientsetManager interface {
Clientsets() map[string]ContextClientset
GetK8sClientset(clientset, cluster, namespace string) (ContextClientset, error)
}
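// ContextClientset is a Kubernetes clientset bound to a kubeconfig context,
// exposing that context's cluster name and default namespace.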
type ContextClientset interface {
k8s.Interface
Namespace() string
Cluster() string
}
func NewContextClientset(namespace string, cluster string, clientset k8s.Interface) ContextClientset {
return &ctxClientsetImpl{
Interface: clientset,
namespace: namespace,
cluster: cluster,
}
}
type ctxClientsetImpl struct {
k8s.Interface
namespace string
cluster string
}
func (c *ctxClientsetImpl) Namespace() string { return c.namespace }
func (c *ctxClientsetImpl) Cluster() string { return c.cluster }
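// newClientsetManager builds one clientset per context found via the given
// kubeconfig loading rules, keyed by context name.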
func newClientsetManager(rules *clientcmd.ClientConfigLoadingRules) (ClientsetManager, error) {
kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, &clientcmd.ConfigOverrides{})
apiConfig, err := kubeConfig.RawConfig()
if err != nil {
return nil, fmt.Errorf("could not load apiconfig: %w", err)
}
lookup := make(map[string]*ctxClientsetImpl, len(apiConfig.Contexts))
for name, ctxInfo := range apiConfig.Contexts {
contextConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
rules,
&clientcmd.ConfigOverrides{CurrentContext: name},
)
restConfig, err := contextConfig.ClientConfig()
if err != nil {
return nil, fmt.Errorf("could not load restconfig: %w", err)
}
clientset, err := k8s.NewForConfig(restConfig)
if err != nil {
return nil, fmt.Errorf("could not create k8s clientset from config: %w", err)
}
ns, _, err := contextConfig.Namespace()
if err != nil {
return nil, err
}
lookup[name] = &ctxClientsetImpl{Interface: clientset, namespace: ns, cluster: ctxInfo.Cluster}
}
return &managerImpl{clientsets: lookup}, nil
}
type managerImpl struct {
clientsets map[string]*ctxClientsetImpl
}
func (m *managerImpl) Clientsets() map[string]ContextClientset {
ret := make(map[string]ContextClientset)
for k, v := range m.clientsets {
ret[k] = v
}
return ret
}
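// GetK8sClientset returns the clientset registered under the given name,
// checking that the cluster matches (when specified) and overriding the
// default namespace (when non-empty).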
func (m *managerImpl) GetK8sClientset(clientset, cluster, namespace string) (ContextClientset, error) {
cs, ok := m.clientsets[clientset]
if !ok {
return nil, errors.New("not found")
}
if cluster != "" && cluster != cs.cluster {
return nil, errors.New("specified cluster does not match clientset")
}
if namespace == "" {
// Use the client's default namespace.
return cs, nil
}
// Shallow copy and update namespace.
ret := *cs
ret.namespace = namespace
return &ret, nil
}
| 1 | 8,248 | let's call it `in-cluster` instead of `local`. less chance of confusion. | lyft-clutch | go |
@@ -95,6 +95,8 @@ class Product extends BaseAction implements EventSubscriberInterface
$con->beginTransaction();
try {
+ $prev_ref = $product->getRef();
+
$product
->setDispatcher($event->getDispatcher())
->setRef($event->getRef()) | 1 | <?php
/*************************************************************************************/
/* This file is part of the Thelia package. */
/* */
/* Copyright (c) OpenStudio */
/* email : [email protected] */
/* web : http://www.thelia.net */
/* */
/* For the full copyright and license information, please view the LICENSE.txt */
/* file that was distributed with this source code. */
/*************************************************************************************/
namespace Thelia\Action;
use Propel\Runtime\Exception\PropelException;
use Symfony\Component\EventDispatcher\EventSubscriberInterface;
use Thelia\Core\Event\File\FileDeleteEvent;
use Thelia\Model\Map\ProductTableMap;
use Thelia\Model\ProductDocument;
use Thelia\Model\ProductImage;
use Thelia\Model\ProductQuery;
use Thelia\Model\Product as ProductModel;
use Thelia\Model\ProductAssociatedContent;
use Thelia\Model\ProductAssociatedContentQuery;
use Thelia\Model\ProductCategory;
use Thelia\Model\TaxRuleQuery;
use Thelia\Model\AccessoryQuery;
use Thelia\Model\Accessory;
use Thelia\Model\FeatureProduct;
use Thelia\Model\FeatureProductQuery;
use Thelia\Model\ProductCategoryQuery;
use Thelia\Model\ProductSaleElementsQuery;
use Thelia\Core\Event\TheliaEvents;
use Thelia\Core\Event\Product\ProductUpdateEvent;
use Thelia\Core\Event\Product\ProductCreateEvent;
use Thelia\Core\Event\Product\ProductDeleteEvent;
use Thelia\Core\Event\Product\ProductToggleVisibilityEvent;
use Thelia\Core\Event\Product\ProductAddContentEvent;
use Thelia\Core\Event\Product\ProductDeleteContentEvent;
use Thelia\Core\Event\UpdatePositionEvent;
use Thelia\Core\Event\UpdateSeoEvent;
use Thelia\Core\Event\FeatureProduct\FeatureProductUpdateEvent;
use Thelia\Core\Event\FeatureProduct\FeatureProductDeleteEvent;
use Thelia\Core\Event\Product\ProductSetTemplateEvent;
use Thelia\Core\Event\Product\ProductDeleteCategoryEvent;
use Thelia\Core\Event\Product\ProductAddCategoryEvent;
use Thelia\Core\Event\Product\ProductAddAccessoryEvent;
use Thelia\Core\Event\Product\ProductDeleteAccessoryEvent;
use Propel\Runtime\Propel;
class Product extends BaseAction implements EventSubscriberInterface
{
/**
* Create a new product entry
*
* @param \Thelia\Core\Event\Product\ProductCreateEvent $event
*/
public function create(ProductCreateEvent $event)
{
$product = new ProductModel();
$product
->setDispatcher($event->getDispatcher())
->setRef($event->getRef())
->setLocale($event->getLocale())
->setTitle($event->getTitle())
->setVisible($event->getVisible() ? 1 : 0)
->setVirtual($event->getVirtual() ? 1 : 0)
// Set the default tax rule to this product
->setTaxRule(TaxRuleQuery::create()->findOneByIsDefault(true))
->create(
$event->getDefaultCategory(),
$event->getBasePrice(),
$event->getCurrencyId(),
$event->getTaxRuleId(),
$event->getBaseWeight()
);
$event->setProduct($product);
}
/**
* Change a product
*
* @param \Thelia\Core\Event\Product\ProductUpdateEvent $event
*/
public function update(ProductUpdateEvent $event)
{
if (null !== $product = ProductQuery::create()->findPk($event->getProductId())) {
$con = Propel::getWriteConnection(ProductTableMap::DATABASE_NAME);
$con->beginTransaction();
try {
$product
->setDispatcher($event->getDispatcher())
->setRef($event->getRef())
->setLocale($event->getLocale())
->setTitle($event->getTitle())
->setDescription($event->getDescription())
->setChapo($event->getChapo())
->setPostscriptum($event->getPostscriptum())
->setVisible($event->getVisible() ? 1 : 0)
->setVirtual($event->getVirtual() ? 1 : 0)
->setBrandId($event->getBrandId() <= 0 ? null : $event->getBrandId())
->save($con)
;
            // Update default category (if required)
$product->updateDefaultCategory($event->getDefaultCategory());
$event->setProduct($product);
$con->commit();
} catch (PropelException $e) {
$con->rollBack();
throw $e;
}
}
}
/**
* Change a product SEO
*
* @param \Thelia\Core\Event\UpdateSeoEvent $event
*/
public function updateSeo(UpdateSeoEvent $event)
{
return $this->genericUpdateSeo(ProductQuery::create(), $event);
}
/**
* Delete a product entry
*
* @param \Thelia\Core\Event\Product\ProductDeleteEvent $event
*/
public function delete(ProductDeleteEvent $event)
{
if (null !== $product = ProductQuery::create()->findPk($event->getProductId())) {
$product
->setDispatcher($event->getDispatcher())
->delete()
;
$event->setProduct($product);
}
}
/**
* Toggle product visibility. No form used here
*
* @param ActionEvent $event
*/
public function toggleVisibility(ProductToggleVisibilityEvent $event)
{
$product = $event->getProduct();
$product
->setDispatcher($event->getDispatcher())
->setVisible($product->getVisible() ? false : true)
->save()
;
$event->setProduct($product);
}
/**
     * Changes position, selecting absolute or relative change.
     *
     * @param UpdatePositionEvent $event
*/
public function updatePosition(UpdatePositionEvent $event)
{
$this->genericUpdatePosition(ProductQuery::create(), $event);
}
public function addContent(ProductAddContentEvent $event)
{
if (ProductAssociatedContentQuery::create()
->filterByContentId($event->getContentId())
->filterByProduct($event->getProduct())->count() <= 0) {
$content = new ProductAssociatedContent();
$content
->setDispatcher($event->getDispatcher())
->setProduct($event->getProduct())
->setContentId($event->getContentId())
->save()
;
}
}
public function removeContent(ProductDeleteContentEvent $event)
{
$content = ProductAssociatedContentQuery::create()
->filterByContentId($event->getContentId())
->filterByProduct($event->getProduct())->findOne()
;
if ($content !== null) {
$content
->setDispatcher($event->getDispatcher())
->delete()
;
}
}
public function addCategory(ProductAddCategoryEvent $event)
{
if (ProductCategoryQuery::create()
->filterByProduct($event->getProduct())
->filterByCategoryId($event->getCategoryId())
->count() <= 0) {
$productCategory = new ProductCategory();
$productCategory
->setProduct($event->getProduct())
->setCategoryId($event->getCategoryId())
->setDefaultCategory(false)
->save()
;
}
}
public function removeCategory(ProductDeleteCategoryEvent $event)
{
$productCategory = ProductCategoryQuery::create()
->filterByProduct($event->getProduct())
->filterByCategoryId($event->getCategoryId())
->findOne();
if ($productCategory != null) {
$productCategory->delete();
}
}
public function addAccessory(ProductAddAccessoryEvent $event)
{
if (AccessoryQuery::create()
->filterByAccessory($event->getAccessoryId())
->filterByProductId($event->getProduct()->getId())->count() <= 0) {
$accessory = new Accessory();
$accessory
->setDispatcher($event->getDispatcher())
->setProductId($event->getProduct()->getId())
->setAccessory($event->getAccessoryId())
->save()
;
}
}
public function removeAccessory(ProductDeleteAccessoryEvent $event)
{
$accessory = AccessoryQuery::create()
->filterByAccessory($event->getAccessoryId())
->filterByProductId($event->getProduct()->getId())->findOne()
;
if ($accessory !== null) {
$accessory
->setDispatcher($event->getDispatcher())
->delete()
;
}
}
public function setProductTemplate(ProductSetTemplateEvent $event)
{
$con = Propel::getWriteConnection(ProductTableMap::DATABASE_NAME);
$con->beginTransaction();
try {
$product = $event->getProduct();
// Delete all product feature relations
if (null !== $featureProducts = FeatureProductQuery::create()->findByProductId($product->getId())) {
/** @var \Thelia\Model\FeatureProduct $featureProduct */
foreach ($featureProducts as $featureProduct) {
$eventDelete = new FeatureProductDeleteEvent($product->getId(), $featureProduct->getFeatureId());
$event->getDispatcher()->dispatch(TheliaEvents::PRODUCT_FEATURE_DELETE_VALUE, $eventDelete);
}
}
// Delete all product attributes sale elements
ProductSaleElementsQuery::create()->filterByProduct($product)->delete($con);
// Update the product template
$template_id = $event->getTemplateId();
// Set it to null if it's zero.
if ($template_id <= 0) {
$template_id = null;
}
$product->setTemplateId($template_id)->save($con);
// Create a new default product sale element
$product->createProductSaleElement($con, 0, 0, 0, $event->getCurrencyId(), true);
$product->clearProductSaleElementss();
$event->setProduct($product);
// Store all the stuff !
$con->commit();
} catch (\Exception $ex) {
$con->rollback();
throw $ex;
}
}
/**
     * Changes accessory position, selecting absolute or relative change.
     *
     * @param UpdatePositionEvent $event
*/
public function updateAccessoryPosition(UpdatePositionEvent $event)
{
return $this->genericUpdatePosition(AccessoryQuery::create(), $event);
}
/**
     * Changes position, selecting absolute or relative change.
     *
     * @param UpdatePositionEvent $event
*/
public function updateContentPosition(UpdatePositionEvent $event)
{
return $this->genericUpdatePosition(ProductAssociatedContentQuery::create(), $event);
}
/**
* Update the value of a product feature.
*
* @param FeatureProductUpdateEvent $event
*/
public function updateFeatureProductValue(FeatureProductUpdateEvent $event)
{
        // If the feature is not free text, it may have one or more values.
        // If the value exists, we do not change it.
        // If the value does not exist, we create it.
        //
        // If the feature is free text, it has only a single value.
        // Either create or update it.
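        //
        // For example (illustrative): a free-text feature keeps one row whose
        // free-text value is overwritten below, while a list feature ends up
        // with one row per selected feature_av value.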
$featureProductQuery = FeatureProductQuery::create()
->filterByFeatureId($event->getFeatureId())
->filterByProductId($event->getProductId())
;
if ($event->getIsTextValue() !== true) {
$featureProductQuery->filterByFeatureAvId($event->getFeatureValue());
}
$featureProduct = $featureProductQuery->findOne();
if ($featureProduct == null) {
$featureProduct = new FeatureProduct();
$featureProduct
->setDispatcher($event->getDispatcher())
->setProductId($event->getProductId())
->setFeatureId($event->getFeatureId())
;
}
if ($event->getIsTextValue() == true) {
$featureProduct->setFreeTextValue($event->getFeatureValue());
} else {
$featureProduct->setFeatureAvId($event->getFeatureValue());
}
$featureProduct->save();
$event->setFeatureProduct($featureProduct);
}
/**
* Delete a product feature value
*
* @param FeatureProductDeleteEvent $event
*/
public function deleteFeatureProductValue(FeatureProductDeleteEvent $event)
{
FeatureProductQuery::create()
->filterByProductId($event->getProductId())
->filterByFeatureId($event->getFeatureId())
->delete()
;
}
public function deleteImagePSEAssociations(FileDeleteEvent $event)
{
$model = $event->getFileToDelete();
if ($model instanceof ProductImage) {
$model->getProductSaleElementsProductImages()->delete();
}
}
public function deleteDocumentPSEAssociations(FileDeleteEvent $event)
{
$model = $event->getFileToDelete();
if ($model instanceof ProductDocument) {
$model->getProductSaleElementsProductDocuments()->delete();
}
}
/**
* {@inheritDoc}
*/
public static function getSubscribedEvents()
{
return array(
TheliaEvents::PRODUCT_CREATE => array("create", 128),
TheliaEvents::PRODUCT_UPDATE => array("update", 128),
TheliaEvents::PRODUCT_DELETE => array("delete", 128),
TheliaEvents::PRODUCT_TOGGLE_VISIBILITY => array("toggleVisibility", 128),
TheliaEvents::PRODUCT_UPDATE_POSITION => array("updatePosition", 128),
TheliaEvents::PRODUCT_UPDATE_SEO => array("updateSeo", 128),
TheliaEvents::PRODUCT_ADD_CONTENT => array("addContent", 128),
TheliaEvents::PRODUCT_REMOVE_CONTENT => array("removeContent", 128),
TheliaEvents::PRODUCT_UPDATE_CONTENT_POSITION => array("updateContentPosition", 128),
TheliaEvents::PRODUCT_ADD_ACCESSORY => array("addAccessory", 128),
TheliaEvents::PRODUCT_REMOVE_ACCESSORY => array("removeAccessory", 128),
TheliaEvents::PRODUCT_UPDATE_ACCESSORY_POSITION => array("updateAccessoryPosition", 128),
TheliaEvents::PRODUCT_ADD_CATEGORY => array("addCategory", 128),
TheliaEvents::PRODUCT_REMOVE_CATEGORY => array("removeCategory", 128),
TheliaEvents::PRODUCT_SET_TEMPLATE => array("setProductTemplate", 128),
TheliaEvents::PRODUCT_FEATURE_UPDATE_VALUE => array("updateFeatureProductValue", 128),
TheliaEvents::PRODUCT_FEATURE_DELETE_VALUE => array("deleteFeatureProductValue", 128),
            // These two have to be executed before the others, hence the higher priority
TheliaEvents::IMAGE_DELETE => array("deleteImagePSEAssociations", 192),
TheliaEvents::DOCUMENT_DELETE => array("deleteDocumentPSEAssociations", 192),
);
}
}
| 1 | 11,490 | Please use camelCase instead of underscore_case | thelia-thelia | php |
@@ -87,7 +87,8 @@ def get_contents(bucket, key, ext, *, etag, version_id, s3_client, size):
s3_client=s3_client,
version_id=version_id
)
- content = extract_parquet(get_bytes(obj["Body"], compression), as_html=False)[0]
+ body, info = extract_parquet(get_bytes(obj["Body"], compression), as_html=False)
+ content = trim_to_bytes(f"{str(info)}\n{body}", ELASTIC_LIMIT_BYTES)
else:
content = get_plain_text(
bucket, | 1 | """
phone data into elastic for supported file extensions.
note: we truncate outbound documents to ELASTIC_LIMIT_BYTES bytes
(to bound memory pressure and request size to elastic)
"""
import datetime
import json
import pathlib
from urllib.parse import unquote, unquote_plus
import boto3
import botocore
import nbformat
from tenacity import retry, retry_if_exception, stop_after_attempt, wait_exponential
from t4_lambda_shared.preview import (
ELASTIC_LIMIT_BYTES,
ELASTIC_LIMIT_LINES,
extract_parquet,
get_bytes,
get_preview_lines,
trim_to_bytes
)
from document_queue import (
DocumentQueue,
CONTENT_INDEX_EXTS,
MAX_RETRY,
OBJECT_DELETE,
OBJECT_PUT
)
# 10 MB, see https://amzn.to/2xJpngN
NB_VERSION = 4 # default notebook version for nbformat
TEST_EVENT = "s3:TestEvent"
# we need to filter out GetObject and HeadObject calls generated by the present
# lambda in order to display accurate analytics in the Quilt catalog
# a custom user agent enables said filtration
USER_AGENT_EXTRA = " quilt3-lambdas-es-indexer"
def now_like_boto3():
"""ensure timezone UTC for consistency with boto3:
Example of what boto3 returns on head_object:
'LastModified': datetime.datetime(2019, 11, 6, 3, 1, 16, tzinfo=tzutc()),
"""
return datetime.datetime.now(tz=datetime.timezone.utc)
def should_retry_exception(exception):
"""don't retry certain 40X errors"""
error_code = exception.response.get('Error', {}).get('Code', 218)
return error_code not in ["402", "403", "404"]
def get_contents(bucket, key, ext, *, etag, version_id, s3_client, size):
"""get the byte contents of a file"""
if ext.endswith('.gz'):
compression = 'gz'
ext = ext[:-len('.gz')]
else:
compression = None
content = ""
if ext in CONTENT_INDEX_EXTS:
if ext == ".ipynb":
content = trim_to_bytes(
# we have no choice but to fetch the entire notebook, because we
# are going to parse it
# warning: huge notebooks could spike memory here
get_notebook_cells(
bucket,
key,
size,
compression,
etag=etag,
s3_client=s3_client,
version_id=version_id
),
ELASTIC_LIMIT_BYTES
)
elif ext == ".parquet":
obj = retry_s3(
"get",
bucket,
key,
size,
etag=etag,
s3_client=s3_client,
version_id=version_id
)
content = extract_parquet(get_bytes(obj["Body"], compression), as_html=False)[0]
else:
content = get_plain_text(
bucket,
key,
size,
compression,
etag=etag,
s3_client=s3_client,
version_id=version_id
)
return content
def extract_text(notebook_str):
""" Extract code and markdown
Args:
* nb - notebook as a string
Returns:
* str - select code and markdown source (and outputs)
Pre:
* notebook is well-formed per notebook version 4
* "cell_type" is defined for all cells
* "source" defined for all "code" and "markdown" cells
Throws:
* Anything nbformat.reads() can throw :( which is diverse and poorly
documented, hence the `except Exception` in handler()
Notes:
* Deliberately decided not to index output streams and display strings
because they were noisy and low value
* Tested this code against ~6400 Jupyter notebooks in
s3://alpha-quilt-storage/tree/notebook-search/
* Might be useful to index "cell_type" : "raw" in the future
See also:
* Format reference https://nbformat.readthedocs.io/en/latest/format_description.html
"""
formatted = nbformat.reads(notebook_str, as_version=NB_VERSION)
text = []
for cell in formatted.get("cells", []):
if "source" in cell and cell.get("cell_type") in ("code", "markdown"):
text.append(cell["source"])
return "\n".join(text)
def get_notebook_cells(bucket, key, size, compression, *, etag, s3_client, version_id):
"""extract cells for ipynb notebooks for indexing"""
text = ""
try:
obj = retry_s3(
"get",
bucket,
key,
size,
etag=etag,
s3_client=s3_client,
version_id=version_id
)
data = get_bytes(obj["Body"], compression)
notebook = data.getvalue().decode("utf-8")
try:
text = extract_text(notebook)
except (json.JSONDecodeError, nbformat.reader.NotJSONError):
print(f"Invalid JSON in {key}.")
except (KeyError, AttributeError) as err:
print(f"Missing key in {key}: {err}")
# there might be more errors than covered by test_read_notebook
# better not to fail altogether
        except Exception as exc:  # pylint: disable=broad-except
print(f"Exception in file {key}: {exc}")
except UnicodeDecodeError as uni:
print(f"Unicode decode error in {key}: {uni}")
return text
def get_plain_text(bucket, key, size, compression, *, etag, s3_client, version_id):
"""get plain text object contents"""
text = ""
try:
obj = retry_s3(
"get",
bucket,
key,
size,
etag=etag,
s3_client=s3_client,
limit=ELASTIC_LIMIT_BYTES,
version_id=version_id
)
lines = get_preview_lines(
obj["Body"],
compression,
ELASTIC_LIMIT_LINES,
ELASTIC_LIMIT_BYTES
)
text = '\n'.join(lines)
except UnicodeDecodeError as ex:
print(f"Unicode decode error in {key}", ex)
return text
def make_s3_client():
"""make a client with a custom user agent string so that we can
filter the present lambda's requests to S3 from object analytics"""
configuration = botocore.config.Config(user_agent_extra=USER_AGENT_EXTRA)
return boto3.client("s3", config=configuration)
def handler(event, context):
"""enumerate S3 keys in event, extract relevant data and metadata,
queue events, send to elastic via bulk() API
"""
# message is a proper SQS message, which either contains a single event
# (from the bucket notification system) or batch-many events as determined
# by enterprise/**/bulk_loader.py
# An exception that we'll want to re-raise after the batch sends
content_exception = None
for message in event["Records"]:
body = json.loads(message["body"])
body_message = json.loads(body["Message"])
if "Records" not in body_message:
if body_message.get("Event") == TEST_EVENT:
# Consume and ignore this event, which is an initial message from
# SQS; see https://forums.aws.amazon.com/thread.jspa?threadID=84331
continue
else:
print("Unexpected message['body']. No 'Records' key.", message)
raise Exception("Unexpected message['body']. No 'Records' key.")
batch_processor = DocumentQueue(context)
events = body_message.get("Records", [])
s3_client = make_s3_client()
# event is a single S3 event
for event_ in events:
try:
event_name = event_["eventName"]
# only process these two event types
if event_name not in [OBJECT_DELETE, OBJECT_PUT]:
continue
bucket = unquote(event_["s3"]["bucket"]["name"])
# In the grand tradition of IE6, S3 events turn spaces into '+'
key = unquote_plus(event_["s3"]["object"]["key"])
version_id = event_["s3"]["object"].get("versionId")
version_id = unquote(version_id) if version_id else None
etag = unquote(event_["s3"]["object"]["eTag"])
# Get two levels of extensions to handle files like .csv.gz
path = pathlib.PurePosixPath(key)
ext1 = path.suffix
ext2 = path.with_suffix('').suffix
ext = (ext2 + ext1).lower()
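                # e.g. key "data/events.csv.gz" gives ext1 ".gz" and
                # ext2 ".csv", so ext becomes ".csv.gz"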
# Handle delete first and then continue so that
# head_object and get_object (below) don't fail
if event_name == OBJECT_DELETE:
batch_processor.append(
event_name,
bucket=bucket,
ext=ext,
etag=etag,
key=key,
last_modified=now_like_boto3(),
text="",
version_id=version_id
)
continue
try:
head = retry_s3(
"head",
bucket,
key,
s3_client=s3_client,
version_id=version_id,
etag=etag
)
except botocore.exceptions.ClientError as exception:
# "null" version sometimes results in 403s for buckets
# that have changed versioning, retry without it
if (exception.response.get('Error', {}).get('Code') == "403"
and version_id == "null"):
head = retry_s3(
"head",
bucket,
key,
s3_client=s3_client,
version_id=None,
etag=etag
)
else:
raise exception
size = head["ContentLength"]
last_modified = head["LastModified"]
meta = head["Metadata"]
try:
text = get_contents(
bucket,
key,
ext,
etag=etag,
version_id=version_id,
s3_client=s3_client,
size=size
)
# we still want an entry for this document in elastic so that, e.g.,
# the file counts from elastic are correct. re-raise below.
                except Exception as exc:  # pylint: disable=broad-except
text = ""
content_exception = exc
print("Content extraction failed", exc, bucket, key, etag, version_id)
# decode Quilt-specific metadata
if meta and "helium" in meta:
try:
decoded_helium = json.loads(meta["helium"])
meta["helium"] = decoded_helium or {}
except (KeyError, json.JSONDecodeError):
print("Unable to parse Quilt 'helium' metadata", meta)
batch_processor.append(
event_name,
bucket=bucket,
key=key,
ext=ext,
meta=meta,
etag=etag,
version_id=version_id,
last_modified=last_modified,
size=size,
text=text
)
except botocore.exceptions.ClientError as boto_exc:
if not should_retry_exception(boto_exc):
continue
else:
print("Fatal exception for record", event_, boto_exc)
import traceback
traceback.print_tb(boto_exc.__traceback__)
raise boto_exc
# flush the queue
batch_processor.send_all()
# note: if there are multiple content exceptions in the batch, this will
# only raise the most recent one;
# re-raise so that get_contents() failures end up in the DLQ
if content_exception:
raise content_exception
def retry_s3(
operation,
bucket,
key,
size=None,
limit=None,
*,
etag,
version_id,
s3_client
):
"""retry head or get operation to S3 with; stop before we run out of time.
retry is necessary since, due to eventual consistency, we may not
always get the required version of the object.
"""
if operation == "head":
function_ = s3_client.head_object
elif operation == "get":
function_ = s3_client.get_object
else:
raise ValueError(f"unexpected operation: {operation}")
# Keyword arguments to function_
arguments = {
"Bucket": bucket,
"Key": key
}
if operation == 'get' and size and limit:
# can only request range if file is not empty
arguments['Range'] = f"bytes=0-{limit}"
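    # pin the read to a specific object generation: prefer the version id when
    # available, otherwise fall back to an ETag precondition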
if version_id:
arguments['VersionId'] = version_id
else:
arguments['IfMatch'] = etag
@retry(
# debug
reraise=True,
stop=stop_after_attempt(MAX_RETRY),
wait=wait_exponential(multiplier=2, min=4, max=30),
retry=(retry_if_exception(should_retry_exception))
)
def call():
"""local function so we can set stop_after_delay dynamically"""
# TODO: remove all this, stop_after_delay is not dynamically loaded anymore
return function_(**arguments)
return call()
| 1 | 18,353 | Ideally, we'd fold the schema into an expanded system_meta, but this is a good first step. | quiltdata-quilt | py |
@@ -95,6 +95,7 @@ public class TableMetadataParser {
static final String SNAPSHOT_ID = "snapshot-id";
static final String TIMESTAMP_MS = "timestamp-ms";
static final String SNAPSHOT_LOG = "snapshot-log";
+ static final String FIELDS = "fields";
public static void overwrite(TableMetadata metadata, OutputFile outputFile) {
internalWrite(metadata, outputFile, true); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonNode;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.StringWriter;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.SortedSet;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
import org.apache.iceberg.TableMetadata.SnapshotLogEntry;
import org.apache.iceberg.exceptions.RuntimeIOException;
import org.apache.iceberg.io.InputFile;
import org.apache.iceberg.io.OutputFile;
import org.apache.iceberg.util.JsonUtil;
public class TableMetadataParser {
public enum Codec {
NONE(""),
GZIP(".gz");
private final String extension;
Codec(String extension) {
this.extension = extension;
}
public static Codec fromName(String codecName) {
Preconditions.checkArgument(codecName != null, "Codec name is null");
return Codec.valueOf(codecName.toUpperCase(Locale.ENGLISH));
}
public static Codec fromFileName(String fileName) {
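      // e.g. "v3.gz.metadata.json" and the legacy "v3.metadata.json.gz" both map to
      // GZIP, while "v3.metadata.json" maps to NONE (file names are illustrative)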
Preconditions.checkArgument(fileName.contains(".metadata.json"),
"%s is not a valid metadata file", fileName);
// we have to be backward-compatible with .metadata.json.gz files
if (fileName.endsWith(".metadata.json.gz")) {
return Codec.GZIP;
}
String fileNameWithoutSuffix = fileName.substring(0, fileName.lastIndexOf(".metadata.json"));
if (fileNameWithoutSuffix.endsWith(Codec.GZIP.extension)) {
return Codec.GZIP;
} else {
return Codec.NONE;
}
}
}
private TableMetadataParser() {}
// visible for testing
static final String FORMAT_VERSION = "format-version";
static final String TABLE_UUID = "table-uuid";
static final String LOCATION = "location";
static final String LAST_UPDATED_MILLIS = "last-updated-ms";
static final String LAST_COLUMN_ID = "last-column-id";
static final String SCHEMA = "schema";
static final String PARTITION_SPEC = "partition-spec";
static final String PARTITION_SPECS = "partition-specs";
static final String DEFAULT_SPEC_ID = "default-spec-id";
static final String PROPERTIES = "properties";
static final String CURRENT_SNAPSHOT_ID = "current-snapshot-id";
static final String SNAPSHOTS = "snapshots";
static final String SNAPSHOT_ID = "snapshot-id";
static final String TIMESTAMP_MS = "timestamp-ms";
static final String SNAPSHOT_LOG = "snapshot-log";
public static void overwrite(TableMetadata metadata, OutputFile outputFile) {
internalWrite(metadata, outputFile, true);
}
public static void write(TableMetadata metadata, OutputFile outputFile) {
internalWrite(metadata, outputFile, false);
}
public static void internalWrite(
TableMetadata metadata, OutputFile outputFile, boolean overwrite) {
boolean isGzip = Codec.fromFileName(outputFile.location()) == Codec.GZIP;
OutputStream stream = overwrite ? outputFile.createOrOverwrite() : outputFile.create();
try (OutputStreamWriter writer = new OutputStreamWriter(isGzip ? new GZIPOutputStream(stream) : stream)) {
JsonGenerator generator = JsonUtil.factory().createGenerator(writer);
generator.useDefaultPrettyPrinter();
toJson(metadata, generator);
generator.flush();
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to write json to file: %s", outputFile);
}
}
public static String getFileExtension(String codecName) {
return getFileExtension(Codec.fromName(codecName));
}
public static String getFileExtension(Codec codec) {
return codec.extension + ".metadata.json";
}
public static String getOldFileExtension(Codec codec) {
// we have to be backward-compatible with .metadata.json.gz files
return ".metadata.json" + codec.extension;
}
public static String toJson(TableMetadata metadata) {
StringWriter writer = new StringWriter();
try {
JsonGenerator generator = JsonUtil.factory().createGenerator(writer);
toJson(metadata, generator);
generator.flush();
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to write json for: %s", metadata);
}
return writer.toString();
}
private static void toJson(TableMetadata metadata, JsonGenerator generator) throws IOException {
generator.writeStartObject();
generator.writeNumberField(FORMAT_VERSION, TableMetadata.TABLE_FORMAT_VERSION);
generator.writeStringField(TABLE_UUID, metadata.uuid());
generator.writeStringField(LOCATION, metadata.location());
generator.writeNumberField(LAST_UPDATED_MILLIS, metadata.lastUpdatedMillis());
generator.writeNumberField(LAST_COLUMN_ID, metadata.lastColumnId());
generator.writeFieldName(SCHEMA);
SchemaParser.toJson(metadata.schema(), generator);
// for older readers, continue writing the default spec as "partition-spec"
generator.writeFieldName(PARTITION_SPEC);
PartitionSpecParser.toJsonFields(metadata.spec(), generator);
// write the default spec ID and spec list
generator.writeNumberField(DEFAULT_SPEC_ID, metadata.defaultSpecId());
generator.writeArrayFieldStart(PARTITION_SPECS);
for (PartitionSpec spec : metadata.specs()) {
PartitionSpecParser.toJson(spec, generator);
}
generator.writeEndArray();
generator.writeObjectFieldStart(PROPERTIES);
for (Map.Entry<String, String> keyValue : metadata.properties().entrySet()) {
generator.writeStringField(keyValue.getKey(), keyValue.getValue());
}
generator.writeEndObject();
generator.writeNumberField(CURRENT_SNAPSHOT_ID,
metadata.currentSnapshot() != null ? metadata.currentSnapshot().snapshotId() : -1);
generator.writeArrayFieldStart(SNAPSHOTS);
for (Snapshot snapshot : metadata.snapshots()) {
SnapshotParser.toJson(snapshot, generator);
}
generator.writeEndArray();
generator.writeArrayFieldStart(SNAPSHOT_LOG);
for (HistoryEntry logEntry : metadata.snapshotLog()) {
generator.writeStartObject();
generator.writeNumberField(TIMESTAMP_MS, logEntry.timestampMillis());
generator.writeNumberField(SNAPSHOT_ID, logEntry.snapshotId());
generator.writeEndObject();
}
generator.writeEndArray();
generator.writeEndObject();
}
public static TableMetadata read(TableOperations ops, InputFile file) {
Codec codec = Codec.fromFileName(file.location());
try (InputStream is = codec == Codec.GZIP ? new GZIPInputStream(file.newStream()) : file.newStream()) {
return fromJson(ops, file, JsonUtil.mapper().readValue(is, JsonNode.class));
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to read file: %s", file);
}
}
static TableMetadata fromJson(TableOperations ops, InputFile file, JsonNode node) {
Preconditions.checkArgument(node.isObject(),
"Cannot parse metadata from a non-object: %s", node);
int formatVersion = JsonUtil.getInt(FORMAT_VERSION, node);
Preconditions.checkArgument(formatVersion == TableMetadata.TABLE_FORMAT_VERSION,
"Cannot read unsupported version %d", formatVersion);
String uuid = JsonUtil.getStringOrNull(TABLE_UUID, node);
String location = JsonUtil.getString(LOCATION, node);
int lastAssignedColumnId = JsonUtil.getInt(LAST_COLUMN_ID, node);
Schema schema = SchemaParser.fromJson(node.get(SCHEMA));
JsonNode specArray = node.get(PARTITION_SPECS);
List<PartitionSpec> specs;
int defaultSpecId;
if (specArray != null) {
Preconditions.checkArgument(specArray.isArray(),
"Cannot parse partition specs from non-array: %s", specArray);
// default spec ID is required when the spec array is present
defaultSpecId = JsonUtil.getInt(DEFAULT_SPEC_ID, node);
// parse the spec array
ImmutableList.Builder<PartitionSpec> builder = ImmutableList.builder();
for (JsonNode spec : specArray) {
builder.add(PartitionSpecParser.fromJson(schema, spec));
}
specs = builder.build();
} else {
      // the single "partition-spec" field is required for older readers, and is always
      // set to the default spec when the spec array is written. it is only used when the
      // spec array is missing, which indicates that the table metadata was written by an
      // older writer.
defaultSpecId = TableMetadata.INITIAL_SPEC_ID;
specs = ImmutableList.of(PartitionSpecParser.fromJsonFields(
schema, TableMetadata.INITIAL_SPEC_ID, node.get(PARTITION_SPEC)));
}
Map<String, String> properties = JsonUtil.getStringMap(PROPERTIES, node);
long currentVersionId = JsonUtil.getLong(CURRENT_SNAPSHOT_ID, node);
long lastUpdatedMillis = JsonUtil.getLong(LAST_UPDATED_MILLIS, node);
JsonNode snapshotArray = node.get(SNAPSHOTS);
Preconditions.checkArgument(snapshotArray.isArray(),
"Cannot parse snapshots from non-array: %s", snapshotArray);
List<Snapshot> snapshots = Lists.newArrayListWithExpectedSize(snapshotArray.size());
Iterator<JsonNode> iterator = snapshotArray.elements();
while (iterator.hasNext()) {
snapshots.add(SnapshotParser.fromJson(ops, iterator.next()));
}
SortedSet<SnapshotLogEntry> entries =
Sets.newTreeSet(Comparator.comparingLong(SnapshotLogEntry::timestampMillis));
if (node.has(SNAPSHOT_LOG)) {
Iterator<JsonNode> logIterator = node.get(SNAPSHOT_LOG).elements();
while (logIterator.hasNext()) {
JsonNode entryNode = logIterator.next();
entries.add(new SnapshotLogEntry(
JsonUtil.getLong(TIMESTAMP_MS, entryNode), JsonUtil.getLong(SNAPSHOT_ID, entryNode)));
}
}
return new TableMetadata(ops, file, uuid, location,
lastUpdatedMillis, lastAssignedColumnId, schema, defaultSpecId, specs, properties,
currentVersionId, snapshots, ImmutableList.copyOf(entries.iterator()));
}
}
| 1 | 15,746 | Is this used? | apache-iceberg | java |
@@ -16,7 +16,7 @@
using System;
using System.Diagnostics;
-#if NET45 || NET46
+#if NET452 || NET46
using System.Diagnostics.CodeAnalysis;
using System.Threading;
#endif | 1 | // <copyright file="PreciseTimestamp.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Diagnostics;
#if NET45 || NET46
using System.Diagnostics.CodeAnalysis;
using System.Threading;
#endif
#if API
namespace OpenTelemetry.Api.Utils
#else
namespace OpenTelemetry.Utils
#endif
{
internal class PreciseTimestamp
{
/// <summary>
/// Multiplier to convert Stopwatch ticks to TimeSpan ticks.
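        /// For example, with a 10 MHz Stopwatch frequency this multiplier is exactly 1.0,
        /// since TimeSpan has 10,000,000 ticks per second.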
/// </summary>
internal static readonly double StopwatchTicksToTimeSpanTicks = (double)TimeSpan.TicksPerSecond / Stopwatch.Frequency;
#if NET45 || NET46
private static readonly Timer SyncTimeUpdater;
private static TimeSync timeSync = new TimeSync();
[SuppressMessage("Microsoft.Performance", "CA1810:InitializeReferenceTypeStaticFieldsInline", Justification = "Enforcing static fields initialization.")]
static PreciseTimestamp()
{
SyncTimeUpdater = InitializeSyncTimer();
}
#endif
/// <summary>
/// Returns high resolution (1 DateTime tick) current UTC DateTime.
/// </summary>
/// <returns>DateTime UTC now with high resolution.</returns>
public static DateTimeOffset GetUtcNow()
{
#if NET45 || NET46
// DateTime.UtcNow accuracy on .NET Framework is ~16ms, this method
// uses combination of Stopwatch and DateTime to calculate accurate UtcNow.
var tmp = timeSync;
            // Stopwatch ticks need to be converted to DateTime ticks
long dateTimeTicksDiff = (long)((Stopwatch.GetTimestamp() - tmp.SyncStopwatchTicks) * StopwatchTicksToTimeSpanTicks);
// DateTime.AddSeconds (or Milliseconds) rounds value to 1 ms, use AddTicks to prevent it
return tmp.SyncUtcNow.AddTicks(dateTimeTicksDiff);
#else
return DateTimeOffset.UtcNow;
#endif
}
#if NET45 || NET46
private static void Sync()
{
// wait for DateTimeOffset.UtcNow update to the next granular value
Thread.Sleep(1);
timeSync = new TimeSync();
}
private static Timer InitializeSyncTimer()
{
Timer timer;
// Don't capture the current ExecutionContext and its AsyncLocals onto the timer causing them to live forever
bool restoreFlow = false;
try
{
if (!ExecutionContext.IsFlowSuppressed())
{
ExecutionContext.SuppressFlow();
restoreFlow = true;
}
// fire timer every 2 hours, Stopwatch is not very precise over long periods of time,
// so we need to correct it from time to time
// https://docs.microsoft.com/en-us/windows/desktop/SysInfo/acquiring-high-resolution-time-stamps
timer = new Timer(s => { Sync(); }, null, 0, 7200000);
}
finally
{
// Restore the current ExecutionContext
if (restoreFlow)
{
ExecutionContext.RestoreFlow();
}
}
return timer;
}
private class TimeSync
{
public readonly DateTimeOffset SyncUtcNow = DateTimeOffset.UtcNow;
public readonly long SyncStopwatchTicks = Stopwatch.GetTimestamp();
}
#endif
}
}
| 1 | 14,366 | Should we use `#if NETFRAMEWORK` for consistency with the other projects? | open-telemetry-opentelemetry-dotnet | .cs |
@@ -46,7 +46,12 @@ type DecodedContainerRecord struct {
func (d *ContianerRecordDecoder) DecodeContainerRecord(ctx context.Context, record *v1alpha1.Record) (decoded DecodedContainerRecord, err error) {
var pod v1.Pod
- podId, containerName := controller.ParseNamespacedNameContainer(record.Id)
+ podId, containerName, err := controller.ParseNamespacedNameContainer(record.Id)
+ if err != nil {
+ // TODO: organize the error in a better way
+ err = NewFailToFindContainer(pod.Namespace, pod.Name, containerName, err)
+ return
+ }
err = d.Client.Get(ctx, podId, &pod)
if err != nil {
// TODO: organize the error in a better way | 1 | // Copyright 2021 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package utils
import (
"context"
v1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/chaos-mesh/chaos-mesh/api/v1alpha1"
"github.com/chaos-mesh/chaos-mesh/controllers/utils/chaosdaemon"
"github.com/chaos-mesh/chaos-mesh/controllers/utils/controller"
chaosdaemonclient "github.com/chaos-mesh/chaos-mesh/pkg/chaosdaemon/client"
)
type ContianerRecordDecoder struct {
client.Client
*chaosdaemon.ChaosDaemonClientBuilder
}
func NewContainerRecordDecoder(c client.Client, builder *chaosdaemon.ChaosDaemonClientBuilder) *ContianerRecordDecoder {
return &ContianerRecordDecoder{
Client: c,
ChaosDaemonClientBuilder: builder,
}
}
type DecodedContainerRecord struct {
PbClient chaosdaemonclient.ChaosDaemonClientInterface
ContainerId string
Pod *v1.Pod
}
func (d *ContianerRecordDecoder) DecodeContainerRecord(ctx context.Context, record *v1alpha1.Record) (decoded DecodedContainerRecord, err error) {
var pod v1.Pod
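	// record.Id presumably encodes the namespaced pod name plus the container
	// name (judging by ParseNamespacedNameContainer); both parts are recovered
	// from it here.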
podId, containerName := controller.ParseNamespacedNameContainer(record.Id)
err = d.Client.Get(ctx, podId, &pod)
if err != nil {
// TODO: organize the error in a better way
err = NewFailToFindContainer(pod.Namespace, pod.Name, containerName, err)
return
}
decoded.Pod = &pod
if len(pod.Status.ContainerStatuses) == 0 {
// TODO: organize the error in a better way
err = NewFailToFindContainer(pod.Namespace, pod.Name, containerName, nil)
return
}
for _, container := range pod.Status.ContainerStatuses {
if container.Name == containerName {
decoded.ContainerId = container.ContainerID
break
}
}
if len(decoded.ContainerId) == 0 {
// TODO: organize the error in a better way
err = NewFailToFindContainer(pod.Namespace, pod.Name, containerName, nil)
return
}
decoded.PbClient, err = d.ChaosDaemonClientBuilder.Build(ctx, &pod)
if err != nil {
return
}
return
}
 | 1 | 23,482 | If parsing fails, both `containerName` and `podId` will be empty strings; it makes no sense to define this error here, I suggest just returning it. | chaos-mesh-chaos-mesh | go |
@@ -61,17 +61,6 @@ export default Service.extend({
}
},
- sendTestRequest(testApplicationId) {
- let url = `${API_URL}/photos/random`;
- let headers = {};
-
- headers.Authorization = `Client-ID ${testApplicationId}`;
- headers['Accept-Version'] = API_VERSION;
-
- return fetch(url, {headers})
- .then((response) => this._checkStatus(response));
- },
-
actions: {
updateSearch(term) {
if (term === this.get('searchTerm')) { | 1 | import Service from '@ember/service';
import fetch from 'fetch';
import {inject as injectService} from '@ember/service';
import {isEmpty} from '@ember/utils';
import {or} from '@ember/object/computed';
import {reject, resolve} from 'rsvp';
import {task, taskGroup, timeout} from 'ember-concurrency';
const API_URL = 'https://api.unsplash.com';
const API_VERSION = 'v1';
const DEBOUNCE_MS = 600;
export default Service.extend({
config: injectService(),
settings: injectService(),
columnCount: 3,
columns: null,
error: '',
photos: null,
searchTerm: '',
_columnHeights: null,
_pagination: null,
applicationId: or('config.unsplashAPI.applicationId', 'settings.unsplash.applicationId'),
isLoading: or('_search.isRunning', '_loadingTasks.isRunning'),
init() {
this._super(...arguments);
this._reset();
},
loadNew() {
this._reset();
return this.get('_loadNew').perform();
},
loadNextPage() {
// protect against scroll trigger firing when the photos are reset
if (this.get('_search.isRunning')) {
return;
}
if (isEmpty(this.get('photos'))) {
return this.get('_loadNew').perform();
}
if (this._pagination.next) {
return this.get('_loadNextPage').perform();
}
// TODO: return error?
return reject();
},
changeColumnCount(newColumnCount) {
if (newColumnCount !== this.get('columnCount')) {
this.set('columnCount', newColumnCount);
this._resetColumns();
}
},
sendTestRequest(testApplicationId) {
let url = `${API_URL}/photos/random`;
let headers = {};
headers.Authorization = `Client-ID ${testApplicationId}`;
headers['Accept-Version'] = API_VERSION;
return fetch(url, {headers})
.then((response) => this._checkStatus(response));
},
actions: {
updateSearch(term) {
if (term === this.get('searchTerm')) {
return;
}
this.set('searchTerm', term);
this._reset();
if (term) {
return this.get('_search').perform(term);
} else {
return this.get('_loadNew').perform();
}
}
},
_loadingTasks: taskGroup().drop(),
_loadNew: task(function* () {
let url = `${API_URL}/photos?per_page=30`;
yield this._makeRequest(url);
}).group('_loadingTasks'),
_loadNextPage: task(function* () {
yield this._makeRequest(this._pagination.next);
}).group('_loadingTasks'),
_retryLastRequest: task(function* () {
yield this._makeRequest(this._lastRequestUrl);
}).group('_loadingTasks'),
_search: task(function* (term) {
yield timeout(DEBOUNCE_MS);
let url = `${API_URL}/search/photos?query=${term}&per_page=30`;
yield this._makeRequest(url);
}).restartable(),
_addPhotosFromResponse(response) {
let photos = response.results || response;
photos.forEach((photo) => this._addPhoto(photo));
},
_addPhoto(photo) {
// pre-calculate ratio for later use
photo.ratio = photo.height / photo.width;
// add to general photo list
this.get('photos').pushObject(photo);
// add to least populated column
this._addPhotoToColumns(photo);
},
_addPhotoToColumns(photo) {
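        // greedy masonry layout: each new photo goes into whichever column is
        // currently the shortest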
let min = Math.min(...this._columnHeights);
let columnIndex = this._columnHeights.indexOf(min);
// use a fixed width when calculating height to compensate for different
// overall image sizes
this._columnHeights[columnIndex] += 300 * photo.ratio;
this.get('columns')[columnIndex].pushObject(photo);
},
_reset() {
this.set('photos', []);
this._pagination = {};
this._resetColumns();
},
_resetColumns() {
let columns = [];
let columnHeights = [];
// pre-fill column arrays based on columnCount
for (let i = 0; i < this.get('columnCount'); i++) {
columns[i] = [];
columnHeights[i] = 0;
}
this.set('columns', columns);
this._columnHeights = columnHeights;
if (!isEmpty(this.get('photos'))) {
this.get('photos').forEach((photo) => {
this._addPhotoToColumns(photo);
});
}
},
_makeRequest(url) {
let headers = {};
// clear any previous error
this.set('error', '');
// store the url so it can be retried if needed
this._lastRequestUrl = url;
headers.Authorization = `Client-ID ${this.get('applicationId')}`;
headers['Accept-Version'] = API_VERSION;
headers['App-Pragma'] = 'no-cache';
return fetch(url, {headers})
.then((response) => this._checkStatus(response))
.then((response) => this._extractPagination(response))
.then((response) => response.json())
.then((response) => this._addPhotosFromResponse(response))
.catch(() => {
// if the error text isn't already set then we've get a connection error from `fetch`
if (!this.get('error')) {
this.set('error', 'Uh-oh! Trouble reaching the Unsplash API, please check your connection');
}
});
},
_checkStatus(response) {
// successful request
if (response.status >= 200 && response.status < 300) {
return resolve(response);
}
let errorText = '';
let responseTextPromise = resolve();
if (response.headers.map['content-type'] === 'application/json') {
responseTextPromise = response.json().then((json) => {
return json.errors[0];
});
} else if (response.headers.map['content-type'] === 'text/xml') {
responseTextPromise = response.text();
}
return responseTextPromise.then((responseText) => {
if (response.status === 403 && response.headers.map['x-ratelimit-remaining'] === '0') {
// we've hit the ratelimit on the API
errorText = 'Unsplash API rate limit reached, please try again later.';
}
errorText = errorText || responseText || `Error ${response.status}: Uh-oh! Trouble reaching the Unsplash API`;
// set error text for display in UI
this.set('error', errorText);
// throw error to prevent further processing
let error = new Error(errorText);
error.response = response;
throw error;
});
},
_extractPagination(response) {
let pagination = {};
let linkRegex = new RegExp('<(.*)>; rel="(.*)"');
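        // a Link header looks roughly like
        //   <https://api.unsplash.com/photos?page=2>; rel="next", <...>; rel="last"
        // so each comma-separated entry yields a URL keyed by its rel value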
let {link} = response.headers.map;
if (link) {
link.split(',').forEach((link) => {
let [, url, rel] = linkRegex.exec(link);
pagination[rel] = url;
});
}
this._pagination = pagination;
return response;
}
});
| 1 | 8,618 | line 26 can be removed i think > applicationId: or('config.unsplashAPI.applicationId', 'settings.unsplash.applicationId'), | TryGhost-Admin | js |
@@ -340,9 +340,9 @@ void Config::CheckParamConflict() {
Log::Warning("CUDA currently requires double precision calculations.");
gpu_use_dp = true;
}
- // linear tree learner must be serial type and cpu device
+ // linear tree learner must be serial type and run on cpu device
if (linear_tree) {
- if (device_type == std::string("gpu")) {
+ if (device_type != std::string("cpu")) {
device_type = "cpu";
Log::Warning("Linear tree learner only works with CPU.");
} | 1 | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#include <LightGBM/config.h>
#include <LightGBM/cuda/vector_cudahost.h>
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/log.h>
#include <LightGBM/utils/random.h>
#include <limits>
namespace LightGBM {
void Config::KV2Map(std::unordered_map<std::string, std::string>* params, const char* kv) {
std::vector<std::string> tmp_strs = Common::Split(kv, '=');
if (tmp_strs.size() == 2 || tmp_strs.size() == 1) {
std::string key = Common::RemoveQuotationSymbol(Common::Trim(tmp_strs[0]));
std::string value = "";
if (tmp_strs.size() == 2) {
value = Common::RemoveQuotationSymbol(Common::Trim(tmp_strs[1]));
}
if (key.size() > 0) {
auto value_search = params->find(key);
if (value_search == params->end()) { // not set
params->emplace(key, value);
} else {
Log::Warning("%s is set=%s, %s=%s will be ignored. Current value: %s=%s",
key.c_str(), value_search->second.c_str(), key.c_str(), value.c_str(),
key.c_str(), value_search->second.c_str());
}
}
} else {
Log::Warning("Unknown parameter %s", kv);
}
}
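// Parses a whitespace-separated "key=value" string into a parameter map,
// e.g. Str2Map("max_depth=6 learning_rate=0.1") yields
// {"max_depth": "6", "learning_rate": "0.1"} after alias normalization.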
std::unordered_map<std::string, std::string> Config::Str2Map(const char* parameters) {
std::unordered_map<std::string, std::string> params;
auto args = Common::Split(parameters, " \t\n\r");
for (auto arg : args) {
KV2Map(¶ms, Common::Trim(arg).c_str());
}
ParameterAlias::KeyAliasTransform(¶ms);
return params;
}
void GetBoostingType(const std::unordered_map<std::string, std::string>& params, std::string* boosting) {
std::string value;
if (Config::GetString(params, "boosting", &value)) {
std::transform(value.begin(), value.end(), value.begin(), Common::tolower);
if (value == std::string("gbdt") || value == std::string("gbrt")) {
*boosting = "gbdt";
} else if (value == std::string("dart")) {
*boosting = "dart";
} else if (value == std::string("goss")) {
*boosting = "goss";
} else if (value == std::string("rf") || value == std::string("random_forest")) {
*boosting = "rf";
} else {
Log::Fatal("Unknown boosting type %s", value.c_str());
}
}
}
void ParseMetrics(const std::string& value, std::vector<std::string>* out_metric) {
std::unordered_set<std::string> metric_sets;
out_metric->clear();
std::vector<std::string> metrics = Common::Split(value.c_str(), ',');
for (auto& met : metrics) {
auto type = ParseMetricAlias(met);
if (metric_sets.count(type) <= 0) {
out_metric->push_back(type);
metric_sets.insert(type);
}
}
}
void GetObjectiveType(const std::unordered_map<std::string, std::string>& params, std::string* objective) {
std::string value;
if (Config::GetString(params, "objective", &value)) {
std::transform(value.begin(), value.end(), value.begin(), Common::tolower);
*objective = ParseObjectiveAlias(value);
}
}
void GetMetricType(const std::unordered_map<std::string, std::string>& params, std::vector<std::string>* metric) {
std::string value;
if (Config::GetString(params, "metric", &value)) {
std::transform(value.begin(), value.end(), value.begin(), Common::tolower);
ParseMetrics(value, metric);
}
// add names of objective function if not providing metric
if (metric->empty() && value.size() == 0) {
if (Config::GetString(params, "objective", &value)) {
std::transform(value.begin(), value.end(), value.begin(), Common::tolower);
ParseMetrics(value, metric);
}
}
}
void GetTaskType(const std::unordered_map<std::string, std::string>& params, TaskType* task) {
std::string value;
if (Config::GetString(params, "task", &value)) {
std::transform(value.begin(), value.end(), value.begin(), Common::tolower);
if (value == std::string("train") || value == std::string("training")) {
*task = TaskType::kTrain;
} else if (value == std::string("predict") || value == std::string("prediction")
|| value == std::string("test")) {
*task = TaskType::kPredict;
} else if (value == std::string("convert_model")) {
*task = TaskType::kConvertModel;
} else if (value == std::string("refit") || value == std::string("refit_tree")) {
*task = TaskType::KRefitTree;
} else {
Log::Fatal("Unknown task type %s", value.c_str());
}
}
}
void GetDeviceType(const std::unordered_map<std::string, std::string>& params, std::string* device_type) {
std::string value;
if (Config::GetString(params, "device_type", &value)) {
std::transform(value.begin(), value.end(), value.begin(), Common::tolower);
if (value == std::string("cpu")) {
*device_type = "cpu";
} else if (value == std::string("gpu")) {
*device_type = "gpu";
} else if (value == std::string("cuda")) {
*device_type = "cuda";
} else {
Log::Fatal("Unknown device type %s", value.c_str());
}
}
}
void GetTreeLearnerType(const std::unordered_map<std::string, std::string>& params, std::string* tree_learner) {
std::string value;
if (Config::GetString(params, "tree_learner", &value)) {
std::transform(value.begin(), value.end(), value.begin(), Common::tolower);
if (value == std::string("serial")) {
*tree_learner = "serial";
} else if (value == std::string("feature") || value == std::string("feature_parallel")) {
*tree_learner = "feature";
} else if (value == std::string("data") || value == std::string("data_parallel")) {
*tree_learner = "data";
} else if (value == std::string("voting") || value == std::string("voting_parallel")) {
*tree_learner = "voting";
} else {
Log::Fatal("Unknown tree learner type %s", value.c_str());
}
}
}
void Config::GetAucMuWeights() {
if (auc_mu_weights.empty()) {
// equal weights for all classes
auc_mu_weights_matrix = std::vector<std::vector<double>> (num_class, std::vector<double>(num_class, 1));
for (size_t i = 0; i < static_cast<size_t>(num_class); ++i) {
auc_mu_weights_matrix[i][i] = 0;
}
} else {
auc_mu_weights_matrix = std::vector<std::vector<double>> (num_class, std::vector<double>(num_class, 0));
if (auc_mu_weights.size() != static_cast<size_t>(num_class * num_class)) {
Log::Fatal("auc_mu_weights must have %d elements, but found %d", num_class * num_class, auc_mu_weights.size());
}
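    // auc_mu_weights is a flattened row-major num_class x num_class matrix,
    // so entry (i, j) lives at index i * num_class + j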
for (size_t i = 0; i < static_cast<size_t>(num_class); ++i) {
for (size_t j = 0; j < static_cast<size_t>(num_class); ++j) {
if (i == j) {
auc_mu_weights_matrix[i][j] = 0;
if (std::fabs(auc_mu_weights[i * num_class + j]) > kZeroThreshold) {
Log::Info("AUC-mu matrix must have zeros on diagonal. Overwriting value in position %d of auc_mu_weights with 0.", i * num_class + j);
}
} else {
if (std::fabs(auc_mu_weights[i * num_class + j]) < kZeroThreshold) {
Log::Fatal("AUC-mu matrix must have non-zero values for non-diagonal entries. Found zero value in position %d of auc_mu_weights.", i * num_class + j);
}
auc_mu_weights_matrix[i][j] = auc_mu_weights[i * num_class + j];
}
}
}
}
}
void Config::GetInteractionConstraints() {
if (interaction_constraints == "") {
interaction_constraints_vector = std::vector<std::vector<int>>();
} else {
interaction_constraints_vector = Common::StringToArrayofArrays<int>(interaction_constraints, '[', ']', ',');
}
}
void Config::Set(const std::unordered_map<std::string, std::string>& params) {
// generate seeds by seed.
if (GetInt(params, "seed", &seed)) {
Random rand(seed);
int int_max = std::numeric_limits<int16_t>::max();
data_random_seed = static_cast<int>(rand.NextShort(0, int_max));
bagging_seed = static_cast<int>(rand.NextShort(0, int_max));
drop_seed = static_cast<int>(rand.NextShort(0, int_max));
feature_fraction_seed = static_cast<int>(rand.NextShort(0, int_max));
objective_seed = static_cast<int>(rand.NextShort(0, int_max));
extra_seed = static_cast<int>(rand.NextShort(0, int_max));
}
GetTaskType(params, &task);
GetBoostingType(params, &boosting);
GetMetricType(params, &metric);
GetObjectiveType(params, &objective);
GetDeviceType(params, &device_type);
if (device_type == std::string("cuda")) {
LGBM_config_::current_device = lgbm_device_cuda;
}
GetTreeLearnerType(params, &tree_learner);
GetMembersFromString(params);
GetAucMuWeights();
GetInteractionConstraints();
// sort eval_at
std::sort(eval_at.begin(), eval_at.end());
std::vector<std::string> new_valid;
for (size_t i = 0; i < valid.size(); ++i) {
if (valid[i] != data) {
// Only push the non-training data
new_valid.push_back(valid[i]);
} else {
is_provide_training_metric = true;
}
}
valid = new_valid;
// check for conflicts
CheckParamConflict();
if (verbosity == 1) {
LightGBM::Log::ResetLogLevel(LightGBM::LogLevel::Info);
} else if (verbosity == 0) {
LightGBM::Log::ResetLogLevel(LightGBM::LogLevel::Warning);
} else if (verbosity >= 2) {
LightGBM::Log::ResetLogLevel(LightGBM::LogLevel::Debug);
} else {
LightGBM::Log::ResetLogLevel(LightGBM::LogLevel::Fatal);
}
}
bool CheckMultiClassObjective(const std::string& objective) {
return (objective == std::string("multiclass") || objective == std::string("multiclassova"));
}
void Config::CheckParamConflict() {
// check if objective, metric, and num_class match
int num_class_check = num_class;
bool objective_type_multiclass = CheckMultiClassObjective(objective) || (objective == std::string("custom") && num_class_check > 1);
if (objective_type_multiclass) {
if (num_class_check <= 1) {
Log::Fatal("Number of classes should be specified and greater than 1 for multiclass training");
}
} else {
if (task == TaskType::kTrain && num_class_check != 1) {
Log::Fatal("Number of classes must be 1 for non-multiclass training");
}
}
for (std::string metric_type : metric) {
bool metric_type_multiclass = (CheckMultiClassObjective(metric_type)
|| metric_type == std::string("multi_logloss")
|| metric_type == std::string("multi_error")
|| metric_type == std::string("auc_mu")
|| (metric_type == std::string("custom") && num_class_check > 1));
if ((objective_type_multiclass && !metric_type_multiclass)
|| (!objective_type_multiclass && metric_type_multiclass)) {
Log::Fatal("Multiclass objective and metrics don't match");
}
}
if (num_machines > 1) {
is_parallel = true;
} else {
is_parallel = false;
tree_learner = "serial";
}
bool is_single_tree_learner = tree_learner == std::string("serial");
if (is_single_tree_learner) {
is_parallel = false;
num_machines = 1;
}
if (is_single_tree_learner || tree_learner == std::string("feature")) {
is_data_based_parallel = false;
} else if (tree_learner == std::string("data")
|| tree_learner == std::string("voting")) {
is_data_based_parallel = true;
if (histogram_pool_size >= 0
&& tree_learner == std::string("data")) {
Log::Warning("Histogram LRU queue was enabled (histogram_pool_size=%f).\n"
"Will disable this to reduce communication costs",
histogram_pool_size);
// Change pool size to -1 (no limit) when using data parallel to reduce communication costs
histogram_pool_size = -1;
}
}
if (is_data_based_parallel) {
if (!forcedsplits_filename.empty()) {
Log::Fatal("Don't support forcedsplits in %s tree learner",
tree_learner.c_str());
}
}
// Check max_depth and num_leaves
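  // a fully grown binary tree of depth max_depth has 2^max_depth leaves,
  // e.g. max_depth=6 caps num_leaves at 64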
if (max_depth > 0) {
double full_num_leaves = std::pow(2, max_depth);
if (full_num_leaves > num_leaves
&& num_leaves == kDefaultNumLeaves) {
Log::Warning("Accuracy may be bad since you didn't explicitly set num_leaves OR 2^max_depth > num_leaves."
" (num_leaves=%d).",
num_leaves);
}
if (full_num_leaves < num_leaves) {
// Fits in an int, and is more restrictive than the current num_leaves
num_leaves = static_cast<int>(full_num_leaves);
}
}
// force col-wise for gpu & CUDA
if (device_type == std::string("gpu") || device_type == std::string("cuda")) {
force_col_wise = true;
force_row_wise = false;
if (deterministic) {
Log::Warning("Although \"deterministic\" is set, the results ran by GPU may be non-deterministic.");
}
}
// force gpu_use_dp for CUDA
if (device_type == std::string("cuda") && !gpu_use_dp) {
Log::Warning("CUDA currently requires double precision calculations.");
gpu_use_dp = true;
}
// linear tree learner must be serial type and cpu device
if (linear_tree) {
if (device_type == std::string("gpu")) {
device_type = "cpu";
Log::Warning("Linear tree learner only works with CPU.");
}
if (tree_learner != std::string("serial")) {
tree_learner = "serial";
Log::Warning("Linear tree learner must be serial.");
}
if (zero_as_missing) {
Log::Fatal("zero_as_missing must be false when fitting linear trees.");
}
if (objective == std::string("regresson_l1")) {
Log::Fatal("Cannot use regression_l1 objective when fitting linear trees.");
}
}
// min_data_in_leaf must be at least 2 if path smoothing is active. This is because when the split is calculated
// the count is calculated using the proportion of hessian in the leaf which is rounded up to nearest int, so it can
// be 1 when there is actually no data in the leaf. In rare cases this can cause a bug because with path smoothing the
// calculated split gain can be positive even with zero gradient and hessian.
if (path_smooth > kEpsilon && min_data_in_leaf < 2) {
min_data_in_leaf = 2;
Log::Warning("min_data_in_leaf has been increased to 2 because this is required when path smoothing is active.");
}
if (is_parallel && (monotone_constraints_method == std::string("intermediate") || monotone_constraints_method == std::string("advanced"))) {
// In distributed mode, local node doesn't have histograms on all features, cannot perform "intermediate" monotone constraints.
Log::Warning("Cannot use \"intermediate\" or \"advanced\" monotone constraints in parallel learning, auto set to \"basic\" method.");
monotone_constraints_method = "basic";
}
if (feature_fraction_bynode != 1.0 && (monotone_constraints_method == std::string("intermediate") || monotone_constraints_method == std::string("advanced"))) {
// "intermediate" monotone constraints need to recompute splits. If the features are sampled when computing the
// split initially, then the sampling needs to be recorded or done once again, which is currently not supported
Log::Warning("Cannot use \"intermediate\" or \"advanced\" monotone constraints with feature fraction different from 1, auto set monotone constraints to \"basic\" method.");
monotone_constraints_method = "basic";
}
if (max_depth > 0 && monotone_penalty >= max_depth) {
Log::Warning("Monotone penalty greater than tree depth. Monotone features won't be used.");
}
if (min_data_in_leaf <= 0 && min_sum_hessian_in_leaf <= kEpsilon) {
Log::Warning(
"Cannot set both min_data_in_leaf and min_sum_hessian_in_leaf to 0. "
"Will set min_data_in_leaf to 1.");
min_data_in_leaf = 1;
}
}
std::string Config::ToString() const {
std::stringstream str_buf;
str_buf << "[boosting: " << boosting << "]\n";
str_buf << "[objective: " << objective << "]\n";
str_buf << "[metric: " << Common::Join(metric, ",") << "]\n";
str_buf << "[tree_learner: " << tree_learner << "]\n";
str_buf << "[device_type: " << device_type << "]\n";
str_buf << SaveMembersToString();
return str_buf.str();
}
} // namespace LightGBM
| 1 | 27,541 | To generalize for possible future new enum options. | microsoft-LightGBM | cpp |
@@ -877,6 +877,9 @@ def main(argv):
from scapy.arch.windows import route_add_loopback
route_add_loopback()
+ # Add SCAPY_ROOT_DIR environment variable, used for tests
+ os.environ['SCAPY_ROOT_DIR'] = os.environ.get("PWD", os.getcwd())
+
except getopt.GetoptError as msg:
print("ERROR:", msg, file=sys.stderr)
raise SystemExit | 1 | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <[email protected]>
# This program is published under a GPLv2 license
# flake8: noqa: E501
"""
Unit testing infrastructure for Scapy
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import getopt
import imp
import glob
import importlib
import hashlib
import copy
import bz2
import base64
import os.path
import time
import traceback
import zlib
from scapy.consts import WINDOWS
import scapy.modules.six as six
from scapy.modules.six.moves import range
# Util class #
class Bunch:
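    # Turns keyword arguments into attributes, e.g. Bunch(verb=3).verb == 3.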
    def __init__(self, **kw):
        self.__dict__ = kw
# Import tool #
def import_module(name):
name = os.path.realpath(name)
thepath = os.path.dirname(name)
name = os.path.basename(name)
if name.endswith(".py"):
name = name[:-3]
f, path, desc = imp.find_module(name, [thepath])
try:
return imp.load_module(name, f, path, desc)
finally:
if f:
f.close()
# INTERNAL/EXTERNAL FILE EMBEDDING #
class File:
def __init__(self, name, URL, local):
self.name = name
self.local = local.encode("utf8")
self.URL = URL
def get_local(self):
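        # The embedded payload is the file's bytes, bzip2-compressed then base64-encoded.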
return bz2.decompress(base64.decodestring(self.local))
def get_URL(self):
return self.URL
def write(self, dir):
if dir:
dir += "/"
        with open(dir + self.name, "wb") as f:
            f.write(self.get_local())
# Embed base64-encoded, bzip2-compressed versions of the js and css files
# so the reports still work when the Internet is unreachable.
class External_Files:
UTscapy_js = File("UTscapy.js", "https://scapy.net/files/UTscapy/UTscapy.js", # noqa: E501
"""QlpoOTFBWSZTWWVijKQAAXxfgERUYOvAChIhBAC
/79+qQAH8AFA0poANAMjQAAAGABo0NGEZNBo0\n0BhgAaNDRhGTQaNNAYFURJinp
lGaKbRkJiekzSenqmpA0Gm1LFMpRUklVQlK9WUTZYpNFI1IiEWE\nFT09Sfj5uO+
qO6S5DQwKIxM92+Zku94wL6V/1KTKan2c66Ug6SmVKy1ZIrgauxMVLF5xLH0lJRQ
u\nKlqLF10iatlTzqvw7S9eS3+h4lu3GZyMgoOude3NJ1pQy8eo+X96IYZw+yneh
siPj73m0rnvQ3QX\nZ9BJQiZQYQ5/uNcl2WOlC5vyQqV/BWsnr2NZYLYXQLDs/Bf
fk4ZfR4/SH6GfA5Xlek4xHNHqbSsR\nbREOgueXo3kcYi94K6hSO3ldD2O/qJXOF
qJ8o3TE2aQahxtQpCVUKQMvODHwu2YkaORYZC6gihEa\nllcHDIAtRPScBACAJnU
ggYhLDX6DEko7nC9GvAw5OcEkiyDUbLdiGCzDaXWMC2DuQ2Y6sGf6NcRu\nON7QS
bhHsPc4KKmZ/xdyRThQkGVijKQ=\n""")
UTscapy_css = File("UTscapy.css", "https://scapy.net/files/UTscapy/UTscapy.css", # noqa: E501
"""QlpoOTFBWSZTWbpATIwAAFpfgHwQSB//+Cpj2Q
C//9/6UAS5t7qcLut3NNDp0gxKMmpqaep6n6iP\n1J+pPU0yAAaeoaDI0BJCTJqa
j1BoaGhoAAPSAAAJNSRqmmk8TQmj1DT1Hom1HkQABoNDmmJgATAB\nMAAJgACYJI
hDQUzCR5Q0niRoaAGgGmZS+faw7LNbkliDG1Q52WJCd85cxRVVKegld8qCRISoto
GD\nEGREFEYRW0CxAgTb13lodjuN7E1aCFgRFVhiEmZAZ/ek+XR0c8DWiAKpBgY2
LNpQ1rOvlnoUI1Al\n0ySaP1w2MyFxoQqRicScCm6WnQOxDnufxk8s2deLLKlN+r
fvxyTTCGRAWZONkVGIxVQRZGZLeAwH\nbpQXZcYj467i85knEOYWmLcokaqEGYGS
xMCpD+cOIaL7GCxEU/aNSlWFNCvQBvzb915huAgdIdD2\nya9ZQGoqrmtommfAxu
7FGTDBNBfir9UkAMmT1KRzxasJ0n2OE+mlgTZzJnhydbJaMtAk8DJzUuvv\nZpc3
CJLVyr8F3NmIQO5E3SJSY3SQnk1CQwlELqFutXjeWWzmiywo7xJk5rUcVOV9+Ro4
96WmXsUr\nkKhNocbnFztqPhesccW5kja+KuNFmzdw4DVOBJ2JPhGOYSwCUiwUe2
kOshYBdULUmwYwToAGdgA9\n5n3bSpG85LUFIE0Cw78EYVgY0ESnYW5UdfgBhj1w
PiiXDEG2vAtr38O9kdwg3tFU/0okilEjDYDa\nEfkomkLUSokmE8g1fMYBqQyyaP
RWmySO3EtAuMVhQqIuMldOzLqWubl7k1MnhuBaELOgtB2TChcS\n0k7jvgdBKIef
UkdAf3t2GO/LVSrDvkcb4l4TrwrI7JeCo8pBvXqZBqZJSqbsAziG7QDQVNqdtFGz
\nEvMKOvKvUQ6mJFigLxBnziGQGQDEMQPSGhlV2BwAN6rZEmLwgED0OrEiSxXDcB
MDskp36AV7IbKa\nCila/Wm1BKhBF+ZIqtiFyYpUhI1Q5+JK0zK7aVyLS9y7GaSr
NCRpr7uaa1UgapVKs6wKKQzYCWsV\n8iCGrAkgWZEnDMJWCGUZOIpcmMle1UXSAl
d5OoUYXNo0L7WSOcxEkSGjCcRhjvMRP1pAUuBPRCRA\n2lhC0ZgLYDAf5V2agMUa
ki1ZgOQDXQ7aIDTdjGRTgnzPML0V1X+tIoSSZmZhrxZbluMWGEkwwky6\n0ObWIM
cEbX4cawPPBVc6m5UUPbEmBANyjtNvTKE2ri7oOmBVKIMLqQKm+4rlmisu2uGSxW
zTov5w\nqQDp61FkHk40wzQUKk4YcBlbQT1l8VXeZJYAVFjSJIcC8JykBYZJ1yka
I4LDm5WP7s2NaRkhhV7A\nFVSD5zA8V/DJzfTk0QHmCT2wRgwPKjP60EqqlDUaST
/i7kinChIXSAmRgA==\n""")
def get_local_dict(cls):
return {x: y.name for (x, y) in six.iteritems(cls.__dict__)
if isinstance(y, File)}
get_local_dict = classmethod(get_local_dict)
def get_URL_dict(cls):
return {x: y.URL for (x, y) in six.iteritems(cls.__dict__)
if isinstance(y, File)}
get_URL_dict = classmethod(get_URL_dict)
# HELPER CLASSES FOR PARAMETRIZING OUTPUT FORMAT #
class EnumClass:
def from_string(cls, x):
return cls.__dict__[x.upper()]
from_string = classmethod(from_string)
class Format(EnumClass):
TEXT = 1
ANSI = 2
HTML = 3
LATEX = 4
XUNIT = 5
# TEST CLASSES #
class TestClass:
def __getitem__(self, item):
return getattr(self, item)
def add_keywords(self, kws):
if isinstance(kws, six.string_types):
kws = [kws]
for kwd in kws:
if kwd.startswith('-'):
try:
self.keywords.remove(kwd[1:])
except KeyError:
pass
else:
self.keywords.add(kwd)
class TestCampaign(TestClass):
def __init__(self, title):
self.title = title
self.filename = None
self.headcomments = ""
self.campaign = []
self.keywords = set()
self.crc = None
self.sha = None
self.preexec = None
self.preexec_output = None
self.end_pos = 0
self.interrupted = False
def add_testset(self, testset):
self.campaign.append(testset)
testset.keywords.update(self.keywords)
def trunc(self, index):
self.campaign = self.campaign[:index]
def startNum(self, beginpos):
for ts in self:
for t in ts:
t.num = beginpos
beginpos += 1
self.end_pos = beginpos
def __iter__(self):
return self.campaign.__iter__()
def all_tests(self):
for ts in self:
for t in ts:
yield t
class TestSet(TestClass):
def __init__(self, name):
self.name = name
self.tests = []
self.comments = ""
self.keywords = set()
self.crc = None
self.expand = 1
def add_test(self, test):
self.tests.append(test)
test.keywords.update(self.keywords)
def trunc(self, index):
self.tests = self.tests[:index]
def __iter__(self):
return self.tests.__iter__()
class UnitTest(TestClass):
def __init__(self, name):
self.name = name
self.test = ""
self.comments = ""
self.result = "passed" # make instance True at init to have a different truth value than None
self.output = ""
self.num = -1
self.keywords = set()
self.crc = None
self.expand = 1
def decode(self):
if six.PY2:
self.test = self.test.decode("utf8", "ignore")
self.output = self.output.decode("utf8", "ignore")
self.comments = self.comments.decode("utf8", "ignore")
self.result = self.result.decode("utf8", "ignore")
def __nonzero__(self):
return self.result == "passed"
__bool__ = __nonzero__
# Careful note: any key not included in the config falls back to its default.
# Use -c as the first argument!
def parse_config_file(config_path, verb=3):
"""Parse provided json to get configuration
Empty default json:
{
"testfiles": [],
"breakfailed": false,
"onlyfailed": false,
"verb": 2,
"dump": 0,
"crc": true,
"scapy": "scapy",
"preexec": {},
"global_preexec": "",
"outputfile": null,
"local": true,
"format": "ansi",
"num": null,
"modules": [],
"kw_ok": [],
"kw_ko": []
}
"""
import json
import unicodedata
with open(config_path) as config_file:
        data = json.load(config_file)
if verb > 2:
print("### Loaded config file", config_path, file=sys.stderr)
def get_if_exist(key, default):
return data[key] if key in data else default
return Bunch(testfiles=get_if_exist("testfiles", []),
breakfailed=get_if_exist("breakfailed", False),
remove_testfiles=get_if_exist("remove_testfiles", []),
onlyfailed=get_if_exist("onlyfailed", False),
verb=get_if_exist("verb", 3),
dump=get_if_exist("dump", 0), crc=get_if_exist("crc", 1),
scapy=get_if_exist("scapy", "scapy"),
preexec=get_if_exist("preexec", {}),
global_preexec=get_if_exist("global_preexec", ""),
outfile=get_if_exist("outputfile", sys.stdout),
local=get_if_exist("local", False),
num=get_if_exist("num", None),
modules=get_if_exist("modules", []),
kw_ok=get_if_exist("kw_ok", []),
kw_ko=get_if_exist("kw_ko", []),
format=get_if_exist("format", "ansi"))
# PARSE CAMPAIGN #
def parse_campaign_file(campaign_file):
test_campaign = TestCampaign("Test campaign")
test_campaign.filename = campaign_file.name
testset = None
test = None
testnb = 0
for l in campaign_file.readlines():
if l[0] == '#':
continue
if l[0] == "~":
(test or testset or test_campaign).add_keywords(l[1:].split())
elif l[0] == "%":
test_campaign.title = l[1:].strip()
elif l[0] == "+":
testset = TestSet(l[1:].strip())
test_campaign.add_testset(testset)
test = None
elif l[0] == "=":
test = UnitTest(l[1:].strip())
test.num = testnb
testnb += 1
testset.add_test(test)
elif l[0] == "*":
if test is not None:
test.comments += l[1:]
elif testset is not None:
testset.comments += l[1:]
else:
test_campaign.headcomments += l[1:]
else:
if test is None:
if l.strip():
print("Unknown content [%s]" % l.strip(), file=sys.stderr)
else:
test.test += l
return test_campaign
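
# A minimal example campaign file (illustrative), using the markers parsed above:
#
#   % My campaign title
#   + My test set
#   = My first test
#   ~ some_keyword
#   * A comment attached to the test
#   assert 1 + 1 == 2
#
# '%' sets the title, '+' opens a test set, '=' opens a test, '~' adds keywords,
# '*' attaches comments, '#' lines are skipped, and anything else is test code.
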
def dump_campaign(test_campaign):
print("#" * (len(test_campaign.title) + 6))
print("## %(title)s ##" % test_campaign)
print("#" * (len(test_campaign.title) + 6))
if test_campaign.sha and test_campaign.crc:
print("CRC=[%(crc)s] SHA=[%(sha)s]" % test_campaign)
print("from file %(filename)s" % test_campaign)
print()
for ts in test_campaign:
if ts.crc:
print("+--[%s]%s(%s)--" % (ts.name, "-" * max(2, 80 - len(ts.name) - 18), ts.crc)) # noqa: E501
else:
print("+--[%s]%s" % (ts.name, "-" * max(2, 80 - len(ts.name) - 6)))
if ts.keywords:
print(" kw=%s" % ",".join(ts.keywords))
for t in ts:
print("%(num)03i %(name)s" % t)
c = k = ""
if t.keywords:
k = "kw=%s" % ",".join(t.keywords)
if t.crc:
c = "[%(crc)s] " % t
if c or k:
print(" %s%s" % (c, k))
# COMPUTE CAMPAIGN DIGESTS #
if six.PY2:
def crc32(x):
return "%08X" % (0xffffffff & zlib.crc32(x))
def sha1(x):
return hashlib.sha1(x).hexdigest().upper()
else:
def crc32(x):
return "%08X" % (0xffffffff & zlib.crc32(bytearray(x, "utf8")))
def sha1(x):
return hashlib.sha1(x.encode("utf8")).hexdigest().upper()
def compute_campaign_digests(test_campaign):
dc = ""
for ts in test_campaign:
dts = ""
for t in ts:
dt = t.test.strip()
t.crc = crc32(dt)
dts += "\0" + dt
ts.crc = crc32(dts)
dc += "\0\x01" + dts
test_campaign.crc = crc32(dc)
test_campaign.sha = sha1(open(test_campaign.filename).read())
# FILTER CAMPAIGN #
def filter_tests_on_numbers(test_campaign, num):
if num:
for ts in test_campaign:
ts.tests = [t for t in ts.tests if t.num in num]
test_campaign.campaign = [ts for ts in test_campaign.campaign
if ts.tests]
def filter_tests_keep_on_keywords(test_campaign, kw):
def kw_match(lst, kw):
for k in lst:
if k in kw:
return True
return False
if kw:
for ts in test_campaign:
ts.tests = [t for t in ts.tests if kw_match(t.keywords, kw)]
def filter_tests_remove_on_keywords(test_campaign, kw):
def kw_match(lst, kw):
for k in kw:
if k in lst:
return True
return False
if kw:
for ts in test_campaign:
ts.tests = [t for t in ts.tests if not kw_match(t.keywords, kw)]
def remove_empty_testsets(test_campaign):
test_campaign.campaign = [ts for ts in test_campaign.campaign if ts.tests]
#### RUN TEST #####
def run_test(test, get_interactive_session, verb=3, ignore_globals=None):
test.output, res = get_interactive_session(test.test.strip(), ignore_globals=ignore_globals)
test.result = "failed"
try:
if res is None or res:
test.result = "passed"
if test.output.endswith('KeyboardInterrupt\n'):
test.result = "interrupted"
raise KeyboardInterrupt
except Exception:
test.output += "UTscapy: Error during result interpretation:\n"
test.output += "".join(traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2],))
finally:
test.decode()
if verb > 1:
print("%(result)6s %(crc)s %(name)s" % test, file=sys.stderr)
return bool(test)
#### RUN CAMPAIGN #####
def run_campaign(test_campaign, get_interactive_session, verb=3, ignore_globals=None): # noqa: E501
passed = failed = 0
if test_campaign.preexec:
test_campaign.preexec_output = get_interactive_session(test_campaign.preexec.strip(), ignore_globals=ignore_globals)[0]
try:
for i, testset in enumerate(test_campaign):
for j, t in enumerate(testset):
                if run_test(t, get_interactive_session, verb, ignore_globals):
passed += 1
else:
failed += 1
except KeyboardInterrupt:
failed += 1
testset.trunc(j+1)
test_campaign.trunc(i+1)
test_campaign.interrupted = True
if verb:
print("Campaign interrupted!", file=sys.stderr)
test_campaign.passed = passed
test_campaign.failed = failed
if verb:
print("Campaign CRC=%(crc)s SHA=%(sha)s" % test_campaign, file=sys.stderr) # noqa: E501
print("PASSED=%i FAILED=%i" % (passed, failed), file=sys.stderr)
return failed
# INFO LINES #
def info_line(test_campaign):
filename = test_campaign.filename
if filename is None:
return "Run %s by UTscapy" % time.ctime()
else:
return "Run %s from [%s] by UTscapy" % (time.ctime(), filename)
def html_info_line(test_campaign):
filename = test_campaign.filename
if filename is None:
return """Run %s by <a href="http://www.secdev.org/projects/UTscapy/">UTscapy</a><br>""" % time.ctime() # noqa: E501
else:
return """Run %s from [%s] by <a href="http://www.secdev.org/projects/UTscapy/">UTscapy</a><br>""" % (time.ctime(), filename) # noqa: E501
# CAMPAIGN TO something #
def campaign_to_TEXT(test_campaign):
output="%(title)s\n" % test_campaign
output += "-- "+info_line(test_campaign)+"\n\n"
output += "Passed=%(passed)i\nFailed=%(failed)i\n\n%(headcomments)s\n" % test_campaign
for testset in test_campaign:
if any(t.expand for t in testset):
output += "######\n## %(name)s\n######\n%(comments)s\n\n" % testset
for t in testset:
if t.expand:
output += "###(%(num)03i)=[%(result)s] %(name)s\n%(comments)s\n%(output)s\n\n" % t # noqa: E501
return output
def campaign_to_ANSI(test_campaign):
output="%(title)s\n" % test_campaign
output += "-- "+info_line(test_campaign)+"\n\n"
output += "Passed=%(passed)i\nFailed=%(failed)i\n\n%(headcomments)s\n" % test_campaign
for testset in test_campaign:
if any(t.expand for t in testset):
output += "######\n## %(name)s\n######\n%(comments)s\n\n" % testset
for t in testset:
if t.expand:
output += "###(%(num)03i)=[%(result)s] %(name)s\n%(comments)s\n%(output)s\n\n" % t # noqa: E501
return output
def campaign_to_xUNIT(test_campaign):
output = '<?xml version="1.0" encoding="UTF-8" ?>\n<testsuite>\n'
for testset in test_campaign:
for t in testset:
output += ' <testcase classname="%s"\n' % testset.name.encode("string_escape").replace('"', ' ') # noqa: E501
output += ' name="%s"\n' % t.name.encode("string_escape").replace('"', ' ') # noqa: E501
            output += ' duration="0">\n'
if not t:
output += '<error><![CDATA[%(output)s]]></error>\n' % t
output += "</testcase>\n"
output += '</testsuite>'
return output
def campaign_to_HTML(test_campaign):
output = """
<h1>%(title)s</h1>
<p>
""" % test_campaign
if test_campaign.crc is not None and test_campaign.sha is not None:
output += "CRC=<span class=crc>%(crc)s</span> SHA=<span class=crc>%(sha)s</span><br>" % test_campaign
output += "<small><em>"+html_info_line(test_campaign)+"</em></small>"
output += "".join([
test_campaign.headcomments,
"\n<p>",
"PASSED=%(passed)i FAILED=%(failed)i" % test_campaign,
" <span class=warn_interrupted>INTERRUPTED!</span>" if test_campaign.interrupted else "",
"<p>\n\n",
])
for testset in test_campaign:
output += "<h2>" % testset
if testset.crc is not None:
output += "<span class=crc>%(crc)s</span> " % testset
output += "%(name)s</h2>\n%(comments)s\n<ul>\n" % testset
for t in testset:
output += """<li class=%(result)s id="tst%(num)il">\n""" % t
if t.expand == 2:
output += """
<span id="tst%(num)i+" class="button%(result)s" onClick="show('tst%(num)i')" style="POSITION: absolute; VISIBILITY: hidden;">+%(num)03i+</span>
<span id="tst%(num)i-" class="button%(result)s" onClick="hide('tst%(num)i')">-%(num)03i-</span>
""" % t
else:
output += """
<span id="tst%(num)i+" class="button%(result)s" onClick="show('tst%(num)i')">+%(num)03i+</span>
<span id="tst%(num)i-" class="button%(result)s" onClick="hide('tst%(num)i')" style="POSITION: absolute; VISIBILITY: hidden;">-%(num)03i-</span>
""" % t
if t.crc is not None:
output += "<span class=crc>%(crc)s</span>\n" % t
output += """%(name)s\n<span class="comment %(result)s" id="tst%(num)i" """ % t # noqa: E501
if t.expand < 2:
output += """ style="POSITION: absolute; VISIBILITY: hidden;" """ # noqa: E501
output += """><br>%(comments)s
<pre>
%(output)s</pre></span>
""" % t
output += "\n</ul>\n\n"
return output
def pack_html_campaigns(runned_campaigns, data, local=False, title=None):
output = """
<html>
<head>
<title>%(title)s</title>
<h1>UTScapy tests</h1>
<span class=control_button onClick="hide_all('tst')">Shrink All</span>
<span class=control_button onClick="show_all('tst')">Expand All</span>
<span class=control_button onClick="show_passed('tst')">Expand Passed</span>
<span class=control_button onClick="show_failed('tst')">Expand Failed</span>
<p>
"""
for test_campaign in runned_campaigns:
for ts in test_campaign:
for t in ts:
output += """<span class=button%(result)s onClick="goto_id('tst%(num)il')">%(num)03i</span>\n""" % t
output += """</p>\n\n
<link rel="stylesheet" href="%(UTscapy_css)s" type="text/css">
<script language="JavaScript" src="%(UTscapy_js)s" type="text/javascript"></script>
</head>
<body>
%(data)s
</body></html>
"""
out_dict = {'data': data, 'title': title if title else "UTScapy tests"}
if local:
External_Files.UTscapy_js.write(os.path.dirname(test_campaign.output_file.name)) # noqa: E501
External_Files.UTscapy_css.write(os.path.dirname(test_campaign.output_file.name)) # noqa: E501
out_dict.update(External_Files.get_local_dict())
else:
out_dict.update(External_Files.get_URL_dict())
output %= out_dict
return output
def campaign_to_LATEX(test_campaign):
output = r"""\documentclass{report}
\usepackage{alltt}
\usepackage{xcolor}
\usepackage{a4wide}
\usepackage{hyperref}
\title{%(title)s}
\date{%%s}
\begin{document}
\maketitle
\tableofcontents
\begin{description}
\item[Passed:] %(passed)i
\item[Failed:] %(failed)i
\end{description}
%(headcomments)s
""" % test_campaign
output %= info_line(test_campaign)
for testset in test_campaign:
output += "\\chapter{%(name)s}\n\n%(comments)s\n\n" % testset
for t in testset:
if t.expand:
output += r"""\section{%(name)s}
[%(num)03i] [%(result)s]
%(comments)s
\begin{alltt}
%(output)s
\end{alltt}
""" % t
output += "\\end{document}\n"
return output
#### USAGE ####
def usage():
print("""Usage: UTscapy [-m module] [-f {text|ansi|HTML|LaTeX}] [-o output_file]
[-t testfile] [-T testfile] [-k keywords [-k ...]] [-K keywords [-K ...]]
[-l] [-b] [-d|-D] [-F] [-q[q]] [-P preexecute_python_code]
[-s /path/to/scapy] [-c configfile]
-t\t\t: provide test files (can be used many times)
-T\t\t: if -t is used with *, remove a specific file (can be used many times)
-l\t\t: generate local .js and .css files
-F\t\t: expand only failed tests
-b\t\t: stop at first failed campaign
-d\t\t: dump campaign
-D\t\t: dump campaign and stop
-C\t\t: don't calculate CRC and SHA
-s\t\t: path to scapy.py
-c\t\t: load a .utsc config file
-q\t\t: quiet mode
-qq\t\t: [silent mode]
-n <testnum>\t: only tests whose numbers are given (e.g. 1,3-7,12)
-m <module>\t: additional module to put in the namespace
-k <kw1>,<kw2>,...\t: include only tests with one of those keywords (can be used many times)
-K <kw1>,<kw2>,...\t: remove tests with one of those keywords (can be used many times)
-P <preexecute_python_code>
""", file=sys.stderr)
raise SystemExit
# MAIN #
def execute_campaign(TESTFILE, OUTPUTFILE, PREEXEC, NUM, KW_OK, KW_KO, DUMP,
FORMAT, VERB, ONLYFAILED, CRC, autorun_func, pos_begin=0, ignore_globals=None): # noqa: E501
# Parse test file
test_campaign = parse_campaign_file(TESTFILE)
# Report parameters
if PREEXEC:
test_campaign.preexec = PREEXEC
# Compute campaign CRC and SHA
if CRC:
compute_campaign_digests(test_campaign)
# Filter out unwanted tests
filter_tests_on_numbers(test_campaign, NUM)
for k in KW_OK:
filter_tests_keep_on_keywords(test_campaign, k)
for k in KW_KO:
filter_tests_remove_on_keywords(test_campaign, k)
remove_empty_testsets(test_campaign)
# Dump campaign
if DUMP:
dump_campaign(test_campaign)
if DUMP > 1:
sys.exit()
# Run tests
test_campaign.output_file = OUTPUTFILE
    result = run_campaign(test_campaign, autorun_func[FORMAT], verb=VERB, ignore_globals=ignore_globals)  # noqa: E501
# Shrink passed
if ONLYFAILED:
for t in test_campaign.all_tests():
if t:
t.expand = 0
else:
t.expand = 2
# Generate report
if FORMAT == Format.TEXT:
output = campaign_to_TEXT(test_campaign)
elif FORMAT == Format.ANSI:
output = campaign_to_ANSI(test_campaign)
elif FORMAT == Format.HTML:
test_campaign.startNum(pos_begin)
output = campaign_to_HTML(test_campaign)
elif FORMAT == Format.LATEX:
output = campaign_to_LATEX(test_campaign)
elif FORMAT == Format.XUNIT:
output = campaign_to_xUNIT(test_campaign)
return output, (result == 0), test_campaign
def resolve_testfiles(TESTFILES):
for tfile in TESTFILES[:]:
if "*" in tfile:
TESTFILES.remove(tfile)
TESTFILES.extend(glob.glob(tfile))
return TESTFILES
def main(argv):
ignore_globals = list(six.moves.builtins.__dict__.keys())
# Parse arguments
FORMAT = Format.ANSI
OUTPUTFILE = sys.stdout
LOCAL = 0
NUM = None
KW_OK = []
KW_KO = []
DUMP = 0
CRC = True
BREAKFAILED = False
ONLYFAILED = False
VERB = 3
GLOB_PREEXEC = ""
PREEXEC_DICT = {}
SCAPY = "scapy"
MODULES = []
TESTFILES = []
try:
opts = getopt.getopt(argv, "o:t:T:c:f:hbln:m:k:K:DdCFqP:s:")
for opt, optarg in opts[0]:
if opt == "-h":
usage()
elif opt == "-b":
BREAKFAILED = True
elif opt == "-F":
ONLYFAILED = True
elif opt == "-q":
VERB -= 1
elif opt == "-D":
DUMP = 2
elif opt == "-d":
DUMP = 1
elif opt == "-C":
CRC = False
elif opt == "-s":
SCAPY = optarg
elif opt == "-P":
GLOB_PREEXEC += "\n" + optarg
elif opt == "-f":
try:
FORMAT = Format.from_string(optarg)
except KeyError as msg:
raise getopt.GetoptError("Unknown output format %s" % msg)
elif opt == "-t":
TESTFILES.append(optarg)
TESTFILES = resolve_testfiles(TESTFILES)
elif opt == "-T":
TESTFILES.remove(optarg)
elif opt == "-c":
data = parse_config_file(optarg, VERB)
BREAKFAILED = data.breakfailed
ONLYFAILED = data.onlyfailed
VERB = data.verb
DUMP = data.dump
CRC = data.crc
SCAPY = data.scapy
PREEXEC_DICT = data.preexec
GLOB_PREEXEC = data.global_preexec
OUTPUTFILE = data.outfile
TESTFILES = data.testfiles
LOCAL = 1 if data.local else 0
NUM = data.num
MODULES = data.modules
KW_OK = [data.kw_ok]
KW_KO = [data.kw_ko]
try:
FORMAT = Format.from_string(data.format)
except KeyError as msg:
raise getopt.GetoptError("Unknown output format %s" % msg)
TESTFILES = resolve_testfiles(TESTFILES)
for testfile in resolve_testfiles(data.remove_testfiles):
TESTFILES.remove(testfile)
elif opt == "-o":
OUTPUTFILE = optarg
if not os.access(os.path.dirname(os.path.abspath(OUTPUTFILE)), os.W_OK):
raise getopt.GetoptError("Cannot write to file %s" % OUTPUTFILE)
elif opt == "-l":
LOCAL = 1
elif opt == "-n":
NUM = []
for v in (x.strip() for x in optarg.split(",")):
try:
NUM.append(int(v))
except ValueError:
v1, v2 = [int(e) for e in v.split('-', 1)]
NUM.extend(range(v1, v2 + 1))
elif opt == "-m":
MODULES.append(optarg)
elif opt == "-k":
KW_OK.append(optarg.split(","))
elif opt == "-K":
KW_KO.append(optarg.split(","))
# Discard Python3 tests when using Python2
if six.PY2:
KW_KO.append(["python3_only"])
if VERB > 2:
print("### Booting scapy...", file=sys.stderr)
try:
from scapy import all as scapy
except ImportError as e:
raise getopt.GetoptError("cannot import [%s]: %s" % (SCAPY, e))
for m in MODULES:
try:
mod = import_module(m)
six.moves.builtins.__dict__.update(mod.__dict__)
except ImportError as e:
raise getopt.GetoptError("cannot import [%s]: %s" % (m, e))
if WINDOWS:
from scapy.arch.windows import route_add_loopback
route_add_loopback()
except getopt.GetoptError as msg:
print("ERROR:", msg, file=sys.stderr)
raise SystemExit
autorun_func = {
Format.TEXT: scapy.autorun_get_text_interactive_session,
Format.ANSI: scapy.autorun_get_ansi_interactive_session,
Format.HTML: scapy.autorun_get_html_interactive_session,
Format.LATEX: scapy.autorun_get_latex_interactive_session,
Format.XUNIT: scapy.autorun_get_text_interactive_session,
}
if VERB > 2:
print("### Starting tests...", file=sys.stderr)
glob_output = ""
glob_result = 0
glob_title = None
UNIQUE = len(TESTFILES) == 1
    # Resolve tags and asterisks
for prex in six.iterkeys(copy.copy(PREEXEC_DICT)):
if "*" in prex:
pycode = PREEXEC_DICT[prex]
del PREEXEC_DICT[prex]
for gl in glob.iglob(prex):
_pycode = pycode.replace("%name%", os.path.splitext(os.path.split(gl)[1])[0]) # noqa: E501
PREEXEC_DICT[gl] = _pycode
pos_begin = 0
runned_campaigns = []
# Execute all files
for TESTFILE in TESTFILES:
if VERB > 2:
print("### Loading:", TESTFILE, file=sys.stderr)
PREEXEC = PREEXEC_DICT[TESTFILE] if TESTFILE in PREEXEC_DICT else GLOB_PREEXEC
with open(TESTFILE) as testfile:
output, result, campaign = execute_campaign(testfile, OUTPUTFILE,
PREEXEC, NUM, KW_OK, KW_KO,
DUMP, FORMAT, VERB, ONLYFAILED,
CRC, autorun_func, pos_begin,
ignore_globals)
runned_campaigns.append(campaign)
pos_begin = campaign.end_pos
if UNIQUE:
glob_title = campaign.title
glob_output += output
if not result and BREAKFAILED:
glob_result = 1
break
if VERB > 2:
print("### Writing output...", file=sys.stderr)
    # Concatenate outputs
if FORMAT == Format.HTML:
glob_output = pack_html_campaigns(runned_campaigns, glob_output, LOCAL, glob_title)
if OUTPUTFILE == sys.stdout:
OUTPUTFILE.write(glob_output.encode("utf8", "ignore")
if 'b' in OUTPUTFILE.mode else glob_output)
else:
with open(OUTPUTFILE, "wb") as f:
f.write(glob_output.encode("utf8", "ignore")
if 'b' in f.mode else glob_output)
# Return state
return glob_result
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| 1 | 13,431 | Shall we remove this variable at the end of UTScapy execution? | secdev-scapy | py |
@@ -29,6 +29,10 @@ public class ProtoModels {
/** Gets the interfaces for the apis in the service config. */
public static List<Interface> getInterfaces(Model model) {
+ if (model.getServiceConfig().getApisCount() == 0) {
+ // A valid service config was not given.
+ return model.getSymbolTable().getInterfaces().asList();
+ }
return model
.getServiceConfig()
.getApisList() | 1 | /* Copyright 2016 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.gapic;
import com.google.api.tools.framework.model.Interface;
import com.google.api.tools.framework.model.Model;
import com.google.common.collect.ImmutableList;
import java.util.List;
/**
 * An interface-based view of a model, consisting of a strategy for getting the interfaces of the
* model.
*/
public class ProtoModels {
private ProtoModels() {}
/** Gets the interfaces for the apis in the service config. */
public static List<Interface> getInterfaces(Model model) {
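    // Resolve each API name listed in the service config against the model's
    // symbol table to obtain the corresponding Interface.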
return model
.getServiceConfig()
.getApisList()
.stream()
.map(api -> model.getSymbolTable().lookupInterface(api.getName()))
.collect(ImmutableList.toImmutableList());
}
}
| 1 | 28,161 | What is this checking for? Why is returning the list of interfaces the correct behaviour in this case? | googleapis-gapic-generator | java |
@@ -20,6 +20,9 @@ namespace Nethermind.Blockchain
{
public interface ISyncConfig : IConfig
{
+ [ConfigItem(Description = "HMB Sync.", DefaultValue = "false")]
+ bool HmbSync { get; set; }
+
[ConfigItem(Description = "If set to 'true' then the Fast Sync (eth/63) synchronization algorithm will be used.", DefaultValue = "false")]
bool FastSync { get; set; }
| 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using Nethermind.Config;
namespace Nethermind.Blockchain
{
public interface ISyncConfig : IConfig
{
[ConfigItem(Description = "If set to 'true' then the Fast Sync (eth/63) synchronization algorithm will be used.", DefaultValue = "false")]
bool FastSync { get; set; }
[ConfigItem(Description = "If set to 'true' then in the Fast Sync mode blocks will be first downloaded from the provided PivotNumber downwards. This allows for parallelization of requests with many sync peers and with no need to worry about syncing a valid branch (syncing downwards to 0). You need to enter the pivot block number, hash and total difficulty from a trusted source (you can use etherscan and confirm with other sources if you wan to change it).", DefaultValue = "false")]
bool FastBlocks { get; set; }
[ConfigItem(Description = "If set to 'true' then in the Fast Blocks mode Nethermind generates smaller requests to avoid Geth from disconnecting. On the Geth heavy networks (mainnet) it is desired while on Parity or Nethermind heavy networks (Goerli, AuRa) it slows down the sync by a factor of ~4", DefaultValue = "true")]
public bool UseGethLimitsInFastBlocks { get; set; }
[ConfigItem(Description = "If set to 'true' then the block bodies will be downloaded in the Fast Sync mode.", DefaultValue = "true")]
bool DownloadBodiesInFastSync { get; set; }
[ConfigItem(Description = "If set to 'true' then the receipts will be downloaded in the Fast Sync mode. This will slow down the process by a few hours but will allow you to interact with dApps that execute extensive historical logs searches (like Maker CDPs).", DefaultValue = "true")]
bool DownloadReceiptsInFastSync { get; set; }
[ConfigItem(Description = "Total Difficulty of the pivot block for the Fast Blocks sync (not - this is total difficulty and not difficulty).", DefaultValue = "null")]
string PivotTotalDifficulty { get; }
[ConfigItem(Description = "Number of the pivot block for the Fast Blocks sync.", DefaultValue = "null")]
string PivotNumber { get; }
[ConfigItem(Description = "Hash of the pivot block for the Fast Blocks sync.", DefaultValue = "null")]
string PivotHash { get; }
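
        // Illustrative pivot settings in a JSON config (values are made up; the
        // "Sync" section and key names are assumed to mirror these properties):
        //   "Sync": {
        //     "FastSync": true,
        //     "PivotNumber": "8000000",
        //     "PivotHash": "0x...",
        //     "PivotTotalDifficulty": "10000000000000000000000"
        //   }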
}
} | 1 | 22,880 | Please name it correctly as Beam ;) | NethermindEth-nethermind | .cs |
@@ -184,9 +184,9 @@ func shouldRetryWithWait(tripper http.RoundTripper, err error, multiplier int) b
apiErr, ok := err.(*googleapi.Error)
var retry bool
switch {
- case !ok && tkValid:
- // Not a googleapi.Error and the token is still valid.
- return false
+ case !ok:
+ // Not a googleapi.Error. Likely a transport error.
+ retry = true
case apiErr.Code >= 500 && apiErr.Code <= 599:
retry = true
case apiErr.Code >= 429: | 1 | // Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package compute provides access to the Google Compute API.
package compute
import (
"context"
"fmt"
"math/rand"
"net/http"
"time"
"golang.org/x/oauth2"
computeBeta "google.golang.org/api/compute/v0.beta"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
"google.golang.org/api/option"
"google.golang.org/api/transport"
)
// Client is a client for interacting with Google Cloud Compute.
type Client interface {
AttachDisk(project, zone, instance string, d *compute.AttachedDisk) error
DetachDisk(project, zone, instance, disk string) error
CreateDisk(project, zone string, d *compute.Disk) error
CreateForwardingRule(project, region string, fr *compute.ForwardingRule) error
CreateFirewallRule(project string, i *compute.Firewall) error
CreateImage(project string, i *compute.Image) error
CreateImageBeta(project string, i *computeBeta.Image) error
CreateInstance(project, zone string, i *compute.Instance) error
CreateNetwork(project string, n *compute.Network) error
CreateSubnetwork(project, region string, n *compute.Subnetwork) error
CreateTargetInstance(project, zone string, ti *compute.TargetInstance) error
DeleteDisk(project, zone, name string) error
DeleteForwardingRule(project, region, name string) error
DeleteFirewallRule(project, name string) error
DeleteImage(project, name string) error
DeleteInstance(project, zone, name string) error
StartInstance(project, zone, name string) error
StopInstance(project, zone, name string) error
DeleteNetwork(project, name string) error
DeleteSubnetwork(project, region, name string) error
DeleteTargetInstance(project, zone, name string) error
DeprecateImage(project, name string, deprecationstatus *compute.DeprecationStatus) error
GetMachineType(project, zone, machineType string) (*compute.MachineType, error)
GetProject(project string) (*compute.Project, error)
GetSerialPortOutput(project, zone, name string, port, start int64) (*compute.SerialPortOutput, error)
GetZone(project, zone string) (*compute.Zone, error)
GetInstance(project, zone, name string) (*compute.Instance, error)
GetDisk(project, zone, name string) (*compute.Disk, error)
GetForwardingRule(project, region, name string) (*compute.ForwardingRule, error)
GetFirewallRule(project, name string) (*compute.Firewall, error)
GetImage(project, name string) (*compute.Image, error)
GetImageBeta(project, name string) (*computeBeta.Image, error)
GetImageFromFamily(project, family string) (*compute.Image, error)
GetLicense(project, name string) (*compute.License, error)
GetNetwork(project, name string) (*compute.Network, error)
GetSubnetwork(project, region, name string) (*compute.Subnetwork, error)
GetTargetInstance(project, zone, name string) (*compute.TargetInstance, error)
InstanceStatus(project, zone, name string) (string, error)
InstanceStopped(project, zone, name string) (bool, error)
ListMachineTypes(project, zone string, opts ...ListCallOption) ([]*compute.MachineType, error)
ListZones(project string, opts ...ListCallOption) ([]*compute.Zone, error)
ListRegions(project string, opts ...ListCallOption) ([]*compute.Region, error)
ListInstances(project, zone string, opts ...ListCallOption) ([]*compute.Instance, error)
ListDisks(project, zone string, opts ...ListCallOption) ([]*compute.Disk, error)
	ListForwardingRules(project, region string, opts ...ListCallOption) ([]*compute.ForwardingRule, error)
ListFirewallRules(project string, opts ...ListCallOption) ([]*compute.Firewall, error)
ListImages(project string, opts ...ListCallOption) ([]*compute.Image, error)
ListNetworks(project string, opts ...ListCallOption) ([]*compute.Network, error)
ListSubnetworks(project, region string, opts ...ListCallOption) ([]*compute.Subnetwork, error)
ListTargetInstances(project, zone string, opts ...ListCallOption) ([]*compute.TargetInstance, error)
ResizeDisk(project, zone, disk string, drr *compute.DisksResizeRequest) error
SetInstanceMetadata(project, zone, name string, md *compute.Metadata) error
SetCommonInstanceMetadata(project string, md *compute.Metadata) error
// Beta API calls
GetGuestAttributes(project, zone, name, queryPath, variableKey string) (*computeBeta.GuestAttributes, error)
Retry(f func(opts ...googleapi.CallOption) (*compute.Operation, error), opts ...googleapi.CallOption) (op *compute.Operation, err error)
RetryBeta(f func(opts ...googleapi.CallOption) (*computeBeta.Operation, error), opts ...googleapi.CallOption) (op *computeBeta.Operation, err error)
BasePath() string
}
// A ListCallOption is an option for a Google Compute API *ListCall.
type ListCallOption interface {
listCallOptionApply(interface{}) interface{}
}
// OrderBy sets the optional parameter "orderBy": Sorts list results by a
// certain order. By default, results are returned in alphanumerical order
// based on the resource name.
type OrderBy string
func (o OrderBy) listCallOptionApply(i interface{}) interface{} {
switch c := i.(type) {
case *compute.FirewallsListCall:
return c.OrderBy(string(o))
case *compute.ImagesListCall:
return c.OrderBy(string(o))
case *compute.MachineTypesListCall:
return c.OrderBy(string(o))
case *compute.ZonesListCall:
return c.OrderBy(string(o))
case *compute.InstancesListCall:
return c.OrderBy(string(o))
case *compute.DisksListCall:
return c.OrderBy(string(o))
case *compute.NetworksListCall:
return c.OrderBy(string(o))
case *compute.SubnetworksListCall:
return c.OrderBy(string(o))
}
return i
}
// Filter sets the optional parameter "filter": Sets a filter {expression} for
// filtering listed resources. Your {expression} must be in the format:
// field_name comparison_string literal_string.
type Filter string
func (o Filter) listCallOptionApply(i interface{}) interface{} {
switch c := i.(type) {
case *compute.FirewallsListCall:
return c.Filter(string(o))
case *compute.ImagesListCall:
return c.Filter(string(o))
case *compute.MachineTypesListCall:
return c.Filter(string(o))
case *compute.ZonesListCall:
return c.Filter(string(o))
case *compute.InstancesListCall:
return c.Filter(string(o))
case *compute.DisksListCall:
return c.Filter(string(o))
case *compute.NetworksListCall:
return c.Filter(string(o))
case *compute.SubnetworksListCall:
return c.Filter(string(o))
}
return i
}
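
// Example (illustrative; the project name and filter value are made up) of
// passing list-call options:
//
//	images, err := c.ListImages("my-project",
//		Filter("name eq my-image-.*"), OrderBy("name"))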
type clientImpl interface {
Client
zoneOperationsWait(project, zone, name string) error
regionOperationsWait(project, region, name string) error
globalOperationsWait(project, name string) error
}
type client struct {
i clientImpl
hc *http.Client
raw *compute.Service
rawBeta *computeBeta.Service
}
// shouldRetryWithWait returns true if the HTTP response / error indicates
// that the request should be attempted again.
func shouldRetryWithWait(tripper http.RoundTripper, err error, multiplier int) bool {
if err == nil {
return false
}
tkValid := true
trans, ok := tripper.(*oauth2.Transport)
if ok {
if tk, err := trans.Source.Token(); err == nil {
tkValid = tk.Valid()
}
}
apiErr, ok := err.(*googleapi.Error)
var retry bool
switch {
case !ok && tkValid:
// Not a googleapi.Error and the token is still valid.
return false
case apiErr.Code >= 500 && apiErr.Code <= 599:
retry = true
case apiErr.Code >= 429:
// Too many API requests.
retry = true
case !tkValid:
// This was probably a failure to get new token from metadata server.
retry = true
}
if !retry {
return false
}
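	// Linear backoff with jitter: sleep a random duration in [1s, 2s) scaled by
	// multiplier, e.g. multiplier 3 waits between 3s and ~6s before retrying.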
sleep := (time.Duration(rand.Intn(1000))*time.Millisecond + 1*time.Second) * time.Duration(multiplier)
time.Sleep(sleep)
return true
}
// NewClient creates a new Google Cloud Compute client.
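// A typical construction (illustrative):
//
//	c, err := NewClient(context.Background())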
func NewClient(ctx context.Context, opts ...option.ClientOption) (Client, error) {
o := []option.ClientOption{option.WithScopes(compute.ComputeScope, compute.DevstorageReadWriteScope)}
opts = append(o, opts...)
hc, ep, err := transport.NewHTTPClient(ctx, opts...)
if err != nil {
return nil, fmt.Errorf("error creating HTTP API client: %v", err)
}
rawService, err := compute.New(hc)
if err != nil {
return nil, fmt.Errorf("compute client: %v", err)
}
if ep != "" {
rawService.BasePath = ep
}
rawBetaService, err := computeBeta.New(hc)
if err != nil {
return nil, fmt.Errorf("beta compute client: %v", err)
}
if ep != "" {
rawBetaService.BasePath = ep
}
c := &client{hc: hc, raw: rawService, rawBeta: rawBetaService}
c.i = c
return c, nil
}
// BasePath returns the base path for this client.
func (c *client) BasePath() string {
return c.raw.BasePath
}
type operationGetterFunc func() (*compute.Operation, error)
func (c *client) zoneOperationsWait(project, zone, name string) error {
return c.operationsWaitHelper(project, name, func() (op *compute.Operation, err error) {
op, err = c.Retry(c.raw.ZoneOperations.Get(project, zone, name).Do)
if err != nil {
err = fmt.Errorf("failed to get zone operation %s: %v", name, err)
}
return op, err
})
}
func (c *client) regionOperationsWait(project, region, name string) error {
return c.operationsWaitHelper(project, name, func() (op *compute.Operation, err error) {
op, err = c.Retry(c.raw.RegionOperations.Get(project, region, name).Do)
if err != nil {
err = fmt.Errorf("failed to get region operation %s: %v", name, err)
}
return op, err
})
}
func (c *client) globalOperationsWait(project, name string) error {
return c.operationsWaitHelper(project, name, func() (op *compute.Operation, err error) {
op, err = c.Retry(c.raw.GlobalOperations.Get(project, name).Do)
if err != nil {
err = fmt.Errorf("failed to get global operation %s: %v", name, err)
}
return op, err
})
}
func (c *client) operationsWaitHelper(project, name string, getOperation operationGetterFunc) error {
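	// Poll once per second while the operation is PENDING or RUNNING; once DONE,
	// aggregate and surface any errors recorded on the operation.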
for {
op, err := getOperation()
if err != nil {
return err
}
switch op.Status {
case "PENDING", "RUNNING":
time.Sleep(1 * time.Second)
continue
case "DONE":
if op.Error != nil {
var operrs string
for _, operr := range op.Error.Errors {
operrs = operrs + fmt.Sprintf("\n Code: %s, Message: %s", operr.Code, operr.Message)
}
return fmt.Errorf("operation failed %+v: %s", op, operrs)
}
default:
return fmt.Errorf("unknown operation status %q: %+v", op.Status, op)
}
return nil
}
}
// Retry invokes the given function, retrying it multiple times if the HTTP
// status response indicates the request should be attempted again or the
// oauth Token is no longer valid.
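// Callers typically pass a request's Do method directly, mirroring uses later
// in this file, e.g. (illustrative):
//
//	op, err := c.Retry(c.raw.Disks.Delete(project, zone, name).Do)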
func (c *client) Retry(f func(opts ...googleapi.CallOption) (*compute.Operation, error), opts ...googleapi.CallOption) (op *compute.Operation, err error) {
for i := 1; i < 4; i++ {
op, err = f(opts...)
if err == nil {
return op, nil
}
if !shouldRetryWithWait(c.hc.Transport, err, i) {
return nil, err
}
}
return
}
// RetryBeta invokes the given function, retrying it multiple times if the HTTP
// status response indicates the request should be attempted again or the
// oauth Token is no longer valid.
func (c *client) RetryBeta(f func(opts ...googleapi.CallOption) (*computeBeta.Operation, error), opts ...googleapi.CallOption) (op *computeBeta.Operation, err error) {
for i := 1; i < 4; i++ {
op, err = f(opts...)
if err == nil {
return op, nil
}
if !shouldRetryWithWait(c.hc.Transport, err, i) {
return nil, err
}
}
return
}
// AttachDisk attaches a GCE persistent disk to an instance.
func (c *client) AttachDisk(project, zone, instance string, d *compute.AttachedDisk) error {
op, err := c.Retry(c.raw.Instances.AttachDisk(project, zone, instance, d).Do)
if err != nil {
return err
}
return c.i.zoneOperationsWait(project, zone, op.Name)
}
// DetachDisk detaches a GCE persistent disk to an instance.
func (c *client) DetachDisk(project, zone, instance, disk string) error {
op, err := c.Retry(c.raw.Instances.DetachDisk(project, zone, instance, disk).Do)
if err != nil {
return err
}
return c.i.zoneOperationsWait(project, zone, op.Name)
}
// CreateDisk creates a GCE persistent disk.
func (c *client) CreateDisk(project, zone string, d *compute.Disk) error {
op, err := c.Retry(c.raw.Disks.Insert(project, zone, d).Do)
if err != nil {
return err
}
if err := c.i.zoneOperationsWait(project, zone, op.Name); err != nil {
return err
}
var createdDisk *compute.Disk
if createdDisk, err = c.i.GetDisk(project, zone, d.Name); err != nil {
return err
}
*d = *createdDisk
return nil
}
// CreateForwardingRule creates a GCE forwarding rule.
func (c *client) CreateForwardingRule(project, region string, fr *compute.ForwardingRule) error {
op, err := c.Retry(c.raw.ForwardingRules.Insert(project, region, fr).Do)
if err != nil {
return err
}
if err := c.i.regionOperationsWait(project, region, op.Name); err != nil {
return err
}
var createdForwardingRule *compute.ForwardingRule
if createdForwardingRule, err = c.i.GetForwardingRule(project, region, fr.Name); err != nil {
return err
}
*fr = *createdForwardingRule
return nil
}
func (c *client) CreateFirewallRule(project string, i *compute.Firewall) error {
op, err := c.Retry(c.raw.Firewalls.Insert(project, i).Do)
if err != nil {
return err
}
if err := c.i.globalOperationsWait(project, op.Name); err != nil {
return err
}
var createdFirewallRule *compute.Firewall
if createdFirewallRule, err = c.i.GetFirewallRule(project, i.Name); err != nil {
return err
}
*i = *createdFirewallRule
return nil
}
// CreateImage creates a GCE image.
// Only one of sourceDisk or sourceFile must be specified: sourceDisk is the
// URL (full or partial) of the source disk; sourceFile is the full Google
// Cloud Storage URL where the disk image is stored.
func (c *client) CreateImage(project string, i *compute.Image) error {
op, err := c.Retry(c.raw.Images.Insert(project, i).Do)
if err != nil {
return err
}
if err := c.i.globalOperationsWait(project, op.Name); err != nil {
return err
}
var createdImage *compute.Image
if createdImage, err = c.i.GetImage(project, i.Name); err != nil {
return err
}
*i = *createdImage
return nil
}
// CreateImageBeta creates a GCE image using Beta API.
// Only one of sourceDisk or sourceFile must be specified: sourceDisk is the
// URL (full or partial) of the source disk; sourceFile is the full Google
// Cloud Storage URL where the disk image is stored.
func (c *client) CreateImageBeta(project string, i *computeBeta.Image) error {
op, err := c.RetryBeta(c.rawBeta.Images.Insert(project, i).Do)
if err != nil {
return err
}
if err := c.i.globalOperationsWait(project, op.Name); err != nil {
return err
}
var createdImage *computeBeta.Image
if createdImage, err = c.i.GetImageBeta(project, i.Name); err != nil {
return err
}
*i = *createdImage
return nil
}
func (c *client) CreateInstance(project, zone string, i *compute.Instance) error {
op, err := c.Retry(c.raw.Instances.Insert(project, zone, i).Do)
if err != nil {
return err
}
if err := c.i.zoneOperationsWait(project, zone, op.Name); err != nil {
return err
}
var createdInstance *compute.Instance
if createdInstance, err = c.i.GetInstance(project, zone, i.Name); err != nil {
return err
}
*i = *createdInstance
return nil
}
func (c *client) CreateNetwork(project string, n *compute.Network) error {
op, err := c.Retry(c.raw.Networks.Insert(project, n).Do)
if err != nil {
return err
}
if err := c.i.globalOperationsWait(project, op.Name); err != nil {
return err
}
var createdNetwork *compute.Network
if createdNetwork, err = c.i.GetNetwork(project, n.Name); err != nil {
return err
}
*n = *createdNetwork
return nil
}
func (c *client) CreateSubnetwork(project, region string, n *compute.Subnetwork) error {
op, err := c.Retry(c.raw.Subnetworks.Insert(project, region, n).Do)
if err != nil {
return err
}
if err := c.i.regionOperationsWait(project, region, op.Name); err != nil {
return err
}
var createdSubnetwork *compute.Subnetwork
if createdSubnetwork, err = c.i.GetSubnetwork(project, region, n.Name); err != nil {
return err
}
*n = *createdSubnetwork
return nil
}
// CreateTargetInstance creates a GCE Target Instance, which can be used as
// the target of a ForwardingRule.
func (c *client) CreateTargetInstance(project, zone string, ti *compute.TargetInstance) error {
op, err := c.Retry(c.raw.TargetInstances.Insert(project, zone, ti).Do)
if err != nil {
return err
}
if err := c.i.zoneOperationsWait(project, zone, op.Name); err != nil {
return err
}
var createdTargetInstance *compute.TargetInstance
if createdTargetInstance, err = c.i.GetTargetInstance(project, zone, ti.Name); err != nil {
return err
}
*ti = *createdTargetInstance
return nil
}
// DeleteFirewallRule deletes a GCE FirewallRule.
func (c *client) DeleteFirewallRule(project, name string) error {
op, err := c.Retry(c.raw.Firewalls.Delete(project, name).Do)
if err != nil {
return err
}
return c.i.globalOperationsWait(project, op.Name)
}
// DeleteImage deletes a GCE image.
func (c *client) DeleteImage(project, name string) error {
op, err := c.Retry(c.raw.Images.Delete(project, name).Do)
if err != nil {
return err
}
return c.i.globalOperationsWait(project, op.Name)
}
// DeleteDisk deletes a GCE persistent disk.
func (c *client) DeleteDisk(project, zone, name string) error {
op, err := c.Retry(c.raw.Disks.Delete(project, zone, name).Do)
if err != nil {
return err
}
return c.i.zoneOperationsWait(project, zone, op.Name)
}
// DeleteForwardingRule deletes a GCE ForwardingRule.
func (c *client) DeleteForwardingRule(project, region, name string) error {
op, err := c.Retry(c.raw.ForwardingRules.Delete(project, region, name).Do)
if err != nil {
return err
}
return c.i.regionOperationsWait(project, region, op.Name)
}
// DeleteInstance deletes a GCE instance.
func (c *client) DeleteInstance(project, zone, name string) error {
op, err := c.Retry(c.raw.Instances.Delete(project, zone, name).Do)
if err != nil {
return err
}
return c.i.zoneOperationsWait(project, zone, op.Name)
}
// StartInstance starts a GCE instance.
func (c *client) StartInstance(project, zone, name string) error {
op, err := c.Retry(c.raw.Instances.Start(project, zone, name).Do)
if err != nil {
return err
}
return c.i.zoneOperationsWait(project, zone, op.Name)
}
// StopInstance stops a GCE instance.
func (c *client) StopInstance(project, zone, name string) error {
op, err := c.Retry(c.raw.Instances.Stop(project, zone, name).Do)
if err != nil {
return err
}
return c.i.zoneOperationsWait(project, zone, op.Name)
}
// DeleteNetwork deletes a GCE network.
func (c *client) DeleteNetwork(project, name string) error {
op, err := c.Retry(c.raw.Networks.Delete(project, name).Do)
if err != nil {
return err
}
return c.i.globalOperationsWait(project, op.Name)
}
// DeleteSubnetwork deletes a GCE subnetwork.
func (c *client) DeleteSubnetwork(project, region, name string) error {
op, err := c.Retry(c.raw.Subnetworks.Delete(project, region, name).Do)
if err != nil {
return err
}
return c.i.regionOperationsWait(project, region, op.Name)
}
// DeleteTargetInstance deletes a GCE TargetInstance.
func (c *client) DeleteTargetInstance(project, zone, name string) error {
op, err := c.Retry(c.raw.TargetInstances.Delete(project, zone, name).Do)
if err != nil {
return err
}
return c.i.zoneOperationsWait(project, zone, op.Name)
}
// DeprecateImage sets deprecation status on a GCE image.
func (c *client) DeprecateImage(project, name string, deprecationstatus *compute.DeprecationStatus) error {
op, err := c.Retry(c.raw.Images.Deprecate(project, name, deprecationstatus).Do)
if err != nil {
return err
}
return c.i.globalOperationsWait(project, op.Name)
}
// GetMachineType gets a GCE MachineType.
func (c *client) GetMachineType(project, zone, machineType string) (*compute.MachineType, error) {
mt, err := c.raw.MachineTypes.Get(project, zone, machineType).Do()
if shouldRetryWithWait(c.hc.Transport, err, 2) {
return c.raw.MachineTypes.Get(project, zone, machineType).Do()
}
return mt, err
}
// ListMachineTypes gets a list of GCE MachineTypes.
func (c *client) ListMachineTypes(project, zone string, opts ...ListCallOption) ([]*compute.MachineType, error) {
var mts []*compute.MachineType
var pt string
call := c.raw.MachineTypes.List(project, zone)
for _, opt := range opts {
call = opt.listCallOptionApply(call).(*compute.MachineTypesListCall)
}
for mtl, err := call.PageToken(pt).Do(); ; mtl, err = call.PageToken(pt).Do() {
if shouldRetryWithWait(c.hc.Transport, err, 2) {
mtl, err = call.PageToken(pt).Do()
}
if err != nil {
return nil, err
}
mts = append(mts, mtl.Items...)
if mtl.NextPageToken == "" {
return mts, nil
}
pt = mtl.NextPageToken
}
}
// GetProject gets a GCE Project.
func (c *client) GetProject(project string) (*compute.Project, error) {
p, err := c.raw.Projects.Get(project).Do()
if shouldRetryWithWait(c.hc.Transport, err, 2) {
return c.raw.Projects.Get(project).Do()
}
return p, err
}
// GetSerialPortOutput gets the serial port output of a GCE instance.
func (c *client) GetSerialPortOutput(project, zone, name string, port, start int64) (*compute.SerialPortOutput, error) {
sp, err := c.raw.Instances.GetSerialPortOutput(project, zone, name).Start(start).Port(port).Do()
if shouldRetryWithWait(c.hc.Transport, err, 2) {
return c.raw.Instances.GetSerialPortOutput(project, zone, name).Start(start).Port(port).Do()
}
return sp, err
}
// GetZone gets a GCE Zone.
func (c *client) GetZone(project, zone string) (*compute.Zone, error) {
z, err := c.raw.Zones.Get(project, zone).Do()
if shouldRetryWithWait(c.hc.Transport, err, 2) {
return c.raw.Zones.Get(project, zone).Do()
}
return z, err
}
// ListZones gets a list of GCE Zones.
func (c *client) ListZones(project string, opts ...ListCallOption) ([]*compute.Zone, error) {
var zs []*compute.Zone
var pt string
call := c.raw.Zones.List(project)
for _, opt := range opts {
call = opt.listCallOptionApply(call).(*compute.ZonesListCall)
}
for zl, err := call.PageToken(pt).Do(); ; zl, err = call.PageToken(pt).Do() {
if shouldRetryWithWait(c.hc.Transport, err, 2) {
zl, err = call.PageToken(pt).Do()
}
if err != nil {
return nil, err
}
zs = append(zs, zl.Items...)
if zl.NextPageToken == "" {
return zs, nil
}
pt = zl.NextPageToken
}
}
// ListRegions gets a list of GCE Regions.
func (c *client) ListRegions(project string, opts ...ListCallOption) ([]*compute.Region, error) {
var rs []*compute.Region
var pt string
call := c.raw.Regions.List(project)
for _, opt := range opts {
call = opt.listCallOptionApply(call).(*compute.RegionsListCall)
}
for rl, err := call.PageToken(pt).Do(); ; rl, err = call.PageToken(pt).Do() {
if shouldRetryWithWait(c.hc.Transport, err, 2) {
rl, err = call.PageToken(pt).Do()
}
if err != nil {
return nil, err
}
rs = append(rs, rl.Items...)
if rl.NextPageToken == "" {
return rs, nil
}
pt = rl.NextPageToken
}
}
// GetInstance gets a GCE Instance.
func (c *client) GetInstance(project, zone, name string) (*compute.Instance, error) {
i, err := c.raw.Instances.Get(project, zone, name).Do()
if shouldRetryWithWait(c.hc.Transport, err, 2) {
return c.raw.Instances.Get(project, zone, name).Do()
}
return i, err
}
// ListInstances gets a list of GCE Instances.
func (c *client) ListInstances(project, zone string, opts ...ListCallOption) ([]*compute.Instance, error) {
var is []*compute.Instance
var pt string
call := c.raw.Instances.List(project, zone)
for _, opt := range opts {
call = opt.listCallOptionApply(call).(*compute.InstancesListCall)
}
for il, err := call.PageToken(pt).Do(); ; il, err = call.PageToken(pt).Do() {
if shouldRetryWithWait(c.hc.Transport, err, 2) {
il, err = call.PageToken(pt).Do()
}
if err != nil {
return nil, err
}
is = append(is, il.Items...)
if il.NextPageToken == "" {
return is, nil
}
pt = il.NextPageToken
}
}
// GetDisk gets a GCE Disk.
func (c *client) GetDisk(project, zone, name string) (*compute.Disk, error) {
d, err := c.raw.Disks.Get(project, zone, name).Do()
if shouldRetryWithWait(c.hc.Transport, err, 2) {
return c.raw.Disks.Get(project, zone, name).Do()
}
return d, err
}
// ListDisks gets a list of GCE Disks.
func (c *client) ListDisks(project, zone string, opts ...ListCallOption) ([]*compute.Disk, error) {
var ds []*compute.Disk
var pt string
call := c.raw.Disks.List(project, zone)
for _, opt := range opts {
call = opt.listCallOptionApply(call).(*compute.DisksListCall)
}
for dl, err := call.PageToken(pt).Do(); ; dl, err = call.PageToken(pt).Do() {
if shouldRetryWithWait(c.hc.Transport, err, 2) {
dl, err = call.PageToken(pt).Do()
}
if err != nil {
return nil, err
}
ds = append(ds, dl.Items...)
if dl.NextPageToken == "" {
return ds, nil
}
pt = dl.NextPageToken
}
}
// GetForwardingRule gets a GCE ForwardingRule.
func (c *client) GetForwardingRule(project, region, name string) (*compute.ForwardingRule, error) {
n, err := c.raw.ForwardingRules.Get(project, region, name).Do()
if shouldRetryWithWait(c.hc.Transport, err, 2) {
return c.raw.ForwardingRules.Get(project, region, name).Do()
}
return n, err
}
// ListForwardingRules gets a list of GCE ForwardingRules.
func (c *client) ListForwardingRules(project, region string, opts ...ListCallOption) ([]*compute.ForwardingRule, error) {
var frs []*compute.ForwardingRule
var pt string
call := c.raw.ForwardingRules.List(project, region)
for _, opt := range opts {
call = opt.listCallOptionApply(call).(*compute.ForwardingRulesListCall)
}
for frl, err := call.PageToken(pt).Do(); ; frl, err = call.PageToken(pt).Do() {
if shouldRetryWithWait(c.hc.Transport, err, 2) {
frl, err = call.PageToken(pt).Do()
}
if err != nil {
return nil, err
}
frs = append(frs, frl.Items...)
if frl.NextPageToken == "" {
return frs, nil
}
pt = frl.NextPageToken
}
}
// GetFirewallRule gets a GCE FirewallRule.
func (c *client) GetFirewallRule(project, name string) (*compute.Firewall, error) {
i, err := c.raw.Firewalls.Get(project, name).Do()
if shouldRetryWithWait(c.hc.Transport, err, 2) {
return c.raw.Firewalls.Get(project, name).Do()
}
return i, err
}
// ListFirewallRules gets a list of GCE FirewallRules.
func (c *client) ListFirewallRules(project string, opts ...ListCallOption) ([]*compute.Firewall, error) {
var is []*compute.Firewall
var pt string
call := c.raw.Firewalls.List(project)
for _, opt := range opts {
call = opt.listCallOptionApply(call).(*compute.FirewallsListCall)
}
for il, err := call.PageToken(pt).Do(); ; il, err = call.PageToken(pt).Do() {
if shouldRetryWithWait(c.hc.Transport, err, 2) {
il, err = call.PageToken(pt).Do()
}
if err != nil {
return nil, err
}
is = append(is, il.Items...)
if il.NextPageToken == "" {
return is, nil
}
pt = il.NextPageToken
}
}
// GetImage gets a GCE Image.
func (c *client) GetImage(project, name string) (*compute.Image, error) {
i, err := c.raw.Images.Get(project, name).Do()
if shouldRetryWithWait(c.hc.Transport, err, 2) {
return c.raw.Images.Get(project, name).Do()
}
return i, err
}
// GetImageBeta gets a GCE Image using the Beta API.
func (c *client) GetImageBeta(project, name string) (*computeBeta.Image, error) {
i, err := c.rawBeta.Images.Get(project, name).Do()
if shouldRetryWithWait(c.hc.Transport, err, 2) {
return c.rawBeta.Images.Get(project, name).Do()
}
return i, err
}
// GetImageFromFamily gets a GCE Image from an image family.
func (c *client) GetImageFromFamily(project, family string) (*compute.Image, error) {
i, err := c.raw.Images.GetFromFamily(project, family).Do()
if shouldRetryWithWait(c.hc.Transport, err, 2) {
return c.raw.Images.GetFromFamily(project, family).Do()
}
return i, err
}
// ListImages gets a list of GCE Images.
func (c *client) ListImages(project string, opts ...ListCallOption) ([]*compute.Image, error) {
var is []*compute.Image
var pt string
call := c.raw.Images.List(project)
for _, opt := range opts {
call = opt.listCallOptionApply(call).(*compute.ImagesListCall)
}
for il, err := call.PageToken(pt).Do(); ; il, err = call.PageToken(pt).Do() {
if shouldRetryWithWait(c.hc.Transport, err, 2) {
il, err = call.PageToken(pt).Do()
}
if err != nil {
return nil, err
}
is = append(is, il.Items...)
if il.NextPageToken == "" {
return is, nil
}
pt = il.NextPageToken
}
}
// GetNetwork gets a GCE Network.
func (c *client) GetNetwork(project, name string) (*compute.Network, error) {
n, err := c.raw.Networks.Get(project, name).Do()
if shouldRetryWithWait(c.hc.Transport, err, 2) {
return c.raw.Networks.Get(project, name).Do()
}
return n, err
}
// ListNetworks gets a list of GCE Networks.
func (c *client) ListNetworks(project string, opts ...ListCallOption) ([]*compute.Network, error) {
var ns []*compute.Network
var pt string
call := c.raw.Networks.List(project)
for _, opt := range opts {
call = opt.listCallOptionApply(call).(*compute.NetworksListCall)
}
for nl, err := call.PageToken(pt).Do(); ; nl, err = call.PageToken(pt).Do() {
if shouldRetryWithWait(c.hc.Transport, err, 2) {
nl, err = call.PageToken(pt).Do()
}
if err != nil {
return nil, err
}
ns = append(ns, nl.Items...)
if nl.NextPageToken == "" {
return ns, nil
}
pt = nl.NextPageToken
}
}
// GetSubnetwork gets a GCE subnetwork.
func (c *client) GetSubnetwork(project, region, name string) (*compute.Subnetwork, error) {
n, err := c.raw.Subnetworks.Get(project, region, name).Do()
if shouldRetryWithWait(c.hc.Transport, err, 2) {
return c.raw.Subnetworks.Get(project, region, name).Do()
}
return n, err
}
// ListSubnetworks gets a list of GCE subnetworks.
func (c *client) ListSubnetworks(project, region string, opts ...ListCallOption) ([]*compute.Subnetwork, error) {
var ns []*compute.Subnetwork
var pt string
call := c.raw.Subnetworks.List(project, region)
for _, opt := range opts {
call = opt.listCallOptionApply(call).(*compute.SubnetworksListCall)
}
for nl, err := call.PageToken(pt).Do(); ; nl, err = call.PageToken(pt).Do() {
if shouldRetryWithWait(c.hc.Transport, err, 2) {
nl, err = call.PageToken(pt).Do()
}
if err != nil {
return nil, err
}
ns = append(ns, nl.Items...)
if nl.NextPageToken == "" {
return ns, nil
}
pt = nl.NextPageToken
}
}
// GetTargetInstance gets a GCE TargetInstance.
func (c *client) GetTargetInstance(project, zone, name string) (*compute.TargetInstance, error) {
n, err := c.raw.TargetInstances.Get(project, zone, name).Do()
if shouldRetryWithWait(c.hc.Transport, err, 2) {
return c.raw.TargetInstances.Get(project, zone, name).Do()
}
return n, err
}
// ListTargetInstances gets a list of GCE TargetInstances.
func (c *client) ListTargetInstances(project, zone string, opts ...ListCallOption) ([]*compute.TargetInstance, error) {
var tis []*compute.TargetInstance
var pt string
call := c.raw.TargetInstances.List(project, zone)
for _, opt := range opts {
call = opt.listCallOptionApply(call).(*compute.TargetInstancesListCall)
}
for til, err := call.PageToken(pt).Do(); ; til, err = call.PageToken(pt).Do() {
if shouldRetryWithWait(c.hc.Transport, err, 2) {
til, err = call.PageToken(pt).Do()
}
if err != nil {
return nil, err
}
tis = append(tis, til.Items...)
if til.NextPageToken == "" {
return tis, nil
}
pt = til.NextPageToken
}
}
// GetLicense gets a GCE License.
func (c *client) GetLicense(project, name string) (*compute.License, error) {
l, err := c.raw.Licenses.Get(project, name).Do()
if shouldRetryWithWait(c.hc.Transport, err, 2) {
return c.raw.Licenses.Get(project, name).Do()
}
return l, err
}
// InstanceStatus returns an instance's status.
func (c *client) InstanceStatus(project, zone, name string) (string, error) {
is, err := c.raw.Instances.Get(project, zone, name).Do()
if shouldRetryWithWait(c.hc.Transport, err, 2) {
is, err = c.raw.Instances.Get(project, zone, name).Do()
}
if err != nil {
return "", err
}
return is.Status, nil
}
// InstanceStopped checks if a GCE instance is in a 'TERMINATED' or 'STOPPED' state.
func (c *client) InstanceStopped(project, zone, name string) (bool, error) {
status, err := c.i.InstanceStatus(project, zone, name)
if err != nil {
return false, err
}
switch status {
case "PROVISIONING", "RUNNING", "STAGING", "STOPPING":
return false, nil
case "TERMINATED", "STOPPED":
return true, nil
default:
return false, fmt.Errorf("unexpected instance status %q", status)
}
}
// ResizeDisk resizes a GCE persistent disk. You can only increase the size of the disk.
func (c *client) ResizeDisk(project, zone, disk string, drr *compute.DisksResizeRequest) error {
op, err := c.Retry(c.raw.Disks.Resize(project, zone, disk, drr).Do)
if err != nil {
return err
}
return c.i.zoneOperationsWait(project, zone, op.Name)
}
// SetInstanceMetadata sets an instance's metadata.
func (c *client) SetInstanceMetadata(project, zone, name string, md *compute.Metadata) error {
op, err := c.Retry(c.raw.Instances.SetMetadata(project, zone, name, md).Do)
if err != nil {
return err
}
return c.i.zoneOperationsWait(project, zone, op.Name)
}
// SetCommonInstanceMetadata sets a project's common instance metadata.
func (c *client) SetCommonInstanceMetadata(project string, md *compute.Metadata) error {
op, err := c.Retry(c.raw.Projects.SetCommonInstanceMetadata(project, md).Do)
if err != nil {
return err
}
return c.i.globalOperationsWait(project, op.Name)
}
// GetGuestAttributes gets an instance's guest attributes.
func (c *client) GetGuestAttributes(project, zone, name, queryPath, variableKey string) (*computeBeta.GuestAttributes, error) {
call := c.rawBeta.Instances.GetGuestAttributes(project, zone, name)
if queryPath != "" {
call = call.QueryPath(queryPath)
}
if variableKey != "" {
call = call.VariableKey(variableKey)
}
a, err := call.Do()
if shouldRetryWithWait(c.hc.Transport, err, 2) {
return call.Do()
}
return a, err
}
| 1 | 9,456 | After chatting with Andrew, we think it's better to look for this particular error string that is causing issues ("connection reset by peer") instead of blindly retrying on any error we don't know about. | GoogleCloudPlatform-compute-image-tools | go |
@@ -25,14 +25,14 @@ namespace AutoRest.Swagger.Validation.Core
/// <summary>
/// What kind of open api document type this rule should be applied to
/// </summary>
- public virtual ServiceDefinitionDocumentType ServiceDefinitionDocumentType => ServiceDefinitionDocumentType.ARM;
+ public abstract ServiceDefinitionDocumentType ServiceDefinitionDocumentType { get; }
/// <summary>
/// When to apply the validation rule, before or after it has been merged as a part of
/// its merged document as specified in the corresponding '.md' file
/// By default consider all rules to be applied for After only
/// </summary>
- public virtual ServiceDefinitionDocumentState ValidationRuleMergeState => ServiceDefinitionDocumentState.Composed;
+ public abstract ServiceDefinitionDocumentState ValidationRuleMergeState { get; }
/// <summary>
/// Violation category of the Rule.
| 1 |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
using System.Collections.Generic;
using AutoRest.Core.Logging;
using AutoRest.Swagger.Model;
using System;
namespace AutoRest.Swagger.Validation.Core
{
/// <summary>
/// Defines validation logic for an object
/// </summary>
public abstract class Rule
{
protected Rule()
{
}
/// <summary>
/// Id of the Rule.
/// </summary>
public abstract string Id { get; }
/// <summary>
/// What kind of open api document type this rule should be applied to
/// </summary>
public virtual ServiceDefinitionDocumentType ServiceDefinitionDocumentType => ServiceDefinitionDocumentType.ARM;
/// <summary>
/// When to apply the validation rule, before or after it has been merged as a part of
/// its merged document as specified in the corresponding '.md' file
/// By default consider all rules to be applied for After only
/// </summary>
public virtual ServiceDefinitionDocumentState ValidationRuleMergeState => ServiceDefinitionDocumentState.Composed;
/// <summary>
/// Violation category of the Rule.
/// </summary>
public abstract ValidationCategory ValidationCategory { get; }
/// <summary>
/// What kind of change implementing this rule can cause.
/// </summary>
public virtual ValidationChangesImpact ValidationChangesImpact => ValidationChangesImpact.None;
/// <summary>
/// The template message for this Rule.
/// </summary>
/// <remarks>
/// This may contain placeholders '{0}' for parameterized messages.
/// </remarks>
public abstract string MessageTemplate { get; }
/// <summary>
/// The severity of this message (ie, debug/info/warning/error/fatal, etc)
/// </summary>
public abstract Category Severity { get; }
/// <summary>
/// Returns the validation messages resulting from validating this object
/// </summary>
/// <param name="entity">The object to validate</param>
/// <returns></returns>
public abstract IEnumerable<ValidationMessage> GetValidationMessages(object entity, RuleContext context);
}
}
| 1 | 25,104 | any reason to not have defaults here like you had before? (ARM)? | Azure-autorest | java |
@@ -1,4 +1,3 @@
-<% page_info = paginate_params(@response) %>
<%= tag :meta, :name => "totalResults", :content => @response.total %>
-<%= tag :meta, :name => "startIndex", :content => (page_info.current_page == 1 ? 1 : @response.start ) %>
-<%= tag :meta, :name => "itemsPerPage", :content => page_info.limit_value %>
+<%= tag :meta, :name => "startIndex", :content => @response.start %>
+<%= tag :meta, :name => "itemsPerPage", :content => @response.limit_value %> | 1 | <% page_info = paginate_params(@response) %>
<%= tag :meta, :name => "totalResults", :content => @response.total %>
<%= tag :meta, :name => "startIndex", :content => (page_info.current_page == 1 ? 1 : @response.start ) %>
<%= tag :meta, :name => "itemsPerPage", :content => page_info.limit_value %>
| 1 | 4,602 | This is a change. The old code was just wrong before, but now here (and one other machine-readable place) we expose the start index as 0 for the first item. | projectblacklight-blacklight | rb |
@@ -178,11 +178,11 @@ namespace RDKit {
};
} // end of SLNParse namespace
- RWMol *SLNToMol(std::string sln,bool sanitize,int debugParse){
+ RWMol *SLNToMol(const std::string &sln,bool sanitize,int debugParse){
// FIX: figure out how to reset lexer state
yysln_debug = debugParse;
// strip any leading/trailing whitespace:
- boost::trim_if(sln,boost::is_any_of(" \t\r\n"));
+ //boost::trim_if(sln,boost::is_any_of(" \t\r\n"));
RWMol *res = SLNParse::toMol(sln,false,debugParse);
if(res){
| 1 |
// $Id$
//
// Copyright (c) 2008, Novartis Institutes for BioMedical Research Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Novartis Institutes for BioMedical Research Inc.
// nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Created by Greg Landrum, Sept. 2006
//
#include <GraphMol/RDKitBase.h>
#include <GraphMol/RDKitQueries.h>
#include <GraphMol/SLNParse/SLNParse.h>
#include <GraphMol/SLNParse/SLNAttribs.h>
#include <RDGeneral/RDLog.h>
#include <RDGeneral/Invariant.h>
#include <boost/algorithm/string.hpp>
#include <boost/regex.hpp>
int yysln_parse (const char *,std::vector<RDKit::RWMol *>*,bool,void *);
int yysln_lex_init (void **);
void yysln_set_extra (void *,void *);
int yysln_lex_destroy (void *);
void setup_sln_string(const std::string &text,void *);
extern int yysln_debug;
int sln_parse(const std::string &inp,
bool doQueries,
std::vector<RDKit::RWMol *> &molVect){
void *scanner;
TEST_ASSERT(!yysln_lex_init(&scanner));
setup_sln_string(inp,scanner);
yysln_set_extra((void *)doQueries,scanner);
int res=yysln_parse(inp.c_str(),&molVect,doQueries,scanner);
yysln_lex_destroy(scanner);
return res;
}
namespace RDKit {
namespace SLNParse {
std::vector<RDKit::RWMol *> molList_g;
void finalizeQueryMol(ROMol *mol,bool mergeHs){
PRECONDITION(mol,"bad query molecule");
// do we need to remove the Hs from the molecule?
if(mergeHs){
for(ROMol::AtomIterator atomIt=mol->beginAtoms();
atomIt!=mol->endAtoms();++atomIt){
// set a query for the H count:
if((*atomIt)->getNumExplicitHs()){
(*atomIt)->expandQuery(makeAtomHCountQuery((*atomIt)->getNumExplicitHs()));
}
}
}
// we don't want to sanitize, but we do need to get
// some ring info:
VECT_INT_VECT sssr;
MolOps::symmetrizeSSSR(*mol,sssr);
int rootIdx=-1;
for(ROMol::AtomIterator atomIt=mol->beginAtoms();
atomIt!=mol->endAtoms();++atomIt){
SLNParse::parseFinalAtomAttribs(*atomIt,true);
if((*atomIt)->hasProp(common_properties::_starred)){
if(rootIdx>-1){
BOOST_LOG(rdErrorLog)<<"SLN Error: multiple starred atoms in a recursive query. Extra stars ignored" << std::endl;
} else {
rootIdx=(*atomIt)->getIdx();
}
}
}
if(rootIdx>-1){
mol->setProp(common_properties::_queryRootAtom,rootIdx);
}
}
std::string replaceSLNMacroAtoms(std::string inp,int debugParse){
const boost::regex defn("\\{(.+?):(.+?)\\}");
const char *empty="";
std::string res;
// remove any macro definitions:
res=boost::regex_replace(inp,defn,empty,boost::match_default|boost::format_all);
if(res!=inp){
// there are macro definitions, we're going to replace
// the macro atoms in the input:
std::string::const_iterator start, end;
start=inp.begin();
end=inp.end();
boost::match_results<std::string::const_iterator> what;
boost::match_flag_type flags = boost::match_default;
while(regex_search(start, end, what, defn, flags)){
std::string macroNm(what[1].first,what[1].second);
std::string macroVal(what[2].first,what[2].second);
res = boost::regex_replace(res,boost::regex(macroNm),macroVal.c_str(),
boost::match_default|boost::format_all);
// update search position:
start = what[0].second;
// update flags:
flags |= boost::match_prev_avail;
flags |= boost::match_not_bob;
}
}
return res;
}
RWMol *toMol(std::string inp,bool doQueries,int debugParse){
RWMol *res;
inp = replaceSLNMacroAtoms(inp,debugParse);
if(debugParse){
std::cerr<<"****** PARSING SLN: ->"<<inp<<"<-"<<std::endl;
}
std::vector<RDKit::RWMol *> molVect;
try {
sln_parse(inp,doQueries,molVect);
if(molVect.size()<=0){
res = 0;
} else {
res = molVect[0];
molVect[0]=0;
for(ROMol::BOND_BOOKMARK_MAP::const_iterator bmIt=res->getBondBookmarks()->begin();
bmIt != res->getBondBookmarks()->end();++bmIt){
if(bmIt->first>0 && bmIt->first<static_cast<int>(res->getNumAtoms())){
std::stringstream err;
err << "SLN Parser error: Ring closure " << bmIt->first << " does not have a corresponding opener.";
throw SLNParseException(err.str());
}
}
}
} catch (SLNParseException &e) {
BOOST_LOG(rdErrorLog) << e.message() << std::endl;
res = 0;
}
if(res){
// cleanup:
res->clearAllAtomBookmarks();
res->clearAllBondBookmarks();
// set up the chirality flags as soon as the molecule is finished
// since we'll be removing Hs later and that will break things:
adjustAtomChiralities(res);
}
for(std::vector<RDKit::RWMol *>::iterator iter=molVect.begin();
iter!=molVect.end();++iter){
if(*iter) delete *iter;
}
return res;
};
} // end of SLNParse namespace
RWMol *SLNToMol(std::string sln,bool sanitize,int debugParse){
// FIX: figure out how to reset lexer state
yysln_debug = debugParse;
// strip any leading/trailing whitespace:
boost::trim_if(sln,boost::is_any_of(" \t\r\n"));
RWMol *res = SLNParse::toMol(sln,false,debugParse);
if(res){
for(ROMol::AtomIterator atomIt=res->beginAtoms();
atomIt!=res->endAtoms();++atomIt){
SLNParse::parseFinalAtomAttribs(*atomIt,false);
}
if(sanitize){
// we're going to remove explicit Hs from the graph,
// this triggers a sanitization, so we do not need to
// worry about doing one here:
try{
MolOps::removeHs(*res,false,false);
} catch (...) {
delete res;
throw;
}
}
}
return res;
};
RWMol *SLNQueryToMol(std::string sln,bool mergeHs,int debugParse){
yysln_debug = debugParse;
// strip any leading/trailing whitespace:
boost::trim_if(sln,boost::is_any_of(" \t\r\n"));
RWMol *res = SLNParse::toMol(sln,true,debugParse);
if(res){
SLNParse::finalizeQueryMol(res,mergeHs);
}
return res;
};
}
| 1 | 14,181 | This changes the behavior of the parser, right? Any particular reason to do this aside from the fact that you have to since the function takes a const? | rdkit-rdkit | cpp |
@@ -17,5 +17,16 @@ function Portal(props) {
* @param {import('./internal').PreactElement} container The DOM node to continue rendering in to.
*/
export function createPortal(vnode, container) {
+	// Note: We can't use Fragment here because a component that returned a Portal
+	// (e.g. `const App = () => createPortal(...)`) wouldn't work. Our diff
+	// collapses Fragments without keys that are returned directly from components
+	// into just an array and sets that as the children array of the component.
+	//
+	// We also can't use keyed Fragments here because it might lead to weird edge
+	// cases when toggling between two sibling portals if we use a shared key, or
+	// lead to unnecessary re-mounts if we generate a new key on each call.
+	//
+	// So the simplest solution seems to be to use a unique type for Portal
+	// to skip the Fragment collapsing logic when diffing components.
return createElement(Portal, { _parentDom: container }, vnode);
}
| 1 |
import { createElement } from 'preact';
/**
* Portal component
* @this {import('./internal').Component}
* @param {object | null | undefined} props
*
* TODO: use createRoot() instead of fake root
*/
function Portal(props) {
return props.children;
}
/**
* Create a `Portal` to continue rendering the vnode tree at a different DOM node
* @param {import('./internal').VNode} vnode The vnode to render
* @param {import('./internal').PreactElement} container The DOM node to continue rendering in to.
*/
export function createPortal(vnode, container) {
return createElement(Portal, { _parentDom: container }, vnode);
}
| 1 | 16,850 | Given how simple the Portal implementation is, I wonder if we should move it to core... Though we'd have to export it, which would be more bytes we can't claw back... | preactjs-preact | js |
@@ -348,6 +348,7 @@ Schema.prototype.clone = function() {
s.callQueue = this.callQueue.map(function(f) { return f; });
s.methods = utils.clone(this.methods);
s.statics = utils.clone(this.statics);
+ s.s.hooks = utils.clone(this.s.hooks);
return s;
};
| 1 |
/*!
* Module dependencies.
*/
var readPref = require('./drivers').ReadPreference;
var EventEmitter = require('events').EventEmitter;
var VirtualType = require('./virtualtype');
var utils = require('./utils');
var MongooseTypes;
var Kareem = require('kareem');
var each = require('async/each');
var SchemaType = require('./schematype');
var IS_KAREEM_HOOK = {
count: true,
find: true,
findOne: true,
findOneAndUpdate: true,
findOneAndRemove: true,
insertMany: true,
update: true,
updateMany: true,
updateOne: true
};
/**
* Schema constructor.
*
* ####Example:
*
* var child = new Schema({ name: String });
* var schema = new Schema({ name: String, age: Number, children: [child] });
* var Tree = mongoose.model('Tree', schema);
*
* // setting schema options
* new Schema({ name: String }, { _id: false, autoIndex: false })
*
* ####Options:
*
* - [autoIndex](/docs/guide.html#autoIndex): bool - defaults to null (which means use the connection's autoIndex option)
* - [bufferCommands](/docs/guide.html#bufferCommands): bool - defaults to true
* - [capped](/docs/guide.html#capped): bool - defaults to false
* - [collection](/docs/guide.html#collection): string - no default
* - [emitIndexErrors](/docs/guide.html#emitIndexErrors): bool - defaults to false.
* - [id](/docs/guide.html#id): bool - defaults to true
* - [_id](/docs/guide.html#_id): bool - defaults to true
* - `minimize`: bool - controls [document#toObject](#document_Document-toObject) behavior when called manually - defaults to true
* - [read](/docs/guide.html#read): string
* - [safe](/docs/guide.html#safe): bool - defaults to true.
* - [shardKey](/docs/guide.html#shardKey): bool - defaults to `null`
* - [strict](/docs/guide.html#strict): bool - defaults to true
* - [toJSON](/docs/guide.html#toJSON) - object - no default
* - [toObject](/docs/guide.html#toObject) - object - no default
* - [typeKey](/docs/guide.html#typeKey) - string - defaults to 'type'
* - [useNestedStrict](/docs/guide.html#useNestedStrict) - boolean - defaults to false
* - [validateBeforeSave](/docs/guide.html#validateBeforeSave) - bool - defaults to `true`
* - [versionKey](/docs/guide.html#versionKey): string - defaults to "__v"
*
* ####Note:
*
* _When nesting schemas, (`children` in the example above), always declare the child schema first before passing it into its parent._
*
* @param {Object} definition
* @param {Object} [options]
* @inherits NodeJS EventEmitter http://nodejs.org/api/events.html#events_class_events_eventemitter
* @event `init`: Emitted after the schema is compiled into a `Model`.
* @api public
*/
function Schema(obj, options) {
if (!(this instanceof Schema)) {
return new Schema(obj, options);
}
this.obj = obj;
this.paths = {};
this.subpaths = {};
this.virtuals = {};
this.singleNestedPaths = {};
this.nested = {};
this.inherits = {};
this.callQueue = [];
this._indexes = [];
this.methods = {};
this.statics = {};
this.tree = {};
this.query = {};
this.childSchemas = [];
this.s = {
hooks: new Kareem(),
kareemHooks: IS_KAREEM_HOOK
};
this.options = this.defaultOptions(options);
// build paths
if (obj) {
this.add(obj);
}
// check if _id's value is a subdocument (gh-2276)
var _idSubDoc = obj && obj._id && utils.isObject(obj._id);
// ensure the documents get an auto _id unless disabled
var auto_id = !this.paths['_id'] &&
(!this.options.noId && this.options._id) && !_idSubDoc;
if (auto_id) {
obj = {_id: {auto: true}};
obj._id[this.options.typeKey] = Schema.ObjectId;
this.add(obj);
}
// ensure the documents receive an id getter unless disabled
var autoid = !this.paths['id'] &&
(!this.options.noVirtualId && this.options.id);
if (autoid) {
this.virtual('id').get(idGetter);
}
for (var i = 0; i < this._defaultMiddleware.length; ++i) {
var m = this._defaultMiddleware[i];
this[m.kind](m.hook, !!m.isAsync, m.fn);
}
if (this.options.timestamps) {
this.setupTimestamp(this.options.timestamps);
}
}
/*!
* Returns this documents _id cast to a string.
*/
function idGetter() {
if (this.$__._id) {
return this.$__._id;
}
this.$__._id = this._id == null
? null
: String(this._id);
return this.$__._id;
}
/*!
* Inherit from EventEmitter.
*/
Schema.prototype = Object.create(EventEmitter.prototype);
Schema.prototype.constructor = Schema;
Schema.prototype.instanceOfSchema = true;
/**
* Default middleware attached to a schema. Cannot be changed.
*
* This field is used to make sure discriminators don't get multiple copies of
* built-in middleware. Declared as a constant because changing this at runtime
* may lead to instability with Model.prototype.discriminator().
*
* @api private
* @property _defaultMiddleware
*/
Object.defineProperty(Schema.prototype, '_defaultMiddleware', {
configurable: false,
enumerable: false,
writable: false,
value: [
{
kind: 'pre',
hook: 'save',
fn: function(next, options) {
var _this = this;
// Nested docs have their own presave
if (this.ownerDocument) {
return next();
}
var hasValidateBeforeSaveOption = options &&
(typeof options === 'object') &&
('validateBeforeSave' in options);
var shouldValidate;
if (hasValidateBeforeSaveOption) {
shouldValidate = !!options.validateBeforeSave;
} else {
shouldValidate = this.schema.options.validateBeforeSave;
}
// Validate
if (shouldValidate) {
// HACK: use $__original_validate to avoid promises so bluebird doesn't
// complain
if (this.$__original_validate) {
this.$__original_validate({__noPromise: true}, function(error) {
return _this.schema.s.hooks.execPost('save:error', _this, [_this], { error: error }, function(error) {
next(error);
});
});
} else {
this.validate({__noPromise: true}, function(error) {
return _this.schema.s.hooks.execPost('save:error', _this, [ _this], { error: error }, function(error) {
next(error);
});
});
}
} else {
next();
}
}
},
{
kind: 'pre',
hook: 'save',
isAsync: true,
fn: function(next, done) {
var _this = this;
var subdocs = this.$__getAllSubdocs();
if (!subdocs.length || this.$__preSavingFromParent) {
done();
next();
return;
}
each(subdocs, function(subdoc, cb) {
subdoc.$__preSavingFromParent = true;
subdoc.save(function(err) {
cb(err);
});
}, function(error) {
for (var i = 0; i < subdocs.length; ++i) {
delete subdocs[i].$__preSavingFromParent;
}
if (error) {
return _this.schema.s.hooks.execPost('save:error', _this, [_this], { error: error }, function(error) {
done(error);
});
}
next();
done();
});
}
},
{
kind: 'pre',
hook: 'validate',
isAsync: true,
fn: function(next, done) {
// Hack to ensure that we always wrap validate() in a promise
next();
done();
}
},
{
kind: 'pre',
hook: 'remove',
isAsync: true,
fn: function(next, done) {
if (this.ownerDocument) {
done();
next();
return;
}
var subdocs = this.$__getAllSubdocs();
if (!subdocs.length || this.$__preSavingFromParent) {
done();
next();
return;
}
each(subdocs, function(subdoc, cb) {
subdoc.remove({ noop: true }, function(err) {
cb(err);
});
}, function(error) {
if (error) {
done(error);
return;
}
next();
done();
});
}
}
]
});
/**
* The original object passed to the schema constructor
*
* ####Example:
*
* var schema = new Schema({ a: String }).add({ b: String });
* schema.obj; // { a: String }
*
* @api public
* @property obj
*/
Schema.prototype.obj;
/**
* Schema as flat paths
*
* ####Example:
* {
* '_id' : SchemaType,
* , 'nested.key' : SchemaType,
* }
*
* @api private
* @property paths
*/
Schema.prototype.paths;
/**
* Schema as a tree
*
* ####Example:
* {
* '_id' : ObjectId
* , 'nested' : {
* 'key' : String
* }
* }
*
* @api private
* @property tree
*/
Schema.prototype.tree;
/**
* Returns a deep copy of the schema
*
* @return {Schema} the cloned schema
* @api public
*/
Schema.prototype.clone = function() {
var s = new Schema(this.obj, this.options);
// Clone the call queue
s.callQueue = this.callQueue.map(function(f) { return f; });
s.methods = utils.clone(this.methods);
s.statics = utils.clone(this.statics);
return s;
};
/**
* Returns default options for this schema, merged with `options`.
*
* @param {Object} options
* @return {Object}
* @api private
*/
Schema.prototype.defaultOptions = function(options) {
if (options && options.safe === false) {
options.safe = {w: 0};
}
if (options && options.safe && options.safe.w === 0) {
// if you turn off safe writes, then versioning goes off as well
options.versionKey = false;
}
options = utils.options({
strict: true,
bufferCommands: true,
capped: false, // { size, max, autoIndexId }
versionKey: '__v',
discriminatorKey: '__t',
minimize: true,
autoIndex: null,
shardKey: null,
read: null,
validateBeforeSave: true,
// the following are only applied at construction time
noId: false, // deprecated, use { _id: false }
_id: true,
noVirtualId: false, // deprecated, use { id: false }
id: true,
typeKey: 'type',
retainKeyOrder: false
}, options);
if (options.read) {
options.read = readPref(options.read);
}
return options;
};
/**
* Adds key path / schema type pairs to this schema.
*
* ####Example:
*
* var ToySchema = new Schema;
* ToySchema.add({ name: 'string', color: 'string', price: 'number' });
*
* @param {Object} obj
* @param {String} prefix
* @api public
*/
Schema.prototype.add = function add(obj, prefix) {
prefix = prefix || '';
var keys = Object.keys(obj);
for (var i = 0; i < keys.length; ++i) {
var key = keys[i];
if (obj[key] == null) {
throw new TypeError('Invalid value for schema path `' + prefix + key + '`');
}
if (Array.isArray(obj[key]) && obj[key].length === 1 && obj[key][0] == null) {
throw new TypeError('Invalid value for schema Array path `' + prefix + key + '`');
}
if (utils.isObject(obj[key]) &&
(!obj[key].constructor || utils.getFunctionName(obj[key].constructor) === 'Object') &&
(!obj[key][this.options.typeKey] || (this.options.typeKey === 'type' && obj[key].type.type))) {
if (Object.keys(obj[key]).length) {
// nested object { last: { name: String }}
this.nested[prefix + key] = true;
this.add(obj[key], prefix + key + '.');
} else {
if (prefix) {
this.nested[prefix.substr(0, prefix.length - 1)] = true;
}
this.path(prefix + key, obj[key]); // mixed type
}
} else {
if (prefix) {
this.nested[prefix.substr(0, prefix.length - 1)] = true;
}
this.path(prefix + key, obj[key]);
}
}
};
/**
* Reserved document keys.
*
* Keys in this object are names that are rejected in schema declarations b/c they conflict with mongoose functionality. Using these key name will throw an error.
*
* on, emit, _events, db, get, set, init, isNew, errors, schema, options, modelName, collection, _pres, _posts, toObject
*
* _NOTE:_ Use of these terms as method names is permitted, but play at your own risk, as they may be existing mongoose document methods you are stomping on.
*
* var schema = new Schema(..);
* schema.methods.init = function () {} // potentially breaking
*/
Schema.reserved = Object.create(null);
var reserved = Schema.reserved;
// Core object
reserved['prototype'] =
// EventEmitter
reserved.emit =
reserved.on =
reserved.once =
reserved.listeners =
reserved.removeListener =
// document properties and functions
reserved.collection =
reserved.db =
reserved.errors =
reserved.init =
reserved.isModified =
reserved.isNew =
reserved.get =
reserved.modelName =
reserved.save =
reserved.schema =
reserved.set =
reserved.toObject =
reserved.validate =
// hooks.js
reserved._pres = reserved._posts = 1;
/*!
* Document keys to print warnings for
*/
var warnings = {};
warnings.increment = '`increment` should not be used as a schema path name ' +
'unless you have disabled versioning.';
/**
* Gets/sets schema paths.
*
* Sets a path (if arity 2)
* Gets a path (if arity 1)
*
* ####Example
*
* schema.path('name') // returns a SchemaType
* schema.path('name', Number) // changes the schemaType of `name` to Number
*
* @param {String} path
* @param {Object} constructor
* @api public
*/
Schema.prototype.path = function(path, obj) {
if (obj === undefined) {
if (this.paths[path]) {
return this.paths[path];
}
if (this.subpaths[path]) {
return this.subpaths[path];
}
if (this.singleNestedPaths[path]) {
return this.singleNestedPaths[path];
}
// subpaths?
return /\.\d+\.?.*$/.test(path)
? getPositionalPath(this, path)
: undefined;
}
// some path names conflict with document methods
if (reserved[path]) {
throw new Error('`' + path + '` may not be used as a schema pathname');
}
if (warnings[path]) {
console.log('WARN: ' + warnings[path]);
}
// update the tree
var subpaths = path.split(/\./),
last = subpaths.pop(),
branch = this.tree;
subpaths.forEach(function(sub, i) {
if (!branch[sub]) {
branch[sub] = {};
}
if (typeof branch[sub] !== 'object') {
var msg = 'Cannot set nested path `' + path + '`. '
+ 'Parent path `'
+ subpaths.slice(0, i).concat([sub]).join('.')
+ '` already set to type ' + branch[sub].name
+ '.';
throw new Error(msg);
}
branch = branch[sub];
});
branch[last] = utils.clone(obj);
this.paths[path] = Schema.interpretAsType(path, obj, this.options);
if (this.paths[path].$isSingleNested) {
for (var key in this.paths[path].schema.paths) {
this.singleNestedPaths[path + '.' + key] =
this.paths[path].schema.paths[key];
}
for (key in this.paths[path].schema.singleNestedPaths) {
this.singleNestedPaths[path + '.' + key] =
this.paths[path].schema.singleNestedPaths[key];
}
this.childSchemas.push(this.paths[path].schema);
} else if (this.paths[path].$isMongooseDocumentArray) {
this.childSchemas.push(this.paths[path].schema);
}
return this;
};
/**
* Converts type arguments into Mongoose Types.
*
* @param {String} path
* @param {Object} obj constructor
* @api private
*/
Schema.interpretAsType = function(path, obj, options) {
if (obj.constructor) {
var constructorName = utils.getFunctionName(obj.constructor);
if (constructorName !== 'Object') {
var oldObj = obj;
obj = {};
obj[options.typeKey] = oldObj;
}
}
// Get the type making sure to allow keys named "type"
// and default to mixed if not specified.
// { type: { type: String, default: 'freshcut' } }
var type = obj[options.typeKey] && (options.typeKey !== 'type' || !obj.type.type)
? obj[options.typeKey]
: {};
if (utils.getFunctionName(type.constructor) === 'Object' || type === 'mixed') {
return new MongooseTypes.Mixed(path, obj);
}
if (Array.isArray(type) || Array === type || type === 'array') {
// if it was specified through { type } look for `cast`
var cast = (Array === type || type === 'array')
? obj.cast
: type[0];
if (cast && cast.instanceOfSchema) {
return new MongooseTypes.DocumentArray(path, cast, obj);
}
if (Array.isArray(cast)) {
return new MongooseTypes.Array(path, Schema.interpretAsType(path, cast, options), obj);
}
if (typeof cast === 'string') {
cast = MongooseTypes[cast.charAt(0).toUpperCase() + cast.substring(1)];
} else if (cast && (!cast[options.typeKey] || (options.typeKey === 'type' && cast.type.type))
&& utils.getFunctionName(cast.constructor) === 'Object') {
if (Object.keys(cast).length) {
// The `minimize` and `typeKey` options propagate to child schemas
// declared inline, like `{ arr: [{ val: { $type: String } }] }`.
// See gh-3560
var childSchemaOptions = {minimize: options.minimize};
if (options.typeKey) {
childSchemaOptions.typeKey = options.typeKey;
}
// propagate 'strict' option to child schema
if (options.hasOwnProperty('strict')) {
childSchemaOptions.strict = options.strict;
}
var childSchema = new Schema(cast, childSchemaOptions);
childSchema.$implicitlyCreated = true;
return new MongooseTypes.DocumentArray(path, childSchema, obj);
} else {
// Special case: empty object becomes mixed
return new MongooseTypes.Array(path, MongooseTypes.Mixed, obj);
}
}
if (cast) {
type = cast[options.typeKey] && (options.typeKey !== 'type' || !cast.type.type)
? cast[options.typeKey]
: cast;
name = typeof type === 'string'
? type
: type.schemaName || utils.getFunctionName(type);
if (!(name in MongooseTypes)) {
throw new TypeError('Undefined type `' + name + '` at array `' + path +
'`');
}
}
return new MongooseTypes.Array(path, cast || MongooseTypes.Mixed, obj, options);
}
if (type && type.instanceOfSchema) {
return new MongooseTypes.Embedded(type, path, obj);
}
var name;
if (Buffer.isBuffer(type)) {
name = 'Buffer';
} else {
name = typeof type === 'string'
? type
// If not string, `type` is a function. Outside of IE, function.name
// gives you the function name. In IE, you need to compute it
: type.schemaName || utils.getFunctionName(type);
}
if (name) {
name = name.charAt(0).toUpperCase() + name.substring(1);
}
if (undefined == MongooseTypes[name]) {
throw new TypeError('Undefined type `' + name + '` at `' + path +
'`\n Did you try nesting Schemas? ' +
'You can only nest using refs or arrays.');
}
return new MongooseTypes[name](path, obj);
};
/**
* Iterates the schemas paths similar to Array#forEach.
*
* The callback is passed the pathname and schemaType as arguments on each iteration.
*
* @param {Function} fn callback function
* @return {Schema} this
* @api public
*/
Schema.prototype.eachPath = function(fn) {
var keys = Object.keys(this.paths),
len = keys.length;
for (var i = 0; i < len; ++i) {
fn(keys[i], this.paths[keys[i]]);
}
return this;
};
/**
* Returns an Array of path strings that are required by this schema.
*
* @api public
* @param {Boolean} invalidate refresh the cache
* @return {Array}
*/
Schema.prototype.requiredPaths = function requiredPaths(invalidate) {
if (this._requiredpaths && !invalidate) {
return this._requiredpaths;
}
var paths = Object.keys(this.paths),
i = paths.length,
ret = [];
while (i--) {
var path = paths[i];
if (this.paths[path].isRequired) {
ret.push(path);
}
}
this._requiredpaths = ret;
return this._requiredpaths;
};
/**
* Returns indexes from fields and schema-level indexes (cached).
*
* @api private
* @return {Array}
*/
Schema.prototype.indexedPaths = function indexedPaths() {
if (this._indexedpaths) {
return this._indexedpaths;
}
this._indexedpaths = this.indexes();
return this._indexedpaths;
};
/**
* Returns the pathType of `path` for this schema.
*
* Given a path, returns whether it is a real, virtual, nested, or ad-hoc/undefined path.
*
* @param {String} path
* @return {String}
* @api public
*/
Schema.prototype.pathType = function(path) {
if (path in this.paths) {
return 'real';
}
if (path in this.virtuals) {
return 'virtual';
}
if (path in this.nested) {
return 'nested';
}
if (path in this.subpaths) {
return 'real';
}
if (path in this.singleNestedPaths) {
return 'real';
}
if (/\.\d+\.|\.\d+$/.test(path)) {
return getPositionalPathType(this, path);
}
return 'adhocOrUndefined';
};
/**
* Returns true iff this path is a child of a mixed schema.
*
* @param {String} path
* @return {Boolean}
* @api private
*/
Schema.prototype.hasMixedParent = function(path) {
var subpaths = path.split(/\./g);
path = '';
for (var i = 0; i < subpaths.length; ++i) {
path = i > 0 ? path + '.' + subpaths[i] : subpaths[i];
if (path in this.paths &&
this.paths[path] instanceof MongooseTypes.Mixed) {
return true;
}
}
return false;
};
/**
* Sets up updatedAt and createdAt timestamps on documents if enabled
*
* @param {Boolean|Object} timestamps timestamps options
* @api private
*/
Schema.prototype.setupTimestamp = function(timestamps) {
if (timestamps) {
var createdAt = timestamps.createdAt || 'createdAt';
var updatedAt = timestamps.updatedAt || 'updatedAt';
var schemaAdditions = {};
var parts = createdAt.split('.');
var i;
var cur = schemaAdditions;
for (i = 0; i < parts.length; ++i) {
cur[parts[i]] = (i < parts.length - 1 ?
cur[parts[i]] || {} :
Date);
}
parts = updatedAt.split('.');
cur = schemaAdditions;
for (i = 0; i < parts.length; ++i) {
cur[parts[i]] = (i < parts.length - 1 ?
cur[parts[i]] || {} :
Date);
}
this.add(schemaAdditions);
this.pre('save', function(next) {
var defaultTimestamp = new Date();
var auto_id = this._id && this._id.auto;
if (!this.get(createdAt) && this.isSelected(createdAt)) {
this.set(createdAt, auto_id ? this._id.getTimestamp() : defaultTimestamp);
}
if (this.isNew || this.isModified()) {
this.set(updatedAt, this.isNew ? this.get(createdAt) : defaultTimestamp);
}
next();
});
var genUpdates = function(currentUpdate, overwrite) {
var now = new Date();
var updates = {};
if (overwrite) {
if (!currentUpdate[updatedAt]) {
updates[updatedAt] = now;
}
if (!currentUpdate[createdAt]) {
updates[createdAt] = now;
}
return updates;
}
updates = { $set: {} };
currentUpdate = currentUpdate || {};
updates.$set[updatedAt] = now;
if (currentUpdate[createdAt]) {
delete currentUpdate[createdAt];
}
if (currentUpdate.$set && currentUpdate.$set[createdAt]) {
delete currentUpdate.$set[createdAt];
}
updates.$setOnInsert = {};
updates.$setOnInsert[createdAt] = now;
return updates;
};
this.methods.initializeTimestamps = function() {
if (!this.get(createdAt)) {
this.set(createdAt, new Date());
}
if (!this.get(updatedAt)) {
this.set(updatedAt, new Date());
}
return this;
};
this.pre('findOneAndUpdate', function(next) {
var overwrite = this.options.overwrite;
this.findOneAndUpdate({}, genUpdates(this.getUpdate(), overwrite), {
overwrite: overwrite
});
applyTimestampsToChildren(this);
next();
});
this.pre('update', function(next) {
var overwrite = this.options.overwrite;
this.update({}, genUpdates(this.getUpdate(), overwrite), {
overwrite: overwrite
});
applyTimestampsToChildren(this);
next();
});
}
};
/*!
* ignore
*/
function applyTimestampsToChildren(query) {
var now = new Date();
var update = query.getUpdate();
var keys = Object.keys(update);
var key;
var schema = query.model.schema;
var len;
var createdAt;
var updatedAt;
var timestamps;
var path;
var hasDollarKey = keys.length && keys[0].charAt(0) === '$';
if (hasDollarKey) {
if (update.$push) {
for (key in update.$push) {
var $path = schema.path(key);
if (update.$push[key] &&
$path &&
$path.$isMongooseDocumentArray &&
$path.schema.options.timestamps) {
timestamps = $path.schema.options.timestamps;
createdAt = timestamps.createdAt || 'createdAt';
updatedAt = timestamps.updatedAt || 'updatedAt';
if (update.$push[key].$each) {
update.$push[key].$each.forEach(function(subdoc) {
subdoc[updatedAt] = now;
subdoc[createdAt] = now;
});
} else {
update.$push[key][updatedAt] = now;
update.$push[key][createdAt] = now;
}
}
}
}
if (update.$set) {
for (key in update.$set) {
path = schema.path(key);
if (!path) {
continue;
}
if (Array.isArray(update.$set[key]) && path.$isMongooseDocumentArray) {
len = update.$set[key].length;
timestamps = schema.path(key).schema.options.timestamps;
if (timestamps) {
createdAt = timestamps.createdAt || 'createdAt';
updatedAt = timestamps.updatedAt || 'updatedAt';
for (var i = 0; i < len; ++i) {
update.$set[key][i][updatedAt] = now;
update.$set[key][i][createdAt] = now;
}
}
} else if (update.$set[key] && path.$isSingleNested) {
timestamps = schema.path(key).schema.options.timestamps;
if (timestamps) {
createdAt = timestamps.createdAt || 'createdAt';
updatedAt = timestamps.updatedAt || 'updatedAt';
update.$set[key][updatedAt] = now;
update.$set[key][createdAt] = now;
}
}
}
}
}
}
/*!
* ignore
*/
function getPositionalPathType(self, path) {
var subpaths = path.split(/\.(\d+)\.|\.(\d+)$/).filter(Boolean);
if (subpaths.length < 2) {
return self.paths[subpaths[0]];
}
var val = self.path(subpaths[0]);
var isNested = false;
if (!val) {
return val;
}
var last = subpaths.length - 1,
subpath,
i = 1;
for (; i < subpaths.length; ++i) {
isNested = false;
subpath = subpaths[i];
if (i === last && val && !/\D/.test(subpath)) {
if (val.$isMongooseDocumentArray) {
var oldVal = val;
val = new SchemaType(subpath);
val.cast = function(value, doc, init) {
return oldVal.cast(value, doc, init)[0];
};
val.caster = oldVal.caster;
val.schema = oldVal.schema;
} else if (val instanceof MongooseTypes.Array) {
// StringSchema, NumberSchema, etc
val = val.caster;
} else {
val = undefined;
}
break;
}
// ignore if its just a position segment: path.0.subpath
if (!/\D/.test(subpath)) {
continue;
}
if (!(val && val.schema)) {
val = undefined;
break;
}
var type = val.schema.pathType(subpath);
isNested = (type === 'nested');
val = val.schema.path(subpath);
}
self.subpaths[path] = val;
if (val) {
return 'real';
}
if (isNested) {
return 'nested';
}
return 'adhocOrUndefined';
}
/*!
* ignore
*/
function getPositionalPath(self, path) {
getPositionalPathType(self, path);
return self.subpaths[path];
}
/**
* Adds a method call to the queue.
*
* @param {String} name name of the document method to call later
* @param {Array} args arguments to pass to the method
* @api public
*/
Schema.prototype.queue = function(name, args) {
this.callQueue.push([name, args]);
return this;
};
/**
* Defines a pre hook for the document.
*
* ####Example
*
* var toySchema = new Schema(..);
*
* toySchema.pre('save', function (next) {
* if (!this.created) this.created = new Date;
* next();
* })
*
* toySchema.pre('validate', function (next) {
* if (this.name !== 'Woody') this.name = 'Woody';
* next();
* })
*
* @param {String} method
* @param {Function} callback
* @see hooks.js https://github.com/bnoguchi/hooks-js/tree/31ec571cef0332e21121ee7157e0cf9728572cc3
* @api public
*/
Schema.prototype.pre = function() {
var name = arguments[0];
if (IS_KAREEM_HOOK[name]) {
this.s.hooks.pre.apply(this.s.hooks, arguments);
return this;
}
return this.queue('pre', arguments);
};
/**
* Defines a post hook for the document
*
* var schema = new Schema(..);
* schema.post('save', function (doc) {
* console.log('this fired after a document was saved');
* });
*
* shema.post('find', function(docs) {
* console.log('this fired after you run a find query');
* });
*
* var Model = mongoose.model('Model', schema);
*
* var m = new Model(..);
* m.save(function(err) {
* console.log('this fires after the `post` hook');
* });
*
* m.find(function(err, docs) {
* console.log('this fires after the post find hook');
* });
*
* @param {String} method name of the method to hook
* @param {Function} fn callback
* @see middleware http://mongoosejs.com/docs/middleware.html
* @see hooks.js https://www.npmjs.com/package/hooks-fixed
* @see kareem http://npmjs.org/package/kareem
* @api public
*/
Schema.prototype.post = function(method, fn) {
if (IS_KAREEM_HOOK[method]) {
this.s.hooks.post.apply(this.s.hooks, arguments);
return this;
}
// assuming that all callbacks with arity < 2 are synchronous post hooks
if (fn.length < 2) {
return this.queue('on', [arguments[0], function(doc) {
return fn.call(doc, doc);
}]);
}
if (fn.length === 3) {
this.s.hooks.post(method + ':error', fn);
return this;
}
return this.queue('post', [arguments[0], function(next) {
// wrap original function so that the callback goes last,
// for compatibility with old code that is using synchronous post hooks
var _this = this;
var args = Array.prototype.slice.call(arguments, 1);
fn.call(this, this, function(err) {
return next.apply(_this, [err].concat(args));
});
}]);
};
/**
* Registers a plugin for this schema.
*
* @param {Function} plugin callback
* @param {Object} [opts]
* @see plugins
* @api public
*/
Schema.prototype.plugin = function(fn, opts) {
fn(this, opts);
return this;
};
/**
* Adds an instance method to documents constructed from Models compiled from this schema.
*
* ####Example
*
* var schema = kittySchema = new Schema(..);
*
* schema.method('meow', function () {
* console.log('meeeeeoooooooooooow');
* })
*
* var Kitty = mongoose.model('Kitty', schema);
*
* var fizz = new Kitty;
* fizz.meow(); // meeeeeooooooooooooow
*
* If a hash of name/fn pairs is passed as the only argument, each name/fn pair will be added as methods.
*
* schema.method({
* purr: function () {}
* , scratch: function () {}
* });
*
* // later
* fizz.purr();
* fizz.scratch();
*
* @param {String|Object} method name
* @param {Function} [fn]
* @api public
*/
Schema.prototype.method = function(name, fn) {
if (typeof name !== 'string') {
for (var i in name) {
this.methods[i] = name[i];
}
} else {
this.methods[name] = fn;
}
return this;
};
/**
* Adds static "class" methods to Models compiled from this schema.
*
* ####Example
*
* var schema = new Schema(..);
* schema.static('findByName', function (name, callback) {
* return this.find({ name: name }, callback);
* });
*
* var Drink = mongoose.model('Drink', schema);
* Drink.findByName('sanpellegrino', function (err, drinks) {
* //
* });
*
* If a hash of name/fn pairs is passed as the only argument, each name/fn pair will be added as statics.
*
* @param {String|Object} name
* @param {Function} [fn]
* @api public
*/
Schema.prototype.static = function(name, fn) {
if (typeof name !== 'string') {
for (var i in name) {
this.statics[i] = name[i];
}
} else {
this.statics[name] = fn;
}
return this;
};
/**
* Defines an index (most likely compound) for this schema.
*
* ####Example
*
* schema.index({ first: 1, last: -1 })
*
* @param {Object} fields
* @param {Object} [options] Options to pass to [MongoDB driver's `createIndex()` function](http://mongodb.github.io/node-mongodb-native/2.0/api/Collection.html#createIndex)
* @param {String} [options.expires=null] Mongoose-specific syntactic sugar, uses [ms](https://www.npmjs.com/package/ms) to convert `expires` option into seconds for the `expireAfterSeconds` in the above link.
* @api public
*/
Schema.prototype.index = function(fields, options) {
options || (options = {});
if (options.expires) {
utils.expires(options);
}
this._indexes.push([fields, options]);
return this;
};
/**
* Sets/gets a schema option.
*
* ####Example
*
* schema.set('strict'); // 'true' by default
* schema.set('strict', false); // Sets 'strict' to false
* schema.set('strict'); // 'false'
*
* @param {String} key option name
* @param {Object} [value] if not passed, the current option value is returned
* @see Schema ./
* @api public
*/
Schema.prototype.set = function(key, value, _tags) {
if (arguments.length === 1) {
return this.options[key];
}
switch (key) {
case 'read':
this.options[key] = readPref(value, _tags);
break;
case 'safe':
this.options[key] = value === false
? {w: 0}
: value;
break;
case 'timestamps':
this.setupTimestamp(value);
this.options[key] = value;
break;
default:
this.options[key] = value;
}
return this;
};
/**
* Gets a schema option.
*
* @param {String} key option name
* @api public
*/
Schema.prototype.get = function(key) {
return this.options[key];
};
/**
* The allowed index types
*
* @static indexTypes
* @receiver Schema
* @api public
*/
var indexTypes = '2d 2dsphere hashed text'.split(' ');
Object.defineProperty(Schema, 'indexTypes', {
get: function() {
return indexTypes;
},
set: function() {
throw new Error('Cannot overwrite Schema.indexTypes');
}
});
/**
* Compiles indexes from fields and schema-level indexes
*
* @api public
*/
Schema.prototype.indexes = function() {
'use strict';
var indexes = [];
var seenPrefix = {};
var collectIndexes = function(schema, prefix) {
if (seenPrefix[prefix]) {
return;
}
seenPrefix[prefix] = true;
prefix = prefix || '';
var key, path, index, field, isObject, options, type;
var keys = Object.keys(schema.paths);
for (var i = 0; i < keys.length; ++i) {
key = keys[i];
path = schema.paths[key];
if ((path instanceof MongooseTypes.DocumentArray) || path.$isSingleNested) {
collectIndexes(path.schema, key + '.');
} else {
index = path._index;
if (index !== false && index !== null && index !== undefined) {
field = {};
isObject = utils.isObject(index);
options = isObject ? index : {};
type = typeof index === 'string' ? index :
isObject ? index.type :
false;
if (type && ~Schema.indexTypes.indexOf(type)) {
field[prefix + key] = type;
} else if (options.text) {
field[prefix + key] = 'text';
delete options.text;
} else {
field[prefix + key] = 1;
}
delete options.type;
if (!('background' in options)) {
options.background = true;
}
indexes.push([field, options]);
}
}
}
if (prefix) {
fixSubIndexPaths(schema, prefix);
} else {
schema._indexes.forEach(function(index) {
if (!('background' in index[1])) {
index[1].background = true;
}
});
indexes = indexes.concat(schema._indexes);
}
};
collectIndexes(this);
return indexes;
/*!
* Checks for indexes added to subdocs using Schema.index().
* These indexes need their paths prefixed properly.
*
* schema._indexes = [ [indexObj, options], [indexObj, options] ..]
*/
function fixSubIndexPaths(schema, prefix) {
var subindexes = schema._indexes,
len = subindexes.length,
indexObj,
newindex,
klen,
keys,
key,
i = 0,
j;
for (i = 0; i < len; ++i) {
indexObj = subindexes[i][0];
keys = Object.keys(indexObj);
klen = keys.length;
newindex = {};
// use forward iteration, order matters
for (j = 0; j < klen; ++j) {
key = keys[j];
newindex[prefix + key] = indexObj[key];
}
indexes.push([newindex, subindexes[i][1]]);
}
}
};
/**
* Creates a virtual type with the given name.
*
* @param {String} name
* @param {Object} [options]
* @return {VirtualType}
*/
Schema.prototype.virtual = function(name, options) {
if (options && options.ref) {
if (!options.localField) {
throw new Error('Reference virtuals require `localField` option');
}
if (!options.foreignField) {
throw new Error('Reference virtuals require `foreignField` option');
}
this.pre('init', function(next, obj) {
if (name in obj) {
if (!this.$$populatedVirtuals) {
this.$$populatedVirtuals = {};
}
if (options.justOne) {
this.$$populatedVirtuals[name] = Array.isArray(obj[name]) ?
obj[name][0] :
obj[name];
} else {
this.$$populatedVirtuals[name] = Array.isArray(obj[name]) ?
obj[name] :
obj[name] == null ? [] : [obj[name]];
}
delete obj[name];
}
if (this.ownerDocument) {
next();
return this;
} else {
next();
}
});
var virtual = this.virtual(name);
virtual.options = options;
return virtual.
get(function() {
if (!this.$$populatedVirtuals) {
this.$$populatedVirtuals = {};
}
if (name in this.$$populatedVirtuals) {
return this.$$populatedVirtuals[name];
}
return null;
}).
set(function(v) {
if (!this.$$populatedVirtuals) {
this.$$populatedVirtuals = {};
}
this.$$populatedVirtuals[name] = v;
});
}
var virtuals = this.virtuals;
var parts = name.split('.');
if (this.pathType(name) === 'real') {
throw new Error('Virtual path "' + name + '"' +
' conflicts with a real path in the schema');
}
virtuals[name] = parts.reduce(function(mem, part, i) {
mem[part] || (mem[part] = (i === parts.length - 1)
? new VirtualType(options, name)
: {});
return mem[part];
}, this.tree);
return virtuals[name];
};
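/*!
 * Example for `Schema#virtual()` above (editor's sketch; `personSchema` is an
 * assumed name): a populated ("ref") virtual requires both `localField` and
 * `foreignField`, per the checks at the top of the function.
 *
 *     personSchema.virtual('posts', {
 *       ref: 'Post',            // populate from the Post model
 *       localField: '_id',      // match person._id ...
 *       foreignField: 'author'  // ... against post.author
 *     });
 *
 *     // plain computed virtual:
 *     personSchema.virtual('fullName').get(function() {
 *       return this.first + ' ' + this.last;
 *     });
 */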
/*!
* ignore
*/
Schema.prototype._getVirtual = function(name) {
return _getVirtual(this, name);
};
/*!
* ignore
*/
function _getVirtual(schema, name) {
if (schema.virtuals[name]) {
return schema.virtuals[name];
}
var parts = name.split('.');
var cur = '';
var nestedSchemaPath = '';
for (var i = 0; i < parts.length; ++i) {
cur += (cur.length > 0 ? '.' : '') + parts[i];
if (schema.virtuals[cur]) {
if (i === parts.length - 1) {
schema.virtuals[cur].$nestedSchemaPath = nestedSchemaPath;
return schema.virtuals[cur];
}
continue;
} else if (schema.paths[cur] && schema.paths[cur].schema) {
schema = schema.paths[cur].schema;
nestedSchemaPath += (nestedSchemaPath.length > 0 ? '.' : '') + cur;
cur = '';
} else {
return null;
}
}
}
/**
* Returns the virtual type with the given `name`.
*
* @param {String} name
* @return {VirtualType}
*/
Schema.prototype.virtualpath = function(name) {
return this.virtuals[name];
};
/**
* Removes the given `path` (or [`paths`]).
*
* @param {String|Array} path
*
* @api public
*/
Schema.prototype.remove = function(path) {
if (typeof path === 'string') {
path = [path];
}
if (Array.isArray(path)) {
path.forEach(function(name) {
if (this.path(name)) {
delete this.paths[name];
var pieces = name.split('.');
var last = pieces.pop();
var branch = this.tree;
for (var i = 0; i < pieces.length; ++i) {
branch = branch[pieces[i]];
}
delete branch[last];
}
}, this);
}
};
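/*!
 * Example for `Schema#remove()` above (editor's sketch; names assumed):
 *
 *     var s = new Schema({ name: String, nested: { age: Number } });
 *     s.remove('name');
 *     s.remove(['nested.age']);
 *     // removes both the compiled paths and their entries in `s.tree`
 */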
/**
* Loads an ES6 class into a schema. Maps setters + getters, static methods, and instance methods to schema virtuals, statics, and methods.
*
* @param {Function} model
*/
Schema.prototype.loadClass = function(model, virtualsOnly) {
if (model === Object.prototype ||
model === Function.prototype ||
model.prototype.hasOwnProperty('$isMongooseModelPrototype')) {
return this;
}
// Add static methods
if (!virtualsOnly) {
Object.getOwnPropertyNames(model).forEach(function(name) {
if (name.match(/^(length|name|prototype)$/)) {
return;
}
var method = Object.getOwnPropertyDescriptor(model, name);
if (typeof method.value === 'function') this.static(name, method.value);
}, this);
}
// Add methods and virtuals
Object.getOwnPropertyNames(model.prototype).forEach(function(name) {
if (name.match(/^(constructor)$/)) {
return;
}
var method = Object.getOwnPropertyDescriptor(model.prototype, name);
if (!virtualsOnly) {
if (typeof method.value === 'function') {
this.method(name, method.value);
}
}
if (typeof method.get === 'function') {
this.virtual(name).get(method.get);
}
if (typeof method.set === 'function') {
this.virtual(name).set(method.set);
}
}, this);
return (this.loadClass(Object.getPrototypeOf(model)));
};
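/*!
 * Example for `Schema#loadClass()` above (editor's sketch; class and schema
 * names assumed):
 *
 *     class PersonClass {
 *       get fullName() { return this.first + ' ' + this.last; } // -> virtual getter
 *       getAge() { return Date.now() - this.birth; }            // -> instance method
 *       static findByAge(n) { return this.find({ age: n }); }   // -> static
 *     }
 *     personSchema.loadClass(PersonClass);
 */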
/*!
* ignore
*/
Schema.prototype._getSchema = function(path) {
var _this = this;
var pathschema = _this.path(path);
var resultPath = [];
if (pathschema) {
pathschema.$fullPath = path;
return pathschema;
}
function search(parts, schema) {
var p = parts.length + 1,
foundschema,
trypath;
while (p--) {
trypath = parts.slice(0, p).join('.');
foundschema = schema.path(trypath);
if (foundschema) {
resultPath.push(trypath);
if (foundschema.caster) {
// array of Mixed?
if (foundschema.caster instanceof MongooseTypes.Mixed) {
foundschema.caster.$fullPath = resultPath.join('.');
return foundschema.caster;
}
// Now that we found the array, we need to check if there
// are remaining document paths to look up for casting.
// Also we need to handle array.$.path since schema.path
// doesn't work for that.
// If there is no foundschema.schema we are dealing with
// a path like array.$
if (p !== parts.length && foundschema.schema) {
var ret;
if (parts[p] === '$') {
if (p + 1 === parts.length) {
// comments.$
return foundschema;
}
// comments.$.comments.$.title
ret = search(parts.slice(p + 1), foundschema.schema);
if (ret) {
ret.$isUnderneathDocArray = ret.$isUnderneathDocArray ||
!foundschema.schema.$isSingleNested;
}
return ret;
}
// this is the last path of the selector
ret = search(parts.slice(p), foundschema.schema);
if (ret) {
ret.$isUnderneathDocArray = ret.$isUnderneathDocArray ||
!foundschema.schema.$isSingleNested;
}
return ret;
}
}
foundschema.$fullPath = resultPath.join('.');
return foundschema;
}
}
}
// look for arrays
return search(path.split('.'), _this);
};
/*!
* ignore
*/
Schema.prototype._getPathType = function(path) {
var _this = this;
var pathschema = _this.path(path);
if (pathschema) {
return 'real';
}
function search(parts, schema) {
var p = parts.length + 1,
foundschema,
trypath;
while (p--) {
trypath = parts.slice(0, p).join('.');
foundschema = schema.path(trypath);
if (foundschema) {
if (foundschema.caster) {
// array of Mixed?
if (foundschema.caster instanceof MongooseTypes.Mixed) {
return { schema: foundschema, pathType: 'mixed' };
}
// Now that we found the array, we need to check if there
// are remaining document paths to look up for casting.
// Also we need to handle array.$.path since schema.path
// doesn't work for that.
// If there is no foundschema.schema we are dealing with
// a path like array.$
if (p !== parts.length && foundschema.schema) {
if (parts[p] === '$') {
if (p === parts.length - 1) {
return { schema: foundschema, pathType: 'nested' };
}
// comments.$.comments.$.title
return search(parts.slice(p + 1), foundschema.schema);
}
// this is the last path of the selector
return search(parts.slice(p), foundschema.schema);
}
return {
schema: foundschema,
pathType: foundschema.$isSingleNested ? 'nested' : 'array'
};
}
return { schema: foundschema, pathType: 'real' };
} else if (p === parts.length && schema.nested[trypath]) {
return { schema: schema, pathType: 'nested' };
}
}
return { schema: foundschema || schema, pathType: 'undefined' };
}
// look for arrays
return search(path.split('.'), _this);
};
/*!
* Module exports.
*/
module.exports = exports = Schema;
// require down here because of reference issues
/**
* The various built-in Mongoose Schema Types.
*
* ####Example:
*
* var mongoose = require('mongoose');
* var ObjectId = mongoose.Schema.Types.ObjectId;
*
* ####Types:
*
* - [String](#schema-string-js)
* - [Number](#schema-number-js)
* - [Boolean](#schema-boolean-js) | Bool
* - [Array](#schema-array-js)
* - [Buffer](#schema-buffer-js)
* - [Date](#schema-date-js)
* - [ObjectId](#schema-objectid-js) | Oid
* - [Mixed](#schema-mixed-js)
*
 * Using this exposed access to the `Mixed` SchemaType, we can use it in our schemas.
*
* var Mixed = mongoose.Schema.Types.Mixed;
* new mongoose.Schema({ _user: Mixed })
*
* @api public
*/
Schema.Types = MongooseTypes = require('./schema/index');
/*!
* ignore
*/
exports.ObjectId = MongooseTypes.ObjectId;
| 1 | 13,441 | The more correct way of doing this is `this.s.hooks.clone()` but either way works. Thanks for finding this :+1: | Automattic-mongoose | js |
@@ -123,14 +123,13 @@ func (b *ClusterNetworkPolicySpecBuilder) GetAppliedToPeer(podSelector map[strin
func (b *ClusterNetworkPolicySpecBuilder) AddIngress(protoc v1.Protocol,
port *int32, portName *string, endPort *int32, cidr *string,
podSelector map[string]string, nsSelector map[string]string,
- podSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, selfNS bool,
+ podSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, selfNS string,
ruleAppliedToSpecs []ACNPAppliedToSpec, action crdv1alpha1.RuleAction, ruleClusterGroup, name string) *ClusterNetworkPolicySpecBuilder {
var pSel *metav1.LabelSelector
var nSel *metav1.LabelSelector
var ns *crdv1alpha1.PeerNamespaces
var appliedTos []crdv1alpha1.NetworkPolicyPeer
- matchSelf := crdv1alpha1.NamespaceMatchSelf
if b.Spec.Ingress == nil {
b.Spec.Ingress = []crdv1alpha1.Rule{} | 1 | // Copyright 2020 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package utils
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
crdv1alpha1 "antrea.io/antrea/pkg/apis/crd/v1alpha1"
legacysecv1alpha1 "antrea.io/antrea/pkg/legacyapis/security/v1alpha1"
)
type ClusterNetworkPolicySpecBuilder struct {
Spec crdv1alpha1.ClusterNetworkPolicySpec
Name string
}
type ACNPAppliedToSpec struct {
PodSelector map[string]string
NSSelector map[string]string
PodSelectorMatchExp []metav1.LabelSelectorRequirement
NSSelectorMatchExp []metav1.LabelSelectorRequirement
Group string
}
func (b *ClusterNetworkPolicySpecBuilder) Get() *crdv1alpha1.ClusterNetworkPolicy {
if b.Spec.Ingress == nil {
b.Spec.Ingress = []crdv1alpha1.Rule{}
}
if b.Spec.Egress == nil {
b.Spec.Egress = []crdv1alpha1.Rule{}
}
return &crdv1alpha1.ClusterNetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: b.Name,
},
Spec: b.Spec,
}
}
func (b *ClusterNetworkPolicySpecBuilder) GetLegacy() *legacysecv1alpha1.ClusterNetworkPolicy {
if b.Spec.Ingress == nil {
b.Spec.Ingress = []crdv1alpha1.Rule{}
}
if b.Spec.Egress == nil {
b.Spec.Egress = []crdv1alpha1.Rule{}
}
return &legacysecv1alpha1.ClusterNetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: b.Name,
},
Spec: b.Spec,
}
}
func (b *ClusterNetworkPolicySpecBuilder) SetName(name string) *ClusterNetworkPolicySpecBuilder {
b.Name = name
return b
}
func (b *ClusterNetworkPolicySpecBuilder) SetPriority(p float64) *ClusterNetworkPolicySpecBuilder {
b.Spec.Priority = p
return b
}
func (b *ClusterNetworkPolicySpecBuilder) SetTier(tier string) *ClusterNetworkPolicySpecBuilder {
b.Spec.Tier = tier
return b
}
func (b *ClusterNetworkPolicySpecBuilder) SetAppliedToGroup(specs []ACNPAppliedToSpec) *ClusterNetworkPolicySpecBuilder {
for _, spec := range specs {
appliedToPeer := b.GetAppliedToPeer(spec.PodSelector, spec.NSSelector, spec.PodSelectorMatchExp, spec.NSSelectorMatchExp, spec.Group)
b.Spec.AppliedTo = append(b.Spec.AppliedTo, appliedToPeer)
}
return b
}
func (b *ClusterNetworkPolicySpecBuilder) GetAppliedToPeer(podSelector map[string]string,
nsSelector map[string]string,
podSelectorMatchExp []metav1.LabelSelectorRequirement,
nsSelectorMatchExp []metav1.LabelSelectorRequirement,
appliedToCG string) crdv1alpha1.NetworkPolicyPeer {
var ps *metav1.LabelSelector
var ns *metav1.LabelSelector
if podSelector != nil || podSelectorMatchExp != nil {
ps = &metav1.LabelSelector{
MatchLabels: podSelector,
MatchExpressions: podSelectorMatchExp,
}
}
if nsSelector != nil || nsSelectorMatchExp != nil {
ns = &metav1.LabelSelector{
MatchLabels: nsSelector,
MatchExpressions: nsSelectorMatchExp,
}
}
peer := crdv1alpha1.NetworkPolicyPeer{
PodSelector: ps,
NamespaceSelector: ns,
}
if appliedToCG != "" {
peer.Group = appliedToCG
}
return peer
}
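// Example for GetAppliedToPeer above (editor's sketch): a peer selecting Pods
// labeled app=db in Namespaces labeled env=prod; a non-empty last argument
// would select a ClusterGroup instead.
//
//	peer := b.GetAppliedToPeer(map[string]string{"app": "db"},
//		map[string]string{"env": "prod"}, nil, nil, "")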
func (b *ClusterNetworkPolicySpecBuilder) AddIngress(protoc v1.Protocol,
port *int32, portName *string, endPort *int32, cidr *string,
podSelector map[string]string, nsSelector map[string]string,
podSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, selfNS bool,
ruleAppliedToSpecs []ACNPAppliedToSpec, action crdv1alpha1.RuleAction, ruleClusterGroup, name string) *ClusterNetworkPolicySpecBuilder {
var pSel *metav1.LabelSelector
var nSel *metav1.LabelSelector
var ns *crdv1alpha1.PeerNamespaces
var appliedTos []crdv1alpha1.NetworkPolicyPeer
matchSelf := crdv1alpha1.NamespaceMatchSelf
if b.Spec.Ingress == nil {
b.Spec.Ingress = []crdv1alpha1.Rule{}
}
if podSelector != nil || podSelectorMatchExp != nil {
pSel = &metav1.LabelSelector{
MatchLabels: podSelector,
MatchExpressions: podSelectorMatchExp,
}
}
if nsSelector != nil || nsSelectorMatchExp != nil {
nSel = &metav1.LabelSelector{
MatchLabels: nsSelector,
MatchExpressions: nsSelectorMatchExp,
}
}
if selfNS {
ns = &crdv1alpha1.PeerNamespaces{
Match: matchSelf,
}
}
var ipBlock *crdv1alpha1.IPBlock
if cidr != nil {
ipBlock = &crdv1alpha1.IPBlock{
CIDR: *cidr,
}
}
for _, at := range ruleAppliedToSpecs {
appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector, at.NSSelector, at.PodSelectorMatchExp, at.NSSelectorMatchExp, at.Group))
}
// An empty From/To in ACNP rules evaluates to match all addresses.
policyPeer := make([]crdv1alpha1.NetworkPolicyPeer, 0)
if pSel != nil || nSel != nil || ns != nil || ipBlock != nil || ruleClusterGroup != "" {
policyPeer = []crdv1alpha1.NetworkPolicyPeer{{
PodSelector: pSel,
NamespaceSelector: nSel,
Namespaces: ns,
IPBlock: ipBlock,
Group: ruleClusterGroup,
}}
}
var ports []crdv1alpha1.NetworkPolicyPort
if port != nil && portName != nil {
panic("specify portname or port, not both")
}
if portName != nil {
ports = []crdv1alpha1.NetworkPolicyPort{
{
Port: &intstr.IntOrString{Type: intstr.String, StrVal: *portName},
Protocol: &protoc,
},
}
}
if port != nil || endPort != nil {
var pVal *intstr.IntOrString
if port != nil {
pVal = &intstr.IntOrString{IntVal: *port}
}
ports = []crdv1alpha1.NetworkPolicyPort{
{
Port: pVal,
EndPort: endPort,
Protocol: &protoc,
},
}
}
newRule := crdv1alpha1.Rule{
From: policyPeer,
Ports: ports,
Action: &action,
Name: name,
AppliedTo: appliedTos,
}
b.Spec.Ingress = append(b.Spec.Ingress, newRule)
return b
}
func (b *ClusterNetworkPolicySpecBuilder) AddEgress(protoc v1.Protocol,
port *int32, portName *string, endPort *int32, cidr *string,
podSelector map[string]string, nsSelector map[string]string,
podSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, selfNS bool,
ruleAppliedToSpecs []ACNPAppliedToSpec, action crdv1alpha1.RuleAction, ruleClusterGroup, name string) *ClusterNetworkPolicySpecBuilder {
// For simplicity, we just reuse the Ingress code here. The underlying data model for ingress/egress is identical,
// with the exception of calling the rule `To` vs. `From`.
c := &ClusterNetworkPolicySpecBuilder{}
c.AddIngress(protoc, port, portName, endPort, cidr, podSelector, nsSelector,
podSelectorMatchExp, nsSelectorMatchExp, selfNS, ruleAppliedToSpecs, action, ruleClusterGroup, name)
theRule := c.Get().Spec.Ingress[0]
b.Spec.Egress = append(b.Spec.Egress, crdv1alpha1.Rule{
To: theRule.From,
Ports: theRule.Ports,
Action: theRule.Action,
Name: theRule.Name,
AppliedTo: theRule.AppliedTo,
})
return b
}
func (b *ClusterNetworkPolicySpecBuilder) AddFQDNRule(fqdn string,
protoc v1.Protocol, port *int32, portName *string, endPort *int32, name string,
ruleAppliedToSpecs []ACNPAppliedToSpec, action crdv1alpha1.RuleAction) *ClusterNetworkPolicySpecBuilder {
var appliedTos []crdv1alpha1.NetworkPolicyPeer
for _, at := range ruleAppliedToSpecs {
appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector, at.NSSelector, at.PodSelectorMatchExp, at.NSSelectorMatchExp, at.Group))
}
policyPeer := []crdv1alpha1.NetworkPolicyPeer{{FQDN: fqdn}}
var ports []crdv1alpha1.NetworkPolicyPort
if portName != nil {
ports = []crdv1alpha1.NetworkPolicyPort{
{
Port: &intstr.IntOrString{Type: intstr.String, StrVal: *portName},
Protocol: &protoc,
},
}
}
if port != nil || endPort != nil {
var pVal *intstr.IntOrString
if port != nil {
pVal = &intstr.IntOrString{IntVal: *port}
}
ports = []crdv1alpha1.NetworkPolicyPort{
{
Port: pVal,
EndPort: endPort,
Protocol: &protoc,
},
}
}
newRule := crdv1alpha1.Rule{
To: policyPeer,
Ports: ports,
Action: &action,
Name: name,
AppliedTo: appliedTos,
}
b.Spec.Egress = append(b.Spec.Egress, newRule)
return b
}
func (b *ClusterNetworkPolicySpecBuilder) AddToServicesRule(svcRefs []crdv1alpha1.ServiceReference,
name string, ruleAppliedToSpecs []ACNPAppliedToSpec, action crdv1alpha1.RuleAction) *ClusterNetworkPolicySpecBuilder {
var appliedTos []crdv1alpha1.NetworkPolicyPeer
for _, at := range ruleAppliedToSpecs {
appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector, at.NSSelector, at.PodSelectorMatchExp, at.NSSelectorMatchExp, at.Group))
}
newRule := crdv1alpha1.Rule{
To: make([]crdv1alpha1.NetworkPolicyPeer, 0),
ToServices: svcRefs,
Action: &action,
Name: name,
AppliedTo: appliedTos,
}
b.Spec.Egress = append(b.Spec.Egress, newRule)
return b
}
// WithEgressDNS mutates every egress rule to also allow DNS over UDP port 53; convenience method.
func (b *ClusterNetworkPolicySpecBuilder) WithEgressDNS() *ClusterNetworkPolicySpecBuilder {
protocolUDP := v1.ProtocolUDP
route53 := crdv1alpha1.NetworkPolicyPort{
Protocol: &protocolUDP,
Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 53},
}
for i, e := range b.Spec.Egress {
e.Ports = append(e.Ports, route53)
b.Spec.Egress[i] = e
}
return b
}
func (b *ClusterNetworkPolicySpecBuilder) AddEgressLogging() *ClusterNetworkPolicySpecBuilder {
for i, e := range b.Spec.Egress {
e.EnableLogging = true
b.Spec.Egress[i] = e
}
return b
}
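// Editor's note on the review message below: rather than widening selfNS from
// bool to a free-form string, the parameter could use an enum-like typed string
// (hypothetical sketch; names assumed, not taken from the antrea API):
//
//	type NamespaceMatchType string
//
//	const (
//		NamespaceMatchNone NamespaceMatchType = ""
//		NamespaceMatchSelf NamespaceMatchType = "Self"
//	)
//
// AddIngress would then accept `selfNS NamespaceMatchType` and build
// PeerNamespaces only when selfNS != NamespaceMatchNone, which leaves room for
// future match modes without another signature change.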
| 1 | 42,368 | Can we update bool to an enum instead of a string? This helps future extensibility | antrea-io-antrea | go |
@@ -8,6 +8,13 @@ module Ncr
EXPENSE_TYPES = %w(BA60 BA61 BA80)
BUILDING_NUMBERS = YAML.load_file("#{Rails.root}/config/data/ncr/building_numbers.yml")
+ FY16 = Time.zone.parse('2015-10-01')
+ if Time.zone.now > FY16
+ MAX_AMOUNT = 3500.0
+ else
+ MAX_AMOUNT = 3000.0
+ end
+ MIN_AMOUNT = 0.0
class WorkOrder < ActiveRecord::Base
include ValueHelper | 1 | require 'csv'
module Ncr
# Make sure all table names use 'ncr_XXX'
def self.table_name_prefix
'ncr_'
end
EXPENSE_TYPES = %w(BA60 BA61 BA80)
BUILDING_NUMBERS = YAML.load_file("#{Rails.root}/config/data/ncr/building_numbers.yml")
class WorkOrder < ActiveRecord::Base
include ValueHelper
include ProposalDelegate
# This is a hack to attribute changes to the correct user. This attribute needs to be set explicitly; the update comment will then use that user as the "commenter". Defaults to the requester.
attr_accessor :modifier
after_initialize :set_defaults
before_validation :normalize_values
before_update :record_changes
# @TODO: use integer number of cents to avoid floating point issues
validates :amount, numericality: {
less_than_or_equal_to: 3000,
message: "must be less than or equal to $3,000"
}
validates :amount, numericality: {
greater_than_or_equal_to: 0,
message: "must be greater than or equal to $0"
}
validates :cl_number, format: {
with: /\ACL\d{7}\z/,
message: "must start with 'CL', followed by seven numbers"
}, allow_blank: true
validates :expense_type, inclusion: {in: EXPENSE_TYPES}, presence: true
validates :function_code, format: {
with: /\APG[A-Z0-9]{3}\z/,
message: "must start with 'PG', followed by three letters or numbers"
}, allow_blank: true
validates :project_title, presence: true
validates :vendor, presence: true
validates :building_number, presence: true
validates :rwa_number, presence: true, if: :ba80?
validates :rwa_number, format: {
with: /\A[a-zA-Z][0-9]{7}\z/,
message: "must be one letter followed by 7 numbers"
}, allow_blank: true
validates :soc_code, format: {
with: /\A[A-Z0-9]{3}\z/,
message: "must be three letters or numbers"
}, allow_blank: true
def set_defaults
self.direct_pay ||= false
self.not_to_exceed ||= false
self.emergency ||= false
end
# For budget attributes, converts empty strings to `nil`, so that the request isn't shown as being modified when the fields appear in the edit form.
def normalize_values
if self.cl_number.present?
self.cl_number = self.cl_number.upcase
self.cl_number.prepend('CL') unless self.cl_number.start_with?('CL')
else
self.cl_number = nil
end
if self.function_code.present?
self.function_code.upcase!
self.function_code.prepend('PG') unless self.function_code.start_with?('PG')
else
self.function_code = nil
end
if self.soc_code.present?
self.soc_code.upcase!
else
self.soc_code = nil
end
end
def approver_email_frozen?
approval = self.individual_approvals.first
approval && !approval.actionable?
end
def approver_changed?(approval_email)
self.approving_official && self.approving_official.email_address != approval_email
end
# Check the approvers, accounting for frozen approving official
def approvers_emails(selected_approving_official_email)
emails = self.system_approver_emails
if self.approver_email_frozen?
emails.unshift(self.approving_official.email_address)
else
emails.unshift(selected_approving_official_email)
end
emails
end
def setup_approvals_and_observers(selected_approving_official_email)
emails = self.approvers_emails(selected_approving_official_email)
if self.emergency
emails.each{|e| self.add_observer(e)}
# skip state machine
self.proposal.update(status: 'approved')
else
original_approvers = self.proposal.individual_approvals.non_pending.map(&:user)
self.force_approvers(emails)
self.notify_removed_approvers(original_approvers)
end
end
def approving_official
self.approvers.first
end
def email_approvers
Dispatcher.on_proposal_update(self.proposal)
end
# Ignore values in certain fields if they aren't relevant. May want to
# split these into different models
def self.relevant_fields(expense_type)
fields = [:description, :amount, :expense_type, :vendor, :not_to_exceed,
:building_number, :org_code, :direct_pay, :cl_number, :function_code, :soc_code]
case expense_type
when 'BA61'
fields << :emergency
when 'BA80'
fields.concat([:rwa_number, :code])
end
fields
end
def relevant_fields
Ncr::WorkOrder.relevant_fields(self.expense_type)
end
# Methods for Client Data interface
def fields_for_display
attributes = self.relevant_fields
attributes.map{|key| [WorkOrder.human_attribute_name(key), self[key]]}
end
# Will return nil if the `org_code` is blank or not present in the Organization list.
def organization
# TODO reference by `code` rather than storing the whole thing
code = (self.org_code || '').split(' ', 2)[0]
Ncr::Organization.find(code)
end
def ba80?
self.expense_type == 'BA80'
end
def public_identifier
"FY" + self.fiscal_year.to_s.rjust(2, "0") + "-#{self.proposal.id}"
end
def total_price
self.amount || 0.0
end
# may be replaced with paper-trail or similar at some point
def version
self.updated_at.to_i
end
def name
self.project_title
end
def system_approver_emails
results = []
if %w(BA60 BA61).include?(self.expense_type)
unless self.organization.try(:whsc?)
results << self.class.ba61_tier1_budget_mailbox
end
results << self.class.ba61_tier2_budget_mailbox
else # BA80
if self.organization.try(:ool?)
results << self.class.ool_ba80_budget_mailbox
else
results << self.class.ba80_budget_mailbox
end
end
results
end
def self.ba61_tier1_budget_mailbox
ENV['NCR_BA61_TIER1_BUDGET_MAILBOX'] || '[email protected]'
end
def self.ba61_tier2_budget_mailbox
ENV['NCR_BA61_TIER2_BUDGET_MAILBOX'] || '[email protected]'
end
def self.ba80_budget_mailbox
ENV['NCR_BA80_BUDGET_MAILBOX'] || '[email protected]'
end
def self.ool_ba80_budget_mailbox
ENV['NCR_OOL_BA80_BUDGET_MAILBOX'] || '[email protected]'
end
def org_id
self.organization.try(:code)
end
def building_id
regex = /\A(\w{8}) .*\z/
if self.building_number && regex.match(self.building_number)
regex.match(self.building_number)[1]
else
self.building_number
end
end
def as_json
super.merge(org_id: self.org_id, building_id: self.building_id)
end
protected
# TODO move to Proposal model
def record_changes
changed_attributes = self.changed_attributes.except(:updated_at)
comment_texts = []
bullet = changed_attributes.length > 1 ? '- ' : ''
changed_attributes.each do |key, value|
former = property_to_s(self.send(key + "_was"))
value = property_to_s(self[key])
property_name = WorkOrder.human_attribute_name(key)
comment_texts << WorkOrder.update_comment_format(property_name, value, bullet, former)
end
if !comment_texts.empty?
if self.approved?
comment_texts << "_Modified post-approval_"
end
self.proposal.comments.create(
comment_text: comment_texts.join("\n"),
update_comment: true,
user: self.modifier || self.requester
)
end
end
def self.update_comment_format(key, value, bullet, former = nil)
from = former ? "from #{former} " : ''
"#{bullet}*#{key}* was changed " + from + "to #{value}"
end
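# Example for fiscal_year below (editor's sketch): October through December
# roll into the next fiscal year, so a work order created 2015-10-15 yields 16,
# while one created 2015-09-15 yields 15.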
def fiscal_year
year = self.created_at.nil? ? Time.now.year : self.created_at.year
month = self.created_at.nil? ? Time.now.month : self.created_at.month
if month >= 10
year += 1
end
year % 100 # convert to two-digit
end
# Generally shouldn't be called directly, as it doesn't account for
# emergencies or notify removed approvers.
def force_approvers(emails)
individuals = emails.map do |email|
user = User.for_email(email)
# Reuse existing approvals, if present
self.proposal.existing_approval_for(user) || Approvals::Individual.new(user: user)
end
self.proposal.root_approval = Approvals::Serial.new(child_approvals: individuals)
end
def notify_removed_approvers(original_approvers)
current_approvers = self.proposal.individual_approvals.non_pending.map(&:user)
removed_approvers_to_notify = original_approvers - current_approvers
Dispatcher.on_approver_removal(self.proposal, removed_approvers_to_notify)
end
end
end
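# Editor's note on the review message below: the FY-dependent MAX_AMOUNT added
# by the patch is also needed for gsa18f, so the cutoff logic could live in one
# shared helper instead of being duplicated per module (hypothetical sketch;
# module and method names assumed):
#
#   module FiscalYearLimits
#     FY16 = Time.zone.parse('2015-10-01')
#
#     def self.max_amount
#       Time.zone.now > FY16 ? 3500.0 : 3000.0
#     end
#   end
#
#   # Ncr::WorkOrder and the Gsa18f equivalent would then both validate
#   # against FiscalYearLimits.max_amount.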
| 1 | 14,041 | Should we share, since this is also being used with gsa18f? | 18F-C2 | rb |
@@ -1835,7 +1835,7 @@ window.CrashgroupView = countlyView.extend({
// `d` is the original data object for the row
var str = '';
if (data) {
- str += '<div class="datatablesubrow">' +
+ str += '<div id="crash-data-subrow" class="datatablesubrow">' +
'<div class="error_menu">' +
'<div class="error-details-menu" data-id="' + data._id + '">' +
'<a class="right icon-button cly-button-menu-trigger"></a>' + | 1 | /*globals countlyView,_,countlyDeviceDetails,countlyDeviceList,marked,addDrill,extendViewWithFilter,hljs,production,countlyUserdata,moment,store,jQuery,countlySession,$,countlyGlobal,Handlebars,countlyCrashes,app,CountlyHelpers,CrashesView,CrashgroupView,countlySegmentation,countlyCommon */
window.CrashesView = countlyView.extend({
convertFilter: {
"sg.crash": {prop: "_id", type: "string"},
"sg.cpu": {prop: "cpu", type: "segment"},
"sg.opengl": {prop: "opengl", type: "segment"},
"sg.os": {prop: "os", type: "string"},
"sg.orientation": {prop: "orientation", type: "segment"},
"sg.nonfatal": {prop: "nonfatal", type: "booltype"},
"sg.root": {prop: "root", type: "boolsegment"},
"sg.online": {prop: "online", type: "boolsegment"},
"sg.signal": {prop: "signal", type: "boolsegment"},
"sg.muted": {prop: "muted", type: "boolsegment"},
"sg.background": {prop: "background", type: "boolsegment"},
"up.d": {prop: "device", type: "segment"},
"up.pv": {prop: "os_version", type: "segment"},
"up.av": {prop: "app_version", type: "segment"},
"up.r": {prop: "resolution", type: "segment"},
"up.ls": {prop: "lastTs", type: "date"},
"up.fs": {prop: "startTs", type: "date"},
"is_new": {prop: "is_new", type: "booltype"},
"is_resolved": {prop: "is_resolved", type: "booltype"},
"is_hidden": {prop: "is_hidden", type: "booltype"},
"is_renewed": {prop: "is_renewed", type: "booltype"},
"reports": {prop: "reports", type: "number"},
"users": {prop: "reports", type: "number"},
"ram_min": {prop: "ram.min", type: "number"},
"ram_max": {prop: "ram.max", type: "number"},
"bat_min": {prop: "bat.min", type: "number"},
"bat_max": {prop: "bat.max", type: "number"},
"disk_min": {prop: "disk.min", type: "number"},
"disk_max": {prop: "disk.max", type: "number"},
"run_min": {prop: "run.min", type: "number"},
"run_max": {prop: "run.max", type: "number"}
},
initialize: function() {
this.loaded = true;
this.filter = (store.get("countly_crashfilter")) ? store.get("countly_crashfilter") : "crash-all";
this.curMetric = "cr";
this.metrics = {
cr: jQuery.i18n.map["crashes.total"],
cru: jQuery.i18n.map["crashes.unique"],
crnf: jQuery.i18n.map["crashes.nonfatal"] + " " + jQuery.i18n.map["crashes.title"],
crf: jQuery.i18n.map["crashes.fatal"] + " " + jQuery.i18n.map["crashes.title"],
crru: jQuery.i18n.map["crashes.resolved-users"]
};
},
showOnGraph: {"crashes-fatal": true, "crashes-nonfatal": true, "crashes-total": true},
beforeRender: function() {
this.selectedCrashes = {};
this.selectedCrashesIds = [];
countlySession.initialize();
if (this.template) {
return $.when(countlyCrashes.initialize()).then(function() {});
}
else {
var self = this;
return $.when($.get(countlyGlobal.path + '/crashes/templates/crashes.html', function(src) {
self.template = Handlebars.compile(src);
}), countlyCrashes.initialize()).then(function() {});
}
},
getExportAPI: function(tableID) {
if (tableID === 'd-table-crashes') {
var userDetails = countlyUserdata.getUserdetails();
var requestPath = '/o?method=user_crashes&api_key=' + countlyGlobal.member.api_key +
"&app_id=" + countlyCommon.ACTIVE_APP_ID + "&uid=" + userDetails.uid +
"&iDisplayStart=0&fromExportAPI=true";
var apiQueryData = {
api_key: countlyGlobal.member.api_key,
app_id: countlyCommon.ACTIVE_APP_ID,
path: requestPath,
method: "GET",
filename: "User_Crashes_on_" + moment().format("DD-MMM-YYYY"),
prop: ['aaData']
};
return apiQueryData;
}
return null;
},
processData: function() {
var self = this;
var crashData = countlyCrashes.getData();
this.dtable = $('#crash-table').dataTable($.extend({}, $.fn.dataTable.defaults, {
"aaSorting": [[ 5, "desc" ]],
"bServerSide": true,
"sAjaxSource": countlyCommon.API_PARTS.data.r + "?app_id=" + countlyCommon.ACTIVE_APP_ID + "&method=crashes",
"fnServerData": function(sSource, aoData, fnCallback) {
$.ajax({
"type": "POST",
"url": sSource,
"data": aoData,
"success": function(data) {
fnCallback(data);
$("#view-filter .bar-values").text(jQuery.i18n.prop('crashes.of-users', data.iTotalDisplayRecords, data.iTotalRecords));
$("#view-filter .bar span").text(Math.floor((data.iTotalDisplayRecords / data.iTotalRecords) * 100) + "%");
$("#view-filter .bar .bar-inner").animate({width: Math.floor((data.iTotalDisplayRecords / data.iTotalRecords) * 100) + "%"}, 1000);
}
});
},
"fnServerParams": function(aoData) {
if (self.filter) {
aoData.push({ "name": "filter", "value": self.filter });
}
if (self._query) {
aoData.push({ "name": "query", "value": JSON.stringify(self._query) });
}
},
"fnRowCallback": function(nRow, aData) {
$(nRow).attr("id", aData._id);
if (aData.is_resolved) {
$(nRow).addClass("resolvedcrash");
}
else if (aData.is_new) {
$(nRow).addClass("newcrash");
}
else if (aData.is_renewed) {
$(nRow).addClass("renewedcrash");
}
$(nRow).find(".tag").tipsy({gravity: 'w'});
},
"aoColumns": [
{
"mData": function(row) {
if (self.selectedCrashes[row._id]) {
return "<a class='fa fa-check-square check-green' id=\"" + row._id + "\"></a>";
}
else {
return "<a class='fa fa-square-o check-green' id=\"" + row._id + "\"></a>";
}
},
"sType": "numeric",
"sClass": "center",
"sWidth": "30px",
"bSortable": false,
"sTitle": "<a class='fa fa-square-o check-green check-header'></a>"
},
{
"mData": function(row, type) {
if (type !== "display") {
return row.name;
}
var tagDivs = "";
// This separator is not visible in the UI but | is visible in exported data
var separator = "<span class='separator'>|</span>";
if (row.is_resolving) {
tagDivs += separator + "<div class='tag'>" + "<span style='color:green;'>" + jQuery.i18n.map["crashes.resolving"] + "</span>" + "</div>";
}
else if (row.is_resolved) {
tagDivs += separator + "<div class='tag'>" + "<span style='color:green;'>" + jQuery.i18n.map["crashes.resolved"] + " (" + row.latest_version.replace(/:/g, '.') + ")</span>" + "</div>";
}
else {
tagDivs += separator + "<div class='tag'>" + "<span style='color:red;'>" + jQuery.i18n.map["crashes.unresolved"] + "</span>" + "</div>";
}
if (row.nonfatal) {
tagDivs += separator + "<div class='tag'>" + jQuery.i18n.map["crashes.nonfatal"] + "</div>";
}
else {
tagDivs += separator + "<div class='tag'>" + jQuery.i18n.map["crashes.fatal"] + "</div>";
}
if (row.session) {
tagDivs += separator + "<div class='tag'>" + ((Math.round(row.session.total / row.session.count) * 100) / 100) + " " + jQuery.i18n.map["crashes.sessions"] + "</div>";
}
else {
tagDivs += separator + "<div class='tag'>" + jQuery.i18n.map["crashes.first-crash"] + "</div>";
}
tagDivs += "<div class='tag not-viewed' title='" + jQuery.i18n.map["crashes.not-viewed"] + "'><i class='fa fa-eye-slash'></i></div>";
tagDivs += "<div class='tag re-occurred' title='" + jQuery.i18n.map["crashes.re-occurred"] + "'><i class='fa fa-refresh'></i></div>";
return "<div class='truncated'>" + row.name + "</div>" + tagDivs;
},
"sType": "string",
"sTitle": jQuery.i18n.map["crashes.error"]
},
{
"mData": function(row) {
return (row.not_os_specific) ? jQuery.i18n.map["crashes.varies"] : row.os;
},
"sType": "string",
"sTitle": jQuery.i18n.map["crashes.platform"],
"sWidth": "90px"
},
{
"mData": "reports",
"sType": "numeric",
"sTitle": jQuery.i18n.map["crashes.reports"],
"sWidth": "90px"
},
{
"mData": function(row, type) {
row.users = row.users || 1;
if (type === "display") {
return row.users + " (" + ((row.users / crashData.users.total) * 100).toFixed(2) + "%)";
}
else {
return row.users;
}
},
"sType": "string",
"sTitle": jQuery.i18n.map["crashes.users"],
"sWidth": "90px"
},
{
"mData": function(row, type) {
if (type === "display") {
return countlyCommon.formatTimeAgo(row.lastTs);
}
else {
return row.lastTs;
}
},
"sType": "format-ago",
"sTitle": jQuery.i18n.map["crashes.last_time"],
"sWidth": "150px"
},
{
"mData": function(row) {
return row.latest_version.replace(/:/g, '.');
},
"sType": "string",
"sTitle": jQuery.i18n.map["crashes.latest_app"],
"sWidth": "90px"
},
{
"mData": function(row) {
return "<a class='extable-link table-link green' href='#/crashes/" + row._id + "' target='_blank'>" +
"<i class='material-icons'>open_in_new</i></a>" +
"<a class='table-link green external'>" + jQuery.i18n.map["common.view"] + "</a>";
},
"sType": "numeric",
"sClass": "center",
"sWidth": "90px",
"bSortable": false
}
],
"fnInitComplete": function(oSettings, json) {
$.fn.dataTable.defaults.fnInitComplete(oSettings, json);
var tableWrapper = $("#" + oSettings.sTableId + "_wrapper");
tableWrapper.find(".dataTables_filter input").attr("placeholder", jQuery.i18n.map["crashes.search"]);
// init sticky headers here in order to wait for correct
// table width (for multi select checkboxes to render)
self.dtable.stickyTableHeaders();
$(".extable-link").off('click').on('click', function(e) {
e.stopPropagation();
});
}
}));
//this.dtable.fnSort( [ [5,'desc'] ] );
this.dtable.find("thead .check-green").click(function() {
if ($(this).hasClass("fa-check-square")) {
$(".sticky-header .check-green").removeClass("fa-check-square").addClass("fa-square-o");
self.dtable.find(".check-green").removeClass("fa-check-square").addClass("fa-square-o");
self.selectedCrashesIds = [];
self.selectedCrashes = {};
$(".action-segmentation").addClass("disabled");
}
else {
$(".sticky-header .check-green").removeClass("fa-square-o").addClass("fa-check-square");
self.dtable.find(".check-green").removeClass("fa-square-o").addClass("fa-check-square");
self.dtable.find(".check-green").parents("tr").each(function() {
var id = $(this).attr("id");
if (id) {
if (!self.selectedCrashes[id]) {
self.selectedCrashesIds.push(id);
}
self.selectedCrashes[id] = true;
$(".action-segmentation").removeClass("disabled");
}
});
}
});
$('.crashes tbody ').on("click", "tr", function() {
var id = $(this).attr("id");
if (id) {
var link = "#/crashes/" + id ;
window.open(link, "_self");
}
});
$('.crashes tbody ').on("click", "td:first-child", function(e) {
e.cancelBubble = true; // IE Stop propagation
if (e.stopPropagation) {
e.stopPropagation();
} // Other browsers
var id = $(this).parent().attr("id");
if (id) {
if (self.selectedCrashes[id]) {
$(this).find(".check-green").removeClass("fa-check-square").addClass("fa-square-o");
self.selectedCrashes[id] = null;
var index = self.selectedCrashesIds.indexOf(id);
if (index !== -1) {
self.selectedCrashesIds.splice(index, 1);
}
}
else {
self.selectedCrashes[id] = true;
self.selectedCrashesIds.push(id);
$(this).find(".check-green").removeClass("fa-square-o").addClass("fa-check-square");
}
if (self.selectedCrashesIds.length) {
$(".action-segmentation").removeClass("disabled");
}
else {
$(".action-segmentation").addClass("disabled");
}
}
});
$(".filter-segmentation").on("cly-select-change", function(e, val) {
self.filterCrashes(val);
});
$(".action-segmentation").on("cly-select-change", function(e, val) {
if (val !== "") {
$(".action-segmentation").clySelectSetSelection("", jQuery.i18n.map["crashes.make-action"]);
if (val === "crash-resolve") {
CountlyHelpers.confirm(jQuery.i18n.prop("crashes.confirm-action-resolved", self.selectedCrashesIds.length), "red", function(result) {
if (!result) {
return true;
}
countlyCrashes.markResolve(self.selectedCrashesIds, function(data) {
if (!data) {
CountlyHelpers.alert(jQuery.i18n.map["crashes.try-later"], "red");
self.resetSelection(true);
}
});
});
}
else if (val === "crash-unresolve") {
CountlyHelpers.confirm(jQuery.i18n.prop("crashes.confirm-action-unresolved", self.selectedCrashesIds.length), "red", function(result) {
if (!result) {
return true;
}
countlyCrashes.markUnresolve(self.selectedCrashesIds, function(data) {
if (!data) {
CountlyHelpers.alert(jQuery.i18n.map["crashes.try-later"], "red");
}
else {
self.resetSelection(true);
}
});
});
}
else if (val === "crash-hide") {
CountlyHelpers.confirm(jQuery.i18n.prop("crashes.confirm-action-hide", self.selectedCrashesIds.length), "red", function(result) {
if (!result) {
return true;
}
countlyCrashes.hide(self.selectedCrashesIds, function(data) {
if (!data) {
CountlyHelpers.alert(jQuery.i18n.map["crashes.try-later"], "red");
}
else {
self.resetSelection(true);
}
});
});
}
else if (val === "crash-resolving") {
CountlyHelpers.confirm(jQuery.i18n.prop("crashes.confirm-action-resolving", self.selectedCrashesIds.length), "red", function(result) {
if (!result) {
return true;
}
countlyCrashes.resolving(self.selectedCrashesIds, function(data) {
if (!data) {
CountlyHelpers.alert(jQuery.i18n.map["crashes.try-later"], "red");
}
else {
self.resetSelection(true);
}
});
});
}
else if (val === "crash-delete") {
CountlyHelpers.confirm(jQuery.i18n.prop("crashes.confirm-action-delete", self.selectedCrashesIds.length), "red", function(result) {
if (!result) {
return true;
}
countlyCrashes.del(self.selectedCrashesIds, function(data) {
if (!data) {
CountlyHelpers.alert(jQuery.i18n.map["crashes.try-later"], "red");
}
else {
self.resetSelection(true);
}
});
});
}
}
});
},
resetSelection: function(flash) {
if (flash) {
this.dtable.find(".fa-check-square.check-green").parents("tr").addClass("flash");
}
this.selectedCrashesIds = [];
this.selectedCrashes = {};
this.dtable.find(".check-green").removeClass("fa-check-square").addClass("fa-square-o");
$(".action-segmentation").addClass("disabled");
this.refresh();
},
renderCommon: function(isRefresh) {
var crashData = countlyCrashes.getData();
var chartData = countlyCrashes.getChartData(this.curMetric, this.metrics[this.curMetric]);
var dashboard = countlyCrashes.getDashboardData();
this.templateData = {
"page-title": jQuery.i18n.map["crashes.title"],
"no-data": jQuery.i18n.map["common.bar.no-data"],
"usage": [
{
"title": jQuery.i18n.map["crashes.total"],
"data": dashboard.usage.cr,
"id": "crash-cr",
"help": "crashes.help-total"
},
{
"title": jQuery.i18n.map["crashes.unique"],
"data": dashboard.usage.cru,
"id": "crash-cru",
"help": "crashes.help-unique"
},
{
"title": jQuery.i18n.map["crashes.nonfatal"] + " " + jQuery.i18n.map["crashes.title"],
"data": dashboard.usage.crnf,
"id": "crash-crnf",
"help": "crashes.help-nonfatal"
},
{
"title": jQuery.i18n.map["crashes.fatal"] + " " + jQuery.i18n.map["crashes.title"],
"data": dashboard.usage.crf,
"id": "crash-crf",
"help": "crashes.help-fatal"
},
{ // total crashes per session
"title": jQuery.i18n.map["crashes.total-per-session"],
"data": dashboard.usage.crt,
"id": "crash-cr-session",
"help": "crashes.help-session"
}/*,
{
"title":jQuery.i18n.map["crashes.resolved-users"],
"data":dashboard.usage['crru'],
"id":"crash-crru",
"help":"crashes.help-resolved-users"
}*/
],
"chart-select": [
{
title: jQuery.i18n.map["crashes.total_overall"],
trend: dashboard.usage.crt['trend-total'],
total: dashboard.usage.crt.total,
myclass: "crashes-total"
},
{
title: jQuery.i18n.map["crashes.fatal"],
trend: dashboard.usage.crt['trend-fatal'],
total: dashboard.usage.crt['total-fatal'],
myclass: "crashes-fatal"
},
{
title: jQuery.i18n.map["crashes.nonfatal"],
trend: dashboard.usage.crt['trend-nonfatal'],
total: dashboard.usage.crt['total-nonfatal'],
myclass: "crashes-nonfatal"
},
],
"big-numbers": {
"items": [
{
"title": jQuery.i18n.map["crashes.unresolved-crashes"],
"total": countlyCommon.getShortNumber(crashData.crashes.unresolved),
"help": "crashes.help-unresolved"
},
{
"title": jQuery.i18n.map["crashes.highest-version"],
"total": crashData.crashes.highest_app,
"help": "crashes.help-latest-version"
},
{
"title": jQuery.i18n.map["crashes.new-crashes"],
"total": countlyCommon.getShortNumber(crashData.crashes.news),
"help": "crashes.help-new"
},
{
"title": jQuery.i18n.map["crashes.renew-crashes"],
"total": countlyCommon.getShortNumber(crashData.crashes.renewed),
"help": "crashes.help-reoccurred"
}
]
},
"bars": [
{
"title": jQuery.i18n.map["crashes.resolution-status"],
"data": countlyCrashes.getResolvedBars(),
"help": "crashes.help-resolved"
},
{
"title": jQuery.i18n.map["crashes.affected-users"],
"data": countlyCrashes.getAffectedUsers(),
"help": "crashes.help-affected-levels"
},
{
"title": jQuery.i18n.map["crashes.platform"],
"data": countlyCrashes.getPlatformBars(),
"help": "crashes.help-platforms"
},
{
"title": jQuery.i18n.map["crashes.fatality"],
"data": countlyCrashes.getFatalBars(),
"help": "crashes.help-fatals"
}
],
hasDrill: typeof this.initDrill !== "undefined",
"active-filter": jQuery.i18n.map["crashes.all"],
"active-action": jQuery.i18n.map["crashes.make-action"]
};
if (crashData.loss) {
this.templateData.loss = true;
this.templateData["big-numbers"].items.push({
"title": jQuery.i18n.map["crashes.loss"],
"total": crashData.loss.toFixed(2),
"help": "crashes.help-loss"
});
}
var self = this;
if (!isRefresh) {
countlyCommon.drawTimeGraph(chartData.chartDP, "#dashboard-graph");
chartData = countlyCrashes.getChartData(self.curMetric, self.metrics[self.curMetric], self.showOnGraph);
$(this.el).html(this.template(this.templateData));
self.switchMetric();
$("#total-user-estimate-ind").on("click", function() {
CountlyHelpers.alert(jQuery.i18n.map["common.estimation"], "black");
});
$(".filter-segmentation").clySelectSetSelection(this.filter, jQuery.i18n.map["crashes." + this.filter.split("-").pop()]);
$("#crash-" + this.curMetric).parents(".big-numbers").addClass("active");
$(".widget-content .inner").click(function() {
$(".big-numbers").removeClass("active");
$(".big-numbers .select").removeClass("selected");
$(this).parent(".big-numbers").addClass("active");
$(this).find('.select').addClass("selected");
});
$(".big-numbers .inner").click(function() {
var elID = $(this).find('.select').attr("id");
if (elID) {
if (self.curMetric === elID.replace("crash-", "")) {
return true;
}
self.curMetric = elID.replace("crash-", "");
self.switchMetric();
}
});
if (typeof self.initDrill !== "undefined") {
self.byDisabled = true;
$.when(countlySegmentation.initialize("[CLY]_crash")).then(function() {
self.initDrill();
setTimeout(function() {
self.filterBlockClone = $("#filter-view").clone(true);
if (self._filter) {
$("#filter-view").show();
$(".filter-view-container").show();
self.adjustFilters();
var lookup = {};
for (var i in self.convertFilter) {
lookup[self.convertFilter[i].prop] = i;
}
var filter = self._query;
var inputs = [];
var subs = {};
for (var n in filter) {
inputs.push(n);
subs[n] = [];
for (var j in filter[n]) {
if (filter[n][j].length) {
for (var k = 0; k < filter[n][j].length; k++) {
subs[n].push([j, filter[n][j][k]]);
}
}
else {
subs[n].push([j, filter[n][j]]);
}
}
}
var setInput = function(cur, sub, total) {
sub = sub || 0;
if (inputs[cur]) {
var filterType = subs[inputs[cur]][sub][0];
if (filterType === "$in" || filterType === "$eq") {
filterType = "=";
}
else if (filterType === "$nin" || filterType === "$ne") {
filterType = "!=";
}
else if (filterType === "$exists") {
if (subs[inputs[cur]][sub][0]) {
filterType = "=";
}
else {
filterType = "!=";
}
}
var val = subs[inputs[cur]][sub][1];
var el = $(".query:nth-child(" + (total) + ")");
el.find(".filter-name").trigger("click");
el.find(".filter-type").trigger("click");
var name = inputs[cur];
if (lookup[name]) {
name = lookup[name];
}
else if (name.indexOf(".") !== -1) {
var parts = name.split(".");
if (lookup[parts[0]]) {
name = lookup[parts[0]];
val = parts[1];
}
}
$(el).data("query_value", val + ""); //saves value as attribute for selected query
el.find(".filter-name").find(".select-items .item[data-value='" + name + "']").trigger("click");
el.find(".filter-type").find(".select-items .item[data-value='" + filterType + "']").trigger("click");
setTimeout(function() {
el.find(".filter-value").not(".hidden").trigger("click");
if (el.find(".filter-value").not(".hidden").find(".select-items .item[data-value='" + val + "']").length) {
el.find(".filter-value").not(".hidden").find(".select-items .item[data-value='" + val + "']").trigger("click");
}
else if (_.isNumber(val) && (val + "").length === 10) {
el.find(".filter-value.date").find("input").val(countlyCommon.formatDate(moment(val * 1000), "DD MMMM, YYYY"));
el.find(".filter-value.date").find("input").data("timestamp", val);
}
else {
el.find(".filter-value").not(".hidden").find("input").val(val);
}
if (subs[inputs[cur]].length === sub + 1) {
cur++;
sub = 0;
}
else {
sub++;
}
total++;
if (inputs[cur]) {
$("#filter-add-container").trigger("click");
if (sub > 0) {
setTimeout(function() {
el = $(".query:nth-child(" + (total) + ")");
el.find(".and-or").find(".select-items .item[data-value='OR']").trigger("click");
setInput(cur, sub, total);
}, 500);
}
else {
setInput(cur, sub, total);
}
}
else {
setTimeout(function() {
$("#apply-filter").removeClass("disabled");
$("#no-filter").hide();
var filterData = self.getFilterObjAndByVal();
$("#current-filter").show().find(".text").text(filterData.bookmarkText);
$("#connector-container").show();
}, 500);
}
}, 500);
}
};
setInput(0, 0, 1);
}
}, 0);
self.processData();
});
}
else {
$("#view-filter").hide();
self.processData();
}
self.pageScripts();
$('.action-segmentation').attr('data-tooltip-content', "#action-segmentation-tooltip");
$('.action-segmentation').tooltipster({
theme: ['tooltipster-borderless'],
contentCloning: false,
interactive: false,
trigger: 'hover',
side: 'left',
zIndex: 2,
functionBefore: function() {
if (!$('.action-segmentation').hasClass("disabled")) {
return false;
}
}
});
}
},
refresh: function() {
var self = this;
if (this.loaded) {
this.loaded = false;
$.when(countlyCrashes.refresh()).then(function() {
self.loaded = true;
if (app.activeView !== self) {
return false;
}
self.renderCommon(true);
var chartData = countlyCrashes.getChartData(self.curMetric, self.metrics[self.curMetric], self.showOnGraph);
var newPage = $("<div>" + self.template(self.templateData) + "</div>");
$(".crashoveral .dashboard").replaceWith(newPage.find(".dashboard"));
$(".crash-big-numbers").replaceWith(newPage.find(".crash-big-numbers"));
$(".dashboard-summary").replaceWith(newPage.find(".dashboard-summary"));
$("#data-selector").replaceWith(newPage.find("#data-selector"));
$("#crash-" + self.curMetric).parents(".big-numbers").addClass("active");
$(".widget-content .inner").click(function() {
$(".big-numbers").removeClass("active");
$(".big-numbers .select").removeClass("selected");
$(this).parent(".big-numbers").addClass("active");
$(this).find('.select').addClass("selected");
});
$(".big-numbers .inner").click(function() {
var elID = $(this).find('.select').attr("id");
if (elID) {
if (self.curMetric === elID.replace("crash-", "")) {
return true;
}
self.curMetric = elID.replace("crash-", "");
self.switchMetric();
}
});
self.dtable.fnDraw(false);
self.pageScripts();
countlyCommon.drawTimeGraph(chartData.chartDP, "#dashboard-graph");
//app.localize();
});
}
},
getExportQuery: function() {
var replacer = function(key, value) {
if (value instanceof RegExp) {
return ("__REGEXP " + value.toString());
}
else {
return value;
}
};
var qstring = {
api_key: countlyGlobal.member.api_key,
db: "countly",
collection: "app_crashgroups" + countlyCommon.ACTIVE_APP_ID,
query: this._query || {}
};
if ($('.dataTables_filter input').val().length) {
qstring.query.name = {"$regex": new RegExp(".*" + $('.dataTables_filter input').val() + ".*", 'i')};
}
if (this.filter && this.filter !== "") {
switch (this.filter) {
case "crash-resolved":
qstring.query.is_resolved = true;
break;
case "crash-hidden":
qstring.query.is_hidden = true;
break;
case "crash-unresolved":
qstring.query.is_resolved = false;
break;
case "crash-nonfatal":
qstring.query.nonfatal = true;
break;
case "crash-fatal":
qstring.query.nonfatal = false;
break;
case "crash-new":
qstring.query.is_new = true;
break;
case "crash-viewed":
qstring.query.is_new = false;
break;
case "crash-reoccurred":
qstring.query.is_renewed = true;
break;
case "crash-resolving":
qstring.query.is_resolving = true;
break;
}
}
if (this.filter !== "crash-hidden") {
qstring.query.is_hidden = {$ne: true};
}
qstring.query._id = {$ne: "meta"};
qstring.query = JSON.stringify(qstring.query, replacer);
return qstring;
},
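// Example for getExportQuery above (editor's sketch): with "crash" typed into
// the table search box, the stringified query carries the RegExp as a tagged
// string so the export backend can revive it, e.g.
//   '{"name":"__REGEXP /.*crash.*/i","is_hidden":{"$ne":true},"_id":{"$ne":"meta"}}'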
filterCrashes: function(filter) {
this.filter = filter;
store.set("countly_crashfilter", filter);
$("#" + this.filter).addClass("selected").addClass("active");
this.dtable.fnDraw();
},
pageScripts: function() {
var self = this;
$(".crashes-show-switch").unbind("click");
$(".crashes-show-switch").removeClass("selected");
for (var i in this.showOnGraph) {
if (this.showOnGraph[i]) {
$(".crashes-show-switch." + i).addClass("selected");
}
}
$(".crashes-show-switch").on("click", function() {
if ($(this).hasClass("selected")) {
self.showOnGraph[$(this).data("type")] = false;
}
else {
self.showOnGraph[$(this).data("type")] = true;
}
$(this).toggleClass("selected");
self.refresh();
});
if (this.curMetric === "cr-session") {
$("#data-selector").css("display", "block");
}
else {
$("#data-selector").css("display", "none");
}
},
switchMetric: function() {
var chartData = countlyCrashes.getChartData(this.curMetric, this.metrics[this.curMetric], this.showOnGraph);
countlyCommon.drawTimeGraph(chartData.chartDP, "#dashboard-graph");
this.pageScripts();
},
getFilters: function(currEvent) {
var self = this;
var usedFilters = {};
$(".query:visible").each(function() {
var filterType = $(this).find(".filter-name .text").data("type");
// number and date types can be used multiple times for range queries
if (filterType !== "n" && filterType !== "d") {
usedFilters[$(this).find(".filter-name .text").data("value")] = true;
}
});
var defaultFilters = countlySegmentation.getFilters(currEvent),
allFilters = "";
var filters = [];
for (var i = 0; i < defaultFilters.length; i++) {
if (defaultFilters[i].id) {
if (self.convertFilter[defaultFilters[i].id]) {
filters.push(defaultFilters[i]);
}
}
}
var add = {
"is_new": jQuery.i18n.map["crashes.new-crashes"],
"is_resolved": jQuery.i18n.map["crashes.resolved"],
"is_hidden": jQuery.i18n.map["crashes.hidden"],
"is_renewed": jQuery.i18n.map["crashes.renew-crashes"],
"reports": jQuery.i18n.map["crashes.reports"],
"users": jQuery.i18n.map["crashes.affected-users"],
"ram_min": jQuery.i18n.map["crashes.ram"] + " " + jQuery.i18n.map["crashes.min"].toLowerCase(),
"ram_max": jQuery.i18n.map["crashes.ram"] + " " + jQuery.i18n.map["crashes.max"].toLowerCase(),
"bat_min": jQuery.i18n.map["crashes.battery"] + " " + jQuery.i18n.map["crashes.min"].toLowerCase(),
"bat_max": jQuery.i18n.map["crashes.battery"] + " " + jQuery.i18n.map["crashes.max"].toLowerCase(),
"disk_min": jQuery.i18n.map["crashes.disk"] + " " + jQuery.i18n.map["crashes.min"].toLowerCase(),
"disk_max": jQuery.i18n.map["crashes.disk"] + " " + jQuery.i18n.map["crashes.max"].toLowerCase(),
"run_min": jQuery.i18n.map["crashes.run"] + " " + jQuery.i18n.map["crashes.min"].toLowerCase(),
"run_max": jQuery.i18n.map["crashes.run"] + " " + jQuery.i18n.map["crashes.max"].toLowerCase()
};
for (var addKey in add) {
filters.push({id: addKey, name: add[addKey], type: (addKey.indexOf("is_") === 0) ? "l" : "n"});
}
if (filters.length === 0) {
CountlyHelpers.alert(jQuery.i18n.map["drill.no-filters"], "black");
}
for (i = 0; i < filters.length; i++) {
if (typeof filters[i].id !== "undefined") {
if (usedFilters[filters[i].id] === true) {
continue;
}
var tmpItem = $("<div>");
tmpItem.addClass("item");
tmpItem.attr("data-type", filters[i].type);
tmpItem.attr("data-value", filters[i].id);
tmpItem.text(filters[i].name);
allFilters += tmpItem.prop('outerHTML');
}
else {
var tmpItemWithFilterName = $("<div>");
tmpItemWithFilterName.addClass("group");
tmpItemWithFilterName.text(filters[i].name);
allFilters += tmpItemWithFilterName.prop('outerHTML');
}
}
return allFilters;
},
setUpFilters: function(elem) {
var rootHTML = $(elem).parents(".query").find(".filter-value .select-items>div");
if (this.convertFilter[$(elem).data("value")] && this.convertFilter[$(elem).data("value")].type === "boolsegment") {
this.setUpFilterValues(rootHTML, ["yes", "no"], ["yes", "no"]);
}
else if (this.convertFilter[$(elem).data("value")] && this.convertFilter[$(elem).data("value")].type === "booltype") {
this.setUpFilterValues(rootHTML, [true, false], ["yes", "no"]);
}
else {
this.setUpFilterValues(rootHTML, countlySegmentation.getFilterValues($(elem).data("value")), countlySegmentation.getFilterNames($(elem).data("value")));
}
},
generateFilter: function(filterObj, filterObjTypes) {
var self = this;
var dbFilter = {};
for (var prop in filterObj) {
var filter = (self.convertFilter[prop]) ? self.convertFilter[prop].prop : prop.replace("sg.", "");
for (var i = 0; i < filterObj[prop].length; i++) {
if (_.isObject(filterObj[prop][i])) {
dbFilter[filter] = {};
for (var tmpFilter in filterObj[prop][i]) {
dbFilter[filter][tmpFilter] = filterObj[prop][i][tmpFilter];
}
}
else if (filterObjTypes[prop][i] === "!=") {
if (!self.convertFilter[prop] || self.convertFilter[prop].type === "segment" || self.convertFilter[prop].type === "boolsegment") {
if (filter === "os_version") {
filterObj[prop][i] = countlyDeviceDetails.getCleanVersion(filterObj[prop][i]);
}
dbFilter[filter + "." + filterObj[prop][i]] = {$exists: false};
}
else if (self.convertFilter[prop].type === "booltype") {
if (filterObj[prop][i] === "true") {
dbFilter[filter] = {$ne: true};
}
else {
dbFilter[filter] = {$eq: true};
}
}
else {
dbFilter[filter] = {};
if (!dbFilter[filter].$nin) {
dbFilter[filter].$nin = [];
}
dbFilter[filter].$nin.push(filterObj[prop][i]);
}
}
else {
if (!self.convertFilter[prop] || self.convertFilter[prop].type === "segment" || self.convertFilter[prop].type === "boolsegment") {
if (filter === "os_version") {
filterObj[prop][i] = countlyDeviceDetails.getCleanVersion(filterObj[prop][i]);
}
dbFilter[filter + "." + filterObj[prop][i]] = {$exists: true};
}
else if (self.convertFilter[prop].type === "booltype") {
if (filterObj[prop][i] === "true") {
dbFilter[filter] = {$eq: true};
}
else {
dbFilter[filter] = {$ne: true};
}
}
else {
dbFilter[filter] = {};
if (!dbFilter[filter].$in) {
dbFilter[filter].$in = [];
}
dbFilter[filter].$in.push(filterObj[prop][i]);
}
}
}
}
return dbFilter;
},
loadAndRefresh: function() {
var filter = {};
for (var i in this.filterObj) {
filter[i.replace("up.", "")] = this.filterObj[i];
}
this._query = filter;
app.navigate("/crashes/filter/" + JSON.stringify(filter), false);
this.dtable.fnPageChange(0);
this.refresh(true);
}
});
window.CrashgroupView = countlyView.extend({
initialize: function() {
this.loaded = true;
},
beforeRender: function() {
this.old = false;
countlyCrashes.reset();
if (this.template) {
return $.when(countlyCrashes.initialize(this.id)).then(function() {});
}
else {
var self = this;
return $.when($.get(countlyGlobal.path + '/crashes/templates/crashgroup.html', function(src) {
self.template = Handlebars.compile(src);
}), countlyCrashes.initialize(this.id)).then(function() {});
}
},
renderCommon: function(isRefresh) {
var url = location.protocol + '//' + location.hostname + (location.port ? ':' + location.port : '') + countlyGlobal.path + "/crash/";
var crashData = countlyCrashes.getGroupData();
if (crashData.url) {
url += crashData.url;
}
crashData.latest_version = crashData.latest_version.replace(/:/g, '.');
if (this.old) {
crashData.reserved_error = crashData.reserved_error || crashData.error;
crashData.reserved_threads = crashData.reserved_threads || crashData.threads;
crashData.error = crashData.olderror || crashData.error;
crashData.threads = crashData.oldthreads || crashData.threads;
}
else {
crashData.error = crashData.reserved_error || crashData.error;
crashData.threads = crashData.reserved_threads || crashData.threads;
}
this.comments = {};
if (typeof marked !== "undefined") {
marked.setOptions({
breaks: true
});
}
if (crashData.comments) {
for (var i = 0; i < crashData.comments.length; i++) {
this.comments[crashData.comments[i]._id] = crashData.comments[i].text;
if (typeof marked !== "undefined") {
crashData.comments[i].html = marked(crashData.comments[i].text);
}
else {
crashData.comments[i].html = crashData.comments[i].text;
}
}
}
if (!isRefresh) {
this.metrics = countlyCrashes.getMetrics();
for (var k in this.metrics) {
for (var j in this.metrics[k]) {
this.curMetric = j;
this.curTitle = this.metrics[k][j];
break;
}
break;
}
}
var ranges = ["ram", "disk", "bat", "run"];
for (var r = 0; r < ranges.length; r++) {
if (!crashData[ranges[r]]) {
crashData[ranges[r]] = {min: 0, max: 0, total: 0, count: 1};
}
}
this.templateData = {
"page-title": jQuery.i18n.map["crashes.crashes-by"],
"note-placeholder": jQuery.i18n.map["crashes.editnote"],
"hasPermission": (countlyGlobal.member.global_admin || countlyGlobal.admin_apps[countlyCommon.ACTIVE_APP_ID]) ? true : false,
"url": url,
"data": crashData,
"error": crashData.name.substr(0, 80),
"fatal": (crashData.nonfatal) ? jQuery.i18n.map["crashes.nonfatal"] : jQuery.i18n.map["crashes.fatal"],
"active-segmentation": this.curTitle,
"segmentations": this.metrics,
"big-numbers": {
"class": "four-column",
"items": [
{
"title": jQuery.i18n.map["crashes.platform"],
"total": (crashData.not_os_specific) ? jQuery.i18n.map["crashes.varies"] : crashData.os,
"help": "crashes.help-platform"
},
{
"title": jQuery.i18n.map["crashes.reports"],
"total": crashData.reports,
"help": "crashes.help-reports"
},
{
"title": jQuery.i18n.map["crashes.affected-users"],
"total": crashData.users + " (" + ((crashData.users / crashData.total) * 100).toFixed(2) + "%)",
"help": "crashes.help-affected"
},
{
"title": jQuery.i18n.map["crashes.highest-version"],
"total": crashData.latest_version.replace(/:/g, '.'),
"help": "crashes.help-app-version"
}
]
}
};
if (countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].type !== "web") {
this.templateData.ranges = [
{
"title": jQuery.i18n.map["crashes.ram"],
"icon": "memory",
"help": "crashes.help-ram",
"min": crashData.ram.min + " %",
"max": crashData.ram.max + " %",
"avg": (crashData.ram.total / crashData.ram.count).toFixed(2) + " %"
},
{
"title": jQuery.i18n.map["crashes.disk"],
"icon": "sd_storage",
"help": "crashes.help-disk",
"min": crashData.disk.min + " %",
"max": crashData.disk.max + " %",
"avg": (crashData.disk.total / crashData.disk.count).toFixed(2) + " %"
},
{
"title": jQuery.i18n.map["crashes.battery"],
"icon": "battery_full",
"help": "crashes.help-battery",
"min": crashData.bat.min + " %",
"max": crashData.bat.max + " %",
"avg": (crashData.bat.total / crashData.bat.count).toFixed(2) + " %"
},
{
"title": jQuery.i18n.map["crashes.run"],
"icon": "play_arrow",
"help": "crashes.help-run",
"min": countlyCommon.timeString(crashData.run.min / 60),
"max": countlyCommon.timeString(crashData.run.max / 60),
"avg": countlyCommon.timeString((crashData.run.total / crashData.run.count) / 60)
}
];
this.templateData.bars = [
{
"title": jQuery.i18n.map["crashes.root"],
"data": countlyCrashes.getBoolBars("root"),
"help": "crashes.help-root"
},
{
"title": jQuery.i18n.map["crashes.online"],
"data": countlyCrashes.getBoolBars("online"),
"help": "crashes.help-online"
},
{
"title": jQuery.i18n.map["crashes.muted"],
"data": countlyCrashes.getBoolBars("muted"),
"help": "crashes.help-muted"
},
{
"title": jQuery.i18n.map["crashes.background"],
"data": countlyCrashes.getBoolBars("background"),
"help": "crashes.help-background"
}
];
}
if (crashData.loss) {
this.templateData.loss = true;
this.templateData["big-numbers"].items.push({
"title": jQuery.i18n.map["crashes.loss"],
"total": parseFloat(crashData.loss).toFixed(2),
"help": "crashes.help-loss"
});
}
if (this.templateData["big-numbers"].items.length === 3) {
this.templateData["big-numbers"].class = "three-column";
}
else if (this.templateData["big-numbers"].items.length === 5) {
this.templateData["big-numbers"].class = "five-column";
}
if (crashData.session && this.templateData.ranges) {
this.templateData.frequency = true;
this.templateData.ranges.push({
"title": jQuery.i18n.map["crashes.sessions"],
"icon": "repeat",
"help": "crashes.help-frequency",
"min": crashData.session.min,
"max": crashData.session.max,
"avg": ((Math.round(crashData.session.total / crashData.session.count) * 100) / 100)
});
}
var changeResolveStateText = function() {
if (crashData.is_resolving) {
$("#resolve-state").text(jQuery.i18n.map["crashes.resolving"]);
$("#resolve-state").attr('class', 'resolving-text');
$("#crash-resolving-button").hide();
$("#crash-resolve-button").show();
$("#crash-unresolve-button").show();
}
else if (crashData.is_resolved) {
$("#resolve-state").text(jQuery.i18n.map["crashes.resolved"] + "(" + crashData.resolved_version + ")");
$("#resolve-state").attr('class', 'resolved-text');
$("#crash-resolving-button").show();
$("#crash-resolve-button").hide();
$("#crash-unresolve-button").show();
}
else {
$("#resolve-state").text(jQuery.i18n.map["crashes.unresolved"]);
$("#resolve-state").attr('class', 'unresolved-text');
$("#crash-resolving-button").show();
$("#crash-resolve-button").show();
$("#crash-unresolve-button").hide();
}
if (crashData.is_hidden) {
$("#crash-hide-button").hide();
$("#crash-show-button").show();
}
else {
$("#crash-hide-button").show();
$("#crash-show-button").hide();
}
app.localize();
};
var self = this;
if (!isRefresh) {
$(this.el).html(this.template(this.templateData));
changeResolveStateText(crashData);
$('#crash-notes').click(function() {
$('#tabs').addClass('hide-message');
});
$('#crash-errors').click(function() {
$('#tabs').removeClass('hide-message');
});
if (typeof addDrill !== "undefined") {
$("#content .widget:first-child .widget-header>.right").append(addDrill("sg.crash", this.id, "[CLY]_crash"));
}
$(".back-link").click(function(e) {
e.preventDefault();
app.back("/crashes");
return false;
});
if (crashData.comments) {
var count = 0;
for (var n = 0; n < crashData.comments.length; n++) {
if (!crashData.comments[n].is_owner && typeof store.get("countly_" + this.id + "_" + crashData.comments[n]._id) === "undefined") {
count++;
}
}
if (count > 0) {
$(".crash-comment-count span").text(count + "");
$(".crash-comment-count").show();
}
}
$(".segmentation-option").on("click", function() {
self.switchMetric($(this).data("value"));
});
this.dtable = $('.d-table').dataTable($.extend({}, $.fn.dataTable.defaults, {
"aaSorting": [[1, 'desc']],
"aaData": crashData.data || [],
"fnRowCallback": function(nRow, aData) {
$(nRow).attr("id", aData._id);
},
"aoColumns": [
CountlyHelpers.expandRowIconColumn(),
{
"mData": function(row, type) {
if (type === "display") {
return countlyCommon.formatTimeAgo(row.ts);
}
else {
return row.ts;
}
},
"sType": "format-ago",
"sTitle": jQuery.i18n.map["crashes.crashed"]
},
{
"mData": function(row) {
var str = row.os;
if (row.os_version) {
str += " " + row.os_version.replace(/:/g, '.');
} return str;
},
"sType": "string",
"sTitle": jQuery.i18n.map["crashes.os_version"]
},
{
"mData": function(row) {
var str = ""; if (row.manufacture) {
str += row.manufacture + " ";
} if (row.device) {
str += countlyDeviceList[row.device] || row.device;
} return str;
},
"sType": "string",
"sTitle": jQuery.i18n.map["crashes.device"]
},
{
"mData": function(row) {
return row.app_version.replace(/:/g, '.');
},
"sType": "string",
"sTitle": jQuery.i18n.map["crashes.app_version"]
}
]
}));
this.dtable.stickyTableHeaders();
/*$('.crash-reports tbody').on("click", "tr", function (){
var id = $(this).attr("id");
if(id)
window.location.hash = window.location.hash.toString()+"/"+id;
});*/
CountlyHelpers.expandRows(this.dtable, this.formatData, this);
countlyCommon.drawGraph(crashData.dp[this.curMetric], "#dashboard-graph", "bar");
$("#crash-share-button").click(function() {
if ($(this).hasClass("active")) {
$(this).removeClass("active");
$("#crash-share-list").hide();
}
else {
$(this).addClass("active");
$("#crash-share-list").show();
}
});
$("#share-crash-done").click(function() {
$("#crash-share-button").removeClass("active");
$("#crash-share-list").hide();
});
if (crashData.is_public) {
$('#crash-share-public').attr('checked', true);
$(".crash-share").show();
}
else {
$('#crash-share-public').attr('checked', false);
$(".crash-share").hide();
}
if (crashData.share) {
for (var c in crashData.share) {
if (crashData.share[c]) {
$('#crash-share-' + c).attr('checked', true);
}
}
}
$('.crash-share input[type=checkbox]').change(function() {
var opts = {};
$('.crash-share input[type=checkbox]').each(function() {
opts[this.id.replace("crash-share-", "")] = ($(this).is(":checked")) ? 1 : 0;
});
countlyCrashes.modifyShare(crashData._id, opts);
});
$('#crash-share-public').change(function() {
if ($(this).is(":checked")) {
countlyCrashes.share(crashData._id, function(data) {
if (data) {
app.recordEvent({
"key": "crash-share",
"count": 1,
"segmentation": {}
});
$(".crash-share").show();
}
else {
CountlyHelpers.alert(jQuery.i18n.map["crashes.try-later"], "red");
}
});
}
else {
countlyCrashes.unshare(crashData._id, function(data) {
if (data) {
$(".crash-share").hide();
}
else {
CountlyHelpers.alert(jQuery.i18n.map["crashes.try-later"], "red");
}
});
}
});
this.tabs = $("#tabs").tabs({
select: function() {
$(".flot-text").hide().show(0);
}
});
this.tabs.on("tabsshow", function(event, ui) {
if (ui && ui.panel) {
var id = $(ui.panel).attr("id") + "";
if (id === "notes") {
$(ui.panel).closest("#tabs").find(".error_menu").hide();
}
else {
$(ui.panel).closest("#tabs").find(".error_menu").show();
}
}
});
$("#crash-notes").click(function() {
var crashNoteData = countlyCrashes.getGroupData();
if (crashNoteData.comments) {
for (var a = 0; a < crashNoteData.comments.length; a++) {
store.set("countly_" + self.id + "_" + crashNoteData.comments[a]._id, true);
}
$(".crash-comment-count").hide();
}
});
$("#add_comment").click(function() {
var comment = {};
comment.time = new Date().getTime();
comment.text = $("#comment").val();
countlyCrashes.addComment(crashData._id, comment, function(data) {
if (data) {
self.refresh();
$("#comment").val("");
}
else {
CountlyHelpers.alert(jQuery.i18n.map["crashes.try-later"], "red");
}
});
});
$("#notes").on("click", ".crash-comment-edit", function() {
var container = $(this).parents(".comment");
if (!container.find("#comment_edit").length) {
var comment_id = $(this).data("id");
container.find(".text").hide();
container.append($("#comment_edit").clone());
container.find("textarea").val(self.comments[comment_id]);
container.find(".cancel_comment").click(function() {
container.find("#comment_edit").remove();
container.find(".text").show();
});
container.find(".edit_comment").click(function() {
var comment = {};
comment.time = new Date().getTime();
comment.text = container.find("#edited_comment").val();
comment.comment_id = comment_id;
countlyCrashes.editComment(crashData._id, comment, function(data) {
if (data) {
self.refresh();
container.find("#comment_edit").remove();
container.find(".text").show();
}
else {
CountlyHelpers.alert(jQuery.i18n.map["crashes.try-later"], "red");
}
});
});
}
});
$("#notes").on("click", ".crash-comment-delete", function() {
var ob = {};
ob.comment_id = $(this).data("id");
CountlyHelpers.confirm(jQuery.i18n.map["crashes.confirm-comment-delete"], "red", function(result) {
if (!result) {
return true;
}
countlyCrashes.deleteComment(crashData._id, ob, function(data) {
if (data) {
$("#comment_" + ob.comment_id).remove();
self.refresh();
}
else {
CountlyHelpers.alert(jQuery.i18n.map["crashes.try-later"], "red");
}
});
});
});
$("#expand-crash").on("click", function() {
$(this).toggleClass("active");
$("#expandable").toggleClass("collapsed");
});
var errorHeight = $("#expandable").find("code").outerHeight();
if (errorHeight < 200) {
$("#expandable").removeClass("collapsed");
$("#expand-crash").hide();
}
else {
$("#expandable").addClass("collapsed");
$("#expand-crash").show();
}
$("#threads").on("click", ".expand-row-icon", function() {
var el = $(this);
if (el.hasClass("expand-row-icon")) {
var thread = el.closest(".thread");
var id = parseInt(thread.attr("data-id"), 10);
if (!isNaN(id)) {
var code = thread.find("code");
if (code.hasClass("short_code")) {
el.text("keyboard_arrow_up");
code.html(crashData.threads[id].error);
}
else {
el.text("keyboard_arrow_down");
code.html(crashData.threads[id].short_error);
}
code.toggleClass("short_code");
}
}
});
$("#expand-thread").on("click", function() {
$(this).toggleClass("active");
$("#expandable_thread").toggleClass("collapsed");
});
$("document").ready(function() {
self.highlightStacktrace(crashData.error, function(highlighted) {
$("#error pre code").html(highlighted);
});
});
$("#crashgroup-manipulation-trigger").off("click").on("click", function() {
$("#crashgroup-manipulation-menu").toggle();
var isHidden = $("#crashgroup-manipulation-menu").is(":hidden");
$("#crashgroup-manipulation-menu").css("opacity", isHidden ? 0 : 1);
if (isHidden) {
$("#crashgroup-manipulation-trigger i").removeClass("ion-chevron-up").addClass("ion-chevron-down");
}
else {
$("#crashgroup-manipulation-trigger i").removeClass("ion-chevron-down").addClass("ion-chevron-up");
}
});
$(document).on("click", function(e) {
var $menu = $("#crashgroup-manipulation-menu");
var $trigger = $("#crashgroup-manipulation-trigger");
if (!$trigger.is(e.target) && $trigger.has(e.target).length === 0 && !$menu.is(e.target) && $menu.has(e.target).length === 0) {
$("#crashgroup-manipulation-menu").css("opacity", 0);
$("#crashgroup-manipulation-menu").hide();
}
if ($("#crashgroup-manipulation-menu").is(":hidden")) {
$("#crashgroup-manipulation-trigger i").removeClass("ion-chevron-up").addClass("ion-chevron-down");
}
else {
$("#crashgroup-manipulation-trigger i").removeClass("ion-chevron-down").addClass("ion-chevron-up");
}
});
$("#crashgroup-manipulation-menu .item.crash-manipulation-button").off("click").on("click", function(event) {
switch ($(event.target).attr("id")) {
case "crash-resolve-button":
countlyCrashes.markResolve(crashData._id, function(version) {
if (version) {
crashData.is_resolved = true;
crashData.is_resolving = false;
crashData.resolved_version = version;
changeResolveStateText(crashData);
}
else {
CountlyHelpers.alert(jQuery.i18n.map["crashes.try-later"], "red");
}
});
break;
case "crash-resolving-button":
countlyCrashes.resolving([crashData._id], function(data) {
if (!data) {
CountlyHelpers.alert(jQuery.i18n.map["crashes.try-later"], "red");
}
else {
crashData.is_resolving = true;
changeResolveStateText(crashData);
}
});
break;
case "crash-unresolve-button":
countlyCrashes.markUnresolve(crashData._id, function(data) {
if (data) {
crashData.is_resolved = false;
crashData.is_resolving = false;
changeResolveStateText(crashData);
}
else {
CountlyHelpers.alert(jQuery.i18n.map["crashes.try-later"], "red");
}
});
break;
case "crash-hide-button":
countlyCrashes.hide(crashData._id, function(data) {
if (data) {
crashData.is_hidden = true;
changeResolveStateText(crashData);
}
else {
CountlyHelpers.alert(jQuery.i18n.map["crashes.try-later"], "red");
}
});
break;
case "crash-show-button":
countlyCrashes.show(crashData._id, function(data) {
if (data) {
crashData.is_hidden = false;
changeResolveStateText(crashData);
}
else {
CountlyHelpers.alert(jQuery.i18n.map["crashes.try-later"], "red");
}
});
break;
case "crash-delete-button":
CountlyHelpers.confirm(jQuery.i18n.map["crashes.confirm-delete"], "red", function(result) {
if (!result) {
return true;
}
countlyCrashes.del(crashData._id, function(data) {
if (data) {
if (data.result === "Success") {
window.location.hash = "/crashes";
}
else {
CountlyHelpers.alert(data.result, "red");
}
}
else {
CountlyHelpers.alert(jQuery.i18n.map["crashes.try-later"], "red");
}
});
});
break;
}
});
$(".routename-crashgroup").off("click", ".cly-button-menu-trigger").on("click", ".cly-button-menu-trigger", function(event) {
var menu = $(this).closest(".error-details-menu");
event.stopPropagation();
$(event.target).toggleClass("active");
if ($(event.target).hasClass("active")) {
menu.find('.cly-button-menu').focus();
}
else {
$(event.target).removeClass("active");
}
});
$(".routename-crashgroup").off("blur", ".cly-button-menu").on("blur", ".cly-button-menu", function() {
$(this).closest(".error-details-menu").find(".cly-button-menu-trigger").removeClass("active");
});
$(".routename-crashgroup").on("click", ".error-download-stracktrace", function() {
var menu = $(this).closest(".error-details-menu");
menu.find(".cly-button-menu-trigger").toggleClass("active");
var id = menu.attr("data-id");
if (id) {
var win = window.open(countlyCommon.API_PARTS.data.r + "/crashes/download_stacktrace?auth_token=" + countlyGlobal.auth_token + "&app_id=" + countlyCommon.ACTIVE_APP_ID + "&crash_id=" + id, '_blank');
win.focus();
}
});
$(".routename-crashgroup").on("click", ".error-download-binary", function() {
var menu = $(this).closest(".error-details-menu");
menu.find(".cly-button-menu-trigger").toggleClass("active");
var id = menu.attr("data-id");
if (id) {
var win = window.open(countlyCommon.API_PARTS.data.r + "/crashes/download_binary?auth_token=" + countlyGlobal.auth_token + "&app_id=" + countlyCommon.ACTIVE_APP_ID + "&crash_id=" + id, '_blank');
win.focus();
}
});
if (crashData.native_cpp) {
$(".error-download-binary").show();
}
}
},
highlightStacktrace: function(code, callback) {
// create virtual element for clean escapes
var span = document.createElement('span');
span.innerHTML = code;
code = span.innerText;
var lines = '';
// generate lines
var num = code.split(/\r\n|\n|\r/).length;
for (var i = 0; i < num; i++) {
lines += '<span>' + (i + 1) + '</span>';
}
if (typeof Worker !== "undefined") {
var worker = new Worker(countlyGlobal.path + '/javascripts/utils/highlight/highlight.worker.js');
worker.onmessage = function(event) {
worker.terminate();
worker = undefined;
callback('<span class="line-number">' + lines + '</span>' + event.data + '<span class="cl"></span>');
};
worker.postMessage(code);
}
else if (typeof hljs !== "undefined") {
callback('<span class="line-number">' + lines + '</span>' + hljs.highlightBlock(code) + '<span class="cl"></span>');
}
},
refresh: function(force) {
var self = this;
if (this.loaded || force) {
this.loaded = false;
$.when(countlyCrashes.initialize(this.id, true)).then(function() {
self.loaded = true;
if (app.activeView !== self) {
return false;
}
self.resetData();
});
}
},
resetData: function() {
var self = this;
self.renderCommon(true);
var newPage = $("<div>" + self.template(self.templateData) + "</div>");
$("#big-numbers-container").replaceWith(newPage.find("#big-numbers-container"));
$(".grouped-numbers").replaceWith(newPage.find(".grouped-numbers"));
$(".crash-bars").replaceWith(newPage.find(".crash-bars"));
$("#error-title").replaceWith(newPage.find("#error-title"));
var crashData = countlyCrashes.getGroupData();
if (self.old) {
crashData.reserved_error = crashData.reserved_error || crashData.error;
crashData.reserved_threads = crashData.reserved_threads || crashData.threads;
crashData.error = crashData.olderror || crashData.error;
crashData.threads = crashData.oldthreads || crashData.threads;
}
else {
crashData.error = crashData.reserved_error || crashData.error;
crashData.threads = crashData.reserved_threads || crashData.threads;
}
self.highlightStacktrace(crashData.error, function(highlighted) {
$("#error pre code").html(highlighted);
var errorHeight = $("#expandable").find("code").outerHeight();
//self.redecorateStacktrace();
if (errorHeight < 200) {
$("#expandable").removeClass("collapsed");
$("#expand-crash").hide();
}
else {
if ($('#expand-crash:visible').length === 0) {
$("#expandable").addClass("collapsed");
$("#expand-crash").show();
}
}
});
if (crashData.threads) {
var opened_threads = [];
$(".threads-list code").each(function() {
var code = $(this);
if (!code.hasClass("short_code")) {
var id = parseInt(code.closest(".thread").attr("data-id"), 10);
if (!isNaN(id)) {
opened_threads.push(id);
}
}
});
$(".threads-list").replaceWith(newPage.find(".threads-list"));
var thread;
for (var j = 0; j < opened_threads.length; j++) {
thread = $('.thread[data-id="' + opened_threads[j] + '"]');
thread.find("code").removeClass("short_code").html(crashData.threads[opened_threads[j]].error);
thread.find(".expand-row-icon").text("keyboard_arrow_up");
}
}
if (crashData.comments) {
var container = $("#comments");
var comment, parent;
var count = 0;
for (var i = 0; i < crashData.comments.length; i++) {
self.comments[crashData.comments[i]._id] = crashData.comments[i].text;
comment = crashData.comments[i];
if (container.find("#comment_" + comment._id).length) {
parent = container.find("#comment_" + comment._id);
parent.find(".text").html(newPage.find("#comment_" + comment._id + " .text").html());
parent.find(".author").html(newPage.find("#comment_" + comment._id + " .author").html());
parent.find(".time").html(newPage.find("#comment_" + comment._id + " .time").html());
}
else {
container.append(newPage.find("#comment_" + comment._id));
}
if (!crashData.comments[i].is_owner && typeof store.get("countly_" + self.id + "_" + comment._id) === "undefined") {
count++;
}
}
if (count > 0) {
$(".crash-comment-count span").text(count + "");
$(".crash-comment-count").show();
}
}
var ids = self.dtable.find(".cly-button-menu-trigger.active").map(function() {
return $(this).closest(".error-details-menu").attr("data-id");
});
CountlyHelpers.refreshTable(self.dtable, crashData.data);
countlyCommon.drawGraph(crashData.dp[self.curMetric], "#dashboard-graph", "bar");
CountlyHelpers.reopenRows(self.dtable, self.formatData, self);
for (var k = 0; k < ids.length; k++) {
$('.error-details-menu[data-id="' + ids[k] + '"]').find(".cly-button-menu-trigger").addClass("active");
}
app.localize();
},
formatData: function(data, self) {
// `data` is the original data object for the row
var str = '';
if (data) {
str += '<div class="datatablesubrow">' +
'<div class="error_menu">' +
'<div class="error-details-menu" data-id="' + data._id + '">' +
'<a class="right icon-button cly-button-menu-trigger"></a>' +
'<div class="cly-button-menu" tabindex="100">' +
'<div class="error-download-stracktrace item">' + jQuery.i18n.map["crashes.download-stacktrace"] + '</div>';
if (data.native_cpp) {
str += '<div class="error-download-binary item">' + jQuery.i18n.map["crashes.download-binary"] + '</div>';
}
str += '</div>' +
'</div>' +
'</div>' +
'<table>' +
'<tr>' +
'<td class="text-left">' + jQuery.i18n.map["crashes.build_info"] + '</td>' +
'<td class="text-left">' + jQuery.i18n.map["crashes.device"] + '</td>' +
'<td class="text-left">' + jQuery.i18n.map["crashes.state"] + '</td>';
if (data.custom) {
str += '<td class="text-left">' + jQuery.i18n.map["crashes.custom"] + '</td>';
}
str += '</tr>' +
'<tr>' +
'<td class="text-left">' + jQuery.i18n.map["crashes.app_version"] + ": " + data.app_version.replace(/:/g, '.');
if (data.os === 'iOS') {
str += '<br>' + jQuery.i18n.map["crashes.build_id"] + ": " + data.app_build;
}
str += '</td>' +
'<td class="text-left">' + data.os + ' ';
if (data.os_version) {
str += data.os_version.replace(/:/g, '.') + '<br/>';
}
if (data.manufacture) {
str += data.manufacture + ' ';
}
if (data.device) {
str += countlyDeviceList[data.device] || data.device;
}
if (data.cpu) {
str += ' (' + data.cpu + ')';
}
str += '<br/>';
if (data.opengl) {
str += jQuery.i18n.map["crashes.opengl"] + ': ' + data.opengl + '<br/>';
}
if (data.resolution) {
str += jQuery.i18n.map["crashes.resolution"] + ': ' + data.resolution + '<br/>';
}
str += jQuery.i18n.map["crashes.root"] + ': ' + ((data.root) ? "yes" : "no") + '<br/>';
str += '</td>' +
'<td class="text-left">';
if (data.ram_current && data.ram_total) {
str += jQuery.i18n.map["crashes.ram"] + ': ' + data.ram_current + '/' + data.ram_total + ' Mb<br/>';
}
if (data.disk_current && data.disk_total) {
str += jQuery.i18n.map["crashes.disk"] + ': ' + data.disk_current + '/' + data.disk_total + ' Mb<br/>';
}
if (data.bat_current) {
str += jQuery.i18n.map["crashes.battery"] + ': ' + data.bat_current + '%<br/>';
}
if (data.run) {
str += jQuery.i18n.map["crashes.run"] + ': ' + countlyCommon.timeString(data.run / 60) + '<br/>';
}
if (data.session) {
str += jQuery.i18n.map["crashes.after"] + ' ' + data.session + ' ' + jQuery.i18n.map["crashes.sessions"] + '<br/>';
}
else {
str += jQuery.i18n.map["crashes.frequency"] + ': ' + jQuery.i18n.map["crashes.first-crash"] + '<br/>';
}
str += jQuery.i18n.map["crashes.online"] + ": " + ((data.online) ? "yes" : "no") + "<br/>";
str += jQuery.i18n.map["crashes.background"] + ": " + ((data.background) ? "yes" : "no") + "<br/>";
str += jQuery.i18n.map["crashes.muted"] + ": " + ((data.muted) ? "yes" : "no") + "<br/>";
str += '</td>';
var span = 3;
if (data.custom) {
str += '<td class="text-left">';
for (var i in data.custom) {
str += i + ': ' + data.custom[i] + '<br/>';
}
str += '</td>';
span = 4;
}
str += '</tr>';
if (data.threads) {
if (self.old) {
data.reserved_threads = data.reserved_threads || data.threads;
data.threads = data.oldthreads || data.threads;
}
else {
data.threads = data.reserved_threads || data.threads;
}
str += '<tr class="header">';
str += '<td>' + jQuery.i18n.map["crashes.all-threads"] + '</td>';
str += '<td colspan="' + (span - 1) + '">';
str += jQuery.i18n.map["crashes.stacktrace"];
str += '</td>';
str += '</tr>';
for (var j = 0; j < data.threads.length; j++) {
str += '<tr class="thread" data-id="' + data.threads[j].id + '">';
str += '<td class="thread-name"><p>' + data.threads[j].name + '</p>';
if (data.threads[j].crashed) {
str += '<span data-localize="crashes.crashed" class="tag">' + jQuery.i18n.map["crashes.crashed"] + '</span>';
}
str += '</td>';
str += '<td colspan="' + (span - 1) + '">';
str += '<pre><code class="short_code">' + data.threads[j].error + '</code></pre>';
str += '</td>';
str += '</tr>';
}
}
else {
if (self.old) {
data.reserved_error = data.reserved_error || data.error;
data.error = data.olderror || data.error;
}
else {
data.error = data.reserved_error || data.error;
}
str += '<tr class="header">';
str += '<td colspan="' + span + '">';
str += jQuery.i18n.map["crashes.stacktrace"];
str += '</td>';
str += '</tr>';
str += '<tr>' +
'<td colspan="' + span + '" class="stack-trace">';
str += '<pre>' + data.error + '</pre>';
str += '</td>';
str += '</tr>';
}
if (data.logs) {
str += '<tr class="header">' +
'<td colspan="' + span + '">' + jQuery.i18n.map["crashes.logs"] + '</td>' +
'</tr>';
str += '<tr>' +
'<td colspan="' + span + '">' +
'<p>' + jQuery.i18n.map["crashes.logs"] + '</p>' +
'<pre>' + data.logs + '</pre></td>' +
'</tr>';
}
str += '</table>' +
'</div>';
}
return str;
},
switchMetric: function(metric) {
this.curMetric = metric;
var crashData = countlyCrashes.getGroupData();
countlyCommon.drawGraph(crashData.dp[this.curMetric], "#dashboard-graph", "bar");
}
});
//register views
app.crashesView = new CrashesView();
app.crashgroupView = new CrashgroupView();
app.route('/crashes', 'crashes', function() {
this.crashesView._filter = false;
this.crashesView._query = null;
this.renderWhenReady(this.crashesView);
});
app.route('/crashes/filter/*query', 'userdata', function(query) {
try {
query = JSON.parse(query);
}
catch (ex) {
query = null;
}
this.crashesView._query = query;
this.crashesView._filter = true;
this.renderWhenReady(this.crashesView);
});
app.route('/crashes/:group', 'crashgroup', function(group) {
this.crashgroupView.id = group;
this.renderWhenReady(this.crashgroupView);
});
app.addPageScript("/drill#", function() {
var drillClone;
var self = app.drillView;
var record_crashes = countlyGlobal.record_crashes;
if (countlyGlobal.apps && countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID] && countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].plugins && countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].plugins.drill && typeof countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].plugins.drill.record_crashes !== "undefined") {
record_crashes = countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].plugins.drill.record_crashes;
}
if (record_crashes) {
$("#drill-types").append('<div id="drill-type-crashes" class="item"><div class="inner"><span class="icon crashes"><i class="material-icons">warning</i></span><span class="text">' + jQuery.i18n.map["crashes.title"] + '</span></div></div>');
$("#drill-type-crashes").on("click", function() {
if ($(this).hasClass("active")) {
return true;
}
$("#drill-types").find(".item").removeClass("active");
$(this).addClass("active");
$("#event-selector").hide();
$("#drill-no-event").fadeOut();
$("#segmentation-start").fadeOut().remove();
var currEvent = "[CLY]_crash";
self.graphType = "line";
self.graphVal = "times";
self.filterObj = {};
self.byVal = "";
self.drillChartDP = {};
self.drillChartData = {};
self.activeSegmentForTable = "";
countlySegmentation.reset();
$("#drill-navigation").find(".menu[data-open=table-view]").hide();
$.when(countlySegmentation.initialize(currEvent)).then(function() {
$("#drill").replaceWith(drillClone.clone(true));
self.adjustFilters();
self.draw(true, false);
});
});
setTimeout(function() {
drillClone = $("#drill").clone(true);
}, 0);
}
});
app.addPageScript("/users/#", function() {
if (app.activeView && app.activeView.tabs) {
app.activeView.tabs.tabs('add', '#usertab-crashes', jQuery.i18n.map["crashes.title"]);
app.activeView.tabs.tabs("refresh");
var userDetails = countlyUserdata.getUserdetails();
$("#usertab-crashes").append("<div class='widget-header'><div class='left'><div class='title'>" + jQuery.i18n.map["userdata.crashes"] + "</div></div></div><table data-view='crashesView' id='d-table-crashes' class='d-table sortable help-zone-vb' cellpadding='0' cellspacing='0'></table>");
app.activeView.dtablecrashes = $('#d-table-crashes').dataTable($.extend({}, $.fn.dataTable.defaults, {
"iDisplayLength": 30,
"aaSorting": [[ 2, "desc" ]],
"bServerSide": true,
"bFilter": false,
"sAjaxSource": countlyCommon.API_PARTS.data.r + "?api_key=" + countlyGlobal.member.api_key + "&app_id=" + countlyCommon.ACTIVE_APP_ID + "&method=user_crashes&uid=" + userDetails.uid,
"fnServerData": function(sSource, aoData, fnCallback) {
self.request = $.ajax({
"dataType": 'json',
"type": "POST",
"url": sSource,
"data": aoData,
"success": function(data) {
fnCallback(data);
}
});
},
"aoColumns": [
{
"mData": function(row) {
return countlyCrashes.getCrashName(row.group);
},
"sType": "numeric",
"sTitle": jQuery.i18n.map["crashes.error"],
"sClass": "break web-50",
"bSortable": false,
"sWidth": "45%"
},
{
"mData": function(row) {
return row.reports;
},
"sType": "numeric",
"sTitle": jQuery.i18n.map["crashes.reports"],
"sWidth": "20%"
},
{
"mData": function(row, type) {
if (type === "display") {
return (row.last === 0) ? jQuery.i18n.map["common.unknown"] + " <a class='extable-link table-link green' href='#/crashes/" + row.group + "' target='_blank'><i class='material-icons'>open_in_new</i></a><a class='extable-link table-link green' href='#/crashes/" + row.group + "' style='float: right;' >" + jQuery.i18n.map["common.view"] + "</a>" : countlyCommon.formatTimeAgo(row.last) + " <a class='extable-link table-link green' href='#/crashes/" + row.group + "' target='_blank'><i class='material-icons'>open_in_new</i></a><a class='extable-link table-link green' href='#/crashes/" + row.group + "' style='float: right;' >" + jQuery.i18n.map["common.view"] + "</a>";
}
else {
return row.last;
}
},
"sType": "numeric",
"sTitle": jQuery.i18n.map["crashes.last_time"]
}
]
}));
}
});
$(document).ready(function() {
if (typeof extendViewWithFilter === "function") {
extendViewWithFilter(app.crashesView);
}
app.addAppSwitchCallback(function(appId) {
if (app._isFirstLoad !== true) {
countlyCrashes.loadList(appId);
}
});
if (!production) {
CountlyHelpers.loadJS("crashes/javascripts/marked.min.js");
}
app.addMenu("improve", {code: "crashes", text: "crashes.title", icon: '<div class="logo ion-alert-circled"></div>', priority: 10});
app.addSubMenu("crashes", {code: "crash", url: "#/crashes", text: "sidebar.dashboard", priority: 10});
//check if configuration view exists
if (app.configurationsView) {
app.configurationsView.registerLabel("crashes", "crashes.title");
}
}); | 1 | 13,349 | There can be multiple subrows, so it is not a good idea to use `id` there, as id should identify single unique element. Instead you could just add the id of whole table or of whole crash plugin view | Countly-countly-server | js |
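A hedged sketch of the reviewer's suggestion above (the container id crash-reports-table and the helper names are illustrative, not from the plugin): keep per-row identity in a data-* attribute, which may legally repeat across expanded subrows, and reserve a unique id for the enclosing table or view.

// Sketch only: the wrapping table carries the one unique id; rows keep
// their identity in a repeatable data attribute instead of per-row ids.
function markRow(nRow, aData) {
    $(nRow).attr("data-crash-id", aData._id);
}
function findRow(crashId) {
    // lookups are scoped under the single table id
    return $("#crash-reports-table tr[data-crash-id='" + crashId + "']");
}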
@@ -284,7 +284,6 @@ class TestAnalyze(unittest.TestCase):
# We expect a failure archive to be in the failed directory.
failed_files = os.listdir(failed_dir)
self.assertEquals(len(failed_files), 1)
- self.assertIn("failure.c", failed_files[0])
fail_zip = os.path.join(failed_dir, failed_files[0])
| 1 | #
# -----------------------------------------------------------------------------
# The CodeChecker Infrastructure
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
# -----------------------------------------------------------------------------
"""
Test case for the CodeChecker analyze command's direct functionality.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import json
import os
import re
import shutil
import subprocess
import unittest
import zipfile
from libtest import env
class TestAnalyze(unittest.TestCase):
_ccClient = None
def setUp(self):
# TEST_WORKSPACE is automatically set by test package __init__.py .
self.test_workspace = os.environ['TEST_WORKSPACE']
test_class = self.__class__.__name__
print('Running ' + test_class + ' tests in ' + self.test_workspace)
# Get the CodeChecker cmd if needed for the tests.
self._codechecker_cmd = env.codechecker_cmd()
self.report_dir = os.path.join(self.test_workspace, "reports")
self.test_dir = os.path.join(os.path.dirname(__file__), 'test_files')
# Change working dir to testfile dir so CodeChecker can be run easily.
self.__old_pwd = os.getcwd()
os.chdir(self.test_dir)
self.missing_checker_regex = re.compile(
r"No checker\(s\) with these names was found")
def tearDown(self):
"""Restore environment after tests have ran."""
os.chdir(self.__old_pwd)
if os.path.isdir(self.report_dir):
shutil.rmtree(self.report_dir)
def __analyze_incremental(self, content_, build_json, reports_dir,
plist_count, failed_count):
"""
Helper function to test analyze incremental mode. It creates a file
with the given content, runs analyze on that file and checks the
counts of the plist and error files.
"""
source_file = os.path.join(self.test_workspace, "simple.cpp")
# Write content to the test file
with open(source_file, 'w') as source:
source.write(content_)
# Create analyze command.
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "-o", reports_dir]
# Run analyze
process = subprocess.Popen(
analyze_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.test_dir)
out, err = process.communicate()
print(out)
print(err)
errcode = process.returncode
self.assertEquals(errcode, 0)
# Check the count of the plist files.
plist_files = [os.path.join(reports_dir, filename)
for filename in os.listdir(reports_dir)
if filename.endswith('.plist')]
self.assertEquals(len(plist_files), plist_count)
# Check the count of the error files.
failed_dir = os.path.join(reports_dir, "failed")
failed_file_count = 0
if os.path.exists(failed_dir):
failed_files = [os.path.join(failed_dir, filename)
for filename in os.listdir(failed_dir)
if filename.endswith('.zip')]
failed_file_count = len(failed_files)
for f in failed_files:
os.remove(f)
self.assertEquals(failed_file_count, failed_count)
def test_compiler_info_files(self):
'''
Test that the compiler info files are generated
'''
# GIVEN
build_json = os.path.join(self.test_workspace, "build_simple.json")
reports_dir = self.report_dir
source_file = os.path.join(self.test_workspace, "simple.cpp")
# Create a compilation database.
build_log = [{"directory": self.test_workspace,
"command": "g++ -c " + source_file,
"file": source_file
},
{"directory": self.test_workspace,
"command": "clang++ -c " + source_file,
"file": source_file
}
]
with open(build_json, 'w') as outfile:
json.dump(build_log, outfile)
# Test file contents
simple_file_content = "int main() { return 0; }"
# Write content to the test file
with open(source_file, 'w') as source:
source.write(simple_file_content)
# Create analyze command.
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "-o", reports_dir]
# WHEN
# Run analyze.
process = subprocess.Popen(
analyze_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.test_dir)
process.communicate()
# THEN
errcode = process.returncode
self.assertEquals(errcode, 0)
info_file = os.path.join(reports_dir, 'compiler_info.json')
self.assertEquals(os.path.exists(info_file), True)
self.assertNotEqual(os.stat(info_file).st_size, 0)
# Test the validity of the json file.
with open(info_file, 'r') as f:
try:
data = json.load(f)
self.assertEquals(len(data), 2)
self.assertTrue("clang++" in data)
self.assertTrue("g++" in data)
except ValueError:
self.fail("json.load should successfully parse the file %s"
% info_file)
def test_compiler_info_file_is_loaded(self):
'''
Test that compiler info file is loaded if option is set.
'''
reports_dir = self.report_dir
build_json = os.path.join(self.test_workspace, "build_simple.json")
source_file = os.path.join(self.test_workspace, "simple.cpp")
compiler_info_file = os.path.join(self.test_workspace,
"compiler_info.json")
# Create a compilation database.
build_log = [{"directory": self.test_workspace,
"command": "clang++ -c " + source_file,
"file": source_file}]
with open(build_json, 'w') as outfile:
json.dump(build_log, outfile)
# Test file contents
simple_file_content = "int main() { return 0; }"
# Write content to the test file
with open(source_file, 'w') as source:
source.write(simple_file_content)
with open(compiler_info_file, 'w') as source:
source.write('''{
"clang++": {
"c++": {
"compiler_standard": "-std=FAKE_STD",
"target": "FAKE_TARGET",
"compiler_includes": [
"-isystem /FAKE_INCLUDE_DIR"
]
}
}
}''')
# Create analyze command.
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--compiler-info-file", compiler_info_file,
"--analyzers", "clangsa", "--verbose", "debug",
"-o", reports_dir]
# Run analyze.
process = subprocess.Popen(
analyze_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.test_dir)
out, _ = process.communicate()
print(out)
self.assertTrue("-std=FAKE_STD" in out)
self.assertTrue("--target=FAKE_TARGET" in out)
self.assertTrue("-isystem /FAKE_INCLUDE_DIR" in out)
def test_capture_analysis_output(self):
"""
Test if reports/success/<output_file>.[stdout,stderr].txt
files are created
"""
build_json = os.path.join(self.test_workspace, "build_success.json")
success_dir = os.path.join(self.report_dir, "success")
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "-o", self.report_dir,
"--capture-analysis-output"]
source_file = os.path.join(self.test_dir, "success.c")
build_log = [{"directory": self.test_workspace,
"command": "gcc -c " + source_file,
"file": source_file
}]
with open(build_json, 'w') as outfile:
json.dump(build_log, outfile)
print(analyze_cmd)
process = subprocess.Popen(
analyze_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.test_dir)
out, err = process.communicate()
print(out)
print(err)
errcode = process.returncode
self.assertEquals(errcode, 0)
# We expect the success stderr file in the success directory.
success_files = os.listdir(success_dir)
print(success_files)
self.assertEquals(len(success_files), 1)
self.assertIn("success.c", success_files[0])
os.remove(os.path.join(success_dir, success_files[0]))
def test_failure(self):
"""
Test if reports/failed/<failed_file>.zip file is created
"""
build_json = os.path.join(self.test_workspace, "build.json")
failed_dir = os.path.join(self.report_dir, "failed")
source_file = os.path.join(self.test_dir, "failure.c")
# Create a compilation database.
build_log = [{"directory": self.test_workspace,
"command": "gcc -c " + source_file,
"file": source_file
}]
with open(build_json, 'w') as outfile:
json.dump(build_log, outfile)
# Create and run analyze command.
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "--verbose", "debug",
"-o", self.report_dir]
print(analyze_cmd)
process = subprocess.Popen(
analyze_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.test_dir)
out, err = process.communicate()
print(out)
print(err)
errcode = process.returncode
self.assertEquals(errcode, 0)
# We expect a failure archive to be in the failed directory.
failed_files = os.listdir(failed_dir)
self.assertEquals(len(failed_files), 1)
self.assertIn("failure.c", failed_files[0])
fail_zip = os.path.join(failed_dir, failed_files[0])
with zipfile.ZipFile(fail_zip, 'r') as archive:
files = archive.namelist()
self.assertIn("build-action", files)
self.assertIn("analyzer-command", files)
with archive.open("build-action", 'r') as archived_buildcmd:
self.assertEqual(archived_buildcmd.read(),
"gcc -c " + source_file)
source_in_archive = os.path.join("sources-root",
source_file.lstrip('/'))
self.assertIn(source_in_archive, files)
with archive.open(source_in_archive, 'r') as archived_code:
with open(source_file, 'r') as source_code:
self.assertEqual(archived_code.read(), source_code.read())
os.remove(os.path.join(failed_dir, failed_files[0]))
def test_robustness_for_dependencygen_failure(self):
"""
Test if failure ZIP is created even if the dependency generator creates
an invalid output.
"""
build_json = os.path.join(self.test_workspace, "build.json")
failed_dir = os.path.join(self.report_dir, "failed")
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "--verbose", "debug",
"-o", self.report_dir]
source_file = os.path.join(self.test_dir, "failure.c")
build_log = [{"directory": self.test_workspace,
"command": "cc -c -std=c++11 " + source_file,
"file": source_file
}]
# cc -std=c++11 writes error "-std=c++11 valid for C++ but not for C"
# to its output when invoked as a dependency generator for this
# build command.
with open(build_json, 'w') as outfile:
json.dump(build_log, outfile)
print(analyze_cmd)
process = subprocess.Popen(
analyze_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.test_dir)
process.communicate()
errcode = process.returncode
self.assertEquals(errcode, 0)
# We expect a failure archive to be in the failed directory.
failed_files = os.listdir(failed_dir)
print(failed_files)
self.assertEquals(len(failed_files), 1)
self.assertIn("failure.c", failed_files[0])
os.remove(os.path.join(failed_dir, failed_files[0]))
def test_incremental_analyze(self):
"""
Test incremental mode of the analyze command, which overwrites only
those plist files that were updated by the current build command.
"""
build_json = os.path.join(self.test_workspace, "build_simple.json")
reports_dir = os.path.join(self.test_workspace, "reports_incremental")
source_file = os.path.join(self.test_workspace, "simple.cpp")
# Create a compilation database.
build_log = [{"directory": self.test_workspace,
"command": "g++ -c " + source_file,
"file": source_file
}]
with open(build_json, 'w') as outfile:
json.dump(build_log, outfile)
# Test file contents
simple_file_content = "int main() { return 0; }"
failed_file_content = "int main() { err; return 0; }"
# Run analyze on the simple file.
self.__analyze_incremental(simple_file_content, build_json,
reports_dir, 1, 0)
# Run analyze on the failed file.
self.__analyze_incremental(failed_file_content, build_json,
reports_dir, 0, 1)
# Run analyze on the simple file again.
self.__analyze_incremental(simple_file_content, build_json,
reports_dir, 1, 0)
def test_relative_include_paths(self):
"""
Test that analysis succeeds when the build json contains relative include paths.
"""
build_json = os.path.join(self.test_workspace, "build_simple_rel.json")
report_dir = os.path.join(self.test_workspace, "reports_relative")
source_file = os.path.join(self.test_dir, "simple.c")
failed_dir = os.path.join(report_dir, "failed")
# Create a compilation database.
build_log = [{"directory": self.test_dir,
"command": "cc -c " + source_file + " -Iincludes",
"file": source_file
}]
with open(build_json, 'w') as outfile:
json.dump(build_log, outfile)
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "-o", report_dir]
# CodeChecker is executed in a different
# dir than the containing folder of simple.c.
process = subprocess.Popen(
analyze_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.test_workspace)
process.communicate()
errcode = process.returncode
self.assertEquals(errcode, 0)
self.assertFalse(os.path.isdir(failed_dir))
def unique_json_helper(self, unique_json, is_a, is_b, is_s):
with open(unique_json) as json_file:
data = json.load(json_file)
simple_a = False
simple_b = False
success = False
for d in data:
if "simple_a.o" in d["command"]:
simple_a = True
if "simple_b.o" in d["command"]:
simple_b = True
if "success.o" in d["command"]:
success = True
self.assertEqual(simple_a, is_a)
self.assertEqual(simple_b, is_b)
self.assertEqual(success, is_s)
def test_compile_uniqueing(self):
"""
Test compilation uniqueing.
"""
build_json = os.path.join(self.test_workspace, "build_simple_rel.json")
report_dir = os.path.join(self.test_workspace, "reports_relative")
source_file = os.path.join(self.test_dir, "simple.c")
source_file2 = os.path.join(self.test_dir, "success.c")
failed_dir = os.path.join(report_dir, "failed")
unique_json = os.path.join(report_dir, "unique_compile_commands.json")
# Create a compilation database.
build_log = [{"directory": self.test_dir,
"command": "cc -c " + source_file +
" -Iincludes -o simple_b.o",
"file": source_file},
{"directory": self.test_dir,
"command": "cc -c " + source_file +
" -Iincludes -o simple_a.o",
"file": source_file},
{"directory": self.test_dir,
"command": "cc -c " + source_file2 +
" -Iincludes -o success.o",
"file": source_file2}]
with open(build_json, 'w') as outfile:
json.dump(build_log, outfile)
# Testing alphabetic uniqueing mode.
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "-o", report_dir,
"--compile-uniqueing", "alpha"]
process = subprocess.Popen(
analyze_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.test_workspace)
process.communicate()
errcode = process.returncode
self.assertEquals(errcode, 0)
self.assertFalse(os.path.isdir(failed_dir))
self.unique_json_helper(unique_json, True, False, True)
# Testing regex mode.
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "-o", report_dir,
"--compile-uniqueing", ".*_b.*"]
process = subprocess.Popen(
analyze_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.test_workspace)
process.communicate()
errcode = process.returncode
self.assertEquals(errcode, 0)
self.assertFalse(os.path.isdir(failed_dir))
self.unique_json_helper(unique_json, False, True, True)
# Testing regex mode error handling.
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "-o", report_dir,
"--compile-uniqueing", ".*simple.*"]
process = subprocess.Popen(
analyze_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.test_workspace)
process.communicate()
errcode = process.returncode
# Since .*simple.* matches 2 files, we get an error
self.assertEquals(errcode, 1)
# Testing strict mode
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "-o", report_dir,
"--compile-uniqueing", "strict", "--verbose", "debug"]
process = subprocess.Popen(
analyze_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.test_workspace)
process.communicate()
# In strict mode the analysis must fail
# if there are more than one build
# commands for a single source.
errcode = process.returncode
self.assertEquals(errcode, 1)
self.assertFalse(os.path.isdir(failed_dir))
# Testing None mode.
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "-o", report_dir,
"--compile-uniqueing", "none"]
process = subprocess.Popen(
analyze_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.test_workspace)
process.communicate()
errcode = process.returncode
self.assertEquals(errcode, 0)
self.assertFalse(os.path.isdir(failed_dir))
self.unique_json_helper(unique_json, True, True, True)
def test_invalid_enabled_checker_name(self):
"""Warn in case of an invalid enabled checker."""
build_json = os.path.join(self.test_workspace, "build_success.json")
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "-o", self.report_dir,
"-e", "non-existing-checker-name"]
source_file = os.path.join(self.test_dir, "success.c")
build_log = [{"directory": self.test_workspace,
"command": "gcc -c " + source_file,
"file": source_file
}]
with open(build_json, 'w') as outfile:
json.dump(build_log, outfile)
print(analyze_cmd)
process = subprocess.Popen(
analyze_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.test_dir)
out, _ = process.communicate()
match = self.missing_checker_regex.search(out)
self.assertIsNotNone(match)
self.assertTrue("non-existing-checker-name" in out)
errcode = process.returncode
self.assertEquals(errcode, 0)
def test_invalid_disabled_checker_name(self):
"""Warn in case of an invalid disabled checker."""
build_json = os.path.join(self.test_workspace, "build_success.json")
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "-o", self.report_dir,
"-d", "non-existing-checker-name"]
source_file = os.path.join(self.test_dir, "success.c")
build_log = [{"directory": self.test_workspace,
"command": "gcc -c " + source_file,
"file": source_file
}]
with open(build_json, 'w') as outfile:
json.dump(build_log, outfile)
print(analyze_cmd)
process = subprocess.Popen(
analyze_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.test_dir)
out, _ = process.communicate()
match = self.missing_checker_regex.search(out)
self.assertIsNotNone(match)
self.assertTrue("non-existing-checker-name" in out)
errcode = process.returncode
self.assertEquals(errcode, 0)
def test_multiple_invalid_checker_names(self):
"""Warn in case of multiple invalid checker names."""
build_json = os.path.join(self.test_workspace, "build_success.json")
analyze_cmd = [self._codechecker_cmd, "analyze", build_json,
"--analyzers", "clangsa", "-o", self.report_dir,
"-e", "non-existing-checker-name",
"-e", "non-existing-checker",
"-d", "missing.checker",
"-d", "other.missing.checker"]
source_file = os.path.join(self.test_dir, "success.c")
build_log = [{"directory": self.test_workspace,
"command": "gcc -c " + source_file,
"file": source_file
}]
with open(build_json, 'w') as outfile:
json.dump(build_log, outfile)
print(analyze_cmd)
process = subprocess.Popen(
analyze_cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.test_dir)
out, _ = process.communicate()
match = self.missing_checker_regex.search(out)
self.assertIsNotNone(match)
self.assertTrue("non-existing-checker-name" in out)
self.assertTrue("non-existing-checker" in out)
self.assertTrue("missing.checker" in out)
self.assertTrue("other.missing.checker" in out)
errcode = process.returncode
self.assertEquals(errcode, 0)
| 1 | 11,178 | Why was this assert removed? Shouldn't we check if the file is in the zip? | Ericsson-codechecker | c |
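A minimal sketch of what the reviewer asks for, reusing the variables from test_failure above; it checks for the failing source inside the archive instead of in the archive's file name (an assumption about the intended fix, not the project's actual change):

# Sketch only: assert on the zip's contents rather than its name.
fail_zip = os.path.join(failed_dir, failed_files[0])
with zipfile.ZipFile(fail_zip, 'r') as archive:
    self.assertTrue(any(name.endswith("failure.c")
                        for name in archive.namelist()))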
@@ -149,13 +149,17 @@ func (cb *ControllerBuilder) Build() (*Controller, error) {
return cb.Controller, nil
}
-// addSpc is the add event handler for spc.
+// addSpc is the add event handler for spc
func (c *Controller) addSpc(obj interface{}) {
spc, ok := obj.(*apis.StoragePoolClaim)
if !ok {
runtime.HandleError(fmt.Errorf("Couldn't get spc object %#v", obj))
return
}
+ if spc.Labels[string(apis.OpenEBSUpgradeKey)] == "true" {
+ glog.Infof("spc %s is not reconciled reason upgrade value: %s", spc.Name, spc.Labels[string(apis.OpenEBSUpgradeKey)])
+ return
+ }
glog.V(4).Infof("Queuing SPC %s for add event", spc.Name)
c.enqueueSpc(spc)
} | 1 | /*
Copyright 2017 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package spc
import (
"fmt"
"github.com/golang/glog"
apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
clientset "github.com/openebs/maya/pkg/client/generated/clientset/versioned"
openebsScheme "github.com/openebs/maya/pkg/client/generated/clientset/versioned/scheme"
informers "github.com/openebs/maya/pkg/client/generated/informers/externalversions"
listers "github.com/openebs/maya/pkg/client/generated/listers/openebs.io/v1alpha1"
ndmclientset "github.com/openebs/maya/pkg/client/generated/openebs.io/ndm/v1alpha1/clientset/internalclientset"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
)
const controllerAgentName = "spc-controller"
// Controller is the controller implementation for SPC resources
type Controller struct {
// kubeclientset is a standard kubernetes clientset
kubeclientset kubernetes.Interface
// clientset is a generated clientset for the openebs custom API group.
clientset clientset.Interface
// ndmclientset is a generated clientset for the ndm custom API group.
ndmclientset ndmclientset.Interface
spcLister listers.StoragePoolClaimLister
// spcSynced is used for caches sync to get populated
spcSynced cache.InformerSynced
// workqueue is a rate limited work queue. This is used to queue work to be
// processed instead of performing it as soon as a change happens. This
// means we can ensure we only process a fixed amount of resources at a
// time, and makes it easy to ensure we are never processing the same item
// simultaneously in two different workers.
workqueue workqueue.RateLimitingInterface
// recorder is an event recorder for recording Event resources to the
// Kubernetes API.
recorder record.EventRecorder
}
// ControllerBuilder is the builder object for controller.
type ControllerBuilder struct {
Controller *Controller
}
// NewControllerBuilder returns an empty instance of controller builder.
func NewControllerBuilder() *ControllerBuilder {
return &ControllerBuilder{
Controller: &Controller{},
}
}
// withKubeClient sets the kube client on the controller object.
func (cb *ControllerBuilder) withKubeClient(ks kubernetes.Interface) *ControllerBuilder {
cb.Controller.kubeclientset = ks
return cb
}
// withOpenEBSClient sets the openebs client on the controller object.
func (cb *ControllerBuilder) withOpenEBSClient(cs clientset.Interface) *ControllerBuilder {
cb.Controller.clientset = cs
return cb
}
// withNDMClient sets the ndm client on the controller object.
func (cb *ControllerBuilder) withNDMClient(ndmcs ndmclientset.Interface) *ControllerBuilder {
cb.Controller.ndmclientset = ndmcs
return cb
}
// withSpcLister sets the spc lister on the controller object.
func (cb *ControllerBuilder) withSpcLister(sl informers.SharedInformerFactory) *ControllerBuilder {
spcInformer := sl.Openebs().V1alpha1().StoragePoolClaims()
cb.Controller.spcLister = spcInformer.Lister()
return cb
}
// withspcSynced sets the informer cache sync check on the controller object.
func (cb *ControllerBuilder) withspcSynced(sl informers.SharedInformerFactory) *ControllerBuilder {
spcInformer := sl.Openebs().V1alpha1().StoragePoolClaims()
cb.Controller.spcSynced = spcInformer.Informer().HasSynced
return cb
}
// withWorkqueueRateLimiting adds a rate limiting workqueue to the controller object.
func (cb *ControllerBuilder) withWorkqueueRateLimiting() *ControllerBuilder {
cb.Controller.workqueue = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "SPC")
return cb
}
// withRecorder adds an event recorder to the controller object.
func (cb *ControllerBuilder) withRecorder(ks kubernetes.Interface) *ControllerBuilder {
glog.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: ks.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})
cb.Controller.recorder = recorder
return cb
}
// withEventHandler adds event handlers to the controller object.
func (cb *ControllerBuilder) withEventHandler(spcInformerFactory informers.SharedInformerFactory) *ControllerBuilder {
spcInformer := spcInformerFactory.Openebs().V1alpha1().StoragePoolClaims()
// Set up an event handler for when SPC resources change
spcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: cb.Controller.addSpc,
UpdateFunc: cb.Controller.updateSpc,
// This will enter the sync loop and no-op, because the spc has been deleted from the store.
DeleteFunc: cb.Controller.deleteSpc,
})
return cb
}
// Build returns a controller instance.
func (cb *ControllerBuilder) Build() (*Controller, error) {
err := openebsScheme.AddToScheme(scheme.Scheme)
if err != nil {
return nil, err
}
return cb.Controller, nil
}
// addSpc is the add event handler for spc.
func (c *Controller) addSpc(obj interface{}) {
spc, ok := obj.(*apis.StoragePoolClaim)
if !ok {
runtime.HandleError(fmt.Errorf("Couldn't get spc object %#v", obj))
return
}
glog.V(4).Infof("Queuing SPC %s for add event", spc.Name)
c.enqueueSpc(spc)
}
// updateSpc is the update event handler for spc.
func (c *Controller) updateSpc(oldSpc, newSpc interface{}) {
spc, ok := newSpc.(*apis.StoragePoolClaim)
if !ok {
runtime.HandleError(fmt.Errorf("Couldn't get spc object %#v", newSpc))
return
}
// Enqueue spc only when there is a pending pool to be created.
if c.isPoolPending(spc) {
c.enqueueSpc(newSpc)
}
}
// deleteSpc is the delete event handler for spc.
func (c *Controller) deleteSpc(obj interface{}) {
spc, ok := obj.(*apis.StoragePoolClaim)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
runtime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return
}
spc, ok = tombstone.Obj.(*apis.StoragePoolClaim)
if !ok {
runtime.HandleError(fmt.Errorf("Tombstone contained object that is not a storagepoolclaim %#v", obj))
return
}
}
glog.V(4).Infof("Deleting storagepoolclaim %s", spc.Name)
c.enqueueSpc(spc)
}
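
A note on the review that follows: it asks for this kind of filtering to live in the informer's handler functions themselves. A minimal hypothetical sketch of that pattern in Go, reusing the addSpc/deleteSpc/isPoolPending helpers defined above (c is assumed to be the *Controller):

// Sketch only: register filtering logic directly in the event handlers, so
// the workqueue is never fed items that have no pending work.
spcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
	AddFunc: c.addSpc,
	UpdateFunc: func(oldObj, newObj interface{}) {
		spc, ok := newObj.(*apis.StoragePoolClaim)
		if !ok {
			return
		}
		// Enqueue only when a pool is still pending, filtering out
		// periodic resync updates at the handler boundary.
		if c.isPoolPending(spc) {
			c.enqueueSpc(spc)
		}
	},
	DeleteFunc: c.deleteSpc,
})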
| 1 | 16,234 | This logic should be handled at informer handle functions i.e. AddFunc, UpdateFunc, DeleteFunc | openebs-maya | go |
@@ -39,3 +39,15 @@ type PSList struct {
func (c *PSList) Len() int {
return len(c.items)
}
+
+// IsStripePoolSpec returns true if the given pool spec is a stripe pool,
+// and false otherwise.
+func IsStripePoolSpec(poolSpec *apisv1alpha1.PoolSpec) bool {
+ if len(poolSpec.RaidGroups[0].Type) != 0 {
+ if apisv1alpha1.PoolType(poolSpec.RaidGroups[0].Type) == apisv1alpha1.PoolStriped {
+ return true
+ }
+ return false
+ }
+ return apisv1alpha1.PoolType(poolSpec.PoolConfig.DefaultRaidGroupType) == apisv1alpha1.PoolStriped
+} | 1 | /*
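
The gosimple S1008 finding quoted after this file asks for the boolean to be returned directly instead of branching on it; a behavior-preserving sketch of the compliant form:

// IsStripePoolSpec returns true if the pool spec is a stripe pool.
func IsStripePoolSpec(poolSpec *apisv1alpha1.PoolSpec) bool {
	if len(poolSpec.RaidGroups[0].Type) != 0 {
		return apisv1alpha1.PoolType(poolSpec.RaidGroups[0].Type) == apisv1alpha1.PoolStriped
	}
	return apisv1alpha1.PoolType(poolSpec.PoolConfig.DefaultRaidGroupType) == apisv1alpha1.PoolStriped
}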
Copyright 2019 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cstorpoolspecs
import (
apisv1alpha1 "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
)
// PS is a wrapper over poolspec api
// object. It provides build, validations and other common
// logic to be used by various feature specific callers.
type PS struct {
object *apisv1alpha1.PoolSpec
}
// PSList is a wrapper over poolspec api
// object. It provides build, validations and other common
// logic to be used by various feature specific callers.
type PSList struct {
items []apisv1alpha1.PoolSpec
}
// Len returns the number of items present
// in the PSList
func (c *PSList) Len() int {
return len(c.items)
}
| 1 | 17,932 | S1008: should use 'return <expr>' instead of 'if <expr> { return <bool> }; return <bool>' (from `gosimple`) | openebs-maya | go |
@@ -22,9 +22,15 @@ import (
)
func TestProcessIssueEvent(t *testing.T) {
+ const (
+ defaultTitle = "foo: bar"
+ )
+
tests := []struct {
description string
action string
+ title string // defaults to defaultTitle
+ prevTitle string
labels []string
want *issueEdits
}{ | 1 | // Copyright 2018 The Go Cloud Authors
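
The review after this file suggests removing the defaulting branch by writing defaultTitle explicitly in each table entry; a sketch of one entry under that convention, using the field names from the diff above:

{
	description: "close with random label -> no change",
	action:      "closed",
	title:       defaultTitle, // stated explicitly rather than defaulted in setup code
	labels:      []string{"foo"},
	want:        &issueEdits{},
},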
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-github/github"
)
func TestProcessIssueEvent(t *testing.T) {
tests := []struct {
description string
action string
labels []string
want *issueEdits
}{
// Remove "in progress" label from closed issues.
{
description: "close with random label -> no change",
action: "closed",
labels: []string{"foo"},
want: &issueEdits{},
},
{
description: "open with in progress label -> no change",
action: "opened",
labels: []string{"in progress"},
want: &issueEdits{},
},
{
description: "close with in progress label -> remove it",
action: "closed",
labels: []string{"in progress"},
want: &issueEdits{
RemoveLabels: []string{"in progress"},
},
},
}
for _, tc := range tests {
t.Run(tc.description, func(t *testing.T) {
lbls := make([]github.Label, len(tc.labels))
for i, label := range tc.labels {
lbls[i] = github.Label{Name: &label}
}
iss := &github.Issue{
Labels: lbls,
}
data := &issueData{
Action: tc.action,
Issue: iss,
}
got := processIssueEvent(data)
if diff := cmp.Diff(tc.want, got); diff != "" {
t.Errorf("diff: (-want +got)\n%s", diff)
}
})
}
}
| 1 | 11,078 | Just to remove branching, use `defaultTitle` explicitly in the test cases. (Is this gofmt'd?) | google-go-cloud | go |
@@ -64,7 +64,7 @@ public class CSharpGapicSnippetsTransformer implements ModelToViewTransformer<Pr
private final StaticLangApiMethodTransformer apiMethodTransformer =
new CSharpApiMethodTransformer();
private final CSharpCommonTransformer csharpCommonTransformer = new CSharpCommonTransformer();
- private final SampleTransformer sampleTransformer = new SampleTransformer(SampleType.IN_CODE);
+ private final SampleTransformer sampleTransformer = SampleTransformer.create(SampleType.IN_CODE);
private final InitCodeTransformer initCodeTransformer = new InitCodeTransformer();
public CSharpGapicSnippetsTransformer(GapicCodePathMapper pathMapper) { | 1 | /* Copyright 2016 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.transformer.csharp;
import com.google.api.codegen.config.FieldConfig;
import com.google.api.codegen.config.FlatteningConfig;
import com.google.api.codegen.config.GapicProductConfig;
import com.google.api.codegen.config.InterfaceModel;
import com.google.api.codegen.config.MethodConfig;
import com.google.api.codegen.config.MethodModel;
import com.google.api.codegen.config.PageStreamingConfig;
import com.google.api.codegen.config.ProtoApiModel;
import com.google.api.codegen.config.SampleSpec.SampleType;
import com.google.api.codegen.gapic.GapicCodePathMapper;
import com.google.api.codegen.metacode.InitCodeContext.InitCodeOutputType;
import com.google.api.codegen.transformer.FileHeaderTransformer;
import com.google.api.codegen.transformer.GapicInterfaceContext;
import com.google.api.codegen.transformer.InitCodeTransformer;
import com.google.api.codegen.transformer.InterfaceContext;
import com.google.api.codegen.transformer.MethodContext;
import com.google.api.codegen.transformer.ModelToViewTransformer;
import com.google.api.codegen.transformer.ParamWithSimpleDoc;
import com.google.api.codegen.transformer.SampleTransformer;
import com.google.api.codegen.transformer.StandardImportSectionTransformer;
import com.google.api.codegen.transformer.StaticLangApiMethodTransformer;
import com.google.api.codegen.transformer.SurfaceNamer;
import com.google.api.codegen.util.csharp.CSharpAliasMode;
import com.google.api.codegen.viewmodel.CallingForm;
import com.google.api.codegen.viewmodel.ClientMethodType;
import com.google.api.codegen.viewmodel.SnippetsFileView;
import com.google.api.codegen.viewmodel.StaticLangApiMethodSnippetView;
import com.google.api.codegen.viewmodel.StaticLangApiMethodView;
import com.google.api.codegen.viewmodel.ViewModel;
import com.google.common.collect.ImmutableList;
import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
/* Transforms a ProtoApiModel into the standalone C# code snippets of an API. */
public class CSharpGapicSnippetsTransformer implements ModelToViewTransformer<ProtoApiModel> {
private static final String SNIPPETS_TEMPLATE_FILENAME = "csharp/gapic_snippets.snip";
private static final CSharpAliasMode ALIAS_MODE = CSharpAliasMode.MessagesOnly;
private final GapicCodePathMapper pathMapper;
private final FileHeaderTransformer fileHeaderTransformer =
new FileHeaderTransformer(new StandardImportSectionTransformer());
private final StaticLangApiMethodTransformer apiMethodTransformer =
new CSharpApiMethodTransformer();
private final CSharpCommonTransformer csharpCommonTransformer = new CSharpCommonTransformer();
private final SampleTransformer sampleTransformer = new SampleTransformer(SampleType.IN_CODE);
private final InitCodeTransformer initCodeTransformer = new InitCodeTransformer();
public CSharpGapicSnippetsTransformer(GapicCodePathMapper pathMapper) {
this.pathMapper = pathMapper;
}
@Override
public List<ViewModel> transform(ProtoApiModel model, GapicProductConfig productConfig) {
List<ViewModel> surfaceDocs = new ArrayList<>();
SurfaceNamer namer = new CSharpSurfaceNamer(productConfig.getPackageName(), ALIAS_MODE);
for (InterfaceModel apiInterface : model.getInterfaces()) {
if (!productConfig.hasInterfaceConfig(apiInterface)) {
continue;
}
GapicInterfaceContext context =
GapicInterfaceContext.create(
apiInterface,
productConfig,
csharpCommonTransformer.createTypeTable(namer.getExamplePackageName(), ALIAS_MODE),
namer,
new CSharpFeatureConfig());
csharpCommonTransformer.addCommonImports(context);
context.getImportTypeTable().saveNicknameFor("Google.Protobuf.Bytestring");
context.getImportTypeTable().saveNicknameFor("System.Linq.__import__");
surfaceDocs.add(generateSnippets(context));
}
return surfaceDocs;
}
@Override
public List<String> getTemplateFileNames() {
return Arrays.asList(SNIPPETS_TEMPLATE_FILENAME);
}
private SnippetsFileView generateSnippets(GapicInterfaceContext context) {
SurfaceNamer namer = context.getNamer();
String name = namer.getApiSnippetsClassName(context.getInterfaceConfig());
SnippetsFileView.Builder snippetsBuilder = SnippetsFileView.newBuilder();
snippetsBuilder.templateFileName(SNIPPETS_TEMPLATE_FILENAME);
String outputPath =
pathMapper.getOutputPath(context.getInterface().getFullName(), context.getProductConfig());
snippetsBuilder.outputPath(
outputPath + File.separator + name.replace("Generated", "") + ".g.cs");
snippetsBuilder.name(name);
snippetsBuilder.snippetMethods(generateMethods(context));
// must be done as the last step to catch all imports
snippetsBuilder.fileHeader(fileHeaderTransformer.generateFileHeader(context));
return snippetsBuilder.build();
}
private List<StaticLangApiMethodSnippetView> generateMethods(InterfaceContext context) {
List<StaticLangApiMethodSnippetView> methods = new ArrayList<>();
for (MethodModel method : csharpCommonTransformer.getSupportedMethods(context)) {
MethodConfig methodConfig = context.getMethodConfig(method);
MethodContext methodContext = context.asRequestMethodContext(method);
if (methodConfig.isGrpcStreaming()) {
methods.add(generateGrpcStreamingRequestMethod(methodContext));
} else if (methodConfig.isLongRunningOperation()) {
if (methodConfig.isFlattening()) {
ImmutableList<FlatteningConfig> flatteningGroups = methodConfig.getFlatteningConfigs();
boolean requiresNameSuffix = flatteningGroups.size() > 1;
for (int i = 0; i < flatteningGroups.size(); i++) {
FlatteningConfig flatteningGroup = flatteningGroups.get(i);
String nameSuffix = requiresNameSuffix ? Integer.toString(i + 1) : "";
MethodContext methodContextFlat =
context.asFlattenedMethodContext(method, flatteningGroup);
methods.add(generateOperationFlattenedAsyncMethod(methodContextFlat, nameSuffix));
methods.add(generateOperationFlattenedMethod(methodContextFlat, nameSuffix));
}
}
methods.add(generateOperationRequestAsyncMethod(methodContext));
methods.add(generateOperationRequestMethod(methodContext));
} else if (methodConfig.isPageStreaming()) {
if (methodConfig.isFlattening()) {
ImmutableList<FlatteningConfig> flatteningGroups = methodConfig.getFlatteningConfigs();
// Find flattenings that have ambiguous parameters, and mark them to use named arguments.
// Ambiguity occurs in a page-stream flattening that has one or two extra string
// parameters (that are not resource-names) compared to any other flattening of this same
// method.
// Create a string for each flattening, encoding which parameters are strings and
// not-strings. Each character in the string refers to a parameter. Each string refers
// to a flattening.
String[] stringParams =
flatteningGroups
.stream()
.map(
flat ->
flat.getFlattenedFieldConfigs()
.values()
.stream()
.map(
field ->
field.getField().getType().isStringType()
&& field.getResourceNameConfig() == null
? 's'
: '.')
.collect(
StringBuilder::new,
StringBuilder::appendCodePoint,
StringBuilder::append)
.toString())
.toArray(String[]::new);
// Array of which flattenings need to use named arguments.
// Each array entry refers to the correspondingly indexed flattening.
Boolean[] requiresNamedParameters =
Arrays.stream(stringParams)
.map(
a ->
Arrays.stream(stringParams)
.anyMatch(b -> a.startsWith(b + "s") || a.startsWith(b + "ss")))
.toArray(Boolean[]::new);
boolean requiresNameSuffix = flatteningGroups.size() > 1;
// Build method list.
for (int i = 0; i < flatteningGroups.size(); i++) {
FlatteningConfig flatteningGroup = flatteningGroups.get(i);
String nameSuffix = requiresNameSuffix ? Integer.toString(i + 1) : "";
MethodContext methodContextFlat =
context.asFlattenedMethodContext(method, flatteningGroup);
methods.add(
generatePagedFlattenedAsyncMethod(
methodContextFlat, nameSuffix, requiresNamedParameters[i]));
methods.add(
generatePagedFlattenedMethod(
methodContextFlat, nameSuffix, requiresNamedParameters[i]));
}
}
methods.add(generatePagedRequestAsyncMethod(methodContext));
methods.add(generatePagedRequestMethod(methodContext));
} else {
if (methodConfig.isFlattening()) {
ImmutableList<FlatteningConfig> flatteningGroups = methodConfig.getFlatteningConfigs();
boolean requiresNameSuffix = flatteningGroups.size() > 1;
for (int i = 0; i < flatteningGroups.size(); i++) {
FlatteningConfig flatteningGroup = flatteningGroups.get(i);
String nameSuffix = requiresNameSuffix ? Integer.toString(i + 1) : "";
MethodContext methodContextFlat =
context.asFlattenedMethodContext(method, flatteningGroup);
methods.add(generateFlattenedAsyncMethod(methodContextFlat, nameSuffix));
methods.add(generateFlattenedMethod(methodContextFlat, nameSuffix));
}
}
methods.add(generateRequestAsyncMethod(methodContext));
methods.add(generateRequestMethod(methodContext));
}
}
return methods;
}
private StaticLangApiMethodSnippetView generateGrpcStreamingRequestMethod(
MethodContext methodContext) {
SurfaceNamer namer = methodContext.getNamer();
StaticLangApiMethodView method =
generateInitCode(
apiMethodTransformer.generateGrpcStreamingRequestObjectMethod(methodContext),
methodContext,
methodContext.getMethodConfig().getRequiredFieldConfigs(),
InitCodeOutputType.SingleObject,
CallingForm.RequestStreamingServer);
String callerResponseTypeName = method.name() + "Stream";
return StaticLangApiMethodSnippetView.newBuilder()
.method(method)
.snippetMethodName(method.name())
.callerResponseTypeName(callerResponseTypeName)
.apiClassName(namer.getApiWrapperClassName(methodContext.getInterfaceConfig()))
.apiVariableName(method.apiVariableName())
.build();
}
private StaticLangApiMethodSnippetView generateOperationFlattenedAsyncMethod(
MethodContext methodContext, String suffix) {
SurfaceNamer namer = methodContext.getNamer();
StaticLangApiMethodView method =
generateInitCode(
apiMethodTransformer.generateAsyncOperationFlattenedMethod(
methodContext,
Collections.<ParamWithSimpleDoc>emptyList(),
ClientMethodType.AsyncOperationFlattenedMethod,
true),
methodContext,
methodContext.getFlatteningConfig().getFlattenedFieldConfigs().values(),
InitCodeOutputType.FieldList,
CallingForm.LongRunningFlattenedAsync);
String callerResponseTypeName = method.operationMethod().clientReturnTypeName();
return StaticLangApiMethodSnippetView.newBuilder()
.method(method)
.snippetMethodName(method.name() + suffix)
.callerResponseTypeName(callerResponseTypeName)
.apiClassName(namer.getApiWrapperClassName(methodContext.getInterfaceConfig()))
.apiVariableName(method.apiVariableName())
.build();
}
private StaticLangApiMethodSnippetView generateOperationFlattenedMethod(
MethodContext methodContext, String suffix) {
SurfaceNamer namer = methodContext.getNamer();
StaticLangApiMethodView method =
generateInitCode(
apiMethodTransformer.generateOperationFlattenedMethod(
methodContext, Collections.<ParamWithSimpleDoc>emptyList()),
methodContext,
methodContext.getFlatteningConfig().getFlattenedFieldConfigs().values(),
InitCodeOutputType.FieldList,
CallingForm.LongRunningFlattened);
String callerResponseTypeName = method.operationMethod().clientReturnTypeName();
return StaticLangApiMethodSnippetView.newBuilder()
.method(method)
.snippetMethodName(method.name() + suffix)
.callerResponseTypeName(callerResponseTypeName)
.apiClassName(namer.getApiWrapperClassName(methodContext.getInterfaceConfig()))
.apiVariableName(method.apiVariableName())
.build();
}
private StaticLangApiMethodSnippetView generateOperationRequestAsyncMethod(
MethodContext methodContext) {
SurfaceNamer namer = methodContext.getNamer();
StaticLangApiMethodView method =
generateInitCode(
apiMethodTransformer.generateAsyncOperationRequestObjectMethod(
methodContext, Collections.<ParamWithSimpleDoc>emptyList(), true),
methodContext,
methodContext.getMethodConfig().getRequiredFieldConfigs(),
InitCodeOutputType.SingleObject,
CallingForm.LongRunningRequestAsync);
String callerResponseTypeName = method.operationMethod().clientReturnTypeName();
return StaticLangApiMethodSnippetView.newBuilder()
.method(method)
.snippetMethodName(method.name() + "_RequestObject")
.callerResponseTypeName(callerResponseTypeName)
.apiClassName(namer.getApiWrapperClassName(methodContext.getInterfaceConfig()))
.apiVariableName(method.apiVariableName())
.build();
}
private StaticLangApiMethodSnippetView generateOperationRequestMethod(
MethodContext methodContext) {
SurfaceNamer namer = methodContext.getNamer();
StaticLangApiMethodView method =
generateInitCode(
apiMethodTransformer.generateOperationRequestObjectMethod(methodContext),
methodContext,
methodContext.getMethodConfig().getRequiredFieldConfigs(),
InitCodeOutputType.SingleObject,
CallingForm.LongRunningRequest);
String callerResponseTypeName = method.operationMethod().clientReturnTypeName();
return StaticLangApiMethodSnippetView.newBuilder()
.method(method)
.snippetMethodName(method.name() + "_RequestObject")
.callerResponseTypeName(callerResponseTypeName)
.apiClassName(namer.getApiWrapperClassName(methodContext.getInterfaceConfig()))
.apiVariableName(method.apiVariableName())
.build();
}
private StaticLangApiMethodSnippetView generatePagedFlattenedAsyncMethod(
MethodContext methodContext, String suffix, boolean requiresNamedArguments) {
StaticLangApiMethodView method =
generateInitCode(
apiMethodTransformer.generatePagedFlattenedAsyncMethod(
methodContext, csharpCommonTransformer.pagedMethodAdditionalParams()),
methodContext,
methodContext.getFlatteningConfig().getFlattenedFieldConfigs().values(),
InitCodeOutputType.FieldList,
CallingForm.FlattenedAsyncPaged);
SurfaceNamer namer = methodContext.getNamer();
PageStreamingConfig pageStreaming = methodContext.getMethodConfig().getPageStreaming();
FieldConfig resourceFieldConfig = pageStreaming.getResourcesFieldConfig();
String callerResponseTypeName =
namer.getAndSaveCallerAsyncPagedResponseTypeName(methodContext, resourceFieldConfig);
return StaticLangApiMethodSnippetView.newBuilder()
.method(method)
.snippetMethodName(method.name() + suffix)
.callerResponseTypeName(callerResponseTypeName)
.apiClassName(namer.getApiWrapperClassName(methodContext.getInterfaceConfig()))
.apiVariableName(method.apiVariableName())
.requiresNamedArguments(requiresNamedArguments)
.build();
}
private StaticLangApiMethodSnippetView generatePagedFlattenedMethod(
MethodContext methodContext, String suffix, boolean requiresNamedArguments) {
StaticLangApiMethodView method =
generateInitCode(
apiMethodTransformer.generatePagedFlattenedMethod(
methodContext, csharpCommonTransformer.pagedMethodAdditionalParams()),
methodContext,
methodContext.getFlatteningConfig().getFlattenedFieldConfigs().values(),
InitCodeOutputType.FieldList,
CallingForm.FlattenedPaged);
SurfaceNamer namer = methodContext.getNamer();
PageStreamingConfig pageStreaming = methodContext.getMethodConfig().getPageStreaming();
FieldConfig resourceFieldConfig = pageStreaming.getResourcesFieldConfig();
String callerResponseTypeName =
namer.getAndSaveCallerPagedResponseTypeName(methodContext, resourceFieldConfig);
return StaticLangApiMethodSnippetView.newBuilder()
.method(method)
.snippetMethodName(method.name() + suffix)
.callerResponseTypeName(callerResponseTypeName)
.apiClassName(namer.getApiWrapperClassName(methodContext.getInterfaceConfig()))
.apiVariableName(method.apiVariableName())
.requiresNamedArguments(requiresNamedArguments)
.build();
}
private StaticLangApiMethodSnippetView generatePagedRequestAsyncMethod(
MethodContext methodContext) {
StaticLangApiMethodView method =
generateInitCode(
apiMethodTransformer.generatePagedRequestObjectAsyncMethod(
methodContext, csharpCommonTransformer.pagedMethodAdditionalParams()),
methodContext,
methodContext.getMethodConfig().getRequiredFieldConfigs(),
InitCodeOutputType.SingleObject,
CallingForm.RequestAsyncPaged);
SurfaceNamer namer = methodContext.getNamer();
PageStreamingConfig pageStreaming = methodContext.getMethodConfig().getPageStreaming();
FieldConfig resourceFieldConfig = pageStreaming.getResourcesFieldConfig();
String callerResponseTypeName =
namer.getAndSaveCallerAsyncPagedResponseTypeName(methodContext, resourceFieldConfig);
return StaticLangApiMethodSnippetView.newBuilder()
.method(method)
.snippetMethodName(method.name() + "_RequestObject")
.callerResponseTypeName(callerResponseTypeName)
.apiClassName(namer.getApiWrapperClassName(methodContext.getInterfaceConfig()))
.apiVariableName(method.apiVariableName())
.build();
}
private StaticLangApiMethodSnippetView generatePagedRequestMethod(MethodContext methodContext) {
StaticLangApiMethodView method =
generateInitCode(
apiMethodTransformer.generatePagedRequestObjectMethod(
methodContext, csharpCommonTransformer.pagedMethodAdditionalParams()),
methodContext,
methodContext.getMethodConfig().getRequiredFieldConfigs(),
InitCodeOutputType.SingleObject,
CallingForm.RequestPaged);
SurfaceNamer namer = methodContext.getNamer();
PageStreamingConfig pageStreaming = methodContext.getMethodConfig().getPageStreaming();
FieldConfig resourceFieldConfig = pageStreaming.getResourcesFieldConfig();
String callerResponseTypeName =
namer.getAndSaveCallerPagedResponseTypeName(methodContext, resourceFieldConfig);
return StaticLangApiMethodSnippetView.newBuilder()
.method(method)
.snippetMethodName(method.name() + "_RequestObject")
.callerResponseTypeName(callerResponseTypeName)
.apiClassName(namer.getApiWrapperClassName(methodContext.getInterfaceConfig()))
.apiVariableName(method.apiVariableName())
.build();
}
private StaticLangApiMethodSnippetView generateFlattenedAsyncMethod(
MethodContext methodContext, String suffix) {
StaticLangApiMethodView method =
generateInitCode(
apiMethodTransformer.generateFlattenedAsyncMethod(
methodContext, ClientMethodType.FlattenedAsyncCallSettingsMethod),
methodContext,
methodContext.getFlatteningConfig().getFlattenedFieldConfigs().values(),
InitCodeOutputType.FieldList,
CallingForm.FlattenedAsync);
SurfaceNamer namer = methodContext.getNamer();
String callerResponseTypeName =
methodContext
.getTypeTable()
.getAndSaveNicknameFor(namer.getStaticLangCallerAsyncReturnTypeName(methodContext));
return StaticLangApiMethodSnippetView.newBuilder()
.method(method)
.snippetMethodName(method.name() + suffix)
.callerResponseTypeName(callerResponseTypeName)
.apiClassName(namer.getApiWrapperClassName(methodContext.getInterfaceConfig()))
.apiVariableName(method.apiVariableName())
.build();
}
private StaticLangApiMethodSnippetView generateFlattenedMethod(
MethodContext methodContext, String suffix) {
StaticLangApiMethodView method =
generateInitCode(
apiMethodTransformer.generateFlattenedMethod(methodContext),
methodContext,
methodContext.getFlatteningConfig().getFlattenedFieldConfigs().values(),
InitCodeOutputType.FieldList,
CallingForm.Flattened);
SurfaceNamer namer = methodContext.getNamer();
String callerResponseTypeName =
methodContext
.getTypeTable()
.getAndSaveNicknameFor(namer.getStaticLangCallerReturnTypeName(methodContext));
return StaticLangApiMethodSnippetView.newBuilder()
.method(method)
.snippetMethodName(method.name() + suffix)
.callerResponseTypeName(callerResponseTypeName)
.apiClassName(namer.getApiWrapperClassName(methodContext.getInterfaceConfig()))
.apiVariableName(method.apiVariableName())
.build();
}
private StaticLangApiMethodSnippetView generateRequestMethod(MethodContext methodContext) {
SurfaceNamer namer = methodContext.getNamer();
StaticLangApiMethodView method =
generateInitCode(
apiMethodTransformer.generateRequestObjectMethod(methodContext),
methodContext,
methodContext.getMethodConfig().getRequiredFieldConfigs(),
InitCodeOutputType.SingleObject,
CallingForm.Request);
String callerResponseTypeName =
methodContext
.getTypeTable()
.getAndSaveNicknameFor(namer.getStaticLangCallerAsyncReturnTypeName(methodContext));
return StaticLangApiMethodSnippetView.newBuilder()
.method(method)
.snippetMethodName(method.name() + "_RequestObject")
.callerResponseTypeName(callerResponseTypeName)
.apiClassName(namer.getApiWrapperClassName(methodContext.getInterfaceConfig()))
.apiVariableName(method.apiVariableName())
.build();
}
private StaticLangApiMethodSnippetView generateRequestAsyncMethod(MethodContext methodContext) {
SurfaceNamer namer = methodContext.getNamer();
StaticLangApiMethodView method =
generateInitCode(
apiMethodTransformer.generateRequestObjectAsyncMethod(methodContext),
methodContext,
methodContext.getMethodConfig().getRequiredFieldConfigs(),
InitCodeOutputType.SingleObject,
CallingForm.RequestAsync);
String callerResponseTypeName =
methodContext
.getTypeTable()
.getAndSaveNicknameFor(namer.getStaticLangCallerAsyncReturnTypeName(methodContext));
return StaticLangApiMethodSnippetView.newBuilder()
.method(method)
.snippetMethodName(method.name() + "_RequestObject")
.callerResponseTypeName(callerResponseTypeName)
.apiClassName(namer.getApiWrapperClassName(methodContext.getInterfaceConfig()))
.apiVariableName(method.apiVariableName())
.build();
}
private StaticLangApiMethodView generateInitCode(
StaticLangApiMethodView method,
MethodContext context,
Collection<FieldConfig> fieldConfigs,
InitCodeOutputType initCodeOutputType,
CallingForm callingForm) {
// Replace the sample/init code using the same context as for the whole snippet file.
// This is a bit hacky, but fixes the problem that initcode is generated using a different
// context. Without this, the per-snippet imports don't get included in the snippet file.
StaticLangApiMethodView.Builder builder = method.toBuilder();
sampleTransformer.generateSamples(
builder,
context,
fieldConfigs,
initCodeOutputType,
initCodeContext -> initCodeTransformer.generateInitCode(context, initCodeContext),
Arrays.asList(callingForm));
return builder.build();
}
}
| 1 | 28,025 | I don't think you use this variable anywhere. (You call `SampleTransformer.newBuilder()` below) | googleapis-gapic-generator | java |
@@ -54,6 +54,18 @@ char *chirp_wrap_whoami(const char *hostname, time_t stoptime)
return xxstrdup(id);
}
+char *chirp_wrap_hash(const char *hostname, const char *path, const char *algorithm, time_t stoptime) {
+ int result;
+ unsigned char digest[CHIRP_DIGEST_MAX];
+
+ result = chirp_reli_hash(hostname, path, algorithm, digest, stoptime);
+
+ if(result < 0)
+ return NULL;
+
+ return xxstrdup( (char *) digest);
+}
+
int64_t chirp_wrap_job_create (const char *host, const char *json, time_t stoptime)
{
chirp_jobid_t id; | 1 | #include "buffer.h"
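
The review after this file notes that the digest written by chirp_reli_hash is raw binary of length result, so it is neither printable nor NUL-terminated and must not be copied with xxstrdup. Sketched in Go (the one language used for examples in this collection), the safe pattern is to copy by length and hex-encode:

package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
)

func main() {
	// A raw digest is length-delimited bytes and may contain NUL bytes.
	sum := sha1.Sum([]byte("hello"))
	// Hex-encoding yields a printable, fixed-length string representation.
	fmt.Println(hex.EncodeToString(sum[:]))
}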
#include "chirp_reli.h"
#include "chirp_types.h"
#include "xxmalloc.h"
static void accumulate_one_acl(const char *line, void *args)
{
buffer_t *B = (struct buffer *) args;
if(buffer_pos(B) > 0) {
buffer_printf(B, "\n");
}
buffer_putstring(B, line);
}
struct chirp_stat *chirp_wrap_stat(const char *hostname, const char *path, time_t stoptime) {
struct chirp_stat *info = malloc(sizeof(struct chirp_stat));
int status = chirp_reli_stat(hostname, path, info, stoptime);
if(status < 0) {
free(info);
return NULL;
}
return info;
}
char *chirp_wrap_listacl(const char *hostname, const char *path, time_t stoptime)
{
buffer_t B[1];
buffer_init(B);
buffer_abortonfailure(B, 1);
int status = chirp_reli_getacl(hostname, path, accumulate_one_acl, B, stoptime);
char *acls = NULL;
if(status >= 0) {
buffer_dup(B, &acls);
}
buffer_free(B);
return acls;
}
char *chirp_wrap_whoami(const char *hostname, time_t stoptime)
{
char id[4096] = "";
chirp_reli_whoami(hostname, id, sizeof(id), stoptime);
return xxstrdup(id);
}
int64_t chirp_wrap_job_create (const char *host, const char *json, time_t stoptime)
{
chirp_jobid_t id;
int64_t result;
result = chirp_reli_job_create(host, json, &id, stoptime);
if(result < 0)
return result;
return id;
}
int64_t chirp_wrap_job_commit (const char *host, const char *json, time_t stoptime)
{
int64_t result;
result = chirp_reli_job_commit(host, json, stoptime);
return result;
}
int64_t chirp_wrap_job_kill (const char *host, const char *json, time_t stoptime)
{
int64_t result;
result = chirp_reli_job_kill(host, json, stoptime);
return result;
}
int64_t chirp_wrap_job_reap (const char *host, const char *json, time_t stoptime)
{
int64_t result;
result = chirp_reli_job_reap(host, json, stoptime);
return result;
}
char *chirp_wrap_job_status (const char *host, const char *json, time_t stoptime)
{
char *status;
int64_t result;
result = chirp_reli_job_status(host, json, &status, stoptime);
if(result < 0)
return NULL;
return status;
}
char *chirp_wrap_job_wait (const char *host, chirp_jobid_t id, int64_t timeout, time_t stoptime)
{
char *status;
int64_t result;
result = chirp_reli_job_wait(host, id, timeout, &status, stoptime);
if(result < 0)
return NULL;
return status;
}
/* vim: set noexpandtab tabstop=4: */
| 1 | 12,150 | The digest is in binary and variable size (the digest size is result if > 0). So we can't use xxstrdup. | cooperative-computing-lab-cctools | c |
@@ -25,6 +25,9 @@ public interface Span extends AutoCloseable, TraceContext {
Span setAttribute(String key, Number value);
Span setAttribute(String key, String value);
+ Span addEvent(String name);
+ Span addEvent(String name, long timestamp);
+
Span setStatus(Status status);
@Override | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.remote.tracing;
public interface Span extends AutoCloseable, TraceContext {
Span setName(String name);
Span setAttribute(String key, boolean value);
Span setAttribute(String key, Number value);
Span setAttribute(String key, String value);
Span setStatus(Status status);
@Override
void close();
enum Kind {
CLIENT("client"),
SERVER("server"),
PRODUCER("producer"),
CONSUMER("consumer"),
;
// The nice name is the name expected in an OT trace.
private final String niceName;
private Kind(String niceName) {
this.niceName = niceName;
}
@Override
public String toString() {
return niceName;
}
}
}
| 1 | 17,759 | We don't need this additional method. | SeleniumHQ-selenium | java |
@@ -44,4 +44,5 @@ type ClientConfig interface {
// have already been started.
GetUnaryOutbound() UnaryOutbound
GetOnewayOutbound() OnewayOutbound
+ GetStreamOutbound() StreamOutbound
} | 1 | // Copyright (c) 2018 Uber Technologies, Inc.
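
The review that follows explains that adding a method here is a breaking interface change and points to OutboundConfig instead; a hypothetical sketch of the suggested upcast, written as if inside package transport (the OutboundConfig field layout is an assumption based on the review text, and fmt is assumed imported):

// streamOutbound is a sketch only: obtain a stream outbound without
// widening the ClientConfig interface.
func streamOutbound(cc ClientConfig) (StreamOutbound, error) {
	oc, ok := cc.(*OutboundConfig) // the upcast the reviewer describes
	if !ok {
		return nil, fmt.Errorf("stream calls require an *OutboundConfig, got %T", cc)
	}
	return oc.Outbounds.Stream, nil
}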
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package transport
// ClientConfigProvider builds ClientConfigs from the current service to other services.
type ClientConfigProvider interface {
// Retrieves a new ClientConfig that will make requests to the given service.
//
// This MAY panic if the given service is unknown.
ClientConfig(service string) ClientConfig
}
// A ClientConfig is a stream of communication between a single caller-service
// pair.
type ClientConfig interface {
// Name of the service making the request.
Caller() string
// Name of the service to which the request is being made.
Service() string
// Returns an outbound to send the request through or panics if there is no
// outbound for this service
//
// MAY be called multiple times for a request. The returned outbound MUST
// have already been started.
GetUnaryOutbound() UnaryOutbound
GetOnewayOutbound() OnewayOutbound
}
| 1 | 16,576 | We can't do this. Adding a method to an interface is a breaking change. This was an oversight on our part when we converted ClientConfig from a struct to an interface. OutboundConfig was introduced to fix this, the idea being that we should use OutboundConfig everywhere instead of ClientConfig. In case of Dispatcher, we want to use the OutboundConfig and MustOutboundConfig methods instead of ClientConfig, and for streaming, the client needs to attempt to upcast a ClientConfig to OutboundConfig and error out if that's not possible. | yarpc-yarpc-go | go |
@@ -32,7 +32,9 @@ namespace domain {
DomainParticipant::DomainParticipant(
uint32_t did)
: dds::core::Reference<detail::DomainParticipant>(
- eprosima::fastdds::dds::DomainParticipantFactory::get_instance()->create_participant(did))
+ eprosima::fastdds::dds::DomainParticipantFactory::get_instance()->create_participant(
+ did,
+ eprosima::fastdds::dds::DomainParticipantFactory::get_instance()->get_default_participant_qos()))
{
}
| 1 | /*
* Copyright 2019, Proyectos y Sistemas de Mantenimiento SL (eProsima).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* @file DomainParticipantImpl.cpp
*/
#include <dds/domain/DomainParticipant.hpp>
#include <dds/domain/DomainParticipantListener.hpp>
#include <dds/core/Exception.hpp>
#include <fastdds/dds/domain/DomainParticipantFactory.hpp>
#include <fastdds/rtps/common/Time_t.h>
namespace dds {
namespace domain {
DomainParticipant::DomainParticipant(
uint32_t did)
: dds::core::Reference<detail::DomainParticipant>(
eprosima::fastdds::dds::DomainParticipantFactory::get_instance()->create_participant(did))
{
}
DomainParticipant::DomainParticipant(
uint32_t id,
const dds::domain::qos::DomainParticipantQos& qos,
dds::domain::DomainParticipantListener* listener,
const dds::core::status::StatusMask& mask)
: dds::core::Reference<detail::DomainParticipant>(
eprosima::fastdds::dds::DomainParticipantFactory::get_instance()->create_participant(
id,
qos,
listener,
mask))
{
}
DomainParticipant::~DomainParticipant()
{
}
//void DomainParticipant::listener(
// Listener* /*listener*/,
// const ::dds::core::status::StatusMask& /*event_mask*/)
//{
// this->delegate()->set_listener(listener /*, event_mask*/);
//}
//typename DomainParticipant::Listener* DomainParticipant::listener() const
//{
// return dynamic_cast<Listener*>(this->delegate()->get_listener());
//}
const dds::domain::qos::DomainParticipantQos& DomainParticipant::qos() const
{
return this->delegate()->get_qos();
}
void DomainParticipant::qos(
const dds::domain::qos::DomainParticipantQos& qos)
{
ReturnCode_t code = this->delegate()->set_qos(qos);
if (code == ReturnCode_t::RETCODE_IMMUTABLE_POLICY)
{
throw dds::core::ImmutablePolicyError("Immutable Qos");
}
else if ( code == ReturnCode_t::RETCODE_INCONSISTENT_POLICY)
{
throw dds::core::InconsistentPolicyError("Inconsistent Qos");
}
else if (code == ReturnCode_t::RETCODE_UNSUPPORTED)
{
throw dds::core::UnsupportedError("Unsupported values on DomainParticipantQos");
}
}
//uint32_t DomainParticipant::domain_id() const
//{
// return this->delegate()->get_domain_id();
//}
//void DomainParticipant::assert_liveliness()
//{
// this->delegate()->assert_liveliness();
//}
//bool DomainParticipant::contains_entity(
// const ::dds::core::InstanceHandle& /*handle*/)
//{
// return this->delegate()->contains_entity(handle);
//}
//dds::core::Time DomainParticipant::current_time() const
//{
// eprosima::fastrtps::Time_t now;
// this->delegate()->get_current_time(now);
// return core::Time(now.seconds, now.nanosec);
//}
dds::domain::qos::DomainParticipantQos DomainParticipant::default_participant_qos()
{
qos::DomainParticipantQos qos;
eprosima::fastdds::dds::DomainParticipantFactory::get_instance()->get_default_participant_qos(qos);
return qos;
}
void DomainParticipant::default_participant_qos(
const ::dds::domain::qos::DomainParticipantQos& qos)
{
ReturnCode_t code = eprosima::fastdds::dds::DomainParticipantFactory::get_instance()->set_default_participant_qos(
qos);
if (code == ReturnCode_t::RETCODE_INCONSISTENT_POLICY)
{
throw dds::core::InconsistentPolicyError("Inconsistent Qos");
}
else if (code == ReturnCode_t::RETCODE_UNSUPPORTED)
{
throw dds::core::UnsupportedError("Unsupported values on DomainParticipantQos");
}
}
dds::pub::qos::PublisherQos DomainParticipant::default_publisher_qos() const
{
return this->delegate()->get_default_publisher_qos();
}
DomainParticipant& DomainParticipant::default_publisher_qos(
const ::dds::pub::qos::PublisherQos& qos)
{
ReturnCode_t code = this->delegate()->set_default_publisher_qos(qos);
if (code == ReturnCode_t::RETCODE_INCONSISTENT_POLICY)
{
throw dds::core::InconsistentPolicyError("Inconsistent Qos");
}
else if (code == ReturnCode_t::RETCODE_UNSUPPORTED)
{
throw dds::core::UnsupportedError("Unsupported values on PublisherQos");
}
return *this;
}
dds::sub::qos::SubscriberQos DomainParticipant::default_subscriber_qos() const
{
return this->delegate()->get_default_subscriber_qos();
}
DomainParticipant& DomainParticipant::default_subscriber_qos(
const ::dds::sub::qos::SubscriberQos& qos)
{
ReturnCode_t result = delegate()->set_default_subscriber_qos(qos);
if (result == ReturnCode_t::RETCODE_INCONSISTENT_POLICY)
{
throw dds::core::InconsistentPolicyError("Inconsistent Qos");
}
if (result == ReturnCode_t::RETCODE_UNSUPPORTED)
{
throw dds::core::UnsupportedError("Unsupported Qos");
}
return *this;
}
dds::topic::qos::TopicQos DomainParticipant::default_topic_qos() const
{
return this->delegate()->get_default_topic_qos();
}
DomainParticipant& DomainParticipant::default_topic_qos(
const dds::topic::qos::TopicQos& qos)
{
ReturnCode_t ret_code = this->delegate()->set_default_topic_qos(qos);
if (ret_code == ReturnCode_t::RETCODE_INCONSISTENT_POLICY)
{
throw dds::core::InconsistentPolicyError("Inconsistent Qos");
}
else if (ret_code == ReturnCode_t::RETCODE_UNSUPPORTED)
{
throw dds::core::UnsupportedError("Unsupported values on TopicQos");
}
return *this;
}
DomainParticipant& DomainParticipant::operator <<(
const dds::domain::qos::DomainParticipantQos& qos)
{
this->qos(qos);
return *this;
}
const DomainParticipant& DomainParticipant::operator >>(
dds::domain::qos::DomainParticipantQos& qos) const
{
qos = this->qos();
return *this;
}
} //namespace domain
} //namespace dds
| 1 | 18,620 | Why not using the constant `PARTICIPANT_QOS_DEFAULT` here? | eProsima-Fast-DDS | cpp |
@@ -29,7 +29,8 @@ from pkg_resources import resource_stream
def getScalarMetricWithTimeOfDayAnomalyParams(metricData,
minVal=None,
maxVal=None,
- minResolution=None):
+ minResolution=None,
+ tmImplementation = "cpp"):
"""
Return a dict that can be used to create an anomaly model via OPF's
ModelFactory. | 1 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import json
import numpy as np
import os
from pkg_resources import resource_stream
def getScalarMetricWithTimeOfDayAnomalyParams(metricData,
minVal=None,
maxVal=None,
minResolution=None):
"""
Return a dict that can be used to create an anomaly model via OPF's
ModelFactory.
:param metricData: numpy array of metric data. Used to calculate minVal
and maxVal if either is unspecified
:param minVal: minimum value of metric. Used to set up encoders. If None
will be derived from metricData.
:param maxVal: maximum value of metric. Used to set up input encoders. If
None will be derived from metricData
:param minResolution: minimum resolution of metric. Used to set up
encoders. If None, will use default value of 0.001.
:returns: a dict containing "modelConfig" and "inferenceArgs" top-level
properties. The value of the "modelConfig" property is for passing to
the OPF `ModelFactory.create()` method as the `modelConfig` parameter. The
"inferenceArgs" property is for passing to the resulting model's
`enableInference()` method as the inferenceArgs parameter. NOTE: the
timestamp field corresponds to input "c0"; the predicted field corresponds
to input "c1".
:rtype: dict
Example:
from nupic.frameworks.opf.modelfactory import ModelFactory
from nupic.frameworks.opf.common_models.cluster_params import (
getScalarMetricWithTimeOfDayAnomalyParams)
params = getScalarMetricWithTimeOfDayAnomalyParams(
metricData=[0],
minVal=0.0,
maxVal=100.0)
model = ModelFactory.create(modelConfig=params["modelConfig"])
model.enableLearning()
model.enableInference(params["inferenceArgs"])
"""
# Default values
if minResolution is None:
minResolution = 0.001
# Compute min and/or max from the data if not specified
if minVal is None or maxVal is None:
compMinVal, compMaxVal = _rangeGen(metricData)
if minVal is None:
minVal = compMinVal
if maxVal is None:
maxVal = compMaxVal
# Handle the corner case where the incoming min and max are the same
if minVal == maxVal:
maxVal = minVal + 1
# Load model parameters and update encoder params
paramFileRelativePath = os.path.join(
"anomaly_params_random_encoder",
"best_single_metric_anomaly_params.json")
with resource_stream(__name__, paramFileRelativePath) as infile:
paramSet = json.load(infile)
_fixupRandomEncoderParams(paramSet, minVal, maxVal, minResolution)
return paramSet
def _rangeGen(data, std=1):
"""
Return reasonable min/max values to use given the data.
"""
dataStd = np.std(data)
if dataStd == 0:
dataStd = 1
minval = np.min(data) - std * dataStd
maxval = np.max(data) + std * dataStd
return minval, maxval
def _fixupRandomEncoderParams(params, minVal, maxVal, minResolution):
"""
Given model params, figure out the correct parameters for the
RandomDistributed encoder. Modifies params in place.
"""
encodersDict = (
params["modelConfig"]["modelParams"]["sensorParams"]["encoders"]
)
for encoder in encodersDict.itervalues():
if encoder is not None:
if encoder["type"] == "RandomDistributedScalarEncoder":
resolution = max(minResolution,
(maxVal - minVal) / encoder.pop("numBuckets")
)
encodersDict["c1"]["resolution"] = resolution
| 1 | 20,909 | Shouldn't this be `tm_cpp` to match `temporalImp`? (There are multiple CPP implementations, so 'cpp' is ambiguous.) | numenta-nupic | py |
@@ -30,7 +30,18 @@ module RSpec
# @param line [String] current code line
# @return [String] relative path to line
def self.relative_path(line)
- line = line.sub(File.expand_path("."), ".")
+      # Matches the current path either at the beginning of the input or preceded by
+      # whitespace, and either followed by the path separator, whitespace, or the end of the string.
+ # Match groups are the character before and the character after the string if any.
+ #
+ # http://rubular.com/r/fT0gmX6VJX
+ # http://rubular.com/r/duOrD4i3wb
+ # http://rubular.com/r/sbAMHFrOx1
+ #
+
+ regex = /(\A|\s)#{File.expand_path('.')}(#{File::SEPARATOR}|\s|\Z)/
+
+ line = line.sub(regex, "\\1.\\2")
line = line.sub(/\A([^:]+:\d+)$/, '\\1')
return nil if line == '-e:1'
line | 1 | module RSpec
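
The hunk above anchors the expanded working directory between a leading boundary (start of string or whitespace) and a trailing one (separator, whitespace, or end of string). The same idea sketched in Go (the one language used for examples here), with regexp.QuoteMeta added as a precaution because a literal path interpolated into a pattern should be escaped:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	cwd := "/home/user/project" // stands in for File.expand_path('.')
	re := regexp.MustCompile(`(^|\s)` + regexp.QuoteMeta(cwd) + `(/|\s|$)`)
	line := "spec at /home/user/project/spec/a_spec.rb:12"
	// Keep the boundary characters, replace the path itself with "."
	fmt.Println(re.ReplaceAllString(line, "${1}.${2}"))
	// Output: spec at ./spec/a_spec.rb:12
}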
module Core
# Each ExampleGroup class and Example instance owns an instance of
# Metadata, which is Hash extended to support lazy evaluation of values
# associated with keys that may or may not be used by any example or group.
#
# In addition to metadata that is used internally, this also stores
# user-supplied metadata, e.g.
#
# describe Something, :type => :ui do
# it "does something", :slow => true do
# # ...
# end
# end
#
# `:type => :ui` is stored in the Metadata owned by the example group, and
# `:slow => true` is stored in the Metadata owned by the example. These can
# then be used to select which examples are run using the `--tag` option on
# the command line, or several methods on `Configuration` used to filter a
# run (e.g. `filter_run_including`, `filter_run_excluding`, etc).
#
# @see Example#metadata
# @see ExampleGroup.metadata
# @see FilterManager
# @see Configuration#filter_run_including
# @see Configuration#filter_run_excluding
module Metadata
# @api private
#
# @param line [String] current code line
# @return [String] relative path to line
def self.relative_path(line)
line = line.sub(File.expand_path("."), ".")
line = line.sub(/\A([^:]+:\d+)$/, '\\1')
return nil if line == '-e:1'
line
rescue SecurityError
nil
end
# @private
# Used internally to build a hash from an args array.
# Symbols are converted into hash keys with a value of `true`.
# This is done to support simple tagging using a symbol, rather
# than needing to do `:symbol => true`.
def self.build_hash_from(args, warn_about_example_group_filtering=false)
hash = args.last.is_a?(Hash) ? args.pop : {}
hash[args.pop] = true while args.last.is_a?(Symbol)
if warn_about_example_group_filtering && hash.key?(:example_group)
RSpec.deprecate("Filtering by an `:example_group` subhash",
:replacement => "the subhash to filter directly")
end
hash
end
# @private
def self.backtrace_from(block)
return caller unless block.respond_to?(:source_location)
[block.source_location.join(':')]
end
# @private
# Used internally to populate metadata hashes with computed keys
# managed by RSpec.
class HashPopulator
attr_reader :metadata, :user_metadata, :description_args, :block
def initialize(metadata, user_metadata, description_args, block)
@metadata = metadata
@user_metadata = user_metadata
@description_args = description_args
@block = block
end
def populate
ensure_valid_user_keys
metadata[:execution_result] = Example::ExecutionResult.new
metadata[:block] = block
metadata[:description_args] = description_args
metadata[:description] = build_description_from(*metadata[:description_args])
metadata[:full_description] = full_description
metadata[:described_class] = described_class
populate_location_attributes
metadata.update(user_metadata)
RSpec.configuration.apply_derived_metadata_to(metadata)
end
private
def populate_location_attributes
backtrace = user_metadata.delete(:caller)
file_path, line_number = if backtrace
file_path_and_line_number_from(backtrace)
elsif block.respond_to?(:source_location)
block.source_location
else
file_path_and_line_number_from(caller)
end
file_path = Metadata.relative_path(file_path)
metadata[:file_path] = file_path
metadata[:line_number] = line_number.to_i
metadata[:location] = "#{file_path}:#{line_number}"
end
def file_path_and_line_number_from(backtrace)
first_caller_from_outside_rspec = backtrace.find { |l| l !~ CallerFilter::LIB_REGEX }
first_caller_from_outside_rspec ||= backtrace.first
/(.+?):(\d+)(?:|:\d+)/.match(first_caller_from_outside_rspec).captures
end
def description_separator(parent_part, child_part)
if parent_part.is_a?(Module) && child_part =~ /^(#|::|\.)/
''
else
' '
end
end
def build_description_from(parent_description=nil, my_description=nil)
return parent_description.to_s unless my_description
separator = description_separator(parent_description, my_description)
parent_description.to_s + separator + my_description.to_s
end
def ensure_valid_user_keys
RESERVED_KEYS.each do |key|
next unless user_metadata.key?(key)
raise <<-EOM.gsub(/^\s+\|/, '')
|#{"*" * 50}
|:#{key} is not allowed
|
|RSpec reserves some hash keys for its own internal use,
|including :#{key}, which is used on:
|
| #{CallerFilter.first_non_rspec_line}.
|
|Here are all of RSpec's reserved hash keys:
|
| #{RESERVED_KEYS.join("\n ")}
|#{"*" * 50}
EOM
end
end
end
# @private
class ExampleHash < HashPopulator
def self.create(group_metadata, user_metadata, description, block)
example_metadata = group_metadata.dup
group_metadata = Hash.new(&ExampleGroupHash.backwards_compatibility_default_proc do |hash|
hash[:parent_example_group]
end)
group_metadata.update(example_metadata)
example_metadata[:example_group] = group_metadata
example_metadata.delete(:parent_example_group)
hash = new(example_metadata, user_metadata, [description].compact, block)
hash.populate
hash.metadata
end
private
def described_class
metadata[:example_group][:described_class]
end
def full_description
build_description_from(
metadata[:example_group][:full_description],
metadata[:description]
)
end
end
# @private
class ExampleGroupHash < HashPopulator
def self.create(parent_group_metadata, user_metadata, *args, &block)
group_metadata = hash_with_backwards_compatibility_default_proc
if parent_group_metadata
group_metadata.update(parent_group_metadata)
group_metadata[:parent_example_group] = parent_group_metadata
end
hash = new(group_metadata, user_metadata, args, block)
hash.populate
hash.metadata
end
def self.hash_with_backwards_compatibility_default_proc
Hash.new(&backwards_compatibility_default_proc { |hash| hash })
end
def self.backwards_compatibility_default_proc(&example_group_selector)
Proc.new do |hash, key|
case key
when :example_group
# We commonly get here when rspec-core is applying a previously configured
# filter rule, such as when a gem configures:
#
# RSpec.configure do |c|
# c.include MyGemHelpers, :example_group => { :file_path => /spec\/my_gem_specs/ }
# end
#
# It's confusing for a user to get a deprecation at this point in the code, so instead
# we issue a deprecation from the config APIs that take a metadata hash, and MetadataFilter
# sets this thread local to silence the warning here since it would be so confusing.
unless RSpec.thread_local_metadata[:silence_metadata_example_group_deprecations]
RSpec.deprecate("The `:example_group` key in an example group's metadata hash",
:replacement => "the example group's hash directly for the " \
"computed keys and `:parent_example_group` to access the parent " \
"example group metadata")
end
group_hash = example_group_selector.call(hash)
LegacyExampleGroupHash.new(group_hash) if group_hash
when :example_group_block
RSpec.deprecate("`metadata[:example_group_block]`",
:replacement => "`metadata[:block]`")
hash[:block]
when :describes
RSpec.deprecate("`metadata[:describes]`",
:replacement => "`metadata[:described_class]`")
hash[:described_class]
end
end
end
private
def described_class
candidate = metadata[:description_args].first
return candidate unless NilClass === candidate || String === candidate
parent_group = metadata[:parent_example_group]
parent_group && parent_group[:described_class]
end
def full_description
description = metadata[:description]
parent_example_group = metadata[:parent_example_group]
return description unless parent_example_group
parent_description = parent_example_group[:full_description]
separator = description_separator(parent_example_group[:description_args].last,
metadata[:description_args].first)
parent_description + separator + description
end
end
# @private
RESERVED_KEYS = [
:description,
:example_group,
:parent_example_group,
:execution_result,
:file_path,
:full_description,
:line_number,
:location,
:block
]
end
# Mixin that makes the including class imitate a hash for backwards
# compatibility. The including class should use `attr_accessor` to
# declare attributes.
# @private
module HashImitatable
def self.included(klass)
klass.extend ClassMethods
end
def to_h
hash = extra_hash_attributes.dup
self.class.hash_attribute_names.each do |name|
hash[name] = __send__(name)
end
hash
end
(Hash.public_instance_methods - Object.public_instance_methods).each do |method_name|
next if [:[], :[]=, :to_h].include?(method_name.to_sym)
define_method(method_name) do |*args, &block|
issue_deprecation(method_name, *args)
hash = hash_for_delegation
self.class.hash_attribute_names.each do |name|
hash.delete(name) unless instance_variable_defined?(:"@#{name}")
end
hash.__send__(method_name, *args, &block).tap do
# apply mutations back to the object
hash.each do |name, value|
if directly_supports_attribute?(name)
set_value(name, value)
else
extra_hash_attributes[name] = value
end
end
end
end
end
def [](key)
issue_deprecation(:[], key)
if directly_supports_attribute?(key)
get_value(key)
else
extra_hash_attributes[key]
end
end
def []=(key, value)
issue_deprecation(:[]=, key, value)
if directly_supports_attribute?(key)
set_value(key, value)
else
extra_hash_attributes[key] = value
end
end
private
def extra_hash_attributes
@extra_hash_attributes ||= {}
end
def directly_supports_attribute?(name)
self.class.hash_attribute_names.include?(name)
end
def get_value(name)
__send__(name)
end
def set_value(name, value)
__send__(:"#{name}=", value)
end
def hash_for_delegation
to_h
end
def issue_deprecation(_method_name, *_args)
# no-op by default: subclasses can override
end
# @private
module ClassMethods
def hash_attribute_names
@hash_attribute_names ||= []
end
def attr_accessor(*names)
hash_attribute_names.concat(names)
super
end
end
end
# @private
# Together with the example group metadata hash default block,
# provides backwards compatibility for the old `:example_group`
# key. In RSpec 2.x, the computed keys of a group's metadata
# were exposed from a nested subhash keyed by `[:example_group]`, and
# then the parent group's metadata was exposed by a sub-subhash
# keyed by `[:example_group][:example_group]`.
#
# In RSpec 3, we reorganized this so that the computed keys are
# exposed directly off the group metadata hash (no nesting), and
# `:parent_example_group` returns the parent group's metadata.
#
# Maintaining backwards compatibility was difficult: we wanted
# `:example_group` to return an object that:
#
# * Exposes the top-level metadata keys that used to be nested
# under `:example_group`.
# * Supports mutation (rspec-rails, for example, assigns
# `metadata[:example_group][:described_class]` when you use
# anonymous controller specs) such that changes are written
# back to the top-level metadata hash.
# * Exposes the parent group metadata as `[:example_group][:example_group]`.
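#
# A rough sketch of the resulting behavior (`group` stands for a
# hypothetical example group's metadata hash, `Foo` for some class):
#
#   legacy = group[:example_group]    # LegacyExampleGroupHash (deprecated)
#   legacy[:described_class]          # reads group[:described_class]
#   legacy[:described_class] = Foo    # writes back to `group`
#   legacy[:example_group]            # the parent group's legacy hash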
class LegacyExampleGroupHash
include HashImitatable
def initialize(metadata)
@metadata = metadata
parent_group_metadata = metadata.fetch(:parent_example_group) { {} }[:example_group]
self[:example_group] = parent_group_metadata if parent_group_metadata
end
def to_h
super.merge(@metadata)
end
private
def directly_supports_attribute?(name)
name != :example_group
end
def get_value(name)
@metadata[name]
end
def set_value(name, value)
@metadata[name] = value
end
end
end
end
| 1 | 14,070 | Need to remove this empty line for rubocop to be happy. | rspec-rspec-core | rb |
@@ -63,9 +63,9 @@ func newController(objects ...runtime.Object) (*fake.Clientset, *networkPolicyCo
addressGroupStore := store.NewAddressGroupStore()
internalNetworkPolicyStore := store.NewNetworkPolicyStore()
npController := NewNetworkPolicyController(client, informerFactory.Core().V1().Pods(), informerFactory.Core().V1().Namespaces(), informerFactory.Networking().V1().NetworkPolicies(), addressGroupStore, appliedToGroupStore, internalNetworkPolicyStore)
- npController.podListerSynced = alwaysReady
- npController.namespaceListerSynced = alwaysReady
- npController.networkPolicyListerSynced = alwaysReady
+ //npController.podListerSynced = alwaysReady
+ //npController.namespaceListerSynced = alwaysReady
+ //npController.networkPolicyListerSynced = alwaysReady
return client, &networkPolicyController{
npController,
informerFactory.Core().V1().Pods().Informer().GetStore(), | 1 | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package networkpolicy
import (
"bytes"
"fmt"
"net"
"reflect"
"testing"
"time"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
k8stesting "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"github.com/vmware-tanzu/antrea/pkg/apis/networking"
"github.com/vmware-tanzu/antrea/pkg/apiserver/storage"
"github.com/vmware-tanzu/antrea/pkg/controller/networkpolicy/store"
antreatypes "github.com/vmware-tanzu/antrea/pkg/controller/types"
)
var alwaysReady = func() bool { return true }
const informerDefaultResync time.Duration = 30 * time.Second
type networkPolicyController struct {
*NetworkPolicyController
podStore cache.Store
namespaceStore cache.Store
networkPolicyStore cache.Store
appliedToGroupStore storage.Interface
addressGroupStore storage.Interface
internalNetworkPolicyStore storage.Interface
informerFactory informers.SharedInformerFactory
}
func newController(objects ...runtime.Object) (*fake.Clientset, *networkPolicyController) {
client := newClientset(objects...)
informerFactory := informers.NewSharedInformerFactory(client, informerDefaultResync)
appliedToGroupStore := store.NewAppliedToGroupStore()
addressGroupStore := store.NewAddressGroupStore()
internalNetworkPolicyStore := store.NewNetworkPolicyStore()
npController := NewNetworkPolicyController(client, informerFactory.Core().V1().Pods(), informerFactory.Core().V1().Namespaces(), informerFactory.Networking().V1().NetworkPolicies(), addressGroupStore, appliedToGroupStore, internalNetworkPolicyStore)
npController.podListerSynced = alwaysReady
npController.namespaceListerSynced = alwaysReady
npController.networkPolicyListerSynced = alwaysReady
return client, &networkPolicyController{
npController,
informerFactory.Core().V1().Pods().Informer().GetStore(),
informerFactory.Core().V1().Namespaces().Informer().GetStore(),
informerFactory.Networking().V1().NetworkPolicies().Informer().GetStore(),
appliedToGroupStore,
addressGroupStore,
internalNetworkPolicyStore,
informerFactory,
}
}
func newClientset(objects ...runtime.Object) *fake.Clientset {
client := fake.NewSimpleClientset(objects...)
client.PrependReactor("create", "networkpolicies", k8stesting.ReactionFunc(func(action k8stesting.Action) (bool, runtime.Object, error) {
np := action.(k8stesting.CreateAction).GetObject().(*networkingv1.NetworkPolicy)
if np.ObjectMeta.GenerateName != "" {
np.ObjectMeta.Name = fmt.Sprintf("%s-%s", np.ObjectMeta.GenerateName, rand.String(8))
np.ObjectMeta.GenerateName = ""
}
return false, np, nil
}))
return client
}
func TestAddNetworkPolicy(t *testing.T) {
protocolTCP := networking.ProtocolTCP
intstr80, intstr81 := intstr.FromInt(80), intstr.FromInt(81)
int80, int81 := intstr.FromInt(80), intstr.FromInt(81)
selectorA := metav1.LabelSelector{MatchLabels: map[string]string{"foo1": "bar1"}}
selectorB := metav1.LabelSelector{MatchLabels: map[string]string{"foo2": "bar2"}}
selectorC := metav1.LabelSelector{MatchLabels: map[string]string{"foo3": "bar3"}}
selectorAll := metav1.LabelSelector{}
matchAllPeerEgress := matchAllPeer
matchAllPeerEgress.AddressGroups = []string{getNormalizedUID(toGroupSelector("", nil, &selectorAll).NormalizedName)}
tests := []struct {
name string
inputPolicy *networkingv1.NetworkPolicy
expPolicy *antreatypes.NetworkPolicy
expAppliedToGroups int
expAddressGroups int
}{
{
name: "default-allow-ingress",
inputPolicy: &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", Name: "npA", UID: "uidA"},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
Ingress: []networkingv1.NetworkPolicyIngressRule{{}},
},
},
expPolicy: &antreatypes.NetworkPolicy{
UID: "uidA",
Name: "npA",
Namespace: "nsA",
Rules: []networking.NetworkPolicyRule{{
Direction: networking.DirectionIn,
From: matchAllPeer,
Services: nil,
}},
AppliedToGroups: []string{getNormalizedUID(toGroupSelector("nsA", &metav1.LabelSelector{}, nil).NormalizedName)},
},
expAppliedToGroups: 1,
expAddressGroups: 0,
},
{
name: "default-allow-egress",
inputPolicy: &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", Name: "npB", UID: "uidB"},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
Egress: []networkingv1.NetworkPolicyEgressRule{{}},
},
},
expPolicy: &antreatypes.NetworkPolicy{
UID: "uidB",
Name: "npB",
Namespace: "nsA",
Rules: []networking.NetworkPolicyRule{{
Direction: networking.DirectionOut,
To: matchAllPeerEgress,
Services: nil,
}},
AppliedToGroups: []string{getNormalizedUID(toGroupSelector("nsA", &metav1.LabelSelector{}, nil).NormalizedName)},
},
expAppliedToGroups: 1,
expAddressGroups: 1,
},
{
name: "default-deny-ingress",
inputPolicy: &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", Name: "npC", UID: "uidC"},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
},
},
expPolicy: &antreatypes.NetworkPolicy{
UID: "uidC",
Name: "npC",
Namespace: "nsA",
Rules: []networking.NetworkPolicyRule{
denyAllIngressRule,
},
AppliedToGroups: []string{getNormalizedUID(toGroupSelector("nsA", &metav1.LabelSelector{}, nil).NormalizedName)},
},
expAppliedToGroups: 1,
expAddressGroups: 0,
},
{
name: "default-deny-egress",
inputPolicy: &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", Name: "npD", UID: "uidD"},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
},
},
expPolicy: &antreatypes.NetworkPolicy{
UID: "uidD",
Name: "npD",
Namespace: "nsA",
Rules: []networking.NetworkPolicyRule{
denyAllEgressRule,
},
AppliedToGroups: []string{getNormalizedUID(toGroupSelector("nsA", &metav1.LabelSelector{}, nil).NormalizedName)},
},
expAppliedToGroups: 1,
expAddressGroups: 0,
},
{
name: "rules-with-same-selectors",
inputPolicy: &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", Name: "npE", UID: "uidE"},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: selectorA,
Ingress: []networkingv1.NetworkPolicyIngressRule{
{
Ports: []networkingv1.NetworkPolicyPort{
{
Port: &intstr80,
},
},
From: []networkingv1.NetworkPolicyPeer{
{
PodSelector: &selectorB,
NamespaceSelector: &selectorC,
},
},
},
},
Egress: []networkingv1.NetworkPolicyEgressRule{
{
Ports: []networkingv1.NetworkPolicyPort{
{
Port: &intstr81,
},
},
To: []networkingv1.NetworkPolicyPeer{
{
PodSelector: &selectorB,
NamespaceSelector: &selectorC,
},
},
},
},
},
},
expPolicy: &antreatypes.NetworkPolicy{
UID: "uidE",
Name: "npE",
Namespace: "nsA",
Rules: []networking.NetworkPolicyRule{
{
Direction: networking.DirectionIn,
From: networking.NetworkPolicyPeer{
AddressGroups: []string{getNormalizedUID(toGroupSelector("nsA", &selectorB, &selectorC).NormalizedName)},
},
Services: []networking.Service{
{
Protocol: &protocolTCP,
Port: &int80,
},
},
},
{
Direction: networking.DirectionOut,
To: networking.NetworkPolicyPeer{
AddressGroups: []string{getNormalizedUID(toGroupSelector("nsA", &selectorB, &selectorC).NormalizedName)},
},
Services: []networking.Service{
{
Protocol: &protocolTCP,
Port: &int81,
},
},
},
},
AppliedToGroups: []string{getNormalizedUID(toGroupSelector("nsA", &selectorA, nil).NormalizedName)},
},
expAppliedToGroups: 1,
expAddressGroups: 1,
},
{
name: "rules-with-different-selectors",
inputPolicy: &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", Name: "npF", UID: "uidF"},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: selectorA,
Ingress: []networkingv1.NetworkPolicyIngressRule{
{
Ports: []networkingv1.NetworkPolicyPort{
{
Port: &intstr80,
},
},
From: []networkingv1.NetworkPolicyPeer{
{
PodSelector: &selectorB,
},
},
},
{
Ports: []networkingv1.NetworkPolicyPort{
{
Port: &intstr81,
},
},
From: []networkingv1.NetworkPolicyPeer{
{
NamespaceSelector: &selectorC,
},
},
},
},
},
},
expPolicy: &antreatypes.NetworkPolicy{
UID: "uidF",
Name: "npF",
Namespace: "nsA",
Rules: []networking.NetworkPolicyRule{
{
Direction: networking.DirectionIn,
From: networking.NetworkPolicyPeer{
AddressGroups: []string{getNormalizedUID(toGroupSelector("nsA", &selectorB, nil).NormalizedName)},
},
Services: []networking.Service{
{
Protocol: &protocolTCP,
Port: &int80,
},
},
},
{
Direction: networking.DirectionIn,
From: networking.NetworkPolicyPeer{
AddressGroups: []string{getNormalizedUID(toGroupSelector("nsA", nil, &selectorC).NormalizedName)},
},
Services: []networking.Service{
{
Protocol: &protocolTCP,
Port: &int81,
},
},
},
},
AppliedToGroups: []string{getNormalizedUID(toGroupSelector("nsA", &selectorA, nil).NormalizedName)},
},
expAppliedToGroups: 1,
expAddressGroups: 2,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, npc := newController()
npc.addNetworkPolicy(tt.inputPolicy)
key, _ := keyFunc(tt.inputPolicy)
actualPolicyObj, _, _ := npc.internalNetworkPolicyStore.Get(key)
actualPolicy := actualPolicyObj.(*antreatypes.NetworkPolicy)
if !reflect.DeepEqual(actualPolicy, tt.expPolicy) {
t.Errorf("addNetworkPolicy() got %v, want %v", actualPolicy, tt.expPolicy)
}
if actualAddressGroups := len(npc.addressGroupStore.List()); actualAddressGroups != tt.expAddressGroups {
t.Errorf("len(addressGroupStore.List()) got %v, want %v", actualAddressGroups, tt.expAddressGroups)
}
if actualAppliedToGroups := len(npc.appliedToGroupStore.List()); actualAppliedToGroups != tt.expAppliedToGroups {
t.Errorf("len(appliedToGroupStore.List()) got %v, want %v", actualAppliedToGroups, tt.expAppliedToGroups)
}
})
}
_, npc := newController()
for _, tt := range tests {
npc.addNetworkPolicy(tt.inputPolicy)
}
assert.Equal(t, 6, npc.GetNetworkPolicyNum(), "expected networkPolicy number is 6")
assert.Equal(t, 4, npc.GetAddressGroupNum(), "expected addressGroup number is 4")
assert.Equal(t, 2, npc.GetAppliedToGroupNum(), "expected appliedToGroup number is 2")
}
func TestDeleteNetworkPolicy(t *testing.T) {
npObj := getK8sNetworkPolicyObj()
ns := npObj.ObjectMeta.Namespace
pSelector := npObj.Spec.PodSelector
pLabelSelector, _ := metav1.LabelSelectorAsSelector(&pSelector)
apgID := getNormalizedUID(generateNormalizedName(ns, pLabelSelector, nil))
_, npc := newController()
npc.addNetworkPolicy(npObj)
npc.deleteNetworkPolicy(npObj)
_, found, _ := npc.appliedToGroupStore.Get(apgID)
assert.False(t, found, "expected AppliedToGroup to be deleted")
adgs := npc.addressGroupStore.List()
assert.Len(t, adgs, 0, "expected empty AddressGroup list")
key, _ := keyFunc(npObj)
_, found, _ = npc.internalNetworkPolicyStore.Get(key)
assert.False(t, found, "expected internal NetworkPolicy to be deleted")
}
func TestUpdateNetworkPolicy(t *testing.T) {
selectorA := metav1.LabelSelector{MatchLabels: map[string]string{"foo1": "bar1"}}
selectorB := metav1.LabelSelector{MatchLabels: map[string]string{"foo2": "bar2"}}
selectorC := metav1.LabelSelector{MatchLabels: map[string]string{"foo3": "bar3"}}
oldNP := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", Name: "npA", UID: "uidA"},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
Ingress: []networkingv1.NetworkPolicyIngressRule{
{
From: []networkingv1.NetworkPolicyPeer{
{
PodSelector: &selectorB,
NamespaceSelector: &selectorC,
},
},
},
},
Egress: []networkingv1.NetworkPolicyEgressRule{
{
To: []networkingv1.NetworkPolicyPeer{
{
PodSelector: &selectorB,
},
},
},
},
},
}
tests := []struct {
name string
updatedNetworkPolicy *networkingv1.NetworkPolicy
expNetworkPolicy *antreatypes.NetworkPolicy
expAppliedToGroups int
expAddressGroups int
}{
{
name: "update-pod-selector",
updatedNetworkPolicy: &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", Name: "npA", UID: "uidA"},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: selectorA,
Ingress: []networkingv1.NetworkPolicyIngressRule{
{
From: []networkingv1.NetworkPolicyPeer{
{
PodSelector: &selectorB,
NamespaceSelector: &selectorC,
},
},
},
},
Egress: []networkingv1.NetworkPolicyEgressRule{
{
To: []networkingv1.NetworkPolicyPeer{
{
PodSelector: &selectorB,
},
},
},
},
},
},
expNetworkPolicy: &antreatypes.NetworkPolicy{
UID: "uidA",
Name: "npA",
Namespace: "nsA",
Rules: []networking.NetworkPolicyRule{
{
Direction: networking.DirectionIn,
From: networking.NetworkPolicyPeer{
AddressGroups: []string{getNormalizedUID(toGroupSelector("nsA", &selectorB, &selectorC).NormalizedName)},
},
},
{
Direction: networking.DirectionOut,
To: networking.NetworkPolicyPeer{
AddressGroups: []string{getNormalizedUID(toGroupSelector("nsA", &selectorB, nil).NormalizedName)},
},
},
},
AppliedToGroups: []string{getNormalizedUID(toGroupSelector("nsA", &selectorA, nil).NormalizedName)},
},
expAppliedToGroups: 1,
expAddressGroups: 2,
},
{
name: "remove-ingress-rule",
updatedNetworkPolicy: &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", Name: "npA", UID: "uidA"},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
Egress: []networkingv1.NetworkPolicyEgressRule{
{
To: []networkingv1.NetworkPolicyPeer{
{
PodSelector: &selectorB,
NamespaceSelector: &selectorC,
},
},
},
},
},
},
expNetworkPolicy: &antreatypes.NetworkPolicy{
UID: "uidA",
Name: "npA",
Namespace: "nsA",
Rules: []networking.NetworkPolicyRule{
{
Direction: networking.DirectionOut,
To: networking.NetworkPolicyPeer{
AddressGroups: []string{getNormalizedUID(toGroupSelector("nsA", &selectorB, &selectorC).NormalizedName)},
},
},
},
AppliedToGroups: []string{getNormalizedUID(toGroupSelector("nsA", &metav1.LabelSelector{}, nil).NormalizedName)},
},
expAppliedToGroups: 1,
expAddressGroups: 1,
},
{
name: "remove-egress-rule",
updatedNetworkPolicy: &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", Name: "npA", UID: "uidA"},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
Ingress: []networkingv1.NetworkPolicyIngressRule{
{
From: []networkingv1.NetworkPolicyPeer{
{
PodSelector: &selectorB,
NamespaceSelector: &selectorC,
},
},
},
},
},
},
expNetworkPolicy: &antreatypes.NetworkPolicy{
UID: "uidA",
Name: "npA",
Namespace: "nsA",
Rules: []networking.NetworkPolicyRule{
{
Direction: networking.DirectionIn,
From: networking.NetworkPolicyPeer{
AddressGroups: []string{getNormalizedUID(toGroupSelector("nsA", &selectorB, &selectorC).NormalizedName)},
},
},
},
AppliedToGroups: []string{getNormalizedUID(toGroupSelector("nsA", &metav1.LabelSelector{}, nil).NormalizedName)},
},
expAppliedToGroups: 1,
expAddressGroups: 1,
},
{
name: "remove-all-rules",
updatedNetworkPolicy: &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", Name: "npA", UID: "uidA"},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
},
},
expNetworkPolicy: &antreatypes.NetworkPolicy{
UID: "uidA",
Name: "npA",
Namespace: "nsA",
Rules: []networking.NetworkPolicyRule{},
AppliedToGroups: []string{getNormalizedUID(toGroupSelector("nsA", &metav1.LabelSelector{}, nil).NormalizedName)},
},
expAppliedToGroups: 1,
expAddressGroups: 0,
},
{
name: "add-ingress-rule",
updatedNetworkPolicy: &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", Name: "npA", UID: "uidA"},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
Ingress: []networkingv1.NetworkPolicyIngressRule{
{
From: []networkingv1.NetworkPolicyPeer{
{
PodSelector: &selectorB,
NamespaceSelector: &selectorC,
},
},
},
{
From: []networkingv1.NetworkPolicyPeer{
{
NamespaceSelector: &selectorA,
},
},
},
},
Egress: []networkingv1.NetworkPolicyEgressRule{
{
To: []networkingv1.NetworkPolicyPeer{
{
PodSelector: &selectorB,
},
},
},
},
},
},
expNetworkPolicy: &antreatypes.NetworkPolicy{
UID: "uidA",
Name: "npA",
Namespace: "nsA",
Rules: []networking.NetworkPolicyRule{
{
Direction: networking.DirectionIn,
From: networking.NetworkPolicyPeer{
AddressGroups: []string{getNormalizedUID(toGroupSelector("nsA", &selectorB, &selectorC).NormalizedName)},
},
},
{
Direction: networking.DirectionIn,
From: networking.NetworkPolicyPeer{
AddressGroups: []string{getNormalizedUID(toGroupSelector("", nil, &selectorA).NormalizedName)},
},
},
{
Direction: networking.DirectionOut,
To: networking.NetworkPolicyPeer{
AddressGroups: []string{getNormalizedUID(toGroupSelector("nsA", &selectorB, nil).NormalizedName)},
},
},
},
AppliedToGroups: []string{getNormalizedUID(toGroupSelector("nsA", &metav1.LabelSelector{}, nil).NormalizedName)},
},
expAppliedToGroups: 1,
expAddressGroups: 3,
},
{
name: "update-egress-rule-selector",
updatedNetworkPolicy: &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", Name: "npA", UID: "uidA"},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
Ingress: []networkingv1.NetworkPolicyIngressRule{
{
From: []networkingv1.NetworkPolicyPeer{
{
PodSelector: &selectorB,
NamespaceSelector: &selectorC,
},
},
},
},
Egress: []networkingv1.NetworkPolicyEgressRule{
{
To: []networkingv1.NetworkPolicyPeer{
{
PodSelector: &selectorA,
},
},
},
},
},
},
expNetworkPolicy: &antreatypes.NetworkPolicy{
UID: "uidA",
Name: "npA",
Namespace: "nsA",
Rules: []networking.NetworkPolicyRule{
{
Direction: networking.DirectionIn,
From: networking.NetworkPolicyPeer{
AddressGroups: []string{getNormalizedUID(toGroupSelector("nsA", &selectorB, &selectorC).NormalizedName)},
},
},
{
Direction: networking.DirectionOut,
To: networking.NetworkPolicyPeer{
AddressGroups: []string{getNormalizedUID(toGroupSelector("nsA", &selectorA, nil).NormalizedName)},
},
},
},
AppliedToGroups: []string{getNormalizedUID(toGroupSelector("nsA", &metav1.LabelSelector{}, nil).NormalizedName)},
},
expAppliedToGroups: 1,
expAddressGroups: 2,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, npc := newController()
npc.addNetworkPolicy(oldNP)
npc.updateNetworkPolicy(oldNP, tt.updatedNetworkPolicy)
key, _ := keyFunc(oldNP)
actualPolicyObj, _, _ := npc.internalNetworkPolicyStore.Get(key)
actualPolicy := actualPolicyObj.(*antreatypes.NetworkPolicy)
if actualAppliedToGroups := len(npc.appliedToGroupStore.List()); actualAppliedToGroups != tt.expAppliedToGroups {
t.Errorf("updateNetworkPolicy() got %v, want %v", actualAppliedToGroups, tt.expAppliedToGroups)
}
if actualAddressGroups := len(npc.addressGroupStore.List()); actualAddressGroups != tt.expAddressGroups {
t.Errorf("updateNetworkPolicy() got %v, want %v", actualAddressGroups, tt.expAddressGroups)
}
if !reflect.DeepEqual(actualPolicy, tt.expNetworkPolicy) {
t.Errorf("updateNetworkPolicy() got %#v, want %#v", actualPolicy, tt.expNetworkPolicy)
}
})
}
}
func TestAddPod(t *testing.T) {
selectorSpec := metav1.LabelSelector{
MatchLabels: map[string]string{"group": "appliedTo"},
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "role",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"db", "app"},
},
},
}
selectorIn := metav1.LabelSelector{
MatchLabels: map[string]string{"inGroup": "inAddress"},
}
selectorOut := metav1.LabelSelector{
MatchLabels: map[string]string{"outGroup": "outAddress"},
}
testNPObj := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "npA",
Namespace: "nsA",
},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: selectorSpec,
Ingress: []networkingv1.NetworkPolicyIngressRule{
{
From: []networkingv1.NetworkPolicyPeer{
{
PodSelector: &selectorIn,
},
},
},
},
Egress: []networkingv1.NetworkPolicyEgressRule{
{
To: []networkingv1.NetworkPolicyPeer{
{
PodSelector: &selectorOut,
},
},
},
},
},
}
tests := []struct {
name string
addedPod *v1.Pod
appGroupMatch bool
inAddressGroupMatch bool
outAddressGroupMatch bool
}{
{
name: "not-match-spec-podselector-match-labels",
addedPod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "podA",
Namespace: "nsA",
Labels: map[string]string{"group": "appliedTo"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "container-1",
}},
NodeName: "nodeA",
},
Status: v1.PodStatus{
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
},
PodIP: "1.2.3.4",
},
},
appGroupMatch: false,
inAddressGroupMatch: false,
outAddressGroupMatch: false,
},
{
name: "not-match-spec-podselector-match-exprs",
addedPod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "podA",
Namespace: "nsA",
Labels: map[string]string{"role": "db"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "container-1",
}},
NodeName: "nodeA",
},
Status: v1.PodStatus{
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
},
PodIP: "1.2.3.4",
},
},
appGroupMatch: false,
inAddressGroupMatch: false,
outAddressGroupMatch: false,
},
{
name: "match-spec-podselector",
addedPod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "podA",
Namespace: "nsA",
Labels: map[string]string{
"role": "db",
"group": "appliedTo",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "container-1",
}},
NodeName: "nodeA",
},
Status: v1.PodStatus{
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
},
PodIP: "1.2.3.4",
},
},
appGroupMatch: true,
inAddressGroupMatch: false,
outAddressGroupMatch: false,
},
{
name: "match-ingress-podselector",
addedPod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "podA",
Namespace: "nsA",
Labels: map[string]string{"inGroup": "inAddress"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "container-1",
}},
NodeName: "nodeA",
},
Status: v1.PodStatus{
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
},
PodIP: "1.2.3.4",
},
},
appGroupMatch: false,
inAddressGroupMatch: true,
outAddressGroupMatch: false,
},
{
name: "match-egress-podselector",
addedPod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "podA",
Namespace: "nsA",
Labels: map[string]string{"outGroup": "outAddress"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "container-1",
}},
NodeName: "nodeA",
},
Status: v1.PodStatus{
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
},
PodIP: "1.2.3.4",
},
},
appGroupMatch: false,
inAddressGroupMatch: false,
outAddressGroupMatch: true,
},
{
name: "match-all-selectors",
addedPod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "podA",
Namespace: "nsA",
Labels: map[string]string{
"role": "app",
"group": "appliedTo",
"inGroup": "inAddress",
"outGroup": "outAddress",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "container-1",
}},
NodeName: "nodeA",
},
Status: v1.PodStatus{
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
},
PodIP: "1.2.3.4",
},
},
appGroupMatch: true,
inAddressGroupMatch: true,
outAddressGroupMatch: true,
},
{
name: "match-spec-podselector-no-podip",
addedPod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "podA",
Namespace: "nsA",
Labels: map[string]string{"group": "appliedTo"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "container-1",
}},
NodeName: "nodeA",
},
Status: v1.PodStatus{
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
},
},
},
appGroupMatch: false,
inAddressGroupMatch: false,
outAddressGroupMatch: false,
},
{
name: "match-rule-podselector-no-ip",
addedPod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "podA",
Namespace: "nsA",
Labels: map[string]string{"inGroup": "inAddress"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "container-1",
}},
NodeName: "nodeA",
},
Status: v1.PodStatus{
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
},
},
},
appGroupMatch: false,
inAddressGroupMatch: false,
outAddressGroupMatch: false,
},
{
name: "no-match-spec-podselector",
addedPod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "podA",
Namespace: "nsA",
Labels: map[string]string{"group": "none"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "container-1",
}},
NodeName: "nodeA",
},
Status: v1.PodStatus{
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
},
PodIP: "1.2.3.4",
},
},
appGroupMatch: false,
inAddressGroupMatch: false,
outAddressGroupMatch: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, npc := newController()
npc.addNetworkPolicy(testNPObj)
npc.podStore.Add(tt.addedPod)
appGroupID := getNormalizedUID(toGroupSelector("nsA", &selectorSpec, nil).NormalizedName)
inGroupID := getNormalizedUID(toGroupSelector("nsA", &selectorIn, nil).NormalizedName)
outGroupID := getNormalizedUID(toGroupSelector("nsA", &selectorOut, nil).NormalizedName)
npc.syncAppliedToGroup(appGroupID)
npc.syncAddressGroup(inGroupID)
npc.syncAddressGroup(outGroupID)
appGroupObj, _, _ := npc.appliedToGroupStore.Get(appGroupID)
appGroup := appGroupObj.(*antreatypes.AppliedToGroup)
podsAdded := appGroup.PodsByNode["nodeA"]
updatedInAddrGroupObj, _, _ := npc.addressGroupStore.Get(inGroupID)
updatedInAddrGroup := updatedInAddrGroupObj.(*antreatypes.AddressGroup)
updatedOutAddrGroupObj, _, _ := npc.addressGroupStore.Get(outGroupID)
updatedOutAddrGroup := updatedOutAddrGroupObj.(*antreatypes.AddressGroup)
if tt.appGroupMatch {
assert.Len(t, podsAdded, 1, "expected Pod to match AppliedToGroup")
} else {
assert.Len(t, podsAdded, 0, "expected Pod not to match AppliedToGroup")
}
memberPod := &networking.GroupMemberPod{IP: ipStrToIPAddress("1.2.3.4")}
assert.Equal(t, tt.inAddressGroupMatch, updatedInAddrGroup.Pods.Has(memberPod))
assert.Equal(t, tt.outAddressGroupMatch, updatedOutAddrGroup.Pods.Has(memberPod))
})
}
}
func TestDeletePod(t *testing.T) {
ns := metav1.NamespaceDefault
nodeName := "node1"
matchNPName := "testNP"
matchLabels := map[string]string{"group": "appliedTo"}
ruleLabels := map[string]string{"group": "address"}
matchSelector := metav1.LabelSelector{
MatchLabels: matchLabels,
}
mLabelSelector, _ := metav1.LabelSelectorAsSelector(&matchSelector)
inPSelector := metav1.LabelSelector{
MatchLabels: ruleLabels,
}
matchAppGID := getNormalizedUID(generateNormalizedName(ns, mLabelSelector, nil))
ingressRules := []networkingv1.NetworkPolicyIngressRule{
{
From: []networkingv1.NetworkPolicyPeer{
{
PodSelector: &inPSelector,
},
},
},
}
matchNPObj := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: matchNPName,
Namespace: ns,
},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: matchSelector,
Ingress: ingressRules,
},
}
p1IP := "1.1.1.1"
p2IP := "2.2.2.2"
p1 := getPod("p1", ns, "", p1IP, false)
// Ensure Pod p1 matches AppliedToGroup.
p1.Labels = matchLabels
p2 := getPod("p2", ns, "", p2IP, false)
// Ensure Pod p2 matches AddressGroup.
p2.Labels = ruleLabels
_, npc := newController()
npc.addNetworkPolicy(matchNPObj)
npc.podStore.Add(p1)
npc.podStore.Add(p2)
npc.syncAppliedToGroup(matchAppGID)
// Retrieve AddressGroup.
adgs := npc.addressGroupStore.List()
// Given the test NetworkPolicy, there should be exactly one AddressGroup.
addrGroupObj := adgs[0]
addrGroup := addrGroupObj.(*antreatypes.AddressGroup)
npc.syncAddressGroup(addrGroup.Name)
// Delete Pod P1 matching the AppliedToGroup.
npc.podStore.Delete(p1)
npc.syncAppliedToGroup(matchAppGID)
appGroupObj, _, _ := npc.appliedToGroupStore.Get(matchAppGID)
appGroup := appGroupObj.(*antreatypes.AppliedToGroup)
podsAdded := appGroup.PodsByNode[nodeName]
// Ensure Pod1 reference is removed from AppliedToGroup.
assert.Len(t, podsAdded, 0, "expected Pod to be deleted from AppliedToGroup")
// Delete Pod P2 matching the NetworkPolicy Rule.
npc.podStore.Delete(p2)
npc.syncAddressGroup(addrGroup.Name)
updatedAddrGroupObj, _, _ := npc.addressGroupStore.Get(addrGroup.Name)
updatedAddrGroup := updatedAddrGroupObj.(*antreatypes.AddressGroup)
// Ensure Pod2 IP is removed from AddressGroup.
memberPod2 := &networking.GroupMemberPod{IP: ipStrToIPAddress(p2IP)}
assert.False(t, updatedAddrGroup.Pods.Has(memberPod2))
}
func TestAddNamespace(t *testing.T) {
selectorSpec := metav1.LabelSelector{}
selectorIn := metav1.LabelSelector{
MatchLabels: map[string]string{"inGroup": "inAddress"},
}
selectorOut := metav1.LabelSelector{
MatchLabels: map[string]string{"outGroup": "outAddress"},
}
testNPObj := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "npA",
Namespace: "nsA",
},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: selectorSpec,
Ingress: []networkingv1.NetworkPolicyIngressRule{
{
From: []networkingv1.NetworkPolicyPeer{
{
NamespaceSelector: &selectorIn,
},
},
},
},
Egress: []networkingv1.NetworkPolicyEgressRule{
{
To: []networkingv1.NetworkPolicyPeer{
{
NamespaceSelector: &selectorOut,
},
},
},
},
},
}
tests := []struct {
name string
addedNamespace *v1.Namespace
inAddressGroupMatch bool
outAddressGroupMatch bool
}{
{
name: "match-namespace-ingress-rule",
addedNamespace: &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "nsA",
Labels: map[string]string{"inGroup": "inAddress"},
},
},
inAddressGroupMatch: true,
outAddressGroupMatch: false,
},
{
name: "match-namespace-egress-rule",
addedNamespace: &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "nsA",
Labels: map[string]string{"outGroup": "outAddress"},
},
},
inAddressGroupMatch: false,
outAddressGroupMatch: true,
},
{
name: "match-namespace-all",
addedNamespace: &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "nsA",
Labels: map[string]string{
"inGroup": "inAddress",
"outGroup": "outAddress",
},
},
},
inAddressGroupMatch: true,
outAddressGroupMatch: true,
},
{
name: "match-namespace-none",
addedNamespace: &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "nsA",
Labels: map[string]string{"group": "none"},
},
},
inAddressGroupMatch: false,
outAddressGroupMatch: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, npc := newController()
npc.addNetworkPolicy(testNPObj)
npc.namespaceStore.Add(tt.addedNamespace)
p1 := getPod("p1", "nsA", "nodeA", "1.2.3.4", false)
p2 := getPod("p2", "nsA", "nodeA", "2.2.3.4", false)
npc.podStore.Add(p1)
npc.podStore.Add(p2)
inGroupID := getNormalizedUID(toGroupSelector("", nil, &selectorIn).NormalizedName)
outGroupID := getNormalizedUID(toGroupSelector("", nil, &selectorOut).NormalizedName)
npc.syncAddressGroup(inGroupID)
npc.syncAddressGroup(outGroupID)
updatedInAddrGroupObj, _, _ := npc.addressGroupStore.Get(inGroupID)
updatedInAddrGroup := updatedInAddrGroupObj.(*antreatypes.AddressGroup)
updatedOutAddrGroupObj, _, _ := npc.addressGroupStore.Get(outGroupID)
updatedOutAddrGroup := updatedOutAddrGroupObj.(*antreatypes.AddressGroup)
memberPod1 := &networking.GroupMemberPod{IP: ipStrToIPAddress("1.2.3.4")}
memberPod2 := &networking.GroupMemberPod{IP: ipStrToIPAddress("2.2.3.4")}
assert.Equal(t, tt.inAddressGroupMatch, updatedInAddrGroup.Pods.Has(memberPod1))
assert.Equal(t, tt.inAddressGroupMatch, updatedInAddrGroup.Pods.Has(memberPod2))
assert.Equal(t, tt.outAddressGroupMatch, updatedOutAddrGroup.Pods.Has(memberPod1))
assert.Equal(t, tt.outAddressGroupMatch, updatedOutAddrGroup.Pods.Has(memberPod2))
})
}
}
func TestDeleteNamespace(t *testing.T) {
selectorSpec := metav1.LabelSelector{}
selectorIn := metav1.LabelSelector{
MatchLabels: map[string]string{"inGroup": "inAddress"},
}
selectorOut := metav1.LabelSelector{
MatchLabels: map[string]string{"outGroup": "outAddress"},
}
testNPObj := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "npA",
Namespace: "nsA",
},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: selectorSpec,
Ingress: []networkingv1.NetworkPolicyIngressRule{
{
From: []networkingv1.NetworkPolicyPeer{
{
NamespaceSelector: &selectorIn,
},
},
},
},
Egress: []networkingv1.NetworkPolicyEgressRule{
{
To: []networkingv1.NetworkPolicyPeer{
{
NamespaceSelector: &selectorOut,
},
},
},
},
},
}
tests := []struct {
name string
deletedNamespace *v1.Namespace
inAddressGroupMatch bool
outAddressGroupMatch bool
}{
{
name: "match-namespace-ingress-rule",
deletedNamespace: &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "nsA",
Labels: map[string]string{"inGroup": "inAddress"},
},
},
inAddressGroupMatch: true,
outAddressGroupMatch: false,
},
{
name: "match-namespace-egress-rule",
deletedNamespace: &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "nsA",
Labels: map[string]string{"outGroup": "outAddress"},
},
},
inAddressGroupMatch: false,
outAddressGroupMatch: true,
},
{
name: "match-namespace-all",
deletedNamespace: &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "nsA",
Labels: map[string]string{
"inGroup": "inAddress",
"outGroup": "outAddress",
},
},
},
inAddressGroupMatch: true,
outAddressGroupMatch: true,
},
{
name: "match-namespace-none",
deletedNamespace: &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "nsA",
Labels: map[string]string{"group": "none"},
},
},
inAddressGroupMatch: false,
outAddressGroupMatch: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, npc := newController()
npc.addNetworkPolicy(testNPObj)
p1 := getPod("p1", "nsA", "", "1.1.1.1", false)
p2 := getPod("p2", "nsA", "", "1.1.1.2", false)
npc.namespaceStore.Add(tt.deletedNamespace)
npc.podStore.Add(p1)
npc.podStore.Add(p2)
npc.namespaceStore.Delete(tt.deletedNamespace)
inGroupID := getNormalizedUID(toGroupSelector("", nil, &selectorIn).NormalizedName)
outGroupID := getNormalizedUID(toGroupSelector("", nil, &selectorOut).NormalizedName)
npc.syncAddressGroup(inGroupID)
npc.syncAddressGroup(outGroupID)
npc.podStore.Delete(p1)
npc.podStore.Delete(p2)
npc.namespaceStore.Delete(tt.deletedNamespace)
npc.syncAddressGroup(inGroupID)
npc.syncAddressGroup(outGroupID)
updatedInAddrGroupObj, _, _ := npc.addressGroupStore.Get(inGroupID)
updatedInAddrGroup := updatedInAddrGroupObj.(*antreatypes.AddressGroup)
updatedOutAddrGroupObj, _, _ := npc.addressGroupStore.Get(outGroupID)
updatedOutAddrGroup := updatedOutAddrGroupObj.(*antreatypes.AddressGroup)
memberPod1 := &networking.GroupMemberPod{IP: ipStrToIPAddress("1.1.1.1")}
memberPod2 := &networking.GroupMemberPod{IP: ipStrToIPAddress("1.1.1.2")}
if tt.inAddressGroupMatch {
assert.False(t, updatedInAddrGroup.Pods.Has(memberPod1))
assert.False(t, updatedInAddrGroup.Pods.Has(memberPod2))
}
if tt.outAddressGroupMatch {
assert.False(t, updatedOutAddrGroup.Pods.Has(memberPod1))
assert.False(t, updatedOutAddrGroup.Pods.Has(memberPod2))
}
})
}
}
func TestToGroupSelector(t *testing.T) {
pSelector := metav1.LabelSelector{}
pLabelSelector, _ := metav1.LabelSelectorAsSelector(&pSelector)
nSelector := metav1.LabelSelector{}
nLabelSelector, _ := metav1.LabelSelectorAsSelector(&nSelector)
tests := []struct {
name string
namespace string
podSelector *metav1.LabelSelector
nsSelector *metav1.LabelSelector
expGroupSelector *antreatypes.GroupSelector
}{
{
"to-group-selector-ns-pod-selector",
"nsName",
&pSelector,
nil,
&antreatypes.GroupSelector{
Namespace: "nsName",
NamespaceSelector: nil,
PodSelector: pLabelSelector,
NormalizedName: generateNormalizedName("nsName", pLabelSelector, nil),
},
},
{
"to-group-selector-ns-selector",
"nsName",
nil,
&nSelector,
&antreatypes.GroupSelector{
Namespace: "",
NamespaceSelector: nLabelSelector,
PodSelector: nil,
NormalizedName: generateNormalizedName("", nil, nLabelSelector),
},
},
{
"to-group-selector-pod-selector",
"nsName",
&pSelector,
nil,
&antreatypes.GroupSelector{
Namespace: "nsName",
NamespaceSelector: nil,
PodSelector: pLabelSelector,
NormalizedName: generateNormalizedName("nsName", pLabelSelector, nil),
},
},
{
"to-group-selector-ns-selector-pod-selector",
"nsName",
&pSelector,
&nSelector,
&antreatypes.GroupSelector{
Namespace: "",
NamespaceSelector: nLabelSelector,
PodSelector: pLabelSelector,
NormalizedName: generateNormalizedName("", pLabelSelector, nLabelSelector),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
group := toGroupSelector(tt.namespace, tt.podSelector, tt.nsSelector)
if group.Namespace != tt.expGroupSelector.Namespace {
t.Errorf("Group Namespace incorrectly set. Expected %s, got: %s", tt.expGroupSelector.Namespace, group.Namespace)
}
if group.NormalizedName != tt.expGroupSelector.NormalizedName {
t.Errorf("Group normalized Name incorrectly set. Expected %s, got: %s", tt.expGroupSelector.NormalizedName, group.NormalizedName)
}
if group.NamespaceSelector != nil && tt.expGroupSelector.NamespaceSelector != nil {
if !reflect.DeepEqual(group.NamespaceSelector, tt.expGroupSelector.NamespaceSelector) {
t.Errorf("Group NamespaceSelector incorrectly set. Expected %v, got: %v", tt.expGroupSelector.NamespaceSelector, group.NamespaceSelector)
}
}
if group.PodSelector != nil && tt.expGroupSelector.PodSelector != nil {
if !reflect.DeepEqual(group.PodSelector, tt.expGroupSelector.PodSelector) {
t.Errorf("Group PodSelector incorrectly set. Expected %v, got: %v", tt.expGroupSelector.PodSelector, group.PodSelector)
}
}
})
}
}
func TestGenerateNormalizedName(t *testing.T) {
pLabels := map[string]string{"app": "client"}
req1 := metav1.LabelSelectorRequirement{
Key: "role",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"db", "app"},
}
pExprs := []metav1.LabelSelectorRequirement{req1}
normalizedPodSelector := "app=client,role in (app,db)"
nLabels := map[string]string{"scope": "test"}
req2 := metav1.LabelSelectorRequirement{
Key: "env",
Operator: metav1.LabelSelectorOpNotIn,
Values: []string{"staging", "prod"},
}
nExprs := []metav1.LabelSelectorRequirement{req2}
normalizedNSSelector := "env notin (prod,staging),scope=test"
pSelector := metav1.LabelSelector{
MatchLabels: pLabels,
MatchExpressions: pExprs,
}
pLabelSelector, _ := metav1.LabelSelectorAsSelector(&pSelector)
nSelector := metav1.LabelSelector{
MatchLabels: nLabels,
MatchExpressions: nExprs,
}
nLabelSelector, _ := metav1.LabelSelectorAsSelector(&nSelector)
tables := []struct {
namespace string
pSelector labels.Selector
nSelector labels.Selector
expName string
}{
{
"nsName",
pLabelSelector,
nil,
fmt.Sprintf("namespace=nsName And podSelector=%s", normalizedPodSelector),
},
{
"nsName",
nil,
nil,
"namespace=nsName",
},
{
"nsName",
nil,
nLabelSelector,
fmt.Sprintf("namespaceSelector=%s", normalizedNSSelector),
},
{
"nsName",
pLabelSelector,
nLabelSelector,
fmt.Sprintf("namespaceSelector=%s And podSelector=%s", normalizedNSSelector, normalizedPodSelector),
},
}
for _, table := range tables {
name := generateNormalizedName(table.namespace, table.pSelector, table.nSelector)
if table.expName != name {
t.Errorf("Unexpected normalized name. Expected %s, got %s", table.expName, name)
}
}
}
func TestToAntreaProtocol(t *testing.T) {
udpProto := v1.ProtocolUDP
tcpProto := v1.ProtocolTCP
sctpProto := v1.ProtocolSCTP
tables := []struct {
proto *v1.Protocol
expInternalProto networking.Protocol
}{
{nil, networking.ProtocolTCP},
{&udpProto, networking.ProtocolUDP},
{&tcpProto, networking.ProtocolTCP},
{&sctpProto, networking.ProtocolSCTP},
}
for _, table := range tables {
protocol := toAntreaProtocol(table.proto)
if *protocol != table.expInternalProto {
t.Errorf("Unexpected Antrea protocol. Expected %v, got %v", table.expInternalProto, *protocol)
}
}
}
func TestToAntreaServices(t *testing.T) {
tcpProto := v1.ProtocolTCP
portNum := intstr.FromInt(80)
tables := []struct {
ports []networkingv1.NetworkPolicyPort
expValues []networking.Service
}{
{
getK8sNetworkPolicyPorts(tcpProto),
[]networking.Service{
{
Protocol: toAntreaProtocol(&tcpProto),
Port: &portNum,
},
},
},
}
for _, table := range tables {
services := toAntreaServices(table.ports)
service := services[0]
expValue := table.expValues[0]
if *service.Protocol != *expValue.Protocol {
t.Errorf("Unexpected Antrea Protocol in Antrea Service. Expected %v, got %v", *expValue.Protocol, *service.Protocol)
}
if *service.Port != *expValue.Port {
t.Errorf("Unexpected Antrea Port in Antrea Service. Expected %v, got %v", *expValue.Port, *service.Port)
}
}
}
func TestToAntreaIPBlock(t *testing.T) {
expIPNet := networking.IPNet{
IP: ipStrToIPAddress("10.0.0.0"),
PrefixLength: 24,
}
tables := []struct {
ipBlock *networkingv1.IPBlock
expValue networking.IPBlock
err error
}{
{
&networkingv1.IPBlock{
CIDR: "10.0.0.0/24",
},
networking.IPBlock{
CIDR: expIPNet,
},
nil,
},
{
&networkingv1.IPBlock{
CIDR: "10.0.0.0",
},
networking.IPBlock{},
fmt.Errorf("invalid format for IPBlock CIDR: 10.0.0.0"),
},
}
for _, table := range tables {
antreaIPBlock, err := toAntreaIPBlock(table.ipBlock)
if err != nil {
if err.Error() != table.err.Error() {
t.Errorf("Unexpected error in Antrea IPBlock conversion. Expected %v, got %v", table.err, err)
}
}
if antreaIPBlock == nil {
continue
}
ipNet := antreaIPBlock.CIDR
if !bytes.Equal(ipNet.IP, table.expValue.CIDR.IP) {
t.Errorf("Unexpected IP in Antrea IPBlock conversion. Expected %v, got %v", table.expValue.CIDR.IP, ipNet.IP)
}
if table.expValue.CIDR.PrefixLength != ipNet.PrefixLength {
t.Errorf("Unexpected PrefixLength in Antrea IPBlock conversion. Expected %v, got %v", table.expValue.CIDR.PrefixLength, ipNet.PrefixLength)
}
}
}
func TestToAntreaPeer(t *testing.T) {
testNPObj := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: "npA",
Namespace: "nsA",
},
}
cidr := "10.0.0.0/16"
cidrIPNet, _ := cidrStrToIPNet(cidr)
exc1 := "10.0.1.0/24"
exc2 := "10.0.2.0/24"
excSlice := []string{exc1, exc2}
exc1Net, _ := cidrStrToIPNet(exc1)
exc2Net, _ := cidrStrToIPNet(exc2)
selectorIP := networkingv1.IPBlock{CIDR: cidr}
selectorIPAndExc := networkingv1.IPBlock{CIDR: cidr,
Except: excSlice}
selectorA := metav1.LabelSelector{MatchLabels: map[string]string{"foo1": "bar1"}}
selectorB := metav1.LabelSelector{MatchLabels: map[string]string{"foo2": "bar2"}}
selectorC := metav1.LabelSelector{MatchLabels: map[string]string{"foo3": "bar3"}}
selectorAll := metav1.LabelSelector{}
matchAllPodsPeer := matchAllPeer
matchAllPodsPeer.AddressGroups = []string{getNormalizedUID(toGroupSelector("", nil, &selectorAll).NormalizedName)}
tests := []struct {
name string
inPeers []networkingv1.NetworkPolicyPeer
outPeer networking.NetworkPolicyPeer
direction networking.Direction
}{
{
name: "pod-ns-selector-peer-ingress",
inPeers: []networkingv1.NetworkPolicyPeer{
{
PodSelector: &selectorA,
NamespaceSelector: &selectorB,
},
{
PodSelector: &selectorC,
},
},
outPeer: networking.NetworkPolicyPeer{
AddressGroups: []string{
getNormalizedUID(toGroupSelector("nsA", &selectorA, &selectorB).NormalizedName),
getNormalizedUID(toGroupSelector("nsA", &selectorC, nil).NormalizedName),
},
},
direction: networking.DirectionIn,
},
{
name: "pod-ns-selector-peer-egress",
inPeers: []networkingv1.NetworkPolicyPeer{
{
PodSelector: &selectorA,
NamespaceSelector: &selectorB,
},
{
PodSelector: &selectorC,
},
},
outPeer: networking.NetworkPolicyPeer{
AddressGroups: []string{
getNormalizedUID(toGroupSelector("nsA", &selectorA, &selectorB).NormalizedName),
getNormalizedUID(toGroupSelector("nsA", &selectorC, nil).NormalizedName),
},
},
direction: networking.DirectionOut,
},
{
name: "ipblock-selector-peer-ingress",
inPeers: []networkingv1.NetworkPolicyPeer{
{
IPBlock: &selectorIP,
},
},
outPeer: networking.NetworkPolicyPeer{
IPBlocks: []networking.IPBlock{
{
CIDR: *cidrIPNet,
},
},
},
direction: networking.DirectionIn,
},
{
name: "ipblock-selector-peer-egress",
inPeers: []networkingv1.NetworkPolicyPeer{
{
IPBlock: &selectorIP,
},
},
outPeer: networking.NetworkPolicyPeer{
IPBlocks: []networking.IPBlock{
{
CIDR: *cidrIPNet,
},
},
},
direction: networking.DirectionOut,
},
{
name: "ipblock-with-exc-selector-peer-ingress",
inPeers: []networkingv1.NetworkPolicyPeer{
{
IPBlock: &selectorIPAndExc,
},
},
outPeer: networking.NetworkPolicyPeer{
IPBlocks: []networking.IPBlock{
{
CIDR: *cidrIPNet,
Except: []networking.IPNet{*exc1Net, *exc2Net},
},
},
},
direction: networking.DirectionIn,
},
{
name: "ipblock-with-exc-selector-peer-egress",
inPeers: []networkingv1.NetworkPolicyPeer{
{
IPBlock: &selectorIPAndExc,
},
},
outPeer: networking.NetworkPolicyPeer{
IPBlocks: []networking.IPBlock{
{
CIDR: *cidrIPNet,
Except: []networking.IPNet{*exc1Net, *exc2Net},
},
},
},
direction: networking.DirectionOut,
},
{
name: "empty-peer-ingress",
inPeers: []networkingv1.NetworkPolicyPeer{},
outPeer: matchAllPeer,
direction: networking.DirectionIn,
},
{
name: "empty-peer-egress",
inPeers: []networkingv1.NetworkPolicyPeer{},
outPeer: matchAllPodsPeer,
direction: networking.DirectionOut,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, npc := newController()
actualPeer := npc.toAntreaPeer(tt.inPeers, testNPObj, tt.direction)
if !reflect.DeepEqual(tt.outPeer.AddressGroups, (*actualPeer).AddressGroups) {
t.Errorf("Unexpected AddressGroups in Antrea Peer conversion. Expected %v, got %v", tt.outPeer.AddressGroups, (*actualPeer).AddressGroups)
}
if len(tt.outPeer.IPBlocks) != len((*actualPeer).IPBlocks) {
t.Errorf("Unexpected number of IPBlocks in Antrea Peer conversion. Expected %v, got %v", len(tt.outPeer.IPBlocks), len((*actualPeer).IPBlocks))
}
for i := 0; i < len(tt.outPeer.IPBlocks); i++ {
if !compareIPBlocks(&(tt.outPeer.IPBlocks[i]), &((*actualPeer).IPBlocks[i])) {
t.Errorf("Unexpected IPBlocks in Antrea Peer conversion. Expected %v, got %v", tt.outPeer.IPBlocks[i], (*actualPeer).IPBlocks[i])
}
}
})
}
}
func TestProcessNetworkPolicy(t *testing.T) {
protocolTCP := networking.ProtocolTCP
intstr80, intstr81 := intstr.FromInt(80), intstr.FromInt(81)
selectorA := metav1.LabelSelector{MatchLabels: map[string]string{"foo1": "bar1"}}
selectorB := metav1.LabelSelector{MatchLabels: map[string]string{"foo2": "bar2"}}
selectorC := metav1.LabelSelector{MatchLabels: map[string]string{"foo3": "bar3"}}
tests := []struct {
name string
inputPolicy *networkingv1.NetworkPolicy
expectedPolicy *antreatypes.NetworkPolicy
expectedAppliedToGroups int
expectedAddressGroups int
}{
{
name: "default-allow-ingress",
inputPolicy: &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", Name: "npA", UID: "uidA"},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
Ingress: []networkingv1.NetworkPolicyIngressRule{{}},
},
},
expectedPolicy: &antreatypes.NetworkPolicy{
UID: "uidA",
Name: "npA",
Namespace: "nsA",
Rules: []networking.NetworkPolicyRule{{
Direction: networking.DirectionIn,
From: matchAllPeer,
Services: nil,
}},
AppliedToGroups: []string{getNormalizedUID(toGroupSelector("nsA", &metav1.LabelSelector{}, nil).NormalizedName)},
},
expectedAppliedToGroups: 1,
expectedAddressGroups: 0,
},
{
name: "default-deny-egress",
inputPolicy: &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", Name: "npA", UID: "uidA"},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
},
},
expectedPolicy: &antreatypes.NetworkPolicy{
UID: "uidA",
Name: "npA",
Namespace: "nsA",
Rules: []networking.NetworkPolicyRule{denyAllEgressRule},
AppliedToGroups: []string{getNormalizedUID(toGroupSelector("nsA", &metav1.LabelSelector{}, nil).NormalizedName)},
},
expectedAppliedToGroups: 1,
expectedAddressGroups: 0,
},
{
name: "rules-with-same-selectors",
inputPolicy: &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", Name: "npA", UID: "uidA"},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: selectorA,
Ingress: []networkingv1.NetworkPolicyIngressRule{
{
Ports: []networkingv1.NetworkPolicyPort{
{
Port: &intstr80,
},
},
From: []networkingv1.NetworkPolicyPeer{
{
PodSelector: &selectorB,
NamespaceSelector: &selectorC,
},
},
},
},
Egress: []networkingv1.NetworkPolicyEgressRule{
{
Ports: []networkingv1.NetworkPolicyPort{
{
Port: &intstr81,
},
},
To: []networkingv1.NetworkPolicyPeer{
{
PodSelector: &selectorB,
NamespaceSelector: &selectorC,
},
},
},
},
},
},
expectedPolicy: &antreatypes.NetworkPolicy{
UID: "uidA",
Name: "npA",
Namespace: "nsA",
Rules: []networking.NetworkPolicyRule{
{
Direction: networking.DirectionIn,
From: networking.NetworkPolicyPeer{
AddressGroups: []string{getNormalizedUID(toGroupSelector("nsA", &selectorB, &selectorC).NormalizedName)},
},
Services: []networking.Service{
{
Protocol: &protocolTCP,
Port: &intstr80,
},
},
},
{
Direction: networking.DirectionOut,
To: networking.NetworkPolicyPeer{
AddressGroups: []string{getNormalizedUID(toGroupSelector("nsA", &selectorB, &selectorC).NormalizedName)},
},
Services: []networking.Service{
{
Protocol: &protocolTCP,
Port: &intstr81,
},
},
},
},
AppliedToGroups: []string{getNormalizedUID(toGroupSelector("nsA", &selectorA, nil).NormalizedName)},
},
expectedAppliedToGroups: 1,
expectedAddressGroups: 1,
},
{
name: "rules-with-different-selectors",
inputPolicy: &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", Name: "npA", UID: "uidA"},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: selectorA,
Ingress: []networkingv1.NetworkPolicyIngressRule{
{
Ports: []networkingv1.NetworkPolicyPort{
{
Port: &intstr80,
},
},
From: []networkingv1.NetworkPolicyPeer{
{
PodSelector: &selectorB,
},
},
},
{
Ports: []networkingv1.NetworkPolicyPort{
{
Port: &intstr81,
},
},
From: []networkingv1.NetworkPolicyPeer{
{
NamespaceSelector: &selectorC,
},
},
},
},
},
},
expectedPolicy: &antreatypes.NetworkPolicy{
UID: "uidA",
Name: "npA",
Namespace: "nsA",
Rules: []networking.NetworkPolicyRule{
{
Direction: networking.DirectionIn,
From: networking.NetworkPolicyPeer{
AddressGroups: []string{getNormalizedUID(toGroupSelector("nsA", &selectorB, nil).NormalizedName)},
},
Services: []networking.Service{
{
Protocol: &protocolTCP,
Port: &intstr80,
},
},
},
{
Direction: networking.DirectionIn,
From: networking.NetworkPolicyPeer{
AddressGroups: []string{getNormalizedUID(toGroupSelector("nsA", nil, &selectorC).NormalizedName)},
},
Services: []networking.Service{
{
Protocol: &protocolTCP,
Port: &intstr81,
},
},
},
},
AppliedToGroups: []string{getNormalizedUID(toGroupSelector("nsA", &selectorA, nil).NormalizedName)},
},
expectedAppliedToGroups: 1,
expectedAddressGroups: 2,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, c := newController()
if actualPolicy := c.processNetworkPolicy(tt.inputPolicy); !reflect.DeepEqual(actualPolicy, tt.expectedPolicy) {
t.Errorf("processNetworkPolicy() got %v, want %v", actualPolicy, tt.expectedPolicy)
}
if actualAddressGroups := len(c.addressGroupStore.List()); actualAddressGroups != tt.expectedAddressGroups {
t.Errorf("len(addressGroupStore.List()) got %v, want %v", actualAddressGroups, tt.expectedAddressGroups)
}
if actualAppliedToGroups := len(c.appliedToGroupStore.List()); actualAppliedToGroups != tt.expectedAppliedToGroups {
t.Errorf("len(appliedToGroupStore.List()) got %v, want %v", actualAppliedToGroups, tt.expectedAppliedToGroups)
}
})
}
}
func TestPodToMemberPod(t *testing.T) {
namedPod := getPod("", "", "", "", true)
unNamedPod := getPod("", "", "", "", false)
tests := []struct {
name string
inputPod *v1.Pod
expMemberPod networking.GroupMemberPod
includeIP bool
includeRef bool
namedPort bool
}{
{
name: "namedport-pod-with-ip-ref",
inputPod: namedPod,
expMemberPod: networking.GroupMemberPod{
IP: ipStrToIPAddress(namedPod.Status.PodIP),
Pod: &networking.PodReference{
Name: namedPod.Name,
Namespace: namedPod.Namespace,
},
Ports: []networking.NamedPort{
{
Port: 80,
Name: "http",
Protocol: "tcp",
},
},
},
includeIP: true,
includeRef: true,
namedPort: true,
},
{
name: "namedport-pod-with-ip",
inputPod: namedPod,
expMemberPod: networking.GroupMemberPod{
IP: ipStrToIPAddress(namedPod.Status.PodIP),
Ports: []networking.NamedPort{
{
Port: 80,
Name: "http",
Protocol: "tcp",
},
},
},
includeIP: true,
includeRef: false,
namedPort: true,
},
{
name: "namedport-pod-with-ref",
inputPod: namedPod,
expMemberPod: networking.GroupMemberPod{
Pod: &networking.PodReference{
Name: namedPod.Name,
Namespace: namedPod.Namespace,
},
Ports: []networking.NamedPort{
{
Port: 80,
Name: "http",
Protocol: "tcp",
},
},
},
includeIP: false,
includeRef: true,
namedPort: true,
},
{
name: "unnamedport-pod-with-ref",
inputPod: unNamedPod,
expMemberPod: networking.GroupMemberPod{
Pod: &networking.PodReference{
Name: unNamedPod.Name,
Namespace: unNamedPod.Namespace,
},
},
includeIP: false,
includeRef: true,
namedPort: false,
},
{
name: "unnamedport-pod-with-ip",
inputPod: unNamedPod,
expMemberPod: networking.GroupMemberPod{
IP: ipStrToIPAddress(unNamedPod.Status.PodIP),
},
includeIP: true,
includeRef: false,
namedPort: false,
},
{
name: "unnamedport-pod-with-ip-ref",
inputPod: unNamedPod,
expMemberPod: networking.GroupMemberPod{
IP: ipStrToIPAddress(unNamedPod.Status.PodIP),
Pod: &networking.PodReference{
Name: unNamedPod.Name,
Namespace: unNamedPod.Namespace,
},
Ports: []networking.NamedPort{
{
Port: 80,
Name: "http",
Protocol: "tcp",
},
},
},
includeIP: true,
includeRef: true,
namedPort: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
actualMemberPod := podToMemberPod(tt.inputPod, tt.includeIP, tt.includeRef)
// Case where the PodReference must not be populated.
if !tt.includeRef {
if actualMemberPod.Pod != nil {
t.Errorf("podToMemberPod() got unexpected PodReference %v, want nil", *(*actualMemberPod).Pod)
}
} else if !reflect.DeepEqual(*(*actualMemberPod).Pod, *(tt.expMemberPod).Pod) {
t.Errorf("podToMemberPod() got unexpected PodReference %v, want %v", *(*actualMemberPod).Pod, *(tt.expMemberPod).Pod)
}
// Case where the IPAddress must not be populated.
if !tt.includeIP {
if actualMemberPod.IP != nil {
t.Errorf("podToMemberPod() got unexpected IP %v, want nil", actualMemberPod.IP)
}
} else if !bytes.Equal(actualMemberPod.IP, tt.expMemberPod.IP) {
t.Errorf("podToMemberPod() got unexpected IP %v, want %v", actualMemberPod.IP, tt.expMemberPod.IP)
}
if !tt.namedPort {
if len(actualMemberPod.Ports) > 0 {
t.Errorf("podToMemberPod() got unexpected Ports %v, want []", actualMemberPod.Ports)
}
} else if !reflect.DeepEqual(actualMemberPod.Ports, tt.expMemberPod.Ports) {
t.Errorf("podToMemberPod() got unexpected Ports %v, want %v", actualMemberPod.Ports, tt.expMemberPod.Ports)
}
})
}
}
func TestCIDRStrToIPNet(t *testing.T) {
tests := []struct {
name string
inC string
expC *networking.IPNet
}{
{
name: "cidr-valid",
inC: "10.0.0.0/16",
expC: &networking.IPNet{
IP: ipStrToIPAddress("10.0.0.0"),
PrefixLength: int32(16),
},
},
{
name: "cidr-invalid",
inC: "10.0.0.0/",
expC: nil,
},
{
name: "cidr-prefix-invalid",
inC: "10.0.0.0/a",
expC: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
actC, _ := cidrStrToIPNet(tt.inC)
if !reflect.DeepEqual(actC, tt.expC) {
t.Errorf("cidrStrToIPNet() got unexpected IPNet %v, want %v", actC, tt.expC)
}
})
}
}
func TestIPStrToIPAddress(t *testing.T) {
ip1 := "10.0.1.10"
expIP1 := net.ParseIP(ip1)
ip2 := "1090.0.1.10"
tests := []struct {
name string
ipStr string
expIP networking.IPAddress
}{
{
name: "str-ip-valid",
ipStr: ip1,
expIP: networking.IPAddress(expIP1),
},
{
name: "str-ip-invalid",
ipStr: ip2,
expIP: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
actIP := ipStrToIPAddress(tt.ipStr)
if !bytes.Equal(actIP, tt.expIP) {
t.Errorf("ipStrToIPAddress() got unexpected IPAddress %v, want %v", actIP, tt.expIP)
}
})
}
}
func TestDeleteFinalStateUnknownPod(t *testing.T) {
_, c := newController()
c.heartbeatCh = make(chan heartbeat, 2)
ns := metav1.NamespaceDefault
pod := getPod("p1", ns, "", "1.1.1.1", false)
c.addPod(pod)
key, _ := cache.MetaNamespaceKeyFunc(pod)
c.deletePod(cache.DeletedFinalStateUnknown{Key: key, Obj: pod})
close(c.heartbeatCh)
var ok bool
_, ok = <-c.heartbeatCh
assert.True(t, ok, "Missing event on channel")
_, ok = <-c.heartbeatCh
assert.True(t, ok, "Missing event on channel")
}
func TestDeleteFinalStateUnknownNamespace(t *testing.T) {
_, c := newController()
c.heartbeatCh = make(chan heartbeat, 2)
ns := &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "nsA",
},
}
c.addNamespace(ns)
c.deleteNamespace(cache.DeletedFinalStateUnknown{Key: "nsA", Obj: ns})
close(c.heartbeatCh)
var ok bool
_, ok = <-c.heartbeatCh
assert.True(t, ok, "Missing event on channel")
_, ok = <-c.heartbeatCh
assert.True(t, ok, "Missing event on channel")
}
func TestDeleteFinalStateUnknownNetworkPolicy(t *testing.T) {
_, c := newController()
c.heartbeatCh = make(chan heartbeat, 2)
np := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", Name: "npA", UID: "uidA"},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: metav1.LabelSelector{},
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
},
}
c.addNetworkPolicy(np)
key, _ := cache.MetaNamespaceKeyFunc(np)
c.deleteNetworkPolicy(cache.DeletedFinalStateUnknown{Key: key, Obj: np})
close(c.heartbeatCh)
var ok bool
_, ok = <-c.heartbeatCh
assert.True(t, ok, "Missing event on channel")
_, ok = <-c.heartbeatCh
assert.True(t, ok, "Missing event on channel")
}
// util functions for testing.
func getK8sNetworkPolicyPorts(proto v1.Protocol) []networkingv1.NetworkPolicyPort {
portNum := intstr.FromInt(80)
port := networkingv1.NetworkPolicyPort{
Protocol: &proto,
Port: &portNum,
}
ports := []networkingv1.NetworkPolicyPort{port}
return ports
}
func getK8sNetworkPolicyObj() *networkingv1.NetworkPolicy {
ns := metav1.NamespaceDefault
npName := "testing-1"
pSelector := metav1.LabelSelector{}
inNsSelector := metav1.LabelSelector{}
outPSelector := metav1.LabelSelector{}
ingressRules := []networkingv1.NetworkPolicyIngressRule{
{
From: []networkingv1.NetworkPolicyPeer{
{
NamespaceSelector: &inNsSelector,
},
},
},
}
egressRules := []networkingv1.NetworkPolicyEgressRule{
{
To: []networkingv1.NetworkPolicyPeer{
{
PodSelector: &outPSelector,
},
},
},
}
npObj := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{Name: npName, Namespace: ns},
Spec: networkingv1.NetworkPolicySpec{
PodSelector: pSelector,
Ingress: ingressRules,
Egress: egressRules,
},
}
return npObj
}
func getPod(name, ns, nodeName, podIP string, namedPort bool) *v1.Pod {
if name == "" {
name = "testPod"
}
if nodeName == "" {
nodeName = "node1"
}
if ns == "" {
ns = metav1.NamespaceDefault
}
if podIP == "" {
podIP = "1.2.3.4"
}
ctrPort := v1.ContainerPort{
ContainerPort: 80,
Protocol: "tcp",
}
if namedPort {
ctrPort.Name = "http"
}
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: ns,
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "container-1",
Ports: []v1.ContainerPort{ctrPort},
}},
NodeName: nodeName,
},
Status: v1.PodStatus{
Conditions: []v1.PodCondition{
{
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
},
PodIP: podIP,
},
}
}
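// Illustrative call (values grounded in the defaults above): getPod("", "nsA", "", "", true)
// yields a Pod named "testPod" on node "node1" with IP "1.2.3.4" and a named "http" port.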
// compareIPBlocks is a util function to compare the contents of two IPBlocks.
func compareIPBlocks(ipb1, ipb2 *networking.IPBlock) bool {
if ipb1 == nil && ipb2 == nil {
return true
}
if (ipb1 == nil && ipb2 != nil) || (ipb1 != nil && ipb2 == nil) {
return false
}
ipNet1 := ipb1.CIDR
ipNet2 := ipb2.CIDR
if !compareIPNet(ipNet1, ipNet2) {
return false
}
exc1 := ipb1.Except
exc2 := ipb2.Except
if len(exc1) != len(exc2) {
return false
}
for i := 0; i < len(exc1); i++ {
if !compareIPNet(exc1[i], exc2[i]) {
return false
}
}
return true
}
// compareIPNet is a util function to compare the contents of two IPNets.
func compareIPNet(ipn1, ipn2 networking.IPNet) bool {
if !bytes.Equal(ipn1.IP, ipn2.IP) {
return false
}
if ipn1.PrefixLength != ipn2.PrefixLength {
return false
}
return true
}
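// Hedged sketch (not part of the original suite): a hypothetical test named
// TestCompareIPNetSketch exercising compareIPNet directly; the CIDR values
// below are illustrative assumptions, reusing the ipStrToIPAddress helper
// defined earlier in this file.
func TestCompareIPNetSketch(t *testing.T) {
    n1 := networking.IPNet{IP: ipStrToIPAddress("10.0.0.0"), PrefixLength: int32(16)}
    n2 := networking.IPNet{IP: ipStrToIPAddress("10.0.0.0"), PrefixLength: int32(24)}
    if !compareIPNet(n1, n1) {
        t.Errorf("compareIPNet() got false for identical IPNets, want true")
    }
    if compareIPNet(n1, n2) {
        t.Errorf("compareIPNet() got true for differing prefix lengths, want false")
    }
}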
| 1 | 15,534 | just checking: do we need to remove this because otherwise the `List` operations may not return the entire set of Pods / Namespaces? do you know why we used `alwaysReady` in the first place, I can't remember? | antrea-io-antrea | go |
@@ -439,10 +439,14 @@ function createTopology(mongoClient, topologyType, options, callback) {
}
const MongoClient = loadClient();
- const connectionString =
- os.platform() === 'win32'
- ? 'mongodb://localhost:27020/?serverSelectionTimeoutMS=1000'
- : 'mongodb://%2Ftmp%2Fmongocryptd.sock/?serverSelectionTimeoutMS=1000';
+ let connectionString;
+ if (mongoClient.s.options.autoEncryption.cryptdConnectionString != null) {
+ connectionString = mongoClient.s.options.autoEncryption.cryptdConnectionString;
+ } else if (os.platform() === 'win32') {
+ connectionString = 'mongodb://localhost:27020/?serverSelectionTimeoutMS=1000';
+ } else {
+ connectionString = 'mongodb://%2Ftmp%2Fmongocryptd.sock/?serverSelectionTimeoutMS=1000';
+ }
const mongocryptdClient = new MongoClient(connectionString, { useUnifiedTopology: true });
mongocryptdClient.connect(err => { | 1 | 'use strict';
const deprecate = require('util').deprecate;
const Logger = require('../core').Logger;
const MongoError = require('../core').MongoError;
const Mongos = require('../topologies/mongos');
const parse = require('../core').parseConnectionString;
const ReadPreference = require('../core').ReadPreference;
const ReplSet = require('../topologies/replset');
const Server = require('../topologies/server');
const ServerSessionPool = require('../core').Sessions.ServerSessionPool;
const NativeTopology = require('../topologies/native_topology');
const MongoCredentials = require('../core').MongoCredentials;
const ReadConcern = require('../read_concern');
const os = require('os');
let client;
function loadClient() {
if (!client) {
client = require('../mongo_client');
}
return client;
}
const monitoringEvents = [
'timeout',
'close',
'serverOpening',
'serverDescriptionChanged',
'serverHeartbeatStarted',
'serverHeartbeatSucceeded',
'serverHeartbeatFailed',
'serverClosed',
'topologyOpening',
'topologyClosed',
'topologyDescriptionChanged',
'commandStarted',
'commandSucceeded',
'commandFailed',
'joined',
'left',
'ping',
'ha',
'all',
'fullsetup',
'open'
];
const ignoreOptionNames = ['native_parser'];
const legacyOptionNames = ['server', 'replset', 'replSet', 'mongos', 'db'];
const legacyParse = deprecate(
require('../url_parser'),
'current URL string parser is deprecated, and will be removed in a future version. ' +
'To use the new parser, pass option { useNewUrlParser: true } to MongoClient.connect.'
);
const validOptionNames = [
'poolSize',
'ssl',
'sslValidate',
'sslCA',
'sslCert',
'sslKey',
'sslPass',
'sslCRL',
'autoReconnect',
'noDelay',
'keepAlive',
'keepAliveInitialDelay',
'connectTimeoutMS',
'family',
'socketTimeoutMS',
'reconnectTries',
'reconnectInterval',
'ha',
'haInterval',
'replicaSet',
'secondaryAcceptableLatencyMS',
'acceptableLatencyMS',
'connectWithNoPrimary',
'authSource',
'w',
'wtimeout',
'j',
'forceServerObjectId',
'serializeFunctions',
'ignoreUndefined',
'raw',
'bufferMaxEntries',
'readPreference',
'pkFactory',
'promiseLibrary',
'readConcern',
'maxStalenessSeconds',
'loggerLevel',
'logger',
'promoteValues',
'promoteBuffers',
'promoteLongs',
'domainsEnabled',
'checkServerIdentity',
'validateOptions',
'appname',
'auth',
'user',
'password',
'authMechanism',
'compression',
'fsync',
'readPreferenceTags',
'numberOfRetries',
'auto_reconnect',
'minSize',
'monitorCommands',
'retryWrites',
'useNewUrlParser',
'useUnifiedTopology',
'serverSelectionTimeoutMS',
'useRecoveryToken',
'autoEncryption'
];
function addListeners(mongoClient, topology) {
topology.on('authenticated', createListener(mongoClient, 'authenticated'));
topology.on('error', createListener(mongoClient, 'error'));
topology.on('timeout', createListener(mongoClient, 'timeout'));
topology.on('close', createListener(mongoClient, 'close'));
topology.on('parseError', createListener(mongoClient, 'parseError'));
topology.once('open', createListener(mongoClient, 'open'));
topology.once('fullsetup', createListener(mongoClient, 'fullsetup'));
topology.once('all', createListener(mongoClient, 'all'));
topology.on('reconnect', createListener(mongoClient, 'reconnect'));
}
function assignTopology(client, topology) {
client.topology = topology;
topology.s.sessionPool =
topology instanceof NativeTopology
? new ServerSessionPool(topology)
: new ServerSessionPool(topology.s.coreTopology);
}
// Clear out all events
function clearAllEvents(topology) {
monitoringEvents.forEach(event => topology.removeAllListeners(event));
}
// Collect all events in order from SDAM
function collectEvents(mongoClient, topology) {
let MongoClient = loadClient();
const collectedEvents = [];
if (mongoClient instanceof MongoClient) {
monitoringEvents.forEach(event => {
topology.on(event, (object1, object2) => {
if (event === 'open') {
collectedEvents.push({ event: event, object1: mongoClient });
} else {
collectedEvents.push({ event: event, object1: object1, object2: object2 });
}
});
});
}
return collectedEvents;
}
/**
* Connect to MongoDB using a url as documented at
*
* docs.mongodb.org/manual/reference/connection-string/
*
* Note that for replicasets the replicaSet query parameter is required in the 2.0 driver
*
* @method
* @param {MongoClient} mongoClient The MongoClient instance with which to connect.
* @param {string} url The connection URI string
* @param {object} [options] Optional settings. See MongoClient.prototype.connect for a list of options.
* @param {MongoClient~connectCallback} [callback] The command result callback
*/
function connect(mongoClient, url, options, callback) {
options = Object.assign({}, options);
// If callback is null throw an exception
if (callback == null) {
throw new Error('no callback function provided');
}
let didRequestAuthentication = false;
const logger = Logger('MongoClient', options);
// Did we pass in a Server/ReplSet/Mongos
if (url instanceof Server || url instanceof ReplSet || url instanceof Mongos) {
return connectWithUrl(mongoClient, url, options, connectCallback);
}
const parseFn = options.useNewUrlParser ? parse : legacyParse;
const transform = options.useNewUrlParser ? transformUrlOptions : legacyTransformUrlOptions;
parseFn(url, options, (err, _object) => {
// Do not attempt to connect if parsing error
if (err) return callback(err);
// Flatten
const object = transform(_object);
// Parse the string
const _finalOptions = createUnifiedOptions(object, options);
// Check if we have connection and socket timeout set
if (_finalOptions.socketTimeoutMS == null) _finalOptions.socketTimeoutMS = 360000;
if (_finalOptions.connectTimeoutMS == null) _finalOptions.connectTimeoutMS = 30000;
if (_finalOptions.retryWrites == null) _finalOptions.retryWrites = true;
if (_finalOptions.useRecoveryToken == null) _finalOptions.useRecoveryToken = true;
if (_finalOptions.db_options && _finalOptions.db_options.auth) {
delete _finalOptions.db_options.auth;
}
// Store the merged options object
mongoClient.s.options = _finalOptions;
// Failure modes
if (object.servers.length === 0) {
return callback(new Error('connection string must contain at least one seed host'));
}
if (_finalOptions.auth && !_finalOptions.credentials) {
try {
didRequestAuthentication = true;
_finalOptions.credentials = generateCredentials(
mongoClient,
_finalOptions.auth.user,
_finalOptions.auth.password,
_finalOptions
);
} catch (err) {
return callback(err);
}
}
if (_finalOptions.useUnifiedTopology) {
return createTopology(mongoClient, 'unified', _finalOptions, connectCallback);
}
// If we have a replica set, skip discovery and go straight to connectivity
if (_finalOptions.replicaSet || _finalOptions.rs_name) {
return createTopology(mongoClient, 'replicaset', _finalOptions, connectCallback);
} else if (object.servers.length > 1) {
return createTopology(mongoClient, 'mongos', _finalOptions, connectCallback);
} else {
return createServer(mongoClient, _finalOptions, connectCallback);
}
});
function connectCallback(err, topology) {
const warningMessage = `seed list contains no mongos proxies, replicaset connections requires the parameter replicaSet to be supplied in the URI or options object, mongodb://server:port/db?replicaSet=name`;
if (err && err.message === 'no mongos proxies found in seed list') {
if (logger.isWarn()) {
logger.warn(warningMessage);
}
// Return a more specific error message for MongoClient.connect
return callback(new MongoError(warningMessage));
}
if (didRequestAuthentication) {
mongoClient.emit('authenticated', null, true);
}
// Return the error and db instance
callback(err, topology);
}
}
/**
* Connect to MongoDB using a url as documented at
*
* docs.mongodb.org/manual/reference/connection-string/
*
* Note that for replicasets the replicaSet query parameter is required in the 2.0 driver
*
* @method
* @param {MongoClient} mongoClient The MongoClient instance with which to connect.
* @param {MongoClient~connectCallback} [callback] The command result callback
*/
function connectOp(mongoClient, err, callback) {
// Did we have a validation error
if (err) return callback(err);
// Fallback to callback based connect
connect(mongoClient, mongoClient.s.url, mongoClient.s.options, err => {
if (err) return callback(err);
callback(null, mongoClient);
});
}
function connectWithUrl(mongoClient, url, options, connectCallback) {
// Set the topology
assignTopology(mongoClient, url);
// Add listeners
addListeners(mongoClient, url);
// Propagate the events to the client
relayEvents(mongoClient, url);
let finalOptions = Object.assign({}, options);
// If we have a readPreference passed in by the db options, convert it from a string
if (typeof options.readPreference === 'string' || typeof options.read_preference === 'string') {
finalOptions.readPreference = new ReadPreference(
options.readPreference || options.read_preference
);
}
const isDoingAuth = finalOptions.user || finalOptions.password || finalOptions.authMechanism;
if (isDoingAuth && !finalOptions.credentials) {
try {
finalOptions.credentials = generateCredentials(
mongoClient,
finalOptions.user,
finalOptions.password,
finalOptions
);
} catch (err) {
return connectCallback(err, url);
}
}
return url.connect(finalOptions, connectCallback);
}
function createListener(mongoClient, event) {
const eventSet = new Set(['all', 'fullsetup', 'open', 'reconnect']);
return (v1, v2) => {
if (eventSet.has(event)) {
return mongoClient.emit(event, mongoClient);
}
mongoClient.emit(event, v1, v2);
};
}
function createServer(mongoClient, options, callback) {
// Pass in the promise library
options.promiseLibrary = mongoClient.s.promiseLibrary;
// Set default options
const servers = translateOptions(options);
const server = servers[0];
// Propagate the events to the client
const collectedEvents = collectEvents(mongoClient, server);
// Connect to topology
server.connect(options, (err, topology) => {
if (err) {
server.close(true);
return callback(err);
}
// Clear out all the collected event listeners
clearAllEvents(server);
// Relay all the events
relayEvents(mongoClient, server);
// Add listeners
addListeners(mongoClient, server);
// Check if we are really speaking to a mongos
const ismaster = topology.lastIsMaster();
// Set the topology
assignTopology(mongoClient, topology);
// Do we actually have a mongos
if (ismaster && ismaster.msg === 'isdbgrid') {
// Destroy the current connection
topology.close();
// Create mongos connection instead
return createTopology(mongoClient, 'mongos', options, callback);
}
// Fire all the events
replayEvents(mongoClient, collectedEvents);
// Otherwise callback
callback(err, topology);
});
}
function createTopology(mongoClient, topologyType, options, callback) {
// Pass in the promise library
options.promiseLibrary = mongoClient.s.promiseLibrary;
const translationOptions = {};
if (topologyType === 'unified') translationOptions.createServers = false;
// Set default options
const servers = translateOptions(options, translationOptions);
// Create the topology
let topology;
if (topologyType === 'mongos') {
topology = new Mongos(servers, options);
} else if (topologyType === 'replicaset') {
topology = new ReplSet(servers, options);
} else if (topologyType === 'unified') {
topology = new NativeTopology(options.servers, options);
}
// Add listeners
addListeners(mongoClient, topology);
// Propagate the events to the client
relayEvents(mongoClient, topology);
// Open the connection
topology.connect(options, (err, newTopology) => {
if (err) {
topology.close(true);
return callback(err);
}
assignTopology(mongoClient, newTopology);
if (options.autoEncryption == null) {
callback(null, newTopology);
return;
}
// setup for client side encryption
let AutoEncrypter;
try {
AutoEncrypter = require('mongodb-client-encryption').AutoEncrypter;
} catch (err) {
callback(
new MongoError(
'Auto-encryption requested, but the module is not installed. Please add `mongodb-client-encryption` as a dependency of your project'
)
);
return;
}
const MongoClient = loadClient();
const connectionString =
os.platform() === 'win32'
? 'mongodb://localhost:27020/?serverSelectionTimeoutMS=1000'
: 'mongodb://%2Ftmp%2Fmongocryptd.sock/?serverSelectionTimeoutMS=1000';
const mongocryptdClient = new MongoClient(connectionString, { useUnifiedTopology: true });
mongocryptdClient.connect(err => {
if (err) return callback(err, null);
const mongoCryptOptions = Object.assign({}, options.autoEncryption, {
mongocryptdClient
});
topology.s.options.autoEncrypter = new AutoEncrypter(mongoClient, mongoCryptOptions);
callback(null, newTopology);
});
});
}
function createUnifiedOptions(finalOptions, options) {
const childOptions = [
'mongos',
'server',
'db',
'replset',
'db_options',
'server_options',
'rs_options',
'mongos_options'
];
const noMerge = ['readconcern', 'compression'];
for (const name in options) {
if (noMerge.indexOf(name.toLowerCase()) !== -1) {
finalOptions[name] = options[name];
} else if (childOptions.indexOf(name.toLowerCase()) !== -1) {
finalOptions = mergeOptions(finalOptions, options[name], false);
} else {
if (
options[name] &&
typeof options[name] === 'object' &&
!Buffer.isBuffer(options[name]) &&
!Array.isArray(options[name])
) {
finalOptions = mergeOptions(finalOptions, options[name], true);
} else {
finalOptions[name] = options[name];
}
}
}
return finalOptions;
}
function legacyTransformUrlOptions(object) {
return mergeOptions(createUnifiedOptions({}, object), object, false);
}
function mergeOptions(target, source, flatten) {
for (const name in source) {
if (source[name] && typeof source[name] === 'object' && flatten) {
target = mergeOptions(target, source[name], flatten);
} else {
target[name] = source[name];
}
}
return target;
}
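// Illustrative sketch of the flatten flag (the option names below are assumed
// for demonstration; db_options is one of the childOptions listed above):
//   mergeOptions({}, { db_options: { w: 1 } }, true)  -> { w: 1 }
//   mergeOptions({}, { db_options: { w: 1 } }, false) -> { db_options: { w: 1 } }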
function relayEvents(mongoClient, topology) {
const serverOrCommandEvents = [
'serverOpening',
'serverDescriptionChanged',
'serverHeartbeatStarted',
'serverHeartbeatSucceeded',
'serverHeartbeatFailed',
'serverClosed',
'topologyOpening',
'topologyClosed',
'topologyDescriptionChanged',
'commandStarted',
'commandSucceeded',
'commandFailed',
'joined',
'left',
'ping',
'ha'
];
serverOrCommandEvents.forEach(event => {
topology.on(event, (object1, object2) => {
mongoClient.emit(event, object1, object2);
});
});
}
//
// Replay any events due to single server connection switching to Mongos
//
function replayEvents(mongoClient, events) {
for (let i = 0; i < events.length; i++) {
mongoClient.emit(events[i].event, events[i].object1, events[i].object2);
}
}
const LEGACY_OPTIONS_MAP = validOptionNames.reduce((obj, name) => {
obj[name.toLowerCase()] = name;
return obj;
}, {});
function transformUrlOptions(_object) {
let object = Object.assign({ servers: _object.hosts }, _object.options);
for (let name in object) {
const camelCaseName = LEGACY_OPTIONS_MAP[name];
if (camelCaseName) {
object[camelCaseName] = object[name];
}
}
const hasUsername = _object.auth && _object.auth.username;
const hasAuthMechanism = _object.options && _object.options.authMechanism;
if (hasUsername || hasAuthMechanism) {
object.auth = Object.assign({}, _object.auth);
if (object.auth.db) {
object.authSource = object.authSource || object.auth.db;
}
if (object.auth.username) {
object.auth.user = object.auth.username;
}
}
if (_object.defaultDatabase) {
object.dbName = _object.defaultDatabase;
}
if (object.maxpoolsize) {
object.poolSize = object.maxpoolsize;
}
if (object.readconcernlevel) {
object.readConcern = new ReadConcern(object.readconcernlevel);
}
if (object.wtimeoutms) {
object.wtimeout = object.wtimeoutms;
}
if (_object.srvHost) {
object.srvHost = _object.srvHost;
}
return object;
}
function translateOptions(options, translationOptions) {
translationOptions = Object.assign({}, { createServers: true }, translationOptions);
// If we have a readPreference passed in by the db options
if (typeof options.readPreference === 'string' || typeof options.read_preference === 'string') {
options.readPreference = new ReadPreference(options.readPreference || options.read_preference);
}
// Do we have readPreference tags, add them
if (options.readPreference && (options.readPreferenceTags || options.read_preference_tags)) {
options.readPreference.tags = options.readPreferenceTags || options.read_preference_tags;
}
// Do we have maxStalenessSeconds
if (options.maxStalenessSeconds) {
options.readPreference.maxStalenessSeconds = options.maxStalenessSeconds;
}
// Set the socket and connection timeouts
if (options.socketTimeoutMS == null) options.socketTimeoutMS = 360000;
if (options.connectTimeoutMS == null) options.connectTimeoutMS = 30000;
if (!translationOptions.createServers) {
return;
}
// Create server instances
return options.servers.map(serverObj => {
return serverObj.domain_socket
? new Server(serverObj.domain_socket, 27017, options)
: new Server(serverObj.host, serverObj.port, options);
});
}
// Validate options object
function validOptions(options) {
const _validOptions = validOptionNames.concat(legacyOptionNames);
for (const name in options) {
if (ignoreOptionNames.indexOf(name) !== -1) {
continue;
}
if (_validOptions.indexOf(name) === -1) {
if (options.validateOptions) {
return new MongoError(`option ${name} is not supported`);
} else {
console.warn(`the option [${name}] is not supported`);
}
}
if (legacyOptionNames.indexOf(name) !== -1) {
console.warn(
`the server/replset/mongos/db options are deprecated, ` +
`all their options are supported at the top level of the options object [${validOptionNames}]`
);
}
}
}
const VALID_AUTH_MECHANISMS = new Set([
'DEFAULT',
'MONGODB-CR',
'PLAIN',
'MONGODB-X509',
'SCRAM-SHA-1',
'SCRAM-SHA-256',
'GSSAPI'
]);
const AUTH_MECHANISM_INTERNAL_MAP = {
DEFAULT: 'default',
'MONGODB-CR': 'mongocr',
PLAIN: 'plain',
'MONGODB-X509': 'x509',
'SCRAM-SHA-1': 'scram-sha-1',
'SCRAM-SHA-256': 'scram-sha-256'
};
function generateCredentials(client, username, password, options) {
options = Object.assign({}, options);
// the default db to authenticate against is 'self'
// if authenticate is called from a retry context, it may be another one, like admin
const source = options.authSource || options.authdb || options.dbName;
// authMechanism
const authMechanismRaw = options.authMechanism || 'DEFAULT';
const authMechanism = authMechanismRaw.toUpperCase();
if (!VALID_AUTH_MECHANISMS.has(authMechanism)) {
throw MongoError.create({
message: `authentication mechanism ${authMechanismRaw} not supported`,
driver: true
});
}
if (authMechanism === 'GSSAPI') {
return new MongoCredentials({
mechanism: process.platform === 'win32' ? 'sspi' : 'gssapi',
mechanismProperties: options,
source,
username,
password
});
}
return new MongoCredentials({
mechanism: AUTH_MECHANISM_INTERNAL_MAP[authMechanism],
source,
username,
password
});
}
function closeOperation(client, force, callback) {
const completeClose = err => {
client.emit('close', client);
for (const name in client.s.dbCache) {
client.s.dbCache[name].emit('close', client);
}
client.removeAllListeners('close');
callback(err, null);
};
if (client.topology == null) {
completeClose();
return;
}
client.topology.close(force, completeClose);
}
module.exports = { connectOp, validOptions, closeOperation };
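// Hedged usage sketch for the cryptdConnectionString override shown in the
// patch above; the URI, port, and key material below are illustrative
// assumptions only:
//   const client = new MongoClient(uri, {
//     useUnifiedTopology: true,
//     autoEncryption: {
//       keyVaultNamespace: 'encryption.__keyVault', // assumed placeholder
//       kmsProviders: { local: { key: localMasterKey } }, // assumed placeholder
//       cryptdConnectionString: 'mongodb://localhost:27021'
//     }
//   });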
| 1 | 15,914 | maybe we can replace `mongoClient.s.options.autoEncryption.cryptdConnectionString` with `options.autoEncryption.cryptdConnectionString`? | mongodb-node-mongodb-native | js |
@@ -3719,9 +3719,11 @@ dr_create_client_thread(void (*func)(void *param), void *arg)
# endif
LOG(THREAD, LOG_ALL, 1, "dr_create_client_thread xsp=" PFX " dstack=" PFX "\n", xsp,
get_clone_record_dstack(crec));
+ os_clone_pre(dcontext);
thread_id_t newpid =
dynamorio_clone(flags, xsp, NULL, IF_X86_ELSE(IF_X64_ELSE(NULL, &desc), NULL),
NULL, client_thread_run);
+ os_clone_post(dcontext);
/* i#501 switch to app's tls before creating client thread */
if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false))
os_switch_lib_tls(dcontext, false /*to dr*/); | 1 | /* *******************************************************************************
* Copyright (c) 2010-2019 Google, Inc. All rights reserved.
* Copyright (c) 2011 Massachusetts Institute of Technology All rights reserved.
* Copyright (c) 2000-2010 VMware, Inc. All rights reserved.
* *******************************************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2001-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2000-2001 Hewlett-Packard Company */
/*
* os.c - Linux specific routines
*/
/* Easiest to match kernel stat struct by using 64-bit.
* This limits us to 2.4+ kernel but that's ok.
* I don't really want to get into requiring kernel headers to build
* general release packages, though that would be fine for targeted builds.
* There are 3 different stat syscalls (SYS_oldstat, SYS_stat, and SYS_stat64)
* and using _LARGEFILE64_SOURCE with SYS_stat64 is the best match.
*/
#define _LARGEFILE64_SOURCE
/* for mmap-related #defines */
#include <sys/types.h>
#include <sys/mman.h>
/* in case MAP_32BIT is missing */
#ifndef MAP_32BIT
# define MAP_32BIT 0x40
#endif
#ifndef MAP_ANONYMOUS
# define MAP_ANONYMOUS MAP_ANON /* MAP_ANON on Mac */
#endif
/* for open */
#include <sys/stat.h>
#include <fcntl.h>
#include "../globals.h"
#include "../hashtable.h"
#include "../native_exec.h"
#include <unistd.h> /* for write and usleep and _exit */
#include <limits.h>
#ifdef MACOS
# include <sys/sysctl.h> /* for sysctl */
# ifndef SYS___sysctl
/* The name was changed on Yosemite */
# define SYS___sysctl SYS_sysctl
# endif
# include <mach/mach_traps.h> /* for swtch_pri */
# include "include/syscall_mach.h"
#endif
#ifdef LINUX
# include <sys/vfs.h> /* for statfs */
#elif defined(MACOS)
# include <sys/mount.h> /* for statfs */
# include <mach/mach.h>
# include <mach/task.h>
# include <mach/semaphore.h>
# include <mach/sync_policy.h>
#endif
#include <dirent.h>
/* for getrlimit */
#include <sys/time.h>
#include <sys/resource.h>
#ifndef X64
struct compat_rlimit {
uint rlim_cur;
uint rlim_max;
};
#endif
#ifdef MACOS
typedef struct rlimit rlimit64_t;
#else
typedef struct rlimit64 rlimit64_t;
#endif
#ifdef LINUX
/* For clone and its flags, the manpage says to include sched.h with _GNU_SOURCE
* defined. _GNU_SOURCE brings in unwanted extensions and causes name
* conflicts. Instead, we include unix/sched.h which comes from the Linux
* kernel headers.
*/
# include <linux/sched.h>
#endif
#include "module.h" /* elf */
#include "tls.h"
#if defined(X86) && defined(DEBUG)
# include "os_asm_defines.asm" /* for TLS_SELF_OFFSET_ASM */
#endif
#ifndef F_DUPFD_CLOEXEC /* in linux 2.6.24+ */
# define F_DUPFD_CLOEXEC 1030
#endif
/* This is not always sufficient to identify a syscall return value.
* For example, MacOS has some 32-bit syscalls that return 64-bit
* values in xdx:xax.
*/
#define MCXT_SYSCALL_RES(mc) ((mc)->IF_X86_ELSE(xax, r0))
#if defined(AARCH64)
# define ASM_R2 "x2"
# define ASM_R3 "x3"
# define READ_TP_TO_R3_DISP_IN_R2 \
"mrs " ASM_R3 ", tpidr_el0\n\t" \
"ldr " ASM_R3 ", [" ASM_R3 ", " ASM_R2 "] \n\t"
#elif defined(ARM)
# define ASM_R2 "r2"
# define ASM_R3 "r3"
# define READ_TP_TO_R3_DISP_IN_R2 \
"mrc p15, 0, " ASM_R3 \
", c13, c0, " STRINGIFY(USR_TLS_REG_OPCODE) " \n\t" \
"ldr " ASM_R3 ", [" ASM_R3 \
", " ASM_R2 "] \n\t"
#endif /* ARM */
/* Prototype for all functions in .init_array. */
typedef int (*init_fn_t)(int argc, char **argv, char **envp);
/* For STATIC_LIBRARY we do not cache environ so the app can change it. */
#ifndef STATIC_LIBRARY
/* i#46: Private __environ pointer. Points at the environment variable array
* on the stack, which is different from what libc __environ may point at. We
* use the environment for following children and setting options, so its OK
* that we don't see what libc says.
*/
char **our_environ;
#endif
#include <errno.h>
/* avoid problems with use of errno as var name in rest of file */
#if !defined(STANDALONE_UNIT_TEST) && !defined(MACOS)
# undef errno
#endif
/* we define __set_errno below */
/* must be prior to <link.h> => <elf.h> => INT*_{MIN,MAX} */
#include "instr.h" /* for get_app_segment_base() */
#include "decode_fast.h" /* decode_cti: maybe os_handle_mov_seg should be ifdef X86? */
#include <dlfcn.h>
#include <stdlib.h>
#include <stdio.h>
#include <signal.h>
#include <syslog.h> /* vsyslog */
#include "../vmareas.h"
#ifdef RCT_IND_BRANCH
# include "../rct.h"
#endif
#ifdef LINUX
# include "include/syscall.h" /* our own local copy */
#else
# include <sys/syscall.h>
#endif
#include "../module_shared.h"
#include "os_private.h"
#include "../synch.h"
#include "memquery.h"
#include "ksynch.h"
#ifndef HAVE_MEMINFO_QUERY
# include "memcache.h"
#endif
#ifdef CLIENT_INTERFACE
# include "instrument.h"
#endif
#ifdef MACOS
# define SYSNUM_EXIT_PROCESS SYS_exit
# define SYSNUM_EXIT_THREAD SYS_bsdthread_terminate
#else
# define SYSNUM_EXIT_PROCESS SYS_exit_group
# define SYSNUM_EXIT_THREAD SYS_exit
#endif
#ifdef ANDROID
/* Custom prctl flags specific to Android (xref i#1861) */
# define PR_SET_VMA 0x53564d41
# define PR_SET_VMA_ANON_NAME 0
#endif
/* Guards data written by os_set_app_thread_area(). */
DECLARE_CXTSWPROT_VAR(static mutex_t set_thread_area_lock,
INIT_LOCK_FREE(set_thread_area_lock));
static bool first_thread_tls_initialized;
static bool last_thread_tls_exited;
tls_type_t tls_global_type;
#ifndef HAVE_TLS
/* We use a table lookup to find a thread's dcontext */
/* Our only current no-TLS target, VMKernel (VMX86_SERVER), doesn't have apps with
* tons of threads anyway
*/
# define MAX_THREADS 512
typedef struct _tls_slot_t {
thread_id_t tid;
dcontext_t *dcontext;
} tls_slot_t;
/* Stored in heap for self-prot */
static tls_slot_t *tls_table;
/* not static so deadlock_avoidance_unlock() can look for it */
DECLARE_CXTSWPROT_VAR(mutex_t tls_lock, INIT_LOCK_FREE(tls_lock));
#endif
#ifdef CLIENT_INTERFACE
/* Should we place this in a client header? Currently mentioned in
* dr_raw_tls_calloc() docs.
*/
static bool client_tls_allocated[MAX_NUM_CLIENT_TLS];
DECLARE_CXTSWPROT_VAR(static mutex_t client_tls_lock, INIT_LOCK_FREE(client_tls_lock));
#endif
#include <stddef.h> /* for offsetof */
#include <sys/utsname.h> /* for struct utsname */
/* forward decl */
static void
handle_execve_post(dcontext_t *dcontext);
static bool
os_switch_lib_tls(dcontext_t *dcontext, bool to_app);
static bool
os_switch_seg_to_context(dcontext_t *dcontext, reg_id_t seg, bool to_app);
#ifdef X86
static bool
os_set_dr_tls_base(dcontext_t *dcontext, os_local_state_t *tls, byte *base);
#endif
#ifdef LINUX
static bool
handle_app_mremap(dcontext_t *dcontext, byte *base, size_t size, byte *old_base,
size_t old_size, uint old_prot, uint old_type);
static void
handle_app_brk(dcontext_t *dcontext, byte *lowest_brk /*if known*/, byte *old_brk,
byte *new_brk);
static void
restartable_region_init(void);
static bool
handle_restartable_region_syscall_pre(dcontext_t *dcontext);
static void
handle_restartable_region_syscall_post(dcontext_t *dcontext, bool success);
#endif
/* full path to our own library, used for execve */
static char dynamorio_library_path[MAXIMUM_PATH]; /* just dir */
static char dynamorio_library_filepath[MAXIMUM_PATH];
/* Issue 20: path to other architecture */
static char dynamorio_alt_arch_path[MAXIMUM_PATH];
static char dynamorio_alt_arch_filepath[MAXIMUM_PATH]; /* just dir */
/* Makefile passes us LIBDIR_X{86,64} defines */
#define DR_LIBDIR_X86 STRINGIFY(LIBDIR_X86)
#define DR_LIBDIR_X64 STRINGIFY(LIBDIR_X64)
/* pc values delimiting dynamo dll image */
static app_pc dynamo_dll_start = NULL;
static app_pc dynamo_dll_end = NULL; /* open-ended */
static app_pc executable_start = NULL;
static app_pc executable_end = NULL;
/* Used by get_application_name(). */
static char executable_path[MAXIMUM_PATH];
static char *executable_basename;
/* does the kernel provide tids that must be used to distinguish threads in a group? */
static bool kernel_thread_groups;
static bool kernel_64bit;
pid_t pid_cached;
static bool fault_handling_initialized;
#ifdef PROFILE_RDTSC
uint kilo_hertz; /* cpu clock speed */
#endif
/* Xref PR 258731, dup of STDOUT/STDERR in case app wants to close them. */
DR_API file_t our_stdout = STDOUT_FILENO;
DR_API file_t our_stderr = STDERR_FILENO;
DR_API file_t our_stdin = STDIN_FILENO;
/* we steal fds from the app */
static rlimit64_t app_rlimit_nofile; /* cur rlimit set by app */
static int min_dr_fd;
/* we store all DR files so we can prevent the app from changing them,
* and so we can close them in a child of fork.
* the table key is the fd and the payload is the set of DR_FILE_* flags.
*/
static generic_table_t *fd_table;
#define INIT_HTABLE_SIZE_FD 6 /* should remain small */
#ifdef DEBUG
static int num_fd_add_pre_heap;
#endif
#ifdef LINUX
/* i#1004: brk emulation */
static byte *app_brk_map;
static byte *app_brk_cur;
static byte *app_brk_end;
#endif
#ifdef MACOS
/* xref i#1404: we should expose these via the dr_get_os_version() API */
static int macos_version;
# define MACOS_VERSION_HIGH_SIERRA 17
# define MACOS_VERSION_SIERRA 16
# define MACOS_VERSION_EL_CAPITAN 15
# define MACOS_VERSION_YOSEMITE 14
# define MACOS_VERSION_MAVERICKS 13
# define MACOS_VERSION_MOUNTAIN_LION 12
# define MACOS_VERSION_LION 11
#endif
static bool
is_readable_without_exception_internal(const byte *pc, size_t size, bool query_os);
static void
process_mmap(dcontext_t *dcontext, app_pc base, size_t size, uint prot,
uint flags _IF_DEBUG(const char *map_type));
#ifdef LINUX
static char *
read_proc_self_exe(bool ignore_cache);
#endif
/* Libc independent directory iterator, similar to readdir. If we ever need
* this on Windows we should generalize it and export it to clients.
*/
typedef struct _dir_iterator_t {
file_t fd;
int off;
int end;
const char *name; /* Name of the current entry. */
char buf[4 * MAXIMUM_PATH]; /* Expect stack alloc, so not too big. */
} dir_iterator_t;
static void
os_dir_iterator_start(dir_iterator_t *iter, file_t fd);
static bool
os_dir_iterator_next(dir_iterator_t *iter);
/* XXX: If we generalize to Windows, will we need os_dir_iterator_stop()? */
/* vsyscall page. hardcoded at 0xffffe000 in earlier kernels, but
* randomly placed since fedora2.
* marked rx then: FIXME: should disallow this guy when that's the case!
* random vsyscall page is identified in maps files as "[vdso]"
* (kernel-provided fake shared library or Virt Dyn Shared Object).
*/
/* i#1583: vdso is now 2 pages, yet we assume vsyscall is on 1st page. */
/* i#2945: vdso is now 3 pages and vsyscall is not on the 1st page. */
app_pc vsyscall_page_start = NULL;
/* pc of the end of the syscall instr itself */
app_pc vsyscall_syscall_end_pc = NULL;
/* pc where kernel returns control after sysenter vsyscall */
app_pc vsyscall_sysenter_return_pc = NULL;
/* pc where our hook-displaced code was copied */
app_pc vsyscall_sysenter_displaced_pc = NULL;
#define VSYSCALL_PAGE_START_HARDCODED ((app_pc)(ptr_uint_t)0xffffe000)
#ifdef X64
/* i#430, in Red Hat Enterprise Server 5.6, vsyscall region is marked
* not executable
* ffffffffff600000-ffffffffffe00000 ---p 00000000 00:00 0 [vsyscall]
*/
# define VSYSCALL_REGION_MAPS_NAME "[vsyscall]"
#endif
/* i#1908: vdso and vsyscall are now split */
app_pc vdso_page_start = NULL;
size_t vdso_size = 0;
#if !defined(STANDALONE_UNIT_TEST) && !defined(STATIC_LIBRARY)
/* The pthreads library keeps errno in its pthread_descr data structure,
* which it looks up by dispatching on the stack pointer. This doesn't work
* when within dynamo. Thus, we define our own __errno_location() for use both
* by us and the app, to prevent pthreads looking at the stack pointer when
* out of the code cache.
*/
/* FIXME: maybe we should create 1st dcontext earlier so we don't need init_errno?
* any problems with init_errno being set and then dcontext->errno being read?
* FIXME: if a thread issues a dr_app_stop, then we don't want to use
* this errno slot? But it may later do a start...probably ok to keep using
* the slot. But, when threads die, they'll all use the same init_errno!
*/
static int init_errno; /* errno until 1st dcontext created */
int *
__errno_location(void)
{
/* Each dynamo thread should have a separate errno */
dcontext_t *dcontext = get_thread_private_dcontext();
if (dcontext == NULL)
return &init_errno;
else {
/* WARNING: init_errno is in data segment so can be RO! */
return &(dcontext->upcontext_ptr->dr_errno);
}
}
#endif /* !STANDALONE_UNIT_TEST && !STATIC_LIBRARY */
#if defined(HAVE_TLS) && defined(CLIENT_INTERFACE)
/* i#598
* (gdb) x/20i (*(errno_loc_t)0xf721e413)
* 0xf721e413 <__errno_location>: push %ebp
* 0xf721e414 <__errno_location+1>: mov %esp,%ebp
* 0xf721e416 <__errno_location+3>: call <__x86.get_pc_thunk.cx>
* 0xf721e41b <__errno_location+8>: add $0x166bd9,%ecx
* 0xf721e421 <__errno_location+14>: mov -0x1c(%ecx),%eax
* 0xf721e427 <__errno_location+20>: add %gs:0x0,%eax
* 0xf721e42e <__errno_location+27>: pop %ebp
* 0xf721e42f <__errno_location+28>: ret
*
* __errno_location calculates the errno location by adding
* errno's offset in TLS to the TLS base.
* However, because the TLS has been switched in os_tls_init,
* the calculated address is wrong.
* We first get the errno offset in TLS at init time and
* calculate the correct address by adding the app's TLS base.
*/
/* __errno_location on ARM:
* 0xb6f0b290 <__errno_location>: ldr r3, [pc, #12]
* 0xb6f0b292 <__errno_location+2>: mrc 15, 0, r0, cr13, cr0, {3}
* 0xb6f0b296 <__errno_location+6>: add r3, pc
* 0xb6f0b298 <__errno_location+8>: ldr r3, [r3, #0]
* 0xb6f0b29a <__errno_location+10>: adds r0, r0, r3
* 0xb6f0b29c <__errno_location+12>: bx lr
* It uses the predefined offset to get errno location in TLS,
* and we should be able to reuse the code here.
*/
static int libc_errno_tls_offs;
static int *
our_libc_errno_loc(void)
{
void *app_tls = os_get_app_tls_base(NULL, TLS_REG_LIB);
if (app_tls == NULL)
return NULL;
return (int *)(app_tls + libc_errno_tls_offs);
}
#endif
/* i#238/PR 499179: libc errno preservation
*
* Errno location is per-thread so we store the
* function globally and call it each time. Note that pthreads seems
* to be the one who provides per-thread errno: using raw syscalls to
* create threads, we end up with a global errno:
*
* > for i in linux.thread.*0/log.*; do grep 'libc errno' $i | head -1; done
* libc errno loc: 0x00007f153de26698
* libc errno loc: 0x00007f153de26698
* > for i in pthreads.pthreads.*0/log.*; do grep 'libc errno' $i | head -1; done
* libc errno loc: 0x00007fc24d1ce698
* libc errno loc: 0x00007fc24d1cd8b8
* libc errno loc: 0x00007fc24c7cc8b8
*/
typedef int *(*errno_loc_t)(void);
static errno_loc_t
get_libc_errno_location(bool do_init)
{
static errno_loc_t libc_errno_loc;
if (do_init) {
module_iterator_t *mi = module_iterator_start();
while (module_iterator_hasnext(mi)) {
module_area_t *area = module_iterator_next(mi);
const char *modname = GET_MODULE_NAME(&area->names);
/* We ensure the match is at the start of the name to avoid matching "libgolibc.so".
* GET_MODULE_NAME never includes the path: i#138 will add path.
*/
if (modname != NULL && strstr(modname, "libc.so") == modname) {
bool found = true;
/* called during init when .data is writable */
libc_errno_loc =
(errno_loc_t)d_r_get_proc_address(area->start, "__errno_location");
ASSERT(libc_errno_loc != NULL);
LOG(GLOBAL, LOG_THREADS, 2, "libc errno loc func: " PFX "\n",
libc_errno_loc);
#ifdef CLIENT_INTERFACE
/* Currently, DR is loaded by the system loader and hooked up
* to the app's libc, so right now we still need this routine.
* We can remove this after libc independence and/or
* early injection.
*/
if (INTERNAL_OPTION(private_loader)) {
acquire_recursive_lock(&privload_lock);
if (privload_lookup_by_base(area->start) != NULL)
found = false;
release_recursive_lock(&privload_lock);
}
#endif
if (found)
break;
}
}
module_iterator_stop(mi);
#if defined(HAVE_TLS) && defined(CLIENT_INTERFACE)
/* i#598: init the libc errno's offset. If we didn't find libc above,
* then we don't need to do this.
*/
if (INTERNAL_OPTION(private_loader) && libc_errno_loc != NULL) {
void *priv_lib_tls_base = os_get_priv_tls_base(NULL, TLS_REG_LIB);
ASSERT(priv_lib_tls_base != NULL);
libc_errno_tls_offs = (void *)libc_errno_loc() - priv_lib_tls_base;
libc_errno_loc = &our_libc_errno_loc;
}
#endif
}
return libc_errno_loc;
}
/* i#238/PR 499179: our __errno_location isn't affecting libc so until
* we have libc independence or our own private isolated libc we need
* to preserve the app's libc's errno
*/
int
get_libc_errno(void)
{
#if defined(STANDALONE_UNIT_TEST) && (defined(MACOS) || defined(ANDROID))
return errno;
#else
# ifdef STANDALONE_UNIT_TEST
errno_loc_t func = __errno_location;
# else
errno_loc_t func = get_libc_errno_location(false);
# endif
if (func == NULL) {
/* libc hasn't been loaded yet or we're doing early injection. */
return 0;
} else {
int *loc = (*func)();
ASSERT(loc != NULL);
LOG(THREAD_GET, LOG_THREADS, 5, "libc errno loc: " PFX "\n", loc);
if (loc != NULL)
return *loc;
}
return 0;
#endif
}
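/* Hedged usage sketch: a symmetric set_libc_errno() is assumed here (it is
 * not shown in this excerpt). Code that runs DR work between app
 * instructions could preserve the app's errno around it:
 *
 *     int app_errno = get_libc_errno();
 *     ... DR work that may clobber the app's libc errno ...
 *     set_libc_errno(app_errno);
 */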
/* N.B.: pthreads has two other locations it keeps on a per-thread basis:
* h_errno and res_state. See glibc-2.2.4/linuxthreads/errno.c.
* If dynamo ever modifies those we'll need to do to them what we now do to
* errno.
*/
/* The environment vars exhibit totally messed up behavior when someone
* does an execve of /bin/sh -- not sure what's going on, but using our
* own implementation of unsetenv fixes all our problems. If we use
* libc's, unsetenv either does nothing or ends up having getenv return
* NULL for other vars that are obviously set (by iterating through environ).
* FIXME: find out the real story here.
*/
int
our_unsetenv(const char *name)
{
/* FIXME: really we should have some kind of synchronization */
size_t name_len;
char **env = our_environ;
if (name == NULL || *name == '\0' || strchr(name, '=') != NULL) {
return -1;
}
ASSERT(our_environ != NULL);
if (our_environ == NULL)
return -1;
name_len = strlen(name);
while (*env != NULL) {
if (strncmp(*env, name, name_len) == 0 && (*env)[name_len] == '=') {
/* We have a match. Shift the subsequent entries. Keep going to
* handle later matches.
*/
char **e;
for (e = env; *e != NULL; e++)
*e = *(e + 1);
} else {
env++;
}
}
return 0;
}
/* Clobbers the name rather than shifting, to preserve auxv (xref i#909). */
bool
disable_env(const char *name)
{
size_t name_len;
char **env = our_environ;
if (name == NULL || *name == '\0' || strchr(name, '=') != NULL) {
return false;
}
ASSERT(our_environ != NULL);
if (our_environ == NULL)
return false;
name_len = strlen(name);
while (*env != NULL) {
if (strncmp(*env, name, name_len) == 0 && (*env)[name_len] == '=') {
/* We have a match. If we shift subsequent entries we'll mess
* up access to auxv, which is after the env block, so we instead
* disable the env var by changing its name.
* We keep going to handle later matches.
*/
snprintf(*env, name_len, "__disabled__");
}
env++;
}
return true;
}
/* i#46: Private getenv.
*/
char *
our_getenv(const char *name)
{
char **env = our_environ;
size_t i;
size_t name_len;
if (name == NULL || name[0] == '\0' || strchr(name, '=') != NULL) {
return NULL;
}
ASSERT_MESSAGE(CHKLVL_ASSERTS,
"our_environ is missing. _init() or "
"dynamorio_set_envp() were not called",
our_environ != NULL);
if (our_environ == NULL)
return NULL;
name_len = strlen(name);
for (i = 0; env[i] != NULL; i++) {
if (strncmp(env[i], name, name_len) == 0 && env[i][name_len] == '=') {
return env[i] + name_len + 1;
}
}
return NULL;
}
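/* Illustrative call: our_getenv("DYNAMORIO_OPTIONS") returns a pointer into
 * the app's env block (not a copy), or NULL if the variable is unset.
 */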
bool
is_our_environ_followed_by_auxv(void)
{
#ifdef STATIC_LIBRARY
/* Since we initialize late, our_environ is likely no longer pointed at
* the stack (i#2122).
*/
return false;
#else
return true;
#endif
}
/* Work around drpreload's _init going first. We can get envp in our own _init
* routine down below, but drpreload.so comes first and calls
* dynamorio_app_init before our own _init routine gets called. Apps using the
* app API are unaffected because our _init routine will have run by then. For
* STATIC_LIBRARY, we used to set our_environ in our_init(), but to support
* the app setting DYNAMORIO_OPTIONS after our_init() runs, we now just use environ.
*/
DYNAMORIO_EXPORT
void
dynamorio_set_envp(char **envp)
{
our_environ = envp;
}
/* shared library init */
static int
our_init(int argc, char **argv, char **envp)
{
/* If we do not want to use drpreload.so, we can take over here: but when using
* drpreload, this is called *after* we have already taken over.
*/
extern void dynamorio_app_take_over(void);
bool takeover = false;
#ifdef INIT_TAKE_OVER
takeover = true;
#endif
#ifdef VMX86_SERVER
/* PR 391765: take over here instead of using preload */
takeover = os_in_vmkernel_classic();
#endif
#ifndef STATIC_LIBRARY
if (our_environ != NULL) {
/* Set by dynamorio_set_envp above. These should agree. */
ASSERT(our_environ == envp);
} else {
our_environ = envp;
}
#endif
/* if using preload, no -early_inject */
#ifdef STATIC_LIBRARY
if (!takeover) {
const char *takeover_env = getenv("DYNAMORIO_TAKEOVER_IN_INIT");
if (takeover_env != NULL && strcmp(takeover_env, "1") == 0) {
takeover = true;
}
}
#endif
if (takeover) {
if (dynamorio_app_init() == 0 /* success */) {
dynamorio_app_take_over();
}
}
return 0;
}
#if defined(STATIC_LIBRARY) || defined(STANDALONE_UNIT_TEST)
/* If we're getting linked into a binary that already has an _init definition
* like the app's exe or unit_tests, we add a pointer to our_init() to the
* .init_array section. We can't use the constructor attribute because not all
* toolchains pass the args and environment to the constructor.
*/
static init_fn_t
# ifdef MACOS
__attribute__((section("__DATA,__mod_init_func"), aligned(sizeof(void *)), used))
# else
__attribute__((section(".init_array"), aligned(sizeof(void *)), used))
# endif
init_array[] = { our_init };
#else
/* If we're a normal shared object, then we override _init.
*/
int
_init(int argc, char **argv, char **envp)
{
# ifdef ANDROID
/* i#1862: the Android loader passes *nothing* to lib init routines. We
* rely on DR being listed before libc so we can read the TLS slot the
* kernel set up.
*/
if (!get_kernel_args(&argc, &argv, &envp)) {
/* XXX: scan the stack and look for known auxv patterns or sthg. */
argc = 0;
argv = NULL;
envp = NULL;
}
ASSERT_MESSAGE(CHKLVL_ASSERTS, "failed to find envp", envp != NULL);
# endif
return our_init(argc, argv, envp);
}
#endif
bool
kernel_is_64bit(void)
{
return kernel_64bit;
}
#ifdef MACOS
/* XXX: if we get enough of these, move to os_macos.c or sthg */
static bool
sysctl_query(int level0, int level1, void *buf, size_t bufsz)
{
int res;
int name[2];
size_t len = bufsz;
name[0] = level0;
name[1] = level1;
res = dynamorio_syscall(SYS___sysctl, 6, &name, 2, buf, &len, NULL, 0);
return (res >= 0);
}
#endif
static void
get_uname(void)
{
/* assumption: only called at init, so we don't need any synch
* or .data unprot
*/
static struct utsname uinfo; /* can be large, avoid stack overflow */
#ifdef MACOS
if (!sysctl_query(CTL_KERN, KERN_OSTYPE, &uinfo.sysname, sizeof(uinfo.sysname)) ||
!sysctl_query(CTL_KERN, KERN_HOSTNAME, &uinfo.nodename, sizeof(uinfo.nodename)) ||
!sysctl_query(CTL_KERN, KERN_OSRELEASE, &uinfo.release, sizeof(uinfo.release)) ||
!sysctl_query(CTL_KERN, KERN_VERSION, &uinfo.version, sizeof(uinfo.version)) ||
!sysctl_query(CTL_HW, HW_MACHINE, &uinfo.machine, sizeof(uinfo.machine))) {
ASSERT(false && "sysctl queries failed");
return;
}
#else
DEBUG_DECLARE(int res =)
dynamorio_syscall(SYS_uname, 1, (ptr_uint_t)&uinfo);
ASSERT(res >= 0);
#endif
LOG(GLOBAL, LOG_TOP, 1, "uname:\n\tsysname: %s\n", uinfo.sysname);
LOG(GLOBAL, LOG_TOP, 1, "\tnodename: %s\n", uinfo.nodename);
LOG(GLOBAL, LOG_TOP, 1, "\trelease: %s\n", uinfo.release);
LOG(GLOBAL, LOG_TOP, 1, "\tversion: %s\n", uinfo.version);
LOG(GLOBAL, LOG_TOP, 1, "\tmachine: %s\n", uinfo.machine);
if (strncmp(uinfo.machine, "x86_64", sizeof("x86_64")) == 0)
kernel_64bit = true;
#ifdef MACOS
/* XXX: I would skip these checks for standalone so we don't have to set env
* vars for frontends to see the options but I'm still afraid of some syscall
* crash with no output: I'd rather have two messages than silent crashing.
*/
if (DYNAMO_OPTION(max_supported_os_version) != 0) { /* 0 disables */
/* We only support OSX 10.7.5 - 10.9.1. That means kernels 11.x-13.x. */
# define MIN_DARWIN_VERSION_SUPPORTED 11
int kernel_major;
if (sscanf(uinfo.release, "%d", &kernel_major) != 1 ||
kernel_major > DYNAMO_OPTION(max_supported_os_version) ||
kernel_major < MIN_DARWIN_VERSION_SUPPORTED) {
/* We make this non-fatal as it's likely DR will work */
SYSLOG(SYSLOG_WARNING, UNSUPPORTED_OS_VERSION, 3, get_application_name(),
get_application_pid(), uinfo.release);
}
macos_version = kernel_major;
}
#endif
}
/* os-specific initializations */
void
d_r_os_init(void)
{
ksynch_init();
get_uname();
/* Populate global data caches. */
get_application_name();
get_application_base();
/* determine whether gettid is provided and needed for threads,
* or whether getpid suffices. Even 2.4 kernels have gettid
* (it maps to getpid); we don't have an old enough target to test this.
*/
#ifdef MACOS
kernel_thread_groups = (dynamorio_syscall(SYS_thread_selfid, 0) >= 0);
#else
kernel_thread_groups = (dynamorio_syscall(SYS_gettid, 0) >= 0);
#endif
LOG(GLOBAL, LOG_TOP | LOG_STATS, 1, "thread id is from %s\n",
kernel_thread_groups ? "gettid" : "getpid");
#ifdef MACOS
/* SYS_thread_selfid was added in 10.6. We have no simple way to get the
* thread id on 10.5, so we don't support it.
*/
if (!kernel_thread_groups) {
SYSLOG(SYSLOG_WARNING, UNSUPPORTED_OS_VERSION, 3, get_application_name(),
get_application_pid(), "Mac OSX 10.5 or earlier");
}
#else
ASSERT_CURIOSITY(kernel_thread_groups);
#endif
pid_cached = get_process_id();
#ifdef VMX86_SERVER
vmk_init();
#endif
d_r_signal_init();
/* We now set up an early fault handler for d_r_safe_read() (i#350) */
fault_handling_initialized = true;
memquery_init();
#ifdef PROFILE_RDTSC
if (dynamo_options.profile_times) {
ASSERT_NOT_TESTED();
kilo_hertz = get_timer_frequency();
LOG(GLOBAL, LOG_TOP | LOG_STATS, 1, "CPU MHz is %d\n", kilo_hertz / 1000);
}
#endif /* PROFILE_RDTSC */
/* Needs to be after heap_init */
IF_NO_MEMQUERY(memcache_init());
/* we didn't have heap in os_file_init() so create and add global logfile now */
fd_table = generic_hash_create(
GLOBAL_DCONTEXT, INIT_HTABLE_SIZE_FD, 80 /* load factor: not perf-critical */,
HASHTABLE_SHARED | HASHTABLE_PERSISTENT, NULL _IF_DEBUG("fd table"));
#ifdef DEBUG
if (GLOBAL != INVALID_FILE)
fd_table_add(GLOBAL, OS_OPEN_CLOSE_ON_FORK);
#endif
/* Ensure initialization */
get_dynamorio_dll_start();
#ifdef LINUX
if (DYNAMO_OPTION(emulate_brk))
init_emulated_brk(NULL);
#endif
#ifdef ANDROID
/* This must be set up earlier than privload_tls_init, and must be set up
* for non-client-interface as well, as this initializes DR_TLS_BASE_OFFSET
* (i#1931).
*/
init_android_version();
#endif
}
/* called before any logfiles are opened */
void
os_file_init(void)
{
/* We steal fds from the app for better transparency. We lower the max file
* descriptor limit as viewed by the app, and block SYS_dup{2,3} and
* SYS_fcntl(F_DUPFD*) from creating a file explicitly in our space. We do
* not try to stop incremental file opening from extending into our space:
* if the app really is running out of fds, we'll give it some of ours:
* after all we probably don't need all -steal_fds, and if we really need fds
* we typically open them at startup. We also don't bother watching all
* syscalls that take in fds from affecting our fds.
*/
if (DYNAMO_OPTION(steal_fds) > 0) {
struct rlimit rlimit_nofile;
/* SYS_getrlimit uses an old 32-bit-field struct so we want SYS_ugetrlimit */
if (dynamorio_syscall(
IF_MACOS_ELSE(SYS_getrlimit, IF_X64_ELSE(SYS_getrlimit, SYS_ugetrlimit)),
2, RLIMIT_NOFILE, &rlimit_nofile) != 0) {
/* linux default is 1024 */
SYSLOG_INTERNAL_WARNING("getrlimit RLIMIT_NOFILE failed"); /* can't LOG yet */
rlimit_nofile.rlim_cur = 1024;
rlimit_nofile.rlim_max = 1024;
}
/* pretend the limit is lower and reserve the top spots for us.
* for simplicity and to give as much room as possible to app,
* raise soft limit to equal hard limit.
* if an app really depends on a low soft limit, they can run
* with -steal_fds 0.
*/
if (rlimit_nofile.rlim_max > DYNAMO_OPTION(steal_fds)) {
int res;
min_dr_fd = rlimit_nofile.rlim_max - DYNAMO_OPTION(steal_fds);
app_rlimit_nofile.rlim_max = min_dr_fd;
app_rlimit_nofile.rlim_cur = app_rlimit_nofile.rlim_max;
rlimit_nofile.rlim_cur = rlimit_nofile.rlim_max;
res = dynamorio_syscall(SYS_setrlimit, 2, RLIMIT_NOFILE, &rlimit_nofile);
if (res != 0) {
SYSLOG_INTERNAL_WARNING("unable to raise RLIMIT_NOFILE soft limit: %d",
res);
}
} else /* not fatal: we'll just end up using fds in app space */
SYSLOG_INTERNAL_WARNING("unable to reserve fds");
}
/* we don't have heap set up yet so we init fd_table in os_init */
}
/* we need to re-cache after a fork */
static char *
get_application_pid_helper(bool ignore_cache)
{
static char pidstr[16];
if (!pidstr[0] || ignore_cache) {
int pid = get_process_id();
snprintf(pidstr, sizeof(pidstr) - 1, "%d", pid);
}
return pidstr;
}
/* get application pid, (cached), used for event logging */
char *
get_application_pid()
{
return get_application_pid_helper(false);
}
/* i#907: Called during early injection before data section protection to avoid
* issues with /proc/self/exe.
*/
void
set_executable_path(const char *exe_path)
{
strncpy(executable_path, exe_path, BUFFER_SIZE_ELEMENTS(executable_path));
NULL_TERMINATE_BUFFER(executable_path);
}
/* The OSX kernel used to place the bare executable path above envp.
* On recent XNU versions, the kernel now prefixes the executable path
* with the string executable_path= so it can be parsed getenv style.
*/
#ifdef MACOS
# define EXECUTABLE_KEY "executable_path="
#endif
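/* Sketch of the string found above envp (path is illustrative):
 *     newer XNU: "executable_path=/usr/bin/app"
 *     older XNU: "/usr/bin/app" (bare path, no key prefix)
 */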
/* i#189: we need to re-cache after a fork */
static char *
get_application_name_helper(bool ignore_cache, bool full_path)
{
if (!executable_path[0] || ignore_cache) {
#ifdef VMX86_SERVER
if (os_in_vmkernel_userworld()) {
vmk_getnamefrompid(pid, executable_path, sizeof(executable_path));
} else
#endif
if (DYNAMO_OPTION(early_inject)) {
ASSERT(executable_path[0] != '\0' &&
"i#907: Can't read /proc/self/exe for early injection");
} else {
#ifdef LINUX
/* Populate cache from /proc/self/exe link. */
strncpy(executable_path, read_proc_self_exe(ignore_cache),
BUFFER_SIZE_ELEMENTS(executable_path));
#else
/* OSX kernel puts full app exec path above envp */
char *c, **env = our_environ;
do {
env++;
} while (*env != NULL);
env++; /* Skip the NULL separating the envp array from exec_path */
c = *env;
if (strncmp(EXECUTABLE_KEY, c, strlen(EXECUTABLE_KEY)) == 0) {
c += strlen(EXECUTABLE_KEY);
}
/* If our frontends always absolute-ize paths prior to exec,
* this should usually be absolute -- but we go ahead and
* handle relative just in case (and to handle child processes).
* We add the cur dir, but note that the resulting path can
* still contain . or .. so it's not normalized (but it is a
* correct absolute path). Xref i#1402, i#1406, i#1407.
*/
if (*c != '/') {
int len;
if (!os_get_current_dir(executable_path,
BUFFER_SIZE_ELEMENTS(executable_path)))
len = 0;
else
len = strlen(executable_path);
snprintf(executable_path + len,
BUFFER_SIZE_ELEMENTS(executable_path) - len, "%s%s",
len > 0 ? "/" : "", c);
} else
strncpy(executable_path, c, BUFFER_SIZE_ELEMENTS(executable_path));
#endif
NULL_TERMINATE_BUFFER(executable_path);
/* FIXME: Fall back on /proc/self/cmdline and maybe argv[0] from
* _init().
*/
ASSERT(strlen(executable_path) > 0 && "readlink /proc/self/exe failed");
}
}
/* Get basename. */
if (executable_basename == NULL || ignore_cache) {
executable_basename = strrchr(executable_path, '/');
executable_basename =
(executable_basename == NULL ? executable_path : executable_basename + 1);
}
return (full_path ? executable_path : executable_basename);
}
/* get application name, (cached), used for event logging */
char *
get_application_name(void)
{
return get_application_name_helper(false, true /* full path */);
}
/* Note: this is exported so that libdrpreload.so (preload.c) can use it to
* get process names to do selective process following (PR 212034). The
* alternative is to duplicate or compile in this code into libdrpreload.so,
* which is messy. Besides, libdynamorio.so is already loaded into the process
* and available, so cleaner to just use functions from it.
*/
DYNAMORIO_EXPORT const char *
get_application_short_name(void)
{
return get_application_name_helper(false, false /* short name */);
}
/* Processor information provided by kernel */
#define PROC_CPUINFO "/proc/cpuinfo"
#define CPUMHZ_LINE_LENGTH 64
#define CPUMHZ_LINE_FORMAT "cpu MHz\t\t: %lu.%03lu\n"
/* printed in /usr/src/linux-2.4/arch/i386/kernel/setup.c calibrated in time.c */
/* seq_printf(m, "cpu MHz\t\t: %lu.%03lu\n", cpu_khz / 1000, (cpu_khz % 1000)) */
/* e.g. cpu MHz : 1594.851 */
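/* For that sample line, sscanf() below yields cpu_mhz == 1594 and
 * cpu_khz == 851, so get_timer_frequency_cpuinfo() returns
 * 1594 * 1000 + 851 == 1594851, i.e. the frequency in KHz.
 */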
static timestamp_t
get_timer_frequency_cpuinfo(void)
{
file_t cpuinfo;
ssize_t nread;
char *buf;
char *mhz_line;
ulong cpu_mhz = 1000;
ulong cpu_khz = 0;
cpuinfo = os_open(PROC_CPUINFO, OS_OPEN_READ);
/* This can happen in a chroot or if /proc is disabled. */
if (cpuinfo == INVALID_FILE)
return 1000 * 1000; /* 1 GHz */
/* cpu MHz is typically in the first 4096 bytes. If not, or we get a short
* or interrupted read, our timer frequency estimate will be off, but it's
* not the end of the world.
* FIXME: Factor a buffered file reader out of our maps iterator if we want
* to do this the right way.
*/
buf = global_heap_alloc(PAGE_SIZE HEAPACCT(ACCT_OTHER));
nread = os_read(cpuinfo, buf, PAGE_SIZE - 1);
if (nread > 0) {
buf[nread] = '\0';
mhz_line = strstr(buf, "cpu MHz\t\t:");
if (mhz_line != NULL &&
sscanf(mhz_line, CPUMHZ_LINE_FORMAT, &cpu_mhz, &cpu_khz) == 2) {
LOG(GLOBAL, LOG_ALL, 2, "Processor speed exactly %lu.%03luMHz\n", cpu_mhz,
cpu_khz);
}
}
global_heap_free(buf, PAGE_SIZE HEAPACCT(ACCT_OTHER));
os_close(cpuinfo);
return cpu_mhz * 1000 + cpu_khz;
}
timestamp_t
get_timer_frequency()
{
#ifdef VMX86_SERVER
if (os_in_vmkernel_userworld()) {
return vmk_get_timer_frequency();
}
#endif
return get_timer_frequency_cpuinfo();
}
/* DR has standardized on UTC time, which counts from Jan 1, 1601.
* That's the Windows standard. But Linux uses the Epoch of Jan 1, 1970.
*/
#define UTC_TO_EPOCH_SECONDS 11644473600
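/* Derivation: 1601..1969 spans 369 years with 89 leap days (92 multiples
 * of 4 minus the non-leap centuries 1700, 1800, 1900), giving
 * (369 * 365 + 89) * 86400 == 134774 * 86400 == 11644473600 seconds.
 */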
/* seconds since 1601 */
uint
query_time_seconds(void)
{
struct timeval current_time;
uint64 val = dynamorio_syscall(SYS_gettimeofday, 2, &current_time, NULL);
#ifdef MACOS
/* MacOS before Sierra returns usecs:secs and does not set the timeval struct. */
if (macos_version < MACOS_VERSION_SIERRA) {
if ((int)val < 0)
return 0;
return (uint)val + UTC_TO_EPOCH_SECONDS;
}
#endif
if ((int)val >= 0) {
return current_time.tv_sec + UTC_TO_EPOCH_SECONDS;
} else {
ASSERT_NOT_REACHED();
return 0;
}
}
/* milliseconds since 1601 */
uint64
query_time_millis()
{
struct timeval current_time;
uint64 val = dynamorio_syscall(SYS_gettimeofday, 2, &current_time, NULL);
#ifdef MACOS
/* MacOS before Sierra returns usecs:secs and does not set the timeval struct. */
if (macos_version < MACOS_VERSION_SIERRA) {
if ((int)val > 0) {
current_time.tv_sec = (uint)val;
current_time.tv_usec = (uint)(val >> 32);
}
}
#endif
if ((int)val >= 0) {
uint64 res =
(((uint64)current_time.tv_sec) * 1000) + (current_time.tv_usec / 1000);
res += UTC_TO_EPOCH_SECONDS * 1000;
return res;
} else {
ASSERT_NOT_REACHED();
return 0;
}
}
/* microseconds since 1601 */
uint64
query_time_micros()
{
struct timeval current_time;
uint64 val = dynamorio_syscall(SYS_gettimeofday, 2, &current_time, NULL);
#ifdef MACOS
/* MacOS before Sierra returns usecs:secs and does not set the timeval struct. */
if (macos_version < MACOS_VERSION_SIERRA) {
if ((int)val > 0) {
current_time.tv_sec = (uint)val;
current_time.tv_usec = (uint)(val >> 32);
}
}
#endif
if ((int)val >= 0) {
uint64 res = (((uint64)current_time.tv_sec) * 1000000) + current_time.tv_usec;
res += UTC_TO_EPOCH_SECONDS * 1000000;
return res;
} else {
ASSERT_NOT_REACHED();
return 0;
}
}
#ifdef RETURN_AFTER_CALL
/* Finds the bottom of the call stack, presumably at program startup. */
/* This routine is a copycat of internal_dump_callstack and makes
assumptions about program state, i.e. that frame pointers are valid;
it should therefore be used only at well-known points in release builds.
*/
static app_pc
find_stack_bottom()
{
app_pc retaddr = 0;
int depth = 0;
reg_t *fp;
/* from dump_dr_callstack() */
asm("mov %%" ASM_XBP ", %0" : "=m"(fp));
LOG(THREAD_GET, LOG_ALL, 3, "Find stack bottom:\n");
while (fp != NULL && is_readable_without_exception((byte *)fp, sizeof(reg_t) * 2)) {
retaddr = (app_pc) * (fp + 1); /* presumably also readable */
LOG(THREAD_GET, LOG_ALL, 3,
"\tframe ptr " PFX " => parent " PFX ", ret = " PFX "\n", fp, *fp, retaddr);
depth++;
/* yes I've seen weird recursive cases before */
if (fp == (reg_t *)*fp || depth > 100)
break;
fp = (reg_t *)*fp;
}
return retaddr;
}
#endif /* RETURN_AFTER_CALL */
/* os-specific atexit cleanup */
void
os_slow_exit(void)
{
d_r_signal_exit();
memquery_exit();
ksynch_exit();
generic_hash_destroy(GLOBAL_DCONTEXT, fd_table);
fd_table = NULL;
if (doing_detach) {
vsyscall_page_start = NULL;
IF_DEBUG(num_fd_add_pre_heap = 0;)
}
DELETE_LOCK(set_thread_area_lock);
#ifdef CLIENT_INTERFACE
DELETE_LOCK(client_tls_lock);
#endif
IF_NO_MEMQUERY(memcache_exit());
}
/* Helper function that calls cleanup_and_terminate after blocking most signals
* (i#2921).
*/
void
block_cleanup_and_terminate(dcontext_t *dcontext, int sysnum, ptr_uint_t sys_arg1,
ptr_uint_t sys_arg2, bool exitproc,
/* these 2 args are only used for Mac thread exit */
ptr_uint_t sys_arg3, ptr_uint_t sys_arg4)
{
/* This thread is on its way to exit. We are blocking all signals since any
* signal that reaches us now can be delayed until after the exit is complete.
* We may still receive a suspend signal for synchronization that we may need
* to reply to (i#2921).
*/
if (sysnum == SYS_kill)
block_all_signals_except(NULL, 2, dcontext->sys_param0, SUSPEND_SIGNAL);
else
block_all_signals_except(NULL, 1, SUSPEND_SIGNAL);
cleanup_and_terminate(dcontext, sysnum, sys_arg1, sys_arg2, exitproc, sys_arg3,
sys_arg4);
}
/* os-specific atexit cleanup */
void
os_fast_exit(void)
{
/* nothing */
}
void
os_terminate_with_code(dcontext_t *dcontext, terminate_flags_t flags, int exit_code)
{
/* i#1319: we support a signal via 2nd byte */
bool use_signal = exit_code > 0x00ff;
/* XXX: TERMINATE_THREAD not supported */
ASSERT_NOT_IMPLEMENTED(TEST(TERMINATE_PROCESS, flags));
if (use_signal) {
int sig = (exit_code & 0xff00) >> 8;
os_terminate_via_signal(dcontext, flags, sig);
ASSERT_NOT_REACHED();
}
if (TEST(TERMINATE_CLEANUP, flags)) {
/* we enter from several different places, so rewind until top-level kstat */
KSTOP_REWIND_UNTIL(thread_measured);
block_cleanup_and_terminate(dcontext, SYSNUM_EXIT_PROCESS, exit_code, 0,
true /*whole process*/, 0, 0);
} else {
/* clean up may be impossible - just terminate */
d_r_config_exit(); /* delete .1config file */
exit_process_syscall(exit_code);
}
}
void
os_terminate(dcontext_t *dcontext, terminate_flags_t flags)
{
os_terminate_with_code(dcontext, flags, -1);
}
int
os_timeout(int time_in_milliseconds)
{
ASSERT_NOT_IMPLEMENTED(false);
return 0;
}
/************************************************************************
* SEGMENT STEALING
*
* Not easy to make truly transparent -- but the alternative of dispatch
* by thread id on global memory has performance implications.
* Pull the non-STEAL_SEGMENT code out of the cvs attic for a base if
* transparency becomes more of a problem.
*/
#define TLS_LOCAL_STATE_OFFSET (offsetof(os_local_state_t, state))
/* offset from top of page */
#define TLS_OS_LOCAL_STATE 0x00
#define TLS_SELF_OFFSET (TLS_OS_LOCAL_STATE + offsetof(os_local_state_t, self))
#define TLS_THREAD_ID_OFFSET (TLS_OS_LOCAL_STATE + offsetof(os_local_state_t, tid))
#define TLS_DCONTEXT_OFFSET (TLS_OS_LOCAL_STATE + TLS_DCONTEXT_SLOT)
#ifdef X86
# define TLS_MAGIC_OFFSET (TLS_OS_LOCAL_STATE + offsetof(os_local_state_t, magic))
#endif
/* they should be used with os_tls_offset, so we do not need to add
 * TLS_OS_LOCAL_STATE here
 */
#define TLS_APP_LIB_TLS_BASE_OFFSET (offsetof(os_local_state_t, app_lib_tls_base))
#define TLS_APP_ALT_TLS_BASE_OFFSET (offsetof(os_local_state_t, app_alt_tls_base))
#define TLS_APP_LIB_TLS_REG_OFFSET (offsetof(os_local_state_t, app_lib_tls_reg))
#define TLS_APP_ALT_TLS_REG_OFFSET (offsetof(os_local_state_t, app_alt_tls_reg))
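/* Rough layout sketch (illustrative; the os_local_state_t definition is
 * authoritative): state (local_state_extended_t: spill slots, IBL tables)
 * sits at seg:0x00, followed by self, tid, the dcontext slot, magic (x86),
 * and the app TLS base/reg fields used by the offsets above.
 */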
/* N.B.: imm and offs are ushorts!
* We use %c[0-9] to get gcc to emit an integer constant without a leading $ for
* the segment offset. See the documentation here:
* http://gcc.gnu.org/onlinedocs/gccint/Output-Template.html#Output-Template
* Also, var needs to match the pointer size, or else we'll get stack corruption.
* XXX: This is marked volatile to prevent gcc from speculating this code before
* checks for is_thread_tls_initialized(), but if we could find a more
* precise constraint, then the compiler would be able to optimize better. See
* glibc comments on THREAD_SELF.
*/
#ifdef X86
# define WRITE_TLS_SLOT_IMM(imm, var) \
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \
ASSERT(sizeof(var) == sizeof(void *)); \
asm volatile("mov %0, %" ASM_SEG ":%c1" : : "r"(var), "i"(imm));
# define READ_TLS_SLOT_IMM(imm, var) \
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \
ASSERT(sizeof(var) == sizeof(void *)); \
asm volatile("mov %" ASM_SEG ":%c1, %0" : "=r"(var) : "i"(imm));
# define WRITE_TLS_INT_SLOT_IMM(imm, var) \
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \
ASSERT(sizeof(var) == sizeof(int)); \
asm volatile("movl %0, %" ASM_SEG ":%c1" : : "r"(var), "i"(imm));
# define READ_TLS_INT_SLOT_IMM(imm, var) \
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \
ASSERT(sizeof(var) == sizeof(int)); \
asm volatile("movl %" ASM_SEG ":%c1, %0" : "=r"(var) : "i"(imm));
/* FIXME: need dedicated-storage var for _TLS_SLOT macros, can't use expr */
# define WRITE_TLS_SLOT(offs, var) \
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED()); \
ASSERT(sizeof(var) == sizeof(void *)); \
ASSERT(sizeof(offs) == 2); \
asm("mov %0, %%" ASM_XAX : : "m"((var)) : ASM_XAX); \
asm("movzw" IF_X64_ELSE("q", "l") " %0, %%" ASM_XDX : : "m"((offs)) : ASM_XDX); \
asm("mov %%" ASM_XAX ", %" ASM_SEG ":(%%" ASM_XDX ")" : : : ASM_XAX, ASM_XDX);
# define READ_TLS_SLOT(offs, var) \
ASSERT(sizeof(var) == sizeof(void *)); \
ASSERT(sizeof(offs) == 2); \
asm("movzw" IF_X64_ELSE("q", "l") " %0, %%" ASM_XAX : : "m"((offs)) : ASM_XAX); \
asm("mov %" ASM_SEG ":(%%" ASM_XAX "), %%" ASM_XAX : : : ASM_XAX); \
asm("mov %%" ASM_XAX ", %0" : "=m"((var)) : : ASM_XAX);
#elif defined(AARCHXX)
/* Android needs indirection through a global. The Android toolchain has
* trouble with relocations if we use a global directly in asm, so we convert to
* a local variable in these macros. We pay the cost of the extra instructions
* for Linux ARM to share the code.
*/
# define WRITE_TLS_SLOT_IMM(imm, var) \
do { \
uint _base_offs = DR_TLS_BASE_OFFSET; \
__asm__ __volatile__("mov " ASM_R2 ", %0 \n\t" READ_TP_TO_R3_DISP_IN_R2 \
"str %1, [" ASM_R3 ", %2] \n\t" \
: \
: "r"(_base_offs), "r"(var), "i"(imm) \
: "memory", ASM_R2, ASM_R3); \
} while (0)
# define READ_TLS_SLOT_IMM(imm, var) \
do { \
uint _base_offs = DR_TLS_BASE_OFFSET; \
__asm__ __volatile__("mov " ASM_R2 ", %1 \n\t" READ_TP_TO_R3_DISP_IN_R2 \
"ldr %0, [" ASM_R3 ", %2] \n\t" \
: "=r"(var) \
: "r"(_base_offs), "i"(imm) \
: ASM_R2, ASM_R3); \
} while (0)
# define WRITE_TLS_INT_SLOT_IMM WRITE_TLS_SLOT_IMM /* b/c 32-bit */
# define READ_TLS_INT_SLOT_IMM READ_TLS_SLOT_IMM /* b/c 32-bit */
# define WRITE_TLS_SLOT(offs, var) \
do { \
uint _base_offs = DR_TLS_BASE_OFFSET; \
__asm__ __volatile__("mov " ASM_R2 ", %0 \n\t" READ_TP_TO_R3_DISP_IN_R2 \
"add " ASM_R3 ", " ASM_R3 ", %2 \n\t" \
"str %1, [" ASM_R3 "] \n\t" \
: \
: "r"(_base_offs), "r"(var), "r"(offs) \
: "memory", ASM_R2, ASM_R3); \
} while (0)
# define READ_TLS_SLOT(offs, var) \
do { \
uint _base_offs = DR_TLS_BASE_OFFSET; \
__asm__ __volatile__("mov " ASM_R2 ", %1 \n\t" READ_TP_TO_R3_DISP_IN_R2 \
"add " ASM_R3 ", " ASM_R3 ", %2 \n\t" \
"ldr %0, [" ASM_R3 "] \n\t" \
: "=r"(var) \
: "r"(_base_offs), "r"(offs) \
: ASM_R2, ASM_R3); \
} while (0)
#endif /* X86/ARM */
#ifdef X86
/* We use this at thread init and exit to make it easy to identify
* whether TLS is initialized (i#2089).
* We assume alignment does not matter.
*/
static os_local_state_t uninit_tls; /* has .magic == 0 */
#endif
static bool
is_thread_tls_initialized(void)
{
#ifdef X86
if (INTERNAL_OPTION(safe_read_tls_init)) {
/* Avoid faults during early init or during exit when we have no handler.
* It's not worth extending the handler as the faults are a perf hit anyway.
* For standalone_library, first_thread_tls_initialized will always be false,
* so we'll return false here and use our check in get_thread_private_dcontext().
*/
if (!first_thread_tls_initialized || last_thread_tls_exited)
return false;
/* To handle WSL (i#1986) where fs and gs start out equal to ss (0x2b),
* and when the MSR is used having a zero selector, and other complexities,
* we just do a blind safe read as the simplest solution once we're past
* initial init and have a fault handler.
*
* i#2089: to avoid the perf cost of syscalls to verify the tid, and to
* distinguish a fork child from a separate-group thread, we no longer read
* the tid field and check that the TLS belongs to this particular thread:
* instead we rely on clearing the .magic field for child threads and at
* thread exit (to avoid a fault) and we simply check the field here.
* A native app thread is very unlikely to match this.
*/
return safe_read_tls_magic() == TLS_MAGIC_VALID;
} else {
/* XXX i#2089: we're keeping this legacy code around until
* we're confident that the safe read code above is safer, more
* performant, and more robust.
*/
os_local_state_t *os_tls = NULL;
ptr_uint_t cur_seg = read_thread_register(SEG_TLS);
/* Handle WSL (i#1986) where fs and gs start out equal to ss (0x2b) */
if (cur_seg != 0 && cur_seg != read_thread_register(SEG_SS)) {
/* XXX: make this a safe read: but w/o dcontext we need special asm support */
READ_TLS_SLOT_IMM(TLS_SELF_OFFSET, os_tls);
}
# ifdef X64
if (os_tls == NULL && tls_dr_using_msr()) {
/* When the MSR is used, the selector in the register remains 0.
* We can't clear the MSR early in a new thread and then look for
* a zero base here b/c if kernel decides to use GDT that zeroing
* will set the selector, unless we want to assume we know when
* the kernel uses the GDT.
* Instead we make a syscall to get the tid. This should be ok
* perf-wise b/c the common case is the non-zero above.
*/
byte *base = tls_get_fs_gs_segment_base(SEG_TLS);
ASSERT(tls_global_type == TLS_TYPE_ARCH_PRCTL);
if (base != (byte *)POINTER_MAX && base != NULL) {
os_tls = (os_local_state_t *)base;
}
}
# endif
if (os_tls != NULL) {
return (os_tls->tid == get_sys_thread_id() ||
/* The child of a fork will initially come here */
os_tls->state.spill_space.dcontext->owning_process ==
get_parent_id());
} else
return false;
}
#elif defined(AARCHXX)
byte **dr_tls_base_addr;
if (tls_global_type == TLS_TYPE_NONE)
return false;
dr_tls_base_addr = (byte **)get_dr_tls_base_addr();
if (dr_tls_base_addr == NULL || *dr_tls_base_addr == NULL ||
/* We use the TLS slot's value to identify a now-exited thread (i#1578) */
*dr_tls_base_addr == TLS_SLOT_VAL_EXITED)
return false;
/* We would like to ASSERT is_dynamo_address(*tls_swap_slot) but that leads
* to infinite recursion for an address not in the vm_reserve area, as
* dynamo_vm_areas_start_reading() ends up calling
* deadlock_avoidance_unlock() which calls get_thread_private_dcontext()
* which comes here.
*/
return true;
#endif
}
bool
is_DR_segment_reader_entry(app_pc pc)
{
/* This routine is used to avoid problems with dr_prepopulate_cache() building
* bbs for DR code that reads DR segments when DR is a static library.
* It's a little ugly but it's not clear there's a better solution.
* See the discussion in i#2463 c#2.
*/
#ifdef X86
if (INTERNAL_OPTION(safe_read_tls_init)) {
return pc == (app_pc)safe_read_tls_magic || pc == (app_pc)safe_read_tls_self;
}
#endif
/* XXX i#2463: for ARM and for -no_safe_read_tls_init it may be
* more complicated as the PC may not be a function entry but the
* start of a bb after a branch in our C code that uses inline asm
* to read the TLS.
*/
return false;
}
#if defined(X86) || defined(DEBUG)
static bool
is_thread_tls_allocated(void)
{
# ifdef X86
if (INTERNAL_OPTION(safe_read_tls_init)) {
/* We use this routine to allow currently-native threads, for which
* is_thread_tls_initialized() (and thus is_thread_initialized()) will
* return false.
* Caution: this will also return true on a fresh clone child.
*/
uint magic;
if (!first_thread_tls_initialized || last_thread_tls_exited)
return false;
magic = safe_read_tls_magic();
return magic == TLS_MAGIC_VALID || magic == TLS_MAGIC_INVALID;
}
# endif
return is_thread_tls_initialized();
}
#endif
/* converts a local_state_t offset to a segment offset */
ushort
os_tls_offset(ushort tls_offs)
{
/* no ushort truncation issues b/c TLS_LOCAL_STATE_OFFSET is 0 */
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
ASSERT(TLS_LOCAL_STATE_OFFSET == 0);
return (TLS_LOCAL_STATE_OFFSET + tls_offs);
}
/* converts a segment offset to a local_state_t offset */
ushort
os_local_state_offset(ushort seg_offs)
{
/* no ushort truncation issues b/c TLS_LOCAL_STATE_OFFSET is 0 */
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
ASSERT(TLS_LOCAL_STATE_OFFSET == 0);
return (seg_offs - TLS_LOCAL_STATE_OFFSET);
}
/* XXX: Will return NULL if called before os_thread_init(), which sets
* ostd->dr_fs/gs_base.
*/
void *
os_get_priv_tls_base(dcontext_t *dcontext, reg_id_t reg)
{
os_thread_data_t *ostd;
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
ASSERT(reg == TLS_REG_ALT || reg == TLS_REG_LIB);
if (dcontext == NULL)
dcontext = get_thread_private_dcontext();
if (dcontext == NULL)
return NULL;
ostd = (os_thread_data_t *)dcontext->os_field;
if (reg == TLS_REG_LIB)
return ostd->priv_lib_tls_base;
else if (reg == TLS_REG_ALT)
return ostd->priv_alt_tls_base;
ASSERT_NOT_REACHED();
return NULL;
}
os_local_state_t *
get_os_tls(void)
{
os_local_state_t *os_tls;
ASSERT(is_thread_tls_initialized());
READ_TLS_SLOT_IMM(TLS_SELF_OFFSET, os_tls);
return os_tls;
}
/* Obtain TLS from dcontext directly, which succeeds in pre-thread-init
* situations where get_os_tls() fails.
*/
static os_local_state_t *
get_os_tls_from_dc(dcontext_t *dcontext)
{
byte *local_state;
ASSERT(dcontext != NULL);
local_state = (byte *)dcontext->local_state;
if (local_state == NULL)
return NULL;
return (os_local_state_t *)(local_state - offsetof(os_local_state_t, state));
}
#ifdef AARCHXX
bool
os_set_app_tls_base(dcontext_t *dcontext, reg_id_t reg, void *base)
{
os_local_state_t *os_tls;
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
ASSERT(reg == TLS_REG_LIB || reg == TLS_REG_ALT);
if (dcontext == NULL)
dcontext = get_thread_private_dcontext();
/* we will be called only if TLS is initialized */
ASSERT(dcontext != NULL);
os_tls = get_os_tls_from_dc(dcontext);
if (reg == TLS_REG_LIB) {
os_tls->app_lib_tls_base = base;
LOG(THREAD, LOG_THREADS, 1, "TLS app lib base =" PFX "\n", base);
return true;
} else if (reg == TLS_REG_ALT) {
os_tls->app_alt_tls_base = base;
LOG(THREAD, LOG_THREADS, 1, "TLS app alt base =" PFX "\n", base);
return true;
}
ASSERT_NOT_REACHED();
return false;
}
#endif
void *
os_get_app_tls_base(dcontext_t *dcontext, reg_id_t reg)
{
os_local_state_t *os_tls;
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
ASSERT(reg == TLS_REG_LIB || reg == TLS_REG_ALT);
if (dcontext == NULL)
dcontext = get_thread_private_dcontext();
if (dcontext == NULL) {
/* No dcontext means we haven't initialized TLS, so we haven't replaced
* the app's segments. get_segment_base is expensive, but this should
* be rare. Re-examine if it pops up in a profile.
*/
return get_segment_base(reg);
}
os_tls = get_os_tls_from_dc(dcontext);
if (reg == TLS_REG_LIB)
return os_tls->app_lib_tls_base;
else if (reg == TLS_REG_ALT)
return os_tls->app_alt_tls_base;
ASSERT_NOT_REACHED();
return NULL;
}
ushort
os_get_app_tls_base_offset(reg_id_t reg)
{
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
ASSERT(TLS_LOCAL_STATE_OFFSET == 0);
if (reg == TLS_REG_LIB)
return TLS_APP_LIB_TLS_BASE_OFFSET;
else if (reg == TLS_REG_ALT)
return TLS_APP_ALT_TLS_BASE_OFFSET;
ASSERT_NOT_REACHED();
return 0;
}
#ifdef X86
ushort
os_get_app_tls_reg_offset(reg_id_t reg)
{
IF_NOT_HAVE_TLS(ASSERT_NOT_REACHED());
ASSERT(TLS_LOCAL_STATE_OFFSET == 0);
if (reg == TLS_REG_LIB)
return TLS_APP_LIB_TLS_REG_OFFSET;
else if (reg == TLS_REG_ALT)
return TLS_APP_ALT_TLS_REG_OFFSET;
ASSERT_NOT_REACHED();
return 0;
}
#endif
void *
d_r_get_tls(ushort tls_offs)
{
void *val;
READ_TLS_SLOT(tls_offs, val);
return val;
}
void
d_r_set_tls(ushort tls_offs, void *value)
{
WRITE_TLS_SLOT(tls_offs, value);
}
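/* Minimal usage sketch, mirroring get_app_segment_base() below:
 *     void *base = d_r_get_tls(os_get_app_tls_base_offset(TLS_REG_LIB));
 * Offsets passed here must be segment offsets (see os_tls_offset()).
 */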
/* Returns POINTER_MAX on failure.
* Assumes that cs, ss, ds, and es are flat.
* Should we export this to clients? For now they can get
* this information via opnd_compute_address().
*/
byte *
get_segment_base(uint seg)
{
#ifdef X86
if (seg == SEG_CS || seg == SEG_SS || seg == SEG_DS || seg == SEG_ES)
return NULL;
# ifdef HAVE_TLS
return tls_get_fs_gs_segment_base(seg);
# else
return (byte *)POINTER_MAX;
# endif /* HAVE_TLS */
#elif defined(AARCHXX)
/* XXX i#1551: should we rename/refactor to avoid "segment"? */
return (byte *)read_thread_register(seg);
#endif
}
/* i#572: handle opnd_compute_address to return the application
* segment base value.
*/
byte *
get_app_segment_base(uint seg)
{
#ifdef X86
if (seg == SEG_CS || seg == SEG_SS || seg == SEG_DS || seg == SEG_ES)
return NULL;
#endif /* X86 */
if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
return d_r_get_tls(os_get_app_tls_base_offset(seg));
}
return get_segment_base(seg);
}
local_state_extended_t *
get_local_state_extended()
{
os_local_state_t *os_tls;
ASSERT(is_thread_tls_initialized());
READ_TLS_SLOT_IMM(TLS_SELF_OFFSET, os_tls);
return &(os_tls->state);
}
local_state_t *
get_local_state()
{
#ifdef HAVE_TLS
return (local_state_t *)get_local_state_extended();
#else
return NULL;
#endif
}
#ifdef DEBUG
void
os_enter_dynamorio(void)
{
# ifdef ARM
/* i#1578: check that app's tls value doesn't match our sentinel */
ASSERT(*(byte **)get_dr_tls_base_addr() != TLS_SLOT_VAL_EXITED);
# endif
}
#endif
/* i#107: handle segment register usage conflicts between app and dr:
* os_handle_mov_seg updates the app's tls selector maintained by DR.
* It is called before entering code cache in dispatch_enter_fcache.
*/
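/* Illustrative trigger: the app executes e.g. "mov %ax, %fs"; DR intercepts
 * the segment write, and this routine records the new selector and the
 * corresponding descriptor base so later app-TLS queries return the right
 * values.
 */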
void
os_handle_mov_seg(dcontext_t *dcontext, byte *pc)
{
#ifdef X86
instr_t instr;
opnd_t opnd;
reg_id_t seg;
ushort sel = 0;
our_modify_ldt_t *desc;
int desc_idx;
os_local_state_t *os_tls;
os_thread_data_t *ostd;
instr_init(dcontext, &instr);
decode_cti(dcontext, pc, &instr);
/* the first instr must be mov seg */
ASSERT(instr_get_opcode(&instr) == OP_mov_seg);
opnd = instr_get_dst(&instr, 0);
ASSERT(opnd_is_reg(opnd));
seg = opnd_get_reg(opnd);
ASSERT(reg_is_segment(seg));
ostd = (os_thread_data_t *)dcontext->os_field;
desc = (our_modify_ldt_t *)ostd->app_thread_areas;
os_tls = get_os_tls();
/* get the selector value */
opnd = instr_get_src(&instr, 0);
if (opnd_is_reg(opnd)) {
sel = (ushort)reg_get_value_priv(opnd_get_reg(opnd), get_mcontext(dcontext));
} else {
void *ptr;
ptr = (ushort *)opnd_compute_address_priv(opnd, get_mcontext(dcontext));
ASSERT(ptr != NULL);
if (!d_r_safe_read(ptr, sizeof(sel), &sel)) {
/* FIXME: if invalid address, should deliver a signal to user. */
ASSERT_NOT_IMPLEMENTED(false);
}
}
/* calculate the entry_number */
desc_idx = SELECTOR_INDEX(sel) - tls_min_index();
if (seg == TLS_REG_LIB) {
os_tls->app_lib_tls_reg = sel;
os_tls->app_lib_tls_base = (void *)(ptr_uint_t)desc[desc_idx].base_addr;
} else {
os_tls->app_alt_tls_reg = sel;
os_tls->app_alt_tls_base = (void *)(ptr_uint_t)desc[desc_idx].base_addr;
}
instr_free(dcontext, &instr);
LOG(THREAD_GET, LOG_THREADS, 2,
"thread " TIDFMT " segment change %s to selector 0x%x => "
"app lib tls base: " PFX ", alt tls base: " PFX "\n",
d_r_get_thread_id(), reg_names[seg], sel, os_tls->app_lib_tls_base,
os_tls->app_alt_tls_base);
#elif defined(ARM)
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_REACHED();
#endif /* X86/ARM */
}
/* Initialization for TLS mangling (-mangle_app_seg on x86).
* Must be called before DR setup its own segment.
*/
static void
os_tls_app_seg_init(os_local_state_t *os_tls, void *segment)
{
app_pc app_lib_tls_base, app_alt_tls_base;
#ifdef X86
int i, index;
our_modify_ldt_t *desc;
os_tls->app_lib_tls_reg = read_thread_register(TLS_REG_LIB);
os_tls->app_alt_tls_reg = read_thread_register(TLS_REG_ALT);
#endif
app_lib_tls_base = get_segment_base(TLS_REG_LIB);
app_alt_tls_base = get_segment_base(TLS_REG_ALT);
/* If we're a non-initial thread, tls will be set to the parent's value,
* or to &uninit_tls (i#2089), both of which satisfy is_dynamo_address().
*/
os_tls->app_lib_tls_base =
is_dynamo_address(app_lib_tls_base) ? NULL : app_lib_tls_base;
os_tls->app_alt_tls_base =
is_dynamo_address(app_alt_tls_base) ? NULL : app_alt_tls_base;
#ifdef X86
/* get all TLS thread area value */
/* XXX: is get_thread_area supported in 64-bit kernel?
* It has syscall number 211.
* It works for a 32-bit application running in a 64-bit kernel.
* It returns error value -38 for a 64-bit app in a 64-bit kernel.
*/
desc = &os_tls->os_seg_info.app_thread_areas[0];
tls_initialize_indices(os_tls);
index = tls_min_index();
for (i = 0; i < GDT_NUM_TLS_SLOTS; i++) {
tls_get_descriptor(i + index, &desc[i]);
}
#endif /* X86 */
os_tls->os_seg_info.dr_tls_base = segment;
os_tls->os_seg_info.priv_alt_tls_base = IF_X86_ELSE(segment, NULL);
/* now allocate the tls segment for client libraries */
if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
os_tls->os_seg_info.priv_lib_tls_base = IF_UNIT_TEST_ELSE(
os_tls->app_lib_tls_base, privload_tls_init(os_tls->app_lib_tls_base));
}
#ifdef X86
LOG(THREAD_GET, LOG_THREADS, 1,
"thread " TIDFMT " app lib tls reg: 0x%x, alt tls reg: 0x%x\n",
d_r_get_thread_id(), os_tls->app_lib_tls_reg, os_tls->app_alt_tls_reg);
#endif
LOG(THREAD_GET, LOG_THREADS, 1,
"thread " TIDFMT " app lib tls base: " PFX ", alt tls base: " PFX "\n",
d_r_get_thread_id(), os_tls->app_lib_tls_base, os_tls->app_alt_tls_base);
LOG(THREAD_GET, LOG_THREADS, 1,
"thread " TIDFMT " priv lib tls base: " PFX ", alt tls base: " PFX ", "
"DR's tls base: " PFX "\n",
d_r_get_thread_id(), os_tls->os_seg_info.priv_lib_tls_base,
os_tls->os_seg_info.priv_alt_tls_base, os_tls->os_seg_info.dr_tls_base);
}
void
os_tls_init(void)
{
#ifdef X86
ASSERT(TLS_MAGIC_OFFSET_ASM == TLS_MAGIC_OFFSET);
ASSERT(TLS_SELF_OFFSET_ASM == TLS_SELF_OFFSET);
#endif
#ifdef HAVE_TLS
/* We create a 1-page segment with an LDT entry for each thread and load its
* selector into fs/gs.
* FIXME PR 205276: this whole scheme currently does not check if the app is
* using segments; we need to watch the modify_ldt syscall.
*/
/* FIXME: heap_mmap marks as exec, we just want RW */
byte *segment = heap_mmap(PAGE_SIZE, VMM_SPECIAL_MMAP);
os_local_state_t *os_tls = (os_local_state_t *)segment;
LOG(GLOBAL, LOG_THREADS, 1, "os_tls_init for thread " TIDFMT "\n",
d_r_get_thread_id());
ASSERT(!is_thread_tls_initialized());
/* MUST zero out dcontext slot so uninit access gets NULL */
memset(segment, 0, PAGE_SIZE);
/* store key data in the tls itself */
os_tls->self = os_tls;
os_tls->tid = get_sys_thread_id();
os_tls->tls_type = TLS_TYPE_NONE;
# ifdef X86
os_tls->magic = TLS_MAGIC_VALID;
# endif
/* We save DR's TLS segment base here so that os_get_dr_tls_base() will work
* even when -no_mangle_app_seg is set. If -mangle_app_seg is set, this
* will be overwritten in os_tls_app_seg_init().
*/
os_tls->os_seg_info.dr_tls_base = segment;
ASSERT(proc_is_cache_aligned(os_tls->self + TLS_LOCAL_STATE_OFFSET));
/* Verify that local_state_extended_t should indeed be used. */
ASSERT(DYNAMO_OPTION(ibl_table_in_tls));
/* initialize DR TLS seg base before replacing app's TLS in tls_thread_init */
if (MACHINE_TLS_IS_DR_TLS)
os_tls_app_seg_init(os_tls, segment);
tls_thread_init(os_tls, segment);
ASSERT(os_tls->tls_type != TLS_TYPE_NONE);
/* store type in global var for convenience: should be same for all threads */
tls_global_type = os_tls->tls_type;
/* FIXME: this should be a SYSLOG fatal error? Should fall back on !HAVE_TLS?
* Should have create_ldt_entry() return failure instead of asserting, then.
*/
#else
tls_table = (tls_slot_t *)global_heap_alloc(MAX_THREADS *
sizeof(tls_slot_t) HEAPACCT(ACCT_OTHER));
memset(tls_table, 0, MAX_THREADS * sizeof(tls_slot_t));
#endif
if (!first_thread_tls_initialized) {
first_thread_tls_initialized = true;
if (last_thread_tls_exited) /* re-attach */
last_thread_tls_exited = false;
}
ASSERT(is_thread_tls_initialized());
}
static bool
should_zero_tls_at_thread_exit()
{
#ifdef X86
/* i#2089: For a thread w/o CLONE_SIGHAND we cannot handle a fault, so we want to
* leave &uninit_tls (which was put in place in os_thread_exit()) as long as
* possible. For non-detach, that means until the exit.
*/
return !INTERNAL_OPTION(safe_read_tls_init) || doing_detach;
#else
return true;
#endif
}
/* TLS exit for the current thread who must own local_state. */
void
os_tls_thread_exit(local_state_t *local_state)
{
#ifdef HAVE_TLS
/* We assume (assert below) that local_state_t's start == local_state_extended_t */
os_local_state_t *os_tls =
(os_local_state_t *)(((byte *)local_state) - offsetof(os_local_state_t, state));
tls_type_t tls_type = os_tls->tls_type;
int index = os_tls->ldt_index;
ASSERT(offsetof(local_state_t, spill_space) ==
offsetof(local_state_extended_t, spill_space));
if (should_zero_tls_at_thread_exit()) {
tls_thread_free(tls_type, index);
# if defined(X86) && defined(X64)
if (tls_type == TLS_TYPE_ARCH_PRCTL) {
/* syscall re-sets gs register so re-clear it */
if (read_thread_register(SEG_TLS) != 0) {
static const ptr_uint_t zero = 0;
WRITE_DR_SEG(zero); /* macro needs lvalue! */
}
}
# endif
}
/* We already set TLS to &uninit_tls in os_thread_exit() */
/* Do not set last_thread_tls_exited if a client_thread is exiting.
* If set, get_thread_private_dcontext() returns NULL, which may cause
* another thread to fault when using its dcontext.
*/
if (dynamo_exited_all_other_threads && !last_thread_tls_exited) {
last_thread_tls_exited = true;
first_thread_tls_initialized = false; /* for possible re-attach */
}
#endif
}
/* Frees local_state. If the calling thread is exiting (i.e.,
* !other_thread) then also frees kernel resources for the calling
* thread; if other_thread then that may not be possible.
*/
void
os_tls_exit(local_state_t *local_state, bool other_thread)
{
#ifdef HAVE_TLS
# ifdef X86
static const ptr_uint_t zero = 0;
# endif /* X86 */
/* We can't read from fs: as we can be called from other threads */
/* ASSUMPTION: local_state_t is laid out at same start as local_state_extended_t */
os_local_state_t *os_tls =
(os_local_state_t *)(((byte *)local_state) - offsetof(os_local_state_t, state));
# ifdef X86
/* If the MSR is in use, writing to the reg faults. We rely on it being 0
* to indicate that.
*/
if (!other_thread && read_thread_register(SEG_TLS) != 0 &&
should_zero_tls_at_thread_exit()) {
WRITE_DR_SEG(zero); /* macro needs lvalue! */
}
# endif /* X86 */
/* For another thread we can't really make these syscalls so we have to
* leave it un-cleaned-up. That's fine if the other thread is exiting:
* but for detach (i#95) we get the other thread to run this code.
*/
if (!other_thread)
os_tls_thread_exit(local_state);
/* We can't free prior to tls_thread_free() in case that routine refs os_tls */
heap_munmap(os_tls->self, PAGE_SIZE, VMM_SPECIAL_MMAP);
#else
global_heap_free(tls_table, MAX_THREADS * sizeof(tls_slot_t) HEAPACCT(ACCT_OTHER));
DELETE_LOCK(tls_lock);
#endif
}
static int
os_tls_get_gdt_index(dcontext_t *dcontext)
{
os_local_state_t *os_tls = (os_local_state_t *)(((byte *)dcontext->local_state) -
offsetof(os_local_state_t, state));
if (os_tls->tls_type == TLS_TYPE_GDT)
return os_tls->ldt_index;
else
return -1;
}
void
os_tls_pre_init(int gdt_index)
{
#ifdef X86
/* Only set to above 0 for tls_type == TLS_TYPE_GDT */
if (gdt_index > 0) {
/* PR 458917: clear gdt slot to avoid leak across exec */
DEBUG_DECLARE(bool ok;)
static const ptr_uint_t zero = 0;
/* Be sure to clear the selector before anything that might
* call get_thread_private_dcontext()
*/
WRITE_DR_SEG(zero); /* macro needs lvalue! */
DEBUG_DECLARE(ok =)
tls_clear_descriptor(gdt_index);
ASSERT(ok);
}
#elif defined(ARM)
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
#endif /* X86/ARM */
}
#ifdef CLIENT_INTERFACE
/* Allocates num_slots TLS slots aligned to the given alignment */
bool
os_tls_calloc(OUT uint *offset, uint num_slots, uint alignment)
{
bool res = false;
uint i, count = 0;
int start = -1;
uint offs = offsetof(os_local_state_t, client_tls);
if (num_slots == 0 || num_slots > MAX_NUM_CLIENT_TLS)
return false;
d_r_mutex_lock(&client_tls_lock);
for (i = 0; i < MAX_NUM_CLIENT_TLS; i++) {
if (!client_tls_allocated[i] &&
/* ALIGNED doesn't work for 0 */
(alignment == 0 || ALIGNED(offs + i * sizeof(void *), alignment))) {
if (start == -1)
start = i;
count++;
if (count >= num_slots)
break;
} else {
start = -1;
count = 0;
}
}
if (count >= num_slots) {
for (i = 0; i < num_slots; i++)
client_tls_allocated[i + start] = true;
*offset = offs + start * sizeof(void *);
res = true;
}
d_r_mutex_unlock(&client_tls_lock);
return res;
}
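/* Hedged usage sketch (client side; names illustrative): reserve two
 * pointer-aligned slots and read one back:
 *     uint offs;
 *     if (os_tls_calloc(&offs, 2, sizeof(void *)))
 *         val = d_r_get_tls(offs);
 */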
bool
os_tls_cfree(uint offset, uint num_slots)
{
uint i;
uint offs = (offset - offsetof(os_local_state_t, client_tls)) / sizeof(void *);
bool ok = true;
d_r_mutex_lock(&client_tls_lock);
for (i = 0; i < num_slots; i++) {
if (!client_tls_allocated[i + offs])
ok = false;
client_tls_allocated[i + offs] = false;
}
d_r_mutex_unlock(&client_tls_lock);
return ok;
}
#endif
/* os_data is a clone_record_t for signal_thread_inherit */
void
os_thread_init(dcontext_t *dcontext, void *os_data)
{
os_local_state_t *os_tls = get_os_tls();
os_thread_data_t *ostd = (os_thread_data_t *)heap_alloc(
dcontext, sizeof(os_thread_data_t) HEAPACCT(ACCT_OTHER));
dcontext->os_field = (void *)ostd;
/* make sure stack fields, etc. are 0 now so they can be initialized on demand
* (don't have app esp register handy here to init now)
*/
memset(ostd, 0, sizeof(*ostd));
ksynch_init_var(&ostd->suspended);
ksynch_init_var(&ostd->wakeup);
ksynch_init_var(&ostd->resumed);
ksynch_init_var(&ostd->terminated);
ksynch_init_var(&ostd->detached);
#ifdef RETURN_AFTER_CALL
/* We only need the stack bottom for the initial thread, and due to thread
* init now preceding vm_areas_init(), we initialize in find_executable_vm_areas()
*/
ostd->stack_bottom_pc = NULL;
#endif
ASSIGN_INIT_LOCK_FREE(ostd->suspend_lock, suspend_lock);
signal_thread_init(dcontext, os_data);
/* i#107: initialize thread area information;
 * the values were first obtained in os_tls_init and stored in os_tls.
 */
ostd->priv_lib_tls_base = os_tls->os_seg_info.priv_lib_tls_base;
ostd->priv_alt_tls_base = os_tls->os_seg_info.priv_alt_tls_base;
ostd->dr_tls_base = os_tls->os_seg_info.dr_tls_base;
LOG(THREAD, LOG_THREADS, 1, "TLS app lib base =" PFX "\n", os_tls->app_lib_tls_base);
LOG(THREAD, LOG_THREADS, 1, "TLS app alt base =" PFX "\n", os_tls->app_alt_tls_base);
LOG(THREAD, LOG_THREADS, 1, "TLS priv lib base =" PFX "\n", ostd->priv_lib_tls_base);
LOG(THREAD, LOG_THREADS, 1, "TLS priv alt base =" PFX "\n", ostd->priv_alt_tls_base);
LOG(THREAD, LOG_THREADS, 1, "TLS DynamoRIO base=" PFX "\n", ostd->dr_tls_base);
#ifdef X86
if (INTERNAL_OPTION(mangle_app_seg)) {
ostd->app_thread_areas = heap_alloc(
dcontext, sizeof(our_modify_ldt_t) * GDT_NUM_TLS_SLOTS HEAPACCT(ACCT_OTHER));
memcpy(ostd->app_thread_areas, os_tls->os_seg_info.app_thread_areas,
sizeof(our_modify_ldt_t) * GDT_NUM_TLS_SLOTS);
}
#endif
LOG(THREAD, LOG_THREADS, 1, "post-TLS-setup, cur %s base is " PFX "\n",
IF_X86_ELSE("gs", "tpidruro"),
get_segment_base(IF_X86_ELSE(SEG_GS, DR_REG_TPIDRURO)));
LOG(THREAD, LOG_THREADS, 1, "post-TLS-setup, cur %s base is " PFX "\n",
IF_X86_ELSE("fs", "tpidrurw"),
get_segment_base(IF_X86_ELSE(SEG_FS, DR_REG_TPIDRURW)));
#ifdef MACOS
/* XXX: do we need to free/close dcontext->thread_port? I don't think so. */
dcontext->thread_port = dynamorio_mach_syscall(MACH_thread_self_trap, 0);
LOG(THREAD, LOG_ALL, 1, "Mach thread port: %d\n", dcontext->thread_port);
#endif
}
/* os_data is a clone_record_t for signal_thread_inherit */
void
os_thread_init_finalize(dcontext_t *dcontext, void *os_data)
{
/* We do not want to record pending signals until at least synch_thread_init()
* is finished so we delay until here: but we need this inside the
* thread_initexit_lock (i#2779).
*/
signal_thread_inherit(dcontext, os_data);
}
void
os_thread_exit(dcontext_t *dcontext, bool other_thread)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
/* i#237/PR 498284: if we had a vfork child call execve we need to clean up
* the env vars.
*/
if (dcontext->thread_record->execve)
handle_execve_post(dcontext);
DELETE_LOCK(ostd->suspend_lock);
signal_thread_exit(dcontext, other_thread);
ksynch_free_var(&ostd->suspended);
ksynch_free_var(&ostd->wakeup);
ksynch_free_var(&ostd->resumed);
ksynch_free_var(&ostd->terminated);
ksynch_free_var(&ostd->detached);
#ifdef X86
if (ostd->clone_tls != NULL) {
if (!other_thread) {
/* Avoid faults in is_thread_tls_initialized() */
/* FIXME i#2088: we need to restore the app's aux seg, if any, instead. */
os_set_dr_tls_base(dcontext, NULL, (byte *)&uninit_tls);
}
DODEBUG({
HEAP_TYPE_FREE(dcontext, ostd->clone_tls, os_local_state_t, ACCT_THREAD_MGT,
UNPROTECTED);
});
}
#endif
/* for non-debug we do fast exit path and don't free local heap */
DODEBUG({
if (MACHINE_TLS_IS_DR_TLS) {
#ifdef X86
heap_free(dcontext, ostd->app_thread_areas,
sizeof(our_modify_ldt_t) * GDT_NUM_TLS_SLOTS HEAPACCT(ACCT_OTHER));
#endif
#ifdef CLIENT_INTERFACE
if (INTERNAL_OPTION(private_loader))
privload_tls_exit(IF_UNIT_TEST_ELSE(NULL, ostd->priv_lib_tls_base));
#endif
}
heap_free(dcontext, ostd, sizeof(os_thread_data_t) HEAPACCT(ACCT_OTHER));
});
}
/* Happens in the parent prior to fork. */
static void
os_fork_pre(dcontext_t *dcontext)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
/* Otherwise a thread might wait for us. */
ASSERT_OWN_NO_LOCKS();
ASSERT(ostd->fork_threads == NULL && ostd->fork_num_threads == 0);
/* i#239: Synch with all other threads to ensure that they are holding no
* locks across the fork.
* FIXME i#26: Suspend signals received before initializing siginfo are
* squelched, so we won't be able to suspend threads that are initializing.
*/
LOG(GLOBAL, 2, LOG_SYSCALLS | LOG_THREADS,
"fork: synching with other threads to prevent deadlock in child\n");
if (!synch_with_all_threads(THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT_OR_NO_XFER,
&ostd->fork_threads, &ostd->fork_num_threads,
THREAD_SYNCH_VALID_MCONTEXT,
/* If we fail to suspend a thread, there is a
* risk of deadlock in the child, so it's worth
* retrying on failure.
*/
THREAD_SYNCH_SUSPEND_FAILURE_RETRY)) {
/* If we failed to synch with all threads, we live with the possibility
* of deadlock and continue as normal.
*/
LOG(GLOBAL, 1, LOG_SYSCALLS | LOG_THREADS,
"fork: synch failed, possible deadlock in child\n");
ASSERT_CURIOSITY(false);
}
/* We go back to the code cache to execute the syscall, so we can't hold
* locks. If the synch succeeded, no one else is running, so it should be
* safe to release these locks. However, if there are any rogue threads,
* then releasing these locks will allow them to synch and create threads.
* Such threads could be running due to synch failure or presence of
* non-suspendable client threads. We keep our data in ostd to prevent some
* conflicts, but there are some unhandled corner cases.
*/
d_r_mutex_unlock(&thread_initexit_lock);
d_r_mutex_unlock(&all_threads_synch_lock);
}
/* Happens after the fork in both the parent and child. */
static void
os_fork_post(dcontext_t *dcontext, bool parent)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
/* Re-acquire the locks we released before the fork. */
d_r_mutex_lock(&all_threads_synch_lock);
d_r_mutex_lock(&thread_initexit_lock);
/* Resume the other threads that we suspended. */
if (parent) {
LOG(GLOBAL, 2, LOG_SYSCALLS | LOG_THREADS,
"fork: resuming other threads after fork\n");
}
end_synch_with_all_threads(ostd->fork_threads, ostd->fork_num_threads,
parent /*resume in parent, not in child*/);
ostd->fork_threads = NULL; /* Freed by end_synch_with_all_threads. */
ostd->fork_num_threads = 0;
}
/* this one is called before child's new logfiles are set up */
void
os_fork_init(dcontext_t *dcontext)
{
int iter;
/* We use a larger data size than file_t to avoid clobbering our stack (i#991) */
ptr_uint_t fd;
ptr_uint_t flags;
/* Static assert would save debug build overhead: could use array bound trick */
ASSERT(sizeof(file_t) <= sizeof(ptr_uint_t));
/* i#239: If there were unsuspended threads across the fork, we could have
* forked while another thread held locks. We reset the locks and try to
* cope with any intermediate state left behind from the parent. If we
* encounter more deadlocks after fork, we can add more lock and data resets
* on a case by case basis.
*/
d_r_mutex_fork_reset(&all_threads_synch_lock);
d_r_mutex_fork_reset(&thread_initexit_lock);
os_fork_post(dcontext, false /*!parent*/);
/* re-populate cached data that contains pid */
pid_cached = get_process_id();
get_application_pid_helper(true);
get_application_name_helper(true, true /* not important */);
/* close all copies of parent files */
TABLE_RWLOCK(fd_table, write, lock);
iter = 0;
do {
iter = generic_hash_iterate_next(GLOBAL_DCONTEXT, fd_table, iter, &fd,
(void **)&flags);
if (iter < 0)
break;
if (TEST(OS_OPEN_CLOSE_ON_FORK, flags)) {
close_syscall((file_t)fd);
iter = generic_hash_iterate_remove(GLOBAL_DCONTEXT, fd_table, iter, fd);
}
} while (true);
TABLE_RWLOCK(fd_table, write, unlock);
}
static void
os_swap_dr_tls(dcontext_t *dcontext, bool to_app)
{
#ifdef X86
/* If the option is off, we really should swap it (xref i#107/i#2088 comments
* in os_swap_context()) but there are few consequences of not doing it, and we
* have no code set up separate from the i#2089 scheme here.
*/
if (!INTERNAL_OPTION(safe_read_tls_init))
return;
if (to_app) {
/* i#2089: we want the child to inherit a TLS with invalid .magic, but we
* need our own syscall execution and post-syscall code to have valid scratch
* and dcontext values. We can't clear our own magic b/c we don't know when
* the child will be scheduled, so we use a copy of our TLS. We carefully
* never have a valid magic there in case a prior child is still unscheduled.
*
* We assume the child will not modify this TLS copy in any way.
* CLONE_SETTLS touches the other segment (we'll have to watch for
* addition of CLONE_SETTLS_AUX). The parent will use the scratch space
* returning from the syscall to d_r_dispatch, but we restore via os_clone_post()
* immediately before anybody calls get_thread_private_dcontext() or
* anything.
*/
/* FIXME i#2088: to preserve the app's aux seg, if any, we should pass it
* and the seg reg value via the clone record (like we do for ARM today).
*/
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
os_local_state_t *cur_tls = get_os_tls_from_dc(dcontext);
if (ostd->clone_tls == NULL) {
ostd->clone_tls = (os_local_state_t *)HEAP_TYPE_ALLOC(
dcontext, os_local_state_t, ACCT_THREAD_MGT, UNPROTECTED);
LOG(THREAD, LOG_THREADS, 2, "TLS copy is " PFX "\n", ostd->clone_tls);
}
/* Leave no window where a prior uninit child could read valid magic by
* invalidating prior to copying.
*/
cur_tls->magic = TLS_MAGIC_INVALID;
memcpy(ostd->clone_tls, cur_tls, sizeof(*ostd->clone_tls));
cur_tls->magic = TLS_MAGIC_VALID;
ostd->clone_tls->self = ostd->clone_tls;
os_set_dr_tls_base(dcontext, NULL, (byte *)ostd->clone_tls);
} else {
/* i#2089: restore the parent's DR TLS */
os_local_state_t *real_tls = get_os_tls_from_dc(dcontext);
/* For dr_app_start we can end up here with nothing to do, so we check. */
if (get_segment_base(SEG_TLS) != (byte *)real_tls) {
DEBUG_DECLARE(os_thread_data_t *ostd =
(os_thread_data_t *)dcontext->os_field);
ASSERT(get_segment_base(SEG_TLS) == (byte *)ostd->clone_tls);
/* We assume there's no need to copy the scratch slots back */
os_set_dr_tls_base(dcontext, real_tls, (byte *)real_tls);
}
}
#endif
}
static void
os_new_thread_pre(void)
{
/* We use a barrier on new threads to ensure we make progress when
* attaching to an app that is continually making threads.
* XXX i#1305: if we fully suspend all threads during attach we can
* get rid of this barrier.
*/
wait_for_event(dr_attach_finished, 0);
ATOMIC_INC(int, uninit_thread_count);
}
static void
os_clone_pre(dcontext_t *dcontext)
{
/* We switch the lib tls segment back to app's segment.
* Please refer to comment on os_switch_lib_tls.
*/
if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
os_switch_lib_tls(dcontext, true /*to app*/);
}
os_swap_dr_tls(dcontext, true /*to app*/);
}
/* This is called from d_r_dispatch prior to post_system_call() */
void
os_clone_post(dcontext_t *dcontext)
{
os_swap_dr_tls(dcontext, false /*to DR*/);
}
byte *
os_get_dr_tls_base(dcontext_t *dcontext)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
return ostd->dr_tls_base;
}
/* We only bother swapping the library segment if we're using the private
* loader.
*/
bool
os_should_swap_state(void)
{
#ifdef X86
/* -private_loader currently implies -mangle_app_seg, but let's be safe. */
return (INTERNAL_OPTION(mangle_app_seg) &&
IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false));
#elif defined(AARCHXX)
/* FIXME i#1582: this should return true, but there is a lot of complexity
* getting os_switch_seg_to_context() to do the right thing when called
* at main thread init, secondary thread init, early and late injection,
* and thread exit, since it is fragile with its writes to app TLS.
*/
return false;
#endif
}
bool
os_using_app_state(dcontext_t *dcontext)
{
#ifdef X86
/* FIXME: This could be optimized to avoid the syscall by keeping state in
* the dcontext.
*/
if (INTERNAL_OPTION(mangle_app_seg)) {
return (get_segment_base(TLS_REG_LIB) ==
os_get_app_tls_base(dcontext, TLS_REG_LIB));
}
#endif
/* We're always in the app state if we're not mangling. */
return true;
}
/* Similar to PEB swapping on Windows, this call will switch between DR's
* private lib segment base and the app's segment base.
* i#107/i#2088: If the app wants to use SEG_TLS, we should also switch that back at
* this boundary, but there are many places where we simply assume it is always
* installed.
*/
void
os_swap_context(dcontext_t *dcontext, bool to_app, dr_state_flags_t flags)
{
if (os_should_swap_state())
os_switch_seg_to_context(dcontext, LIB_SEG_TLS, to_app);
if (TEST(DR_STATE_DR_TLS, flags))
os_swap_dr_tls(dcontext, to_app);
}
void
os_swap_context_go_native(dcontext_t *dcontext, dr_state_flags_t flags)
{
#ifdef AARCHXX
/* FIXME i#1582: remove this routine once os_should_swap_state()
* is not disabled and we can actually call
* os_swap_context_go_native() safely from multiple places.
*/
os_switch_seg_to_context(dcontext, LIB_SEG_TLS, true /*to app*/);
#else
os_swap_context(dcontext, true /*to app*/, flags);
#endif
}
void
os_thread_under_dynamo(dcontext_t *dcontext)
{
os_swap_context(dcontext, false /*to dr*/, DR_STATE_GO_NATIVE);
signal_swap_mask(dcontext, false /*to dr*/);
start_itimer(dcontext);
}
void
os_thread_not_under_dynamo(dcontext_t *dcontext)
{
stop_itimer(dcontext);
signal_swap_mask(dcontext, true /*to app*/);
os_swap_context(dcontext, true /*to app*/, DR_STATE_GO_NATIVE);
}
void
os_process_under_dynamorio_initiate(dcontext_t *dcontext)
{
LOG(GLOBAL, LOG_THREADS, 1, "process now under DR\n");
/* We only support regular process-wide signal handlers for delayed takeover. */
/* i#2161: we ignore alarm signals during the attach process to avoid races. */
signal_reinstate_handlers(dcontext, true /*ignore alarm*/);
/* XXX: there's a tradeoff here: we have a race when we remove the hook
* because dr_app_stop() has no barrier and a thread sent native might
* resume from vsyscall after we remove the hook. However, if we leave the
* hook, then the next takeover signal might hit a native thread that's
* inside DR just to go back native after having hit the hook. For now we
* remove the hook and rely on translate_from_synchall_to_dispatch() moving
* threads from vsyscall to our gencode and not relying on the hook being
* present to finish up their go-native code.
*/
hook_vsyscall(dcontext, false);
}
void
os_process_under_dynamorio_complete(dcontext_t *dcontext)
{
/* i#2161: only now do we un-ignore alarm signals. */
signal_reinstate_alarm_handlers(dcontext);
IF_NO_MEMQUERY({
/* Update the memory cache (i#2037) now that we've taken over all the
* threads, if there may have been a gap between setup and start.
*/
if (dr_api_entry)
memcache_update_all_from_os();
});
}
void
os_process_not_under_dynamorio(dcontext_t *dcontext)
{
/* We only support regular process-wide signal handlers for mixed-mode control. */
signal_remove_handlers(dcontext);
unhook_vsyscall();
LOG(GLOBAL, LOG_THREADS, 1, "process no longer under DR\n");
}
bool
detach_do_not_translate(thread_record_t *tr)
{
return false;
}
void
detach_finalize_translation(thread_record_t *tr, priv_mcontext_t *mc)
{
/* Nothing to do. */
}
void
detach_finalize_cleanup(void)
{
/* Nothing to do. */
}
static pid_t
get_process_group_id()
{
return dynamorio_syscall(SYS_getpgid, 0);
}
process_id_t
get_parent_id(void)
{
return dynamorio_syscall(SYS_getppid, 0);
}
thread_id_t
get_sys_thread_id(void)
{
#ifdef MACOS
if (kernel_thread_groups)
return dynamorio_syscall(SYS_thread_selfid, 0);
#else
if (kernel_thread_groups)
return dynamorio_syscall(SYS_gettid, 0);
#endif
return dynamorio_syscall(SYS_getpid, 0);
}
thread_id_t
d_r_get_thread_id(void)
{
/* i#228/PR 494330: making a syscall here is a perf bottleneck since we call
* this routine in read and recursive locks so use the TLS value instead
*/
thread_id_t id = get_tls_thread_id();
if (id != INVALID_THREAD_ID)
return id;
else
return get_sys_thread_id();
}
thread_id_t
get_tls_thread_id(void)
{
ptr_int_t tid; /* can't use thread_id_t since it's 32-bits */
if (!is_thread_tls_initialized())
return INVALID_THREAD_ID;
READ_TLS_SLOT_IMM(TLS_THREAD_ID_OFFSET, tid);
/* the asm reads a full 8 bytes from memory, which includes app_gs and app_fs:
 * 0x000000007127357b <get_tls_thread_id+37>: mov %gs:(%rax),%rax
 * 0x000000007127357f <get_tls_thread_id+41>: mov %rax,-0x8(%rbp)
 * so we removed the TRUNCATE check and instead truncate on return.
*/
return (thread_id_t)tid;
}
/* returns the thread-private dcontext pointer for the calling thread */
dcontext_t *
get_thread_private_dcontext(void)
{
#ifdef HAVE_TLS
dcontext_t *dcontext;
/* We have to check this b/c this is called from __errno_location prior
* to os_tls_init, as well as after os_tls_exit, and early in a new
* thread's initialization (see comments below on that).
*/
if (!is_thread_tls_initialized())
return (IF_CLIENT_INTERFACE(standalone_library ? GLOBAL_DCONTEXT :) NULL);
/* We used to check tid and return NULL to distinguish parent from child, but
* that was affecting performance (xref PR 207366: but I'm leaving the assert in
* for now so debug build will still incur it). So we fixed the cases that
* needed that:
*
* - dynamo_thread_init() calling is_thread_initialized() for a new thread
* created via clone or the start/stop interface: so we have
* is_thread_initialized() pay the d_r_get_thread_id() cost.
* - new_thread_setup()'s ENTER_DR_HOOK kstats, or a crash and the signal
* handler asking about dcontext: we have new_thread_dynamo_start()
* clear the segment register for us early on.
* - child of fork (ASSERT_OWN_NO_LOCKS, etc. on re-entering DR):
* here we just suppress the assert: we'll use this same dcontext.
* xref PR 209518 where w/o this fix we used to need an extra KSTOP.
*
* An alternative would be to have the parent thread clear the segment
* register, or even set up the child's TLS ahead of time ourselves
* (and special-case so that we know if at clone syscall the app state is not
* quite correct: but we're already stealing a register there: PR 286194).
* We could also have the kernel set up TLS for us (PR 285898).
*
* For hotp_only or non-full-control (native_exec, e.g.) (PR 212012), this
* routine is not the only issue: we have to catch all new threads since
* hotp_only gateways assume tls is set up.
* Xref PR 192231.
*/
/* PR 307698: this assert causes large slowdowns (also xref PR 207366) */
DOCHECK(CHKLVL_DEFAULT + 1, {
ASSERT(get_tls_thread_id() == get_sys_thread_id() ||
/* ok for fork as mentioned above */
pid_cached != get_process_id());
});
READ_TLS_SLOT_IMM(TLS_DCONTEXT_OFFSET, dcontext);
return dcontext;
#else
/* Assumption: no lock needed on a read => no race conditions between
* reading and writing same tid! Since both get and set are only for
* the current thread, they cannot both execute simultaneously for the
* same tid, right?
*/
thread_id_t tid = d_r_get_thread_id();
int i;
if (tls_table != NULL) {
for (i = 0; i < MAX_THREADS; i++) {
if (tls_table[i].tid == tid) {
return tls_table[i].dcontext;
}
}
}
return NULL;
#endif
}
/* sets the thread-private dcontext pointer for the calling thread */
void
set_thread_private_dcontext(dcontext_t *dcontext)
{
#ifdef HAVE_TLS
ASSERT(is_thread_tls_allocated());
WRITE_TLS_SLOT_IMM(TLS_DCONTEXT_OFFSET, dcontext);
#else
thread_id_t tid = d_r_get_thread_id();
int i;
bool found = false;
ASSERT(tls_table != NULL);
d_r_mutex_lock(&tls_lock);
for (i = 0; i < MAX_THREADS; i++) {
if (tls_table[i].tid == tid) {
if (dcontext == NULL) {
/* if setting to NULL, clear the entire slot for reuse */
tls_table[i].tid = 0;
}
tls_table[i].dcontext = dcontext;
found = true;
break;
}
}
if (!found) {
if (dcontext == NULL) {
/* don't do anything...but why would this happen? */
} else {
/* look for an empty slot */
for (i = 0; i < MAX_THREADS; i++) {
if (tls_table[i].tid == 0) {
tls_table[i].tid = tid;
tls_table[i].dcontext = dcontext;
found = true;
break;
}
}
}
}
d_r_mutex_unlock(&tls_lock);
ASSERT(found);
#endif
}
/* replaces old with new
* use for forking: child should replace parent's id with its own
*/
static void
replace_thread_id(thread_id_t old, thread_id_t new)
{
#ifdef HAVE_TLS
thread_id_t new_tid = new;
ASSERT(is_thread_tls_initialized());
DOCHECK(1, {
thread_id_t old_tid;
READ_TLS_INT_SLOT_IMM(TLS_THREAD_ID_OFFSET, old_tid);
ASSERT(old_tid == old);
});
WRITE_TLS_INT_SLOT_IMM(TLS_THREAD_ID_OFFSET, new_tid);
#else
int i;
d_r_mutex_lock(&tls_lock);
for (i = 0; i < MAX_THREADS; i++) {
if (tls_table[i].tid == old) {
tls_table[i].tid = new;
break;
}
}
d_r_mutex_unlock(&tls_lock);
#endif
}
/* translate native flags to platform independent protection bits */
static inline uint
osprot_to_memprot(uint prot)
{
uint mem_prot = 0;
if (TEST(PROT_EXEC, prot))
mem_prot |= MEMPROT_EXEC;
if (TEST(PROT_READ, prot))
mem_prot |= MEMPROT_READ;
if (TEST(PROT_WRITE, prot))
mem_prot |= MEMPROT_WRITE;
return mem_prot;
}
/* returns osprot flags preserving all native protection flags except
* for RWX, which are replaced according to memprot */
uint
osprot_replace_memprot(uint old_osprot, uint memprot)
{
/* Note only protection flags PROT_ are relevant to mprotect()
* and they are separate from any other MAP_ flags passed to mmap()
*/
uint new_osprot = memprot_to_osprot(memprot);
return new_osprot;
}
/* libc independence */
static inline long
mprotect_syscall(byte *p, size_t size, uint prot)
{
return dynamorio_syscall(SYS_mprotect, 3, p, size, prot);
}
/* free memory allocated from os_raw_mem_alloc */
bool
os_raw_mem_free(void *p, size_t size, uint flags, heap_error_code_t *error_code)
{
long rc;
ASSERT(error_code != NULL);
ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE));
rc = munmap_syscall(p, size);
if (rc != 0) {
*error_code = -rc;
} else {
*error_code = HEAP_ERROR_SUCCESS;
}
return (rc == 0);
}
/* try to alloc memory at preferred from the os directly;
* caller is required to handle thread synchronization and to update
* dynamo vm areas
*/
void *
os_raw_mem_alloc(void *preferred, size_t size, uint prot, uint flags,
heap_error_code_t *error_code)
{
byte *p;
uint os_prot = memprot_to_osprot(prot);
uint os_flags =
MAP_PRIVATE | MAP_ANONYMOUS | (TEST(RAW_ALLOC_32BIT, flags) ? MAP_32BIT : 0);
ASSERT(error_code != NULL);
/* should only be used on aligned pieces */
ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE));
p = mmap_syscall(preferred, size, os_prot, os_flags, -1, 0);
if (!mmap_syscall_succeeded(p)) {
*error_code = -(heap_error_code_t)(ptr_int_t)p;
LOG(GLOBAL, LOG_HEAP, 3, "os_raw_mem_alloc %d bytes failed " PFX "\n", size, p);
return NULL;
}
if (preferred != NULL && p != preferred) {
*error_code = HEAP_ERROR_NOT_AT_PREFERRED;
os_raw_mem_free(p, size, flags, error_code);
LOG(GLOBAL, LOG_HEAP, 3, "os_raw_mem_alloc %d bytes failed " PFX "\n", size, p);
return NULL;
}
LOG(GLOBAL, LOG_HEAP, 2, "os_raw_mem_alloc: " SZFMT " bytes @ " PFX "\n", size, p);
return p;
}
#ifdef LINUX
void
init_emulated_brk(app_pc exe_end)
{
ASSERT(DYNAMO_OPTION(emulate_brk));
if (app_brk_map != NULL)
return;
/* i#1004: emulate brk via a separate mmap.
* The real brk starts out empty, but we need at least a page to have an
* mmap placeholder.
*/
app_brk_map = mmap_syscall(exe_end, PAGE_SIZE, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
ASSERT(mmap_syscall_succeeded(app_brk_map));
app_brk_cur = app_brk_map;
app_brk_end = app_brk_map + PAGE_SIZE;
}
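/* Emulates the app's brk against our separate backing mmap (-emulate_brk):
* shrinks via munmap, grows via an in-place mremap, and notifies
* handle_app_brk() on any change. Returns the resulting program break.
*/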
static byte *
emulate_app_brk(dcontext_t *dcontext, byte *new_val)
{
byte *old_brk = app_brk_cur;
ASSERT(DYNAMO_OPTION(emulate_brk));
LOG(THREAD, LOG_HEAP, 2, "%s: cur=" PFX ", requested=" PFX "\n", __FUNCTION__,
app_brk_cur, new_val);
new_val = (byte *)ALIGN_FORWARD(new_val, PAGE_SIZE);
if (new_val == NULL || new_val == app_brk_cur ||
/* Not allowed to shrink below original base */
new_val < app_brk_map) {
/* Just return cur val */
} else if (new_val < app_brk_cur) {
/* Shrink */
if (munmap_syscall(new_val, app_brk_cur - new_val) == 0) {
app_brk_cur = new_val;
app_brk_end = new_val;
}
} else if (new_val < app_brk_end) {
/* We've already allocated the space */
app_brk_cur = new_val;
} else {
/* Expand */
byte *remap = (byte *)dynamorio_syscall(SYS_mremap, 4, app_brk_map,
app_brk_end - app_brk_map,
new_val - app_brk_map, 0 /*do not move*/);
if (mmap_syscall_succeeded(remap)) {
ASSERT(remap == app_brk_map);
app_brk_cur = new_val;
app_brk_end = new_val;
} else {
LOG(THREAD, LOG_HEAP, 1, "%s: mremap to " PFX " failed\n", __FUNCTION__,
new_val);
}
}
if (app_brk_cur != old_brk)
handle_app_brk(dcontext, app_brk_map, old_brk, app_brk_cur);
return app_brk_cur;
}
#endif /* LINUX */
#if defined(CLIENT_INTERFACE) && defined(LINUX)
DR_API
/* XXX: could add dr_raw_mem_realloc() instead of dr_raw_mremap() -- though there
* is no realloc for Windows: supposed to reserve yourself and then commit in
* pieces.
*/
void *
dr_raw_mremap(void *old_address, size_t old_size, size_t new_size, int flags,
void *new_address)
{
byte *res;
dr_mem_info_t info;
dcontext_t *dcontext = get_thread_private_dcontext();
/* i#173: we need prot + type from prior to mremap */
DEBUG_DECLARE(bool ok =)
query_memory_ex(old_address, &info);
/* XXX: this could be a large region w/ multiple protection regions
* inside. For now we assume our handling of it doesn't care.
*/
ASSERT(ok);
if (is_pretend_or_executable_writable(old_address))
info.prot |= DR_MEMPROT_WRITE;
/* we just unconditionally send the 5th param */
res = (byte *)dynamorio_syscall(SYS_mremap, 5, old_address, old_size, new_size, flags,
new_address);
handle_app_mremap(dcontext, res, new_size, old_address, old_size, info.prot,
info.size);
return res;
}
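/* Usage sketch for dr_raw_brk below (hypothetical client code, not part of
* this file): pass NULL to query the current break, or the desired new break
* to resize:
*   void *cur = dr_raw_brk(NULL);
*   void *res = dr_raw_brk((byte *)cur + PAGE_SIZE);
* If res == cur, the expansion failed and the break is unchanged.
*/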
DR_API
void *
dr_raw_brk(void *new_address)
{
dcontext_t *dcontext = get_thread_private_dcontext();
if (DYNAMO_OPTION(emulate_brk)) {
/* i#1004: emulate brk via a separate mmap */
return (void *)emulate_app_brk(dcontext, (byte *)new_address);
} else {
/* We pay the cost of 2 syscalls. This should be infrequent enough that
* it doesn't matter.
*/
if (new_address == NULL) {
/* Just a query */
return (void *)dynamorio_syscall(SYS_brk, 1, new_address);
} else {
byte *old_brk = (byte *)dynamorio_syscall(SYS_brk, 1, 0);
byte *res = (byte *)dynamorio_syscall(SYS_brk, 1, new_address);
handle_app_brk(dcontext, NULL, old_brk, res);
return res;
}
}
}
#endif /* CLIENT_INTERFACE && LINUX */
/* caller is required to handle thread synchronization and to update dynamo vm areas */
void
os_heap_free(void *p, size_t size, heap_error_code_t *error_code)
{
long rc;
ASSERT(error_code != NULL);
if (!dynamo_exited)
LOG(GLOBAL, LOG_HEAP, 4, "os_heap_free: %d bytes @ " PFX "\n", size, p);
rc = munmap_syscall(p, size);
if (rc != 0) {
*error_code = -rc;
} else {
*error_code = HEAP_ERROR_SUCCESS;
}
ASSERT(rc == 0);
}
/* reserve virtual address space without committing swap space for it,
and of course no physical pages since it will never be touched */
/* to be transparent, we do not use sbrk, and are
* instead using mmap, and asserting that all os_heap requests are for
* reasonably large pieces of memory */
void *
os_heap_reserve(void *preferred, size_t size, heap_error_code_t *error_code,
bool executable)
{
void *p;
uint prot = PROT_NONE;
#ifdef VMX86_SERVER
/* PR 365331: we need to be in the mmap_text region for code cache and
* gencode (PROT_EXEC).
*/
ASSERT(!os_in_vmkernel_userworld() || !executable || preferred == NULL ||
((byte *)preferred >= os_vmk_mmap_text_start() &&
((byte *)preferred) + size <= os_vmk_mmap_text_end()));
/* Note that a preferred address overrides PROT_EXEC and a mmap_data
* address will be honored, even though any execution there will fault.
*/
/* FIXME: note that PROT_EXEC => read access, so our guard pages and other
* non-committed memory, while not writable, is readable.
* Plus, we can't later clear all prot bits for userworld mmap due to PR 107872
* (PR 365748 covers fixing this for us).
* But in most uses we should get our preferred vmheap and shouldn't run
* out of vmheap, so this should be a corner-case issue.
*/
if (executable)
prot = PROT_EXEC;
#endif
/* should only be used on aligned pieces */
ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE));
ASSERT(error_code != NULL);
/* FIXME: note that this memory is in fact still committed - see man mmap */
/* FIXME: case 2347 on Linux or -vm_reserve should be set to false */
/* FIXME: need to actually perform the mmap with MAP_NORESERVE for a true reservation */
p = mmap_syscall(
preferred, size, prot,
MAP_PRIVATE |
MAP_ANONYMOUS IF_X64(| (DYNAMO_OPTION(heap_in_lower_4GB) ? MAP_32BIT : 0)),
-1, 0);
if (!mmap_syscall_succeeded(p)) {
*error_code = -(heap_error_code_t)(ptr_int_t)p;
LOG(GLOBAL, LOG_HEAP, 4, "os_heap_reserve %d bytes failed " PFX "\n", size, p);
return NULL;
} else if (preferred != NULL && p != preferred) {
/* We didn't get the preferred address. To harmonize with windows behavior and
* give greater control we fail the reservation. */
heap_error_code_t dummy;
*error_code = HEAP_ERROR_NOT_AT_PREFERRED;
os_heap_free(p, size, &dummy);
ASSERT(dummy == HEAP_ERROR_SUCCESS);
LOG(GLOBAL, LOG_HEAP, 4,
"os_heap_reserve %d bytes at " PFX " not preferred " PFX "\n", size,
preferred, p);
return NULL;
} else {
*error_code = HEAP_ERROR_SUCCESS;
}
LOG(GLOBAL, LOG_HEAP, 2, "os_heap_reserve: %d bytes @ " PFX "\n", size, p);
#ifdef VMX86_SERVER
/* PR 365331: ensure our memory is all in the mmap_text region */
ASSERT(!os_in_vmkernel_userworld() || !executable ||
((byte *)p >= os_vmk_mmap_text_start() &&
((byte *)p) + size <= os_vmk_mmap_text_end()));
#endif
#if defined(ANDROID) && defined(DEBUG)
/* We don't label in release to be more transparent */
dynamorio_syscall(SYS_prctl, 5, PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, size,
"DynamoRIO-internal");
#endif
return p;
}
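/* Walks the OS memory map looking for a free gap of at least size bytes within
* [start, end). On success, writes the gap bounds to found_start/found_end and
* returns true.
*/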
static bool
find_free_memory_in_region(byte *start, byte *end, size_t size, byte **found_start OUT,
byte **found_end OUT)
{
memquery_iter_t iter;
/* XXX: despite /proc/sys/vm/mmap_min_addr == PAGE_SIZE, mmap won't
* give me that address if I use it as a hint.
*/
app_pc last_end = (app_pc)(PAGE_SIZE * 16);
bool found = false;
memquery_iterator_start(&iter, NULL, false /*won't alloc*/);
while (memquery_iterator_next(&iter)) {
if (iter.vm_start >= start &&
MIN(iter.vm_start, end) - MAX(last_end, start) >= size) {
if (found_start != NULL)
*found_start = MAX(last_end, start);
if (found_end != NULL)
*found_end = MIN(iter.vm_start, end);
found = true;
break;
}
if (iter.vm_end >= end)
break;
last_end = iter.vm_end;
}
memquery_iterator_stop(&iter);
return found;
}
void *
os_heap_reserve_in_region(void *start, void *end, size_t size,
heap_error_code_t *error_code, bool executable)
{
byte *p = NULL;
byte *try_start = NULL, *try_end = NULL;
uint iters = 0;
ASSERT(ALIGNED(start, PAGE_SIZE) && ALIGNED(end, PAGE_SIZE));
ASSERT(ALIGNED(size, PAGE_SIZE));
LOG(GLOBAL, LOG_HEAP, 3,
"os_heap_reserve_in_region: " SZFMT " bytes in " PFX "-" PFX "\n", size, start,
end);
/* if no restriction on location use regular os_heap_reserve() */
if (start == (void *)PTR_UINT_0 && end == (void *)POINTER_MAX)
return os_heap_reserve(NULL, size, error_code, executable);
/* loop to handle races */
#define RESERVE_IN_REGION_MAX_ITERS 128
while (find_free_memory_in_region(start, end, size, &try_start, &try_end)) {
/* If there's space we'd prefer the end, to avoid the common case of
* a large binary + heap at attach where we're likely to reserve
* right at the start of the brk: we'd prefer to leave more brk space.
*/
p = os_heap_reserve(try_end - size, size, error_code, executable);
if (p != NULL) {
ASSERT(*error_code == HEAP_ERROR_SUCCESS);
ASSERT(p >= (byte *)start && p + size <= (byte *)end);
break;
}
if (++iters > RESERVE_IN_REGION_MAX_ITERS) {
ASSERT_NOT_REACHED();
break;
}
}
if (p == NULL)
*error_code = HEAP_ERROR_CANT_RESERVE_IN_REGION;
else
*error_code = HEAP_ERROR_SUCCESS;
LOG(GLOBAL, LOG_HEAP, 2,
"os_heap_reserve_in_region: reserved " SZFMT " bytes @ " PFX " in " PFX "-" PFX
"\n",
size, p, start, end);
return p;
}
/* commit previously reserved with os_heap_reserve pages */
/* returns false when out of memory */
/* A replacement of os_heap_alloc can be constructed by using os_heap_reserve
and os_heap_commit on a subset of the reserved pages. */
/* caller is required to handle thread synchronization */
bool
os_heap_commit(void *p, size_t size, uint prot, heap_error_code_t *error_code)
{
uint os_prot = memprot_to_osprot(prot);
long res;
/* should only be used on aligned pieces */
ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE));
ASSERT(p);
ASSERT(error_code != NULL);
/* FIXME: note that the memory would not be truly committed here if we had
* performed the reserving mmap with MAP_NORESERVE */
res = mprotect_syscall(p, size, os_prot);
if (res != 0) {
*error_code = -res;
return false;
} else {
*error_code = HEAP_ERROR_SUCCESS;
}
LOG(GLOBAL, LOG_HEAP, 2, "os_heap_commit: %d bytes @ " PFX "\n", size, p);
return true;
}
/* caller is required to handle thread synchronization and to update dynamo vm areas */
void
os_heap_decommit(void *p, size_t size, heap_error_code_t *error_code)
{
int rc;
ASSERT(error_code != NULL);
if (!dynamo_exited)
LOG(GLOBAL, LOG_HEAP, 4, "os_heap_decommit: %d bytes @ " PFX "\n", size, p);
*error_code = HEAP_ERROR_SUCCESS;
/* FIXME: for now do nothing since os_heap_reserve has in fact committed the memory */
rc = 0;
/* TODO:
p = mmap_syscall(p, size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
we should either do a mremap()
or we can do a munmap() followed 'quickly' by a mmap() -
also see above the comment that os_heap_reserve() in fact is not so lightweight
*/
ASSERT(rc == 0);
}
bool
os_heap_systemwide_overcommit(heap_error_code_t last_error_code)
{
/* FIXME: conservative answer yes */
return true;
}
bool
os_heap_get_commit_limit(size_t *commit_used, size_t *commit_limit)
{
/* FIXME - NYI */
return false;
}
/* yield the current thread */
void
os_thread_yield()
{
#ifdef MACOS
/* XXX i#1291: use raw syscall instead */
swtch_pri(0);
#else
dynamorio_syscall(SYS_sched_yield, 0);
#endif
}
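/* Sends signum to the given thread via SYS_tgkill on Linux; not yet
* implemented on MacOS (i#58).
*/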
bool
thread_signal(process_id_t pid, thread_id_t tid, int signum)
{
#ifdef MACOS
/* FIXME i#58: this takes in a thread port. Need to map thread id to port.
* Need to figure out whether we support raw Mach threads w/o pthread on top.
*/
ASSERT_NOT_IMPLEMENTED(false);
return false;
#else
/* FIXME: for non-NPTL use SYS_kill */
/* Note that the pid is equivalent to the thread group id.
* However, we can have threads sharing address space but not pid
* (if created via CLONE_VM but not CLONE_THREAD), so make sure to
* use the pid of the target thread, not our pid.
*/
return (dynamorio_syscall(SYS_tgkill, 3, pid, tid, signum) == 0);
#endif
}
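/* Sends signum to the thread described by tr, using the Mach thread port on
* MacOS and thread_signal() (tgkill) on Linux.
*/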
static bool
known_thread_signal(thread_record_t *tr, int signum)
{
#ifdef MACOS
ptr_int_t res;
if (tr->dcontext == NULL)
return FALSE;
res = dynamorio_syscall(SYS___pthread_kill, 2, tr->dcontext->thread_port, signum);
LOG(THREAD_GET, LOG_ALL, 3, "%s: signal %d to port %d => %ld\n", __FUNCTION__, signum,
tr->dcontext->thread_port, res);
return res == 0;
#else
return thread_signal(tr->pid, tr->id, signum);
#endif
}
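/* Sleeps for the requested number of milliseconds: on MacOS by waiting on a
* Mach semaphore, elsewhere via nanosleep, restarting the remaining interval
* if interrupted by a signal.
*/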
void
os_thread_sleep(uint64 milliseconds)
{
#ifdef MACOS
semaphore_t sem = MACH_PORT_NULL;
int res;
#else
struct timespec remain;
int count = 0;
#endif
struct timespec req;
req.tv_sec = (milliseconds / 1000);
/* docs say can go up to 1000000000, but doesn't work on FC9 */
req.tv_nsec = (milliseconds % 1000) * 1000000;
#ifdef MACOS
if (sem == MACH_PORT_NULL) {
DEBUG_DECLARE(kern_return_t res =)
semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
ASSERT(res == KERN_SUCCESS);
}
res =
dynamorio_syscall(SYSNUM_NO_CANCEL(SYS___semwait_signal), 6, sem, MACH_PORT_NULL,
1, 1, (int64_t)req.tv_sec, (int32_t)req.tv_nsec);
if (res == -EINTR) {
/* FIXME i#58: figure out how much time elapsed and re-wait */
}
#else
/* FIXME: if we need accurate sleeps in presence of itimers we should
* be using SYS_clock_nanosleep w/ an absolute time instead of relative
*/
while (dynamorio_syscall(SYS_nanosleep, 2, &req, &remain) == -EINTR) {
/* interrupted by signal or something: finish the interval */
ASSERT_CURIOSITY_ONCE(remain.tv_sec <= req.tv_sec &&
(remain.tv_sec < req.tv_sec ||
/* there seems to be some rounding, and sometimes
* remain nsec > req nsec (I've seen 40K diff)
*/
req.tv_nsec - remain.tv_nsec < 100000 ||
req.tv_nsec - remain.tv_nsec > -100000));
/* not unusual for client threads to use itimers and have their run
* routine sleep forever
*/
if (count++ > 3 && !IS_CLIENT_THREAD(get_thread_private_dcontext())) {
ASSERT_NOT_REACHED();
break; /* paranoid */
}
req = remain;
}
#endif
}
bool
os_thread_suspend(thread_record_t *tr)
{
os_thread_data_t *ostd = (os_thread_data_t *)tr->dcontext->os_field;
ASSERT(ostd != NULL);
/* See synch comments in os_thread_resume: the mutex held there
* prevents prematurely sending a re-suspend signal.
*/
d_r_mutex_lock(&ostd->suspend_lock);
ostd->suspend_count++;
ASSERT(ostd->suspend_count > 0);
/* If already suspended, do not send another signal. However, we do
* need to ensure the target is suspended in case of a race, so we can't
* just return.
*/
if (ostd->suspend_count == 1) {
/* PR 212090: we use a custom signal handler to suspend. We wait
* here until the target reaches the suspend point, and leave it
* up to the caller to check whether it is a safe suspend point,
* to match Windows behavior.
*/
ASSERT(ksynch_get_value(&ostd->suspended) == 0);
if (!known_thread_signal(tr, SUSPEND_SIGNAL)) {
ostd->suspend_count--;
d_r_mutex_unlock(&ostd->suspend_lock);
return false;
}
}
/* we can unlock before the wait loop b/c we're using a separate "resumed"
* int and os_thread_resume holds the lock across its wait. this way a resume
* can proceed as soon as the suspended thread is suspended, before the
* suspending thread gets scheduled again.
*/
d_r_mutex_unlock(&ostd->suspend_lock);
while (ksynch_get_value(&ostd->suspended) == 0) {
/* For Linux, this waits only if the suspended flag is not yet set to 1. The
* return value doesn't matter because the flag will be re-checked.
*/
/* We time out and assert in debug build to provide better diagnostics than a
* silent hang. We can't safely return false b/c the synch model here
* assumes there will not be a retry until the target reaches the suspend
* point. Xref i#2779.
*/
#define SUSPEND_DEBUG_TIMEOUT_MS 5000
if (ksynch_wait(&ostd->suspended, 0, SUSPEND_DEBUG_TIMEOUT_MS) == -ETIMEDOUT) {
ASSERT_CURIOSITY(false && "failed to suspend thread in 5s");
}
if (ksynch_get_value(&ostd->suspended) == 0) {
/* If it still has to wait, give up the cpu. */
os_thread_yield();
}
}
return true;
}
bool
os_thread_resume(thread_record_t *tr)
{
os_thread_data_t *ostd = (os_thread_data_t *)tr->dcontext->os_field;
ASSERT(ostd != NULL);
/* This mutex prevents sending a re-suspend signal before the target
* reaches a safe post-resume point from a first suspend signal.
* Given that race, we can't just use atomic_add_exchange_int +
* atomic_dec_becomes_zero on suspend_count.
*/
d_r_mutex_lock(&ostd->suspend_lock);
ASSERT(ostd->suspend_count > 0);
/* PR 479750: if we do get here and the target is not suspended, abort
* to avoid possible deadlocks
*/
if (ostd->suspend_count == 0) {
d_r_mutex_unlock(&ostd->suspend_lock);
return true; /* the thread is "resumed", so success status */
}
ostd->suspend_count--;
if (ostd->suspend_count > 0) {
d_r_mutex_unlock(&ostd->suspend_lock);
return true; /* still suspended */
}
ksynch_set_value(&ostd->wakeup, 1);
ksynch_wake(&ostd->wakeup);
while (ksynch_get_value(&ostd->resumed) == 0) {
/* For Linux, this waits only if the resumed flag is not yet set to 1. The
* return value doesn't matter because the flag will be re-checked.
*/
ksynch_wait(&ostd->resumed, 0, 0);
if (ksynch_get_value(&ostd->resumed) == 0) {
/* If it still has to wait, give up the cpu. */
os_thread_yield();
}
}
ksynch_set_value(&ostd->wakeup, 0);
ksynch_set_value(&ostd->resumed, 0);
d_r_mutex_unlock(&ostd->suspend_lock);
return true;
}
bool
os_thread_terminate(thread_record_t *tr)
{
/* PR 297902: for NPTL sending SIGKILL will take down the whole group:
* so instead we send SIGUSR2 and have a flag set telling
* target thread to execute SYS_exit
*/
os_thread_data_t *ostd = (os_thread_data_t *)tr->dcontext->os_field;
ASSERT(ostd != NULL);
ostd->terminate = true;
/* Even if the thread is currently suspended, it's simpler to send it
* another signal than to resume it.
*/
return known_thread_signal(tr, SUSPEND_SIGNAL);
}
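/* Returns whether the target thread has signaled termination via its
* ostd->terminated ksynch variable (see os_thread_terminate()).
*/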
bool
is_thread_terminated(dcontext_t *dcontext)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
ASSERT(ostd != NULL);
return (ksynch_get_value(&ostd->terminated) == 1);
}
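/* Blocks until *var becomes non-zero, alternating ksynch waits with yields to
* handle spurious wakeups.
*/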
static void
os_wait_thread_futex(KSYNCH_TYPE *var)
{
while (ksynch_get_value(var) == 0) {
/* On Linux, this waits only if var is not yet set to 1. The return value
* doesn't matter because var will be re-checked.
*/
ksynch_wait(var, 0, 0);
if (ksynch_get_value(var) == 0) {
/* If it still has to wait, give up the cpu. */
os_thread_yield();
}
}
}
void
os_wait_thread_terminated(dcontext_t *dcontext)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
ASSERT(ostd != NULL);
os_wait_thread_futex(&ostd->terminated);
}
void
os_wait_thread_detached(dcontext_t *dcontext)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
ASSERT(ostd != NULL);
os_wait_thread_futex(&ostd->detached);
}
void
os_signal_thread_detach(dcontext_t *dcontext)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
ASSERT(ostd != NULL);
ostd->do_detach = true;
}
bool
thread_get_mcontext(thread_record_t *tr, priv_mcontext_t *mc)
{
/* PR 212090: only works when target is suspended by us, and
* we then take the signal context
*/
os_thread_data_t *ostd = (os_thread_data_t *)tr->dcontext->os_field;
ASSERT(ostd != NULL);
ASSERT(ostd->suspend_count > 0);
if (ostd->suspend_count == 0)
return false;
ASSERT(ostd->suspended_sigcxt != NULL);
sigcontext_to_mcontext(mc, ostd->suspended_sigcxt, DR_MC_ALL);
return true;
}
bool
thread_set_mcontext(thread_record_t *tr, priv_mcontext_t *mc)
{
/* PR 212090: only works when target is suspended by us, and
* we then replace the signal context
*/
os_thread_data_t *ostd = (os_thread_data_t *)tr->dcontext->os_field;
ASSERT(ostd != NULL);
ASSERT(ostd->suspend_count > 0);
if (ostd->suspend_count == 0)
return false;
ASSERT(ostd->suspended_sigcxt != NULL);
mcontext_to_sigcontext(ostd->suspended_sigcxt, mc, DR_MC_ALL);
return true;
}
/* Only one of mc and dmc can be non-NULL. */
bool
os_context_to_mcontext(dr_mcontext_t *dmc, priv_mcontext_t *mc, os_cxt_ptr_t osc)
{
if (dmc != NULL)
sigcontext_to_mcontext(dr_mcontext_as_priv_mcontext(dmc), &osc, dmc->flags);
else if (mc != NULL)
sigcontext_to_mcontext(mc, &osc, DR_MC_ALL);
else
return false;
return true;
}
/* Only one of mc and dmc can be non-NULL. */
bool
mcontext_to_os_context(os_cxt_ptr_t osc, dr_mcontext_t *dmc, priv_mcontext_t *mc)
{
if (dmc != NULL)
mcontext_to_sigcontext(&osc, dr_mcontext_as_priv_mcontext(dmc), dmc->flags);
else if (mc != NULL)
mcontext_to_sigcontext(&osc, mc, DR_MC_ALL);
else
return false;
return true;
}
bool
is_thread_currently_native(thread_record_t *tr)
{
return (!tr->under_dynamo_control ||
/* start/stop doesn't change under_dynamo_control and has its own field */
(tr->dcontext != NULL && tr->dcontext->currently_stopped));
}
#ifdef CLIENT_SIDELINE /* PR 222812: tied to sideline usage */
# ifdef LINUX /* XXX i#58: just until we have Mac support */
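/* Entry point for client threads created by dr_create_client_thread(): sets up
* DR thread state from the clone record, waits for the app to start, runs the
* client's function, then cleans up and terminates the thread.
*/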
static void
client_thread_run(void)
{
void (*func)(void *param);
dcontext_t *dcontext;
byte *xsp;
GET_STACK_PTR(xsp);
void *crec = get_clone_record((reg_t)xsp);
IF_DEBUG(int rc =)
dynamo_thread_init(get_clone_record_dstack(crec), NULL, crec, true);
ASSERT(rc != -1); /* this better be a new thread */
dcontext = get_thread_private_dcontext();
ASSERT(dcontext != NULL);
LOG(THREAD, LOG_ALL, 1, "\n***** CLIENT THREAD %d *****\n\n", d_r_get_thread_id());
/* We stored the func and args in particular clone record fields */
func = (void (*)(void *param))dcontext->next_tag;
/* Reset any inherited mask (i#2337). */
signal_swap_mask(dcontext, false /*to DR*/);
void *arg = (void *)get_clone_record_app_xsp(crec);
LOG(THREAD, LOG_ALL, 1, "func=" PFX ", arg=" PFX "\n", func, arg);
/* i#2335: we support setup separate from start, and we want to allow a client
* to create a client thread during init, but we do not support that thread
* executing until the app has started (b/c we have no signal handlers in place).
*/
wait_for_event(dr_app_started, 0);
(*func)(arg);
LOG(THREAD, LOG_ALL, 1, "\n***** CLIENT THREAD %d EXITING *****\n\n",
d_r_get_thread_id());
block_cleanup_and_terminate(dcontext, SYS_exit, 0, 0, false /*just thread*/,
IF_MACOS_ELSE(dcontext->thread_port, 0), 0);
}
# endif
/* i#41/PR 222812: client threads
* * thread must have dcontext since many API routines require one and we
* don't expose GLOBAL_DCONTEXT (xref PR 243008, PR 216936, PR 536058)
* * reversed the old design of not using dstack (partly b/c want dcontext)
* and I'm using the same parent-creates-dstack and clone_record_t design
* to create linux threads: dstack should be big enough for client threads
* (xref PR 202669)
* * reversed the old design of explicit dr_terminate_client_thread(): now
* the thread is auto-terminated and stack cleaned up on return from run
* function
*/
DR_API bool
dr_create_client_thread(void (*func)(void *param), void *arg)
{
# ifdef LINUX
dcontext_t *dcontext = get_thread_private_dcontext();
byte *xsp;
/* We do not pass SIGCHLD since don't want signal to parent and don't support
* waiting on child.
* We do not pass CLONE_THREAD so that the new thread is in its own thread
* group, allowing it to have private itimers and not receive any signals
* sent to the app's thread groups. It also makes the thread not show up in
* the thread list for the app, making it more invisible.
*/
uint flags = CLONE_VM | CLONE_FS | CLONE_FILES |
CLONE_SIGHAND IF_NOT_X64(| CLONE_SETTLS)
/* CLONE_THREAD required. Signals and itimers are private anyway. */
IF_VMX86(| (os_in_vmkernel_userworld() ? CLONE_THREAD : 0));
pre_second_thread();
/* need to share signal handler table, prior to creating clone record */
handle_clone(dcontext, flags);
ATOMIC_INC(int, uninit_thread_count);
void *crec = create_clone_record(dcontext, (reg_t *)&xsp);
/* make sure client_thread_run can get the func and arg, and that
* signal_thread_inherit gets the right syscall info
*/
set_clone_record_fields(crec, (reg_t)arg, (app_pc)func, SYS_clone, flags);
/* i#501 switch to app's tls before creating client thread */
if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false))
os_switch_lib_tls(dcontext, true /*to app*/);
# if defined(X86) && !defined(X64)
/* For the TCB we simply share the parent's. On Linux we could just inherit
* the same selector but not for VMX86_SERVER so we specify for both for
* 32-bit. Most of the fields are pthreads-specific and we assume the ones
* that will be used (such as tcbhead_t.sysinfo @0x10) are read-only.
*/
our_modify_ldt_t desc;
/* if get_segment_base() returned size too we could use it */
uint index = tls_priv_lib_index();
ASSERT(index != -1);
if (!tls_get_descriptor(index, &desc)) {
LOG(THREAD, LOG_ALL, 1, "%s: client thread tls get entry %d failed\n",
__FUNCTION__, index);
return false;
}
# endif
LOG(THREAD, LOG_ALL, 1, "dr_create_client_thread xsp=" PFX " dstack=" PFX "\n", xsp,
get_clone_record_dstack(crec));
thread_id_t newpid =
dynamorio_clone(flags, xsp, NULL, IF_X86_ELSE(IF_X64_ELSE(NULL, &desc), NULL),
NULL, client_thread_run);
/* i#501 switch to app's tls before creating client thread */
if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false))
os_switch_lib_tls(dcontext, false /*to dr*/);
if (newpid < 0) {
LOG(THREAD, LOG_ALL, 1, "client thread creation failed: %d\n", newpid);
return false;
} else if (newpid == 0) {
/* dynamorio_clone() should have called client_thread_run directly */
ASSERT_NOT_REACHED();
return false;
}
return true;
# else
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#58: implement on Mac */
return false;
# endif
}
#endif /* CLIENT_SIDELINE PR 222812: tied to sideline usage */
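/* Returns the number of processors, caching the result. On MacOS this queries
* sysctl; otherwise we count the cpuN entries under /sys/devices/system/cpu.
*/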
int
get_num_processors(void)
{
static uint num_cpu = 0; /* cached value */
if (!num_cpu) {
#ifdef MACOS
DEBUG_DECLARE(bool ok =)
sysctl_query(CTL_HW, HW_NCPU, &num_cpu, sizeof(num_cpu));
ASSERT(ok);
#else
/* We used to use get_nprocs_conf, but that's in libc, so now we just
* look at the /sys filesystem ourselves, which is what glibc does.
*/
uint local_num_cpus = 0;
file_t cpu_dir = os_open_directory("/sys/devices/system/cpu", OS_OPEN_READ);
dir_iterator_t iter;
ASSERT(cpu_dir != INVALID_FILE &&
"/sys must be mounted: mount -t sysfs sysfs /sys");
os_dir_iterator_start(&iter, cpu_dir);
while (os_dir_iterator_next(&iter)) {
int dummy_num;
if (sscanf(iter.name, "cpu%d", &dummy_num) == 1)
local_num_cpus++;
}
os_close(cpu_dir);
num_cpu = local_num_cpus;
#endif
ASSERT(num_cpu);
}
return num_cpu;
}
/* i#46: To support -no_private_loader, we have to call the dlfcn family of
* routines in libdl.so. When we do early injection, there is no loader to
* resolve these imports, so they will crash. Early injection is incompatible
* with -no_private_loader, so this should never happen.
*/
#if defined(CLIENT_INTERFACE) || defined(HOT_PATCHING_INTERFACE)
shlib_handle_t
load_shared_library(const char *name, bool reachable)
{
# ifdef STATIC_LIBRARY
if (os_files_same(name, get_application_name())) {
/* The private loader falls back to dlsym() and friends for modules it
* doesn't recognize, so this works without disabling the private loader.
*/
return dlopen(NULL, RTLD_LAZY); /* Gets a handle to the exe. */
}
# endif
/* We call locate_and_load_private_library() to support searching for
* a pathless name.
*/
if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false))
return (shlib_handle_t)locate_and_load_private_library(name, reachable);
# if defined(STATIC_LIBRARY) || defined(MACOS)
ASSERT(!DYNAMO_OPTION(early_inject));
return dlopen(name, RTLD_LAZY);
# else
/* -no_private_loader is no longer supported in our default builds.
* If we want it for hybrid mode we should add a new build param and include
* the libdl calls here under that param.
*/
ASSERT_NOT_REACHED();
return NULL;
# endif
}
#endif
#if defined(CLIENT_INTERFACE)
shlib_routine_ptr_t
lookup_library_routine(shlib_handle_t lib, const char *name)
{
if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
return (shlib_routine_ptr_t)get_private_library_address((app_pc)lib, name);
}
# if defined(STATIC_LIBRARY) || defined(MACOS)
ASSERT(!DYNAMO_OPTION(early_inject));
return dlsym(lib, name);
# else
ASSERT_NOT_REACHED(); /* -no_private_loader is no longer supported: see above */
return NULL;
# endif
}
void
unload_shared_library(shlib_handle_t lib)
{
if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
unload_private_library(lib);
} else {
# if defined(STATIC_LIBRARY) || defined(MACOS)
ASSERT(!DYNAMO_OPTION(early_inject));
if (!DYNAMO_OPTION(avoid_dlclose)) {
dlclose(lib);
}
# else
ASSERT_NOT_REACHED(); /* -no_private_loader is no longer supported: see above */
# endif
}
}
void
shared_library_error(char *buf, int maxlen)
{
const char *err;
if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
err = "error in private loader";
} else {
# if defined(STATIC_LIBRARY) || defined(MACOS)
ASSERT(!DYNAMO_OPTION(early_inject));
err = dlerror();
if (err == NULL) {
err = "dlerror returned NULL";
}
# else
ASSERT_NOT_REACHED(); /* -no_private_loader is no longer supported */
err = "unknown error";
# endif
}
strncpy(buf, err, maxlen - 1);
buf[maxlen - 1] = '\0'; /* strncpy won't put on trailing null if maxes out */
}
/* addr is any pointer known to lie within the library.
* for linux, one of addr or name is needed; for windows, neither is needed.
*/
bool
shared_library_bounds(IN shlib_handle_t lib, IN byte *addr, IN const char *name,
OUT byte **start, OUT byte **end)
{
ASSERT(start != NULL && end != NULL);
/* PR 366195: dlopen() handle truly is opaque, so we have to use either
* addr or name
*/
ASSERT(addr != NULL || name != NULL);
*start = addr;
if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
privmod_t *mod;
/* look for private library first */
acquire_recursive_lock(&privload_lock);
mod = privload_lookup_by_base((app_pc)lib);
if (name != NULL && mod == NULL)
mod = privload_lookup(name);
if (mod != NULL && !mod->externally_loaded) {
*start = mod->base;
if (end != NULL)
*end = mod->base + mod->size;
release_recursive_lock(&privload_lock);
return true;
}
release_recursive_lock(&privload_lock);
}
return (memquery_library_bounds(name, start, end, NULL, 0, NULL, 0) > 0);
}
#endif /* defined(CLIENT_INTERFACE) */
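/* libc independence; uses the non-cancellable variant of SYS_fcntl */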
static int
fcntl_syscall(int fd, int cmd, long arg)
{
return dynamorio_syscall(SYSNUM_NO_CANCEL(SYS_fcntl), 3, fd, cmd, arg);
}
/* dups curfd to a private fd.
* returns -1 if unsuccessful.
*/
file_t
fd_priv_dup(file_t curfd)
{
file_t newfd = -1;
if (DYNAMO_OPTION(steal_fds) > 0) {
/* RLIMIT_NOFILE is 1 greater than the max fd, and F_DUPFD starts at the given value */
/* XXX: if > linux 2.6.24, can use F_DUPFD_CLOEXEC to avoid later call:
* so how do we tell if the flag is supported? try calling once at init?
*/
newfd = fcntl_syscall(curfd, F_DUPFD, min_dr_fd);
if (newfd < 0) {
/* We probably ran out of fds, esp if debug build and there are
* lots of threads. Should we track how many we've given out to
* avoid a failed syscall every time after?
*/
SYSLOG_INTERNAL_WARNING_ONCE("ran out of stolen fd space");
/* Try again but this time in the app space, somewhere high up
* to avoid issues like tcsh assuming it can own fds 3-5 for
* piping std{in,out,err} (xref the old -open_tcsh_fds option).
*/
newfd = fcntl_syscall(curfd, F_DUPFD, min_dr_fd / 2);
}
}
return newfd;
}
bool
fd_mark_close_on_exec(file_t fd)
{
/* we assume FD_CLOEXEC is the only flag and don't bother w/ F_GETFD */
if (fcntl_syscall(fd, F_SETFD, FD_CLOEXEC) != 0) {
SYSLOG_INTERNAL_WARNING("unable to mark file %d as close-on-exec", fd);
return false;
}
return true;
}
void
fd_table_add(file_t fd, uint flags)
{
if (fd_table != NULL) {
TABLE_RWLOCK(fd_table, write, lock);
DODEBUG({
/* i#1010: If the fd is already in the table, chances are it's a
* stale logfile fd left behind by a vforked or cloned child that
* called execve. Avoid an assert if that happens.
*/
bool present = generic_hash_remove(GLOBAL_DCONTEXT, fd_table, (ptr_uint_t)fd);
ASSERT_CURIOSITY_ONCE(!present && "stale fd not cleaned up");
});
generic_hash_add(GLOBAL_DCONTEXT, fd_table, (ptr_uint_t)fd,
/* store the flags, w/ a set bit to ensure not 0 */
(void *)(ptr_uint_t)(flags | OS_OPEN_RESERVED));
TABLE_RWLOCK(fd_table, write, unlock);
} else {
#ifdef DEBUG
num_fd_add_pre_heap++;
/* we add main_logfile in d_r_os_init() */
ASSERT(num_fd_add_pre_heap == 1 && "only main_logfile should come here");
#endif
}
}
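/* Returns whether fd was registered by DR in fd_table (i.e., is a DR-owned fd). */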
static bool
fd_is_dr_owned(file_t fd)
{
ptr_uint_t flags;
ASSERT(fd_table != NULL);
TABLE_RWLOCK(fd_table, read, lock);
flags = (ptr_uint_t)generic_hash_lookup(GLOBAL_DCONTEXT, fd_table, (ptr_uint_t)fd);
TABLE_RWLOCK(fd_table, read, unlock);
return (flags != 0);
}
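/* Returns whether fd lies in the numeric range DR steals for its own fds. */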
static bool
fd_is_in_private_range(file_t fd)
{
return (DYNAMO_OPTION(steal_fds) > 0 && min_dr_fd > 0 && fd >= min_dr_fd);
}
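/* Opens fname and, when possible, dups the result into DR's private fd range,
* marks it close-on-exec, and records it in fd_table.
*/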
file_t
os_open_protected(const char *fname, int os_open_flags)
{
file_t dup;
file_t res = os_open(fname, os_open_flags);
if (res < 0)
return res;
/* we could have os_open() always switch to a private fd but it's probably
* not worth the extra syscall for temporary open/close sequences so we
* only use it for persistent files
*/
dup = fd_priv_dup(res);
if (dup >= 0) {
close_syscall(res);
res = dup;
fd_mark_close_on_exec(res);
} /* else just keep original */
/* ditto here, plus for things like config.c opening files we can't handle
* grabbing locks and often don't have heap available so no fd_table
*/
fd_table_add(res, os_open_flags);
return res;
}
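/* Closes a DR-private file and removes its fd_table entry. */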
void
os_close_protected(file_t f)
{
ASSERT(fd_table != NULL || dynamo_exited);
if (fd_table != NULL) {
TABLE_RWLOCK(fd_table, write, lock);
generic_hash_remove(GLOBAL_DCONTEXT, fd_table, (ptr_uint_t)f);
TABLE_RWLOCK(fd_table, write, unlock);
}
os_close(f);
}
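/* Writes the current working directory into buf; returns false on failure or
* truncation.
*/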
bool
os_get_current_dir(char *buf, size_t bufsz)
{
#ifdef MACOS
static char noheap_buf[MAXPATHLEN];
bool res = false;
file_t fd = os_open(".", OS_OPEN_READ);
int len;
/* F_GETPATH assumes a buffer of size MAXPATHLEN */
char *fcntl_buf;
if (dynamo_heap_initialized)
fcntl_buf = global_heap_alloc(MAXPATHLEN HEAPACCT(ACCT_OTHER));
else
fcntl_buf = noheap_buf;
if (fd == INVALID_FILE)
goto cwd_error;
if (fcntl_syscall(fd, F_GETPATH, (long)fcntl_buf) != 0)
goto cwd_error;
len = snprintf(buf, bufsz, "%s", fcntl_buf);
buf[bufsz - 1] = '\0';
/* Clean up on the success path too: free the temp buffer and close the fd. */
if (dynamo_heap_initialized)
global_heap_free(fcntl_buf, MAXPATHLEN HEAPACCT(ACCT_OTHER));
os_close(fd);
return (len > 0 && len < bufsz);
cwd_error:
if (dynamo_heap_initialized)
global_heap_free(fcntl_buf, MAXPATHLEN HEAPACCT(ACCT_OTHER));
if (fd != INVALID_FILE)
os_close(fd);
return res;
#else
return (dynamorio_syscall(SYS_getcwd, 2, buf, bufsz) > 0);
#endif
}
ssize_t
os_write(file_t f, const void *buf, size_t count)
{
return write_syscall(f, buf, count);
}
/* There are enough differences vs the shared drlibc_os.c version that we override
* it here. We use a loop to ensure reachability for the core.
*/
byte *
os_map_file(file_t f, size_t *size INOUT, uint64 offs, app_pc addr, uint prot,
map_flags_t map_flags)
{
int flags;
byte *map;
#if defined(X64)
bool loop = false;
uint iters = 0;
# define MAX_MMAP_LOOP_ITERS 100
byte *region_start = NULL, *region_end = NULL;
#else
uint pg_offs;
ASSERT_TRUNCATE(pg_offs, uint, offs / PAGE_SIZE);
pg_offs = (uint)(offs / PAGE_SIZE);
#endif
#ifdef VMX86_SERVER
flags = MAP_PRIVATE; /* MAP_SHARED not supported yet */
#else
flags = TEST(MAP_FILE_COPY_ON_WRITE, map_flags) ? MAP_PRIVATE : MAP_SHARED;
#endif
#if defined(X64)
/* Allocate memory from reachable range for image: or anything (pcache
* in particular): for low 4GB, easiest to just pass MAP_32BIT (which is
* low 2GB, but good enough).
*/
if (DYNAMO_OPTION(heap_in_lower_4GB) && !TEST(MAP_FILE_FIXED, map_flags))
flags |= MAP_32BIT;
#endif
/* An fd of -1 requests anonymous memory instead of mapping a file,
* so we can request memory at a particular address via MAP_FIXED */
if (f == -1)
flags |= MAP_ANONYMOUS;
if (TEST(MAP_FILE_FIXED, map_flags))
flags |= MAP_FIXED;
#if defined(X64)
if (!TEST(MAP_32BIT, flags) && TEST(MAP_FILE_REACHABLE, map_flags)) {
vmcode_get_reachable_region(®ion_start, ®ion_end);
/* addr need not be NULL: we'll use it if it's in the region */
ASSERT(!TEST(MAP_FILE_FIXED, map_flags));
/* Loop to handle races */
loop = true;
}
while (!loop ||
(addr != NULL && addr >= region_start && addr + *size <= region_end) ||
find_free_memory_in_region(region_start, region_end, *size, &addr, NULL)) {
#endif
map = mmap_syscall(addr, *size, memprot_to_osprot(prot), flags, f,
/* 32-bit Linux's mmap2 takes the offset in pages */
IF_LINUX_ELSE(IF_X64_ELSE(offs, pg_offs), offs));
if (!mmap_syscall_succeeded(map)) {
LOG(THREAD_GET, LOG_SYSCALLS, 2, "%s failed: " PIFX "\n", __func__, map);
map = NULL;
}
#if defined(X64)
else if (loop && (map < region_start || map + *size > region_end)) {
/* Try again: probably a race. Hopefully our notion of "there's a free
* region big enough" matches the kernel's, else we'll loop forever
* (which we try to catch w/ a max iters count).
*/
munmap_syscall(map, *size);
map = NULL;
} else
break;
if (!loop)
break;
if (++iters > MAX_MMAP_LOOP_ITERS) {
ASSERT_NOT_REACHED();
map = NULL;
break;
}
addr = NULL; /* pick a new one */
}
#endif
return map;
}
bool
os_get_disk_free_space(/*IN*/ file_t file_handle,
/*OUT*/ uint64 *AvailableQuotaBytes /*OPTIONAL*/,
/*OUT*/ uint64 *TotalQuotaBytes /*OPTIONAL*/,
/*OUT*/ uint64 *TotalVolumeBytes /*OPTIONAL*/)
{
/* libc struct seems to match kernel's */
struct statfs stat;
ptr_int_t res = dynamorio_syscall(SYS_fstatfs, 2, file_handle, &stat);
if (res != 0) {
LOG(THREAD_GET, LOG_SYSCALLS, 2, "%s failed: " PIFX "\n", __func__, res);
return false;
}
LOG(GLOBAL, LOG_STATS, 3,
"os_get_disk_free_space: avail=" SZFMT ", free=" SZFMT ", bsize=" SZFMT "\n",
stat.f_bavail, stat.f_bfree, stat.f_bsize);
if (AvailableQuotaBytes != NULL)
*AvailableQuotaBytes = ((uint64)stat.f_bavail * stat.f_bsize);
/* no support for quotas */
if (TotalQuotaBytes != NULL)
*TotalQuotaBytes = ((uint64)stat.f_bavail * stat.f_bsize);
if (TotalVolumeBytes != NULL) /* despite name this is how much is free */
*TotalVolumeBytes = ((uint64)stat.f_bfree * stat.f_bsize);
return true;
}
#ifdef LINUX
static bool
symlink_is_self_exe(const char *path)
{
/* Look for "/proc/%d/exe" where %d exists in /proc/self/task/%d,
* or "/proc/self/exe". Rule out the exe link for another process
* (though that process could also be under DR, we have no simple way to
* obtain its actual app path).
*/
# define SELF_LEN_LEADER 6 /* "/proc/" */
# define SELF_LEN_TRAILER 4 /* "/exe" */
# define SELF_LEN_MAX 18
size_t len = strlen(path);
if (strcmp(path, "/proc/self/exe") == 0)
return true;
if (len < SELF_LEN_MAX && /* /proc/nnnnnn/exe */
strncmp(path, "/proc/", SELF_LEN_LEADER) == 0 &&
strncmp(path + len - SELF_LEN_TRAILER, "/exe", SELF_LEN_TRAILER) == 0) {
int pid;
if (sscanf(path + SELF_LEN_LEADER, "%d", &pid) == 1) {
char task[32];
snprintf(task, BUFFER_SIZE_ELEMENTS(task), "/proc/self/task/%d", pid);
NULL_TERMINATE_BUFFER(task);
return os_file_exists(task, true /*dir*/);
}
}
return false;
}
#endif
void
exit_process_syscall(long status)
{
/* We now assume SYS_exit_group is defined: not building on old machines,
* but will execute there. We try exit_group and if it fails we use exit.
*
* FIXME: if no exit_group, kill all other threads (==processes in same addr
* space) manually? Presumably we got here b/c at an unsafe point to do
* full exit? Or is that not true: what about dr_abort()?
*/
dynamorio_syscall(SYSNUM_EXIT_PROCESS, 1, status);
/* would assert that result is -ENOSYS but assert likely calls us => infinite loop */
exit_thread_syscall(status);
ASSERT_NOT_REACHED();
}
void
exit_thread_syscall(long status)
{
#ifdef MACOS
mach_port_t thread_port = dynamorio_mach_syscall(MACH_thread_self_trap, 0);
/* FIXME i#1403: on MacOS we fail to free the app's stack: we need to pass it to
* bsdthread_terminate.
*/
dynamorio_syscall(SYSNUM_EXIT_THREAD, 4, 0, 0, thread_port, 0);
#else
dynamorio_syscall(SYSNUM_EXIT_THREAD, 1, status);
#endif
}
/* FIXME: this one will not be easily internationalizable,
* yet it is easier to have a syslog-based Unix implementation with real strings.
*/
void
os_syslog(syslog_event_type_t priority, uint message_id, uint substitutions_num,
va_list args)
{
int native_priority;
switch (priority) {
case SYSLOG_INFORMATION: native_priority = LOG_INFO; break;
case SYSLOG_WARNING: native_priority = LOG_WARNING; break;
case SYSLOG_CRITICAL: native_priority = LOG_CRIT; break;
case SYSLOG_ERROR: native_priority = LOG_ERR; break;
default: ASSERT_NOT_REACHED();
}
/* can amount to passing a format string (careful here) to vsyslog */
/* Never let user controlled data in the format string! */
ASSERT_NOT_IMPLEMENTED(false);
}
/* This is subject to races, but should only happen at init/attach when
* there should only be one live thread.
*/
static bool
safe_read_via_query(const void *base, size_t size, void *out_buf, size_t *bytes_read)
{
bool res = false;
size_t num_read = 0;
ASSERT(!fault_handling_initialized);
/* XXX: in today's init ordering, allmem will never be initialized when we come
* here, but we check it nevertheless to be general in case this routine is
* ever called at some later time
*/
if (IF_MEMQUERY_ELSE(false, memcache_initialized()))
res = is_readable_without_exception_internal(base, size, false /*use allmem*/);
else
res = is_readable_without_exception_query_os((void *)base, size);
if (res) {
memcpy(out_buf, base, size);
num_read = size;
}
if (bytes_read != NULL)
*bytes_read = num_read;
return res;
}
bool
safe_read_ex(const void *base, size_t size, void *out_buf, size_t *bytes_read)
{
STATS_INC(num_safe_reads);
/* XXX i#350: we'd like to always use safe_read_fast() and remove this extra
* call layer, but safe_read_fast() requires fault handling to be set up.
* We do set up an early signal handler in d_r_os_init(),
* but there is still a window prior to that with no handler.
*/
if (!fault_handling_initialized) {
return safe_read_via_query(base, size, out_buf, bytes_read);
} else {
return safe_read_fast(base, size, out_buf, bytes_read);
}
}
bool
safe_read_if_fast(const void *base, size_t size, void *out_buf)
{
if (!fault_handling_initialized) {
memcpy(out_buf, base, size);
return true;
} else {
return safe_read_ex(base, size, out_buf, NULL);
}
}
/* FIXME - fold this together with safe_read_ex() (is a lot of places to update) */
bool
d_r_safe_read(const void *base, size_t size, void *out_buf)
{
return safe_read_ex(base, size, out_buf, NULL);
}
bool
safe_write_ex(void *base, size_t size, const void *in_buf, size_t *bytes_written)
{
return safe_write_try_except(base, size, in_buf, bytes_written);
}
/* is_readable_without_exception checks to see that all bytes with addresses
* from pc to pc+size-1 are readable and that reading from there won't
* generate an exception. If 'query_os' is true, check what the os thinks
* the prot bits are instead of using the all memory list.
*/
static bool
is_readable_without_exception_internal(const byte *pc, size_t size, bool query_os)
{
uint prot = MEMPROT_NONE;
byte *check_pc = (byte *)ALIGN_BACKWARD(pc, PAGE_SIZE);
if (size > ((byte *)POINTER_MAX - pc))
size = (byte *)POINTER_MAX - pc;
do {
bool rc = query_os ? get_memory_info_from_os(check_pc, NULL, NULL, &prot)
: get_memory_info(check_pc, NULL, NULL, &prot);
if (!rc || !TESTANY(MEMPROT_READ | MEMPROT_EXEC, prot))
return false;
if (POINTER_OVERFLOW_ON_ADD(check_pc, PAGE_SIZE))
break;
check_pc += PAGE_SIZE;
} while (check_pc < pc + size);
return true;
}
bool
is_readable_without_exception(const byte *pc, size_t size)
{
/* case 9745 / i#853: We've had problems with all_memory_areas not being
* accurate in the past. Parsing proc maps is too slow for some apps, so we
* use a runtime option.
*/
bool query_os = IF_MEMQUERY_ELSE(true, !DYNAMO_OPTION(use_all_memory_areas));
return is_readable_without_exception_internal(pc, size, query_os);
}
/* Identical to is_readable_without_exception except that the os is queried
* for info on the indicated region */
bool
is_readable_without_exception_query_os(byte *pc, size_t size)
{
return is_readable_without_exception_internal(pc, size, true);
}
bool
is_readable_without_exception_query_os_noblock(byte *pc, size_t size)
{
if (memquery_from_os_will_block())
return false;
return is_readable_without_exception_internal(pc, size, true);
}
bool
is_user_address(byte *pc)
{
/* FIXME: NYI */
/* note returning true will always skip the case 9022 logic on Linux */
return true;
}
/* change protections on memory region starting at pc of length length
* this does not update the all memory area info
*/
bool
os_set_protection(byte *pc, size_t length, uint prot /*MEMPROT_*/)
{
app_pc start_page = (app_pc)PAGE_START(pc);
uint num_bytes = ALIGN_FORWARD(length + (pc - start_page), PAGE_SIZE);
long res = 0;
uint flags = memprot_to_osprot(prot);
#ifdef IA32_ON_IA64
LOG(THREAD_GET, LOG_VMAREAS, 1, "protection change not supported on IA64\n");
LOG(THREAD_GET, LOG_VMAREAS, 1,
" attempted change_prot(" PFX ", " PIFX ", %s) => "
"mprotect(" PFX ", " PIFX ")==%d pages\n",
pc, length, memprot_string(prot), start_page, num_bytes, num_bytes / PAGE_SIZE);
#else
DOSTATS({
/* once on each side of prot, to get on right side of writability */
if (!TEST(PROT_WRITE, flags)) {
STATS_INC(protection_change_calls);
STATS_ADD(protection_change_pages, num_bytes / PAGE_SIZE);
}
});
res = mprotect_syscall((void *)start_page, num_bytes, flags);
if (res != 0)
return false;
LOG(THREAD_GET, LOG_VMAREAS, 3,
"change_prot(" PFX ", " PIFX ", %s) => "
"mprotect(" PFX ", " PIFX ", %d)==%d pages\n",
pc, length, memprot_string(prot), start_page, num_bytes, flags,
num_bytes / PAGE_SIZE);
#endif
DOSTATS({
/* once on each side of prot, to get on right side of writability */
if (TEST(PROT_WRITE, flags)) {
STATS_INC(protection_change_calls);
STATS_ADD(protection_change_pages, num_bytes / PAGE_SIZE);
}
});
return true;
}
/* change protections on memory region starting at pc of length length */
bool
set_protection(byte *pc, size_t length, uint prot /*MEMPROT_*/)
{
if (os_set_protection(pc, length, prot) == false)
return false;
#ifndef HAVE_MEMINFO_QUERY
else {
app_pc start_page = (app_pc)PAGE_START(pc);
uint num_bytes = ALIGN_FORWARD(length + (pc - start_page), PAGE_SIZE);
memcache_update_locked(start_page, start_page + num_bytes, prot,
-1 /*type unchanged*/, true /*exists*/);
}
#endif
return true;
}
/* change protections on memory region starting at pc of length length */
bool
change_protection(byte *pc, size_t length, bool writable)
{
if (writable)
return make_writable(pc, length);
else
make_unwritable(pc, length);
return true;
}
/* make pc's page writable */
bool
make_writable(byte *pc, size_t size)
{
long res;
app_pc start_page = (app_pc)PAGE_START(pc);
size_t prot_size = (size == 0) ? PAGE_SIZE : size;
uint prot = PROT_EXEC | PROT_READ | PROT_WRITE;
/* if can get current protection then keep old read/exec flags.
* this is crucial on modern linux kernels which refuse to mark stack +x.
*/
if (!is_in_dynamo_dll(pc) /*avoid allmem assert*/ &&
#ifdef STATIC_LIBRARY
/* FIXME i#975: is_in_dynamo_dll() is always false for STATIC_LIBRARY,
* but we can't call get_memory_info() until allmem is initialized. Our
* uses before then are for patching x86.asm, which is OK.
*/
IF_NO_MEMQUERY(memcache_initialized() &&)
#endif
get_memory_info(pc, NULL, NULL, &prot))
prot |= PROT_WRITE;
ASSERT(start_page == pc && ALIGN_FORWARD(size, PAGE_SIZE) == size);
#ifdef IA32_ON_IA64
LOG(THREAD_GET, LOG_VMAREAS, 1, "protection change not supported on IA64\n");
LOG(THREAD_GET, LOG_VMAREAS, 3,
"attempted make_writable: pc " PFX " -> " PFX "-" PFX "\n", pc, start_page,
start_page + prot_size);
#else
res = mprotect_syscall((void *)start_page, prot_size, prot);
LOG(THREAD_GET, LOG_VMAREAS, 3, "make_writable: pc " PFX " -> " PFX "-" PFX " %d\n",
pc, start_page, start_page + prot_size, res);
ASSERT(res == 0);
if (res != 0)
return false;
#endif
STATS_INC(protection_change_calls);
STATS_ADD(protection_change_pages, size / PAGE_SIZE);
#ifndef HAVE_MEMINFO_QUERY
/* update all_memory_areas list with the protection change */
if (memcache_initialized()) {
memcache_update_locked(start_page, start_page + prot_size,
osprot_to_memprot(prot), -1 /*type unchanged*/,
true /*exists*/);
}
#endif
return true;
}
/* like make_writable but adds COW */
bool
make_copy_on_writable(byte *pc, size_t size)
{
/* FIXME: for current usage this should be fine */
return make_writable(pc, size);
}
/* make pc's page unwritable */
void
make_unwritable(byte *pc, size_t size)
{
long res;
app_pc start_page = (app_pc)PAGE_START(pc);
size_t prot_size = (size == 0) ? PAGE_SIZE : size;
uint prot = PROT_EXEC | PROT_READ;
/* if can get current protection then keep old read/exec flags.
* this is crucial on modern linux kernels which refuse to mark stack +x.
*/
if (!is_in_dynamo_dll(pc) /*avoid allmem assert*/ &&
#ifdef STATIC_LIBRARY
/* FIXME i#975: is_in_dynamo_dll() is always false for STATIC_LIBRARY,
* but we can't call get_memory_info() until allmem is initialized. Our
* uses before then are for patching x86.asm, which is OK.
*/
IF_NO_MEMQUERY(memcache_initialized() &&)
#endif
get_memory_info(pc, NULL, NULL, &prot))
prot &= ~PROT_WRITE;
ASSERT(start_page == pc && ALIGN_FORWARD(size, PAGE_SIZE) == size);
/* inc stats before making unwritable, in case messing w/ data segment */
STATS_INC(protection_change_calls);
STATS_ADD(protection_change_pages, size / PAGE_SIZE);
#ifdef IA32_ON_IA64
LOG(THREAD_GET, LOG_VMAREAS, 1, "protection change not supported on IA64\n");
LOG(THREAD_GET, LOG_VMAREAS, 3,
"attempted make_writable: pc " PFX " -> " PFX "-" PFX "\n", pc, start_page,
start_page + prot_size);
#else
res = mprotect_syscall((void *)start_page, prot_size, prot);
LOG(THREAD_GET, LOG_VMAREAS, 3, "make_unwritable: pc " PFX " -> " PFX "-" PFX "\n",
pc, start_page, start_page + prot_size);
ASSERT(res == 0);
# ifndef HAVE_MEMINFO_QUERY
/* update all_memory_areas list with the protection change */
if (memcache_initialized()) {
memcache_update_locked(start_page, start_page + prot_size,
osprot_to_memprot(prot), -1 /*type unchanged*/,
false /*!exists*/);
}
# endif
#endif
}
/****************************************************************************/
/* SYSTEM CALLS */
/* SYS_ defines are in /usr/include/bits/syscall.h
* numbers used by libc are in /usr/include/asm/unistd.h
* kernel defines are in /usr/src/linux-2.4/include/asm-i386/unistd.h
* kernel function names are in /usr/src/linux/arch/i386/kernel/entry.S
*
* For now, we've copied the SYS/NR defines from syscall.h and unistd.h
* and put them in our own local syscall.h.
*/
/* num_raw should be the xax register value.
* For a live system call, dcontext_live should be passed (for examining
* the dcontext->last_exit and exit_reason flags); otherwise, gateway should
* be passed.
*/
int
os_normalized_sysnum(int num_raw, instr_t *gateway, dcontext_t *dcontext)
{
#ifdef MACOS
/* The x64 encoding indicates the syscall type in the top 8 bits.
* We drop the 0x2000000 for BSD so we can use the SYS_ enum constants.
* That leaves 0x1000000 for Mach and 0x3000000 for Machdep.
* On 32-bit, a different encoding is used: we transform that
* to the x64 encoding minus BSD.
*/
int interrupt = 0;
int num = 0;
if (gateway != NULL) {
if (instr_is_interrupt(gateway))
interrupt = instr_get_interrupt_number(gateway);
} else {
ASSERT(dcontext != NULL);
if (TEST(LINK_SPECIAL_EXIT, dcontext->last_exit->flags)) {
if (dcontext->upcontext.upcontext.exit_reason ==
EXIT_REASON_NI_SYSCALL_INT_0x81)
interrupt = 0x81;
else {
ASSERT(dcontext->upcontext.upcontext.exit_reason ==
EXIT_REASON_NI_SYSCALL_INT_0x82);
interrupt = 0x82;
}
}
}
# ifdef X64
if (num_raw >> 24 == 0x2)
return (int)(num_raw & 0xffffff); /* Drop BSD bit */
else
num = (int)num_raw; /* Keep Mach and Machdep bits */
# else
if ((ptr_int_t)num_raw < 0) /* Mach syscall */
return (SYSCALL_NUM_MARKER_MACH | -(int)num_raw);
else {
/* Bottom 16 bits are the number, top are arg size. */
num = (int)(num_raw & 0xffff);
}
# endif
if (interrupt == 0x81)
num |= SYSCALL_NUM_MARKER_MACH;
else if (interrupt == 0x82)
num |= SYSCALL_NUM_MARKER_MACHDEP;
return num;
#else
return num_raw;
#endif
}
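/* Returns false for system calls DR must observe or mediate (process/thread
* lifecycle, memory, signals, fds, TLS, etc.); true means the syscall may
* execute without DR's involvement.
*/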
static bool
ignorable_system_call_normalized(int num)
{
switch (num) {
#if defined(SYS_exit_group)
case SYS_exit_group:
#endif
case SYS_exit:
#ifdef MACOS
case SYS_bsdthread_terminate:
#endif
#ifdef LINUX
case SYS_brk:
# ifdef SYS_uselib
case SYS_uselib:
# endif
#endif
#if defined(X64) || !defined(ARM)
case SYS_mmap:
#endif
#if !defined(X64) && !defined(MACOS)
case SYS_mmap2:
#endif
case SYS_munmap:
#ifdef LINUX
case SYS_mremap:
#endif
case SYS_mprotect:
#ifdef ANDROID
case SYS_prctl:
#endif
case SYS_execve:
#ifdef LINUX
case SYS_clone:
#elif defined(MACOS)
case SYS_bsdthread_create:
case SYS_posix_spawn:
#endif
#ifdef SYS_fork
case SYS_fork:
#endif
#ifdef SYS_vfork
case SYS_vfork:
#endif
case SYS_kill:
#if defined(SYS_tkill)
case SYS_tkill:
#endif
#if defined(SYS_tgkill)
case SYS_tgkill:
#endif
#if defined(LINUX) && !defined(X64) && !defined(ARM)
case SYS_signal:
#endif
#ifdef MACOS
case SYS_sigsuspend_nocancel:
#endif
#if !defined(X64) || defined(MACOS)
case SYS_sigaction:
case SYS_sigsuspend:
case SYS_sigpending:
case SYS_sigreturn:
case SYS_sigprocmask:
#endif
#ifdef LINUX
case SYS_rt_sigreturn:
case SYS_rt_sigaction:
case SYS_rt_sigprocmask:
case SYS_rt_sigpending:
case SYS_rt_sigtimedwait:
case SYS_rt_sigqueueinfo:
case SYS_rt_sigsuspend:
# ifdef SYS_signalfd
case SYS_signalfd:
# endif
case SYS_signalfd4:
#endif
case SYS_sigaltstack:
#if defined(LINUX) && !defined(X64) && !defined(ARM)
case SYS_sgetmask:
case SYS_ssetmask:
#endif
case SYS_setitimer:
case SYS_getitimer:
#ifdef MACOS
case SYS_close_nocancel:
#endif
case SYS_close:
#ifdef SYS_dup2
case SYS_dup2:
#endif
#ifdef LINUX
case SYS_dup3:
#endif
#ifdef MACOS
case SYS_fcntl_nocancel:
#endif
case SYS_fcntl:
#if defined(X64) || !defined(ARM)
case SYS_getrlimit:
#endif
#if defined(LINUX) && !defined(X64)
case SYS_ugetrlimit:
#endif
case SYS_setrlimit:
#ifdef LINUX
case SYS_prlimit64:
#endif
#if defined(LINUX) && defined(X86)
/* i#784: app may have behavior relying on SIGALRM */
case SYS_alarm:
#endif
/* i#107: syscall might change/query app's seg memory
* need stop app from clobbering our GDT slot.
*/
#if defined(LINUX) && defined(X86) && defined(X64)
case SYS_arch_prctl:
#endif
#if defined(LINUX) && defined(X86)
case SYS_set_thread_area:
case SYS_get_thread_area:
/* FIXME: we might add SYS_modify_ldt later. */
#endif
#if defined(LINUX) && defined(ARM)
/* syscall changes app's thread register */
case SYS_set_tls:
case SYS_cacheflush:
#endif
#if defined(LINUX)
/* syscalls change procsigmask */
case SYS_pselect6:
case SYS_ppoll:
case SYS_epoll_pwait:
#endif
return false;
#ifdef LINUX
# ifdef SYS_readlink
case SYS_readlink:
# endif
case SYS_readlinkat: return !DYNAMO_OPTION(early_inject);
#endif
default:
#ifdef VMX86_SERVER
if (is_vmkuw_sysnum(num))
return vmkuw_ignorable_system_call(num);
#endif
return true;
}
}
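/* E.g., SYS_getpid falls through to the default case above and is considered
 * ignorable, while SYS_mmap returns false so that the memory-region
 * bookkeeping in pre_system_call()/post_system_call() stays accurate.
 */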
bool
ignorable_system_call(int num_raw, instr_t *gateway, dcontext_t *dcontext_live)
{
return ignorable_system_call_normalized(
os_normalized_sysnum(num_raw, gateway, dcontext_live));
}
typedef struct {
unsigned long addr;
unsigned long len;
unsigned long prot;
unsigned long flags;
unsigned long fd;
unsigned long offset;
} mmap_arg_struct_t;
static inline reg_t *
sys_param_addr(dcontext_t *dcontext, int num)
{
/* we force-inline get_mcontext() and so don't take it as a param */
priv_mcontext_t *mc = get_mcontext(dcontext);
#if defined(X86) && defined(X64)
switch (num) {
case 0: return &mc->xdi;
case 1: return &mc->xsi;
case 2: return &mc->xdx;
case 3: return &mc->r10; /* since rcx holds retaddr for syscall instr */
case 4: return &mc->r8;
case 5: return &mc->r9;
default: CLIENT_ASSERT(false, "invalid system call parameter number");
}
#else
# ifdef MACOS
/* XXX: if we don't end up using dcontext->sys_was_int here, we could
* make that field Linux-only.
*/
/* For 32-bit, the args are passed on the stack, above a retaddr slot
* (regardless of whether using a sysenter or int gateway).
*/
return ((reg_t *)mc->esp) + 1 /*retaddr*/ + num;
# endif
/* even for vsyscall where ecx (syscall) or esp (sysenter) are saved into
* ebp, the original parameter registers are not yet changed pre-syscall,
* except for ebp, which is pushed on the stack:
* 0xffffe400 55 push %ebp %esp -> %esp (%esp)
* 0xffffe401 89 cd mov %ecx -> %ebp
* 0xffffe403 0f 05 syscall -> %ecx
*
* 0xffffe400 51 push %ecx %esp -> %esp (%esp)
* 0xffffe401 52 push %edx %esp -> %esp (%esp)
* 0xffffe402 55 push %ebp %esp -> %esp (%esp)
* 0xffffe403 89 e5 mov %esp -> %ebp
* 0xffffe405 0f 34 sysenter -> %esp
*/
switch (num) {
case 0: return &mc->IF_X86_ELSE(xbx, r0);
case 1: return &mc->IF_X86_ELSE(xcx, r1);
case 2: return &mc->IF_X86_ELSE(xdx, r2);
case 3: return &mc->IF_X86_ELSE(xsi, r3);
case 4: return &mc->IF_X86_ELSE(xdi, r4);
/* FIXME: do a safe_read: but what about performance?
* See the #if 0 below, as well. */
case 5:
return IF_X86_ELSE((dcontext->sys_was_int ? &mc->xbp : ((reg_t *)mc->xsp)),
&mc->r5);
# ifdef ARM
/* AArch32 supposedly has 7 args in some cases. */
case 6: return &mc->r6;
# endif
default: CLIENT_ASSERT(false, "invalid system call parameter number");
}
#endif
return 0;
}
static inline reg_t
sys_param(dcontext_t *dcontext, int num)
{
return *sys_param_addr(dcontext, num);
}
void
set_syscall_param(dcontext_t *dcontext, int param_num, reg_t new_value)
{
*sys_param_addr(dcontext, param_num) = new_value;
}
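/* A minimal usage sketch for the accessors above (the syscall and argument
 * choice are illustrative): in a pre-syscall handler one might do
 *   uint fd = (uint)sys_param(dcontext, 0);        // e.g. SYS_close's fd
 *   set_syscall_param(dcontext, 0, (reg_t)new_fd); // redirect it
 * On Linux x86-64, param 0 maps to rdi per the table above; on 32-bit MacOS
 * it is read off the app stack.
 */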
static inline bool
syscall_successful(priv_mcontext_t *mc, int normalized_sysnum)
{
#ifdef MACOS
if (TEST(SYSCALL_NUM_MARKER_MACH, normalized_sysnum)) {
        /* XXX: Mach syscalls vary: for some, KERN_SUCCESS=0 means success;
         * for others that return a mach_port_t, 0 means failure (I think?).
         * We defer to drsyscall.
         */
return ((ptr_int_t)MCXT_SYSCALL_RES(mc) >= 0);
} else
return !TEST(EFLAGS_CF, mc->eflags);
#else
if (normalized_sysnum == IF_X64_ELSE(SYS_mmap, SYS_mmap2) ||
# if !defined(ARM) && !defined(X64)
normalized_sysnum == SYS_mmap ||
# endif
normalized_sysnum == SYS_mremap)
return mmap_syscall_succeeded((byte *)MCXT_SYSCALL_RES(mc));
return ((ptr_int_t)MCXT_SYSCALL_RES(mc) >= 0);
#endif
}
/* For non-Mac, there is no separate "success" indicator, so this simply sets
 * the return value: the caller can even pass -errno. For Mac, this clears CF
 * and sets xax. To return a 64-bit value in 32-bit mode, the caller must
 * explicitly set xdx as well (we don't always do so b/c syscalls that return
 * only 32-bit values do not touch xdx).
 */
static inline void
set_success_return_val(dcontext_t *dcontext, reg_t val)
{
/* since always coming from d_r_dispatch now, only need to set mcontext */
priv_mcontext_t *mc = get_mcontext(dcontext);
#ifdef MACOS
/* On MacOS, success is determined by CF, except for Mach syscalls, but
* there it doesn't hurt to set CF.
*/
mc->eflags &= ~(EFLAGS_CF);
#endif
MCXT_SYSCALL_RES(mc) = val;
}
/* Always pass a positive value for errno */
static inline void
set_failure_return_val(dcontext_t *dcontext, uint errno_val)
{
priv_mcontext_t *mc = get_mcontext(dcontext);
#ifdef MACOS
/* On MacOS, success is determined by CF, and errno is positive */
mc->eflags |= EFLAGS_CF;
MCXT_SYSCALL_RES(mc) = errno_val;
#else
MCXT_SYSCALL_RES(mc) = -(int)errno_val;
#endif
}
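/* Sketch of the typical pre-syscall "fail without executing" pattern built on
 * the helpers above (cf. handle_close_pre() later in this file):
 *   execute_syscall = false;
 *   set_failure_return_val(dcontext, EBADF);
 *   DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
 */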
#ifdef CLIENT_INTERFACE
DR_API
reg_t
dr_syscall_get_param(void *drcontext, int param_num)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
CLIENT_ASSERT(dcontext->client_data->in_pre_syscall,
"dr_syscall_get_param() can only be called from pre-syscall event");
return sys_param(dcontext, param_num);
}
DR_API
void
dr_syscall_set_param(void *drcontext, int param_num, reg_t new_value)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
CLIENT_ASSERT(dcontext->client_data->in_pre_syscall ||
dcontext->client_data->in_post_syscall,
"dr_syscall_set_param() can only be called from a syscall event");
*sys_param_addr(dcontext, param_num) = new_value;
}
DR_API
reg_t
dr_syscall_get_result(void *drcontext)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
CLIENT_ASSERT(dcontext->client_data->in_post_syscall,
"dr_syscall_get_param() can only be called from post-syscall event");
return MCXT_SYSCALL_RES(get_mcontext(dcontext));
}
DR_API
bool
dr_syscall_get_result_ex(void *drcontext, dr_syscall_result_info_t *info INOUT)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
priv_mcontext_t *mc = get_mcontext(dcontext);
CLIENT_ASSERT(dcontext->client_data->in_post_syscall,
"only call dr_syscall_get_param_ex() from post-syscall event");
CLIENT_ASSERT(info != NULL, "invalid parameter");
CLIENT_ASSERT(info->size == sizeof(*info), "invalid dr_syscall_result_info_t size");
if (info->size != sizeof(*info))
return false;
info->value = MCXT_SYSCALL_RES(mc);
info->succeeded = syscall_successful(mc, dcontext->sys_num);
if (info->use_high) {
/* MacOS has some 32-bit syscalls that return 64-bit values in
* xdx:xax, but the other syscalls don't clear xdx, so we can't easily
* return a 64-bit value all the time.
*/
IF_X86_ELSE({ info->high = mc->xdx; }, { ASSERT_NOT_REACHED(); });
}
if (info->use_errno) {
if (info->succeeded)
info->errno_value = 0;
else {
info->errno_value = (uint)IF_LINUX(-(int)) MCXT_SYSCALL_RES(mc);
}
}
return true;
}
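/* Hedged sketch of client-side usage (client code, not part of this file):
 *   dr_syscall_result_info_t info = { 0 };
 *   info.size = sizeof(info);
 *   info.use_errno = true;
 *   if (dr_syscall_get_result_ex(drcontext, &info) && !info.succeeded)
 *       dr_printf("syscall failed: errno=%d\n", info.errno_value);
 */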
DR_API
void
dr_syscall_set_result(void *drcontext, reg_t value)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
CLIENT_ASSERT(dcontext->client_data->in_pre_syscall ||
dcontext->client_data->in_post_syscall,
"dr_syscall_set_result() can only be called from a syscall event");
/* For non-Mac, the caller can still pass -errno and this will work */
set_success_return_val(dcontext, value);
}
DR_API
bool
dr_syscall_set_result_ex(void *drcontext, dr_syscall_result_info_t *info)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
priv_mcontext_t *mc = get_mcontext(dcontext);
CLIENT_ASSERT(dcontext->client_data->in_pre_syscall ||
dcontext->client_data->in_post_syscall,
"dr_syscall_set_result() can only be called from a syscall event");
CLIENT_ASSERT(info->size == sizeof(*info), "invalid dr_syscall_result_info_t size");
if (info->size != sizeof(*info))
return false;
if (info->use_errno) {
if (info->succeeded) {
/* a weird case but we let the user combine these */
set_success_return_val(dcontext, info->errno_value);
} else
set_failure_return_val(dcontext, info->errno_value);
} else {
if (info->succeeded)
set_success_return_val(dcontext, info->value);
else {
/* use this to set CF, even though it might negate the value */
set_failure_return_val(dcontext, (uint)info->value);
/* now set the value, overriding set_failure_return_val() */
MCXT_SYSCALL_RES(mc) = info->value;
}
if (info->use_high) {
/* MacOS has some 32-bit syscalls that return 64-bit values in
* xdx:xax.
*/
IF_X86_ELSE({ mc->xdx = info->high; }, { ASSERT_NOT_REACHED(); });
}
}
return true;
}
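/* Corresponding setter sketch: a client failing a syscall with EACCES from its
 * pre-syscall event (the callback must also skip the actual syscall, e.g. by
 * returning false from the pre-syscall event):
 *   dr_syscall_result_info_t info = { 0 };
 *   info.size = sizeof(info);
 *   info.succeeded = false;
 *   info.use_errno = true;
 *   info.errno_value = EACCES;
 *   dr_syscall_set_result_ex(drcontext, &info);
 */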
DR_API
void
dr_syscall_set_sysnum(void *drcontext, int new_num)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
priv_mcontext_t *mc = get_mcontext(dcontext);
CLIENT_ASSERT(dcontext->client_data->in_pre_syscall ||
dcontext->client_data->in_post_syscall,
"dr_syscall_set_sysnum() can only be called from a syscall event");
MCXT_SYSNUM_REG(mc) = new_num;
}
DR_API
void
dr_syscall_invoke_another(void *drcontext)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
CLIENT_ASSERT(dcontext->client_data->in_post_syscall,
"dr_syscall_invoke_another() can only be called from post-syscall "
"event");
LOG(THREAD, LOG_SYSCALLS, 2, "invoking additional syscall on client request\n");
dcontext->client_data->invoke_another_syscall = true;
# ifdef X86
if (get_syscall_method() == SYSCALL_METHOD_SYSENTER) {
priv_mcontext_t *mc = get_mcontext(dcontext);
/* restore xbp to xsp */
mc->xbp = mc->xsp;
}
# endif /* X86 */
/* for x64 we don't need to copy xcx into r10 b/c we use r10 as our param */
}
#endif /* CLIENT_INTERFACE */
static inline bool
is_thread_create_syscall_helper(ptr_uint_t sysnum, ptr_uint_t flags)
{
#ifdef MACOS
/* XXX i#1403: we need earlier injection to intercept
* bsdthread_register in order to capture workqueue threads.
*/
return (sysnum == SYS_bsdthread_create || sysnum == SYS_vfork);
#else
# ifdef SYS_vfork
if (sysnum == SYS_vfork)
return true;
# endif
# ifdef LINUX
if (sysnum == SYS_clone && TEST(CLONE_VM, flags))
return true;
# endif
return false;
#endif
}
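/* E.g., on Linux the clone issued by pthread_create (which passes CLONE_VM)
 * counts as a thread create here, while a bare fork-style clone without
 * CLONE_VM does not: that copies the address space, i.e., a new process.
 */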
bool
is_thread_create_syscall(dcontext_t *dcontext)
{
priv_mcontext_t *mc = get_mcontext(dcontext);
return is_thread_create_syscall_helper(MCXT_SYSNUM_REG(mc), sys_param(dcontext, 0));
}
bool
was_thread_create_syscall(dcontext_t *dcontext)
{
return is_thread_create_syscall_helper(dcontext->sys_num,
/* flags in param0 */
dcontext->sys_param0);
}
bool
is_sigreturn_syscall_number(int sysnum)
{
#ifdef MACOS
return sysnum == SYS_sigreturn;
#else
return (IF_NOT_X64(sysnum == SYS_sigreturn ||) sysnum == SYS_rt_sigreturn);
#endif
}
bool
is_sigreturn_syscall(dcontext_t *dcontext)
{
priv_mcontext_t *mc = get_mcontext(dcontext);
return is_sigreturn_syscall_number(MCXT_SYSNUM_REG(mc));
}
bool
was_sigreturn_syscall(dcontext_t *dcontext)
{
return is_sigreturn_syscall_number(dcontext->sys_num);
}
/* process a signal this process/thread is sending to itself */
static void
handle_self_signal(dcontext_t *dcontext, uint sig)
{
/* FIXME PR 297903: watch for all DEFAULT_TERMINATE signals,
* and for any thread in the group, not just self.
*
* FIXME PR 297033: watch for SIGSTOP and SIGCONT.
*
* With -intercept_all_signals, we only need to watch for SIGKILL
* and SIGSTOP here, and we avoid the FIXMEs below. If it's fine
* for DR not to clean up on a SIGKILL, then SIGSTOP is all that's
* left (at least once we have PR 297033 and are intercepting the
* various STOP variations and CONT).
*/
if (sig == SIGABRT && !DYNAMO_OPTION(intercept_all_signals)) {
LOG(GLOBAL, LOG_TOP | LOG_SYSCALLS, 1,
"thread " TIDFMT " sending itself a SIGABRT\n", d_r_get_thread_id());
KSTOP(num_exits_dir_syscall);
/* FIXME: need to check whether app has a handler for SIGABRT! */
/* FIXME PR 211180/6723: this will do SYS_exit rather than the SIGABRT.
* Should do set_default_signal_action(SIGABRT) (and set a flag so
* no races w/ another thread re-installing?) and then SYS_kill.
*/
block_cleanup_and_terminate(dcontext, SYSNUM_EXIT_THREAD, -1, 0,
(is_last_app_thread() && !dynamo_exited),
IF_MACOS_ELSE(dcontext->thread_port, 0), 0);
ASSERT_NOT_REACHED();
}
}
/***************************************************************************
* EXECVE
*/
/* when adding here, also add to the switch in handle_execve if necessary */
enum {
ENV_PROP_RUNUNDER,
ENV_PROP_OPTIONS,
ENV_PROP_EXECVE_LOGDIR,
ENV_PROP_EXE_PATH,
ENV_PROP_CONFIGDIR,
};
static const char *const env_to_propagate[] = {
/* these must line up with the enum */
DYNAMORIO_VAR_RUNUNDER,
DYNAMORIO_VAR_OPTIONS,
/* DYNAMORIO_VAR_EXECVE_LOGDIR is different from DYNAMORIO_VAR_LOGDIR:
* - DYNAMORIO_VAR_LOGDIR: a parent dir inside which a new dir will be created;
     * - DYNAMORIO_VAR_EXECVE_LOGDIR: the same subdir used by the pre-execve process.
* Xref comment in create_log_dir about their precedence.
*/
DYNAMORIO_VAR_EXECVE_LOGDIR,
/* i#909: needed for early injection */
DYNAMORIO_VAR_EXE_PATH,
/* these will only be propagated if they exist */
DYNAMORIO_VAR_CONFIGDIR,
};
#define NUM_ENV_TO_PROPAGATE (sizeof(env_to_propagate) / sizeof(env_to_propagate[0]))
/* Called at pre-SYS_execve to append DR vars in the target process env vars list.
 * For late injection via libdrpreload, we call this for *all* children, because
 * even if -no_follow_children is specified, a whitelist can still ask for takeover,
 * and it is libdrpreload that checks the whitelist.
* For -early, however, we check the config ahead of time and only call this routine
* if we in fact want to inject.
* XXX i#1679: these parent vs child differences bring up corner cases of which
* config dir takes precedence (if the child clears the HOME env var, e.g.).
*/
static void
add_dr_env_vars(dcontext_t *dcontext, char *inject_library_path, const char *app_path)
{
char **envp = (char **)sys_param(dcontext, 2);
int idx, j, preload = -1, ldpath = -1;
int num_old, num_new, sz;
bool need_var[NUM_ENV_TO_PROPAGATE];
int prop_idx[NUM_ENV_TO_PROPAGATE];
bool ldpath_us = false, preload_us = false;
char **new_envp, *var, *old;
/* check if any var needs to be propagated */
for (j = 0; j < NUM_ENV_TO_PROPAGATE; j++) {
prop_idx[j] = -1;
if (get_config_val(env_to_propagate[j]) == NULL)
need_var[j] = false;
else
need_var[j] = true;
}
/* Special handling for DYNAMORIO_VAR_EXECVE_LOGDIR:
* we only need it if follow_children is true and PROCESS_DIR exists.
*/
if (DYNAMO_OPTION(follow_children) && get_log_dir(PROCESS_DIR, NULL, NULL))
need_var[ENV_PROP_EXECVE_LOGDIR] = true;
else
need_var[ENV_PROP_EXECVE_LOGDIR] = false;
if (DYNAMO_OPTION(early_inject))
need_var[ENV_PROP_EXE_PATH] = true;
    /* iterate over the env vars in the target process */
if (envp == NULL) {
LOG(THREAD, LOG_SYSCALLS, 3, "\tenv is NULL\n");
idx = 0;
} else {
for (idx = 0; envp[idx] != NULL; idx++) {
/* execve env vars should never be set here */
ASSERT(strstr(envp[idx], DYNAMORIO_VAR_EXECVE) != envp[idx]);
for (j = 0; j < NUM_ENV_TO_PROPAGATE; j++) {
if (strstr(envp[idx], env_to_propagate[j]) == envp[idx]) {
                    /* If the env var conflicts with the cfg, we assume the env
                     * var is for DR usage only, and replace it with the cfg value.
                     */
prop_idx[j] = idx; /* remember the index for replacing later */
break;
}
}
if (!DYNAMO_OPTION(early_inject) &&
strstr(envp[idx], "LD_LIBRARY_PATH=") == envp[idx]) {
ldpath = idx;
if (strstr(envp[idx], inject_library_path) != NULL)
ldpath_us = true;
}
if (!DYNAMO_OPTION(early_inject) &&
strstr(envp[idx], "LD_PRELOAD=") == envp[idx]) {
preload = idx;
if (strstr(envp[idx], DYNAMORIO_PRELOAD_NAME) != NULL &&
strstr(envp[idx], get_dynamorio_library_path()) != NULL) {
preload_us = true;
}
}
LOG(THREAD, LOG_SYSCALLS, 3, "\tenv %d: %s\n", idx, envp[idx]);
}
}
/* We want to add new env vars, so we create a new envp
* array. We have to deallocate them and restore the old
* envp if execve fails; if execve succeeds, the address
* space is reset so we don't need to do anything.
*/
num_old = idx;
    /* how many new env vars we need to add */
num_new = 2 + /* execve indicator var plus final NULL */
(DYNAMO_OPTION(early_inject)
? 0
: (((preload < 0) ? 1 : 0) + ((ldpath < 0) ? 1 : 0)));
for (j = 0; j < NUM_ENV_TO_PROPAGATE; j++) {
if ((DYNAMO_OPTION(follow_children) || j == ENV_PROP_EXE_PATH) && need_var[j] &&
prop_idx[j] < 0)
num_new++;
}
/* setup new envp */
new_envp =
heap_alloc(dcontext, sizeof(char *) * (num_old + num_new) HEAPACCT(ACCT_OTHER));
/* copy old envp */
memcpy(new_envp, envp, sizeof(char *) * num_old);
/* change/add preload and ldpath if necessary */
if (!DYNAMO_OPTION(early_inject) && !preload_us) {
int idx_preload;
LOG(THREAD, LOG_SYSCALLS, 1,
"WARNING: execve env does NOT preload DynamoRIO, forcing it!\n");
if (preload >= 0) {
/* replace the existing preload */
const char *dr_lib_path = get_dynamorio_library_path();
sz = strlen(envp[preload]) + strlen(DYNAMORIO_PRELOAD_NAME) +
strlen(dr_lib_path) + 3;
var = heap_alloc(dcontext, sizeof(char) * sz HEAPACCT(ACCT_OTHER));
old = envp[preload] + strlen("LD_PRELOAD=");
snprintf(var, sz, "LD_PRELOAD=%s %s %s", DYNAMORIO_PRELOAD_NAME, dr_lib_path,
old);
idx_preload = preload;
} else {
/* add new preload */
const char *dr_lib_path = get_dynamorio_library_path();
sz = strlen("LD_PRELOAD=") + strlen(DYNAMORIO_PRELOAD_NAME) +
strlen(dr_lib_path) + 2;
var = heap_alloc(dcontext, sizeof(char) * sz HEAPACCT(ACCT_OTHER));
snprintf(var, sz, "LD_PRELOAD=%s %s", DYNAMORIO_PRELOAD_NAME, dr_lib_path);
idx_preload = idx++;
}
*(var + sz - 1) = '\0'; /* null terminate */
new_envp[idx_preload] = var;
LOG(THREAD, LOG_SYSCALLS, 2, "\tnew env %d: %s\n", idx_preload,
new_envp[idx_preload]);
}
if (!DYNAMO_OPTION(early_inject) && !ldpath_us) {
int idx_ldpath;
if (ldpath >= 0) {
sz = strlen(envp[ldpath]) + strlen(inject_library_path) + 2;
var = heap_alloc(dcontext, sizeof(char) * sz HEAPACCT(ACCT_OTHER));
old = envp[ldpath] + strlen("LD_LIBRARY_PATH=");
snprintf(var, sz, "LD_LIBRARY_PATH=%s:%s", inject_library_path, old);
idx_ldpath = ldpath;
} else {
sz = strlen("LD_LIBRARY_PATH=") + strlen(inject_library_path) + 1;
var = heap_alloc(dcontext, sizeof(char) * sz HEAPACCT(ACCT_OTHER));
snprintf(var, sz, "LD_LIBRARY_PATH=%s", inject_library_path);
idx_ldpath = idx++;
}
*(var + sz - 1) = '\0'; /* null terminate */
new_envp[idx_ldpath] = var;
LOG(THREAD, LOG_SYSCALLS, 2, "\tnew env %d: %s\n", idx_ldpath,
new_envp[idx_ldpath]);
}
    /* propagate the DR env vars */
for (j = 0; j < NUM_ENV_TO_PROPAGATE; j++) {
const char *val = "";
if (!need_var[j])
continue;
if (!DYNAMO_OPTION(follow_children) && j != ENV_PROP_EXE_PATH)
continue;
switch (j) {
case ENV_PROP_RUNUNDER:
ASSERT(strcmp(env_to_propagate[j], DYNAMORIO_VAR_RUNUNDER) == 0);
            /* Must pass RUNUNDER_ALL to get the child injected if it has no app
             * config. If the rununder var is already set we assume it's set to 1.
*/
ASSERT((RUNUNDER_ON | RUNUNDER_ALL) == 0x3); /* else, update "3" */
val = "3";
break;
case ENV_PROP_OPTIONS:
ASSERT(strcmp(env_to_propagate[j], DYNAMORIO_VAR_OPTIONS) == 0);
val = option_string;
break;
case ENV_PROP_EXECVE_LOGDIR:
/* we use PROCESS_DIR for DYNAMORIO_VAR_EXECVE_LOGDIR */
ASSERT(strcmp(env_to_propagate[j], DYNAMORIO_VAR_EXECVE_LOGDIR) == 0);
ASSERT(get_log_dir(PROCESS_DIR, NULL, NULL));
break;
case ENV_PROP_EXE_PATH:
ASSERT(strcmp(env_to_propagate[j], DYNAMORIO_VAR_EXE_PATH) == 0);
val = app_path;
break;
default:
val = getenv(env_to_propagate[j]);
if (val == NULL)
val = "";
break;
}
if (j == ENV_PROP_EXECVE_LOGDIR) {
uint logdir_length;
get_log_dir(PROCESS_DIR, NULL, &logdir_length);
/* logdir_length includes the terminating NULL */
sz = strlen(DYNAMORIO_VAR_EXECVE_LOGDIR) + logdir_length + 1 /* '=' */;
var = heap_alloc(dcontext, sizeof(char) * sz HEAPACCT(ACCT_OTHER));
snprintf(var, sz, "%s=", DYNAMORIO_VAR_EXECVE_LOGDIR);
get_log_dir(PROCESS_DIR, var + strlen(var), &logdir_length);
} else {
sz = strlen(env_to_propagate[j]) + strlen(val) + 2 /* '=' + null */;
var = heap_alloc(dcontext, sizeof(char) * sz HEAPACCT(ACCT_OTHER));
snprintf(var, sz, "%s=%s", env_to_propagate[j], val);
}
*(var + sz - 1) = '\0'; /* null terminate */
prop_idx[j] = (prop_idx[j] >= 0) ? prop_idx[j] : idx++;
new_envp[prop_idx[j]] = var;
LOG(THREAD, LOG_SYSCALLS, 2, "\tnew env %d: %s\n", prop_idx[j],
new_envp[prop_idx[j]]);
}
if (!DYNAMO_OPTION(follow_children) && !DYNAMO_OPTION(early_inject)) {
if (prop_idx[ENV_PROP_RUNUNDER] >= 0) {
/* disable auto-following of this execve, yet still allow preload
* on other side to inject if config file exists.
* kind of hacky mangle here:
*/
ASSERT(!need_var[ENV_PROP_RUNUNDER]);
ASSERT(new_envp[prop_idx[ENV_PROP_RUNUNDER]][0] == 'D');
new_envp[prop_idx[ENV_PROP_RUNUNDER]][0] = 'X';
}
}
sz = strlen(DYNAMORIO_VAR_EXECVE) + 4;
/* we always pass this var to indicate "post-execve" */
var = heap_alloc(dcontext, sizeof(char) * sz HEAPACCT(ACCT_OTHER));
/* PR 458917: we overload this to also pass our gdt index */
ASSERT(os_tls_get_gdt_index(dcontext) < 100 &&
os_tls_get_gdt_index(dcontext) >= -1); /* only 2 chars allocated */
snprintf(var, sz, "%s=%02d", DYNAMORIO_VAR_EXECVE, os_tls_get_gdt_index(dcontext));
*(var + sz - 1) = '\0'; /* null terminate */
new_envp[idx++] = var;
LOG(THREAD, LOG_SYSCALLS, 2, "\tnew env %d: %s\n", idx - 1, new_envp[idx - 1]);
/* must end with NULL */
new_envp[idx++] = NULL;
ASSERT((num_new + num_old) == idx);
/* update syscall param */
*sys_param_addr(dcontext, 2) = (reg_t)new_envp; /* OUT */
/* store for reset in case execve fails, and for cleanup if
* this is a vfork thread
*/
dcontext->sys_param0 = (reg_t)envp;
dcontext->sys_param1 = (reg_t)new_envp;
}
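/* Illustrative result of the routine above (late injection with
 * -follow_children; the layout is a sketch, not a guaranteed ordering):
 *   app envp:  { "PATH=...", NULL }
 *   new_envp:  { "PATH=...",
 *                "LD_PRELOAD=<DYNAMORIO_PRELOAD_NAME> <dr library path>",
 *                "LD_LIBRARY_PATH=<inject_library_path>",
 *                "<DYNAMORIO_VAR_RUNUNDER>=3",
 *                "<DYNAMORIO_VAR_EXECVE>=NN",   (NN = our gdt index)
 *                NULL }
 */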
static ssize_t
script_file_reader(const char *pathname, void *buf, size_t count)
{
/* FIXME i#2090: Check file is executable. */
file_t file = os_open(pathname, OS_OPEN_READ);
size_t len;
if (file == INVALID_FILE)
return -1;
len = os_read(file, buf, count);
os_close(file);
return len;
}
/* For early injection, recognise when the executable is a script ("#!") and
* modify the syscall parameters to invoke a script interpreter instead. In
* this case we will have allocated memory here but we expect the caller to
* do a non-failing execve of libdynamorio.so and therefore not to have to
* free the memory. That is one reason for checking that the (final) script
* interpreter really is an executable binary.
* We recognise one error case here and return the non-zero error code (ELOOP)
* but in other cases we leave it up to the caller to detect the error, which
* it may do by attempting to exec the path natively, expecting this to fail,
* though there is the obvious danger that the file might have been modified
* just before the exec.
* We do not, and cannot easily, handle a file that is executable but not
* readable. Currently such files will be executed without DynamoRIO though
* in some situations it would be more helpful to stop with an error.
*
 * XXX: There is a minor transparency bug with malformed binaries. For example,
* execve can return EINVAL if the ELF executable has more than one PT_INTERP
* segment but we do not check this and so under DynamoRIO the error would be
* detected only after the exec, if we are following the child.
*
* FIXME i#2091: There is a memory leak if a script is recognised, and it is
* later decided not to inject (see where should_inject is set), and the exec
* fails, because in this case there is no mechanism for freeing the memory
* allocated in this function. This function should return sufficient information
* for the caller to free the memory, which it can do so before the exec if it
* reverts to the original syscall arguments and execs the script.
*/
static int
handle_execve_script(dcontext_t *dcontext)
{
char *fname = (char *)sys_param(dcontext, 0);
char **orig_argv = (char **)sys_param(dcontext, 1);
script_interpreter_t *script;
int ret = 0;
script = global_heap_alloc(sizeof(*script) HEAPACCT(ACCT_OTHER));
if (!find_script_interpreter(script, fname, script_file_reader))
goto free_and_return;
if (script->argc == 0) {
ret = ELOOP;
goto free_and_return;
}
/* Check that the final interpreter is an executable binary. */
{
file_t file = os_open(script->argv[0], OS_OPEN_READ);
bool is64;
if (file == INVALID_FILE)
goto free_and_return;
if (!module_file_is_module64(file, &is64, NULL)) {
os_close(file);
goto free_and_return;
}
}
{
size_t i, orig_argc = 0;
char **new_argv;
/* Concatenate new arguments and original arguments. */
while (orig_argv[orig_argc] != NULL)
++orig_argc;
if (orig_argc == 0)
orig_argc = 1;
new_argv = global_heap_alloc((script->argc + orig_argc + 1) *
sizeof(char *) HEAPACCT(ACCT_OTHER));
for (i = 0; i < script->argc; i++)
new_argv[i] = script->argv[i];
new_argv[script->argc] = fname; /* replaces orig_argv[0] */
for (i = 1; i < orig_argc; i++)
new_argv[script->argc + i] = orig_argv[i];
new_argv[script->argc + orig_argc] = NULL;
/* Modify syscall parameters. */
*sys_param_addr(dcontext, 0) = (reg_t)new_argv[0];
*sys_param_addr(dcontext, 1) = (reg_t)new_argv;
}
return 0;
free_and_return:
global_heap_free(script, sizeof(*script) HEAPACCT(ACCT_OTHER));
return ret;
}
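/* Worked example of the rewrite above: for a script /path/app.sh whose first
 * line is "#!/bin/sh -e" (so that find_script_interpreter() yields
 * argv = {"/bin/sh", "-e"}), an app call of
 *   execve("/path/app.sh", {"/path/app.sh", "arg1", NULL}, envp)
 * becomes
 *   execve("/bin/sh", {"/bin/sh", "-e", "/path/app.sh", "arg1", NULL}, envp)
 */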
static int
handle_execve(dcontext_t *dcontext)
{
/* in /usr/src/linux/arch/i386/kernel/process.c:
* asmlinkage int sys_execve(struct pt_regs regs) { ...
* error = do_execve(filename, (char **) regs.xcx, (char **) regs.xdx, ®s);
* in fs/exec.c:
* int do_execve(char * filename, char ** argv, char ** envp, struct pt_regs * regs)
*/
/* We need to make sure we get injected into the new image:
* we simply make sure LD_PRELOAD contains us, and that our directory
* is on LD_LIBRARY_PATH (seems not to work to put absolute paths in
* LD_PRELOAD).
* FIXME: this doesn't work for setuid programs
*
* For -follow_children we also pass the current DYNAMORIO_RUNUNDER and
* DYNAMORIO_OPTIONS and logdir to the new image to support a simple
* run-all-children model without bothering w/ setting up config files for
* children, and to support injecting across execve that does not
* preserve $HOME.
* FIXME i#287/PR 546544: we'll need to propagate DYNAMORIO_AUTOINJECT too
* once we use it in preload
*/
/* FIXME i#191: supposed to preserve things like pending signal
* set across execve: going to ignore for now
*/
char *fname;
bool x64 = IF_X64_ELSE(true, false);
bool expect_to_fail = false;
bool should_inject;
file_t file;
char *inject_library_path;
char rununder_buf[16]; /* just an integer printed in ascii */
bool app_specific, from_env, rununder_on;
#if defined(LINUX) || defined(DEBUG)
const char **argv;
#endif
if (DYNAMO_OPTION(follow_children) && DYNAMO_OPTION(early_inject)) {
int ret = handle_execve_script(dcontext);
if (ret != 0)
return ret;
}
fname = (char *)sys_param(dcontext, 0);
#if defined(LINUX) || defined(DEBUG)
argv = (const char **)sys_param(dcontext, 1);
#endif
#ifdef LINUX
if (DYNAMO_OPTION(early_inject) && symlink_is_self_exe(fname)) {
/* i#907: /proc/self/exe points at libdynamorio.so. Make sure we run
* the right thing here.
*/
fname = get_application_name();
}
#endif
LOG(GLOBAL, LOG_ALL, 1,
"\n---------------------------------------------------------------------------"
"\n");
LOG(THREAD, LOG_ALL, 1,
"\n---------------------------------------------------------------------------"
"\n");
DODEBUG({
int i;
SYSLOG_INTERNAL_INFO("-- execve %s --", fname);
LOG(THREAD, LOG_SYSCALLS, 1, "syscall: execve %s\n", fname);
LOG(GLOBAL, LOG_TOP | LOG_SYSCALLS, 1, "execve %s\n", fname);
if (d_r_stats->loglevel >= 3) {
if (argv == NULL) {
LOG(THREAD, LOG_SYSCALLS, 3, "\targs are NULL\n");
} else {
for (i = 0; argv[i] != NULL; i++) {
LOG(THREAD, LOG_SYSCALLS, 2, "\targ %d: len=%d\n", i,
strlen(argv[i]));
LOG(THREAD, LOG_SYSCALLS, 3, "\targ %d: %s\n", i, argv[i]);
}
}
}
});
/* i#237/PR 498284: if we're a vfork "thread" we're really in a different
* process and if we exec then the parent process will still be alive. We
* can't easily clean our own state (dcontext, dstack, etc.) up in our
* parent process: we need it to invoke the syscall and the syscall might
* fail. We could expand cleanup_and_terminate to also be able to invoke
* SYS_execve: but execve seems more likely to fail than termination
* syscalls. Our solution is to mark this thread as "execve" and hide it
* from regular thread queries; we clean it up in the process-exiting
* synch_with_thread(), or if the same parent thread performs another vfork
* (to prevent heap accumulation from repeated vfork+execve). Since vfork
* on linux suspends the parent, there cannot be any races with the execve
* syscall completing: there can't even be peer vfork threads, so we could
* set a flag and clean up in d_r_dispatch, but that seems overkill. (If vfork
* didn't suspend the parent we'd need to touch a marker file or something
* to know the execve was finished.)
*/
mark_thread_execve(dcontext->thread_record, true);
#ifdef STATIC_LIBRARY
/* no way we can inject, we just lose control */
SYSLOG_INTERNAL_WARNING("WARNING: static DynamoRIO library, losing control on "
"execve");
return 0;
#endif
/* Issue 20: handle cross-architecture execve */
/* Xref alternate solution i#145: use dual paths on
* LD_LIBRARY_PATH to solve cross-arch execve
*/
file = os_open(fname, OS_OPEN_READ);
if (file != INVALID_FILE) {
if (!module_file_is_module64(file, &x64,
NULL /*only care about primary==execve*/))
expect_to_fail = true;
os_close(file);
} else
expect_to_fail = true;
inject_library_path =
IF_X64_ELSE(x64, !x64) ? dynamorio_library_path : dynamorio_alt_arch_path;
should_inject = DYNAMO_OPTION(follow_children);
if (get_config_val_other_app(get_short_name(fname), get_process_id(),
x64 ? DR_PLATFORM_64BIT : DR_PLATFORM_32BIT,
DYNAMORIO_VAR_RUNUNDER, rununder_buf,
BUFFER_SIZE_ELEMENTS(rununder_buf), &app_specific,
&from_env, NULL /* 1config is ok */)) {
if (should_inject_from_rununder(rununder_buf, app_specific, from_env,
&rununder_on))
should_inject = rununder_on;
}
if (should_inject)
add_dr_env_vars(dcontext, inject_library_path, fname);
else {
dcontext->sys_param0 = 0;
dcontext->sys_param1 = 0;
}
#ifdef LINUX
/* We have to be accurate with expect_to_fail as we cannot come back
* and fail the syscall once the kernel execs DR!
*/
if (should_inject && DYNAMO_OPTION(early_inject) && !expect_to_fail) {
/* i#909: change the target image to libdynamorio.so */
const char *drpath = IF_X64_ELSE(x64, !x64) ? dynamorio_library_filepath
: dynamorio_alt_arch_filepath;
TRY_EXCEPT(dcontext, /* try */
{
if (symlink_is_self_exe(argv[0])) {
/* we're out of sys_param entries so we assume argv[0] == fname
*/
dcontext->sys_param3 = (reg_t)argv;
argv[0] = fname; /* XXX: handle readable but not writable! */
} else
dcontext->sys_param3 = 0; /* no restore in post */
dcontext->sys_param4 =
(reg_t)fname; /* store for restore in post */
*sys_param_addr(dcontext, 0) = (reg_t)drpath;
LOG(THREAD, LOG_SYSCALLS, 2, "actual execve on: %s\n",
(char *)sys_param(dcontext, 0));
},
/* except */
{
dcontext->sys_param3 = 0; /* no restore in post */
dcontext->sys_param4 = 0; /* no restore in post */
LOG(THREAD, LOG_SYSCALLS, 2,
"argv is unreadable, expect execve to fail\n");
});
} else {
dcontext->sys_param3 = 0; /* no restore in post */
dcontext->sys_param4 = 0; /* no restore in post */
}
#endif
/* we need to clean up the .1config file here. if the execve fails,
* we'll just live w/o dynamic option re-read.
*/
d_r_config_exit();
return 0;
}
static void
handle_execve_post(dcontext_t *dcontext)
{
/* if we get here it means execve failed (doesn't return on success),
* or we did an execve from a vfork and its memory changes are visible
* in the parent process.
* we have to restore env to how it was and free the allocated heap.
*/
char **old_envp = (char **)dcontext->sys_param0;
char **new_envp = (char **)dcontext->sys_param1;
#ifdef STATIC_LIBRARY
/* nothing to clean up */
return;
#endif
#ifdef LINUX
if (dcontext->sys_param4 != 0) {
/* restore original /proc/.../exe */
*sys_param_addr(dcontext, 0) = dcontext->sys_param4;
if (dcontext->sys_param3 != 0) {
/* restore original argv[0] */
const char **argv = (const char **)dcontext->sys_param3;
argv[0] = (const char *)dcontext->sys_param4;
}
}
#endif
if (new_envp != NULL) {
int i;
LOG(THREAD, LOG_SYSCALLS, 2, "\tcleaning up our env vars\n");
/* we replaced existing ones and/or added new ones.
* we can't compare to old_envp b/c it may have changed by now.
*/
for (i = 0; new_envp[i] != NULL; i++) {
if (is_dynamo_address((byte *)new_envp[i])) {
heap_free(dcontext, new_envp[i],
sizeof(char) * (strlen(new_envp[i]) + 1) HEAPACCT(ACCT_OTHER));
}
}
i++; /* need to de-allocate final null slot too */
heap_free(dcontext, new_envp, sizeof(char *) * i HEAPACCT(ACCT_OTHER));
/* restore prev envp if we're post-syscall */
if (!dcontext->thread_record->execve)
*sys_param_addr(dcontext, 2) = (reg_t)old_envp;
}
}
/* i#237/PR 498284: to avoid accumulation of thread state we clean up a vfork
* child who invoked execve here so we have at most one outstanding thread. we
* also clean up at process exit and before thread creation. we could do this
* in d_r_dispatch but too rare to be worth a flag check there.
*/
static void
cleanup_after_vfork_execve(dcontext_t *dcontext)
{
thread_record_t **threads;
int num_threads, i;
if (num_execve_threads == 0)
return;
d_r_mutex_lock(&thread_initexit_lock);
get_list_of_threads_ex(&threads, &num_threads, true /*include execve*/);
for (i = 0; i < num_threads; i++) {
if (threads[i]->execve) {
LOG(THREAD, LOG_SYSCALLS, 2, "cleaning up earlier vfork thread " TIDFMT "\n",
threads[i]->id);
dynamo_other_thread_exit(threads[i]);
}
}
d_r_mutex_unlock(&thread_initexit_lock);
global_heap_free(threads,
num_threads * sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT));
}
static void
set_stdfile_fileno(stdfile_t **stdfile, file_t file_no)
{
#ifdef STDFILE_FILENO
(*stdfile)->STDFILE_FILENO = file_no;
#else
# warning stdfile_t is opaque; DynamoRIO will not set fds of libc FILEs.
/* i#1973: musl libc support (and potentially other non-glibcs) */
/* only called by handle_close_pre(), so warning is specific to that. */
SYSLOG_INTERNAL_WARNING_ONCE(
"DynamoRIO cannot set the file descriptors of private libc FILEs on "
"this platform. Client usage of stdio.h stdin, stdout, or stderr may "
"no longer work as expected, because the app is closing the UNIX fds "
"backing these.");
#endif
}
/* returns whether to execute syscall */
static bool
handle_close_pre(dcontext_t *dcontext)
{
/* in fs/open.c: asmlinkage long sys_close(unsigned int fd) */
uint fd = (uint)sys_param(dcontext, 0);
LOG(THREAD, LOG_SYSCALLS, 3, "syscall: close fd %d\n", fd);
/* prevent app from closing our files */
if (fd_is_dr_owned(fd)) {
SYSLOG_INTERNAL_WARNING_ONCE("app trying to close DR file(s)");
LOG(THREAD, LOG_TOP | LOG_SYSCALLS, 1,
"WARNING: app trying to close DR file %d! Not allowing it.\n", fd);
if (DYNAMO_OPTION(fail_on_stolen_fds)) {
set_failure_return_val(dcontext, EBADF);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
} else
set_success_return_val(dcontext, 0);
return false; /* do not execute syscall */
}
/* Xref PR 258731 - duplicate STDOUT/STDERR when app closes them so we (or
* a client) can continue to use them for logging. */
if (DYNAMO_OPTION(dup_stdout_on_close) && fd == STDOUT) {
our_stdout = fd_priv_dup(fd);
if (our_stdout < 0) /* no private fd available */
our_stdout = dup_syscall(fd);
if (our_stdout >= 0)
fd_mark_close_on_exec(our_stdout);
fd_table_add(our_stdout, 0);
LOG(THREAD, LOG_TOP | LOG_SYSCALLS, 1,
"WARNING: app is closing stdout=%d - duplicating descriptor for "
"DynamoRIO usage got %d.\n",
fd, our_stdout);
if (privmod_stdout != NULL &&
IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
/* update the privately loaded libc's stdout _fileno. */
set_stdfile_fileno(privmod_stdout, our_stdout);
}
}
if (DYNAMO_OPTION(dup_stderr_on_close) && fd == STDERR) {
our_stderr = fd_priv_dup(fd);
if (our_stderr < 0) /* no private fd available */
our_stderr = dup_syscall(fd);
if (our_stderr >= 0)
fd_mark_close_on_exec(our_stderr);
fd_table_add(our_stderr, 0);
LOG(THREAD, LOG_TOP | LOG_SYSCALLS, 1,
"WARNING: app is closing stderr=%d - duplicating descriptor for "
"DynamoRIO usage got %d.\n",
fd, our_stderr);
if (privmod_stderr != NULL &&
IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
/* update the privately loaded libc's stderr _fileno. */
set_stdfile_fileno(privmod_stderr, our_stderr);
}
}
if (DYNAMO_OPTION(dup_stdin_on_close) && fd == STDIN) {
our_stdin = fd_priv_dup(fd);
if (our_stdin < 0) /* no private fd available */
our_stdin = dup_syscall(fd);
if (our_stdin >= 0)
fd_mark_close_on_exec(our_stdin);
fd_table_add(our_stdin, 0);
LOG(THREAD, LOG_TOP | LOG_SYSCALLS, 1,
"WARNING: app is closing stdin=%d - duplicating descriptor for "
"DynamoRIO usage got %d.\n",
fd, our_stdin);
if (privmod_stdin != NULL &&
IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
            /* update the privately loaded libc's stdin _fileno. */
set_stdfile_fileno(privmod_stdin, our_stdin);
}
}
return true;
}
/***************************************************************************/
/* Used to obtain the pc of the syscall instr itself when the dcontext dc
* is currently in a syscall handler.
* Alternatively for sysenter we could set app_sysenter_instr_addr for Linux.
*/
#define SYSCALL_PC(dc) \
((get_syscall_method() == SYSCALL_METHOD_INT || \
get_syscall_method() == SYSCALL_METHOD_SYSCALL) \
? (ASSERT(SYSCALL_LENGTH == INT_LENGTH), POST_SYSCALL_PC(dc) - INT_LENGTH) \
: (vsyscall_syscall_end_pc - SYSENTER_LENGTH))
static void
handle_exit(dcontext_t *dcontext)
{
priv_mcontext_t *mc = get_mcontext(dcontext);
bool exit_process = false;
if (dcontext->sys_num == SYSNUM_EXIT_PROCESS) {
/* We can have multiple thread groups within the same address space.
* We need to know whether this is the only group left.
* FIXME: we can have races where new threads are created after our
* check: we'll live with that for now, but the right approach is to
* suspend all threads via synch_with_all_threads(), do the check,
* and if exit_process then exit w/o resuming: though have to
* coordinate lock access w/ cleanup_and_terminate.
* Xref i#94. Xref PR 541760.
*/
process_id_t mypid = get_process_id();
thread_record_t **threads;
int num_threads, i;
exit_process = true;
d_r_mutex_lock(&thread_initexit_lock);
get_list_of_threads(&threads, &num_threads);
for (i = 0; i < num_threads; i++) {
if (threads[i]->pid != mypid && !IS_CLIENT_THREAD(threads[i]->dcontext)) {
exit_process = false;
break;
}
}
if (!exit_process) {
/* We need to clean up the other threads in our group here. */
thread_id_t myid = d_r_get_thread_id();
priv_mcontext_t mcontext;
DEBUG_DECLARE(thread_synch_result_t synch_res;)
LOG(THREAD, LOG_TOP | LOG_SYSCALLS, 1,
"SYS_exit_group %d not final group: %d cleaning up just "
"threads in group\n",
get_process_id(), d_r_get_thread_id());
/* Set where we are to handle reciprocal syncs */
copy_mcontext(mc, &mcontext);
mc->pc = SYSCALL_PC(dcontext);
for (i = 0; i < num_threads; i++) {
if (threads[i]->id != myid && threads[i]->pid == mypid) {
/* See comments in dynamo_process_exit_cleanup(): we terminate
* to make cleanup easier, but may want to switch to shifting
* the target thread to a stack-free loop.
*/
DEBUG_DECLARE(synch_res =)
synch_with_thread(
threads[i]->id, true /*block*/, true /*have initexit lock*/,
THREAD_SYNCH_VALID_MCONTEXT, THREAD_SYNCH_TERMINATED_AND_CLEANED,
THREAD_SYNCH_SUSPEND_FAILURE_IGNORE);
/* initexit lock may be released and re-acquired in course of
* doing the synch so we may have races where the thread
* exits on its own (or new threads appear): we'll live
* with those for now.
*/
ASSERT(synch_res == THREAD_SYNCH_RESULT_SUCCESS);
}
}
copy_mcontext(&mcontext, mc);
}
d_r_mutex_unlock(&thread_initexit_lock);
global_heap_free(
threads, num_threads * sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT));
}
if (is_last_app_thread() && !dynamo_exited) {
LOG(THREAD, LOG_TOP | LOG_SYSCALLS, 1,
"SYS_exit%s(%d) in final thread " TIDFMT " of " PIDFMT
" => exiting DynamoRIO\n",
(dcontext->sys_num == SYSNUM_EXIT_PROCESS) ? "_group" : "",
MCXT_SYSNUM_REG(mc), d_r_get_thread_id(), get_process_id());
/* we want to clean up even if not automatic startup! */
automatic_startup = true;
exit_process = true;
} else {
LOG(THREAD, LOG_TOP | LOG_THREADS | LOG_SYSCALLS, 1,
"SYS_exit%s(%d) in thread " TIDFMT " of " PIDFMT " => cleaning up %s\n",
(dcontext->sys_num == SYSNUM_EXIT_PROCESS) ? "_group" : "",
MCXT_SYSNUM_REG(mc), d_r_get_thread_id(), get_process_id(),
exit_process ? "process" : "thread");
}
KSTOP(num_exits_dir_syscall);
block_cleanup_and_terminate(dcontext, MCXT_SYSNUM_REG(mc), sys_param(dcontext, 0),
sys_param(dcontext, 1), exit_process,
/* SYS_bsdthread_terminate has 2 more args */
sys_param(dcontext, 2), sys_param(dcontext, 3));
}
#if defined(LINUX) && defined(X86) /* XXX i#58: just until we have Mac support */
static bool
os_set_app_thread_area(dcontext_t *dcontext, our_modify_ldt_t *user_desc)
{
# ifdef X86
int i;
os_thread_data_t *ostd = dcontext->os_field;
our_modify_ldt_t *desc = (our_modify_ldt_t *)ostd->app_thread_areas;
if (user_desc->seg_not_present == 1) {
/* find an empty one to update */
for (i = 0; i < GDT_NUM_TLS_SLOTS; i++) {
if (desc[i].seg_not_present == 1)
break;
}
if (i < GDT_NUM_TLS_SLOTS) {
user_desc->entry_number = GDT_SELECTOR(i + tls_min_index());
memcpy(&desc[i], user_desc, sizeof(*user_desc));
} else
return false;
} else {
/* If we used early injection, this might be ld.so trying to set up TLS. We
* direct the app to use the GDT entry we already set up for our private
* libraries, but only the first time it requests TLS.
*/
if (user_desc->entry_number == -1 && return_stolen_lib_tls_gdt) {
d_r_mutex_lock(&set_thread_area_lock);
if (return_stolen_lib_tls_gdt) {
uint selector = read_thread_register(LIB_SEG_TLS);
uint index = SELECTOR_INDEX(selector);
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
return_stolen_lib_tls_gdt = false;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
user_desc->entry_number = index;
LOG(GLOBAL, LOG_THREADS, 2,
"%s: directing app to use "
"selector 0x%x for first call to set_thread_area\n",
__FUNCTION__, selector);
}
d_r_mutex_unlock(&set_thread_area_lock);
}
/* update the specific one */
i = user_desc->entry_number - tls_min_index();
if (i < 0 || i >= GDT_NUM_TLS_SLOTS)
return false;
LOG(GLOBAL, LOG_THREADS, 2,
"%s: change selector 0x%x base from " PFX " to " PFX "\n", __FUNCTION__,
GDT_SELECTOR(user_desc->entry_number), desc[i].base_addr,
user_desc->base_addr);
memcpy(&desc[i], user_desc, sizeof(*user_desc));
}
    /* If there is no conflict with DR's TLS, perform the real syscall. */
if (IF_CLIENT_INTERFACE_ELSE(!INTERNAL_OPTION(private_loader), true) &&
GDT_SELECTOR(user_desc->entry_number) != read_thread_register(SEG_TLS) &&
GDT_SELECTOR(user_desc->entry_number) != read_thread_register(LIB_SEG_TLS))
return false;
# elif defined(ARM)
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
# endif /* X86/ARM */
return true;
}
static bool
os_get_app_thread_area(dcontext_t *dcontext, our_modify_ldt_t *user_desc)
{
# ifdef X86
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
our_modify_ldt_t *desc = (our_modify_ldt_t *)ostd->app_thread_areas;
int i = user_desc->entry_number - tls_min_index();
if (i < 0 || i >= GDT_NUM_TLS_SLOTS)
return false;
if (desc[i].seg_not_present == 1)
return false;
# elif defined(ARM)
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
# endif /* X86/ARM */
return true;
}
#endif
/* This function is used to switch the lib TLS segment when creating a thread.
 * We switch to the app's lib TLS seg before a thread-creation system call, i.e.,
 * clone or vfork, and switch back to DR's lib TLS seg after the system call.
 * These switches happen only on the parent thread, not the child thread:
 * the child thread's TLS is set up in os_tls_app_seg_init.
 */
/* XXX: It looks like the Linux kernel has some dependency on the segment
 * descriptor: if we use DR's segment descriptor, the created thread takes an
 * access violation because its TLS is not set up. However, it works fine if we
 * switch the descriptor to the app's segment descriptor before creating the
 * thread. We should be able to remove this function later once we find the
 * root cause.
 */
static bool
os_switch_lib_tls(dcontext_t *dcontext, bool to_app)
{
return os_switch_seg_to_context(dcontext, LIB_SEG_TLS, to_app);
}
#ifdef X86
/* dcontext can be NULL if !to_app */
static bool
os_switch_seg_to_base(dcontext_t *dcontext, os_local_state_t *os_tls, reg_id_t seg,
bool to_app, app_pc base)
{
bool res = false;
ASSERT(dcontext != NULL);
    ASSERT(IF_X86_ELSE((seg == SEG_FS || seg == SEG_GS),
                       (seg == DR_REG_TPIDRURW || seg == DR_REG_TPIDRURO)));
switch (os_tls->tls_type) {
# ifdef X64
case TLS_TYPE_ARCH_PRCTL: {
res = tls_set_fs_gs_segment_base(os_tls->tls_type, seg, base, NULL);
ASSERT(res);
LOG(GLOBAL, LOG_THREADS, 2,
"%s %s: arch_prctl successful for thread " TIDFMT " base " PFX "\n",
__FUNCTION__, to_app ? "to app" : "to DR", d_r_get_thread_id(), base);
if (seg == SEG_TLS && base == NULL) {
/* Set the selector to 0 so we don't think TLS is available. */
/* FIXME i#107: Still assumes app isn't using SEG_TLS. */
reg_t zero = 0;
WRITE_DR_SEG(zero);
}
break;
}
# endif
case TLS_TYPE_GDT: {
our_modify_ldt_t desc;
uint index;
uint selector;
if (to_app) {
selector = os_tls->app_lib_tls_reg;
index = SELECTOR_INDEX(selector);
} else {
index = (seg == LIB_SEG_TLS ? tls_priv_lib_index() : tls_dr_index());
ASSERT(index != -1 && "TLS indices not initialized");
selector = GDT_SELECTOR(index);
}
if (selector != 0) {
if (to_app) {
our_modify_ldt_t *areas =
((os_thread_data_t *)dcontext->os_field)->app_thread_areas;
                ASSERT((index >= tls_min_index()) &&
                       ((index - tls_min_index()) < GDT_NUM_TLS_SLOTS));
desc = areas[index - tls_min_index()];
} else {
tls_init_descriptor(&desc, base, GDT_NO_SIZE_LIMIT, index);
}
res = tls_set_fs_gs_segment_base(os_tls->tls_type, seg, NULL, &desc);
ASSERT(res);
} else {
/* For a selector of zero, we just reset the segment to zero. We
* don't need to call set_thread_area.
*/
res = true; /* Indicate success. */
}
/* XXX i#2098: it's unsafe to call LOG here in between GDT and register changes */
        /* i#558: update the lib seg reg to enforce the segment changes */
if (seg == SEG_TLS)
WRITE_DR_SEG(selector);
else
WRITE_LIB_SEG(selector);
LOG(THREAD, LOG_LOADER, 2, "%s: switching to %s, setting %s to 0x%x\n",
__FUNCTION__, (to_app ? "app" : "dr"), reg_names[seg], selector);
LOG(THREAD, LOG_LOADER, 2,
"%s %s: set_thread_area successful for thread " TIDFMT " base " PFX "\n",
__FUNCTION__, to_app ? "to app" : "to DR", d_r_get_thread_id(), base);
break;
}
case TLS_TYPE_LDT: {
uint index;
uint selector;
if (to_app) {
selector = os_tls->app_lib_tls_reg;
index = SELECTOR_INDEX(selector);
} else {
index = (seg == LIB_SEG_TLS ? tls_priv_lib_index() : tls_dr_index());
ASSERT(index != -1 && "TLS indices not initialized");
selector = LDT_SELECTOR(index);
}
LOG(THREAD, LOG_LOADER, 2, "%s: switching to %s, setting %s to 0x%x\n",
__FUNCTION__, (to_app ? "app" : "dr"), reg_names[seg], selector);
if (seg == SEG_TLS)
WRITE_DR_SEG(selector);
else
WRITE_LIB_SEG(selector);
LOG(THREAD, LOG_LOADER, 2,
"%s %s: ldt selector swap successful for thread " TIDFMT "\n", __FUNCTION__,
to_app ? "to app" : "to DR", d_r_get_thread_id());
break;
}
default: ASSERT_NOT_REACHED(); return false;
}
ASSERT((!to_app && seg == SEG_TLS) ||
BOOLS_MATCH(to_app, os_using_app_state(dcontext)));
return res;
}
static bool
os_set_dr_tls_base(dcontext_t *dcontext, os_local_state_t *tls, byte *base)
{
if (tls == NULL) {
ASSERT(dcontext != NULL);
tls = get_os_tls_from_dc(dcontext);
}
return os_switch_seg_to_base(dcontext, tls, SEG_TLS, false, base);
}
#endif /* X86 */
static bool
os_switch_seg_to_context(dcontext_t *dcontext, reg_id_t seg, bool to_app)
{
os_local_state_t *os_tls = get_os_tls_from_dc(dcontext);
#ifdef X86
app_pc base;
/* we can only update the executing thread's segment (i#920) */
ASSERT_MESSAGE(CHKLVL_ASSERTS + 1 /*expensive*/, "can only act on executing thread",
/* i#2089: a clone syscall, or when native, temporarily puts in
* invalid TLS, so we don't check get_thread_private_dcontext().
*/
is_thread_tls_allocated() &&
dcontext->owning_thread == get_sys_thread_id());
if (to_app) {
base = os_get_app_tls_base(dcontext, seg);
} else {
base = os_get_priv_tls_base(dcontext, seg);
}
return os_switch_seg_to_base(dcontext, os_tls, seg, to_app, base);
#elif defined(AARCHXX)
bool res = false;
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
ASSERT(INTERNAL_OPTION(private_loader));
if (to_app) {
        /* On switching to the app's TLS, we need to put DR's TLS base into the app's TLS
* at the same offset so it can be loaded on entering code cache.
* Otherwise, the context switch code on entering fcache will fault on
* accessing DR's TLS.
* The app's TLS slot value is stored into privlib's TLS slot for
* later restore on switching back to privlib's TLS.
*/
byte **priv_lib_tls_swap_slot =
(byte **)(ostd->priv_lib_tls_base + DR_TLS_BASE_OFFSET);
byte **app_lib_tls_swap_slot =
(byte **)(os_tls->app_lib_tls_base + DR_TLS_BASE_OFFSET);
LOG(THREAD, LOG_LOADER, 3,
"%s: switching to app: app slot=&" PFX " *" PFX ", priv slot=&" PFX " *" PFX
"\n",
__FUNCTION__, app_lib_tls_swap_slot, *app_lib_tls_swap_slot,
priv_lib_tls_swap_slot, *priv_lib_tls_swap_slot);
byte *dr_tls_base = *priv_lib_tls_swap_slot;
*priv_lib_tls_swap_slot = *app_lib_tls_swap_slot;
*app_lib_tls_swap_slot = dr_tls_base;
LOG(THREAD, LOG_LOADER, 2, "%s: switching to %s, setting coproc reg to 0x%x\n",
__FUNCTION__, (to_app ? "app" : "dr"), os_tls->app_lib_tls_base);
res = write_thread_register(os_tls->app_lib_tls_base);
} else {
/* Restore the app's TLS slot that we used for storing DR's TLS base,
* and put DR's TLS base back to privlib's TLS slot.
*/
byte **priv_lib_tls_swap_slot =
(byte **)(ostd->priv_lib_tls_base + DR_TLS_BASE_OFFSET);
byte **app_lib_tls_swap_slot =
(byte **)(os_tls->app_lib_tls_base + DR_TLS_BASE_OFFSET);
byte *dr_tls_base = *app_lib_tls_swap_slot;
LOG(THREAD, LOG_LOADER, 3,
"%s: switching to DR: app slot=&" PFX " *" PFX ", priv slot=&" PFX " *" PFX
"\n",
__FUNCTION__, app_lib_tls_swap_slot, *app_lib_tls_swap_slot,
priv_lib_tls_swap_slot, *priv_lib_tls_swap_slot);
*app_lib_tls_swap_slot = *priv_lib_tls_swap_slot;
*priv_lib_tls_swap_slot = dr_tls_base;
LOG(THREAD, LOG_LOADER, 2, "%s: switching to %s, setting coproc reg to 0x%x\n",
__FUNCTION__, (to_app ? "app" : "dr"), ostd->priv_lib_tls_base);
res = write_thread_register(ostd->priv_lib_tls_base);
}
LOG(THREAD, LOG_LOADER, 2, "%s %s: set_tls swap success=%d for thread " TIDFMT "\n",
__FUNCTION__, to_app ? "to app" : "to DR", res, d_r_get_thread_id());
return res;
#elif defined(AARCH64)
(void)os_tls;
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return false;
#endif /* X86/ARM/AARCH64 */
}
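/* Swap-slot picture for the AARCHXX path above (a sketch), with S_priv and
 * S_app naming the DR_TLS_BASE_OFFSET slot in the privlib and app lib TLS:
 *   to app: S_priv <- old S_app value; S_app <- DR's TLS base; reg <- app base
 *   to DR : the inverse swap;                                  reg <- priv base
 * Net effect: whichever segment the thread register currently points at, its
 * DR_TLS_BASE_OFFSET slot holds DR's TLS base for the fcache entry code.
 */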
/* System call interception: put any special handling here
* Arguments come from the pusha right before the call
*/
/* WARNING: flush_fragments_and_remove_region assumes that pre and post system
* call handlers do not examine or modify fcache or its fragments in any
* way except for calling flush_fragments_and_remove_region!
*/
/* WARNING: All registers are IN values, but NOT OUT values --
* must set mcontext's register for that.
*/
/* Returns false if system call should NOT be executed (in which case,
* post_system_call() will *not* be called!).
* Returns true if system call should go ahead
*/
/* XXX: split out specific handlers into separate routines
*/
bool
pre_system_call(dcontext_t *dcontext)
{
priv_mcontext_t *mc = get_mcontext(dcontext);
bool execute_syscall = true;
dr_where_am_i_t old_whereami = dcontext->whereami;
dcontext->whereami = DR_WHERE_SYSCALL_HANDLER;
/* FIXME We haven't yet done the work to detect which syscalls we
* can determine a priori will fail. Once we do, we will set the
     * expect_last_syscall_to_fail flag to true for those cases, and can
* confirm in post_system_call() that the syscall failed as
* expected.
*/
DODEBUG(dcontext->expect_last_syscall_to_fail = false;);
/* save key register values for post_system_call (they get clobbered
* in syscall itself)
*/
dcontext->sys_num = os_normalized_sysnum((int)MCXT_SYSNUM_REG(mc), NULL, dcontext);
RSTATS_INC(pre_syscall);
DOSTATS({
if (ignorable_system_call_normalized(dcontext->sys_num))
STATS_INC(pre_syscall_ignorable);
});
LOG(THREAD, LOG_SYSCALLS, 2, "system call %d\n", dcontext->sys_num);
#if defined(LINUX) && defined(X86)
/* PR 313715: If we fail to hook the vsyscall page (xref PR 212570, PR 288330)
* we fall back on int, but we have to tweak syscall param #5 (ebp)
* Once we have PR 288330 we can remove this.
*/
if (should_syscall_method_be_sysenter() && !dcontext->sys_was_int) {
dcontext->sys_xbp = mc->xbp;
/* not using SAFE_READ due to performance concerns (we do this for
* every single system call on systems where we can't hook vsyscall!)
*/
TRY_EXCEPT(dcontext, /* try */ { mc->xbp = *(reg_t *)mc->xsp; }, /* except */
{
ASSERT_NOT_REACHED();
mc->xbp = 0;
});
}
#endif
switch (dcontext->sys_num) {
case SYSNUM_EXIT_PROCESS:
#if defined(LINUX) && defined(VMX86_SERVER)
if (os_in_vmkernel_32bit()) {
/* on esx 3.5 => ENOSYS, so wait for SYS_exit */
LOG(THREAD, LOG_SYSCALLS, 2, "on esx35 => ignoring exitgroup\n");
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
break;
}
#endif
/* fall-through */
case SYSNUM_EXIT_THREAD: {
handle_exit(dcontext);
break;
}
/****************************************************************************/
/* MEMORY REGIONS */
#if defined(LINUX) && !defined(X64) && !defined(ARM)
case SYS_mmap: {
/* in /usr/src/linux/arch/i386/kernel/sys_i386.c:
asmlinkage int old_mmap(struct mmap_arg_struct_t *arg)
*/
mmap_arg_struct_t *arg = (mmap_arg_struct_t *)sys_param(dcontext, 0);
mmap_arg_struct_t arg_buf;
        if (d_r_safe_read(arg, sizeof(mmap_arg_struct_t), &arg_buf)) {
            /* Use the safely-read copy rather than dereferencing app memory
             * again, which could race with the app unmapping it.
             */
            void *addr = (void *)arg_buf.addr;
            size_t len = (size_t)arg_buf.len;
            uint prot = (uint)arg_buf.prot;
            LOG(THREAD, LOG_SYSCALLS, 2,
                "syscall: mmap addr=" PFX " size=" PIFX " prot=0x%x"
                " flags=" PIFX " offset=" PIFX " fd=%d\n",
                addr, len, prot, arg_buf.flags, arg_buf.offset, arg_buf.fd);
            /* Check for overlap with existing code or patch-proof regions */
            if (addr != NULL &&
                !app_memory_pre_alloc(dcontext, addr, len, osprot_to_memprot(prot),
                                      !TEST(MAP_FIXED, arg_buf.flags))) {
/* Rather than failing or skipping the syscall we'd like to just
* remove the hint -- but we don't want to write to app memory, so
* we do fail. We could set up our own mmap_arg_struct_t but
* we'd need dedicate per-thread storage, and SYS_mmap is obsolete.
*/
execute_syscall = false;
set_failure_return_val(dcontext, ENOMEM);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
break;
}
}
/* post_system_call does the work */
dcontext->sys_param0 = (reg_t)arg;
break;
}
#endif
case IF_MACOS_ELSE(SYS_mmap, IF_X64_ELSE(SYS_mmap, SYS_mmap2)): {
/* in /usr/src/linux/arch/i386/kernel/sys_i386.c:
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
*/
void *addr = (void *)sys_param(dcontext, 0);
size_t len = (size_t)sys_param(dcontext, 1);
uint prot = (uint)sys_param(dcontext, 2);
uint flags = (uint)sys_param(dcontext, 3);
LOG(THREAD, LOG_SYSCALLS, 2,
"syscall: mmap2 addr=" PFX " size=" PIFX " prot=0x%x"
" flags=" PIFX " offset=" PIFX " fd=%d\n",
addr, len, prot, flags, sys_param(dcontext, 5), sys_param(dcontext, 4));
/* Check for overlap with existing code or patch-proof regions */
if (addr != NULL &&
!app_memory_pre_alloc(dcontext, addr, len, osprot_to_memprot(prot),
!TEST(MAP_FIXED, flags))) {
if (!TEST(MAP_FIXED, flags)) {
/* Rather than failing or skipping the syscall we just remove
* the hint which should eliminate any overlap.
*/
*sys_param_addr(dcontext, 0) = 0;
} else {
execute_syscall = false;
set_failure_return_val(dcontext, ENOMEM);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
break;
}
}
/* post_system_call does the work */
dcontext->sys_param0 = (reg_t)addr;
dcontext->sys_param1 = len;
dcontext->sys_param2 = prot;
dcontext->sys_param3 = flags;
break;
}
/* must flush stale fragments when we see munmap/mremap */
case SYS_munmap: {
/* in /usr/src/linux/mm/mmap.c:
asmlinkage long sys_munmap(unsigned long addr, uint len)
*/
app_pc addr = (void *)sys_param(dcontext, 0);
size_t len = (size_t)sys_param(dcontext, 1);
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: munmap addr=" PFX " size=" PFX "\n", addr,
len);
RSTATS_INC(num_app_munmaps);
/* FIXME addr is supposed to be on a page boundary so we
* could detect that condition here and set
* expect_last_syscall_to_fail.
*/
/* save params in case an undo is needed in post_system_call */
dcontext->sys_param0 = (reg_t)addr;
dcontext->sys_param1 = len;
/* We assume that the unmap will succeed and so are conservative
* and remove the region from exec areas and flush all fragments
* prior to issuing the syscall. If the unmap fails, we try to
* recover in post_system_call() by re-adding the region. This
* approach has its shortcomings -- see comments below in
* post_system_call().
*/
/* Check for unmapping a module. */
os_get_module_info_lock();
if (module_overlaps(addr, len)) {
/* FIXME - handle unmapping more than one module at once, or only unmapping
* part of a module (for which case should adjust view size? or treat as full
* unmap?). Theoretical for now as we haven't seen this. */
module_area_t *ma = module_pc_lookup(addr);
ASSERT_CURIOSITY(ma != NULL);
ASSERT_CURIOSITY(addr == ma->start);
/* XREF 307599 on rounding module end to the next PAGE boundary */
ASSERT_CURIOSITY((app_pc)ALIGN_FORWARD(addr + len, PAGE_SIZE) == ma->end);
os_get_module_info_unlock();
/* i#210:
* we only think a module is removed if its first memory region
* is unloaded (unmapped).
* XREF i#160 to fix the real problem of handling module splitting.
*/
if (ma != NULL && ma->start == addr)
module_list_remove(addr, ALIGN_FORWARD(len, PAGE_SIZE));
} else
os_get_module_info_unlock();
app_memory_deallocation(dcontext, (app_pc)addr, len,
false /* don't own thread_initexit_lock */,
true /* image, FIXME: though not necessarily */);
/* FIXME: case 4983 use is_elf_so_header() */
#ifndef HAVE_MEMINFO_QUERY
memcache_lock();
memcache_remove(addr, addr + len);
memcache_unlock();
#endif
break;
}
#ifdef LINUX
case SYS_mremap: {
/* in /usr/src/linux/mm/mmap.c:
asmlinkage unsigned long sys_mremap(unsigned long addr,
unsigned long old_len, unsigned long new_len,
unsigned long flags, unsigned long new_addr)
*/
dr_mem_info_t info;
app_pc addr = (void *)sys_param(dcontext, 0);
size_t old_len = (size_t)sys_param(dcontext, 1);
size_t new_len = (size_t)sys_param(dcontext, 2);
DEBUG_DECLARE(bool ok;)
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: mremap addr=" PFX " size=" PFX "\n", addr,
old_len);
/* post_system_call does the work */
dcontext->sys_param0 = (reg_t)addr;
dcontext->sys_param1 = old_len;
dcontext->sys_param2 = new_len;
        /* i#173: we need the memory type and prot so we can set up the
         * new memory region in post_system_call.
         */
DEBUG_DECLARE(ok =)
query_memory_ex(addr, &info);
ASSERT(ok);
dcontext->sys_param3 = info.prot;
dcontext->sys_param4 = info.type;
DOCHECK(1, {
/* we don't expect to see remappings of modules */
os_get_module_info_lock();
ASSERT_CURIOSITY(!module_overlaps(addr, old_len));
os_get_module_info_unlock();
});
break;
}
#endif
case SYS_mprotect: {
/* in /usr/src/linux/mm/mprotect.c:
asmlinkage long sys_mprotect(unsigned long start, uint len,
unsigned long prot)
*/
uint res;
DEBUG_DECLARE(size_t size;)
app_pc addr = (void *)sys_param(dcontext, 0);
size_t len = (size_t)sys_param(dcontext, 1);
uint prot = (uint)sys_param(dcontext, 2);
uint old_memprot = MEMPROT_NONE, new_memprot;
bool exists = true;
/* save params in case an undo is needed in post_system_call */
dcontext->sys_param0 = (reg_t)addr;
dcontext->sys_param1 = len;
dcontext->sys_param2 = prot;
LOG(THREAD, LOG_SYSCALLS, 2,
"syscall: mprotect addr=" PFX " size=" PFX " prot=%s\n", addr, len,
memprot_string(osprot_to_memprot(prot)));
if (!get_memory_info(addr, NULL, IF_DEBUG_ELSE(&size, NULL), &old_memprot)) {
exists = false;
/* Xref PR 413109, PR 410921: if the start, or any page, is not mapped,
* this should fail with ENOMEM. We used to force-fail it to avoid
* asserts in our own allmem update code, but there are cases where a
* seemingly unmapped page succeeds (i#1912: next page of grows-down
* initial stack). Thus we let it go through.
*/
LOG(THREAD, LOG_SYSCALLS, 2,
"\t" PFX " isn't mapped: probably mprotect will fail\n", addr);
} else {
/* If mprotect region spans beyond the end of the vmarea then it
* spans 2 or more vmareas with dissimilar protection (xref
* PR 410921) or has unallocated regions in between (PR 413109).
*/
            DOCHECK(1, dcontext->mprot_multi_areas = len > size;);
}
new_memprot = osprot_to_memprot(prot) |
/* mprotect won't change meta flags */
(old_memprot & MEMPROT_META_FLAGS);
res = app_memory_protection_change(dcontext, addr, len, new_memprot, &new_memprot,
NULL);
if (res != DO_APP_MEM_PROT_CHANGE) {
if (res == FAIL_APP_MEM_PROT_CHANGE) {
ASSERT_NOT_IMPLEMENTED(false); /* return code? */
} else {
ASSERT_NOT_IMPLEMENTED(res != SUBSET_APP_MEM_PROT_CHANGE);
ASSERT_NOT_REACHED();
}
execute_syscall = false;
} else {
/* FIXME Store state for undo if the syscall fails. */
IF_NO_MEMQUERY(memcache_update_locked(addr, addr + len, new_memprot,
-1 /*type unchanged*/, exists));
}
break;
}
#ifdef ANDROID
case SYS_prctl:
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
dcontext->sys_param2 = sys_param(dcontext, 2);
dcontext->sys_param3 = sys_param(dcontext, 3);
dcontext->sys_param4 = sys_param(dcontext, 4);
break;
#endif
#ifdef LINUX
case SYS_brk: {
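        /* in /usr/src/linux/mm/mmap.c:
           asmlinkage unsigned long sys_brk(unsigned long brk)
         */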
if (DYNAMO_OPTION(emulate_brk)) {
/* i#1004: emulate brk via a separate mmap */
byte *new_val = (byte *)sys_param(dcontext, 0);
byte *res = emulate_app_brk(dcontext, new_val);
execute_syscall = false;
/* SYS_brk returns old brk on failure */
set_success_return_val(dcontext, (reg_t)res);
} else {
/* i#91/PR 396352: need to watch SYS_brk to maintain all_memory_areas.
* We store the old break in the param1 slot.
*/
DODEBUG(dcontext->sys_param0 = (reg_t)sys_param(dcontext, 0););
dcontext->sys_param1 = dynamorio_syscall(SYS_brk, 1, 0);
}
break;
}
# ifdef SYS_uselib
case SYS_uselib: {
        /* Used to get the kernel to load a shared library (legacy system call).
         * Was primarily used when statically linking to dynamically loaded shared
         * libraries that were loaded at known locations.  Shouldn't be used by
         * applications using the dynamic loader (ld), which is currently the only
         * way we can inject, so we don't expect to see this.  PR 307621. */
ASSERT_NOT_IMPLEMENTED(false);
break;
}
# endif
#endif
/****************************************************************************/
/* SPAWNING */
#ifdef LINUX
case SYS_clone: {
/* in /usr/src/linux/arch/i386/kernel/process.c
* 32-bit params: flags, newsp, ptid, tls, ctid
* 64-bit params: should be the same yet tls (for ARCH_SET_FS) is in r8?!?
* I don't see how sys_clone gets its special args: shouldn't it
* just get pt_regs as a "special system call"?
* sys_clone(unsigned long clone_flags, unsigned long newsp,
* void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
*/
uint flags = (uint)sys_param(dcontext, 0);
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: clone with flags = " PFX "\n", flags);
LOG(THREAD, LOG_SYSCALLS, 2,
"args: " PFX ", " PFX ", " PFX ", " PFX ", " PFX "\n", sys_param(dcontext, 0),
sys_param(dcontext, 1), sys_param(dcontext, 2), sys_param(dcontext, 3),
sys_param(dcontext, 4));
handle_clone(dcontext, flags);
if ((flags & CLONE_VM) == 0) {
LOG(THREAD, LOG_SYSCALLS, 1, "\tWARNING: CLONE_VM not set!\n");
}
/* save for post_system_call */
dcontext->sys_param0 = (reg_t)flags;
/* i#1010: If we have private fds open (usually logfiles), we should
* clean those up before they get reused by a new thread.
* XXX: Ideally we'd do this in fd_table_add(), but we can't acquire
* thread_initexit_lock there.
*/
cleanup_after_vfork_execve(dcontext);
/* For thread creation clone syscalls a clone_record_t structure
* containing the pc after the app's syscall instr and other data
* (see i#27) is placed at the bottom of the dstack (which is allocated
* by create_clone_record() - it also saves app stack and switches
* to dstack). xref i#149/PR 403015.
* Note: This must be done after sys_param0 is set.
*/
if (is_thread_create_syscall(dcontext)) {
create_clone_record(dcontext,
sys_param_addr(dcontext, SYSCALL_PARAM_CLONE_STACK));
os_clone_pre(dcontext);
os_new_thread_pre();
} else /* This is really a fork. */
os_fork_pre(dcontext);
break;
}
#elif defined(MACOS)
case SYS_bsdthread_create: {
/* XXX i#1403: we need earlier injection to intercept
* bsdthread_register in order to capture workqueue threads.
* For now we settle for intercepting bsd threads at the user thread func.
* We miss a little user-mode code but this is enough to get started.
*/
app_pc func = (app_pc)sys_param(dcontext, 0);
void *func_arg = (void *)sys_param(dcontext, 1);
void *clone_rec;
LOG(THREAD, LOG_SYSCALLS, 1,
"bsdthread_create: thread func " PFX ", arg " PFX "\n", func, func_arg);
handle_clone(dcontext, CLONE_THREAD | CLONE_VM | CLONE_SIGHAND | SIGCHLD);
clone_rec = create_clone_record(dcontext, NULL, func, func_arg);
dcontext->sys_param0 = (reg_t)func;
dcontext->sys_param1 = (reg_t)func_arg;
*sys_param_addr(dcontext, 0) = (reg_t)new_bsdthread_intercept;
*sys_param_addr(dcontext, 1) = (reg_t)clone_rec;
os_new_thread_pre();
break;
}
case SYS_posix_spawn: {
/* FIXME i#1644: monitor this call which can be fork or exec */
ASSERT_NOT_IMPLEMENTED(false);
break;
}
#endif
#ifdef SYS_vfork
case SYS_vfork: {
/* treat as if sys_clone with flags just as sys_vfork does */
/* in /usr/src/linux/arch/i386/kernel/process.c */
uint flags = CLONE_VFORK | CLONE_VM | SIGCHLD;
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: vfork\n");
handle_clone(dcontext, flags);
cleanup_after_vfork_execve(dcontext);
/* save for post_system_call, treated as if SYS_clone */
dcontext->sys_param0 = (reg_t)flags;
/* vfork has the same needs as clone. Pass info via a clone_record_t
* structure to child. See SYS_clone for info about i#149/PR 403015.
*/
IF_LINUX(ASSERT(is_thread_create_syscall(dcontext)));
dcontext->sys_param1 = mc->xsp; /* for restoring in parent */
# ifdef MACOS
create_clone_record(dcontext, (reg_t *)&mc->xsp, NULL, NULL);
# else
create_clone_record(dcontext, (reg_t *)&mc->xsp /*child uses parent sp*/);
# endif
os_clone_pre(dcontext);
os_new_thread_pre();
break;
}
#endif
#ifdef SYS_fork
case SYS_fork: {
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: fork\n");
os_fork_pre(dcontext);
break;
}
#endif
case SYS_execve: {
int ret = handle_execve(dcontext);
if (ret != 0) {
execute_syscall = false;
set_failure_return_val(dcontext, ret);
}
break;
}
/****************************************************************************/
/* SIGNALS */
case IF_MACOS_ELSE(SYS_sigaction, SYS_rt_sigaction): { /* 174 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage long
sys_rt_sigaction(int sig, const struct sigaction *act,
struct sigaction *oact, size_t sigsetsize)
*/
int sig = (int)sys_param(dcontext, 0);
const kernel_sigaction_t *act =
(const kernel_sigaction_t *)sys_param(dcontext, 1);
prev_sigaction_t *oact = (prev_sigaction_t *)sys_param(dcontext, 2);
size_t sigsetsize = (size_t)
/* On Mac there is no size arg (but it doesn't use old sigaction, so
* closer to rt_ than non-rt_ below).
*/
IF_MACOS_ELSE(sizeof(kernel_sigset_t), sys_param(dcontext, 3));
uint res;
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: %ssigaction %d " PFX " " PFX " %d\n",
IF_MACOS_ELSE("", "rt_"), sig, act, oact, sigsetsize);
/* post_syscall does some work as well */
dcontext->sys_param0 = (reg_t)sig;
dcontext->sys_param1 = (reg_t)act;
dcontext->sys_param2 = (reg_t)oact;
dcontext->sys_param3 = (reg_t)sigsetsize;
execute_syscall = handle_sigaction(dcontext, sig, act, oact, sigsetsize, &res);
if (!execute_syscall) {
LOG(THREAD, LOG_SYSCALLS, 2, "sigaction emulation => %d\n", -res);
if (res == 0)
set_success_return_val(dcontext, 0);
else
set_failure_return_val(dcontext, res);
}
break;
}
#if defined(LINUX) && !defined(X64)
case SYS_sigaction: { /* 67 */
/* sys_sigaction(int sig, const struct old_sigaction *act,
* struct old_sigaction *oact)
*/
int sig = (int)sys_param(dcontext, 0);
const old_sigaction_t *act = (const old_sigaction_t *)sys_param(dcontext, 1);
old_sigaction_t *oact = (old_sigaction_t *)sys_param(dcontext, 2);
uint res;
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: sigaction %d " PFX " " PFX "\n", sig, act,
oact);
dcontext->sys_param0 = (reg_t)sig;
dcontext->sys_param1 = (reg_t)act;
dcontext->sys_param2 = (reg_t)oact;
execute_syscall = handle_old_sigaction(dcontext, sig, act, oact, &res);
if (!execute_syscall) {
LOG(THREAD, LOG_SYSCALLS, 2, "sigaction emulation => %d\n", -res);
if (res == 0)
set_success_return_val(dcontext, 0);
else
set_failure_return_val(dcontext, res);
}
break;
}
#endif
#if defined(LINUX) && !defined(X64)
case SYS_sigreturn: { /* 119 */
/* in /usr/src/linux/arch/i386/kernel/signal.c:
asmlinkage int sys_sigreturn(unsigned long __unused)
*/
execute_syscall = handle_sigreturn(dcontext, false);
/* app will not expect syscall to return, so when handle_sigreturn
* returns false it always redirects the context, and thus no
* need to set return val here.
*/
break;
}
#endif
#ifdef LINUX
case SYS_rt_sigreturn: { /* 173 */
/* in /usr/src/linux/arch/i386/kernel/signal.c:
asmlinkage int sys_rt_sigreturn(unsigned long __unused)
*/
execute_syscall = handle_sigreturn(dcontext, true);
/* see comment for SYS_sigreturn on return val */
break;
}
#endif
#ifdef MACOS
case SYS_sigreturn: {
/* int sigreturn(struct ucontext *uctx, int infostyle) */
execute_syscall = handle_sigreturn(dcontext, (void *)sys_param(dcontext, 0),
(int)sys_param(dcontext, 1));
/* see comment for SYS_sigreturn on return val */
break;
}
#endif
case SYS_sigaltstack: { /* 186 */
/* in /usr/src/linux/arch/i386/kernel/signal.c:
asmlinkage int
sys_sigaltstack(const stack_t *uss, stack_t *uoss)
*/
const stack_t *uss = (const stack_t *)sys_param(dcontext, 0);
stack_t *uoss = (stack_t *)sys_param(dcontext, 1);
uint res;
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: sigaltstack " PFX " " PFX "\n", uss, uoss);
execute_syscall =
handle_sigaltstack(dcontext, uss, uoss, get_mcontext(dcontext)->xsp, &res);
if (!execute_syscall) {
LOG(THREAD, LOG_SYSCALLS, 2, "sigaltstack emulation => %d\n", -res);
if (res == 0)
set_success_return_val(dcontext, res);
else
set_failure_return_val(dcontext, res);
}
break;
}
case IF_MACOS_ELSE(SYS_sigprocmask, SYS_rt_sigprocmask): { /* 175 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset,
size_t sigsetsize)
*/
/* we also need access to the params in post_system_call */
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
dcontext->sys_param2 = sys_param(dcontext, 2);
dcontext->sys_param3 = sys_param(dcontext, 3);
execute_syscall = handle_sigprocmask(dcontext, (int)sys_param(dcontext, 0),
(kernel_sigset_t *)sys_param(dcontext, 1),
(kernel_sigset_t *)sys_param(dcontext, 2),
(size_t)sys_param(dcontext, 3));
if (!execute_syscall)
set_success_return_val(dcontext, 0);
break;
}
#ifdef MACOS
case SYS_sigsuspend_nocancel:
#endif
case IF_MACOS_ELSE(SYS_sigsuspend, SYS_rt_sigsuspend): { /* 179 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage int
sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize)
*/
handle_sigsuspend(dcontext, (kernel_sigset_t *)sys_param(dcontext, 0),
(size_t)sys_param(dcontext, 1));
break;
}
#ifdef LINUX
# ifdef SYS_signalfd
case SYS_signalfd: /* 282/321 */
# endif
case SYS_signalfd4: { /* 289 */
/* int signalfd (int fd, const sigset_t *mask, size_t sizemask) */
/* int signalfd4(int fd, const sigset_t *mask, size_t sizemask, int flags) */
ptr_int_t new_result;
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
dcontext->sys_param2 = sys_param(dcontext, 2);
# ifdef SYS_signalfd
if (dcontext->sys_num == SYS_signalfd)
dcontext->sys_param3 = 0;
else
# endif
dcontext->sys_param3 = sys_param(dcontext, 3);
new_result = handle_pre_signalfd(
dcontext, (int)dcontext->sys_param0, (kernel_sigset_t *)dcontext->sys_param1,
(size_t)dcontext->sys_param2, (int)dcontext->sys_param3);
execute_syscall = false;
/* since non-Mac, we can use this even if the call failed */
set_success_return_val(dcontext, new_result);
break;
}
#endif
case SYS_kill: { /* 37 */
/* in /usr/src/linux/kernel/signal.c:
* asmlinkage long sys_kill(int pid, int sig)
*/
pid_t pid = (pid_t)sys_param(dcontext, 0);
uint sig = (uint)sys_param(dcontext, 1);
LOG(GLOBAL, LOG_TOP | LOG_SYSCALLS, 2,
"thread " TIDFMT " sending signal %d to pid " PIDFMT "\n",
d_r_get_thread_id(), sig, pid);
        /* Check whether the target is this process or a process group containing
         * it: pid == 0 means the caller's process group, and a negative pid means
         * the process group |pid| (see kill(2)).
         */
if (pid == get_process_id() || pid == 0 || pid == -get_process_group_id()) {
handle_self_signal(dcontext, sig);
}
break;
}
#if defined(SYS_tkill)
case SYS_tkill: { /* 238 */
/* in /usr/src/linux/kernel/signal.c:
* asmlinkage long sys_tkill(int pid, int sig)
*/
pid_t tid = (pid_t)sys_param(dcontext, 0);
uint sig = (uint)sys_param(dcontext, 1);
LOG(GLOBAL, LOG_TOP | LOG_SYSCALLS, 2,
"thread " TIDFMT " sending signal %d to tid %d\n", d_r_get_thread_id(), sig,
tid);
if (tid == d_r_get_thread_id()) {
handle_self_signal(dcontext, sig);
}
break;
}
#endif
#if defined(SYS_tgkill)
case SYS_tgkill: { /* 270 */
/* in /usr/src/linux/kernel/signal.c:
* asmlinkage long sys_tgkill(int tgid, int pid, int sig)
*/
pid_t tgid = (pid_t)sys_param(dcontext, 0);
pid_t tid = (pid_t)sys_param(dcontext, 1);
uint sig = (uint)sys_param(dcontext, 2);
LOG(GLOBAL, LOG_TOP | LOG_SYSCALLS, 2,
"thread " TIDFMT " sending signal %d to tid %d tgid %d\n",
d_r_get_thread_id(), sig, tid, tgid);
        /* some kernels support -1 values:
         *   tgkill(-1, tid, sig)  == tkill(tid, sig)
         *   tgkill(tgid, -1, sig) == kill(tgid, sig)
         * the 2nd was proposed but is not in 2.6.20 so I'm ignoring it, since
         * I don't want to kill the thread when the signal is never sent!
         * FIXME: the 1st is in my tkill manpage, but not my 2.6.20 kernel sources!
         */
if ((tgid == -1 || tgid == get_process_id()) && tid == d_r_get_thread_id()) {
handle_self_signal(dcontext, sig);
}
break;
}
#endif
case SYS_setitimer: /* 104 */
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
dcontext->sys_param2 = sys_param(dcontext, 2);
handle_pre_setitimer(dcontext, (int)sys_param(dcontext, 0),
(const struct itimerval *)sys_param(dcontext, 1),
(struct itimerval *)sys_param(dcontext, 2));
break;
case SYS_getitimer: /* 105 */
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
break;
#if defined(LINUX) && defined(X86)
case SYS_alarm: /* 27 on x86 and 37 on x64 */
dcontext->sys_param0 = sys_param(dcontext, 0);
handle_pre_alarm(dcontext, (unsigned int)dcontext->sys_param0);
break;
#endif
#if 0
# ifndef X64
case SYS_signal: { /* 48 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
*/
break;
}
case SYS_sigsuspend: { /* 72 */
/* in /usr/src/linux/arch/i386/kernel/signal.c:
asmlinkage int
sys_sigsuspend(int history0, int history1, old_sigset_t mask)
*/
break;
}
case SYS_sigprocmask: { /* 126 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage long
sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset)
*/
break;
}
# endif
#else
/* until we've implemented them, keep down here to get warning: */
# if defined(LINUX) && !defined(X64)
# ifndef ARM
case SYS_signal:
# endif
case SYS_sigsuspend:
case SYS_sigprocmask:
# endif
#endif
#if defined(LINUX) && !defined(X64)
case SYS_sigpending: /* 73 */
# ifndef ARM
case SYS_sgetmask: /* 68 */
case SYS_ssetmask: /* 69 */
# endif
#endif
#ifdef LINUX
case SYS_rt_sigtimedwait: /* 177 */
case SYS_rt_sigqueueinfo: /* 178 */
#endif
case IF_MACOS_ELSE(SYS_sigpending, SYS_rt_sigpending): { /* 176 */
/* FIXME i#92: handle all of these syscalls! */
LOG(THREAD, LOG_ASYNCH | LOG_SYSCALLS, 1,
"WARNING: unhandled signal system call %d\n", dcontext->sys_num);
SYSLOG_INTERNAL_WARNING_ONCE("unhandled signal system call %d",
dcontext->sys_num);
break;
}
#ifdef LINUX
case SYS_ppoll: {
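        /* in fs/select.c:
         * sys_ppoll(struct pollfd *ufds, unsigned int nfds, struct timespec *tsp,
         *           const sigset_t *sigmask, size_t sigsetsize)
         * The kernel atomically installs sigmask for the duration of the wait;
         * DR emulates that below by nulling the app's mask argument and applying
         * the mask itself.
         */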
kernel_sigset_t *sigmask = (kernel_sigset_t *)sys_param(dcontext, 3);
dcontext->sys_param3 = (reg_t)sigmask;
if (sigmask == NULL)
break;
size_t sizemask = (size_t)sys_param(dcontext, 4);
        /* The app's sigmask parameter is now NULL, effectively making the syscall
         * the non-p* version; the mask's semantics are emulated by DR instead.
         */
set_syscall_param(dcontext, 3, (reg_t)NULL);
bool sig_pending = false;
if (!handle_pre_extended_syscall_sigmasks(dcontext, sigmask, sizemask,
&sig_pending)) {
/* In old kernels with sizeof(kernel_sigset_t) != sizemask, we're forcing
* failure. We're already violating app transparency in other places in DR.
*/
set_failure_return_val(dcontext, EINVAL);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
}
if (sig_pending) {
            /* A signal was already pending: revert our rewrite of the app's
             * parameter, but leave the modified signal mask in place.
             */
set_syscall_param(dcontext, 3, dcontext->sys_param3);
set_failure_return_val(dcontext, EINTR);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
}
break;
}
case SYS_pselect6: {
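        /* Syscalls are limited to 6 args, so pselect6 packs the sigmask and its
         * size into a struct whose address is passed as the 6th arg (fs/select.c):
         *   struct { const sigset_t *sigmask; size_t sizemask; };
         * The local data_t below mirrors that layout.
         */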
typedef struct {
kernel_sigset_t *sigmask;
size_t sizemask;
} data_t;
dcontext->sys_param3 = sys_param(dcontext, 5);
data_t *data_param = (data_t *)dcontext->sys_param3;
data_t data;
/* Refer to comments in SYS_ppoll above. Taking extra steps here due to struct
* argument in pselect6.
*/
if (!d_r_safe_read(data_param, sizeof(data), &data)) {
LOG(THREAD, LOG_SYSCALLS, 2, "\treturning EFAULT to app for pselect6\n");
set_failure_return_val(dcontext, EFAULT);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
break;
}
dcontext->sys_param4 = (reg_t)data.sigmask;
if (data.sigmask == NULL)
break;
kernel_sigset_t *nullsigmaskptr = NULL;
if (!safe_write_ex((void *)&data_param->sigmask, sizeof(data_param->sigmask),
&nullsigmaskptr, NULL)) {
set_failure_return_val(dcontext, EFAULT);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
break;
}
bool sig_pending = false;
if (!handle_pre_extended_syscall_sigmasks(dcontext, data.sigmask, data.sizemask,
&sig_pending)) {
set_failure_return_val(dcontext, EINVAL);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
}
if (sig_pending) {
if (!safe_write_ex((void *)&data_param->sigmask, sizeof(data_param->sigmask),
&dcontext->sys_param4, NULL)) {
set_failure_return_val(dcontext, EFAULT);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
break;
}
set_failure_return_val(dcontext, EINTR);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
}
break;
}
case SYS_epoll_pwait: {
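        /* in fs/eventpoll.c:
         * sys_epoll_pwait(int epfd, struct epoll_event *events, int maxevents,
         *                 int timeout, const sigset_t *sigmask, size_t sigsetsize)
         */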
kernel_sigset_t *sigmask = (kernel_sigset_t *)sys_param(dcontext, 4);
dcontext->sys_param4 = (reg_t)sigmask;
if (sigmask == NULL)
break;
size_t sizemask = (size_t)sys_param(dcontext, 5);
/* Refer to comments in SYS_ppoll above. */
set_syscall_param(dcontext, 4, (reg_t)NULL);
bool sig_pending = false;
if (!handle_pre_extended_syscall_sigmasks(dcontext, sigmask, sizemask,
&sig_pending)) {
set_failure_return_val(dcontext, EINVAL);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
}
if (sig_pending) {
set_syscall_param(dcontext, 4, dcontext->sys_param4);
set_failure_return_val(dcontext, EINTR);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
}
break;
}
#endif
/****************************************************************************/
/* FILES */
    /* prevent app from closing our files or opening a new file in our fd space.
     * it's not worth monitoring every syscall that takes in an fd just to keep
     * them from affecting ours.
     */
#ifdef MACOS
case SYS_close_nocancel:
#endif
case SYS_close: {
execute_syscall = handle_close_pre(dcontext);
#ifdef LINUX
if (execute_syscall)
signal_handle_close(dcontext, (file_t)sys_param(dcontext, 0));
#endif
break;
}
#ifdef SYS_dup2
case SYS_dup2:
IF_LINUX(case SYS_dup3:)
{
file_t newfd = (file_t)sys_param(dcontext, 1);
if (fd_is_dr_owned(newfd) || fd_is_in_private_range(newfd)) {
SYSLOG_INTERNAL_WARNING_ONCE("app trying to dup-close DR file(s)");
LOG(THREAD, LOG_TOP | LOG_SYSCALLS, 1,
"WARNING: app trying to dup2/dup3 to %d. Disallowing.\n", newfd);
if (DYNAMO_OPTION(fail_on_stolen_fds)) {
set_failure_return_val(dcontext, EBADF);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
} else
set_success_return_val(dcontext, 0);
execute_syscall = false;
}
break;
}
#endif
#ifdef MACOS
case SYS_fcntl_nocancel:
#endif
case SYS_fcntl: {
int cmd = (int)sys_param(dcontext, 1);
long arg = (long)sys_param(dcontext, 2);
        /* we only check the case where the requested minimum fd is itself in our
         * private space: the minimum may be below our range while the fd actually
         * chosen lands inside it (see notes in os_file_init())
         */
if ((cmd == F_DUPFD || cmd == F_DUPFD_CLOEXEC) && fd_is_in_private_range(arg)) {
SYSLOG_INTERNAL_WARNING_ONCE("app trying to open private fd(s)");
LOG(THREAD, LOG_TOP | LOG_SYSCALLS, 1,
"WARNING: app trying to dup to >= %d. Disallowing.\n", arg);
set_failure_return_val(dcontext, EINVAL);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
execute_syscall = false;
} else {
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = cmd;
}
break;
}
#if defined(X64) || !defined(ARM) || defined(MACOS)
case SYS_getrlimit:
#endif
#if defined(LINUX) && !defined(X64)
case SYS_ugetrlimit:
#endif
/* save for post */
dcontext->sys_param0 = sys_param(dcontext, 0); /* resource */
dcontext->sys_param1 = sys_param(dcontext, 1); /* rlimit */
break;
case SYS_setrlimit: {
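        /* in kernel/sys.c:
         * sys_setrlimit(unsigned int resource, struct rlimit *rlim)
         */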
int resource = (int)sys_param(dcontext, 0);
if (resource == RLIMIT_NOFILE && DYNAMO_OPTION(steal_fds) > 0) {
#if !defined(ARM) && !defined(X64) && !defined(MACOS)
struct compat_rlimit rlim;
#else
struct rlimit rlim;
#endif
if (!d_r_safe_read((void *)sys_param(dcontext, 1), sizeof(rlim), &rlim)) {
LOG(THREAD, LOG_SYSCALLS, 2, "\treturning EFAULT to app for prlimit64\n");
set_failure_return_val(dcontext, EFAULT);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
} else if (rlim.rlim_cur > rlim.rlim_max) {
LOG(THREAD, LOG_SYSCALLS, 2, "\treturning EINVAL for prlimit64\n");
set_failure_return_val(dcontext, EINVAL);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
} else if (rlim.rlim_max <= min_dr_fd &&
/* Can't raise hard unless have CAP_SYS_RESOURCE capability.
* XXX i#2980: should query for that capability.
*/
rlim.rlim_max <= app_rlimit_nofile.rlim_max) {
/* if the new rlimit is lower, pretend succeed */
app_rlimit_nofile.rlim_cur = rlim.rlim_cur;
app_rlimit_nofile.rlim_max = rlim.rlim_max;
set_success_return_val(dcontext, 0);
} else {
LOG(THREAD, LOG_SYSCALLS, 2, "\treturning EPERM to app for setrlimit\n");
/* don't let app raise limits as that would mess up our fd space */
set_failure_return_val(dcontext, EPERM);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
}
execute_syscall = false;
}
break;
}
#ifdef LINUX
case SYS_prlimit64:
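        /* in kernel/sys.c:
         * sys_prlimit64(pid_t pid, unsigned int resource,
         *               const struct rlimit64 *new_rlim, struct rlimit64 *old_rlim)
         */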
/* save for post */
dcontext->sys_param0 = sys_param(dcontext, 0); /* pid */
dcontext->sys_param1 = sys_param(dcontext, 1); /* resource */
dcontext->sys_param2 = sys_param(dcontext, 2); /* new rlimit */
dcontext->sys_param3 = sys_param(dcontext, 3); /* old rlimit */
if (/* XXX: how do we handle the case of setting rlimit.nofile on another
* process that is running with DynamoRIO?
*/
            /* XXX: CLONE_FILES allows different processes to share the same file
             * descriptor table, and different threads of the same process can have
             * separate file descriptor tables.  POSIX specifies that rlimits are
             * per-process, not per-thread, and Linux follows suit, so threads with
             * different descriptor tables do not matter, while pids sharing a
             * descriptor table turn into the hard-to-solve IPC problem.
             */
(dcontext->sys_param0 == 0 || dcontext->sys_param0 == get_process_id()) &&
dcontext->sys_param1 == RLIMIT_NOFILE &&
dcontext->sys_param2 != (reg_t)NULL && DYNAMO_OPTION(steal_fds) > 0) {
rlimit64_t rlim;
if (!d_r_safe_read((void *)(dcontext->sys_param2), sizeof(rlim), &rlim)) {
LOG(THREAD, LOG_SYSCALLS, 2, "\treturning EFAULT to app for prlimit64\n");
set_failure_return_val(dcontext, EFAULT);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
} else {
LOG(THREAD, LOG_SYSCALLS, 2,
"syscall: prlimit64 soft=" INT64_FORMAT_STRING
" hard=" INT64_FORMAT_STRING " vs DR %d\n",
rlim.rlim_cur, rlim.rlim_max, min_dr_fd);
if (rlim.rlim_cur > rlim.rlim_max) {
LOG(THREAD, LOG_SYSCALLS, 2, "\treturning EINVAL for prlimit64\n");
set_failure_return_val(dcontext, EINVAL);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
} else if (rlim.rlim_max <= min_dr_fd &&
/* Can't raise hard unless have CAP_SYS_RESOURCE capability.
* XXX i#2980: should query for that capability.
*/
rlim.rlim_max <= app_rlimit_nofile.rlim_max) {
/* if the new rlimit is lower, pretend succeed */
app_rlimit_nofile.rlim_cur = rlim.rlim_cur;
app_rlimit_nofile.rlim_max = rlim.rlim_max;
set_success_return_val(dcontext, 0);
/* set old rlimit if necessary */
if (dcontext->sys_param3 != (reg_t)NULL) {
safe_write_ex((void *)(dcontext->sys_param3), sizeof(rlim),
&app_rlimit_nofile, NULL);
}
} else {
/* don't let app raise limits as that would mess up our fd space */
LOG(THREAD, LOG_SYSCALLS, 2,
"\treturning EPERM to app for prlimit64\n");
set_failure_return_val(dcontext, EPERM);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
}
}
execute_syscall = false;
}
break;
#endif
#ifdef LINUX
# ifdef SYS_readlink
case SYS_readlink:
# endif
case SYS_readlinkat:
if (DYNAMO_OPTION(early_inject)) {
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
dcontext->sys_param2 = sys_param(dcontext, 2);
if (dcontext->sys_num == SYS_readlinkat)
dcontext->sys_param3 = sys_param(dcontext, 3);
}
break;
/* i#107 syscalls that might change/query app's segment */
# if defined(X86) && defined(X64)
case SYS_arch_prctl: {
/* we handle arch_prctl in post_syscall */
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
break;
}
# endif
# ifdef X86
case SYS_set_thread_area: {
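        /* in arch/x86/kernel/tls.c:
         * sys_set_thread_area(struct user_desc *u_info)
         */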
our_modify_ldt_t desc;
if (INTERNAL_OPTION(mangle_app_seg) &&
d_r_safe_read((void *)sys_param(dcontext, 0), sizeof(desc), &desc)) {
if (os_set_app_thread_area(dcontext, &desc) &&
safe_write_ex((void *)sys_param(dcontext, 0), sizeof(desc), &desc,
NULL)) {
/* check if the range is unlimited */
ASSERT_CURIOSITY(desc.limit == 0xfffff);
execute_syscall = false;
set_success_return_val(dcontext, 0);
}
}
break;
}
case SYS_get_thread_area: {
our_modify_ldt_t desc;
if (INTERNAL_OPTION(mangle_app_seg) &&
d_r_safe_read((const void *)sys_param(dcontext, 0), sizeof(desc), &desc)) {
if (os_get_app_thread_area(dcontext, &desc) &&
safe_write_ex((void *)sys_param(dcontext, 0), sizeof(desc), &desc,
NULL)) {
execute_syscall = false;
set_success_return_val(dcontext, 0);
}
}
break;
}
# endif /* X86 */
# ifdef ARM
case SYS_set_tls: {
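        /* ARM-private syscall taking a single arg: set_tls(unsigned long val)
         * sets the app's TLS base register (TPIDRURO on ARMv7).
         */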
LOG(THREAD, LOG_VMAREAS | LOG_SYSCALLS, 2, "syscall: set_tls " PFX "\n",
sys_param(dcontext, 0));
if (os_set_app_tls_base(dcontext, TLS_REG_LIB, (void *)sys_param(dcontext, 0))) {
execute_syscall = false;
set_success_return_val(dcontext, 0);
} else {
ASSERT_NOT_REACHED();
}
break;
}
case SYS_cacheflush: {
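        /* ARM-specific syscall: cacheflush(long start, long end, long flags),
         * used to synchronize the I and D caches after code modification.
         */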
/* We assume we don't want to change the executable_areas list or change
* the selfmod status of this region: else we should call something
* that invokes handle_modified_code() in a way that handles a bigger
* region than a single write.
*/
app_pc start = (app_pc)sys_param(dcontext, 0);
app_pc end = (app_pc)sys_param(dcontext, 1);
LOG(THREAD, LOG_VMAREAS | LOG_SYSCALLS, 2,
"syscall: cacheflush " PFX "-" PFX "\n", start, end);
flush_fragments_from_region(dcontext, start, end - start,
/* An unlink flush should be fine: the app must
* use synch to ensure other threads see the
* new code.
*/
false /*don't force synchall*/);
break;
}
# endif /* ARM */
#elif defined(MACOS)
/* FIXME i#58: handle i386_{get,set}_ldt and thread_fast_set_cthread_self64 */
#endif
#ifdef DEBUG
# ifdef MACOS
case SYS_open_nocancel:
# endif
# ifdef SYS_open
case SYS_open: {
dcontext->sys_param0 = sys_param(dcontext, 0);
break;
}
# endif
#endif
default: {
#ifdef LINUX
execute_syscall = handle_restartable_region_syscall_pre(dcontext);
#endif
#ifdef VMX86_SERVER
if (is_vmkuw_sysnum(dcontext->sys_num)) {
execute_syscall = vmkuw_pre_system_call(dcontext);
break;
}
#endif
break;
}
} /* end switch */
dcontext->whereami = old_whereami;
return execute_syscall;
}
void
all_memory_areas_lock(void)
{
IF_NO_MEMQUERY(memcache_lock());
}
void
all_memory_areas_unlock(void)
{
IF_NO_MEMQUERY(memcache_unlock());
}
void
update_all_memory_areas(app_pc start, app_pc end, uint prot, int type)
{
IF_NO_MEMQUERY(memcache_update(start, end, prot, type));
}
bool
remove_from_all_memory_areas(app_pc start, app_pc end)
{
IF_NO_MEMQUERY(return memcache_remove(start, end));
return true;
}
/* We consider a module load to happen at the first mmap, so we check on later
* overmaps to ensure things look consistent. */
static bool
mmap_check_for_module_overlap(app_pc base, size_t size, bool readable, uint64 inode,
bool at_map)
{
module_area_t *ma;
os_get_module_info_lock();
ma = module_pc_lookup(base);
if (ma != NULL) {
        /* FIXME - how can we distinguish between the loader mapping the segments
         * over the initial map and someone just mapping over part of a module?  In
         * the latter case we need to adjust the view size or remove from module list. */
LOG(GLOBAL, LOG_VMAREAS, 2,
"%s mmap overlapping module area : \n"
"\tmap : base=" PFX " base+size=" PFX " inode=" UINT64_FORMAT_STRING "\n"
"\tmod : start=" PFX " end=" PFX " inode=" UINT64_FORMAT_STRING "\n",
at_map ? "new" : "existing", base, base + size, inode, ma->start, ma->end,
ma->names.inode);
ASSERT_CURIOSITY(base >= ma->start);
if (at_map) {
ASSERT_CURIOSITY(base + size <= ma->end);
} else {
            /* FIXME - I'm having problems with this check for existing maps.  I
             * haven't been able to get gdb to break in early enough to really get a good
             * look at the early loader behavior.  Two issues: One case is with our .so
             * for which the anonymous .bss mapping is one page larger than expected
             * (which might be some loader bug in the size calculation? or something? if
             * so we should see it trigger the at_map curiosity on some dll and can address
             * it then) and the other is that for a few executables the .bss mapping is much
             * larger (~0x20000 larger) than expected when running under DR (but not
             * running natively where it is instead the expected size).  Both could just
             * be the loader merging adjacent identically protected regions though I
             * can't explain the discrepancy between DR and native given that our vmmheap
             * is elsewhere in the address space (so who allocated that adjacent
             * memory, and how?).  I've yet to see any issue with dynamically loaded
             * modules so it's probably the loader merging regions.  Still worth
             * investigating. */
ASSERT_CURIOSITY(inode == 0 /*see above comment*/ ||
module_contains_addr(ma, base + size - 1));
}
/* Handle cases like transparent huge pages where there are anon regions on top
* of the file mapping (i#2566).
*/
if (ma->names.inode == 0)
ma->names.inode = inode;
ASSERT_CURIOSITY(ma->names.inode == inode || inode == 0 /* for .bss */);
DOCHECK(1, {
if (readable && module_is_header(base, size)) {
/* Case 8879: For really small modules, to save disk space, the same
* disk page could hold both RO and .data, occupying just 1 page of
* disk space, e.g. /usr/lib/httpd/modules/mod_auth_anon.so. When
* such a module is mapped in, the os maps the same disk page twice,
* one readonly and one copy-on-write (see pg. 96, Sec 4.4 from
* Linkers and Loaders by John R. Levine). This makes the data
* section also satisfy the elf_header check above. So, if the new
* mmap overlaps an elf_area and it is also a header, then make sure
* the previous page (correcting for alignment) is also a elf_header.
* Note, if it is a header of a different module, then we'll not have
* an overlap, so we will not hit this case.
*/
ASSERT_CURIOSITY(
ma->start + ma->os_data.alignment ==
base
/* On Mac we walk the dyld module list before the
* address space, so we often hit modules we already
* know about. */
IF_MACOS(|| !dynamo_initialized && ma->start == base));
}
});
}
os_get_module_info_unlock();
#ifdef ANDROID
/* i#1860: we need to keep looking for the segment with .dynamic as Android's
* loader does not map the whole file up front.
*/
if (ma != NULL && at_map && readable)
os_module_update_dynamic_info(base, size, at_map);
#endif
return ma != NULL;
}
static void
os_add_new_app_module(dcontext_t *dcontext, bool at_map, app_pc base, size_t size,
uint memprot)
{
memquery_iter_t iter;
bool found_map = false;
uint64 inode = 0;
const char *filename = "";
size_t mod_size = size;
if (!at_map) {
/* the size is the first seg size, get the whole module size instead */
app_pc first_seg_base = NULL;
app_pc first_seg_end = NULL;
app_pc last_seg_end = NULL;
if (module_walk_program_headers(base, size, at_map, false, &first_seg_base,
&first_seg_end, &last_seg_end, NULL, NULL)) {
ASSERT_CURIOSITY(size ==
(ALIGN_FORWARD(first_seg_end, PAGE_SIZE) -
(ptr_uint_t)first_seg_base) ||
base == vdso_page_start || base == vsyscall_page_start);
mod_size =
ALIGN_FORWARD(last_seg_end, PAGE_SIZE) - (ptr_uint_t)first_seg_base;
}
}
LOG(THREAD, LOG_SYSCALLS | LOG_VMAREAS, 2, "dlopen " PFX "-" PFX "%s\n", base,
base + mod_size, TEST(MEMPROT_EXEC, memprot) ? " +x" : "");
/* Mapping in a new module. From what we've observed of the loader's
* behavior, it first maps the file in with size equal to the final
* memory image size (I'm not sure how it gets that size without reading
* in the elf header and then walking through all the program headers to
* get the largest virtual offset). This is necessary to reserve all the
* space that will be needed. It then walks through the program headers
     * mapping over the previously mapped space with the appropriate
* permissions and offsets. Note that the .bss portion is mapped over
* as anonymous. It may also, depending on the program headers, make some
* areas read-only after fixing up their relocations etc. NOTE - at
* no point are the section headers guaranteed to be mapped in so we can't
* reliably walk sections (only segments) without looking to disk.
*/
/* FIXME - when should we add the module to our list? At the first map
* seems to be the best choice as we know the bounds and it's difficult to
* tell when the loader is finished. The downside is that at the initial map
* the memory layout isn't finalized (memory beyond the first segment will
* be shifted for page alignment reasons), so we have to be careful and
* make adjustments to read anything beyond the first segment until the
* loader finishes. This goes for the client too as it gets notified when we
* add to the list. FIXME we could try to track the expected segment overmaps
* and only notify the client after the last one (though that's still before
* linking and relocation, but that's true on Windows too). */
/* Get filename & inode for the list. */
memquery_iterator_start(&iter, base, true /* plan to alloc a module_area_t */);
while (memquery_iterator_next(&iter)) {
if (iter.vm_start == base) {
ASSERT_CURIOSITY(iter.inode != 0 || base == vdso_page_start ||
base == vsyscall_page_start);
ASSERT_CURIOSITY(iter.offset == 0); /* first map shouldn't have offset */
/* XREF 307599 on rounding module end to the next PAGE boundary */
ASSERT_CURIOSITY(
(iter.vm_end - iter.vm_start == ALIGN_FORWARD(size, PAGE_SIZE)));
inode = iter.inode;
filename = dr_strdup(iter.comment HEAPACCT(ACCT_OTHER));
found_map = true;
break;
}
}
memquery_iterator_stop(&iter);
#ifdef HAVE_MEMINFO
    /* barring weird races we should find this map */
ASSERT_CURIOSITY(found_map);
#else /* HAVE_MEMINFO */
/* Without /proc/maps or other memory querying interface available at
* library map time, there is no way to find out the name of the file
* that was mapped, thus its inode isn't available either.
*
* Just module_list_add with no filename will still result in
* library name being extracted from the .dynamic section and added
* to the module list. However, this name may not always exist, thus
* we might have a library with no file name available at all!
*
     * Note: visor implements vsi mem maps that give file info but no
     * path; should be ok.  xref PR 401580.
*
* Once PR 235433 is implemented in visor then fix memquery_iterator*() to
* use vsi to find out page protection info, file name & inode.
*/
#endif /* HAVE_MEMINFO */
/* XREF 307599 on rounding module end to the next PAGE boundary */
if (found_map) {
module_list_add(base, ALIGN_FORWARD(mod_size, PAGE_SIZE), at_map, filename,
inode);
dr_strfree(filename HEAPACCT(ACCT_OTHER));
}
}
void
os_check_new_app_module(dcontext_t *dcontext, app_pc pc)
{
module_area_t *ma;
os_get_module_info_lock();
ma = module_pc_lookup(pc);
/* ma might be NULL due to dynamic generated code or custom loaded modules */
if (ma == NULL) {
dr_mem_info_t info;
/* i#1760: an app module loaded by custom loader (e.g., bionic libc)
* might not be detected by DynamoRIO in process_mmap.
*/
if (query_memory_ex_from_os(pc, &info) && info.type == DR_MEMTYPE_IMAGE) {
/* add the missing module */
os_get_module_info_unlock();
os_add_new_app_module(get_thread_private_dcontext(), false /*!at_map*/,
info.base_pc, info.size, info.prot);
os_get_module_info_lock();
}
}
os_get_module_info_unlock();
}
/* All processing for mmap and mmap2. */
static void
process_mmap(dcontext_t *dcontext, app_pc base, size_t size, uint prot,
uint flags _IF_DEBUG(const char *map_type))
{
bool image = false;
uint memprot = osprot_to_memprot(prot);
#ifdef ANDROID
/* i#1861: avoid merging file-backed w/ anon regions */
if (!TEST(MAP_ANONYMOUS, flags))
memprot |= MEMPROT_HAS_COMMENT;
#endif
LOG(THREAD, LOG_SYSCALLS, 4, "process_mmap(" PFX "," PFX ",0x%x,%s,%s)\n", base, size,
flags, memprot_string(memprot), map_type);
/* Notes on how ELF SOs are mapped in.
*
* o The initial mmap for an ELF file specifies enough space for
* all segments (and their constituent sections) in the file.
* The protection bits for that section are used for the entire
* region, and subsequent mmaps for subsequent segments within
* the region modify their portion's protection bits as needed.
* So if the prot bits for the first segment are +x, the entire
* region is +x. ** Note that our primary concern is adjusting
* exec areas to reflect the prot bits of subsequent
* segments. ** The region is added to the all-memory areas
* and also to exec areas (as determined by app_memory_allocation()).
*
* o Any subsequent segment sub-mappings specify their own protection
* bits and therefore are added to the exec areas via normal
* processing. They are also "naturally" added to the all-mems list.
* We do a little extra processing when mapping into a previously
* mapped region and the prot bits mismatch; if the new mapping is
* not +x, flushing needs to occur.
*/
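    /* An illustrative (not exhaustive) dlopen sequence as seen under strace;
     * exact offsets and flags vary by loader and kernel version:
     *   mmap(NULL, image_size, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, fd, 0)
     *   mmap(base+data_off, data_sz, PROT_READ|PROT_WRITE,
     *        MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, fd, file_off)
     *   mmap(base+bss_off, bss_sz, PROT_READ|PROT_WRITE,
     *        MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0)
     */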
/* process_mmap can be called with PROT_NONE, so we need to check if we
     * can read the memory to see if it is an elf_header
*/
/* XXX: get inode for check */
if (TEST(MAP_ANONYMOUS, flags)) {
/* not an ELF mmap */
LOG(THREAD, LOG_SYSCALLS, 4, "mmap " PFX ": anon\n", base);
} else if (mmap_check_for_module_overlap(base, size, TEST(MEMPROT_READ, memprot), 0,
true)) {
        /* FIXME - how can we distinguish between the loader mapping the segments
         * over the initial map and someone just mapping over part of a module?  In
         * the latter case we need to adjust the view size or remove from module list. */
image = true;
DODEBUG({ map_type = "ELF SO"; });
LOG(THREAD, LOG_SYSCALLS, 4, "mmap " PFX ": overlaps image\n", base);
} else if (TEST(MEMPROT_READ, memprot) &&
/* i#727: We can still get SIGBUS on mmap'ed files that can't be
* read, so pass size=0 to use a safe_read.
*/
module_is_header(base, 0)) {
#ifdef ANDROID
/* The Android loader's initial all-segment-covering mmap is anonymous */
dr_mem_info_t info;
if (query_memory_ex_from_os((byte *)ALIGN_FORWARD(base + size, PAGE_SIZE),
&info) &&
info.prot == MEMPROT_NONE && info.type == DR_MEMTYPE_DATA) {
LOG(THREAD, LOG_SYSCALLS, 4, "mmap " PFX ": Android elf\n", base);
image = true;
DODEBUG({ map_type = "ELF SO"; });
os_add_new_app_module(dcontext, true /*at_map*/, base,
/* pass segment size, not whole module size */
size, memprot);
} else
#endif
if (module_is_partial_map(base, size, memprot)) {
/* i#1240: App might read first page of ELF header using mmap, which
* might accidentally be treated as a module load. Heuristically
* distinguish this by saying that if this is the first mmap for an ELF
* (i.e., it doesn't overlap with a previous map), and if it's small,
* then don't treat it as a module load.
*/
LOG(THREAD, LOG_SYSCALLS, 4, "mmap " PFX ": partial\n", base);
} else {
LOG(THREAD, LOG_SYSCALLS, 4, "mmap " PFX ": elf header\n", base);
image = true;
DODEBUG({ map_type = "ELF SO"; });
os_add_new_app_module(dcontext, true /*at_map*/, base, size, memprot);
}
}
IF_NO_MEMQUERY(memcache_handle_mmap(dcontext, base, size, memprot, image));
/* app_memory_allocation() expects to not see an overlap -- exec areas
* doesn't expect one. We have yet to see a +x mmap into a previously
* mapped +x region, but we do check and handle in pre-syscall (i#1175).
*/
LOG(THREAD, LOG_SYSCALLS, 4, "\t try app_mem_alloc\n");
if (app_memory_allocation(dcontext, base, size, memprot, image _IF_DEBUG(map_type)))
STATS_INC(num_app_code_modules);
LOG(THREAD, LOG_SYSCALLS, 4, "\t app_mem_alloc -- DONE\n");
}
#ifdef LINUX
/* Call right after the system call.
* i#173: old_prot and old_type should be from before the system call
*/
static bool
handle_app_mremap(dcontext_t *dcontext, byte *base, size_t size, byte *old_base,
size_t old_size, uint old_prot, uint old_type)
{
if (!mmap_syscall_succeeded(base))
return false;
if (base != old_base || size < old_size) { /* take action only if
* there was a change */
DEBUG_DECLARE(bool ok;)
/* fragments were shifted...don't try to fix them, just flush */
app_memory_deallocation(dcontext, (app_pc)old_base, old_size,
false /* don't own thread_initexit_lock */,
false /* not image, FIXME: somewhat arbitrary */);
DOCHECK(1, {
/* we don't expect to see remappings of modules */
os_get_module_info_lock();
ASSERT_CURIOSITY(!module_overlaps(base, size));
os_get_module_info_unlock();
});
/* Verify that the current prot on the new region (according to
* the os) is the same as what the prot used to be for the old
* region.
*/
DOCHECK(1, {
uint memprot;
ok = get_memory_info_from_os(base, NULL, NULL, &memprot);
/* allow maps to have +x,
* +x may be caused by READ_IMPLIES_EXEC set in personality flag (i#262)
*/
ASSERT(ok &&
(memprot == old_prot || (memprot & (~MEMPROT_EXEC)) == old_prot));
});
app_memory_allocation(dcontext, base, size, old_prot,
old_type == DR_MEMTYPE_IMAGE _IF_DEBUG("mremap"));
IF_NO_MEMQUERY(memcache_handle_mremap(dcontext, base, size, old_base, old_size,
old_prot, old_type));
}
return true;
}
static void
handle_app_brk(dcontext_t *dcontext, byte *lowest_brk /*if known*/, byte *old_brk,
byte *new_brk)
{
/* i#851: the brk might not be page aligned */
old_brk = (app_pc)ALIGN_FORWARD(old_brk, PAGE_SIZE);
new_brk = (app_pc)ALIGN_FORWARD(new_brk, PAGE_SIZE);
if (new_brk < old_brk) {
/* Usually the heap is writable, so we don't really need to call
* this here: but seems safest to do so, esp if someone made part of
* the heap read-only and then put code there.
*/
app_memory_deallocation(dcontext, new_brk, old_brk - new_brk,
false /* don't own thread_initexit_lock */,
false /* not image */);
} else if (new_brk > old_brk) {
/* No need to call app_memory_allocation() as doesn't interact
* w/ security policies.
*/
}
IF_NO_MEMQUERY(memcache_handle_app_brk(lowest_brk, old_brk, new_brk));
}
#endif
/* This routine is *not* called if pre_system_call() returns false to skip
 * the syscall.
 */
/* XXX: split out specific handlers into separate routines
*/
void
post_system_call(dcontext_t *dcontext)
{
priv_mcontext_t *mc = get_mcontext(dcontext);
/* registers have been clobbered, so sysnum is kept in dcontext */
int sysnum = dcontext->sys_num;
    /* We expect most syscall failures to return < 0, so >= 0 is success.
     * Some syscalls return addresses that have the sign bit set and so
     * appear to be failures but are not.  They are handled on a
     * case-by-case basis in the switch statement below.
     */
ptr_int_t result = (ptr_int_t)MCXT_SYSCALL_RES(mc); /* signed */
bool success = syscall_successful(mc, sysnum);
app_pc base;
size_t size;
uint prot;
dr_where_am_i_t old_whereami;
DEBUG_DECLARE(bool ok;)
RSTATS_INC(post_syscall);
old_whereami = dcontext->whereami;
dcontext->whereami = DR_WHERE_SYSCALL_HANDLER;
#if defined(LINUX) && defined(X86)
/* PR 313715: restore xbp since for some vsyscall sequences that use
* the syscall instruction its value is needed:
* 0xffffe400 <__kernel_vsyscall+0>: push %ebp
* 0xffffe401 <__kernel_vsyscall+1>: mov %ecx,%ebp
* 0xffffe403 <__kernel_vsyscall+3>: syscall
* 0xffffe405 <__kernel_vsyscall+5>: mov $0x2b,%ecx
* 0xffffe40a <__kernel_vsyscall+10>: movl %ecx,%ss
* 0xffffe40c <__kernel_vsyscall+12>: mov %ebp,%ecx
* 0xffffe40e <__kernel_vsyscall+14>: pop %ebp
* 0xffffe40f <__kernel_vsyscall+15>: ret
*/
if (should_syscall_method_be_sysenter() && !dcontext->sys_was_int) {
mc->xbp = dcontext->sys_xbp;
}
#endif
/* handle fork, try to do it early before too much logging occurs */
if (false
#ifdef SYS_fork
|| sysnum ==
SYS_fork
#endif
IF_LINUX(
|| (sysnum == SYS_clone && !TEST(CLONE_VM, dcontext->sys_param0)))) {
if (result == 0) {
/* we're the child */
thread_id_t child = get_sys_thread_id();
#ifdef DEBUG
thread_id_t parent = get_parent_id();
SYSLOG_INTERNAL_INFO("-- parent %d forked child %d --", parent, child);
#endif
/* first, fix TLS of dcontext */
ASSERT(parent != 0);
/* change parent pid to our pid */
replace_thread_id(dcontext->owning_thread, child);
dcontext->owning_thread = child;
dcontext->owning_process = get_process_id();
            /* now let dynamo initialize new shared memory, logfiles, etc.
             * this needs access to static vars in dynamo.c, which is why we
             * don't do it here. */
            /* FIXME - xref PR 246902 - d_r_dispatch runs a lot of code before
             * getting to post_system_call(): is any of that going to be messed up
             * by waiting till here to fix up the child logfolder/file and tid?
             */
dynamorio_fork_init(dcontext);
LOG(THREAD, LOG_SYSCALLS, 1,
"after fork-like syscall: parent is %d, child is %d\n", parent, child);
} else {
/* we're the parent */
os_fork_post(dcontext, true /*parent*/);
}
}
LOG(THREAD, LOG_SYSCALLS, 2, "post syscall: sysnum=" PFX ", result=" PFX " (%d)\n",
sysnum, MCXT_SYSCALL_RES(mc), (int)MCXT_SYSCALL_RES(mc));
switch (sysnum) {
/****************************************************************************/
/* MEMORY REGIONS */
#ifdef DEBUG
# ifdef MACOS
case SYS_open_nocancel:
# endif
# ifdef SYS_open
case SYS_open: {
if (success) {
/* useful for figuring out what module was loaded that then triggers
* module.c elf curiosities
*/
LOG(THREAD, LOG_SYSCALLS, 2, "SYS_open %s => %d\n", dcontext->sys_param0,
(int)result);
}
break;
}
# endif
#endif
#if defined(LINUX) && !defined(X64) && !defined(ARM)
case SYS_mmap:
#endif
case IF_MACOS_ELSE(SYS_mmap, IF_X64_ELSE(SYS_mmap, SYS_mmap2)): {
uint flags;
DEBUG_DECLARE(const char *map_type;)
RSTATS_INC(num_app_mmaps);
base = (app_pc)MCXT_SYSCALL_RES(mc); /* For mmap, it's NOT arg->addr! */
/* mmap isn't simply a user-space wrapper for mmap2. It's called
* directly when dynamically loading an SO, i.e., dlopen(). */
#ifdef LINUX /* MacOS success is in CF */
success = mmap_syscall_succeeded((app_pc)result);
/* The syscall either failed OR the retcode is less than the
* largest uint value of any errno and the addr returned is
* page-aligned.
*/
ASSERT_CURIOSITY(
!success ||
((app_pc)result < (app_pc)(ptr_int_t)-0x1000 && ALIGNED(base, PAGE_SIZE)));
#else
ASSERT_CURIOSITY(!success || ALIGNED(base, PAGE_SIZE));
#endif
if (!success)
goto exit_post_system_call;
#if defined(LINUX) && !defined(X64) && !defined(ARM)
if (sysnum == SYS_mmap) {
/* The syscall succeeded so the read of 'arg' should be
* safe. */
mmap_arg_struct_t *arg = (mmap_arg_struct_t *)dcontext->sys_param0;
size = (size_t)arg->len;
prot = (uint)arg->prot;
flags = (uint)arg->flags;
DEBUG_DECLARE(map_type = "mmap";)
} else {
#endif
size = (size_t)dcontext->sys_param1;
prot = (uint)dcontext->sys_param2;
flags = (uint)dcontext->sys_param3;
DEBUG_DECLARE(map_type = IF_X64_ELSE("mmap2", "mmap");)
#if defined(LINUX) && !defined(X64) && !defined(ARM)
}
#endif
process_mmap(dcontext, base, size, prot, flags _IF_DEBUG(map_type));
break;
}
case SYS_munmap: {
app_pc addr = (app_pc)dcontext->sys_param0;
size_t len = (size_t)dcontext->sys_param1;
/* We assumed in pre_system_call() that the unmap would succeed
* and flushed fragments and removed the region from exec areas.
* If the unmap failed, we re-add the region to exec areas.
*
* The same logic can be used on Windows (but isn't yet).
*/
/* FIXME There are shortcomings to the approach. If another thread
* executes in the region after our pre_system_call processing
* but before the re-add below, it will get a security violation.
* That's less than ideal but at least isn't a security hole.
* The overall shortcoming is that we lose the state from our
* stateful security policies -- future exec list, tables used
* for RCT (.C/.E/.F) -- which can't be easily restored. Also,
* the re-add could add a region that wasn't on the exec list
* previously.
*
* See case 7559 for a better approach.
*/
if (!success) {
dr_mem_info_t info;
            /* must go to os to get real memory info since we already removed it */
DEBUG_DECLARE(ok =)
query_memory_ex_from_os(addr, &info);
ASSERT(ok);
app_memory_allocation(dcontext, addr, len, info.prot,
info.type ==
DR_MEMTYPE_IMAGE _IF_DEBUG("failed munmap"));
IF_NO_MEMQUERY(
memcache_update_locked((app_pc)ALIGN_BACKWARD(addr, PAGE_SIZE),
(app_pc)ALIGN_FORWARD(addr + len, PAGE_SIZE),
info.prot, info.type, false /*add back*/));
}
break;
}
#ifdef LINUX
case SYS_mremap: {
app_pc old_base = (app_pc)dcontext->sys_param0;
size_t old_size = (size_t)dcontext->sys_param1;
base = (app_pc)MCXT_SYSCALL_RES(mc);
size = (size_t)dcontext->sys_param2;
/* even if no shift, count as munmap plus mmap */
RSTATS_INC(num_app_munmaps);
RSTATS_INC(num_app_mmaps);
success =
handle_app_mremap(dcontext, base, size, old_base, old_size,
/* i#173: use memory prot and type
* obtained from pre_system_call
*/
(uint)dcontext->sys_param3, (uint)dcontext->sys_param4);
/* The syscall either failed OR the retcode is less than the
         * largest uint value of any errno and the addr returned is
         * page-aligned.
*/
ASSERT_CURIOSITY(
!success ||
((app_pc)result < (app_pc)(ptr_int_t)-0x1000 && ALIGNED(base, PAGE_SIZE)));
if (!success)
goto exit_post_system_call;
break;
}
#endif
case SYS_mprotect: {
base = (app_pc)dcontext->sys_param0;
size = dcontext->sys_param1;
prot = dcontext->sys_param2;
#ifdef VMX86_SERVER
/* PR 475111: workaround for PR 107872 */
if (os_in_vmkernel_userworld() && result == -EBUSY && prot == PROT_NONE) {
result = mprotect_syscall(base, size, PROT_READ);
/* since non-Mac, we can use this even if the call failed */
set_success_return_val(dcontext, result);
success = (result >= 0);
LOG(THREAD, LOG_VMAREAS, 1,
"re-doing mprotect -EBUSY for " PFX "-" PFX " => %d\n", base, base + size,
(int)result);
SYSLOG_INTERNAL_WARNING_ONCE("re-doing mprotect for PR 475111, PR 107872");
}
#endif
/* FIXME i#143: we need to tweak the returned oldprot for
* writable areas we've made read-only
*/
if (!success) {
uint memprot = 0;
/* Revert the prot bits if needed. */
if (!get_memory_info_from_os(base, NULL, NULL, &memprot))
memprot = PROT_NONE;
LOG(THREAD, LOG_SYSCALLS, 3,
"syscall: mprotect failed: " PFX "-" PFX " prot->%d\n", base, base + size,
osprot_to_memprot(prot));
LOG(THREAD, LOG_SYSCALLS, 3, "\told prot->%d\n", memprot);
if (prot != memprot_to_osprot(memprot)) {
/* We're trying to reverse the prot change, assuming that
* this action doesn't have any unexpected side effects
* when doing so (such as not reversing some bit of internal
* state).
*/
uint new_memprot;
DEBUG_DECLARE(uint res =)
app_memory_protection_change(dcontext, base, size,
osprot_to_memprot(prot), &new_memprot, NULL);
ASSERT_NOT_IMPLEMENTED(res != SUBSET_APP_MEM_PROT_CHANGE);
ASSERT(res == DO_APP_MEM_PROT_CHANGE ||
res == PRETEND_APP_MEM_PROT_CHANGE);
/* PR 410921 - Revert the changes to all-mems list.
* FIXME: This fix assumes the whole region had the prot &
* type, which is true in the cases we have seen so far, but
* theoretically may not be true. If it isn't true, multiple
* memory areas with different types/protections might have
* been changed in pre_system_call(), so will have to keep a
* list of all vmareas changed. This might be expensive for
* each mprotect syscall to guard against a rare theoretical bug.
*/
ASSERT_CURIOSITY(!dcontext->mprot_multi_areas);
IF_NO_MEMQUERY(memcache_update_locked(
base, base + size, memprot, -1 /*type unchanged*/, true /*exists*/));
}
}
break;
}
#ifdef ANDROID
case SYS_prctl: {
int code = (int)dcontext->sys_param0;
int subcode = (ulong)dcontext->sys_param1;
if (success && code == PR_SET_VMA && subcode == PR_SET_VMA_ANON_NAME) {
byte *addr = (byte *)dcontext->sys_param2;
size_t len = (size_t)dcontext->sys_param3;
IF_DEBUG(const char *comment = (const char *)dcontext->sys_param4;)
uint memprot = 0;
if (!get_memory_info_from_os(addr, NULL, NULL, &memprot))
memprot = MEMPROT_NONE;
/* We're post-syscall so from_os should match the prctl */
ASSERT((comment == NULL && !TEST(MEMPROT_HAS_COMMENT, memprot)) ||
(comment != NULL && TEST(MEMPROT_HAS_COMMENT, memprot)));
LOG(THREAD, LOG_SYSCALLS, 2,
"syscall: prctl PR_SET_VMA_ANON_NAME base=" PFX " size=" PFX
" comment=%s\n",
addr, len, comment == NULL ? "<null>" : comment);
IF_NO_MEMQUERY(memcache_update_locked(
addr, addr + len, memprot, -1 /*type unchanged*/, true /*exists*/));
}
break;
}
#endif
#ifdef LINUX
case SYS_brk: {
/* i#91/PR 396352: need to watch SYS_brk to maintain all_memory_areas.
* This code should work regardless of whether syscall failed
* (if it failed, the old break will be returned). We stored
* the old break in sys_param1 in pre-syscall.
*/
app_pc old_brk = (app_pc)dcontext->sys_param1;
app_pc new_brk = (app_pc)result;
DEBUG_DECLARE(app_pc req_brk = (app_pc)dcontext->sys_param0;);
ASSERT(!DYNAMO_OPTION(emulate_brk)); /* shouldn't get here */
# ifdef DEBUG
if (DYNAMO_OPTION(early_inject) &&
req_brk != NULL /* Ignore calls that don't increase brk. */) {
DO_ONCE({
ASSERT_CURIOSITY(new_brk > old_brk &&
"i#1004: first brk() "
"allocation failed with -early_inject");
});
}
# endif
handle_app_brk(dcontext, NULL, old_brk, new_brk);
break;
}
#endif
/****************************************************************************/
/* SPAWNING -- fork mostly handled above */
#ifdef LINUX
case SYS_clone: {
/* in /usr/src/linux/arch/i386/kernel/process.c */
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: clone returned " PFX "\n",
MCXT_SYSCALL_RES(mc));
        /* We switch the lib TLS segment back to DR's privlib segment.
         * Please refer to the comment on os_switch_lib_tls.
         * This is done only in the parent thread;
         * the child thread's TLS setup is done in os_tls_app_seg_init.
         */
if (was_thread_create_syscall(dcontext)) {
if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false))
os_switch_lib_tls(dcontext, false /*to dr*/);
/* i#2089: we already restored the DR tls in os_clone_post() */
}
break;
}
#elif defined(MACOS) && !defined(X64)
case SYS_bsdthread_create: {
/* restore stack values we clobbered */
ASSERT(*sys_param_addr(dcontext, 0) == (reg_t)new_bsdthread_intercept);
*sys_param_addr(dcontext, 0) = dcontext->sys_param0;
*sys_param_addr(dcontext, 1) = dcontext->sys_param1;
break;
}
#endif
#ifdef SYS_fork
case SYS_fork: {
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: fork returned " PFX "\n",
MCXT_SYSCALL_RES(mc));
break;
}
#endif
#ifdef SYS_vfork
case SYS_vfork: {
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: vfork returned " PFX "\n",
MCXT_SYSCALL_RES(mc));
IF_LINUX(ASSERT(was_thread_create_syscall(dcontext)));
/* restore xsp in parent */
LOG(THREAD, LOG_SYSCALLS, 2, "vfork: restoring xsp from " PFX " to " PFX "\n",
mc->xsp, dcontext->sys_param1);
mc->xsp = dcontext->sys_param1;
if (MCXT_SYSCALL_RES(mc) != 0) {
            /* We switch the lib TLS segment back to DR's segment.
             * Please refer to the comment on os_switch_lib_tls.
             * This is done only in the parent thread;
             * the child thread's TLS setup is done in os_tls_app_seg_init.
             */
if (IF_CLIENT_INTERFACE_ELSE(INTERNAL_OPTION(private_loader), false)) {
os_switch_lib_tls(dcontext, false /*to dr*/);
}
/* i#2089: we already restored the DR tls in os_clone_post() */
}
break;
}
#endif
case SYS_execve: {
/* if we get here it means execve failed (doesn't return on success) */
success = false;
mark_thread_execve(dcontext->thread_record, false);
ASSERT(result < 0);
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: execve failed\n");
handle_execve_post(dcontext);
/* Don't 'break' as we have an ASSERT(success) just below
* the switch(). */
goto exit_post_system_call;
break; /* unnecessary but good form so keep it */
}
/****************************************************************************/
/* SIGNALS */
case IF_MACOS_ELSE(SYS_sigaction, SYS_rt_sigaction): { /* 174 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage long
sys_rt_sigaction(int sig, const struct sigaction *act,
struct sigaction *oact, size_t sigsetsize)
*/
/* FIXME i#148: Handle syscall failure. */
int sig = (int)dcontext->sys_param0;
const kernel_sigaction_t *act = (const kernel_sigaction_t *)dcontext->sys_param1;
prev_sigaction_t *oact = (prev_sigaction_t *)dcontext->sys_param2;
size_t sigsetsize = (size_t)dcontext->sys_param3;
uint res;
res = handle_post_sigaction(dcontext, success, sig, act, oact, sigsetsize);
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: %ssigaction => %d\n",
IF_MACOS_ELSE("", "rt_"), -res);
if (res != 0)
set_failure_return_val(dcontext, res);
if (!success || res != 0)
goto exit_post_system_call;
break;
}
#if defined(LINUX) && !defined(X64)
case SYS_sigaction: { /* 67 */
int sig = (int)dcontext->sys_param0;
const old_sigaction_t *act = (const old_sigaction_t *)dcontext->sys_param1;
old_sigaction_t *oact = (old_sigaction_t *)dcontext->sys_param2;
uint res = handle_post_old_sigaction(dcontext, success, sig, act, oact);
LOG(THREAD, LOG_SYSCALLS, 2, "syscall: sigaction => %d\n", -res);
if (res != 0)
set_failure_return_val(dcontext, res);
if (!success || res != 0)
goto exit_post_system_call;
break;
}
#endif
case IF_MACOS_ELSE(SYS_sigprocmask, SYS_rt_sigprocmask): { /* 175 */
/* in /usr/src/linux/kernel/signal.c:
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset,
size_t sigsetsize)
*/
/* FIXME i#148: Handle syscall failure. */
handle_post_sigprocmask(
dcontext, (int)dcontext->sys_param0, (kernel_sigset_t *)dcontext->sys_param1,
(kernel_sigset_t *)dcontext->sys_param2, (size_t)dcontext->sys_param3);
break;
}
#if defined(LINUX) && !defined(X64)
case SYS_sigreturn: /* 119 */
#endif
case IF_MACOS_ELSE(SYS_sigreturn, SYS_rt_sigreturn): /* 173 */
/* there is no return value: it's just the value of eax, so avoid
* assert below
*/
success = true;
break;
case SYS_setitimer: /* 104 */
handle_post_setitimer(dcontext, success, (int)dcontext->sys_param0,
(const struct itimerval *)dcontext->sys_param1,
(struct itimerval *)dcontext->sys_param2);
break;
case SYS_getitimer: /* 105 */
handle_post_getitimer(dcontext, success, (int)dcontext->sys_param0,
(struct itimerval *)dcontext->sys_param1);
break;
#if defined(LINUX) && defined(X86)
case SYS_alarm: /* 27 on x86 and 37 on x64 */
handle_post_alarm(dcontext, success, (unsigned int)dcontext->sys_param0);
break;
#endif
#if defined(LINUX) && defined(X86) && defined(X64)
case SYS_arch_prctl: {
if (success && INTERNAL_OPTION(mangle_app_seg)) {
tls_handle_post_arch_prctl(dcontext, dcontext->sys_param0,
dcontext->sys_param1);
}
break;
}
#endif
#ifdef LINUX
case SYS_ppoll: {
if (dcontext->sys_param3 == (reg_t)NULL)
break;
handle_post_extended_syscall_sigmasks(dcontext, success);
set_syscall_param(dcontext, 3, dcontext->sys_param3);
break;
}
case SYS_pselect6: {
if (dcontext->sys_param4 == (reg_t)NULL)
break;
typedef struct {
kernel_sigset_t *sigmask;
size_t sizemask;
} data_t;
data_t *data_param = (data_t *)dcontext->sys_param3;
handle_post_extended_syscall_sigmasks(dcontext, success);
if (!safe_write_ex((void *)&data_param->sigmask, sizeof(data_param->sigmask),
&dcontext->sys_param4, NULL)) {
LOG(THREAD, LOG_SYSCALLS, 2, "\tEFAULT for pselect6 post syscall\n");
}
break;
}
case SYS_epoll_pwait: {
if (dcontext->sys_param4 == (reg_t)NULL)
break;
handle_post_extended_syscall_sigmasks(dcontext, success);
set_syscall_param(dcontext, 4, dcontext->sys_param4);
break;
}
#endif
/****************************************************************************/
/* FILES */
#ifdef SYS_dup2
case SYS_dup2: IF_LINUX(case SYS_dup3:) {
# ifdef LINUX
if (success) {
signal_handle_dup(dcontext, (file_t)sys_param(dcontext, 1),
(file_t)result);
}
# endif
break;
}
#endif
#ifdef MACOS
case SYS_fcntl_nocancel:
#endif
case SYS_fcntl: {
#ifdef LINUX /* Linux-only since only for signalfd */
if (success) {
file_t fd = (long)dcontext->sys_param0;
int cmd = (int)dcontext->sys_param1;
if ((cmd == F_DUPFD || cmd == F_DUPFD_CLOEXEC))
signal_handle_dup(dcontext, fd, (file_t)result);
}
#endif
        break;
    }
case IF_MACOS_ELSE(SYS_getrlimit, IF_X64_ELSE(SYS_getrlimit, SYS_ugetrlimit)): {
int resource = dcontext->sys_param0;
if (success && resource == RLIMIT_NOFILE) {
/* we stole some space: hide it from app */
struct rlimit *rlim = (struct rlimit *)dcontext->sys_param1;
safe_write_ex(&rlim->rlim_cur, sizeof(rlim->rlim_cur),
&app_rlimit_nofile.rlim_cur, NULL);
safe_write_ex(&rlim->rlim_max, sizeof(rlim->rlim_max),
&app_rlimit_nofile.rlim_max, NULL);
}
break;
}
#if !defined(ARM) && !defined(X64) && !defined(MACOS)
/* Old struct w/ smaller fields */
case SYS_getrlimit: {
int resource = dcontext->sys_param0;
if (success && resource == RLIMIT_NOFILE) {
struct compat_rlimit *rlim = (struct compat_rlimit *)dcontext->sys_param1;
safe_write_ex(&rlim->rlim_cur, sizeof(rlim->rlim_cur),
&app_rlimit_nofile.rlim_cur, NULL);
safe_write_ex(&rlim->rlim_max, sizeof(rlim->rlim_max),
&app_rlimit_nofile.rlim_max, NULL);
}
break;
}
#endif
#ifdef LINUX
case SYS_prlimit64: {
int resource = dcontext->sys_param1;
rlimit64_t *rlim = (rlimit64_t *)dcontext->sys_param3;
if (success && resource == RLIMIT_NOFILE && rlim != NULL &&
/* XXX: xref pid discussion in pre_system_call SYS_prlimit64 */
(dcontext->sys_param0 == 0 || dcontext->sys_param0 == get_process_id())) {
safe_write_ex(rlim, sizeof(*rlim), &app_rlimit_nofile, NULL);
}
break;
}
#endif
#ifdef LINUX
# ifdef SYS_readlink
case SYS_readlink:
# endif
case SYS_readlinkat:
if (success && DYNAMO_OPTION(early_inject)) {
bool is_at = (sysnum == SYS_readlinkat);
/* i#907: /proc/self/exe is a symlink to libdynamorio.so. We need
* to fix it up if the app queries. Any thread id can be passed to
* /proc/%d/exe, so we have to check. We could instead look for
* libdynamorio.so in the result but we've tweaked our injector
* in the past to exec different binaries so this seems more robust.
*/
if (symlink_is_self_exe((const char *)(is_at ? dcontext->sys_param1
: dcontext->sys_param0))) {
char *tgt = (char *)(is_at ? dcontext->sys_param2 : dcontext->sys_param1);
size_t tgt_sz =
(size_t)(is_at ? dcontext->sys_param3 : dcontext->sys_param2);
int len = snprintf(tgt, tgt_sz, "%s", get_application_name());
if (len > 0)
set_success_return_val(dcontext, len);
else {
set_failure_return_val(dcontext, EINVAL);
DODEBUG({ dcontext->expect_last_syscall_to_fail = true; });
}
}
}
break;
#endif
default:
#ifdef LINUX
handle_restartable_region_syscall_post(dcontext, success);
#endif
#ifdef VMX86_SERVER
if (is_vmkuw_sysnum(sysnum)) {
vmkuw_post_system_call(dcontext);
break;
}
#endif
break;
} /* switch */
DODEBUG({
if (ignorable_system_call_normalized(sysnum)) {
STATS_INC(post_syscall_ignorable);
} else {
            /* Many syscalls can fail even though they aren't ignored. However,
             * such failures shouldn't happen without us knowing about them.
             * See PR 402769 for the SYS_close case.
             */
if (!(success || sysnum == SYS_close ||
IF_MACOS(sysnum == SYS_close_nocancel ||)
dcontext->expect_last_syscall_to_fail)) {
LOG(THREAD, LOG_SYSCALLS, 1,
"Unexpected failure of non-ignorable syscall %d\n", sysnum);
}
}
});
exit_post_system_call:
#ifdef CLIENT_INTERFACE
/* The instrument_post_syscall should be called after DR finishes all
* its operations, since DR needs to know the real syscall results,
* and any changes made by the client are simply to fool the app.
* Also, dr_syscall_invoke_another() needs to set eax, which shouldn't
* affect the result of the 1st syscall. Xref i#1.
*/
/* after restore of xbp so client sees it as though was sysenter */
instrument_post_syscall(dcontext, sysnum);
#endif
dcontext->whereami = old_whereami;
}
/* get_dynamo_library_bounds initializes dynamorio library bounds, using a
* release-time assert if there is a problem doing so. It does not use any
* heap, and we assume it is called prior to find_executable_vm_areas.
*/
static void
get_dynamo_library_bounds(void)
{
/* Note that we're not counting DYNAMORIO_PRELOAD_NAME as a DR area, to match
* Windows, so we should unload it like we do there. The other reason not to
* count it is so is_in_dynamo_dll() can be the only exception to the
* never-execute-from-DR-areas list rule
*/
int res;
app_pc check_start, check_end;
char *libdir;
const char *dynamorio_libname;
#ifdef STATIC_LIBRARY
/* We don't know our image name, so look up our bounds with an internal
* address.
*/
dynamorio_libname = NULL;
check_start = (app_pc)&get_dynamo_library_bounds;
#else /* !STATIC_LIBRARY */
# ifdef LINUX
/* PR 361594: we get our bounds from linker-provided symbols.
* Note that referencing the value of these symbols will crash:
* always use the address only.
*/
extern int dynamorio_so_start, dynamorio_so_end;
dynamo_dll_start = (app_pc)&dynamorio_so_start;
dynamo_dll_end = (app_pc)ALIGN_FORWARD(&dynamorio_so_end, PAGE_SIZE);
# elif defined(MACOS)
dynamo_dll_start = module_dynamorio_lib_base();
# endif
check_start = dynamo_dll_start;
#endif /* STATIC_LIBRARY */
static char dynamorio_libname_buf[MAXIMUM_PATH];
res = memquery_library_bounds(NULL, &check_start, &check_end, dynamorio_library_path,
BUFFER_SIZE_ELEMENTS(dynamorio_library_path),
dynamorio_libname_buf,
BUFFER_SIZE_ELEMENTS(dynamorio_libname_buf));
#ifndef STATIC_LIBRARY
dynamorio_libname = IF_UNIT_TEST_ELSE(UNIT_TEST_EXE_NAME, dynamorio_libname_buf);
#endif /* STATIC_LIBRARY */
LOG(GLOBAL, LOG_VMAREAS, 1, PRODUCT_NAME " library path: %s\n",
dynamorio_library_path);
snprintf(dynamorio_library_filepath, BUFFER_SIZE_ELEMENTS(dynamorio_library_filepath),
"%s%s", dynamorio_library_path, dynamorio_libname);
NULL_TERMINATE_BUFFER(dynamorio_library_filepath);
LOG(GLOBAL, LOG_VMAREAS, 1, PRODUCT_NAME " library file path: %s\n",
dynamorio_library_filepath);
#if !defined(STATIC_LIBRARY) && defined(LINUX)
ASSERT(check_start == dynamo_dll_start && check_end == dynamo_dll_end);
#elif defined(MACOS)
ASSERT(check_start == dynamo_dll_start);
dynamo_dll_end = check_end;
#else
dynamo_dll_start = check_start;
dynamo_dll_end = check_end;
#endif
LOG(GLOBAL, LOG_VMAREAS, 1, "DR library bounds: " PFX " to " PFX "\n",
dynamo_dll_start, dynamo_dll_end);
ASSERT(res > 0);
/* Issue 20: we need the path to the alt arch */
strncpy(dynamorio_alt_arch_path, dynamorio_library_path,
BUFFER_SIZE_ELEMENTS(dynamorio_alt_arch_path));
/* Assumption: libdir name is not repeated elsewhere in path */
libdir = strstr(dynamorio_alt_arch_path, IF_X64_ELSE(DR_LIBDIR_X64, DR_LIBDIR_X86));
if (libdir != NULL) {
const char *newdir = IF_X64_ELSE(DR_LIBDIR_X86, DR_LIBDIR_X64);
/* do NOT place the NULL */
strncpy(libdir, newdir, strlen(newdir));
} else {
SYSLOG_INTERNAL_WARNING("unable to determine lib path for cross-arch execve");
}
NULL_TERMINATE_BUFFER(dynamorio_alt_arch_path);
LOG(GLOBAL, LOG_VMAREAS, 1, PRODUCT_NAME " alt arch path: %s\n",
dynamorio_alt_arch_path);
snprintf(dynamorio_alt_arch_filepath,
BUFFER_SIZE_ELEMENTS(dynamorio_alt_arch_filepath), "%s%s",
dynamorio_alt_arch_path, dynamorio_libname);
NULL_TERMINATE_BUFFER(dynamorio_alt_arch_filepath);
if (dynamo_dll_start == NULL || dynamo_dll_end == NULL) {
REPORT_FATAL_ERROR_AND_EXIT(FAILED_TO_FIND_DR_BOUNDS, 2, get_application_name(),
get_application_pid());
}
}
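/* Illustrative sketch (not compiled): the PR 361594 technique above relies on
 * symbols whose *addresses* mark the image boundaries, e.g. as if a linker
 * script contained:
 *
 *   dynamorio_so_start = .;    (at the very start of the image)
 *   ... sections ...
 *   dynamorio_so_end = .;      (at the very end of the image)
 *
 * Only &dynamorio_so_start and &dynamorio_so_end are meaningful: reading the
 * symbols' values dereferences whatever bytes happen to live at those
 * addresses and can fault, which is why the code above takes addresses only.
 */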
/* get full path to our own library, (cached), used for forking and message file name */
char *
get_dynamorio_library_path(void)
{
if (!dynamorio_library_filepath[0]) { /* not cached */
get_dynamo_library_bounds();
}
return dynamorio_library_filepath;
}
#ifdef LINUX
/* Get full path+name of executable file from /proc/self/exe. Returns an empty
* string on error.
* FIXME i#47: This will return DR's path when using early injection.
*/
static char *
read_proc_self_exe(bool ignore_cache)
{
static char exepath[MAXIMUM_PATH];
static bool tried = false;
# ifdef MACOS
ASSERT_NOT_IMPLEMENTED(false);
# endif
if (!tried || ignore_cache) {
tried = true;
/* assume we have /proc/self/exe symlink: could add HAVE_PROC_EXE
* but we have no alternative solution except assuming the first
* /proc/self/maps entry is the executable
*/
ssize_t res;
DEBUG_DECLARE(int len =)
snprintf(exepath, BUFFER_SIZE_ELEMENTS(exepath), "/proc/%d/exe",
get_process_id());
ASSERT(len > 0);
NULL_TERMINATE_BUFFER(exepath);
/* i#960: readlink does not null terminate, so we do it. */
# ifdef SYS_readlink
res = dynamorio_syscall(SYS_readlink, 3, exepath, exepath,
BUFFER_SIZE_ELEMENTS(exepath) - 1);
# else
res = dynamorio_syscall(SYS_readlinkat, 4, AT_FDCWD, exepath, exepath,
BUFFER_SIZE_ELEMENTS(exepath) - 1);
# endif
ASSERT(res < BUFFER_SIZE_ELEMENTS(exepath));
exepath[MAX(res, 0)] = '\0';
NULL_TERMINATE_BUFFER(exepath);
}
return exepath;
}
#endif /* LINUX */
app_pc
get_application_base(void)
{
if (executable_start == NULL) {
#ifdef HAVE_MEMINFO
/* Haven't done find_executable_vm_areas() yet so walk maps ourselves */
const char *name = get_application_name();
if (name != NULL && name[0] != '\0') {
DEBUG_DECLARE(int count =)
memquery_library_bounds(name, &executable_start, &executable_end, NULL, 0,
NULL, 0);
ASSERT(count > 0 && executable_start != NULL);
}
#else
/* We have to fail. Should we dl_iterate this early? */
#endif
}
return executable_start;
}
app_pc
get_application_end(void)
{
if (executable_end == NULL)
get_application_base();
return executable_end;
}
app_pc
get_image_entry()
{
static app_pc image_entry_point = NULL;
if (image_entry_point == NULL && executable_start != NULL) {
module_area_t *ma;
os_get_module_info_lock();
ma = module_pc_lookup(executable_start);
ASSERT(ma != NULL);
if (ma != NULL) {
ASSERT(executable_start == ma->start);
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
image_entry_point = ma->entry_point;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
}
os_get_module_info_unlock();
}
return image_entry_point;
}
#ifdef DEBUG
void
mem_stats_snapshot()
{
/* FIXME: NYI */
}
#endif
bool
is_in_dynamo_dll(app_pc pc)
{
ASSERT(dynamo_dll_start != NULL);
#ifdef VMX86_SERVER
    /* We want to consider vmklib as part of the DR lib for allowing
     * execution (_init calls os_in_vmkernel_classic()) and for
     * reporting crashes as our fault.
     */
if (vmk_in_vmklib(pc))
return true;
#endif
return (pc >= dynamo_dll_start && pc < dynamo_dll_end);
}
app_pc
get_dynamorio_dll_start()
{
if (dynamo_dll_start == NULL)
get_dynamo_library_bounds();
ASSERT(dynamo_dll_start != NULL);
return dynamo_dll_start;
}
app_pc
get_dynamorio_dll_end()
{
if (dynamo_dll_end == NULL)
get_dynamo_library_bounds();
ASSERT(dynamo_dll_end != NULL);
return dynamo_dll_end;
}
app_pc
get_dynamorio_dll_preferred_base()
{
    /* On Linux there is no preferred base if we're PIC,
     * so this is always equal to dynamo_dll_start. */
return get_dynamorio_dll_start();
}
static void
found_vsyscall_page(memquery_iter_t *iter _IF_DEBUG(OUT const char **map_type))
{
#ifndef X64
/* We assume no vsyscall page for x64; thus, checking the
* hardcoded address shouldn't have any false positives.
*/
ASSERT(iter->vm_end - iter->vm_start == PAGE_SIZE ||
/* i#1583: recent kernels have 2-page vdso */
iter->vm_end - iter->vm_start == 2 * PAGE_SIZE);
ASSERT(!dynamo_initialized); /* .data should be +w */
    /* We're not considering this an "image" even if it is part of ld.so
     * (xref i#89), and thus we aren't adjusting our code origins policies
     * to remove the vsyscall page exemption.
     */
DODEBUG({ *map_type = "VDSO"; });
/* On re-attach, the vdso can be split into two entries (from DR's hook),
* so take just the first one as the start (xref i#2157).
*/
if (vdso_page_start == NULL) {
vdso_page_start = iter->vm_start;
vdso_size = iter->vm_end - iter->vm_start;
}
/* The vsyscall page can be on the 2nd page inside the vdso, but until we
* see a syscall we don't know and we point it at the vdso start.
*/
if (vsyscall_page_start == NULL)
vsyscall_page_start = iter->vm_start;
LOG(GLOBAL, LOG_VMAREAS, 1, "found vdso/vsyscall pages @ " PFX " %s\n",
vsyscall_page_start, iter->comment);
#else
    /* i#172, i#430: fix kernels where the vdso or vsyscall page is listed
     * as unreadable in the maps file, e.g.:
     *   ffffffffff600000-ffffffffffe00000 ---p 00000000 00:00 0  [vdso]
     *   ffffffffff600000-ffffffffffe00000 ---p 00000000 00:00 0  [vsyscall]
     * even though it is in fact readable.
     */
if (!TESTALL((PROT_READ | PROT_EXEC), iter->prot))
iter->prot |= (PROT_READ | PROT_EXEC);
/* i#1908: vdso and vsyscall pages are now split */
if (strncmp(iter->comment, VSYSCALL_PAGE_MAPS_NAME,
strlen(VSYSCALL_PAGE_MAPS_NAME)) == 0)
vdso_page_start = iter->vm_start;
else if (strncmp(iter->comment, VSYSCALL_REGION_MAPS_NAME,
strlen(VSYSCALL_REGION_MAPS_NAME)) == 0)
vsyscall_page_start = iter->vm_start;
#endif
}
int
os_walk_address_space(memquery_iter_t *iter, bool add_modules)
{
int count = 0;
#ifdef MACOS
app_pc shared_start, shared_end;
bool have_shared = module_dyld_shared_region(&shared_start, &shared_end);
#endif
#ifdef RETURN_AFTER_CALL
dcontext_t *dcontext = get_thread_private_dcontext();
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
#endif
#ifndef HAVE_MEMINFO_QUERY
/* We avoid tracking the innards of vmheap for all_memory_areas by
* adding a single no-access region for the whole vmheap.
* Queries from heap routines use _from_os.
     * Queries in check_thread_vm_area are fine getting "noaccess": it wants
     * any DR memory not on the exec areas list to be noaccess.
     * Queries from clients: should be ok to hide innards. Marking noaccess
     * should be safer than marking free, as an unruly client might try to mmap
* something in the free space: better to have it think it's reserved but
* not yet used memory. FIXME: we're not marking beyond-vmheap DR regions
* as noaccess!
*/
byte *our_heap_start, *our_heap_end;
get_vmm_heap_bounds(&our_heap_start, &our_heap_end);
if (our_heap_end - our_heap_start > 0) {
memcache_update_locked(our_heap_start, our_heap_end, MEMPROT_NONE,
DR_MEMTYPE_DATA, false /*!exists*/);
}
#endif
#ifndef HAVE_MEMINFO
count = find_vm_areas_via_probe();
#else
while (memquery_iterator_next(iter)) {
bool image = false;
size_t size = iter->vm_end - iter->vm_start;
/* i#479, hide private module and match Windows's behavior */
bool skip = dynamo_vm_area_overlap(iter->vm_start, iter->vm_end) &&
!is_in_dynamo_dll(iter->vm_start) /* our own text section is ok */
/* client lib text section is ok (xref i#487) */
IF_CLIENT_INTERFACE(&&!is_in_client_lib(iter->vm_start));
DEBUG_DECLARE(const char *map_type = "Private");
/* we can't really tell what's a stack and what's not, but we rely on
* our passing NULL preventing rwx regions from being added to executable
* or future list, even w/ -executable_if_alloc
*/
LOG(GLOBAL, LOG_VMAREAS, 2, "start=" PFX " end=" PFX " prot=%x comment=%s\n",
iter->vm_start, iter->vm_end, iter->prot, iter->comment);
/* Issue 89: the vdso might be loaded inside ld.so as below,
* which causes ASSERT_CURIOSITY in mmap_check_for_module_overlap fail.
* b7fa3000-b7fbd000 r-xp 00000000 08:01 108679 /lib/ld-2.8.90.so
* b7fbd000-b7fbe000 r-xp b7fbd000 00:00 0 [vdso]
* b7fbe000-b7fbf000 r--p 0001a000 08:01 108679 /lib/ld-2.8.90.so
* b7fbf000-b7fc0000 rw-p 0001b000 08:01 108679 /lib/ld-2.8.90.so
* So we always first check if it is a vdso page before calling
* mmap_check_for_module_overlap.
* Update: with i#160/PR 562667 handling non-contiguous modules like
* ld.so we now gracefully handle other objects like vdso in gaps in
* module, but it's simpler to leave this ordering here.
*/
if (skip) {
/* i#479, hide private module and match Windows's behavior */
LOG(GLOBAL, LOG_VMAREAS, 2, PFX "-" PFX " skipping: internal DR region\n",
iter->vm_start, iter->vm_end);
# ifdef MACOS
} else if (have_shared && iter->vm_start >= shared_start &&
iter->vm_start < shared_end) {
/* Skip modules we happen to find inside the dyld shared cache,
* as we'll fail to identify the library. We add them
* in module_walk_dyld_list instead.
*/
image = true;
# endif
} else if (strncmp(iter->comment, VSYSCALL_PAGE_MAPS_NAME,
strlen(VSYSCALL_PAGE_MAPS_NAME)) == 0 ||
IF_X64_ELSE(strncmp(iter->comment, VSYSCALL_REGION_MAPS_NAME,
strlen(VSYSCALL_REGION_MAPS_NAME)) == 0,
/* Older kernels do not label it as "[vdso]", but it is
* hardcoded there.
*/
/* 32-bit */
iter->vm_start == VSYSCALL_PAGE_START_HARDCODED)) {
if (add_modules) {
found_vsyscall_page(iter _IF_DEBUG(&map_type));
                /* We'd like to add vsyscall to the module list too, but when
                 * it's separate from the vdso it has no ELF header, which makes
                 * it too complex to force into the module list.
                 */
if (module_is_header(iter->vm_start, iter->vm_end - iter->vm_start)) {
module_list_add(iter->vm_start, iter->vm_end - iter->vm_start, false,
iter->comment, iter->inode);
}
}
} else if (add_modules &&
mmap_check_for_module_overlap(iter->vm_start, size,
TEST(MEMPROT_READ, iter->prot),
iter->inode, false)) {
/* we already added the whole image region when we hit the first map for it */
image = true;
DODEBUG({ map_type = "ELF SO"; });
} else if (TEST(MEMPROT_READ, iter->prot) &&
module_is_header(iter->vm_start, size)) {
size_t image_size = size;
app_pc mod_base, mod_first_end, mod_max_end;
char *exec_match;
bool found_exec = false;
image = true;
DODEBUG({ map_type = "ELF SO"; });
            LOG(GLOBAL, LOG_VMAREAS, 2,
                "Found first segment of an already-mapped module:\n"
                "\t" PFX "-" PFX "%s inode=" UINT64_FORMAT_STRING " name=%s\n",
iter->vm_start, iter->vm_end, TEST(MEMPROT_EXEC, iter->prot) ? " +x" : "",
iter->inode, iter->comment);
# ifdef LINUX
/* Mapped images should have inodes, except for cases where an anon
* map is placed on top (i#2566)
*/
ASSERT_CURIOSITY(iter->inode != 0 || iter->comment[0] == '\0');
# endif
ASSERT_CURIOSITY(iter->offset == 0); /* first map shouldn't have offset */
/* Get size by walking the program headers. This includes .bss. */
if (module_walk_program_headers(iter->vm_start, size, false,
true, /* i#1589: ld.so relocated .dynamic */
&mod_base, &mod_first_end, &mod_max_end, NULL,
NULL)) {
image_size = mod_max_end - mod_base;
} else {
ASSERT_NOT_REACHED();
}
        LOG(GLOBAL, LOG_VMAREAS, 2,
            "Full bounds of the already-mapped module:\n"
"\t" PFX "-" PFX " inode=" UINT64_FORMAT_STRING " name=%s\n",
iter->vm_start, iter->vm_start + image_size, iter->inode, iter->comment);
if (add_modules) {
/* look for executable */
# ifdef LINUX
exec_match = get_application_name();
if (exec_match != NULL && exec_match[0] != '\0')
found_exec = (strcmp(iter->comment, exec_match) == 0);
/* Handle an anon region for the header (i#2566) */
if (!found_exec && executable_start != NULL &&
executable_start == iter->vm_start)
found_exec = true;
# else
/* We don't have a nice normalized name: it can have ./ or ../ inside
* it. But, we can distinguish an exe from a lib here, even for PIE,
* so we go with that plus a basename comparison.
*/
exec_match = (char *)get_application_short_name();
if (module_is_executable(iter->vm_start) && exec_match != NULL &&
exec_match[0] != '\0') {
const char *iter_basename = strrchr(iter->comment, '/');
if (iter_basename == NULL)
iter_basename = iter->comment;
else
iter_basename++;
found_exec = (strcmp(iter_basename, exec_match) == 0);
}
# endif
if (found_exec) {
if (executable_start == NULL)
executable_start = iter->vm_start;
else
ASSERT(iter->vm_start == executable_start);
LOG(GLOBAL, LOG_VMAREAS, 2,
"Found executable %s @" PFX "-" PFX " %s\n",
get_application_name(), iter->vm_start,
iter->vm_start + image_size, iter->comment);
}
            /* We don't yet know whether the module is contiguous, so we
             * settle for the first segment's size. We'll update it in
             * module_list_add().
             */
module_list_add(iter->vm_start, mod_first_end - mod_base, false,
iter->comment, iter->inode);
# ifdef MACOS
/* look for dyld */
if (strcmp(iter->comment, "/usr/lib/dyld") == 0)
module_walk_dyld_list(iter->vm_start);
# endif
}
} else if (iter->inode != 0) {
DODEBUG({ map_type = "Mapped File"; });
}
/* add all regions (incl. dynamo_areas and stack) to all_memory_areas */
# ifndef HAVE_MEMINFO_QUERY
/* Don't add if we're using one single vmheap entry. */
if (iter->vm_start < our_heap_start || iter->vm_end > our_heap_end) {
LOG(GLOBAL, LOG_VMAREAS, 4,
"os_walk_address_space: adding: " PFX "-" PFX " prot=%d\n",
iter->vm_start, iter->vm_end, iter->prot);
memcache_update_locked(iter->vm_start, iter->vm_end, iter->prot,
image ? DR_MEMTYPE_IMAGE : DR_MEMTYPE_DATA,
false /*!exists*/);
}
# endif
/* FIXME: best if we could pass every region to vmareas, but
* it has no way of determining if this is a stack b/c we don't have
* a dcontext at this point -- so we just don't pass the stack
*/
if (!skip /* i#479, hide private module and match Windows's behavior */ &&
add_modules &&
app_memory_allocation(NULL, iter->vm_start, (iter->vm_end - iter->vm_start),
iter->prot, image _IF_DEBUG(map_type))) {
count++;
}
}
#endif /* !HAVE_MEMINFO */
#ifndef HAVE_MEMINFO_QUERY
DOLOG(4, LOG_VMAREAS, memcache_print(GLOBAL, "init: all memory areas:\n"););
#endif
#ifdef RETURN_AFTER_CALL
/* Find the bottom of the stack of the initial (native) entry */
ostd->stack_bottom_pc = find_stack_bottom();
LOG(THREAD, LOG_ALL, 1, "Stack bottom pc = " PFX "\n", ostd->stack_bottom_pc);
#endif
/* now that we've walked memory print all modules */
LOG(GLOBAL, LOG_VMAREAS, 2, "Module list after memory walk\n");
DOLOG(1, LOG_VMAREAS, {
if (add_modules)
print_modules(GLOBAL, DUMP_NOT_XML);
});
return count;
}
/* assumed to be called after find_dynamo_library_vm_areas() */
int
find_executable_vm_areas(void)
{
int count;
memquery_iter_t iter;
memquery_iterator_start(&iter, NULL, true /*may alloc*/);
count = os_walk_address_space(&iter, true);
memquery_iterator_stop(&iter);
STATS_ADD(num_app_code_modules, count);
/* now that we have the modules set up, query libc */
get_libc_errno_location(true /*force init*/);
return count;
}
/* initializes dynamorio library bounds.
* does not use any heap.
* assumed to be called prior to find_executable_vm_areas.
*/
int
find_dynamo_library_vm_areas(void)
{
#ifndef STATIC_LIBRARY
/* We didn't add inside get_dynamo_library_bounds b/c it was called pre-alloc.
* We don't bother to break down the sub-regions.
* Assumption: we don't need to have the protection flags for DR sub-regions.
* For static library builds, DR's code is in the exe and isn't considered
* to be a DR area.
*/
add_dynamo_vm_area(get_dynamorio_dll_start(), get_dynamorio_dll_end(),
MEMPROT_READ | MEMPROT_WRITE | MEMPROT_EXEC,
true /* from image */ _IF_DEBUG(dynamorio_library_filepath));
#endif
#ifdef VMX86_SERVER
if (os_in_vmkernel_userworld())
vmk_add_vmklib_to_dynamo_areas();
#endif
return 1;
}
bool
get_stack_bounds(dcontext_t *dcontext, byte **base, byte **top)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
if (ostd->stack_base == NULL) {
        /* Initialize on demand since we don't have the app esp handy in
         * os_thread_init().
         * FIXME: ignoring the concern here for now: if we hit cases confirming
         * it, the right thing will be to merge adjacent rwx regions and assume
         * their union is the stack -- otherwise we'd have to have a special
         * stack init routine called from x86.asm new_thread_dynamo_start and
         * internal_dynamo_start, and the latter is not a do-once...
         */
size_t size = 0;
bool ok;
/* store stack info at thread startup, since stack can get fragmented in
* /proc/self/maps w/ later mprotects and it can be hard to piece together later
*/
if (IF_MEMQUERY_ELSE(false, DYNAMO_OPTION(use_all_memory_areas))) {
ok = get_memory_info((app_pc)get_mcontext(dcontext)->xsp, &ostd->stack_base,
&size, NULL);
} else {
ok = get_memory_info_from_os((app_pc)get_mcontext(dcontext)->xsp,
&ostd->stack_base, &size, NULL);
}
if (!ok) {
/* This can happen with dr_prepopulate_cache() before we start running
* the app.
*/
ASSERT(!dynamo_started);
return false;
}
ostd->stack_top = ostd->stack_base + size;
LOG(THREAD, LOG_THREADS, 1, "App stack is " PFX "-" PFX "\n", ostd->stack_base,
ostd->stack_top);
}
if (base != NULL)
*base = ostd->stack_base;
if (top != NULL)
*top = ostd->stack_top;
return true;
}
#ifdef RETURN_AFTER_CALL
initial_call_stack_status_t
at_initial_stack_bottom(dcontext_t *dcontext, app_pc target_pc)
{
    /* We can't rely exclusively on finding the true stack bottom
     * b/c we can't always walk the call stack (PR 608990), so we
     * use the image entry as our primary trigger.
     */
if (executable_start != NULL /*defensive*/ && reached_image_entry_yet()) {
return INITIAL_STACK_EMPTY;
} else {
/* If our stack walk ends early we could have false positives, but
* that's better than false negatives if we miss the image entry
* or we were unable to find the executable_start
*/
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
if (target_pc == ostd->stack_bottom_pc) {
return INITIAL_STACK_BOTTOM_REACHED;
} else {
return INITIAL_STACK_BOTTOM_NOT_REACHED;
}
}
}
#endif /* RETURN_AFTER_CALL */
/* Uses our cached data structures (if in use, else raw query) to retrieve memory info */
bool
query_memory_ex(const byte *pc, OUT dr_mem_info_t *out_info)
{
#ifdef HAVE_MEMINFO_QUERY
return query_memory_ex_from_os(pc, out_info);
#else
return memcache_query_memory(pc, out_info);
#endif
}
bool
query_memory_cur_base(const byte *pc, OUT dr_mem_info_t *info)
{
return query_memory_ex(pc, info);
}
/* Use our cached data structures (if in use, else raw query) to retrieve memory info */
bool
get_memory_info(const byte *pc, byte **base_pc, size_t *size,
uint *prot /* OUT optional, returns MEMPROT_* value */)
{
dr_mem_info_t info;
if (is_vmm_reserved_address((byte *)pc, 1)) {
if (!query_memory_ex_from_os(pc, &info) || info.type == DR_MEMTYPE_FREE)
return false;
} else {
if (!query_memory_ex(pc, &info) || info.type == DR_MEMTYPE_FREE)
return false;
}
if (base_pc != NULL)
*base_pc = info.base_pc;
if (size != NULL)
*size = info.size;
if (prot != NULL)
*prot = info.prot;
return true;
}
/* We assume that this routine might be called instead of query_memory_ex()
* b/c the caller is in a fragile location and cannot acquire locks, so
* we try to do the same here.
*/
bool
query_memory_ex_from_os(const byte *pc, OUT dr_mem_info_t *info)
{
bool have_type = false;
bool res = memquery_from_os(pc, info, &have_type);
if (!res) {
/* No other failure types for now */
info->type = DR_MEMTYPE_ERROR;
} else if (res && !have_type) {
/* We pass 0 instead of info->size b/c even if marked as +r we can still
* get SIGBUS if beyond end of mmapped file: not uncommon if querying
* in middle of library load before .bss fully set up (PR 528744).
* However, if there is no fault handler, is_elf_so_header's safe_read will
* recurse to here, so in that case we use info->size but we assume
* it's only at init or exit and so not in the middle of a load
* and less likely to be querying a random mmapped file.
* The cleaner fix is to allow safe_read to work w/o a dcontext or
* fault handling: i#350/PR 529066.
*/
if (TEST(MEMPROT_READ, info->prot) &&
module_is_header(info->base_pc, fault_handling_initialized ? 0 : info->size))
info->type = DR_MEMTYPE_IMAGE;
else {
/* FIXME: won't quite match find_executable_vm_areas marking as
* image: can be doubly-mapped so; don't want to count vdso; etc.
*/
info->type = DR_MEMTYPE_DATA;
}
}
return res;
}
bool
get_memory_info_from_os(const byte *pc, byte **base_pc, size_t *size,
uint *prot /* OUT optional, returns MEMPROT_* value */)
{
dr_mem_info_t info;
if (!query_memory_ex_from_os(pc, &info) || info.type == DR_MEMTYPE_FREE)
return false;
if (base_pc != NULL)
*base_pc = info.base_pc;
if (size != NULL)
*size = info.size;
if (prot != NULL)
*prot = info.prot;
return true;
}
/* in utils.c, exported only for our hack! */
extern void
deadlock_avoidance_unlock(mutex_t *lock, bool ownable);
void
mutex_wait_contended_lock(mutex_t *lock _IF_CLIENT_INTERFACE(priv_mcontext_t *mc))
{
#ifdef CLIENT_INTERFACE
dcontext_t *dcontext = get_thread_private_dcontext();
bool set_client_safe_for_synch =
((dcontext != NULL) && IS_CLIENT_THREAD(dcontext) &&
((mutex_t *)dcontext->client_data->client_grab_mutex == lock));
if (mc != NULL) {
ASSERT(dcontext != NULL);
/* set_safe_for_sync can't be true at the same time as passing
* an mcontext to return into: nothing would be able to reset the
* client_thread_safe_for_sync flag.
*/
ASSERT(!set_client_safe_for_synch);
*get_mcontext(dcontext) = *mc;
}
#endif
/* i#96/PR 295561: use futex(2) if available */
if (ksynch_kernel_support()) {
/* Try to get the lock. If already held, it's fine to store any value
* > LOCK_SET_STATE (we don't rely on paired incs/decs) so that
* the next unlocker will call mutex_notify_released_lock().
*/
ptr_int_t res;
#ifndef LINUX /* we actually don't use this for Linux: see below */
KSYNCH_TYPE *event = mutex_get_contended_event(lock);
ASSERT(event != NULL && ksynch_var_initialized(event));
#endif
while (atomic_exchange_int(&lock->lock_requests, LOCK_CONTENDED_STATE) !=
LOCK_FREE_STATE) {
#ifdef CLIENT_INTERFACE
if (set_client_safe_for_synch)
dcontext->client_data->client_thread_safe_for_synch = true;
if (mc != NULL)
set_synch_state(dcontext, THREAD_SYNCH_VALID_MCONTEXT);
#endif
/* Unfortunately the synch semantics are different for Linux vs Mac.
* We have to use lock_requests as the futex to avoid waiting if
* lock_requests changes, while on Mac the underlying synch prevents
* a wait there.
*/
#ifdef LINUX
/* We'll abort the wait if lock_requests has changed at all.
* We can't have a series of changes that result in no apparent
* change w/o someone acquiring the lock, b/c
* mutex_notify_released_lock() sets lock_requests to LOCK_FREE_STATE.
*/
res = ksynch_wait(&lock->lock_requests, LOCK_CONTENDED_STATE, 0);
#else
res = ksynch_wait(event, 0, 0);
#endif
if (res != 0 && res != -EWOULDBLOCK)
os_thread_yield();
#ifdef CLIENT_INTERFACE
if (set_client_safe_for_synch)
dcontext->client_data->client_thread_safe_for_synch = false;
if (mc != NULL)
set_synch_state(dcontext, THREAD_SYNCH_NONE);
#endif
/* we don't care whether properly woken (res==0), var mismatch
* (res==-EWOULDBLOCK), or error: regardless, someone else
* could have acquired the lock, so we try again
*/
}
} else {
/* we now have to undo our earlier request */
atomic_dec_and_test(&lock->lock_requests);
while (!d_r_mutex_trylock(lock)) {
#ifdef CLIENT_INTERFACE
if (set_client_safe_for_synch)
dcontext->client_data->client_thread_safe_for_synch = true;
if (mc != NULL)
set_synch_state(dcontext, THREAD_SYNCH_VALID_MCONTEXT);
#endif
os_thread_yield();
#ifdef CLIENT_INTERFACE
if (set_client_safe_for_synch)
dcontext->client_data->client_thread_safe_for_synch = false;
if (mc != NULL)
set_synch_state(dcontext, THREAD_SYNCH_NONE);
#endif
}
#ifdef DEADLOCK_AVOIDANCE
/* HACK: trylock's success causes it to do DEADLOCK_AVOIDANCE_LOCK, so to
* avoid two in a row (causes assertion on owner) we unlock here
* In the future we will remove the trylock here and this will go away.
*/
deadlock_avoidance_unlock(lock, true);
#endif
}
return;
}
void
mutex_notify_released_lock(mutex_t *lock)
{
/* i#96/PR 295561: use futex(2) if available. */
if (ksynch_kernel_support()) {
/* Set to LOCK_FREE_STATE to avoid concurrent lock attempts from
* resulting in a futex_wait value match w/o anyone owning the lock
*/
lock->lock_requests = LOCK_FREE_STATE;
/* No reason to wake multiple threads: just one */
#ifdef LINUX
ksynch_wake(&lock->lock_requests);
#else
ksynch_wake(&lock->contended_event);
#endif
} /* else nothing to do */
}
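/* The Linux futex pairing implemented above, in brief (illustrative only):
 *
 *   waiter:   while (atomic_exchange(&lock->lock_requests, CONTENDED) != FREE)
 *                 futex_wait(&lock->lock_requests, CONTENDED);
 *   releaser: lock->lock_requests = FREE;
 *             futex_wake(&lock->lock_requests);
 *
 * Lost wakeups are avoided because the waiter re-examines the exchanged value
 * on every loop iteration rather than trusting the wake alone.
 */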
/* read_write_lock_t implementation doesn't expect the contention path
 * helpers to guarantee the lock is held (unlike mutexes) so simple
 * yields are still acceptable.
 */
void
rwlock_wait_contended_writer(read_write_lock_t *rwlock)
{
os_thread_yield();
}
void
rwlock_notify_writer(read_write_lock_t *rwlock)
{
/* nothing to do here */
}
void
rwlock_wait_contended_reader(read_write_lock_t *rwlock)
{
os_thread_yield();
}
void
rwlock_notify_readers(read_write_lock_t *rwlock)
{
/* nothing to do here */
}
/***************************************************************************/
/* events are un-signaled when successfully waited upon. */
typedef struct linux_event_t {
/* Any function that sets this flag must also notify possibly waiting
* thread(s). See i#96/PR 295561.
*/
KSYNCH_TYPE signaled;
mutex_t lock;
bool broadcast;
} linux_event_t;
/* FIXME: this routine will need to have a macro wrapper to let us
* assign different ranks to all events for DEADLOCK_AVOIDANCE.
* Currently a single rank seems to work.
*/
event_t
create_event(void)
{
event_t e = (event_t)global_heap_alloc(sizeof(linux_event_t) HEAPACCT(ACCT_OTHER));
ksynch_init_var(&e->signaled);
ASSIGN_INIT_LOCK_FREE(e->lock, event_lock); /* FIXME: pass the event name here */
e->broadcast = false;
return e;
}
event_t
create_broadcast_event(void)
{
event_t e = create_event();
e->broadcast = true;
return e;
}
void
destroy_event(event_t e)
{
DELETE_LOCK(e->lock);
ksynch_free_var(&e->signaled);
global_heap_free(e, sizeof(linux_event_t) HEAPACCT(ACCT_OTHER));
}
void
signal_event(event_t e)
{
d_r_mutex_lock(&e->lock);
ksynch_set_value(&e->signaled, 1);
if (e->broadcast)
ksynch_wake_all(&e->signaled);
else
ksynch_wake(&e->signaled);
LOG(THREAD_GET, LOG_THREADS, 3, "thread " TIDFMT " signalling event " PFX "\n",
d_r_get_thread_id(), e);
d_r_mutex_unlock(&e->lock);
}
void
reset_event(event_t e)
{
d_r_mutex_lock(&e->lock);
ksynch_set_value(&e->signaled, 0);
LOG(THREAD_GET, LOG_THREADS, 3, "thread " TIDFMT " resetting event " PFX "\n",
d_r_get_thread_id(), e);
d_r_mutex_unlock(&e->lock);
}
bool
wait_for_event(event_t e, int timeout_ms)
{
#ifdef DEBUG
dcontext_t *dcontext = get_thread_private_dcontext();
#endif
uint64 start_time, cur_time;
if (timeout_ms > 0)
start_time = query_time_millis();
/* Use a user-space event on Linux, a kernel event on Windows. */
LOG(THREAD, LOG_THREADS, 3, "thread " TIDFMT " waiting for event " PFX "\n",
d_r_get_thread_id(), e);
do {
if (ksynch_get_value(&e->signaled) == 1) {
d_r_mutex_lock(&e->lock);
if (ksynch_get_value(&e->signaled) == 0) {
/* some other thread beat us to it */
LOG(THREAD, LOG_THREADS, 3,
"thread " TIDFMT " was beaten to event " PFX "\n",
d_r_get_thread_id(), e);
d_r_mutex_unlock(&e->lock);
} else {
if (!e->broadcast) {
/* reset the event */
ksynch_set_value(&e->signaled, 0);
}
d_r_mutex_unlock(&e->lock);
LOG(THREAD, LOG_THREADS, 3,
"thread " TIDFMT " finished waiting for event " PFX "\n",
d_r_get_thread_id(), e);
return true;
}
} else {
            /* Wait only if the signaled flag is not set to 1. The return value
             * doesn't matter because the flag will be re-checked.
             */
ksynch_wait(&e->signaled, 0, timeout_ms);
}
if (ksynch_get_value(&e->signaled) == 0) {
/* If it still has to wait, give up the cpu. */
os_thread_yield();
}
if (timeout_ms > 0)
cur_time = query_time_millis();
} while (timeout_ms <= 0 || cur_time - start_time < timeout_ms);
return false;
}
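/* Minimal usage sketch for the event API above (illustrative, not compiled):
 *
 *   event_t e = create_event();
 *   wait_for_event(e, 0);   // waiter: timeout_ms <= 0 waits indefinitely
 *   signal_event(e);        // signaler: sets signaled=1 and wakes a waiter
 *   destroy_event(e);
 *
 * For non-broadcast events the successful waiter clears the signaled flag on
 * its way out, matching the "un-signaled when successfully waited upon"
 * contract stated above.
 */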
/***************************************************************************
* DIRECTORY ITERATOR
*/
/* These structs are written to the buf that we pass to getdents. We can
* iterate them by adding d_reclen to the current buffer offset and interpreting
* that as the next entry.
*/
struct linux_dirent {
#ifdef SYS_getdents
/* Adapted from struct old_linux_dirent in linux/fs/readdir.c: */
unsigned long d_ino;
unsigned long d_off;
unsigned short d_reclen;
char d_name[];
#else
/* Adapted from struct linux_dirent64 in linux/include/linux/dirent.h: */
uint64 d_ino;
int64 d_off;
unsigned short d_reclen;
unsigned char d_type;
char d_name[];
#endif
};
#define CURRENT_DIRENT(iter) ((struct linux_dirent *)(&iter->buf[iter->off]))
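/* Usage sketch for the iterator below (illustrative; process() is a
 * hypothetical per-entry callback):
 *
 *   dir_iterator_t iter;
 *   os_dir_iterator_start(&iter, fd);    // fd from os_open_directory()
 *   while (os_dir_iterator_next(&iter))
 *       process(iter.name);
 *
 * getdents fills buf with variable-length records; d_reclen gives the distance
 * from one record to the next, which is what CURRENT_DIRENT relies on.
 */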
static void
os_dir_iterator_start(dir_iterator_t *iter, file_t fd)
{
iter->fd = fd;
iter->off = 0;
iter->end = 0;
}
static bool
os_dir_iterator_next(dir_iterator_t *iter)
{
#ifdef MACOS
/* We can use SYS_getdirentries, but do we even need a dir iterator?
* On Linux it's only used to enumerate /proc/pid/task.
*/
ASSERT_NOT_IMPLEMENTED(false);
return false;
#else
if (iter->off < iter->end) {
/* Have existing dents, get the next offset. */
iter->off += CURRENT_DIRENT(iter)->d_reclen;
ASSERT(iter->off <= iter->end);
}
if (iter->off == iter->end) {
/* Do a getdents syscall. Unlike when reading a file, the kernel will
* not read a partial linux_dirent struct, so we don't need to shift the
* left over bytes to the buffer start. See the getdents manpage for
* the example code that this is based on.
*/
iter->off = 0;
# ifdef SYS_getdents
iter->end =
dynamorio_syscall(SYS_getdents, 3, iter->fd, iter->buf, sizeof(iter->buf));
# else
iter->end =
dynamorio_syscall(SYS_getdents64, 3, iter->fd, iter->buf, sizeof(iter->buf));
# endif
ASSERT(iter->end <= sizeof(iter->buf));
if (iter->end <= 0) { /* No more dents, or error. */
iter->name = NULL;
if (iter->end < 0) {
LOG(GLOBAL, LOG_SYSCALLS, 1, "getdents syscall failed with errno %d\n",
-iter->end);
}
return false;
}
}
iter->name = CURRENT_DIRENT(iter)->d_name;
return true;
#endif
}
/***************************************************************************
* THREAD TAKEOVER
*/
/* Record used to synchronize thread takeover. */
typedef struct _takeover_record_t {
thread_id_t tid;
event_t event;
} takeover_record_t;
/* When attempting thread takeover, we store an array of thread id and event
* pairs here. Each thread we signal is supposed to enter DR control and signal
* this event after it has added itself to all_threads.
*
* XXX: What we really want is to be able to use SYS_rt_tgsigqueueinfo (Linux >=
* 2.6.31) to pass the event_t to each thread directly, rather than using this
* side data structure.
*/
static takeover_record_t *thread_takeover_records;
static uint num_thread_takeover_records;
/* This is the dcontext of the thread that initiated the takeover. We read the
* owning_thread and signal_field threads from it in the signaled threads to
* set up siginfo sharing.
*/
static dcontext_t *takeover_dcontext;
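/* The takeover handshake, in brief (summary of the code below):
 *   initiator: publishes thread_takeover_records, sends SUSPEND_SIGNAL to each
 *              target tid, then waits on each record's event.
 *   target:    enters os_thread_take_over() from the signal handler, sets
 *              itself up as a DR thread, signals its record's event, and
 *              starts interpreting.
 */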
/* Lists active threads in the process.
* XXX: The /proc man page says /proc/pid/task is only available if the main
* thread is still alive, but experiments on 2.6.38 show otherwise.
*/
static thread_id_t *
os_list_threads(dcontext_t *dcontext, uint *num_threads_out)
{
dir_iterator_t iter;
file_t task_dir;
uint tids_alloced = 10;
uint num_threads = 0;
thread_id_t *new_tids;
thread_id_t *tids;
ASSERT(num_threads_out != NULL);
#ifdef MACOS
/* XXX i#58: NYI.
* We may want SYS_proc_info with PROC_INFO_PID_INFO and PROC_PIDLISTTHREADS,
* or is that just BSD threads and instead we want process_set_tasks()
* and task_info() as in 7.3.1.3 in Singh's OSX book?
*/
*num_threads_out = 0;
return NULL;
#endif
tids =
HEAP_ARRAY_ALLOC(dcontext, thread_id_t, tids_alloced, ACCT_THREAD_MGT, PROTECTED);
task_dir = os_open_directory("/proc/self/task", OS_OPEN_READ);
ASSERT(task_dir != INVALID_FILE);
os_dir_iterator_start(&iter, task_dir);
while (os_dir_iterator_next(&iter)) {
thread_id_t tid;
DEBUG_DECLARE(int r;)
if (strcmp(iter.name, ".") == 0 || strcmp(iter.name, "..") == 0)
continue;
IF_DEBUG(r =)
sscanf(iter.name, "%u", &tid);
ASSERT_MESSAGE(CHKLVL_ASSERTS, "failed to parse /proc/pid/task entry", r == 1);
if (tid <= 0)
continue;
if (num_threads == tids_alloced) {
/* realloc, essentially. Less expensive than counting first. */
new_tids = HEAP_ARRAY_ALLOC(dcontext, thread_id_t, tids_alloced * 2,
ACCT_THREAD_MGT, PROTECTED);
memcpy(new_tids, tids, sizeof(thread_id_t) * tids_alloced);
HEAP_ARRAY_FREE(dcontext, tids, thread_id_t, tids_alloced, ACCT_THREAD_MGT,
PROTECTED);
tids = new_tids;
tids_alloced *= 2;
}
tids[num_threads++] = tid;
}
ASSERT(iter.end == 0); /* No reading errors. */
os_close(task_dir);
/* realloc back down to num_threads for caller simplicity. */
new_tids =
HEAP_ARRAY_ALLOC(dcontext, thread_id_t, num_threads, ACCT_THREAD_MGT, PROTECTED);
memcpy(new_tids, tids, sizeof(thread_id_t) * num_threads);
HEAP_ARRAY_FREE(dcontext, tids, thread_id_t, tids_alloced, ACCT_THREAD_MGT,
PROTECTED);
tids = new_tids;
*num_threads_out = num_threads;
return tids;
}
/* List the /proc/self/task directory and add all unknown thread ids to the
* all_threads hashtable in dynamo.c. Returns true if we found any unknown
* threads and false otherwise. We assume that since we don't know about them
* they are not under DR and have no dcontexts.
*/
bool
os_take_over_all_unknown_threads(dcontext_t *dcontext)
{
uint i;
uint num_threads;
thread_id_t *tids;
uint threads_to_signal = 0;
/* We do not want to re-takeover a thread that's in between notifying us on
* the last call to this routine and getting onto the all_threads list as
* we'll self-interpret our own code leading to a lot of problems.
* XXX: should we use an event to avoid this inefficient loop? We expect
* this to only happen in rare cases during attach when threads are in flux.
*/
while (uninit_thread_count > 0) /* relying on volatile */
os_thread_yield();
/* This can only happen if we had already taken over a thread, because there is
* full synchronization at detach. The same thread may now already be on its way
* to exit, and its thread record might be gone already and make it look like a
* new native thread below. If we rely on the thread to self-detect that it was
* interrupted at a DR address we may run into a deadlock (i#2694). In order to
* avoid this, we wait here. This is expected to be uncommon, and can only happen
* with very short-lived threads.
* XXX: if this loop turns out to be too inefficient, we could support detecting
* the lock function's address bounds along w/ is_dynamo_address.
*/
while (exiting_thread_count > 0)
os_thread_yield();
d_r_mutex_lock(&thread_initexit_lock);
CLIENT_ASSERT(thread_takeover_records == NULL,
"Only one thread should attempt app take over!");
/* Find tids for which we have no thread record, meaning they are not under
* our control. Shift them to the beginning of the tids array.
*/
tids = os_list_threads(dcontext, &num_threads);
if (tids == NULL) {
d_r_mutex_unlock(&thread_initexit_lock);
return false; /* have to assume no unknown */
}
for (i = 0; i < num_threads; i++) {
thread_record_t *tr = thread_lookup(tids[i]);
if (tr == NULL ||
/* Re-takeover known threads that are currently native as well.
* XXX i#95: we need a synchall-style loop for known threads as
* they can be in DR for syscall hook handling.
* Update: we now remove the hook for start/stop: but native_exec
* or other individual threads going native could still hit this.
*/
(is_thread_currently_native(tr)
IF_CLIENT_INTERFACE(&&!IS_CLIENT_THREAD(tr->dcontext))))
tids[threads_to_signal++] = tids[i];
}
LOG(GLOBAL, LOG_THREADS, 1, "TAKEOVER: %d threads to take over\n", threads_to_signal);
if (threads_to_signal > 0) {
takeover_record_t *records;
/* Assuming pthreads, prepare signal_field for sharing. */
handle_clone(dcontext, PTHREAD_CLONE_FLAGS);
/* Create records with events for all the threads we want to signal. */
LOG(GLOBAL, LOG_THREADS, 1, "TAKEOVER: publishing takeover records\n");
records = HEAP_ARRAY_ALLOC(dcontext, takeover_record_t, threads_to_signal,
ACCT_THREAD_MGT, PROTECTED);
for (i = 0; i < threads_to_signal; i++) {
LOG(GLOBAL, LOG_THREADS, 1, "TAKEOVER: will signal thread " TIDFMT "\n",
tids[i]);
records[i].tid = tids[i];
records[i].event = create_event();
}
/* Publish the records and the initial take over dcontext. */
thread_takeover_records = records;
num_thread_takeover_records = threads_to_signal;
takeover_dcontext = dcontext;
/* Signal the other threads. */
for (i = 0; i < threads_to_signal; i++) {
thread_signal(get_process_id(), records[i].tid, SUSPEND_SIGNAL);
}
d_r_mutex_unlock(&thread_initexit_lock);
/* Wait for all the threads we signaled. */
ASSERT_OWN_NO_LOCKS();
for (i = 0; i < threads_to_signal; i++) {
static const int progress_period = 50;
if (i % progress_period == 0) {
char buf[16];
/* +1 to include the attach request thread to match the final msg. */
snprintf(buf, BUFFER_SIZE_ELEMENTS(buf), "%d/%d", i + 1,
threads_to_signal + 1);
NULL_TERMINATE_BUFFER(buf);
SYSLOG(SYSLOG_VERBOSE, INFO_ATTACHED, 3, buf, get_application_name(),
get_application_pid());
}
static const int wait_ms = 25;
while (!wait_for_event(records[i].event, wait_ms)) {
/* The thread may have exited (i#2601). We assume no tid re-use. */
char task[64];
snprintf(task, BUFFER_SIZE_ELEMENTS(task), "/proc/self/task/%d", tids[i]);
NULL_TERMINATE_BUFFER(task);
if (!os_file_exists(task, false /*!is dir*/)) {
SYSLOG_INTERNAL_WARNING_ONCE("thread exited while attaching");
break;
}
/* Else try again. */
}
}
/* Now that we've taken over the other threads, we can safely free the
* records and reset the shared globals.
*/
d_r_mutex_lock(&thread_initexit_lock);
LOG(GLOBAL, LOG_THREADS, 1,
"TAKEOVER: takeover complete, unpublishing records\n");
thread_takeover_records = NULL;
num_thread_takeover_records = 0;
takeover_dcontext = NULL;
for (i = 0; i < threads_to_signal; i++) {
destroy_event(records[i].event);
}
HEAP_ARRAY_FREE(dcontext, records, takeover_record_t, threads_to_signal,
ACCT_THREAD_MGT, PROTECTED);
}
d_r_mutex_unlock(&thread_initexit_lock);
HEAP_ARRAY_FREE(dcontext, tids, thread_id_t, num_threads, ACCT_THREAD_MGT, PROTECTED);
return threads_to_signal > 0;
}
bool
os_thread_re_take_over(void)
{
#ifdef X86
/* i#2089: is_thread_initialized() will fail for a currently-native app.
* We bypass the magic field checks here of is_thread_tls_initialized().
* XXX: should this be inside is_thread_initialized()? But that may mislead
* other callers: the caller has to restore the TLs. Some old code also
* used get_thread_private_dcontext() being NULL to indicate an unknown thread:
* that should also call here.
*/
if (!is_thread_initialized() && is_thread_tls_allocated()) {
/* It's safe to call thread_lookup() for ourself. */
thread_record_t *tr = thread_lookup(get_sys_thread_id());
if (tr != NULL) {
ASSERT(is_thread_currently_native(tr));
LOG(GLOBAL, LOG_THREADS, 1, "\tretakeover for cur-native thread " TIDFMT "\n",
get_sys_thread_id());
LOG(tr->dcontext->logfile, LOG_THREADS, 1,
"\nretakeover for cur-native thread " TIDFMT "\n", get_sys_thread_id());
os_swap_dr_tls(tr->dcontext, false /*to dr*/);
ASSERT(is_thread_initialized());
return true;
}
}
#endif
return false;
}
static void
os_thread_signal_taken_over(void)
{
thread_id_t mytid;
event_t event = NULL;
uint i;
/* Wake up the thread that initiated the take over. */
mytid = d_r_get_thread_id();
ASSERT(thread_takeover_records != NULL);
for (i = 0; i < num_thread_takeover_records; i++) {
if (thread_takeover_records[i].tid == mytid) {
event = thread_takeover_records[i].event;
break;
}
}
ASSERT_MESSAGE(CHKLVL_ASSERTS, "mytid not present in takeover records!",
event != NULL);
signal_event(event);
}
/* Takes over the current thread from the signal handler. We notify the thread
* that signaled us by signalling our event in thread_takeover_records.
* If it returns, it returns false, and the thread should be let go.
*/
bool
os_thread_take_over(priv_mcontext_t *mc, kernel_sigset_t *sigset)
{
dcontext_t *dcontext;
priv_mcontext_t *dc_mc;
LOG(GLOBAL, LOG_THREADS, 1, "TAKEOVER: received signal in thread " TIDFMT "\n",
get_sys_thread_id());
/* Do standard DR thread initialization. Mirrors code in
* create_clone_record and new_thread_setup, except we're not putting a
* clone record on the dstack.
*/
os_thread_re_take_over();
if (!is_thread_initialized()) {
/* If this is a thread on its way to init, don't self-interp (i#2688). */
if (is_dynamo_address(mc->pc)) {
os_thread_signal_taken_over();
return false;
}
dcontext = init_thread_with_shared_siginfo(mc, takeover_dcontext);
ASSERT(dcontext != NULL);
} else {
/* Re-takeover a thread that we let go native */
dcontext = get_thread_private_dcontext();
ASSERT(dcontext != NULL);
}
signal_set_mask(dcontext, sigset);
signal_swap_mask(dcontext, true /*to app*/);
dynamo_thread_under_dynamo(dcontext);
dc_mc = get_mcontext(dcontext);
*dc_mc = *mc;
dcontext->whereami = DR_WHERE_APP;
dcontext->next_tag = mc->pc;
os_thread_signal_taken_over();
DOLOG(2, LOG_TOP, {
byte *cur_esp;
GET_STACK_PTR(cur_esp);
LOG(THREAD, LOG_TOP, 2,
"%s: next_tag=" PFX ", cur xsp=" PFX ", mc->xsp=" PFX "\n", __FUNCTION__,
dcontext->next_tag, cur_esp, mc->xsp);
});
/* Start interpreting from the signal context. */
call_switch_stack(dcontext, dcontext->dstack, (void (*)(void *))d_r_dispatch,
NULL /*not on d_r_initstack*/, false /*shouldn't return*/);
ASSERT_NOT_REACHED();
return true; /* make compiler happy */
}
bool
os_thread_take_over_suspended_native(dcontext_t *dcontext)
{
os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field;
if (!is_thread_currently_native(dcontext->thread_record) ||
ksynch_get_value(&ostd->suspended) < 0)
return false;
/* Thread is sitting in suspend signal loop so we just set a flag
* for when it resumes:
*/
/* XXX: there's no event for a client to trigger this on so not yet
* tested. i#721 may help.
*/
ASSERT_NOT_TESTED();
ostd->retakeover = true;
return true;
}
/* Called for os-specific takeover of a secondary thread from the one
* that called dr_app_setup().
*/
dcontext_t *
os_thread_take_over_secondary(priv_mcontext_t *mc)
{
thread_record_t **list;
int num_threads;
int i;
dcontext_t *dcontext;
/* We want to share with the thread that called dr_app_setup. */
d_r_mutex_lock(&thread_initexit_lock);
get_list_of_threads(&list, &num_threads);
ASSERT(num_threads >= 1);
for (i = 0; i < num_threads; i++) {
/* Find a thread that's already set up */
if (is_thread_signal_info_initialized(list[i]->dcontext))
break;
}
ASSERT(i < num_threads);
ASSERT(list[i]->id != get_sys_thread_id());
/* Assuming pthreads, prepare signal_field for sharing. */
handle_clone(list[i]->dcontext, PTHREAD_CLONE_FLAGS);
dcontext = init_thread_with_shared_siginfo(mc, list[i]->dcontext);
d_r_mutex_unlock(&thread_initexit_lock);
global_heap_free(list,
num_threads * sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT));
return dcontext;
}
/***************************************************************************/
uint
os_random_seed(void)
{
uint seed;
/* reading from /dev/urandom for a non-blocking random */
int urand = os_open("/dev/urandom", OS_OPEN_READ);
DEBUG_DECLARE(int read =) os_read(urand, &seed, sizeof(seed));
ASSERT(read == sizeof(seed));
os_close(urand);
return seed;
}
#ifdef RCT_IND_BRANCH
/* Analyze a range in a possibly new module.
 * Returns false if this is not a code section in a module;
 * otherwise returns true and adds all valid targets for rct_ind_branch_check.
 */
bool
rct_analyze_module_at_violation(dcontext_t *dcontext, app_pc target_pc)
{
    /* FIXME: note that this will NOT find the data section corresponding to the given PC;
     * we don't yet have a corresponding get_allocation_size or an ELF header walk routine
     * on Linux.
     */
app_pc code_start;
size_t code_size;
uint prot;
if (!get_memory_info(target_pc, &code_start, &code_size, &prot))
return false;
    /* TODO: in almost all cases we expect the region at module_base+module_size to be
     * the corresponding data section.
     * Writable yet initialized data indeed needs to be processed.
     */
if (code_size > 0) {
app_pc code_end = code_start + code_size;
app_pc data_start;
size_t data_size;
ASSERT(TESTALL(MEMPROT_READ | MEMPROT_EXEC, prot)); /* code */
if (!get_memory_info(code_end, &data_start, &data_size, &prot))
return false;
ASSERT(data_start == code_end);
ASSERT(TESTALL(MEMPROT_READ | MEMPROT_WRITE, prot)); /* data */
app_pc text_start = code_start;
app_pc text_end = data_start + data_size;
/* TODO: performance: do this only in case relocation info is not present */
DEBUG_DECLARE(uint found =)
find_address_references(dcontext, text_start, text_end, code_start, code_end);
        LOG(GLOBAL, LOG_RCT, 2, PFX "-" PFX " : %d ind targets of " SZFMT " code size\n",
            text_start, text_end, found, code_size);
return true;
}
return false;
}
# ifdef X64
bool
rct_add_rip_rel_addr(dcontext_t *dcontext, app_pc tgt _IF_DEBUG(app_pc src))
{
/* FIXME PR 276762: not implemented */
return false;
}
# endif
#endif /* RCT_IND_BRANCH */
#ifdef HOT_PATCHING_INTERFACE
void *
get_drmarker_hotp_policy_status_table()
{
ASSERT_NOT_IMPLEMENTED(false);
return NULL;
}
void
set_drmarker_hotp_policy_status_table(void *new_table)
{
ASSERT_NOT_IMPLEMENTED(false);
}
byte *
hook_text(byte *hook_code_buf, const app_pc image_addr, intercept_function_t hook_func,
const void *callee_arg, const after_intercept_action_t action_after,
const bool abort_if_hooked, const bool ignore_cti, byte **app_code_copy_p,
byte **alt_exit_tgt_p)
{
ASSERT_NOT_IMPLEMENTED(false);
return NULL;
}
void
unhook_text(byte *hook_code_buf, app_pc image_addr)
{
ASSERT_NOT_IMPLEMENTED(false);
}
void
insert_jmp_at_tramp_entry(dcontext_t *dcontext, byte *trampoline, byte *target)
{
ASSERT_NOT_IMPLEMENTED(false);
}
#endif /* HOT_PATCHING_INTERFACE */
bool
aslr_is_possible_attack(app_pc target)
{
/* FIXME: ASLR not implemented */
return false;
}
app_pc
aslr_possible_preferred_address(app_pc target_addr)
{
/* FIXME: ASLR not implemented */
return NULL;
}
void
take_over_primary_thread()
{
/* nothing to do here */
}
bool
os_current_user_directory(char *directory_prefix /* INOUT */, uint directory_len,
bool create)
{
/* XXX: could share some of this code w/ corresponding windows routine */
uid_t uid = dynamorio_syscall(SYS_getuid, 0);
char *directory = directory_prefix;
char *dirend = directory_prefix + strlen(directory_prefix);
snprintf(dirend, directory_len - (dirend - directory_prefix), "%cdpc-%d", DIRSEP,
uid);
directory_prefix[directory_len - 1] = '\0';
if (!os_file_exists(directory, true /*is dir*/) && create) {
/* XXX: we should ensure we do not follow symlinks */
/* XXX: should add support for CREATE_DIR_FORCE_OWNER */
if (!os_create_dir(directory, CREATE_DIR_REQUIRE_NEW)) {
LOG(GLOBAL, LOG_CACHE, 2, "\terror creating per-user dir %s\n", directory);
return false;
} else {
LOG(GLOBAL, LOG_CACHE, 2, "\tcreated per-user dir %s\n", directory);
}
}
return true;
}
bool
os_validate_user_owned(file_t file_or_directory_handle)
{
/* note on Linux this scheme should never be used */
ASSERT(false && "chown Alice evilfile");
return false;
}
bool
os_check_option_compatibility(void)
{
/* no options are Linux OS version dependent */
return false;
}
#ifdef X86_32
/* Emulate uint64 modulo and division by uint32 on ia32.
* XXX: Does *not* handle 64-bit divisors!
*/
static uint64
uint64_divmod(uint64 dividend, uint64 divisor64, uint32 *remainder)
{
/* Assumes little endian, which x86 is. */
union {
uint64 v64;
struct {
uint32 lo;
uint32 hi;
};
} res;
uint32 upper;
uint32 divisor = (uint32)divisor64;
    /* Our callers never pass large divisors. */
ASSERT(divisor64 <= UINT_MAX && "divisor is larger than uint32 can hold");
/* Divide out the high bits first. */
res.v64 = dividend;
upper = res.hi;
res.hi = upper / divisor;
upper %= divisor;
/* Use the unsigned div instruction, which uses EDX:EAX to form a 64-bit
* dividend. We only get a 32-bit quotient out, which is why we divide out
* the high bits first. The quotient will fit in EAX.
*
* DIV r/m32 F7 /6 Unsigned divide EDX:EAX by r/m32, with result stored
* in EAX <- Quotient, EDX <- Remainder.
* inputs:
* EAX = res.lo
* EDX = upper
* rm = divisor
* outputs:
* res.lo = EAX
* *remainder = EDX
* The outputs precede the inputs in gcc inline asm syntax, and so to put
* inputs in EAX and EDX we use "0" and "1".
*/
asm("divl %2"
: "=a"(res.lo), "=d"(*remainder)
: "rm"(divisor), "0"(res.lo), "1"(upper));
return res.v64;
}
/* Match libgcc's prototype. */
uint64
__udivdi3(uint64 dividend, uint64 divisor)
{
uint32 remainder;
return uint64_divmod(dividend, divisor, &remainder);
}
/* Match libgcc's prototype. */
uint64
__umoddi3(uint64 dividend, uint64 divisor)
{
uint32 remainder;
uint64_divmod(dividend, divisor, &remainder);
return (uint64)remainder;
}
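/* Illustrative note (an addition, not from the original source): on ia32, gcc
 * lowers 64-bit unsigned division and modulo into calls to these intrinsics:
 *   uint64 q = a / b;  =>  q = __udivdi3(a, b);
 *   uint64 r = a % b;  =>  r = __umoddi3(a, b);
 * which is why DR must provide them when not linking against libgcc.
 */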
/* Same thing for signed. */
static int64
int64_divmod(int64 dividend, int64 divisor64, int *remainder)
{
union {
int64 v64;
struct {
int lo;
int hi;
};
} res;
int upper;
int divisor = (int)divisor64;
    /* Our callers never pass large divisors. */
ASSERT(divisor64 <= INT_MAX && divisor64 >= INT_MIN && "divisor too large for int");
/* Divide out the high bits first. */
res.v64 = dividend;
upper = res.hi;
res.hi = upper / divisor;
upper %= divisor;
/* Like above but with the signed div instruction, which does a signed divide
* on edx:eax by r/m32 => quotient in eax, remainder in edx.
*/
asm("idivl %2"
: "=a"(res.lo), "=d"(*remainder)
: "rm"(divisor), "0"(res.lo), "1"(upper));
return res.v64;
}
/* Match libgcc's prototype. */
int64
__divdi3(int64 dividend, int64 divisor)
{
int remainder;
return int64_divmod(dividend, divisor, &remainder);
}
/* __moddi3 comes from third_party/libgcc for x86 as well as ARM. */
#elif defined(ARM)
/* i#1566: for ARM, __aeabi versions are used instead of udivdi3 and umoddi3.
* We link with __aeabi routines from libgcc via third_party/libgcc.
*/
#endif /* X86_32 */
/****************************************************************************
* Kernel-restartable sequences
*/
#ifdef LINUX
/* Support for Linux kernel extensions for per-cpu critical regions.
* Xref https://lwn.net/Articles/649288/
* Some of this may vary on different kernels.
* The way it works is that the app tells the kernel the bounds of a
* code region within which a context switch should restart the code.
*
* As these sequences are complex to handle (it would be much simpler
* if they used existing mechanisms like signals!), we start out by
* running their code natively. We assume it is "well-behaved" and
* we'll get control back. These code sequences will be invisible to
* tools: we'll live with the lack of instrumentation for now as a
* tradeoff for getting correct app execution.
*
* Unfortunately we can't easily have a regression test in the main
* repository as mainstream kernels do not have this feature.
*/
/* We support a syscall of this form, with number DYNAMO_OPTION(rseq_sysnum):
* SYSCALL_DEFINE4(rseq, int, op, long, val1, long, val2, long, val3)
*/
/* Set operation: app_pc start, app_pc end, app_pc restart */
# define RSEQ_SET_CRITICAL 1
/* Get operation: app_pc *start, app_pc *end, app_pc *restart */
# define RSEQ_GET_CRITICAL 3
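/* Illustrative sketch (an addition; assumed app-side usage, not DR code): with
 * the kernel extension above, an app registers a per-cpu critical region via:
 *   syscall(rseq_sysnum, RSEQ_SET_CRITICAL, (long)region_start,
 *           (long)region_end, (long)restart_handler);
 * A context switch while the thread is executing inside
 * [region_start, region_end) then resumes it at restart_handler rather than
 * at the interrupted PC.
 */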
static app_pc app_restart_region_start;
static app_pc app_restart_region_end;
static void
restartable_region_init(void)
{
int res;
app_pc restart_handler;
if (DYNAMO_OPTION(rseq_sysnum) == 0)
return;
res = dynamorio_syscall(DYNAMO_OPTION(rseq_sysnum), 4, RSEQ_GET_CRITICAL,
&app_restart_region_start, &app_restart_region_end,
&restart_handler);
if (res != 0) {
ASSERT(res == -ENOSYS);
LOG(GLOBAL, LOG_TOP, 1, "No restartable region at init\n");
app_restart_region_start = NULL;
app_restart_region_end = NULL;
} else {
LOG(GLOBAL, LOG_TOP, 1, "Restartable region at init: " PFX "-" PFX " @" PFX "\n",
app_restart_region_start, app_restart_region_end, restart_handler);
if (app_restart_region_start != NULL &&
app_restart_region_end > app_restart_region_start) {
vmvector_add(native_exec_areas, app_restart_region_start,
app_restart_region_end, NULL);
}
}
}
static bool
handle_restartable_region_syscall_pre(dcontext_t *dcontext)
{
if (DYNAMO_OPTION(rseq_sysnum) == 0 ||
dcontext->sys_num != DYNAMO_OPTION(rseq_sysnum))
return true;
/* We do the work in post */
dcontext->sys_param0 = sys_param(dcontext, 0);
dcontext->sys_param1 = sys_param(dcontext, 1);
dcontext->sys_param2 = sys_param(dcontext, 2);
return true;
}
/* Though there is a race, it is hard to imagine the app executing correctly
* without first checking the return value of the syscall. Thus we handle
* rseq in post and avoid having to emulate the kernel's argument checking.
*/
static void
handle_restartable_region_syscall_post(dcontext_t *dcontext, bool success)
{
int op;
if (DYNAMO_OPTION(rseq_sysnum) == 0 ||
dcontext->sys_num != DYNAMO_OPTION(rseq_sysnum) || !success)
return;
op = (int)dcontext->sys_param0;
if (op == RSEQ_SET_CRITICAL) {
app_pc start = (app_pc)dcontext->sys_param1;
app_pc end = (app_pc)dcontext->sys_param2;
LOG(THREAD, LOG_VMAREAS | LOG_SYSCALLS, 2,
"syscall: set rseq region to " PFX "-" PFX "\n", start, end);
/* An unlink flush should be good enough: we simply don't support
* suddenly setting an rseq region for some fallthrough code after the
* syscall.
*/
if (app_restart_region_start != NULL &&
app_restart_region_end > app_restart_region_start) {
vmvector_remove(native_exec_areas, app_restart_region_start,
app_restart_region_end);
/* Flush existing code so it no longer goes native. */
flush_fragments_from_region(dcontext, app_restart_region_start,
app_restart_region_end - app_restart_region_start,
false /*don't force synchall*/);
}
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
app_restart_region_start = start;
app_restart_region_end = end;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
if (app_restart_region_start != NULL &&
app_restart_region_end > app_restart_region_start) {
vmvector_add(native_exec_areas, app_restart_region_start,
app_restart_region_end, NULL);
/* We have to flush any existing code in the region. */
flush_fragments_from_region(dcontext, app_restart_region_start,
app_restart_region_end - app_restart_region_start,
false /*don't force synchall*/);
}
}
}
#endif /* LINUX */
void
native_exec_os_init(void)
{
#ifdef LINUX
restartable_region_init();
#endif
}
/****************************************************************************
* Tests
*/
#if defined(STANDALONE_UNIT_TEST)
void
test_uint64_divmod(void)
{
# ifdef X86_32
uint64 quotient;
uint32 remainder;
/* Simple division below 2^32. */
quotient = uint64_divmod(9, 3, &remainder);
EXPECT(quotient == 3, true);
EXPECT(remainder == 0, true);
quotient = uint64_divmod(10, 3, &remainder);
EXPECT(quotient == 3, true);
EXPECT(remainder == 1, true);
/* Division when upper bits are less than the divisor. */
quotient = uint64_divmod(45ULL << 31, 1U << 31, &remainder);
EXPECT(quotient == 45, true);
EXPECT(remainder == 0, true);
/* Division when upper bits are greater than the divisor. */
quotient = uint64_divmod(45ULL << 32, 15, &remainder);
EXPECT(quotient == 3ULL << 32, true);
EXPECT(remainder == 0, true);
quotient = uint64_divmod((45ULL << 32) + 13, 15, &remainder);
EXPECT(quotient == 3ULL << 32, true);
EXPECT(remainder == 13, true);
    /* Try calling the intrinsics. Don't divide by powers of two; gcc will
     * lower that to a shift.
     */
quotient = (45ULL << 32);
quotient /= 15;
EXPECT(quotient == (3ULL << 32), true);
quotient = (45ULL << 32) + 13;
remainder = quotient % 15;
EXPECT(remainder == 13, true);
# endif /* X86_32 */
}
void
unit_test_os(void)
{
test_uint64_divmod();
}
#endif /* STANDALONE_UNIT_TEST */
| 1 | 16,430 | But we're already doing os_switch_lib_tls to app a few lines above, so there is now redundancy we should alleviate. | DynamoRIO-dynamorio | c |
@@ -52,10 +52,11 @@ namespace Datadog.Trace
/// <param name="parent">The parent context.</param>
/// <param name="traceContext">The trace context.</param>
/// <param name="serviceName">The service name to propagate to child spans.</param>
- internal SpanContext(ISpanContext parent, ITraceContext traceContext, string serviceName)
+ /// <param name="spanId">The propagated span id.</param>
+ internal SpanContext(ISpanContext parent, ITraceContext traceContext, string serviceName, ulong? spanId = null)
: this(parent?.TraceId, serviceName)
{
- SpanId = SpanIdGenerator.ThreadInstance.CreateNew();
+ SpanId = spanId ?? SpanIdGenerator.ThreadInstance.CreateNew();
Parent = parent;
TraceContext = traceContext;
if (parent is SpanContext spanContext) | 1 | using Datadog.Trace.ExtensionMethods;
using Datadog.Trace.Logging;
using Datadog.Trace.Util;
namespace Datadog.Trace
{
/// <summary>
/// The SpanContext contains all the information needed to express relationships between spans inside or outside the process boundaries.
/// </summary>
public class SpanContext : ISpanContext
{
private static readonly Vendors.Serilog.ILogger Log = DatadogLogging.For<SpanContext>();
/// <summary>
/// Initializes a new instance of the <see cref="SpanContext"/> class
/// from a propagated context. <see cref="Parent"/> will be null
/// since this is a root context locally.
/// </summary>
/// <param name="traceId">The propagated trace id.</param>
/// <param name="spanId">The propagated span id.</param>
/// <param name="samplingPriority">The propagated sampling priority.</param>
/// <param name="serviceName">The service name to propagate to child spans.</param>
public SpanContext(ulong? traceId, ulong spanId, SamplingPriority? samplingPriority = null, string serviceName = null)
: this(traceId, serviceName)
{
SpanId = spanId;
SamplingPriority = samplingPriority;
}
/// <summary>
/// Initializes a new instance of the <see cref="SpanContext"/> class
/// from a propagated context. <see cref="Parent"/> will be null
/// since this is a root context locally.
/// </summary>
/// <param name="traceId">The propagated trace id.</param>
/// <param name="spanId">The propagated span id.</param>
/// <param name="samplingPriority">The propagated sampling priority.</param>
/// <param name="serviceName">The service name to propagate to child spans.</param>
/// <param name="origin">The propagated origin of the trace.</param>
internal SpanContext(ulong? traceId, ulong spanId, SamplingPriority? samplingPriority, string serviceName, string origin)
: this(traceId, serviceName)
{
SpanId = spanId;
SamplingPriority = samplingPriority;
Origin = origin;
}
/// <summary>
/// Initializes a new instance of the <see cref="SpanContext"/> class
/// that is the child of the specified parent context.
/// </summary>
/// <param name="parent">The parent context.</param>
/// <param name="traceContext">The trace context.</param>
/// <param name="serviceName">The service name to propagate to child spans.</param>
internal SpanContext(ISpanContext parent, ITraceContext traceContext, string serviceName)
: this(parent?.TraceId, serviceName)
{
SpanId = SpanIdGenerator.ThreadInstance.CreateNew();
Parent = parent;
TraceContext = traceContext;
if (parent is SpanContext spanContext)
{
Origin = spanContext.Origin;
}
}
private SpanContext(ulong? traceId, string serviceName)
{
TraceId = traceId > 0
? traceId.Value
: SpanIdGenerator.ThreadInstance.CreateNew();
ServiceName = serviceName;
}
/// <summary>
/// Gets the parent context.
/// </summary>
public ISpanContext Parent { get; }
/// <summary>
/// Gets the trace id
/// </summary>
public ulong TraceId { get; }
/// <summary>
/// Gets the span id of the parent span
/// </summary>
public ulong? ParentId => Parent?.SpanId;
/// <summary>
/// Gets the span id
/// </summary>
public ulong SpanId { get; }
/// <summary>
/// Gets or sets the service name to propagate to child spans.
/// </summary>
public string ServiceName { get; set; }
/// <summary>
/// Gets the origin of the trace
/// </summary>
internal string Origin { get; }
/// <summary>
/// Gets the trace context.
/// Returns null for contexts created from incoming propagated context.
/// </summary>
internal ITraceContext TraceContext { get; }
/// <summary>
/// Gets the sampling priority for contexts created from incoming propagated context.
/// Returns null for local contexts.
/// </summary>
internal SamplingPriority? SamplingPriority { get; }
}
}
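// Illustrative usage (an addition, not part of this file): rebuilding a
// propagated parent context server-side, assuming traceId and spanId were
// already parsed from incoming headers ("my-service" is a placeholder name):
//   var parent = new SpanContext(traceId, spanId, SamplingPriority.AutoKeep, "my-service");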
| 1 | 18,713 | Under what circumstance would we have a span id already? Is this for testing purposes? | DataDog-dd-trace-dotnet | .cs |
@@ -2986,7 +2986,8 @@ static bool PreCallValidateQueueSubmit(layer_data *dev_data, VkQueue queue, uint
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
"Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
- " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
+ " that was previously signaled by queue 0x%" PRIx64
+ " but has not since been waited on by any queue.",
HandleToUint64(queue), HandleToUint64(semaphore), HandleToUint64(pSemaphore->signaler.first));
} else {
unsignaled_semaphores.erase(semaphore); | 1 | /* Copyright (c) 2015-2018 The Khronos Group Inc.
* Copyright (c) 2015-2018 Valve Corporation
* Copyright (c) 2015-2018 LunarG, Inc.
* Copyright (C) 2015-2018 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Cody Northrop <[email protected]>
* Author: Michael Lentine <[email protected]>
* Author: Tobin Ehlis <[email protected]>
* Author: Chia-I Wu <[email protected]>
* Author: Chris Forbes <[email protected]>
* Author: Mark Lobodzinski <[email protected]>
* Author: Ian Elliott <[email protected]>
* Author: Dave Houlton <[email protected]>
* Author: Dustin Graves <[email protected]>
* Author: Jeremy Hayes <[email protected]>
* Author: Jon Ashburn <[email protected]>
* Author: Karl Schultz <[email protected]>
* Author: Mark Young <[email protected]>
* Author: Mike Schuchardt <[email protected]>
* Author: Mike Weiblen <[email protected]>
* Author: Tony Barbour <[email protected]>
*/
// Allow use of STL min and max functions in Windows
#define NOMINMAX
#define VALIDATION_ERROR_MAP_IMPL
#include <algorithm>
#include <array>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <valarray>
#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_enum_string_helper.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "core_validation.h"
#include "buffer_validation.h"
#include "shader_validation.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "vk_typemap_helper.h"
#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "CORE_VALIDATION", __VA_ARGS__))
#else
#define LOGCONSOLE(...) \
{ \
printf(__VA_ARGS__); \
printf("\n"); \
}
#endif
// This intentionally includes a cpp file
#include "vk_safe_struct.cpp"
using mutex_t = std::mutex;
using lock_guard_t = std::lock_guard<mutex_t>;
using unique_lock_t = std::unique_lock<mutex_t>;
// These functions are defined *outside* the core_validation namespace as their type
// is also defined outside that namespace
size_t PipelineLayoutCompatDef::hash() const {
hash_util::HashCombiner hc;
// The set number is integral to the CompatDef's distinctiveness
hc << set << push_constant_ranges.get();
const auto &descriptor_set_layouts = *set_layouts_id.get();
for (uint32_t i = 0; i <= set; i++) {
hc << descriptor_set_layouts[i].get();
}
return hc.Value();
}
bool PipelineLayoutCompatDef::operator==(const PipelineLayoutCompatDef &other) const {
if ((set != other.set) || (push_constant_ranges != other.push_constant_ranges)) {
return false;
}
if (set_layouts_id == other.set_layouts_id) {
// if it's the same set_layouts_id, then *any* subset will match
return true;
}
// They aren't exactly the same PipelineLayoutSetLayouts, so we need to check if the required subsets match
const auto &descriptor_set_layouts = *set_layouts_id.get();
assert(set < descriptor_set_layouts.size());
const auto &other_ds_layouts = *other.set_layouts_id.get();
assert(set < other_ds_layouts.size());
for (uint32_t i = 0; i <= set; i++) {
if (descriptor_set_layouts[i] != other_ds_layouts[i]) {
return false;
}
}
return true;
}
namespace core_validation {
using std::max;
using std::string;
using std::stringstream;
using std::unique_ptr;
using std::unordered_map;
using std::unordered_set;
using std::vector;
// WSI Image Objects bypass usual Image Object creation methods. A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
// 2nd special memory handle used to flag object as unbound from memory
static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);
struct instance_layer_data {
VkInstance instance = VK_NULL_HANDLE;
debug_report_data *report_data = nullptr;
vector<VkDebugReportCallbackEXT> logging_callback;
vector<VkDebugUtilsMessengerEXT> logging_messenger;
VkLayerInstanceDispatchTable dispatch_table;
CALL_STATE vkEnumeratePhysicalDevicesState = UNCALLED;
uint32_t physical_devices_count = 0;
CALL_STATE vkEnumeratePhysicalDeviceGroupsState = UNCALLED;
uint32_t physical_device_groups_count = 0;
CHECK_DISABLED disabled = {};
unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;
InstanceExtensions extensions;
uint32_t api_version;
};
struct layer_data {
debug_report_data *report_data = nullptr;
VkLayerDispatchTable dispatch_table;
DeviceExtensions extensions = {};
unordered_set<VkQueue> queues; // All queues under given device
// Layer specific data
unordered_map<VkSampler, unique_ptr<SAMPLER_STATE>> samplerMap;
unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
unordered_map<VkImage, unique_ptr<IMAGE_STATE>> imageMap;
unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
unordered_map<VkBuffer, unique_ptr<BUFFER_STATE>> bufferMap;
unordered_map<VkPipeline, unique_ptr<PIPELINE_STATE>> pipelineMap;
unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> descriptorPoolMap;
unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
unordered_map<VkDescriptorSetLayout, std::shared_ptr<cvdescriptorset::DescriptorSetLayout>> descriptorSetLayoutMap;
unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
unordered_map<VkFence, FENCE_NODE> fenceMap;
unordered_map<VkQueue, QUEUE_STATE> queueMap;
unordered_map<VkEvent, EVENT_STATE> eventMap;
unordered_map<QueryObject, bool> queryToStateMap;
unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_STATE>> frameBufferMap;
unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
unordered_map<VkRenderPass, std::shared_ptr<RENDER_PASS_STATE>> renderPassMap;
unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
unordered_map<VkDescriptorUpdateTemplateKHR, unique_ptr<TEMPLATE_STATE>> desc_template_map;
unordered_map<VkSwapchainKHR, std::unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
GlobalQFOTransferBarrierMap<VkImageMemoryBarrier> qfo_release_image_barrier_map;
GlobalQFOTransferBarrierMap<VkBufferMemoryBarrier> qfo_release_buffer_barrier_map;
VkDevice device = VK_NULL_HANDLE;
VkPhysicalDevice physical_device = VK_NULL_HANDLE;
instance_layer_data *instance_data = nullptr; // from device to enclosing instance
DeviceFeatures enabled_features = {};
// Device specific data
PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
VkPhysicalDeviceProperties phys_dev_props = {};
// Device extension properties -- storing properties gathered from VkPhysicalDeviceProperties2KHR::pNext chain
struct DeviceExtensionProperties {
uint32_t max_push_descriptors; // from VkPhysicalDevicePushDescriptorPropertiesKHR::maxPushDescriptors
VkPhysicalDeviceDescriptorIndexingPropertiesEXT descriptor_indexing_props;
};
DeviceExtensionProperties phys_dev_ext_props = {};
bool external_sync_warning = false;
uint32_t api_version = 0;
};
// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;
static unordered_map<void *, instance_layer_data *> instance_layer_data_map;
static uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
static const VkLayerProperties global_layer = {
"VK_LAYER_LUNARG_core_validation",
VK_LAYER_API_VERSION,
1,
"LunarG Validation Layer",
};
static const VkExtensionProperties device_extensions[] = {
{VK_EXT_VALIDATION_CACHE_EXTENSION_NAME, VK_EXT_VALIDATION_CACHE_SPEC_VERSION},
};
template <class TCreateInfo>
void ValidateLayerOrdering(const TCreateInfo &createInfo) {
bool foundLayer = false;
for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
foundLayer = true;
}
// This has to be logged to console as we don't have a callback at this point.
if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[0], "VK_LAYER_GOOGLE_unique_objects")) {
LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.", global_layer.layerName);
}
}
}
// TODO : This can be much smarter, using separate locks for separate global data
static mutex_t global_lock;
// Get the global map of pending releases
GlobalQFOTransferBarrierMap<VkImageMemoryBarrier> &GetGlobalQFOReleaseBarrierMap(
layer_data *dev_data, const QFOTransferBarrier<VkImageMemoryBarrier>::Tag &type_tag) {
return dev_data->qfo_release_image_barrier_map;
}
GlobalQFOTransferBarrierMap<VkBufferMemoryBarrier> &GetGlobalQFOReleaseBarrierMap(
layer_data *dev_data, const QFOTransferBarrier<VkBufferMemoryBarrier>::Tag &type_tag) {
return dev_data->qfo_release_buffer_barrier_map;
}
// Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
IMAGE_VIEW_STATE *GetImageViewState(const layer_data *dev_data, VkImageView image_view) {
auto iv_it = dev_data->imageViewMap.find(image_view);
if (iv_it == dev_data->imageViewMap.end()) {
return nullptr;
}
return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_STATE *GetSamplerState(const layer_data *dev_data, VkSampler sampler) {
auto sampler_it = dev_data->samplerMap.find(sampler);
if (sampler_it == dev_data->samplerMap.end()) {
return nullptr;
}
return sampler_it->second.get();
}
// Return image state ptr for specified image or else NULL
IMAGE_STATE *GetImageState(const layer_data *dev_data, VkImage image) {
auto img_it = dev_data->imageMap.find(image);
if (img_it == dev_data->imageMap.end()) {
return nullptr;
}
return img_it->second.get();
}
// Return buffer state ptr for specified buffer or else NULL
BUFFER_STATE *GetBufferState(const layer_data *dev_data, VkBuffer buffer) {
auto buff_it = dev_data->bufferMap.find(buffer);
if (buff_it == dev_data->bufferMap.end()) {
return nullptr;
}
return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *GetSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
auto swp_it = dev_data->swapchainMap.find(swapchain);
if (swp_it == dev_data->swapchainMap.end()) {
return nullptr;
}
return swp_it->second.get();
}
// Return buffer view state ptr for specified buffer view or else NULL
BUFFER_VIEW_STATE *GetBufferViewState(const layer_data *dev_data, VkBufferView buffer_view) {
auto bv_it = dev_data->bufferViewMap.find(buffer_view);
if (bv_it == dev_data->bufferViewMap.end()) {
return nullptr;
}
return bv_it->second.get();
}
FENCE_NODE *GetFenceNode(layer_data *dev_data, VkFence fence) {
auto it = dev_data->fenceMap.find(fence);
if (it == dev_data->fenceMap.end()) {
return nullptr;
}
return &it->second;
}
EVENT_STATE *GetEventNode(layer_data *dev_data, VkEvent event) {
auto it = dev_data->eventMap.find(event);
if (it == dev_data->eventMap.end()) {
return nullptr;
}
return &it->second;
}
QUERY_POOL_NODE *GetQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
auto it = dev_data->queryPoolMap.find(query_pool);
if (it == dev_data->queryPoolMap.end()) {
return nullptr;
}
return &it->second;
}
QUEUE_STATE *GetQueueState(layer_data *dev_data, VkQueue queue) {
auto it = dev_data->queueMap.find(queue);
if (it == dev_data->queueMap.end()) {
return nullptr;
}
return &it->second;
}
SEMAPHORE_NODE *GetSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
auto it = dev_data->semaphoreMap.find(semaphore);
if (it == dev_data->semaphoreMap.end()) {
return nullptr;
}
return &it->second;
}
COMMAND_POOL_NODE *GetCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
auto it = dev_data->commandPoolMap.find(pool);
if (it == dev_data->commandPoolMap.end()) {
return nullptr;
}
return &it->second;
}
PHYSICAL_DEVICE_STATE *GetPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
auto it = instance_data->physical_device_map.find(phys);
if (it == instance_data->physical_device_map.end()) {
return nullptr;
}
return &it->second;
}
SURFACE_STATE *GetSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
auto it = instance_data->surface_map.find(surface);
if (it == instance_data->surface_map.end()) {
return nullptr;
}
return &it->second;
}
// Return ptr to memory binding for given handle of specified type
static BINDABLE *GetObjectMemBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
switch (type) {
case kVulkanObjectTypeImage:
return GetImageState(dev_data, VkImage(handle));
case kVulkanObjectTypeBuffer:
return GetBufferState(dev_data, VkBuffer(handle));
default:
break;
}
return nullptr;
}
// prototype
GLOBAL_CB_NODE *GetCBNode(layer_data const *, const VkCommandBuffer);
// Return ptr to info in map container containing mem, or NULL if not found
// Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *GetMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
auto mem_it = dev_data->memObjMap.find(mem);
if (mem_it == dev_data->memObjMap.end()) {
return NULL;
}
return mem_it->second.get();
}
static void AddMemObjInfo(layer_data *dev_data, void *object, const VkDeviceMemory mem, const VkMemoryAllocateInfo *pAllocateInfo) {
assert(object != NULL);
auto *mem_info = new DEVICE_MEM_INFO(object, mem, pAllocateInfo);
dev_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(mem_info);
auto dedicated = lvl_find_in_chain<VkMemoryDedicatedAllocateInfoKHR>(pAllocateInfo->pNext);
if (dedicated) {
mem_info->is_dedicated = true;
mem_info->dedicated_buffer = dedicated->buffer;
mem_info->dedicated_image = dedicated->image;
}
}
// Create binding link between given sampler and command buffer node
void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
sampler_state->cb_bindings.insert(cb_node);
cb_node->object_bindings.insert({HandleToUint64(sampler_state->sampler), kVulkanObjectTypeSampler});
}
// Create binding link between given image node and command buffer node
void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
// Skip validation if this image was created through WSI
if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
// First update CB binding in MemObj mini CB list
for (auto mem_binding : image_state->GetBoundMemory()) {
DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
if (pMemInfo) {
pMemInfo->cb_bindings.insert(cb_node);
// Now update CBInfo's Mem reference list
cb_node->memObjs.insert(mem_binding);
}
}
// Now update cb binding for image
cb_node->object_bindings.insert({HandleToUint64(image_state->image), kVulkanObjectTypeImage});
image_state->cb_bindings.insert(cb_node);
}
}
// Create binding link between given image view node and its image with command buffer node
void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
// First add bindings for imageView
view_state->cb_bindings.insert(cb_node);
cb_node->object_bindings.insert({HandleToUint64(view_state->image_view), kVulkanObjectTypeImageView});
auto image_state = GetImageState(dev_data, view_state->create_info.image);
// Add bindings for image within imageView
if (image_state) {
AddCommandBufferBindingImage(dev_data, cb_node, image_state);
}
}
// Create binding link between given buffer node and command buffer node
void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
// First update CB binding in MemObj mini CB list
for (auto mem_binding : buffer_state->GetBoundMemory()) {
DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
if (pMemInfo) {
pMemInfo->cb_bindings.insert(cb_node);
// Now update CBInfo's Mem reference list
cb_node->memObjs.insert(mem_binding);
}
}
// Now update cb binding for buffer
cb_node->object_bindings.insert({HandleToUint64(buffer_state->buffer), kVulkanObjectTypeBuffer});
buffer_state->cb_bindings.insert(cb_node);
}
// Create binding link between given buffer view node and its buffer with command buffer node
void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
// First add bindings for bufferView
view_state->cb_bindings.insert(cb_node);
cb_node->object_bindings.insert({HandleToUint64(view_state->buffer_view), kVulkanObjectTypeBufferView});
auto buffer_state = GetBufferState(dev_data, view_state->create_info.buffer);
// Add bindings for buffer within bufferView
if (buffer_state) {
AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
}
}
// For every mem obj bound to particular CB, free bindings related to that CB
static void ClearCmdBufAndMemReferences(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
if (cb_node) {
if (cb_node->memObjs.size() > 0) {
for (auto mem : cb_node->memObjs) {
DEVICE_MEM_INFO *pInfo = GetMemObjInfo(dev_data, mem);
if (pInfo) {
pInfo->cb_bindings.erase(cb_node);
}
}
cb_node->memObjs.clear();
}
}
}
// Clear a single object binding from given memory object; currently no error is reported if the binding is missing
static bool ClearMemoryObjectBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type, VkDeviceMemory mem) {
DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
// This obj is bound to a memory object. Remove the reference to this object in that memory object's list
if (mem_info) {
mem_info->obj_bindings.erase({handle, type});
}
return false;
}
// ClearMemoryObjectBindings clears the binding of objects to memory
// For the given object it pulls the memory bindings and makes sure that the bindings
// no longer refer to the object being cleared. This occurs when objects are destroyed.
bool ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
bool skip = false;
BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
if (mem_binding) {
if (!mem_binding->sparse) {
skip = ClearMemoryObjectBinding(dev_data, handle, type, mem_binding->binding.mem);
} else { // Sparse, clear all bindings
for (auto &sparse_mem_binding : mem_binding->sparse_bindings) {
skip |= ClearMemoryObjectBinding(dev_data, handle, type, sparse_mem_binding.mem);
}
}
}
return skip;
}
// For given mem object, verify that it is not null or UNBOUND; if it is, report an error. Return skip value.
bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
const char *type_name, std::string error_code) {
bool result = false;
if (VK_NULL_HANDLE == mem) {
result =
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle, error_code,
"%s: Vk%s object 0x%" PRIx64 " used with no memory bound. Memory should be bound by calling vkBind%sMemory().",
api_name, type_name, handle, type_name);
} else if (MEMORY_UNBOUND == mem) {
result =
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle, error_code,
"%s: Vk%s object 0x%" PRIx64
" used with no memory bound and previously bound memory was freed. Memory must not be freed prior to this "
"operation.",
api_name, type_name, handle);
}
return result;
}
// Check to see if memory was ever bound to this image
bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name,
const std::string &error_code) {
bool result = false;
if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
result = VerifyBoundMemoryIsValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), api_name, "Image",
error_code);
}
return result;
}
// Check to see if memory was bound to this buffer
bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_STATE *buffer_state, const char *api_name,
const std::string &error_code) {
bool result = false;
if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
result = VerifyBoundMemoryIsValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), api_name,
"Buffer", error_code);
}
return result;
}
// SetMemBinding is used to establish immutable, non-sparse binding between a single image/buffer object and memory object.
// Corresponding valid usage checks are in ValidateSetMemBinding().
static void SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, BINDABLE *mem_binding, VkDeviceSize memory_offset,
uint64_t handle, VulkanObjectType type, const char *apiName) {
assert(mem_binding);
mem_binding->binding.mem = mem;
mem_binding->UpdateBoundMemorySet(); // force recreation of cached set
mem_binding->binding.offset = memory_offset;
mem_binding->binding.size = mem_binding->requirements.size;
if (mem != VK_NULL_HANDLE) {
DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
if (mem_info) {
mem_info->obj_bindings.insert({handle, type});
// For image objects, make sure default memory state is correctly set
// TODO : What's the best/correct way to handle this?
if (kVulkanObjectTypeImage == type) {
auto const image_state = reinterpret_cast<const IMAGE_STATE *>(mem_binding);
if (image_state) {
VkImageCreateInfo ici = image_state->createInfo;
if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
// TODO:: More memory state transition stuff.
}
}
}
}
}
}
// Valid usage checks for a call to SetMemBinding().
// For NULL mem case, output warning
// Make sure given object is in global object map
// IF a previous binding existed, output validation error
// Otherwise, add reference from objectInfo to memoryInfo
// Add reference off of objInfo
// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
static bool ValidateSetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VulkanObjectType type,
const char *apiName) {
bool skip = false;
// It's an error to bind an object to NULL memory
if (mem != VK_NULL_HANDLE) {
BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
assert(mem_binding);
if (mem_binding->sparse) {
std::string error_code = "VUID-vkBindImageMemory-image-01045";
const char *handle_type = "IMAGE";
if (type == kVulkanObjectTypeBuffer) {
error_code = "VUID-vkBindBufferMemory-buffer-01030";
handle_type = "BUFFER";
} else {
assert(type == kVulkanObjectTypeImage);
}
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem), error_code,
"In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64
") which was created with sparse memory flags (VK_%s_CREATE_SPARSE_*_BIT).",
apiName, HandleToUint64(mem), handle, handle_type);
}
DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
if (mem_info) {
DEVICE_MEM_INFO *prev_binding = GetMemObjInfo(dev_data, mem_binding->binding.mem);
if (prev_binding) {
std::string error_code = "VUID-vkBindImageMemory-image-01044";
if (type == kVulkanObjectTypeBuffer) {
error_code = "VUID-vkBindBufferMemory-buffer-01029";
} else {
assert(type == kVulkanObjectTypeImage);
}
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem), error_code,
"In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64
") which has already been bound to mem object 0x%" PRIx64 ".",
apiName, HandleToUint64(mem), handle, HandleToUint64(prev_binding->mem));
} else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem), kVUID_Core_MemTrack_RebindObject,
"In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64
") which was previous bound to memory that has since been freed. Memory bindings are immutable in "
"Vulkan so this attempt to bind to new memory is not allowed.",
apiName, HandleToUint64(mem), handle);
}
}
}
return skip;
}
// For NULL mem case, clear any previous binding. Else...
// Make sure given object is in its object map
// IF a previous binding existed, update binding
// Add reference from objectInfo to memoryInfo
// Add reference off of object's binding info
// Returns a skip value (currently always false; invalid handles are reported by object tracker)
static bool SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VulkanObjectType type) {
    bool skip = false;
// Handle NULL case separately, just clear previous binding & decrement reference
if (binding.mem == VK_NULL_HANDLE) {
// TODO : This should cause the range of the resource to be unbound according to spec
} else {
BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
assert(mem_binding);
if (mem_binding) { // Invalid handles are reported by object tracker, but Get returns NULL for them, so avoid SEGV here
assert(mem_binding->sparse);
DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, binding.mem);
if (mem_info) {
mem_info->obj_bindings.insert({handle, type});
// Need to set mem binding for this object
mem_binding->sparse_bindings.insert(binding);
mem_binding->UpdateBoundMemorySet();
}
}
}
return skip;
}
// Check object status for selected flag state
static bool ValidateStatus(layer_data *dev_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
const char *fail_msg, std::string const msg_code) {
if (!(pNode->status & status_mask)) {
return log_msg(dev_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                       HandleToUint64(pNode->commandBuffer), msg_code, "command buffer object 0x%" PRIx64 ": %s.",
HandleToUint64(pNode->commandBuffer), fail_msg);
}
return false;
}
// Retrieve pipeline node ptr for given pipeline object
static PIPELINE_STATE *GetPipelineState(layer_data const *dev_data, VkPipeline pipeline) {
auto it = dev_data->pipelineMap.find(pipeline);
if (it == dev_data->pipelineMap.end()) {
return nullptr;
}
return it->second.get();
}
RENDER_PASS_STATE *GetRenderPassState(layer_data const *dev_data, VkRenderPass renderpass) {
auto it = dev_data->renderPassMap.find(renderpass);
if (it == dev_data->renderPassMap.end()) {
return nullptr;
}
return it->second.get();
}
std::shared_ptr<RENDER_PASS_STATE> GetRenderPassStateSharedPtr(layer_data const *dev_data, VkRenderPass renderpass) {
auto it = dev_data->renderPassMap.find(renderpass);
if (it == dev_data->renderPassMap.end()) {
return nullptr;
}
return it->second;
}
FRAMEBUFFER_STATE *GetFramebufferState(const layer_data *dev_data, VkFramebuffer framebuffer) {
auto it = dev_data->frameBufferMap.find(framebuffer);
if (it == dev_data->frameBufferMap.end()) {
return nullptr;
}
return it->second.get();
}
std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> const GetDescriptorSetLayout(layer_data const *dev_data,
VkDescriptorSetLayout dsLayout) {
auto it = dev_data->descriptorSetLayoutMap.find(dsLayout);
if (it == dev_data->descriptorSetLayoutMap.end()) {
return nullptr;
}
return it->second;
}
static PIPELINE_LAYOUT_NODE const *GetPipelineLayout(layer_data const *dev_data, VkPipelineLayout pipeLayout) {
auto it = dev_data->pipelineLayoutMap.find(pipeLayout);
if (it == dev_data->pipelineLayoutMap.end()) {
return nullptr;
}
return &it->second;
}
shader_module const *GetShaderModuleState(layer_data const *dev_data, VkShaderModule module) {
auto it = dev_data->shaderModuleMap.find(module);
if (it == dev_data->shaderModuleMap.end()) {
return nullptr;
}
return it->second.get();
}
// Return true if for a given PSO, the given state enum is dynamic, else return false
static bool IsDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) return true;
}
}
return false;
}
// Validate state stored as flags at time of draw call
static bool ValidateDrawStateFlags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
std::string const msg_code) {
bool result = false;
if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) {
result |= ValidateStatus(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic line width state not set for this command buffer", msg_code);
}
if (pPipe->graphicsPipelineCI.pRasterizationState &&
(pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
result |= ValidateStatus(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic depth bias state not set for this command buffer", msg_code);
}
if (pPipe->blendConstantsEnabled) {
result |= ValidateStatus(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic blend constants state not set for this command buffer", msg_code);
}
if (pPipe->graphicsPipelineCI.pDepthStencilState &&
(pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
result |= ValidateStatus(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic depth bounds state not set for this command buffer", msg_code);
}
if (pPipe->graphicsPipelineCI.pDepthStencilState &&
(pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
result |= ValidateStatus(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic stencil read mask state not set for this command buffer", msg_code);
result |= ValidateStatus(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic stencil write mask state not set for this command buffer", msg_code);
result |= ValidateStatus(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic stencil reference state not set for this command buffer", msg_code);
}
if (indexed) {
result |= ValidateStatus(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
}
return result;
}
static bool LogInvalidAttachmentMessage(layer_data const *dev_data, const char *type1_string, const RENDER_PASS_STATE *rp1_state,
const char *type2_string, const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach,
uint32_t secondary_attach, const char *msg, const char *caller, std::string error_code) {
return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(rp1_state->renderPass), error_code,
"%s: RenderPasses incompatible between %s w/ renderPass 0x%" PRIx64 " and %s w/ renderPass 0x%" PRIx64
" Attachment %u is not compatible with %u: %s.",
caller, type1_string, HandleToUint64(rp1_state->renderPass), type2_string, HandleToUint64(rp2_state->renderPass),
primary_attach, secondary_attach, msg);
}
static bool ValidateAttachmentCompatibility(layer_data const *dev_data, const char *type1_string,
const RENDER_PASS_STATE *rp1_state, const char *type2_string,
const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach, uint32_t secondary_attach,
const char *caller, std::string error_code) {
bool skip = false;
const auto &primaryPassCI = rp1_state->createInfo;
const auto &secondaryPassCI = rp2_state->createInfo;
if (primaryPassCI.attachmentCount <= primary_attach) {
primary_attach = VK_ATTACHMENT_UNUSED;
}
if (secondaryPassCI.attachmentCount <= secondary_attach) {
secondary_attach = VK_ATTACHMENT_UNUSED;
}
if (primary_attach == VK_ATTACHMENT_UNUSED && secondary_attach == VK_ATTACHMENT_UNUSED) {
return skip;
}
if (primary_attach == VK_ATTACHMENT_UNUSED) {
skip |= LogInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
secondary_attach, "The first is unused while the second is not.", caller, error_code);
return skip;
}
if (secondary_attach == VK_ATTACHMENT_UNUSED) {
skip |= LogInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
secondary_attach, "The second is unused while the first is not.", caller, error_code);
return skip;
}
if (primaryPassCI.pAttachments[primary_attach].format != secondaryPassCI.pAttachments[secondary_attach].format) {
skip |= LogInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
secondary_attach, "They have different formats.", caller, error_code);
}
if (primaryPassCI.pAttachments[primary_attach].samples != secondaryPassCI.pAttachments[secondary_attach].samples) {
skip |= LogInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
secondary_attach, "They have different samples.", caller, error_code);
}
if (primaryPassCI.pAttachments[primary_attach].flags != secondaryPassCI.pAttachments[secondary_attach].flags) {
skip |= LogInvalidAttachmentMessage(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_attach,
secondary_attach, "They have different flags.", caller, error_code);
}
return skip;
}
static bool ValidateSubpassCompatibility(layer_data const *dev_data, const char *type1_string, const RENDER_PASS_STATE *rp1_state,
const char *type2_string, const RENDER_PASS_STATE *rp2_state, const int subpass,
const char *caller, std::string error_code) {
bool skip = false;
const auto &primary_desc = rp1_state->createInfo.pSubpasses[subpass];
const auto &secondary_desc = rp2_state->createInfo.pSubpasses[subpass];
uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
if (i < primary_desc.inputAttachmentCount) {
primary_input_attach = primary_desc.pInputAttachments[i].attachment;
}
if (i < secondary_desc.inputAttachmentCount) {
secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
}
skip |= ValidateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_input_attach,
secondary_input_attach, caller, error_code);
}
uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
if (i < primary_desc.colorAttachmentCount) {
primary_color_attach = primary_desc.pColorAttachments[i].attachment;
}
if (i < secondary_desc.colorAttachmentCount) {
secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
}
skip |= ValidateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_color_attach,
secondary_color_attach, caller, error_code);
uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
}
if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
}
skip |= ValidateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_resolve_attach,
secondary_resolve_attach, caller, error_code);
}
uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
if (primary_desc.pDepthStencilAttachment) {
primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
}
if (secondary_desc.pDepthStencilAttachment) {
secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
}
skip |= ValidateAttachmentCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, primary_depthstencil_attach,
secondary_depthstencil_attach, caller, error_code);
return skip;
}
// Verify that the given renderPass CreateInfos for primary and secondary command buffers are compatible.
// This function deals directly with the CreateInfo; there are overloaded versions below that can take the renderPass handle and
// will then feed into this function.
static bool ValidateRenderPassCompatibility(layer_data const *dev_data, const char *type1_string,
const RENDER_PASS_STATE *rp1_state, const char *type2_string,
const RENDER_PASS_STATE *rp2_state, const char *caller, std::string error_code) {
bool skip = false;
if (rp1_state->createInfo.subpassCount != rp2_state->createInfo.subpassCount) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(rp1_state->renderPass), error_code,
"%s: RenderPasses incompatible between %s w/ renderPass 0x%" PRIx64
" with a subpassCount of %u and %s w/ renderPass 0x%" PRIx64 " with a subpassCount of %u.",
caller, type1_string, HandleToUint64(rp1_state->renderPass), rp1_state->createInfo.subpassCount,
type2_string, HandleToUint64(rp2_state->renderPass), rp2_state->createInfo.subpassCount);
} else {
for (uint32_t i = 0; i < rp1_state->createInfo.subpassCount; ++i) {
skip |= ValidateSubpassCompatibility(dev_data, type1_string, rp1_state, type2_string, rp2_state, i, caller, error_code);
}
}
return skip;
}
// Return Set node ptr for specified set or else NULL
cvdescriptorset::DescriptorSet *GetSetNode(const layer_data *dev_data, VkDescriptorSet set) {
auto set_it = dev_data->setMap.find(set);
if (set_it == dev_data->setMap.end()) {
return NULL;
}
return set_it->second;
}
// For given pipeline, return number of MSAA samples, or one if MSAA disabled
static VkSampleCountFlagBits GetNumSamples(PIPELINE_STATE const *pipe) {
if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
}
return VK_SAMPLE_COUNT_1_BIT;
}
static void ListBits(std::ostream &s, uint32_t bits) {
for (int i = 0; i < 32 && bits; i++) {
if (bits & (1 << i)) {
s << i;
bits &= ~(1 << i);
if (bits) {
s << ",";
}
}
}
}
// Validate draw-time state related to the PSO
static bool ValidatePipelineDrawtimeState(layer_data const *dev_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
CMD_TYPE cmd_type, PIPELINE_STATE const *pPipeline, const char *caller) {
bool skip = false;
// Verify vertex binding
if (pPipeline->vertex_binding_descriptions_.size() > 0) {
for (size_t i = 0; i < pPipeline->vertex_binding_descriptions_.size(); i++) {
const auto vertex_binding = pPipeline->vertex_binding_descriptions_[i].binding;
if ((pCB->current_draw_data.vertex_buffer_bindings.size() < (vertex_binding + 1)) ||
(pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].buffer == VK_NULL_HANDLE)) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_VtxIndexOutOfBounds,
"The Pipeline State Object (0x%" PRIx64
") expects that this Command Buffer's vertex binding Index %u should be set via "
"vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct at "
"index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
HandleToUint64(state.pipeline_state->pipeline), vertex_binding, i, vertex_binding);
}
}
// Verify vertex attribute address alignment
for (size_t i = 0; i < pPipeline->vertex_attribute_descriptions_.size(); i++) {
const auto &attribute_description = pPipeline->vertex_attribute_descriptions_[i];
const auto vertex_binding = attribute_description.binding;
const auto attribute_offset = attribute_description.offset;
const auto attribute_format = attribute_description.format;
const auto &vertex_binding_map_it = pPipeline->vertex_binding_to_index_map_.find(vertex_binding);
if ((vertex_binding_map_it != pPipeline->vertex_binding_to_index_map_.cend()) &&
(vertex_binding < pCB->current_draw_data.vertex_buffer_bindings.size()) &&
(pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].buffer != VK_NULL_HANDLE)) {
const auto vertex_buffer_stride = pPipeline->vertex_binding_descriptions_[vertex_binding_map_it->second].stride;
const auto vertex_buffer_offset = pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].offset;
const auto buffer_state =
GetBufferState(dev_data, pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].buffer);
                // Use only the memory binding offset, as the base memory should be properly aligned by the driver
const auto buffer_binding_address = buffer_state->binding.offset + vertex_buffer_offset;
                // Use 1 as the vertex/instance index so that the buffer stride is included in the alignment check as well
const auto attrib_address = buffer_binding_address + vertex_buffer_stride + attribute_offset;
if (SafeModulo(attrib_address, FormatAlignment(attribute_format)) != 0) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].buffer),
kVUID_Core_DrawState_InvalidVtxAttributeAlignment,
"Invalid attribAddress alignment for vertex attribute " PRINTF_SIZE_T_SPECIFIER
" from "
"pipeline (0x%" PRIx64 ") and vertex buffer (0x%" PRIx64 ").",
i, HandleToUint64(state.pipeline_state->pipeline),
HandleToUint64(pCB->current_draw_data.vertex_buffer_bindings[vertex_binding].buffer));
}
}
}
} else {
if ((!pCB->current_draw_data.vertex_buffer_bindings.empty()) && (!pCB->vertex_buffer_used)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer),
kVUID_Core_DrawState_VtxIndexOutOfBounds,
"Vertex buffers are bound to command buffer (0x%" PRIx64
") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIx64 ").",
HandleToUint64(pCB->commandBuffer), HandleToUint64(state.pipeline_state->pipeline));
}
}
    // If viewports or scissors are dynamic, verify that the dynamic count matches the PSO count.
    // Skip the check if rasterization is disabled or there is no viewport state.
if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
(pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
pPipeline->graphicsPipelineCI.pViewportState) {
bool dynViewport = IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
bool dynScissor = IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
if (dynViewport) {
const auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
const auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
if (missingViewportMask) {
std::stringstream ss;
ss << "Dynamic viewport(s) ";
ListBits(ss, missingViewportMask);
ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_ViewportScissorMismatch, "%s", ss.str().c_str());
}
}
if (dynScissor) {
const auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
const auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
if (missingScissorMask) {
std::stringstream ss;
ss << "Dynamic scissor(s) ";
ListBits(ss, missingScissorMask);
ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_ViewportScissorMismatch, "%s", ss.str().c_str());
}
}
}
// Verify that any MSAA request in PSO matches sample# in bound FB
// Skip the check if rasterization is disabled.
if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
(pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
VkSampleCountFlagBits pso_num_samples = GetNumSamples(pPipeline);
if (pCB->activeRenderPass) {
const auto render_pass_info = pCB->activeRenderPass->createInfo.ptr();
const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
uint32_t i;
unsigned subpass_num_samples = 0;
for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
const auto attachment = subpass_desc->pColorAttachments[i].attachment;
if (attachment != VK_ATTACHMENT_UNUSED)
subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
}
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
}
if (!dev_data->extensions.vk_amd_mixed_attachment_samples &&
((subpass_num_samples & static_cast<unsigned>(pso_num_samples)) != subpass_num_samples)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_NumSamplesMismatch,
"Num samples mismatch! At draw-time in Pipeline (0x%" PRIx64
") with %u samples while current RenderPass (0x%" PRIx64 ") w/ %u samples!",
HandleToUint64(pPipeline->pipeline), pso_num_samples,
HandleToUint64(pCB->activeRenderPass->renderPass), subpass_num_samples);
}
} else {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_NoActiveRenderpass,
"No active render pass found at draw-time in Pipeline (0x%" PRIx64 ")!",
HandleToUint64(pPipeline->pipeline));
}
}
// Verify that PSO creation renderPass is compatible with active renderPass
if (pCB->activeRenderPass) {
// TODO: Move all of the error codes common across different Draws into a LUT accessed by cmd_type
// TODO: AMD extension codes are included here, but actual function entrypoints are not yet intercepted
// Error codes for renderpass and subpass mismatches
auto rp_error = "VUID-vkCmdDraw-renderPass-00435", sp_error = "VUID-vkCmdDraw-subpass-00436";
switch (cmd_type) {
case CMD_DRAWINDEXED:
rp_error = "VUID-vkCmdDrawIndexed-renderPass-00454";
sp_error = "VUID-vkCmdDrawIndexed-subpass-00455";
break;
case CMD_DRAWINDIRECT:
rp_error = "VUID-vkCmdDrawIndirect-renderPass-00479";
sp_error = "VUID-vkCmdDrawIndirect-subpass-00480";
break;
case CMD_DRAWINDIRECTCOUNTAMD:
rp_error = "VUID-vkCmdDrawIndirectCountAMD-renderPass-00507";
sp_error = "VUID-vkCmdDrawIndirectCountAMD-subpass-00508";
break;
case CMD_DRAWINDIRECTCOUNTKHR:
rp_error = "VUID-vkCmdDrawIndirectCountKHR-renderPass-03113";
sp_error = "VUID-vkCmdDrawIndirectCountKHR-subpass-03114";
break;
case CMD_DRAWINDEXEDINDIRECT:
rp_error = "VUID-vkCmdDrawIndexedIndirect-renderPass-00531";
sp_error = "VUID-vkCmdDrawIndexedIndirect-subpass-00532";
break;
case CMD_DRAWINDEXEDINDIRECTCOUNTAMD:
rp_error = "VUID-vkCmdDrawIndexedIndirectCountAMD-renderPass-00560";
sp_error = "VUID-vkCmdDrawIndexedIndirectCountAMD-subpass-00561";
break;
case CMD_DRAWINDEXEDINDIRECTCOUNTKHR:
rp_error = "VUID-vkCmdDrawIndexedIndirectCountKHR-renderPass-03145";
sp_error = "VUID-vkCmdDrawIndexedIndirectCountKHR-subpass-03146";
break;
default:
assert(CMD_DRAW == cmd_type);
break;
}
std::string err_string;
if (pCB->activeRenderPass->renderPass != pPipeline->rp_state->renderPass) {
// renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
skip |= ValidateRenderPassCompatibility(dev_data, "active render pass", pCB->activeRenderPass, "pipeline state object",
pPipeline->rp_state.get(), caller, rp_error);
}
if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), sp_error, "Pipeline was built for subpass %u but used in subpass %u.",
pPipeline->graphicsPipelineCI.subpass, pCB->activeSubpass);
}
}
return skip;
}
// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
// pipelineLayout[layoutIndex]
static bool VerifySetLayoutCompatibility(const cvdescriptorset::DescriptorSet *descriptor_set,
PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
string &errorMsg) {
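    // Illustrative failure: with a pipeline layout built from 2 set layouts, layoutIndex == 3 produces the
    // "only contains 2 setLayouts corresponding to sets 0-1" message below.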
auto num_sets = pipeline_layout->set_layouts.size();
if (layoutIndex >= num_sets) {
stringstream errorStr;
errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
<< " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
<< layoutIndex;
errorMsg = errorStr.str();
return false;
}
if (descriptor_set->IsPushDescriptor()) return true;
auto layout_node = pipeline_layout->set_layouts[layoutIndex];
return descriptor_set->IsCompatible(layout_node.get(), &errorMsg);
}
// Validate overall state at the time of a draw call
static bool ValidateCmdBufDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, CMD_TYPE cmd_type, const bool indexed,
const VkPipelineBindPoint bind_point, const char *function, const std::string &pipe_err_code,
const std::string &state_err_code) {
bool result = false;
auto const &state = cb_node->lastBound[bind_point];
PIPELINE_STATE *pPipe = state.pipeline_state;
if (nullptr == pPipe) {
return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), pipe_err_code,
"Must not call %s on this command buffer while there is no %s pipeline bound.", function,
bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS ? "Graphics" : "Compute");
}
// First check flag states
if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
result = ValidateDrawStateFlags(dev_data, cb_node, pPipe, indexed, state_err_code);
// Now complete other state checks
string errorString;
auto const &pipeline_layout = pPipe->pipeline_layout;
for (const auto &set_binding_pair : pPipe->active_slots) {
uint32_t setIndex = set_binding_pair.first;
// If valid set is not bound throw an error
if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), kVUID_Core_DrawState_DescriptorSetNotBound,
"VkPipeline 0x%" PRIx64 " uses set #%u but that set is not bound.", HandleToUint64(pPipe->pipeline),
setIndex);
} else if (!VerifySetLayoutCompatibility(state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex, errorString)) {
// Set is bound but not compatible w/ overlapping pipeline_layout from PSO
VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
HandleToUint64(setHandle), kVUID_Core_DrawState_PipelineLayoutsIncompatible,
"VkDescriptorSet (0x%" PRIx64
") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIx64 " due to: %s",
HandleToUint64(setHandle), setIndex, HandleToUint64(pipeline_layout.layout), errorString.c_str());
} else { // Valid set is bound and layout compatible, validate that it's updated
// Pull the set node
cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
// Validate the draw-time state for this descriptor set
std::string err_str;
if (!descriptor_set->IsPushDescriptor()) {
// For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor
// binding validation. Take the requested binding set and prefilter it to eliminate redundant validation checks.
// Here, the currently bound pipeline determines whether an image validation check is redundant...
                // for images, the "req" portion of the binding_req is indirectly (but tightly) coupled to the pipeline.
const cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second, cb_node,
pPipe);
const auto &binding_req_map = reduced_map.Map();
if (!descriptor_set->ValidateDrawState(binding_req_map, state.dynamicOffsets[setIndex], cb_node, function,
&err_str)) {
auto set = descriptor_set->GetSet();
result |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
HandleToUint64(set), kVUID_Core_DrawState_DescriptorSetNotUpdated,
"Descriptor set 0x%" PRIx64 " bound as set #%u encountered the following validation error at %s time: %s",
HandleToUint64(set), setIndex, function, err_str.c_str());
}
}
}
}
// Check general pipeline state that needs to be validated at drawtime
if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
result |= ValidatePipelineDrawtimeState(dev_data, state, cb_node, cmd_type, pPipe, function);
return result;
}
static void UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const VkPipelineBindPoint bind_point) {
auto const &state = cb_state->lastBound[bind_point];
PIPELINE_STATE *pPipe = state.pipeline_state;
if (VK_NULL_HANDLE != state.pipeline_layout) {
for (const auto &set_binding_pair : pPipe->active_slots) {
uint32_t setIndex = set_binding_pair.first;
// Pull the set node
cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
if (!descriptor_set->IsPushDescriptor()) {
// For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor binding
const cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second, cb_state);
const auto &binding_req_map = reduced_map.Map();
// Bind this set and its active descriptor resources to the command buffer
descriptor_set->BindCommandBuffer(cb_state, binding_req_map);
// For given active slots record updated images & buffers
descriptor_set->GetStorageUpdates(binding_req_map, &cb_state->updateBuffers, &cb_state->updateImages);
}
}
}
if (!pPipe->vertex_binding_descriptions_.empty()) {
cb_state->vertex_buffer_used = true;
}
}
static bool ValidatePipelineLocked(layer_data *dev_data, std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines,
int pipelineIndex) {
bool skip = false;
PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get();
// If create derivative bit is set, check that we've specified a base
// pipeline correctly, and that the base pipeline was created to allow
// derivatives.
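    // (Illustrative: flags containing VK_PIPELINE_CREATE_DERIVATIVE_BIT with basePipelineHandle == VK_NULL_HANDLE
    // and basePipelineIndex == -1, or with both fields provided, trips the exactly-one check below.)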
if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
PIPELINE_STATE *pBasePipeline = nullptr;
if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
(pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
// This check is a superset of "VUID-VkGraphicsPipelineCreateInfo-flags-00724" and
// "VUID-VkGraphicsPipelineCreateInfo-flags-00725"
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_InvalidPipelineCreateState,
"Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
} else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-vkCreateGraphicsPipelines-flags-00720",
"Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
} else {
pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex].get();
}
} else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
pBasePipeline = GetPipelineState(dev_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
}
if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_InvalidPipelineCreateState,
"Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
}
}
return skip;
}
// UNLOCKED pipeline validation. DO NOT look up objects in the layer_data->* maps in this function.
static bool ValidatePipelineUnlocked(layer_data *dev_data, std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines,
int pipelineIndex) {
bool skip = false;
PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get();
    // Ensure the subpass index is valid. If not, then ValidateAndCapturePipelineShaderState
    // produces nonsense errors that confuse users. Other layers should already
    // emit errors for renderpass being invalid.
    // Only index pSubpasses when the subpass is in range; an out-of-range index leaves subpass_desc null.
    auto subpass_desc = (pPipeline->graphicsPipelineCI.subpass < pPipeline->rp_state->createInfo.subpassCount)
                            ? &pPipeline->rp_state->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass]
                            : nullptr;
    if (!subpass_desc) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-subpass-00759",
                        "Invalid Pipeline CreateInfo State: Subpass index %u is out of range for this renderpass (0..%u).",
                        pPipeline->graphicsPipelineCI.subpass, pPipeline->rp_state->createInfo.subpassCount - 1);
    }
if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
        if (subpass_desc && (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-attachmentCount-00746",
"vkCreateGraphicsPipelines(): Render pass (0x%" PRIx64
") subpass %u has colorAttachmentCount of %u which doesn't match the pColorBlendState->attachmentCount of %u.",
HandleToUint64(pPipeline->rp_state->renderPass), pPipeline->graphicsPipelineCI.subpass,
subpass_desc->colorAttachmentCount, color_blend_state->attachmentCount);
}
if (!dev_data->enabled_features.core.independentBlend) {
if (pPipeline->attachments.size() > 1) {
VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
// Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
// settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
// only attachment state, so memcmp is best suited for the comparison
if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
sizeof(pAttachments[0]))) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, HandleToUint64(pPipeline->pipeline),
"VUID-VkPipelineColorBlendStateCreateInfo-pAttachments-00605",
"Invalid Pipeline CreateInfo: If independent blend feature not enabled, all elements of "
"pAttachments must be identical.");
break;
}
}
}
}
if (!dev_data->enabled_features.core.logicOp &&
(pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00606",
"Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE.");
}
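        // The four checks below mirror one another: any SRC1_* blend factor requires the dualSrcBlend
        // feature (VUIDs 00608-00611).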
for (size_t i = 0; i < pPipeline->attachments.size(); i++) {
if ((pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!dev_data->enabled_features.core.dualSrcBlend) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineColorBlendAttachmentState-srcColorBlendFactor-00608",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].srcColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].srcColorBlendFactor);
}
}
if ((pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!dev_data->enabled_features.core.dualSrcBlend) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineColorBlendAttachmentState-dstColorBlendFactor-00609",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].dstColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].dstColorBlendFactor);
}
}
if ((pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!dev_data->enabled_features.core.dualSrcBlend) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineColorBlendAttachmentState-srcAlphaBlendFactor-00610",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].srcAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].srcAlphaBlendFactor);
}
}
if ((pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!dev_data->enabled_features.core.dualSrcBlend) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineColorBlendAttachmentState-dstAlphaBlendFactor-00611",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].dstAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].dstAlphaBlendFactor);
}
}
}
}
if (ValidateAndCapturePipelineShaderState(dev_data, pPipeline)) {
skip = true;
}
// Each shader's stage must be unique
if (pPipeline->duplicate_shaders) {
for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
if (pPipeline->duplicate_shaders & stage) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_InvalidPipelineCreateState,
"Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
}
}
}
// VS is required
if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-stage-00727",
"Invalid Pipeline CreateInfo State: Vertex Shader required.");
}
// Either both or neither TC/TE shaders should be defined
bool has_control = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0;
bool has_eval = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0;
if (has_control && !has_eval) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-pStages-00729",
"Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair.");
}
if (!has_control && has_eval) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-pStages-00730",
"Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair.");
}
// Compute shaders should be specified independent of Gfx shaders
if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-stage-00728",
"Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline.");
}
// VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
// Mismatching primitive topology and tessellation fails graphics pipeline creation.
if (has_control && has_eval &&
(!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-pStages-00736",
"Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA topology for "
"tessellation pipelines.");
}
if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
if (!has_control || !has_eval) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-topology-00737",
"Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid "
"for tessellation pipelines.");
}
}
// If a rasterization state is provided...
if (pPipeline->graphicsPipelineCI.pRasterizationState) {
if ((pPipeline->graphicsPipelineCI.pRasterizationState->depthClampEnable == VK_TRUE) &&
(!dev_data->enabled_features.core.depthClamp)) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineRasterizationStateCreateInfo-depthClampEnable-00782",
"vkCreateGraphicsPipelines(): the depthClamp device feature is disabled: the depthClampEnable member "
"of the VkPipelineRasterizationStateCreateInfo structure must be set to VK_FALSE.");
}
if (!IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS) &&
(pPipeline->graphicsPipelineCI.pRasterizationState->depthBiasClamp != 0.0) &&
(!dev_data->enabled_features.core.depthBiasClamp)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), kVUID_Core_DrawState_InvalidFeature,
"vkCreateGraphicsPipelines(): the depthBiasClamp device feature is disabled: the depthBiasClamp member "
"of the VkPipelineRasterizationStateCreateInfo structure must be set to 0.0 unless the "
"VK_DYNAMIC_STATE_DEPTH_BIAS dynamic state is enabled");
}
// If rasterization is enabled...
if (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) {
            // Per the spec pMultisampleState must be valid here, but guard the dereference anyway.
            if (pPipeline->graphicsPipelineCI.pMultisampleState &&
                (pPipeline->graphicsPipelineCI.pMultisampleState->alphaToOneEnable == VK_TRUE) &&
                (!dev_data->enabled_features.core.alphaToOne)) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkPipelineMultisampleStateCreateInfo-alphaToOneEnable-00785",
"vkCreateGraphicsPipelines(): the alphaToOne device feature is disabled: the alphaToOneEnable "
"member of the VkPipelineMultisampleStateCreateInfo structure must be set to VK_FALSE.");
}
// If subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a valid structure
if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline),
"VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00752",
"Invalid Pipeline CreateInfo State: pDepthStencilState is NULL when rasterization is enabled "
"and subpass uses a depth/stencil attachment.");
} else if ((pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) &&
(!dev_data->enabled_features.core.depthBounds)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline),
"VUID-VkPipelineDepthStencilStateCreateInfo-depthBoundsTestEnable-00598",
"vkCreateGraphicsPipelines(): the depthBounds device feature is disabled: the "
"depthBoundsTestEnable member of the VkPipelineDepthStencilStateCreateInfo structure must be "
"set to VK_FALSE.");
}
}
            // If the subpass uses color attachments, pColorBlendState must be a valid pointer
if (subpass_desc) {
uint32_t color_attachment_count = 0;
for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
++color_attachment_count;
}
}
if (color_attachment_count > 0 && pPipeline->graphicsPipelineCI.pColorBlendState == nullptr) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline),
"VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00753",
"Invalid Pipeline CreateInfo State: pColorBlendState is NULL when rasterization is enabled and "
"subpass uses color attachments.");
}
}
}
}
auto vi = pPipeline->graphicsPipelineCI.pVertexInputState;
if (vi != NULL) {
for (uint32_t j = 0; j < vi->vertexAttributeDescriptionCount; j++) {
VkFormat format = vi->pVertexAttributeDescriptions[j].format;
// Internal call to get format info. Still goes through layers, could potentially go directly to ICD.
VkFormatProperties properties;
dev_data->instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(dev_data->physical_device, format,
&properties);
if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkVertexInputAttributeDescription-format-00623",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
"(%s) is not a supported vertex buffer format.",
pipelineIndex, j, string_VkFormat(format));
}
}
}
    // Guard against a null subpass_desc (invalid subpass index, see above) and a missing pMultisampleState
    // before dereferencing either.
    if (dev_data->extensions.vk_amd_mixed_attachment_samples && subpass_desc &&
        pPipeline->graphicsPipelineCI.pMultisampleState) {
VkSampleCountFlagBits max_sample_count = static_cast<VkSampleCountFlagBits>(0);
for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
max_sample_count =
std::max(max_sample_count,
pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pColorAttachments[i].attachment].samples);
}
}
if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
max_sample_count =
std::max(max_sample_count,
pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pDepthStencilAttachment->attachment].samples);
}
if (pPipeline->graphicsPipelineCI.pMultisampleState->rasterizationSamples != max_sample_count) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
HandleToUint64(pPipeline->pipeline), "VUID-VkGraphicsPipelineCreateInfo-subpass-01505",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%s) != max "
"attachment samples (%s) used in subpass %u.",
pipelineIndex,
string_VkSampleCountFlagBits(pPipeline->graphicsPipelineCI.pMultisampleState->rasterizationSamples),
string_VkSampleCountFlagBits(max_sample_count), pPipeline->graphicsPipelineCI.subpass);
}
}
return skip;
}
// The block of code below deals specifically with managing/tracking descriptor sets (DSs)
// Return Pool node ptr for specified pool or else NULL
DESCRIPTOR_POOL_STATE *GetDescriptorPoolState(const layer_data *dev_data, const VkDescriptorPool pool) {
auto pool_it = dev_data->descriptorPoolMap.find(pool);
if (pool_it == dev_data->descriptorPoolMap.end()) {
return NULL;
}
return pool_it->second;
}
// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
// func_str is the name of the calling function
// Return false if no errors occur
// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
static bool ValidateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, std::string func_str) {
if (dev_data->instance_data->disabled.idle_descriptor_set) return false;
bool skip = false;
auto set_node = dev_data->setMap.find(set);
if (set_node == dev_data->setMap.end()) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
HandleToUint64(set), kVUID_Core_DrawState_DoubleDestroy,
"Cannot call %s() on descriptor set 0x%" PRIx64 " that has not been allocated.", func_str.c_str(),
HandleToUint64(set));
} else {
// TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
if (set_node->second->in_use.load()) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
HandleToUint64(set), "VUID-vkFreeDescriptorSets-pDescriptorSets-00309",
"Cannot call %s() on descriptor set 0x%" PRIx64 " that is in use by a command buffer.",
func_str.c_str(), HandleToUint64(set));
}
}
return skip;
}
// Remove set from setMap and delete the set
static void FreeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
dev_data->setMap.erase(descriptor_set->GetSet());
delete descriptor_set;
}
// Free all DS Pools including their Sets & related sub-structs
// NOTE : Calls to this function should be wrapped in mutex
static void DeletePools(layer_data *dev_data) {
for (auto ii = dev_data->descriptorPoolMap.begin(); ii != dev_data->descriptorPoolMap.end();) {
        // Remove this pool's sets from setMap and delete them
for (auto ds : ii->second->sets) {
FreeDescriptorSet(dev_data, ds);
}
ii->second->sets.clear();
delete ii->second;
ii = dev_data->descriptorPoolMap.erase(ii);
}
}
// For given CB object, fetch associated CB Node from map
GLOBAL_CB_NODE *GetCBNode(layer_data const *dev_data, const VkCommandBuffer cb) {
auto it = dev_data->commandBufferMap.find(cb);
if (it == dev_data->commandBufferMap.end()) {
return NULL;
}
return it->second;
}
// If a renderpass is active, verify that the given command type is appropriate for current subpass state
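// e.g. (illustrative): recording vkCmdDraw in a subpass begun with VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS
// is flagged, as is vkCmdExecuteCommands in a subpass begun with VK_SUBPASS_CONTENTS_INLINE.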
bool ValidateCmdSubpassState(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
if (!pCB->activeRenderPass) return false;
bool skip = false;
if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
(cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidCommandBuffer,
"Commands cannot be called in a subpass using secondary command buffers.");
} else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidCommandBuffer,
"vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
}
return skip;
}
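// Verify that the command buffer's pool was allocated on a queue family supporting the required flags.
// Typical (illustrative) use: ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdDispatch()", VK_QUEUE_COMPUTE_BIT, <error code>)
// rejects a command buffer allocated from a transfer-only pool.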
bool ValidateCmdQueueFlags(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *caller_name,
VkQueueFlags required_flags, const std::string &error_code) {
auto pool = GetCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
if (pool) {
VkQueueFlags queue_flags = dev_data->phys_dev_properties.queue_family_properties[pool->queueFamilyIndex].queueFlags;
if (!(required_flags & queue_flags)) {
string required_flags_string;
for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT}) {
if (flag & required_flags) {
if (required_flags_string.size()) {
required_flags_string += " or ";
}
required_flags_string += string_VkQueueFlagBits(flag);
}
}
return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), error_code,
"Cannot call %s on a command buffer allocated from a pool without %s capabilities..", caller_name,
required_flags_string.c_str());
}
}
return false;
}
static char const *GetCauseStr(VK_OBJECT obj) {
if (obj.type == kVulkanObjectTypeDescriptorSet) return "destroyed or updated";
if (obj.type == kVulkanObjectTypeCommandBuffer) return "destroyed or rerecorded";
return "destroyed";
}
static bool ReportInvalidCommandBuffer(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const char *call_source) {
bool skip = false;
for (auto obj : cb_state->broken_bindings) {
const char *type_str = object_string[obj.type];
const char *cause_str = GetCauseStr(obj);
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), kVUID_Core_DrawState_InvalidCommandBuffer,
"You are adding %s to command buffer 0x%" PRIx64 " that is invalid because bound %s 0x%" PRIx64 " was %s.",
call_source, HandleToUint64(cb_state->commandBuffer), type_str, obj.handle, cause_str);
}
return skip;
}
// 'commandBuffer must be in the recording state' valid usage error code for each command
// Note: grepping for ^^^^^^^^^ in vk_validation_database is easily massaged into the following list
// Note: C++11 doesn't automatically devolve enum types to the underlying type for hash traits purposes (fixed in C++14)
using CmdTypeHashType = std::underlying_type<CMD_TYPE>::type;
static const std::unordered_map<CmdTypeHashType, std::string> must_be_recording_map = {
{CMD_NONE, kVUIDUndefined}, // UNMATCHED
{CMD_BEGINQUERY, "VUID-vkCmdBeginQuery-commandBuffer-recording"},
{CMD_BEGINRENDERPASS, "VUID-vkCmdBeginRenderPass-commandBuffer-recording"},
{CMD_BINDDESCRIPTORSETS, "VUID-vkCmdBindDescriptorSets-commandBuffer-recording"},
{CMD_BINDINDEXBUFFER, "VUID-vkCmdBindIndexBuffer-commandBuffer-recording"},
{CMD_BINDPIPELINE, "VUID-vkCmdBindPipeline-commandBuffer-recording"},
{CMD_BINDVERTEXBUFFERS, "VUID-vkCmdBindVertexBuffers-commandBuffer-recording"},
{CMD_BLITIMAGE, "VUID-vkCmdBlitImage-commandBuffer-recording"},
{CMD_CLEARATTACHMENTS, "VUID-vkCmdClearAttachments-commandBuffer-recording"},
{CMD_CLEARCOLORIMAGE, "VUID-vkCmdClearColorImage-commandBuffer-recording"},
{CMD_CLEARDEPTHSTENCILIMAGE, "VUID-vkCmdClearDepthStencilImage-commandBuffer-recording"},
{CMD_COPYBUFFER, "VUID-vkCmdCopyBuffer-commandBuffer-recording"},
{CMD_COPYBUFFERTOIMAGE, "VUID-vkCmdCopyBufferToImage-commandBuffer-recording"},
{CMD_COPYIMAGE, "VUID-vkCmdCopyImage-commandBuffer-recording"},
{CMD_COPYIMAGETOBUFFER, "VUID-vkCmdCopyImageToBuffer-commandBuffer-recording"},
{CMD_COPYQUERYPOOLRESULTS, "VUID-vkCmdCopyQueryPoolResults-commandBuffer-recording"},
{CMD_DEBUGMARKERBEGINEXT, "VUID-vkCmdDebugMarkerBeginEXT-commandBuffer-recording"},
{CMD_DEBUGMARKERENDEXT, "VUID-vkCmdDebugMarkerEndEXT-commandBuffer-recording"},
{CMD_DEBUGMARKERINSERTEXT, "VUID-vkCmdDebugMarkerInsertEXT-commandBuffer-recording"},
{CMD_DISPATCH, "VUID-vkCmdDispatch-commandBuffer-recording"},
// Exclude KHX (if not already present) { CMD_DISPATCHBASEKHX, "VUID-vkCmdDispatchBase-commandBuffer-recording" },
{CMD_DISPATCHINDIRECT, "VUID-vkCmdDispatchIndirect-commandBuffer-recording"},
{CMD_DRAW, "VUID-vkCmdDraw-commandBuffer-recording"},
{CMD_DRAWINDEXED, "VUID-vkCmdDrawIndexed-commandBuffer-recording"},
{CMD_DRAWINDEXEDINDIRECT, "VUID-vkCmdDrawIndexedIndirect-commandBuffer-recording"},
// Exclude vendor ext (if not already present) { CMD_DRAWINDEXEDINDIRECTCOUNTAMD,
// "VUID-vkCmdDrawIndexedIndirectCountAMD-commandBuffer-recording" },
{CMD_DRAWINDEXEDINDIRECTCOUNTKHR, "VUID-vkCmdDrawIndexedIndirectCountKHR-commandBuffer-recording"},
{CMD_DRAWINDIRECT, "VUID-vkCmdDrawIndirect-commandBuffer-recording"},
// Exclude vendor ext (if not already present) { CMD_DRAWINDIRECTCOUNTAMD,
// "VUID-vkCmdDrawIndirectCountAMD-commandBuffer-recording" },
{CMD_DRAWINDIRECTCOUNTKHR, "VUID-vkCmdDrawIndirectCountKHR-commandBuffer-recording"},
{CMD_ENDCOMMANDBUFFER, "VUID-vkEndCommandBuffer-commandBuffer-00059"},
{CMD_ENDQUERY, "VUID-vkCmdEndQuery-commandBuffer-recording"},
{CMD_ENDRENDERPASS, "VUID-vkCmdEndRenderPass-commandBuffer-recording"},
{CMD_EXECUTECOMMANDS, "VUID-vkCmdExecuteCommands-commandBuffer-recording"},
{CMD_FILLBUFFER, "VUID-vkCmdFillBuffer-commandBuffer-recording"},
{CMD_NEXTSUBPASS, "VUID-vkCmdNextSubpass-commandBuffer-recording"},
{CMD_PIPELINEBARRIER, "VUID-vkCmdPipelineBarrier-commandBuffer-recording"},
// Exclude vendor ext (if not already present) { CMD_PROCESSCOMMANDSNVX, "VUID-vkCmdProcessCommandsNVX-commandBuffer-recording"
// },
{CMD_PUSHCONSTANTS, "VUID-vkCmdPushConstants-commandBuffer-recording"},
{CMD_PUSHDESCRIPTORSETKHR, "VUID-vkCmdPushDescriptorSetKHR-commandBuffer-recording"},
{CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-recording"},
// Exclude vendor ext (if not already present) { CMD_RESERVESPACEFORCOMMANDSNVX,
// "VUID-vkCmdReserveSpaceForCommandsNVX-commandBuffer-recording" },
{CMD_RESETEVENT, "VUID-vkCmdResetEvent-commandBuffer-recording"},
{CMD_RESETQUERYPOOL, "VUID-vkCmdResetQueryPool-commandBuffer-recording"},
{CMD_RESOLVEIMAGE, "VUID-vkCmdResolveImage-commandBuffer-recording"},
{CMD_SETBLENDCONSTANTS, "VUID-vkCmdSetBlendConstants-commandBuffer-recording"},
{CMD_SETDEPTHBIAS, "VUID-vkCmdSetDepthBias-commandBuffer-recording"},
{CMD_SETDEPTHBOUNDS, "VUID-vkCmdSetDepthBounds-commandBuffer-recording"},
// Exclude KHX (if not already present) { CMD_SETDEVICEMASKKHX, "VUID-vkCmdSetDeviceMask-commandBuffer-recording" },
{CMD_SETDISCARDRECTANGLEEXT, "VUID-vkCmdSetDiscardRectangleEXT-commandBuffer-recording"},
{CMD_SETEVENT, "VUID-vkCmdSetEvent-commandBuffer-recording"},
{CMD_SETLINEWIDTH, "VUID-vkCmdSetLineWidth-commandBuffer-recording"},
{CMD_SETSAMPLELOCATIONSEXT, "VUID-vkCmdSetSampleLocationsEXT-commandBuffer-recording"},
{CMD_SETSCISSOR, "VUID-vkCmdSetScissor-commandBuffer-recording"},
{CMD_SETSTENCILCOMPAREMASK, "VUID-vkCmdSetStencilCompareMask-commandBuffer-recording"},
{CMD_SETSTENCILREFERENCE, "VUID-vkCmdSetStencilReference-commandBuffer-recording"},
{CMD_SETSTENCILWRITEMASK, "VUID-vkCmdSetStencilWriteMask-commandBuffer-recording"},
{CMD_SETVIEWPORT, "VUID-vkCmdSetViewport-commandBuffer-recording"},
// Exclude vendor ext (if not already present) { CMD_SETVIEWPORTWSCALINGNV,
// "VUID-vkCmdSetViewportWScalingNV-commandBuffer-recording" },
{CMD_UPDATEBUFFER, "VUID-vkCmdUpdateBuffer-commandBuffer-recording"},
{CMD_WAITEVENTS, "VUID-vkCmdWaitEvents-commandBuffer-recording"},
{CMD_WRITETIMESTAMP, "VUID-vkCmdWriteTimestamp-commandBuffer-recording"},
};
// Validate the given command being added to the specified cmd buffer, flagging errors if CB is not in the recording state or if
// there's an issue with the Cmd ordering
bool ValidateCmd(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd, const char *caller_name) {
switch (cb_state->state) {
case CB_RECORDING:
return ValidateCmdSubpassState(dev_data, cb_state, cmd);
case CB_INVALID_COMPLETE:
case CB_INVALID_INCOMPLETE:
return ReportInvalidCommandBuffer(dev_data, cb_state, caller_name);
default:
auto error_it = must_be_recording_map.find(cmd);
// This assert lets us know that a vkCmd.* entrypoint has been added without enabling it in the map
assert(error_it != must_be_recording_map.cend());
if (error_it == must_be_recording_map.cend()) {
error_it = must_be_recording_map.find(CMD_NONE); // But we'll handle the asserting case, in case of a test gap
}
const auto error = error_it->second;
return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), error,
"You must call vkBeginCommandBuffer() before this call to %s.", caller_name);
}
}
// For given object struct return a ptr of BASE_NODE type for its wrapping struct
BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
BASE_NODE *base_ptr = nullptr;
switch (object_struct.type) {
case kVulkanObjectTypeDescriptorSet: {
base_ptr = GetSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
break;
}
case kVulkanObjectTypeSampler: {
base_ptr = GetSamplerState(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
break;
}
case kVulkanObjectTypeQueryPool: {
base_ptr = GetQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
break;
}
case kVulkanObjectTypePipeline: {
base_ptr = GetPipelineState(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
break;
}
case kVulkanObjectTypeBuffer: {
base_ptr = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
break;
}
case kVulkanObjectTypeBufferView: {
base_ptr = GetBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
break;
}
case kVulkanObjectTypeImage: {
base_ptr = GetImageState(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
break;
}
case kVulkanObjectTypeImageView: {
base_ptr = GetImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
break;
}
case kVulkanObjectTypeEvent: {
base_ptr = GetEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
break;
}
case kVulkanObjectTypeDescriptorPool: {
base_ptr = GetDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
break;
}
case kVulkanObjectTypeCommandPool: {
base_ptr = GetCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
break;
}
case kVulkanObjectTypeFramebuffer: {
base_ptr = GetFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
break;
}
case kVulkanObjectTypeRenderPass: {
base_ptr = GetRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
break;
}
case kVulkanObjectTypeDeviceMemory: {
base_ptr = GetMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
break;
}
default:
// TODO : Any other objects to be handled here?
assert(0);
break;
}
return base_ptr;
}
// Tie the VK_OBJECT to the cmd buffer which includes:
// Add object_binding to cmd buffer
// Add cb_binding to object
static void AddCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
cb_bindings->insert(cb_node);
cb_node->object_bindings.insert(obj);
}
// For a given object, if cb_node is in that object's cb_bindings, remove cb_node
static void RemoveCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
if (base_obj) base_obj->cb_bindings.erase(cb_node);
}
// Reset the command buffer state
// Maintain the createInfo and set state to CB_NEW, but clear all other state
static void ResetCommandBufferState(layer_data *dev_data, const VkCommandBuffer cb) {
GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
if (pCB) {
pCB->in_use.store(0);
// Reset CB state (note that createInfo is not cleared)
pCB->commandBuffer = cb;
memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
pCB->hasDrawCmd = false;
pCB->state = CB_NEW;
pCB->submitCount = 0;
pCB->image_layout_change_count = 1; // Start at 1. 0 is insert value for validation cache versions, s.t. new == dirty
pCB->status = 0;
pCB->static_status = 0;
pCB->viewportMask = 0;
pCB->scissorMask = 0;
for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
pCB->lastBound[i].reset();
}
memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
pCB->activeRenderPass = nullptr;
pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
pCB->activeSubpass = 0;
pCB->broken_bindings.clear();
pCB->waitedEvents.clear();
pCB->events.clear();
pCB->writeEventsBeforeWait.clear();
pCB->waitedEventsBeforeQueryReset.clear();
pCB->queryToStateMap.clear();
pCB->activeQueries.clear();
pCB->startedQueries.clear();
pCB->imageLayoutMap.clear();
pCB->eventToStageMap.clear();
pCB->draw_data.clear();
pCB->current_draw_data.vertex_buffer_bindings.clear();
pCB->vertex_buffer_used = false;
pCB->primaryCommandBuffer = VK_NULL_HANDLE;
// If secondary, invalidate any primary command buffer that may call us.
if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
InvalidateCommandBuffers(dev_data, pCB->linkedCommandBuffers, {HandleToUint64(cb), kVulkanObjectTypeCommandBuffer});
}
// Remove reverse command buffer links.
for (auto pSubCB : pCB->linkedCommandBuffers) {
pSubCB->linkedCommandBuffers.erase(pCB);
}
pCB->linkedCommandBuffers.clear();
pCB->updateImages.clear();
pCB->updateBuffers.clear();
ClearCmdBufAndMemReferences(dev_data, pCB);
pCB->queue_submit_functions.clear();
pCB->cmd_execute_commands_functions.clear();
pCB->eventUpdates.clear();
pCB->queryUpdates.clear();
// Remove object bindings
for (auto obj : pCB->object_bindings) {
RemoveCommandBufferBinding(dev_data, &obj, pCB);
}
pCB->object_bindings.clear();
// Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
for (auto framebuffer : pCB->framebuffers) {
auto fb_state = GetFramebufferState(dev_data, framebuffer);
if (fb_state) fb_state->cb_bindings.erase(pCB);
}
pCB->framebuffers.clear();
pCB->activeFramebuffer = VK_NULL_HANDLE;
memset(&pCB->index_buffer_binding, 0, sizeof(pCB->index_buffer_binding));
pCB->qfo_transfer_image_barriers.Reset();
pCB->qfo_transfer_buffer_barriers.Reset();
}
}
CBStatusFlags MakeStaticStateMask(VkPipelineDynamicStateCreateInfo const *ds) {
// initially assume everything is static state
CBStatusFlags flags = CBSTATUS_ALL_STATE_SET;
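    // e.g. (illustrative): a pipeline declaring VK_DYNAMIC_STATE_VIEWPORT ends up with
    // CBSTATUS_ALL_STATE_SET & ~CBSTATUS_VIEWPORT_SET, i.e. the viewport must come from vkCmdSetViewport()
    // rather than from the PSO.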
if (ds) {
for (uint32_t i = 0; i < ds->dynamicStateCount; i++) {
switch (ds->pDynamicStates[i]) {
case VK_DYNAMIC_STATE_LINE_WIDTH:
flags &= ~CBSTATUS_LINE_WIDTH_SET;
break;
case VK_DYNAMIC_STATE_DEPTH_BIAS:
flags &= ~CBSTATUS_DEPTH_BIAS_SET;
break;
case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
flags &= ~CBSTATUS_BLEND_CONSTANTS_SET;
break;
case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
flags &= ~CBSTATUS_DEPTH_BOUNDS_SET;
break;
case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
flags &= ~CBSTATUS_STENCIL_READ_MASK_SET;
break;
case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
flags &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
break;
case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
flags &= ~CBSTATUS_STENCIL_REFERENCE_SET;
break;
case VK_DYNAMIC_STATE_SCISSOR:
flags &= ~CBSTATUS_SCISSOR_SET;
break;
case VK_DYNAMIC_STATE_VIEWPORT:
flags &= ~CBSTATUS_VIEWPORT_SET;
break;
default:
break;
}
}
}
return flags;
}
// Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a
// render pass.
bool InsideRenderPass(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const char *apiName, const std::string &msgCode) {
bool inside = false;
if (pCB->activeRenderPass) {
inside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), msgCode,
"%s: It is invalid to issue this call inside an active render pass (0x%" PRIx64 ").", apiName,
HandleToUint64(pCB->activeRenderPass->renderPass));
}
return inside;
}
// Flags validation error if the associated call is made outside a render pass. The apiName
// routine should ONLY be called inside a render pass.
bool OutsideRenderPass(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *apiName, const std::string &msgCode) {
bool outside = false;
if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
!(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
outside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), msgCode, "%s: This call must be issued inside an active render pass.",
apiName);
}
return outside;
}
static void InitCoreValidation(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
layer_debug_report_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
layer_debug_messenger_actions(instance_data->report_data, instance_data->logging_messenger, pAllocator,
"lunarg_core_validation");
}
// For the given ValidationCheck enum, set all relevant instance disabled flags to true
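// Illustrative pNext chain that disables only shader checks:
//   VkValidationCheckEXT check = VK_VALIDATION_CHECK_SHADERS_EXT;
//   VkValidationFlagsEXT val_flags = {VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT, nullptr, 1, &check};
//   // ...chained into VkInstanceCreateInfo::pNext before calling vkCreateInstance().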
void SetDisabledFlags(instance_layer_data *instance_data, const VkValidationFlagsEXT *val_flags_struct) {
for (uint32_t i = 0; i < val_flags_struct->disabledValidationCheckCount; ++i) {
switch (val_flags_struct->pDisabledValidationChecks[i]) {
case VK_VALIDATION_CHECK_SHADERS_EXT:
instance_data->disabled.shader_validation = true;
break;
case VK_VALIDATION_CHECK_ALL_EXT:
// Set all disabled flags to true
instance_data->disabled.SetAll(true);
break;
default:
break;
}
}
}
VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
VkInstance *pInstance) {
VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
assert(chain_info->u.pLayerInfo);
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
if (fpCreateInstance == NULL) return VK_ERROR_INITIALIZATION_FAILED;
// Advance the link info for the next element on the chain
chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
if (result != VK_SUCCESS) return result;
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), instance_layer_data_map);
instance_data->instance = *pInstance;
layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);
instance_data->report_data = debug_utils_create_instance(
&instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
instance_data->api_version = instance_data->extensions.InitFromInstanceCreateInfo(
(pCreateInfo->pApplicationInfo ? pCreateInfo->pApplicationInfo->apiVersion : VK_API_VERSION_1_0), pCreateInfo);
InitCoreValidation(instance_data, pAllocator);
ValidateLayerOrdering(*pCreateInfo);
// Parse any pNext chains
const auto *validation_flags_ext = lvl_find_in_chain<VkValidationFlagsEXT>(pCreateInfo->pNext);
if (validation_flags_ext) {
SetDisabledFlags(instance_data, validation_flags_ext);
}
return result;
}
static void PostCallRecordDestroyInstance(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator,
dispatch_key key) {
// Clean up logging callback, if any
while (instance_data->logging_messenger.size() > 0) {
VkDebugUtilsMessengerEXT messenger = instance_data->logging_messenger.back();
layer_destroy_messenger_callback(instance_data->report_data, messenger, pAllocator);
instance_data->logging_messenger.pop_back();
}
while (instance_data->logging_callback.size() > 0) {
VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
layer_destroy_report_callback(instance_data->report_data, callback, pAllocator);
instance_data->logging_callback.pop_back();
}
layer_debug_utils_destroy_instance(instance_data->report_data);
FreeLayerDataPtr(key, instance_layer_data_map);
}
// Hook DestroyInstance to remove tableInstanceMap entry
VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
// TODOSC : Shouldn't need any customization here
dispatch_key key = get_dispatch_key(instance);
// TBD: Need any locking this early, in case this function is called at the
// same time by more than one thread?
instance_layer_data *instance_data = GetLayerDataPtr(key, instance_layer_data_map);
instance_data->dispatch_table.DestroyInstance(instance, pAllocator);
lock_guard_t lock(global_lock);
PostCallRecordDestroyInstance(instance_data, pAllocator, key);
}
static bool ValidatePhysicalDeviceQueueFamily(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
uint32_t requested_queue_family, std::string err_code, const char *cmd_name,
const char *queue_family_var_name) {
bool skip = false;
const char *conditional_ext_cmd = instance_data->extensions.vk_khr_get_physical_device_properties_2
? "or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]"
: "";
std::string count_note = (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
? "the pQueueFamilyPropertyCount was never obtained"
: "i.e. is not less than " + std::to_string(pd_state->queue_family_count);
if (requested_queue_family >= pd_state->queue_family_count) {
skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(pd_state->phys_device), err_code,
"%s: %s (= %" PRIu32
") is not less than any previously obtained pQueueFamilyPropertyCount from "
"vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
cmd_name, queue_family_var_name, requested_queue_family, conditional_ext_cmd, count_note.c_str());
}
return skip;
}
// Verify VkDeviceQueueCreateInfos
static bool ValidateDeviceQueueCreateInfos(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
uint32_t info_count, const VkDeviceQueueCreateInfo *infos) {
bool skip = false;
for (uint32_t i = 0; i < info_count; ++i) {
const auto requested_queue_family = infos[i].queueFamilyIndex;
// Verify that requested queue family is known to be valid at this point in time
std::string queue_family_var_name = "pCreateInfo->pQueueCreateInfos[" + std::to_string(i) + "].queueFamilyIndex";
skip |= ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, requested_queue_family,
"VUID-VkDeviceQueueCreateInfo-queueFamilyIndex-00381", "vkCreateDevice",
queue_family_var_name.c_str());
// Verify that requested queue count of queue family is known to be valid at this point in time
if (requested_queue_family < pd_state->queue_family_count) {
const auto requested_queue_count = infos[i].queueCount;
const auto queue_family_props_count = pd_state->queue_family_properties.size();
const bool queue_family_has_props = requested_queue_family < queue_family_props_count;
const char *conditional_ext_cmd = instance_data->extensions.vk_khr_get_physical_device_properties_2
? "or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]"
: "";
std::string count_note =
!queue_family_has_props
? "the pQueueFamilyProperties[" + std::to_string(requested_queue_family) + "] was never obtained"
: "i.e. is not less than or equal to " +
std::to_string(pd_state->queue_family_properties[requested_queue_family].queueCount);
if (!queue_family_has_props ||
requested_queue_count > pd_state->queue_family_properties[requested_queue_family].queueCount) {
skip |= log_msg(
instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(pd_state->phys_device), "VUID-VkDeviceQueueCreateInfo-queueCount-00382",
"vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueCount (=%" PRIu32
") is not less than or equal to available queue count for this pCreateInfo->pQueueCreateInfos[%" PRIu32
"].queueFamilyIndex} (=%" PRIu32 ") obtained previously from vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
i, requested_queue_count, i, requested_queue_family, conditional_ext_cmd, count_note.c_str());
}
}
}
return skip;
}
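// Worked example (illustrative): for a device reporting queue_family_count == 2 where
// family 1 advertises queueCount == 4, a pQueueCreateInfos entry with queueFamilyIndex == 2
// trips "VUID-VkDeviceQueueCreateInfo-queueFamilyIndex-00381", and an entry with
// {queueFamilyIndex = 1, queueCount = 5} trips "VUID-VkDeviceQueueCreateInfo-queueCount-00382".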
// Verify that features have been queried and that they are available
static bool ValidateRequestedFeatures(instance_layer_data *instance_data, const PHYSICAL_DEVICE_STATE *pd_state,
const VkPhysicalDeviceFeatures *requested_features) {
bool skip = false;
const VkBool32 *actual = reinterpret_cast<const VkBool32 *>(&pd_state->features2.features.robustBufferAccess);
const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
    // TODO : This is a nice, compact way to loop through the struct, but a bad way to report issues
    //  Need to provide the struct member name with the issue. To do that seems like we'll
    //  have to loop through each struct member, which should be done w/ codegen to keep in sync.
uint32_t errors = 0;
uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
for (uint32_t i = 0; i < total_bools; i++) {
if (requested[i] > actual[i]) {
skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, kVUID_Core_DevLimit_InvalidFeatureRequested,
"While calling vkCreateDevice(), requesting feature '%s' in VkPhysicalDeviceFeatures struct, which is "
"not available on this device.",
GetPhysDevFeatureString(i));
errors++;
}
}
if (errors && (UNCALLED == pd_state->vkGetPhysicalDeviceFeaturesState)) {
// If user didn't request features, notify them that they should
// TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
0, kVUID_Core_DevLimit_InvalidFeatureRequested,
"You requested features that are unavailable on this device. You should first query feature availability "
"by calling vkGetPhysicalDeviceFeatures().");
}
return skip;
}
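// Note on the loop above: it relies on VkPhysicalDeviceFeatures being, per the Vulkan
// headers, a contiguous run of VkBool32 members starting at robustBufferAccess, so feature
// index 0 is robustBufferAccess, index 1 is fullDrawIndexUint32, and so on;
// GetPhysDevFeatureString(i) maps an index back to the member name for reporting.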
VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
bool skip = false;
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(gpu), instance_layer_data_map);
unique_lock_t lock(global_lock);
auto pd_state = GetPhysicalDeviceState(instance_data, gpu);
// TODO: object_tracker should perhaps do this instead
// and it does not seem to currently work anyway -- the loader just crashes before this point
    if (!pd_state) {
skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
0, kVUID_Core_DevLimit_MustQueryCount,
"Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
}
// Check that any requested features are available
// The enabled features can come from either pEnabledFeatures, or from the pNext chain
// TODO: Validate "VUID-VkDeviceCreateInfo-pNext-00373" here, can't have non-null pEnabledFeatures & GPDF2 in pNext chain
const VkPhysicalDeviceFeatures *enabled_features_found = pCreateInfo->pEnabledFeatures;
if (nullptr == enabled_features_found) {
const auto *features2 = lvl_find_in_chain<VkPhysicalDeviceFeatures2KHR>(pCreateInfo->pNext);
if (features2) {
enabled_features_found = &(features2->features);
}
}
if (enabled_features_found) {
skip |= ValidateRequestedFeatures(instance_data, pd_state, enabled_features_found);
}
skip |=
ValidateDeviceQueueCreateInfos(instance_data, pd_state, pCreateInfo->queueCreateInfoCount, pCreateInfo->pQueueCreateInfos);
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
assert(chain_info->u.pLayerInfo);
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(instance_data->instance, "vkCreateDevice");
if (fpCreateDevice == NULL) {
return VK_ERROR_INITIALIZATION_FAILED;
}
// Advance the link info for the next element on the chain
chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
lock.unlock();
VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
if (result != VK_SUCCESS) {
return result;
}
lock.lock();
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
device_data->instance_data = instance_data;
// Setup device dispatch table
layer_init_device_dispatch_table(*pDevice, &device_data->dispatch_table, fpGetDeviceProcAddr);
device_data->device = *pDevice;
// Save PhysicalDevice handle
device_data->physical_device = gpu;
device_data->report_data = layer_debug_utils_create_device(instance_data->report_data, *pDevice);
// Get physical device limits for this device
instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(device_data->phys_dev_properties.properties));
// Setup the validation tables based on the application API version from the instance and the capabilities of the device driver.
uint32_t effective_api_version = std::min(device_data->phys_dev_properties.properties.apiVersion, instance_data->api_version);
device_data->api_version =
device_data->extensions.InitFromDeviceCreateInfo(&instance_data->extensions, effective_api_version, pCreateInfo);
uint32_t count;
instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
device_data->phys_dev_properties.queue_family_properties.resize(count);
    instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(
        gpu, &count, device_data->phys_dev_properties.queue_family_properties.data());
// TODO: device limits should make sure these are compatible
if (enabled_features_found) {
device_data->enabled_features.core = *enabled_features_found;
}
const auto *descriptor_indexing_features = lvl_find_in_chain<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>(pCreateInfo->pNext);
if (descriptor_indexing_features) {
device_data->enabled_features.descriptor_indexing = *descriptor_indexing_features;
}
const auto *eight_bit_storage_features = lvl_find_in_chain<VkPhysicalDevice8BitStorageFeaturesKHR>(pCreateInfo->pNext);
if (eight_bit_storage_features) {
device_data->enabled_features.eight_bit_storage = *eight_bit_storage_features;
}
// Store physical device properties and physical device mem limits into device layer_data structs
instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &device_data->phys_dev_mem_props);
instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &device_data->phys_dev_props);
if (device_data->extensions.vk_khr_push_descriptor) {
// Get the needed push_descriptor limits
auto push_descriptor_prop = lvl_init_struct<VkPhysicalDevicePushDescriptorPropertiesKHR>();
auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&push_descriptor_prop);
instance_data->dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
device_data->phys_dev_ext_props.max_push_descriptors = push_descriptor_prop.maxPushDescriptors;
}
if (device_data->extensions.vk_ext_descriptor_indexing) {
// Get the needed descriptor_indexing limits
auto descriptor_indexing_props = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingPropertiesEXT>();
auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&descriptor_indexing_props);
instance_data->dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
device_data->phys_dev_ext_props.descriptor_indexing_props = descriptor_indexing_props;
}
lock.unlock();
ValidateLayerOrdering(*pCreateInfo);
return result;
}
static void PreCallRecordDestroyDevice(layer_data *dev_data, VkDevice device) {
dev_data->pipelineMap.clear();
dev_data->renderPassMap.clear();
for (auto ii = dev_data->commandBufferMap.begin(); ii != dev_data->commandBufferMap.end(); ++ii) {
delete (*ii).second;
}
dev_data->commandBufferMap.clear();
// This will also delete all sets in the pool & remove them from setMap
DeletePools(dev_data);
// All sets should be removed
assert(dev_data->setMap.empty());
dev_data->descriptorSetLayoutMap.clear();
dev_data->imageViewMap.clear();
dev_data->imageMap.clear();
dev_data->imageSubresourceMap.clear();
dev_data->imageLayoutMap.clear();
dev_data->bufferViewMap.clear();
dev_data->bufferMap.clear();
// Queues persist until device is destroyed
dev_data->queueMap.clear();
// Report any memory leaks
layer_debug_utils_destroy_device(device);
}
static void PostCallRecordDestroyDevice(const dispatch_key &key) { FreeLayerDataPtr(key, layer_data_map); }
VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
// TODOSC : Shouldn't need any customization here
dispatch_key key = get_dispatch_key(device);
layer_data *dev_data = GetLayerDataPtr(key, layer_data_map);
unique_lock_t lock(global_lock);
PreCallRecordDestroyDevice(dev_data, device);
lock.unlock();
#if DISPATCH_MAP_DEBUG
    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", (void *)device, (void *)key);
#endif
dev_data->dispatch_table.DestroyDevice(device, pAllocator);
// Free all the memory
lock.lock();
PostCallRecordDestroyDevice(key);
}
static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
// For the given stage mask, if the Geometry shader stage is used without the geometryShader feature enabled, report
// geo_error_id, and if the Tessellation Control or Evaluation shader stages are used without the tessellationShader feature
// enabled, report tess_error_id
static bool ValidateStageMaskGsTsEnables(const layer_data *dev_data, VkPipelineStageFlags stageMask, const char *caller,
std::string geo_error_id, std::string tess_error_id) {
bool skip = false;
if (!dev_data->enabled_features.core.geometryShader && (stageMask & VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, geo_error_id,
"%s call includes a stageMask with VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT bit set when device does not have "
"geometryShader feature enabled.",
caller);
}
if (!dev_data->enabled_features.core.tessellationShader &&
(stageMask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT))) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, tess_error_id,
"%s call includes a stageMask with VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT and/or "
"VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT bit(s) set when device does not have "
"tessellationShader feature enabled.",
caller);
}
return skip;
}
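// Example (illustrative): calling vkCmdSetEvent() with a stageMask containing
// VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT on a device created without the geometryShader
// feature would be reported through geo_error_id by this helper.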
// Loop through bound objects and increment their in_use counts.
static void IncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
for (auto obj : cb_node->object_bindings) {
auto base_obj = GetStateStructPtrFromObject(dev_data, obj);
if (base_obj) {
base_obj->in_use.fetch_add(1);
}
}
}
// Track which resources are in-flight by atomically incrementing their "in_use" count
static void IncrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
cb_node->submitCount++;
cb_node->in_use.fetch_add(1);
// First Increment for all "generic" objects bound to cmd buffer, followed by special-case objects below
IncrementBoundObjects(dev_data, cb_node);
// TODO : We should be able to remove the NULL look-up checks from the code below as long as
// all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
// should then be flagged prior to calling this function
for (auto draw_data_element : cb_node->draw_data) {
for (auto &vertex_buffer : draw_data_element.vertex_buffer_bindings) {
auto buffer_state = GetBufferState(dev_data, vertex_buffer.buffer);
if (buffer_state) {
buffer_state->in_use.fetch_add(1);
}
}
}
for (auto event : cb_node->writeEventsBeforeWait) {
auto event_state = GetEventNode(dev_data, event);
if (event_state) event_state->write_in_use++;
}
}
// Note: This function assumes that the global lock is held by the calling thread.
// For the given queue, verify the queue state up to the given seq number.
// Currently the only check is to make sure that if there are events to be waited on prior to
// a QueryReset, make sure that all such events have been signalled.
static bool VerifyQueueStateToSeq(layer_data *dev_data, QUEUE_STATE *initial_queue, uint64_t initial_seq) {
bool skip = false;
// sequence number we want to validate up to, per queue
std::unordered_map<QUEUE_STATE *, uint64_t> target_seqs{{initial_queue, initial_seq}};
// sequence number we've completed validation for, per queue
std::unordered_map<QUEUE_STATE *, uint64_t> done_seqs;
std::vector<QUEUE_STATE *> worklist{initial_queue};
while (worklist.size()) {
auto queue = worklist.back();
worklist.pop_back();
auto target_seq = target_seqs[queue];
auto seq = std::max(done_seqs[queue], queue->seq);
auto sub_it = queue->submissions.begin() + int(seq - queue->seq); // seq >= queue->seq
for (; seq < target_seq; ++sub_it, ++seq) {
for (auto &wait : sub_it->waitSemaphores) {
auto other_queue = GetQueueState(dev_data, wait.queue);
if (other_queue == queue) continue; // semaphores /always/ point backwards, so no point here.
auto other_target_seq = std::max(target_seqs[other_queue], wait.seq);
auto other_done_seq = std::max(done_seqs[other_queue], other_queue->seq);
// if this wait is for another queue, and covers new sequence
// numbers beyond what we've already validated, mark the new
// target seq and (possibly-re)add the queue to the worklist.
if (other_done_seq < other_target_seq) {
target_seqs[other_queue] = other_target_seq;
worklist.push_back(other_queue);
}
}
for (auto cb : sub_it->cbs) {
auto cb_node = GetCBNode(dev_data, cb);
if (cb_node) {
for (auto queryEventsPair : cb_node->waitedEventsBeforeQueryReset) {
for (auto event : queryEventsPair.second) {
if (dev_data->eventMap[event].needsSignaled) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, kVUID_Core_DrawState_InvalidQuery,
"Cannot get query results on queryPool 0x%" PRIx64
" with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
HandleToUint64(queryEventsPair.first.pool), queryEventsPair.first.index,
HandleToUint64(event));
}
}
}
}
}
}
// finally mark the point we've now validated this queue to.
done_seqs[queue] = seq;
}
return skip;
}
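// Worked example (illustrative): suppose queue A's submission at seq 5 waits on a semaphore
// signaled by queue B at seq 3, and B has only been validated to seq 1. Validating A up to
// seq 5 records target seq 3 for B and pushes B onto the worklist, so B's submissions 2..3
// are walked for unsignaled-event hazards before A's wait is treated as satisfiable.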
// When the given fence is retired, verify outstanding queue operations through the point of the fence
static bool VerifyQueueStateToFence(layer_data *dev_data, VkFence fence) {
auto fence_state = GetFenceNode(dev_data, fence);
if (fence_state && fence_state->scope == kSyncScopeInternal && VK_NULL_HANDLE != fence_state->signaler.first) {
return VerifyQueueStateToSeq(dev_data, GetQueueState(dev_data, fence_state->signaler.first), fence_state->signaler.second);
}
return false;
}
// Decrement in-use count for objects bound to command buffer
static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
BASE_NODE *base_obj = nullptr;
for (auto obj : cb_node->object_bindings) {
base_obj = GetStateStructPtrFromObject(dev_data, obj);
if (base_obj) {
base_obj->in_use.fetch_sub(1);
}
}
}
static void RetireWorkOnQueue(layer_data *dev_data, QUEUE_STATE *pQueue, uint64_t seq) {
std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;
// Roll this queue forward, one submission at a time.
while (pQueue->seq < seq) {
auto &submission = pQueue->submissions.front();
for (auto &wait : submission.waitSemaphores) {
auto pSemaphore = GetSemaphoreNode(dev_data, wait.semaphore);
if (pSemaphore) {
pSemaphore->in_use.fetch_sub(1);
}
auto &lastSeq = otherQueueSeqs[wait.queue];
lastSeq = std::max(lastSeq, wait.seq);
}
for (auto &semaphore : submission.signalSemaphores) {
auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
if (pSemaphore) {
pSemaphore->in_use.fetch_sub(1);
}
}
for (auto &semaphore : submission.externalSemaphores) {
auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
if (pSemaphore) {
pSemaphore->in_use.fetch_sub(1);
}
}
for (auto cb : submission.cbs) {
auto cb_node = GetCBNode(dev_data, cb);
if (!cb_node) {
continue;
}
// First perform decrement on general case bound objects
DecrementBoundResources(dev_data, cb_node);
for (auto draw_data_element : cb_node->draw_data) {
for (auto &vertex_buffer_binding : draw_data_element.vertex_buffer_bindings) {
auto buffer_state = GetBufferState(dev_data, vertex_buffer_binding.buffer);
if (buffer_state) {
buffer_state->in_use.fetch_sub(1);
}
}
}
for (auto event : cb_node->writeEventsBeforeWait) {
auto eventNode = dev_data->eventMap.find(event);
if (eventNode != dev_data->eventMap.end()) {
eventNode->second.write_in_use--;
}
}
for (auto queryStatePair : cb_node->queryToStateMap) {
dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
}
for (auto eventStagePair : cb_node->eventToStageMap) {
dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
}
cb_node->in_use.fetch_sub(1);
}
auto pFence = GetFenceNode(dev_data, submission.fence);
if (pFence && pFence->scope == kSyncScopeInternal) {
pFence->state = FENCE_RETIRED;
}
pQueue->submissions.pop_front();
pQueue->seq++;
}
// Roll other queues forward to the highest seq we saw a wait for
for (auto qs : otherQueueSeqs) {
RetireWorkOnQueue(dev_data, GetQueueState(dev_data, qs.first), qs.second);
}
}
// Submit a fence to a queue, marking the point up to which all prior submissions (and any
// previously untracked work) are covered by the fence.
static void SubmitFence(QUEUE_STATE *pQueue, FENCE_NODE *pFence, uint64_t submitCount) {
pFence->state = FENCE_INFLIGHT;
pFence->signaler.first = pQueue->queue;
pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
}
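// Example (illustrative): with pQueue->seq == 10 and two submissions still pending,
// submitting three more batches with a fence sets signaler.second to 10 + 2 + 3 = 15,
// i.e. the fence is considered signaled once the queue has retired through seq 15.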
static bool ValidateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
bool skip = false;
if ((pCB->in_use.load() || current_submit_count > 1) &&
!(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
"VUID-vkQueueSubmit-pCommandBuffers-00071",
"Command Buffer 0x%" PRIx64 " is already in use and is not marked for simultaneous use.",
HandleToUint64(pCB->commandBuffer));
}
return skip;
}
static bool ValidateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const char *call_source,
int current_submit_count, std::string vu_id) {
bool skip = false;
if (dev_data->instance_data->disabled.command_buffer_state) return skip;
// Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
if ((cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) &&
(cb_state->submitCount + current_submit_count > 1)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
kVUID_Core_DrawState_CommandBufferSingleSubmitViolation,
"Commandbuffer 0x%" PRIx64
" was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted 0x%" PRIxLEAST64
" times.",
HandleToUint64(cb_state->commandBuffer), cb_state->submitCount + current_submit_count);
}
// Validate that cmd buffers have been updated
switch (cb_state->state) {
case CB_INVALID_INCOMPLETE:
case CB_INVALID_COMPLETE:
skip |= ReportInvalidCommandBuffer(dev_data, cb_state, call_source);
break;
case CB_NEW:
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
(uint64_t)(cb_state->commandBuffer), vu_id,
"Command buffer 0x%" PRIx64 " used in the call to %s is unrecorded and contains no commands.",
HandleToUint64(cb_state->commandBuffer), call_source);
break;
case CB_RECORDING:
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), kVUID_Core_DrawState_NoEndCommandBuffer,
"You must call vkEndCommandBuffer() on command buffer 0x%" PRIx64 " before this call to %s!",
HandleToUint64(cb_state->commandBuffer), call_source);
break;
default: /* recorded */
break;
}
return skip;
}
static bool ValidateResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
bool skip = false;
// TODO : We should be able to remove the NULL look-up checks from the code below as long as
// all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
// should then be flagged prior to calling this function
for (const auto &draw_data_element : cb_node->draw_data) {
for (const auto &vertex_buffer_binding : draw_data_element.vertex_buffer_bindings) {
auto buffer_state = GetBufferState(dev_data, vertex_buffer_binding.buffer);
if ((vertex_buffer_binding.buffer != VK_NULL_HANDLE) && (!buffer_state)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(vertex_buffer_binding.buffer), kVUID_Core_DrawState_InvalidBuffer,
"Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".",
HandleToUint64(vertex_buffer_binding.buffer));
}
}
}
return skip;
}
// Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices
bool ValidImageBufferQueue(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const VK_OBJECT *object, VkQueue queue, uint32_t count,
const uint32_t *indices) {
bool found = false;
bool skip = false;
auto queue_state = GetQueueState(dev_data, queue);
if (queue_state) {
for (uint32_t i = 0; i < count; i++) {
if (indices[i] == queue_state->queueFamilyIndex) {
found = true;
break;
}
}
if (!found) {
            skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object->type],
                           object->handle, kVUID_Core_DrawState_InvalidQueueFamily,
                           "vkQueueSubmit: Command buffer 0x%" PRIx64 " contains %s 0x%" PRIx64
                           " which was not created with queue family %d in its pQueueFamilyIndices, so concurrent access from "
                           "this queue is not allowed.",
                           HandleToUint64(cb_node->commandBuffer), object_string[object->type], object->handle,
                           queue_state->queueFamilyIndex);
}
}
return skip;
}
// Validate that queueFamilyIndices of primary command buffers match this queue
// Secondary command buffers were previously validated in vkCmdExecuteCommands().
static bool ValidateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
bool skip = false;
auto pPool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
auto queue_state = GetQueueState(dev_data, queue);
if (pPool && queue_state) {
if (pPool->queueFamilyIndex != queue_state->queueFamilyIndex) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), "VUID-vkQueueSubmit-pCommandBuffers-00074",
"vkQueueSubmit: Primary command buffer 0x%" PRIx64
" created in queue family %d is being submitted on queue 0x%" PRIx64 " from queue family %d.",
HandleToUint64(pCB->commandBuffer), pPool->queueFamilyIndex, HandleToUint64(queue),
queue_state->queueFamilyIndex);
}
// Ensure that any bound images or buffers created with SHARING_MODE_CONCURRENT have access to the current queue family
for (auto object : pCB->object_bindings) {
if (object.type == kVulkanObjectTypeImage) {
auto image_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(object.handle));
if (image_state && image_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, image_state->createInfo.queueFamilyIndexCount,
image_state->createInfo.pQueueFamilyIndices);
}
} else if (object.type == kVulkanObjectTypeBuffer) {
auto buffer_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(object.handle));
if (buffer_state && buffer_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
skip |= ValidImageBufferQueue(dev_data, pCB, &object, queue, buffer_state->createInfo.queueFamilyIndexCount,
buffer_state->createInfo.pQueueFamilyIndices);
}
}
}
}
return skip;
}
static bool ValidatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count,
QFOTransferCBScoreboards<VkImageMemoryBarrier> *qfo_image_scoreboards,
QFOTransferCBScoreboards<VkBufferMemoryBarrier> *qfo_buffer_scoreboards) {
// Track in-use for resources off of primary and any secondary CBs
bool skip = false;
// If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
// on device
skip |= ValidateCommandBufferSimultaneousUse(dev_data, pCB, current_submit_count);
skip |= ValidateResources(dev_data, pCB);
skip |= ValidateQueuedQFOTransfers(dev_data, pCB, qfo_image_scoreboards, qfo_buffer_scoreboards);
for (auto pSubCB : pCB->linkedCommandBuffers) {
skip |= ValidateResources(dev_data, pSubCB);
skip |= ValidateQueuedQFOTransfers(dev_data, pSubCB, qfo_image_scoreboards, qfo_buffer_scoreboards);
// TODO: replace with InvalidateCommandBuffers() at recording.
        if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
            !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            0, "VUID-vkQueueSubmit-pCommandBuffers-00073",
                            "Command buffer 0x%" PRIx64 " was submitted with secondary buffer 0x%" PRIx64
                            " but that buffer has subsequently been bound to primary cmd buffer 0x%" PRIx64
                            " and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
                            HandleToUint64(pCB->commandBuffer), HandleToUint64(pSubCB->commandBuffer),
                            HandleToUint64(pSubCB->primaryCommandBuffer));
        }
}
skip |= ValidateCommandBufferState(dev_data, pCB, "vkQueueSubmit()", current_submit_count,
"VUID-vkQueueSubmit-pCommandBuffers-00072");
return skip;
}
static bool ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
bool skip = false;
if (pFence && pFence->scope == kSyncScopeInternal) {
if (pFence->state == FENCE_INFLIGHT) {
// TODO: opportunities for "VUID-vkQueueSubmit-fence-00064", "VUID-vkQueueBindSparse-fence-01114",
// "VUID-vkAcquireNextImageKHR-fence-01287"
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
HandleToUint64(pFence->fence), kVUID_Core_DrawState_InvalidFence,
"Fence 0x%" PRIx64 " is already in use by another submission.", HandleToUint64(pFence->fence));
        } else if (pFence->state == FENCE_RETIRED) {
// TODO: opportunities for "VUID-vkQueueSubmit-fence-00063", "VUID-vkQueueBindSparse-fence-01113",
// "VUID-vkAcquireNextImageKHR-fence-01287"
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
HandleToUint64(pFence->fence), kVUID_Core_MemTrack_FenceState,
"Fence 0x%" PRIx64 " submitted in SIGNALED state. Fences must be reset before being submitted",
HandleToUint64(pFence->fence));
}
}
return skip;
}
static void PostCallRecordQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
VkFence fence) {
uint64_t early_retire_seq = 0;
auto pQueue = GetQueueState(dev_data, queue);
auto pFence = GetFenceNode(dev_data, fence);
if (pFence) {
if (pFence->scope == kSyncScopeInternal) {
// Mark fence in use
SubmitFence(pQueue, pFence, std::max(1u, submitCount));
if (!submitCount) {
// If no submissions, but just dropping a fence on the end of the queue,
// record an empty submission with just the fence, so we can determine
// its completion.
pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(),
std::vector<VkSemaphore>(), std::vector<VkSemaphore>(), fence);
}
} else {
// Retire work up until this fence early, we will not see the wait that corresponds to this signal
early_retire_seq = pQueue->seq + pQueue->submissions.size();
if (!dev_data->external_sync_warning) {
dev_data->external_sync_warning = true;
log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
HandleToUint64(fence), kVUID_Core_DrawState_QueueForwardProgress,
"vkQueueSubmit(): Signaling external fence 0x%" PRIx64 " on queue 0x%" PRIx64
" will disable validation of preceding command buffer lifecycle states and the in-use status of associated "
"objects.",
HandleToUint64(fence), HandleToUint64(queue));
}
}
}
// Now process each individual submit
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
std::vector<VkCommandBuffer> cbs;
const VkSubmitInfo *submit = &pSubmits[submit_idx];
vector<SEMAPHORE_WAIT> semaphore_waits;
vector<VkSemaphore> semaphore_signals;
vector<VkSemaphore> semaphore_externals;
for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
VkSemaphore semaphore = submit->pWaitSemaphores[i];
auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
if (pSemaphore) {
if (pSemaphore->scope == kSyncScopeInternal) {
if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
pSemaphore->in_use.fetch_add(1);
}
pSemaphore->signaler.first = VK_NULL_HANDLE;
pSemaphore->signaled = false;
} else {
semaphore_externals.push_back(semaphore);
pSemaphore->in_use.fetch_add(1);
if (pSemaphore->scope == kSyncScopeExternalTemporary) {
pSemaphore->scope = kSyncScopeInternal;
}
}
}
}
for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
VkSemaphore semaphore = submit->pSignalSemaphores[i];
auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
if (pSemaphore) {
if (pSemaphore->scope == kSyncScopeInternal) {
pSemaphore->signaler.first = queue;
pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
pSemaphore->signaled = true;
pSemaphore->in_use.fetch_add(1);
semaphore_signals.push_back(semaphore);
} else {
// Retire work up until this submit early, we will not see the wait that corresponds to this signal
early_retire_seq = std::max(early_retire_seq, pQueue->seq + pQueue->submissions.size() + 1);
if (!dev_data->external_sync_warning) {
dev_data->external_sync_warning = true;
log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
"vkQueueSubmit(): Signaling external semaphore 0x%" PRIx64 " on queue 0x%" PRIx64
" will disable validation of preceding command buffer lifecycle states and the in-use status of "
"associated objects.",
HandleToUint64(semaphore), HandleToUint64(queue));
}
}
}
}
for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
if (cb_node) {
cbs.push_back(submit->pCommandBuffers[i]);
for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) {
cbs.push_back(secondaryCmdBuffer->commandBuffer);
UpdateCmdBufImageLayouts(dev_data, secondaryCmdBuffer);
IncrementResources(dev_data, secondaryCmdBuffer);
RecordQueuedQFOTransfers(dev_data, secondaryCmdBuffer);
}
UpdateCmdBufImageLayouts(dev_data, cb_node);
IncrementResources(dev_data, cb_node);
RecordQueuedQFOTransfers(dev_data, cb_node);
}
}
pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals, semaphore_externals,
submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
}
if (early_retire_seq) {
RetireWorkOnQueue(dev_data, pQueue, early_retire_seq);
}
}
static bool PreCallValidateQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
VkFence fence) {
auto pFence = GetFenceNode(dev_data, fence);
bool skip = ValidateFenceForSubmit(dev_data, pFence);
if (skip) {
return true;
}
unordered_set<VkSemaphore> signaled_semaphores;
unordered_set<VkSemaphore> unsignaled_semaphores;
unordered_set<VkSemaphore> internal_semaphores;
vector<VkCommandBuffer> current_cmds;
unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> localImageLayoutMap;
// Now verify each individual submit
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
const VkSubmitInfo *submit = &pSubmits[submit_idx];
for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
skip |= ValidateStageMaskGsTsEnables(dev_data, submit->pWaitDstStageMask[i], "vkQueueSubmit()",
"VUID-VkSubmitInfo-pWaitDstStageMask-00076",
"VUID-VkSubmitInfo-pWaitDstStageMask-00077");
VkSemaphore semaphore = submit->pWaitSemaphores[i];
auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
if (unsignaled_semaphores.count(semaphore) ||
(!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
"Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
HandleToUint64(queue), HandleToUint64(semaphore));
} else {
signaled_semaphores.erase(semaphore);
unsignaled_semaphores.insert(semaphore);
}
}
if (pSemaphore && pSemaphore->scope == kSyncScopeExternalTemporary) {
internal_semaphores.insert(semaphore);
}
}
for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
VkSemaphore semaphore = submit->pSignalSemaphores[i];
auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
"Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
" that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
HandleToUint64(queue), HandleToUint64(semaphore), HandleToUint64(pSemaphore->signaler.first));
} else {
unsignaled_semaphores.erase(semaphore);
signaled_semaphores.insert(semaphore);
}
}
}
QFOTransferCBScoreboards<VkImageMemoryBarrier> qfo_image_scoreboards;
QFOTransferCBScoreboards<VkBufferMemoryBarrier> qfo_buffer_scoreboards;
for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
if (cb_node) {
skip |= ValidateCmdBufImageLayouts(dev_data, cb_node, dev_data->imageLayoutMap, localImageLayoutMap);
current_cmds.push_back(submit->pCommandBuffers[i]);
skip |= ValidatePrimaryCommandBufferState(
dev_data, cb_node, (int)std::count(current_cmds.begin(), current_cmds.end(), submit->pCommandBuffers[i]),
&qfo_image_scoreboards, &qfo_buffer_scoreboards);
skip |= ValidateQueueFamilyIndices(dev_data, cb_node, queue);
// Potential early exit here as bad object state may crash in delayed function calls
if (skip) {
return true;
}
// Call submit-time functions to validate/update state
for (auto &function : cb_node->queue_submit_functions) {
skip |= function();
}
for (auto &function : cb_node->eventUpdates) {
skip |= function(queue);
}
for (auto &function : cb_node->queryUpdates) {
skip |= function(queue);
}
}
}
}
return skip;
}
VKAPI_ATTR VkResult VKAPI_CALL QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);
lock.lock();
PostCallRecordQueueSubmit(dev_data, queue, submitCount, pSubmits, fence);
lock.unlock();
return result;
}
static bool PreCallValidateAllocateMemory(layer_data *dev_data) {
bool skip = false;
if (dev_data->memObjMap.size() >= dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(dev_data->device), kVUIDUndefined,
"Number of currently valid memory objects is not less than the maximum allowed (%u).",
dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount);
}
return skip;
}
static void PostCallRecordAllocateMemory(layer_data *dev_data, const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory) {
    AddMemObjInfo(dev_data, dev_data->device, *pMemory, pAllocateInfo);
}
VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateAllocateMemory(dev_data);
if (!skip) {
lock.unlock();
result = dev_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
lock.lock();
if (VK_SUCCESS == result) {
PostCallRecordAllocateMemory(dev_data, pAllocateInfo, pMemory);
}
}
return result;
}
// For the given obj node, if it is in use, flag a validation error and return callback result, else return false
bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct, const char *caller_name,
const std::string &error_code) {
if (dev_data->instance_data->disabled.object_in_use) return false;
bool skip = false;
if (obj_node->in_use.load()) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[obj_struct.type], obj_struct.handle,
error_code, "Cannot call %s on %s 0x%" PRIx64 " that is currently in use by a command buffer.", caller_name,
object_string[obj_struct.type], obj_struct.handle);
}
return skip;
}
static bool PreCallValidateFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO **mem_info, VK_OBJECT *obj_struct) {
*mem_info = GetMemObjInfo(dev_data, mem);
*obj_struct = {HandleToUint64(mem), kVulkanObjectTypeDeviceMemory};
if (dev_data->instance_data->disabled.free_memory) return false;
bool skip = false;
if (*mem_info) {
skip |= ValidateObjectNotInUse(dev_data, *mem_info, *obj_struct, "vkFreeMemory", "VUID-vkFreeMemory-memory-00677");
}
return skip;
}
static void PreCallRecordFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO *mem_info, VK_OBJECT obj_struct) {
// Clear mem binding for any bound objects
for (auto obj : mem_info->obj_bindings) {
log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, get_debug_report_enum[obj.type], obj.handle,
kVUID_Core_MemTrack_FreedMemRef, "VK Object 0x%" PRIx64 " still has a reference to mem obj 0x%" PRIx64,
HandleToUint64(obj.handle), HandleToUint64(mem_info->mem));
BINDABLE *bindable_state = nullptr;
switch (obj.type) {
case kVulkanObjectTypeImage:
bindable_state = GetImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
break;
case kVulkanObjectTypeBuffer:
bindable_state = GetBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
break;
default:
// Should only have buffer or image objects bound to memory
assert(0);
}
assert(bindable_state);
bindable_state->binding.mem = MEMORY_UNBOUND;
bindable_state->UpdateBoundMemorySet();
}
// Any bound cmd buffers are now invalid
InvalidateCommandBuffers(dev_data, mem_info->cb_bindings, obj_struct);
dev_data->memObjMap.erase(mem);
}
VKAPI_ATTR void VKAPI_CALL FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
DEVICE_MEM_INFO *mem_info = nullptr;
VK_OBJECT obj_struct;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateFreeMemory(dev_data, mem, &mem_info, &obj_struct);
if (!skip) {
if (mem != VK_NULL_HANDLE) {
// Avoid free/alloc race by recording state change before dispatching
PreCallRecordFreeMemory(dev_data, mem, mem_info, obj_struct);
}
lock.unlock();
dev_data->dispatch_table.FreeMemory(device, mem, pAllocator);
}
}
// Validate the given Map memory range. The memory must not already be mapped,
// and the size of the map range must be:
// 1. Not zero
// 2. Within the size of the memory allocation
static bool ValidateMapMemRange(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
bool skip = false;
if (size == 0) {
skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem), kVUID_Core_MemTrack_InvalidMap,
"VkMapMemory: Attempting to map memory range of size zero");
}
auto mem_element = dev_data->memObjMap.find(mem);
if (mem_element != dev_data->memObjMap.end()) {
auto mem_info = mem_element->second.get();
// It is an application error to call VkMapMemory on an object that is already mapped
if (mem_info->mem_range.size != 0) {
skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem), kVUID_Core_MemTrack_InvalidMap,
"VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIx64, HandleToUint64(mem));
}
// Validate that offset + size is within object's allocationSize
if (size == VK_WHOLE_SIZE) {
if (offset >= mem_info->alloc_info.allocationSize) {
                skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               HandleToUint64(mem), kVUID_Core_MemTrack_InvalidMap,
                               "Mapping memory from offset 0x%" PRIx64
                               " with size of VK_WHOLE_SIZE oversteps total allocation size 0x%" PRIx64 ".",
                               offset, mem_info->alloc_info.allocationSize);
}
} else {
if ((offset + size) > mem_info->alloc_info.allocationSize) {
                skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               HandleToUint64(mem), "VUID-vkMapMemory-size-00681",
                               "Mapping memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total allocation size 0x%" PRIx64 ".",
                               offset, size + offset, mem_info->alloc_info.allocationSize);
}
}
}
return skip;
}
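// Examples (illustrative), for an allocation with allocationSize == 0x1000:
//   vkMapMemory(dev, mem, 0x0,    VK_WHOLE_SIZE, 0, &p)  -> valid, maps the whole allocation
//   vkMapMemory(dev, mem, 0x800,  0x800,         0, &p)  -> valid, offset + size == allocationSize
//   vkMapMemory(dev, mem, 0x800,  0x900,         0, &p)  -> error: "VUID-vkMapMemory-size-00681"
//   vkMapMemory(dev, mem, 0x1000, VK_WHOLE_SIZE, 0, &p)  -> error: offset is past the allocation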
static void StoreMemRanges(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
auto mem_info = GetMemObjInfo(dev_data, mem);
if (mem_info) {
mem_info->mem_range.offset = offset;
mem_info->mem_range.size = size;
}
}
static bool DeleteMemRanges(layer_data *dev_data, VkDeviceMemory mem) {
bool skip = false;
auto mem_info = GetMemObjInfo(dev_data, mem);
if (mem_info) {
if (!mem_info->mem_range.size) {
// Valid Usage: memory must currently be mapped
skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem), "VUID-vkUnmapMemory-memory-00689",
"Unmapping Memory without memory being mapped: mem obj 0x%" PRIx64 ".", HandleToUint64(mem));
}
mem_info->mem_range.size = 0;
if (mem_info->shadow_copy) {
free(mem_info->shadow_copy_base);
mem_info->shadow_copy_base = 0;
mem_info->shadow_copy = 0;
}
}
return skip;
}
// Guard value for pad data
static const char NoncoherentMemoryFillValue = 0xb;
static void InitializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
void **ppData) {
auto mem_info = GetMemObjInfo(dev_data, mem);
if (mem_info) {
mem_info->p_driver_data = *ppData;
uint32_t index = mem_info->alloc_info.memoryTypeIndex;
if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
mem_info->shadow_copy = 0;
} else {
if (size == VK_WHOLE_SIZE) {
size = mem_info->alloc_info.allocationSize - offset;
}
mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
assert(SafeModulo(mem_info->shadow_pad_size, dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) ==
0);
// Ensure start of mapped region reflects hardware alignment constraints
uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
// From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
uint64_t start_offset = offset % map_alignment;
// Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
mem_info->shadow_copy_base =
malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));
mem_info->shadow_copy =
reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
~(map_alignment - 1)) +
start_offset;
assert(SafeModulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
map_alignment) == 0);
memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
*ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
}
}
}
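// Shadow-copy layout produced above (illustrative):
//
//   shadow_copy_base
//     |-- alignment slack (< map_alignment + start_offset) --|
//   shadow_copy
//     |-- pad (shadow_pad_size) --|-- size bytes returned to the app --|-- pad (shadow_pad_size) --|
//
// *ppData points at shadow_copy + shadow_pad_size, so out-of-bounds writes land in the
// guard bands, which are pre-filled with NoncoherentMemoryFillValue and can be checked
// when the range is flushed or unmapped.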
// Verify that the state of the fence being waited on is appropriate. That is, a fence
// being waited on should have been submitted on a queue or during acquire next image;
// waiting on a fence that was never submitted can never complete.
static inline bool VerifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
bool skip = false;
auto pFence = GetFenceNode(dev_data, fence);
if (pFence && pFence->scope == kSyncScopeInternal) {
if (pFence->state == FENCE_UNSIGNALED) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
HandleToUint64(fence), kVUID_Core_MemTrack_FenceState,
"%s called for fence 0x%" PRIx64 " which has not been submitted on a Queue or during acquire next image.",
apiCall, HandleToUint64(fence));
}
}
return skip;
}
static void RetireFence(layer_data *dev_data, VkFence fence) {
auto pFence = GetFenceNode(dev_data, fence);
if (pFence && pFence->scope == kSyncScopeInternal) {
if (pFence->signaler.first != VK_NULL_HANDLE) {
            // Fence signaler is a queue -- use this as proof that prior operations on that queue have completed.
RetireWorkOnQueue(dev_data, GetQueueState(dev_data, pFence->signaler.first), pFence->signaler.second);
} else {
            // Fence signaler is the WSI. We're not tracking what the WSI op actually /was/ in CV yet, but we need to mark
// the fence as retired.
pFence->state = FENCE_RETIRED;
}
}
}
static bool PreCallValidateWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences) {
if (dev_data->instance_data->disabled.wait_for_fences) return false;
bool skip = false;
for (uint32_t i = 0; i < fence_count; i++) {
skip |= VerifyWaitFenceState(dev_data, fences[i], "vkWaitForFences");
skip |= VerifyQueueStateToFence(dev_data, fences[i]);
}
return skip;
}
static void PostCallRecordWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences, VkBool32 wait_all) {
// When we know that all fences are complete we can clean/remove their CBs
if ((VK_TRUE == wait_all) || (1 == fence_count)) {
for (uint32_t i = 0; i < fence_count; i++) {
RetireFence(dev_data, fences[i]);
}
}
// NOTE : Alternate case not handled here is when some fences have completed. In
// this case for app to guarantee which fences completed it will have to call
// vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
}
VKAPI_ATTR VkResult VKAPI_CALL WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
uint64_t timeout) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
// Verify fence status of submitted fences
unique_lock_t lock(global_lock);
bool skip = PreCallValidateWaitForFences(dev_data, fenceCount, pFences);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
if (result == VK_SUCCESS) {
lock.lock();
PostCallRecordWaitForFences(dev_data, fenceCount, pFences, waitAll);
lock.unlock();
}
return result;
}
static bool PreCallValidateGetFenceStatus(layer_data *dev_data, VkFence fence) {
if (dev_data->instance_data->disabled.get_fence_state) return false;
return VerifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
}
static void PostCallRecordGetFenceStatus(layer_data *dev_data, VkFence fence) { RetireFence(dev_data, fence); }
VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetFenceStatus(dev_data, fence);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
if (result == VK_SUCCESS) {
lock.lock();
PostCallRecordGetFenceStatus(dev_data, fence);
lock.unlock();
}
return result;
}
static void PostCallRecordGetDeviceQueue(layer_data *dev_data, uint32_t q_family_index, VkQueue queue) {
// Add queue to tracking set only if it is new
auto result = dev_data->queues.emplace(queue);
    if (result.second) {
QUEUE_STATE *queue_state = &dev_data->queueMap[queue];
queue_state->queue = queue;
queue_state->queueFamilyIndex = q_family_index;
queue_state->seq = 0;
}
}
VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
lock_guard_t lock(global_lock);
PostCallRecordGetDeviceQueue(dev_data, queueFamilyIndex, *pQueue);
}
VKAPI_ATTR void VKAPI_CALL GetDeviceQueue2(VkDevice device, VkDeviceQueueInfo2 *pQueueInfo, VkQueue *pQueue) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
dev_data->dispatch_table.GetDeviceQueue2(device, pQueueInfo, pQueue);
lock_guard_t lock(global_lock);
if (*pQueue != VK_NULL_HANDLE) {
PostCallRecordGetDeviceQueue(dev_data, pQueueInfo->queueFamilyIndex, *pQueue);
}
}
static bool PreCallValidateQueueWaitIdle(layer_data *dev_data, VkQueue queue, QUEUE_STATE **queue_state) {
*queue_state = GetQueueState(dev_data, queue);
if (dev_data->instance_data->disabled.queue_wait_idle) return false;
return VerifyQueueStateToSeq(dev_data, *queue_state, (*queue_state)->seq + (*queue_state)->submissions.size());
}
static void PostCallRecordQueueWaitIdle(layer_data *dev_data, QUEUE_STATE *queue_state) {
RetireWorkOnQueue(dev_data, queue_state, queue_state->seq + queue_state->submissions.size());
}
VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
QUEUE_STATE *queue_state = nullptr;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateQueueWaitIdle(dev_data, queue, &queue_state);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
if (VK_SUCCESS == result) {
lock.lock();
PostCallRecordQueueWaitIdle(dev_data, queue_state);
lock.unlock();
}
return result;
}
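// Device-level wait: apply the same completed-to-seq check to every queue tracked for this device.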
static bool PreCallValidateDeviceWaitIdle(layer_data *dev_data) {
if (dev_data->instance_data->disabled.device_wait_idle) return false;
bool skip = false;
for (auto &queue : dev_data->queueMap) {
skip |= VerifyQueueStateToSeq(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
}
return skip;
}
static void PostCallRecordDeviceWaitIdle(layer_data *dev_data) {
for (auto &queue : dev_data->queueMap) {
RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
}
}
VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDeviceWaitIdle(dev_data);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
if (VK_SUCCESS == result) {
lock.lock();
PostCallRecordDeviceWaitIdle(dev_data);
lock.unlock();
}
return result;
}
static bool PreCallValidateDestroyFence(layer_data *dev_data, VkFence fence, FENCE_NODE **fence_node, VK_OBJECT *obj_struct) {
*fence_node = GetFenceNode(dev_data, fence);
*obj_struct = {HandleToUint64(fence), kVulkanObjectTypeFence};
if (dev_data->instance_data->disabled.destroy_fence) return false;
bool skip = false;
if (*fence_node) {
if ((*fence_node)->scope == kSyncScopeInternal && (*fence_node)->state == FENCE_INFLIGHT) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
HandleToUint64(fence), "VUID-vkDestroyFence-fence-01120", "Fence 0x%" PRIx64 " is in use.",
HandleToUint64(fence));
}
}
return skip;
}
static void PreCallRecordDestroyFence(layer_data *dev_data, VkFence fence) { dev_data->fenceMap.erase(fence); }
VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
// Common data objects used pre & post call
FENCE_NODE *fence_node = nullptr;
VK_OBJECT obj_struct;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroyFence(dev_data, fence, &fence_node, &obj_struct);
if (!skip) {
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroyFence(dev_data, fence);
lock.unlock();
dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
}
}
static bool PreCallValidateDestroySemaphore(layer_data *dev_data, VkSemaphore semaphore, SEMAPHORE_NODE **sema_node,
VK_OBJECT *obj_struct) {
*sema_node = GetSemaphoreNode(dev_data, semaphore);
*obj_struct = {HandleToUint64(semaphore), kVulkanObjectTypeSemaphore};
if (dev_data->instance_data->disabled.destroy_semaphore) return false;
bool skip = false;
if (*sema_node) {
skip |= ValidateObjectNotInUse(dev_data, *sema_node, *obj_struct, "vkDestroySemaphore",
"VUID-vkDestroySemaphore-semaphore-01137");
}
return skip;
}
static void PreCallRecordDestroySemaphore(layer_data *dev_data, VkSemaphore sema) { dev_data->semaphoreMap.erase(sema); }
VKAPI_ATTR void VKAPI_CALL DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
SEMAPHORE_NODE *sema_node;
VK_OBJECT obj_struct;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroySemaphore(dev_data, semaphore, &sema_node, &obj_struct);
if (!skip) {
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroySemaphore(dev_data, semaphore);
lock.unlock();
dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
}
}
static bool PreCallValidateDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE **event_state, VK_OBJECT *obj_struct) {
*event_state = GetEventNode(dev_data, event);
*obj_struct = {HandleToUint64(event), kVulkanObjectTypeEvent};
if (dev_data->instance_data->disabled.destroy_event) return false;
bool skip = false;
if (*event_state) {
skip |= ValidateObjectNotInUse(dev_data, *event_state, *obj_struct, "vkDestroyEvent", "VUID-vkDestroyEvent-event-01145");
}
return skip;
}
static void PreCallRecordDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE *event_state, VK_OBJECT obj_struct) {
InvalidateCommandBuffers(dev_data, event_state->cb_bindings, obj_struct);
dev_data->eventMap.erase(event);
}
VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
EVENT_STATE *event_state = nullptr;
VK_OBJECT obj_struct;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroyEvent(dev_data, event, &event_state, &obj_struct);
if (!skip) {
if (event != VK_NULL_HANDLE) {
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroyEvent(dev_data, event, event_state, obj_struct);
}
lock.unlock();
dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
}
}
static bool PreCallValidateDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE **qp_state,
VK_OBJECT *obj_struct) {
*qp_state = GetQueryPoolNode(dev_data, query_pool);
*obj_struct = {HandleToUint64(query_pool), kVulkanObjectTypeQueryPool};
if (dev_data->instance_data->disabled.destroy_query_pool) return false;
bool skip = false;
if (*qp_state) {
skip |= ValidateObjectNotInUse(dev_data, *qp_state, *obj_struct, "vkDestroyQueryPool",
"VUID-vkDestroyQueryPool-queryPool-00793");
}
return skip;
}
static void PreCallRecordDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE *qp_state,
VK_OBJECT obj_struct) {
InvalidateCommandBuffers(dev_data, qp_state->cb_bindings, obj_struct);
dev_data->queryPoolMap.erase(query_pool);
}
VKAPI_ATTR void VKAPI_CALL DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
QUERY_POOL_NODE *qp_state = nullptr;
VK_OBJECT obj_struct;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroyQueryPool(dev_data, queryPool, &qp_state, &obj_struct);
if (!skip) {
if (queryPool != VK_NULL_HANDLE) {
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroyQueryPool(dev_data, queryPool, qp_state, obj_struct);
}
lock.unlock();
dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
}
}
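// Validate that the requested queries can be read back, flagging per-query error cases:
// a query that is available but still in flight (with no wait-event recorded before its reset),
// a query that is unavailable and not in flight, and a query index that was never initialized.
// Also gathers the set of queries currently in flight for use by the post-call record step.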
static bool PreCallValidateGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
uint32_t query_count, VkQueryResultFlags flags,
unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
bool skip = false;
auto query_pool_state = dev_data->queryPoolMap.find(query_pool);
if (query_pool_state != dev_data->queryPoolMap.end()) {
if ((query_pool_state->second.createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) && (flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
"VUID-vkGetQueryPoolResults-queryType-00818",
"QueryPool 0x%" PRIx64
" was created with a queryType of VK_QUERY_TYPE_TIMESTAMP but flags contains VK_QUERY_RESULT_PARTIAL_BIT.",
HandleToUint64(query_pool));
}
}
// TODO: clean this up, it's insanely wasteful.
for (auto cmd_buffer : dev_data->commandBufferMap) {
if (cmd_buffer.second->in_use.load()) {
for (auto query_state_pair : cmd_buffer.second->queryToStateMap) {
(*queries_in_flight)[query_state_pair.first].push_back(cmd_buffer.first);
}
}
}
if (dev_data->instance_data->disabled.get_query_pool_results) return false;
for (uint32_t i = 0; i < query_count; ++i) {
QueryObject query = {query_pool, first_query + i};
auto qif_pair = queries_in_flight->find(query);
auto query_state_pair = dev_data->queryToStateMap.find(query);
if (query_state_pair != dev_data->queryToStateMap.end()) {
// Available and in flight
if (qif_pair != queries_in_flight->end()) {
if (query_state_pair->second) {
for (auto cmd_buffer : qif_pair->second) {
auto cb = GetCBNode(dev_data, cmd_buffer);
auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
if (query_event_pair == cb->waitedEventsBeforeQueryReset.end()) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, kVUID_Core_DrawState_InvalidQuery,
"Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
HandleToUint64(query_pool), first_query + i);
}
}
}
} else if (!query_state_pair->second) { // Unavailable and Not in flight
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
kVUID_Core_DrawState_InvalidQuery,
"Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
HandleToUint64(query_pool), first_query + i);
}
} else { // Uninitialized
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
kVUID_Core_DrawState_InvalidQuery,
"Cannot get query results on queryPool 0x%" PRIx64
" with index %d as data has not been collected for this index.",
HandleToUint64(query_pool), first_query + i);
}
}
return skip;
}
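// For queries read back while in flight, mark the events that were waited on before the query
// reset as needing to be signaled again, so subsequent resets are tracked correctly.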
static void PostCallRecordGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
uint32_t query_count,
unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
for (uint32_t i = 0; i < query_count; ++i) {
QueryObject query = {query_pool, first_query + i};
auto qif_pair = queries_in_flight->find(query);
auto query_state_pair = dev_data->queryToStateMap.find(query);
if (query_state_pair != dev_data->queryToStateMap.end()) {
// Available and in flight
if (qif_pair != queries_in_flight->end() && query_state_pair->second) {
for (auto cmd_buffer : qif_pair->second) {
auto cb = GetCBNode(dev_data, cmd_buffer);
auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
if (query_event_pair != cb->waitedEventsBeforeQueryReset.end()) {
for (auto event : query_event_pair->second) {
dev_data->eventMap[event].needsSignaled = true;
}
}
}
}
}
}
}
VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unordered_map<QueryObject, vector<VkCommandBuffer>> queries_in_flight;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, flags, &queries_in_flight);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result =
dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
lock.lock();
PostCallRecordGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, &queries_in_flight);
lock.unlock();
return result;
}
// Return true if the given ranges intersect, else false
// Prereq: For both ranges, range->end - range->start > 0. That case should already have resulted
// in an error, so it is not re-checked here.
// A comparison between a linear and a non-linear range is padded to bufferImageGranularity.
// In the padded case, if an alias is encountered then a validation warning is reported and *skip
// may be set by the callback function, so the caller should merge in the skip value whenever the padded case is possible.
// The aliasing warning can be skipped by passing skip_checks=true, for call sites outside the validation path.
static bool RangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip,
bool skip_checks) {
*skip = false;
auto r1_start = range1->start;
auto r1_end = range1->end;
auto r2_start = range2->start;
auto r2_end = range2->end;
VkDeviceSize pad_align = 1;
if (range1->linear != range2->linear) {
pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
}
if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1))) return false;
if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1))) return false;
if (!skip_checks && (range1->linear != range2->linear)) {
// In linear vs. non-linear case, warn of aliasing
const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
const char *r1_type_str = range1->image ? "image" : "buffer";
const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
const char *r2_type_str = range2->image ? "image" : "buffer";
auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
*skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, obj_type, range1->handle, kVUID_Core_MemTrack_InvalidAliasing,
"%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
" which may indicate a bug. For further info refer to the Buffer-Image Granularity section of the Vulkan "
"specification. "
"(https://www.khronos.org/registry/vulkan/specs/1.0-extensions/xhtml/vkspec.html#resources-bufferimagegranularity)",
r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
}
// Ranges intersect
return true;
}
// Simplified RangesIntersect that calls above function to check range1 for intersection with offset & end addresses
bool RangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
// Create a local MEMORY_RANGE struct to wrap offset/size
MEMORY_RANGE range_wrap;
// Sync linear with range1 to avoid padding and the potential aliasing-warning case
range_wrap.linear = range1->linear;
range_wrap.start = offset;
range_wrap.end = end;
bool tmp_bool;
return RangesIntersect(dev_data, range1, &range_wrap, &tmp_bool, true);
}
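// Validate a proposed memory binding before it is recorded: build a candidate MEMORY_RANGE,
// warn on linear/non-linear aliasing with any range already bound to this allocation, and
// error if memoryOffset falls outside the allocation.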
static bool ValidateInsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info,
VkDeviceSize memoryOffset, VkMemoryRequirements memRequirements, bool is_image,
bool is_linear, const char *api_name) {
bool skip = false;
MEMORY_RANGE range;
range.image = is_image;
range.handle = handle;
range.linear = is_linear;
range.memory = mem_info->mem;
range.start = memoryOffset;
range.size = memRequirements.size;
range.end = memoryOffset + memRequirements.size - 1;
range.aliases.clear();
// Check for aliasing problems.
for (auto &obj_range_pair : mem_info->bound_ranges) {
auto check_range = &obj_range_pair.second;
bool intersection_error = false;
if (RangesIntersect(dev_data, &range, check_range, &intersection_error, false)) {
skip |= intersection_error;
range.aliases.insert(check_range);
}
}
if (memoryOffset >= mem_info->alloc_info.allocationSize) {
std::string error_code =
is_image ? "VUID-vkBindImageMemory-memoryOffset-01046" : "VUID-vkBindBufferMemory-memoryOffset-01031";
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem_info->mem), error_code,
"In %s, attempting to bind memory (0x%" PRIx64 ") to object (0x%" PRIx64 "), memoryOffset=0x%" PRIxLEAST64
" must be less than the memory allocation size 0x%" PRIxLEAST64 ".",
api_name, HandleToUint64(mem_info->mem), HandleToUint64(handle), memoryOffset,
mem_info->alloc_info.allocationSize);
}
return skip;
}
// Object with the given handle is being bound to memory w/ the given mem_info struct.
// Track the newly bound memory range with the given memoryOffset, and
// scan all previously bound ranges, cross-linking the aliases of any ranges that overlap the new one.
// Overlap errors are reported separately by ValidateInsertMemoryRange(); this routine only records state.
// is_image indicates an image object, otherwise handle is for a buffer
// is_linear indicates a buffer or linear image
static void InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
MEMORY_RANGE range;
range.image = is_image;
range.handle = handle;
range.linear = is_linear;
range.memory = mem_info->mem;
range.start = memoryOffset;
range.size = memRequirements.size;
range.end = memoryOffset + memRequirements.size - 1;
range.aliases.clear();
// Update Memory aliasing
// Save aliased ranges so we can copy into final map entry below. Can't do it in loop b/c we don't yet have final ptr. If we
// inserted into map before loop to get the final ptr, then we may enter loop when not needed & we check range against itself
std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
for (auto &obj_range_pair : mem_info->bound_ranges) {
auto check_range = &obj_range_pair.second;
bool intersection_error = false;
if (RangesIntersect(dev_data, &range, check_range, &intersection_error, true)) {
range.aliases.insert(check_range);
tmp_alias_ranges.insert(check_range);
}
}
mem_info->bound_ranges[handle] = std::move(range);
for (auto tmp_range : tmp_alias_ranges) {
tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
}
if (is_image)
mem_info->bound_images.insert(handle);
else
mem_info->bound_buffers.insert(handle);
}
static bool ValidateInsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info,
VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, bool is_linear,
const char *api_name) {
return ValidateInsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear, api_name);
}
static void InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
VkMemoryRequirements mem_reqs, bool is_linear) {
InsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear);
}
static bool ValidateInsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info,
VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, const char *api_name) {
return ValidateInsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true, api_name);
}
static void InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
VkMemoryRequirements mem_reqs) {
InsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true);
}
// Remove the MEMORY_RANGE struct for the given handle from bound_ranges of mem_info
// is_image indicates if handle is for image or buffer
// This function also removes the handle from the appropriate bound_images/bound_buffers
// set and cleans up any aliases of the range being removed.
static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
auto erase_range = &mem_info->bound_ranges[handle];
for (auto alias_range : erase_range->aliases) {
alias_range->aliases.erase(erase_range);
}
erase_range->aliases.clear();
mem_info->bound_ranges.erase(handle);
if (is_image) {
mem_info->bound_images.erase(handle);
} else {
mem_info->bound_buffers.erase(handle);
}
}
void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }
void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }
VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
BUFFER_STATE *buffer_state = nullptr;
VK_OBJECT obj_struct;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroyBuffer(dev_data, buffer, &buffer_state, &obj_struct);
if (!skip) {
if (buffer != VK_NULL_HANDLE) {
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroyBuffer(dev_data, buffer, buffer_state, obj_struct);
}
lock.unlock();
dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
}
}
VKAPI_ATTR void VKAPI_CALL DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
// Common data objects used pre & post call
BUFFER_VIEW_STATE *buffer_view_state = nullptr;
VK_OBJECT obj_struct;
unique_lock_t lock(global_lock);
// Validate state before calling down chain, update common data if we'll be calling down chain
bool skip = PreCallValidateDestroyBufferView(dev_data, bufferView, &buffer_view_state, &obj_struct);
if (!skip) {
if (bufferView != VK_NULL_HANDLE) {
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroyBufferView(dev_data, bufferView, buffer_view_state, obj_struct);
}
lock.unlock();
dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
}
}
VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
IMAGE_STATE *image_state = nullptr;
VK_OBJECT obj_struct;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroyImage(dev_data, image, &image_state, &obj_struct);
if (!skip) {
if (image != VK_NULL_HANDLE) {
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroyImage(dev_data, image, image_state, obj_struct);
}
lock.unlock();
dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
}
}
static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
const char *funcName, std::string msgCode) {
bool skip = false;
if (((1u << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem_info->mem), msgCode,
"%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
"type (0x%X) of this memory object 0x%" PRIx64 ".",
funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex, HandleToUint64(mem_info->mem));
}
return skip;
}
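// Validate vkBindBufferMemory(): check the mem binding itself, warn if memory requirements were
// never queried, then verify range, memory type, alignment, size, and dedicated-allocation rules.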
static bool PreCallValidateBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
VkDeviceSize memoryOffset, const char *api_name) {
bool skip = false;
if (buffer_state) {
unique_lock_t lock(global_lock);
// Track objects tied to memory
uint64_t buffer_handle = HandleToUint64(buffer);
skip = ValidateSetMemBinding(dev_data, mem, buffer_handle, kVulkanObjectTypeBuffer, api_name);
if (!buffer_state->memory_requirements_checked) {
// There's not an explicit requirement in the spec to call vkGetBufferMemoryRequirements() prior to calling
// BindBufferMemory, but it's implied in that memory being bound must conform with VkMemoryRequirements from
// vkGetBufferMemoryRequirements()
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
buffer_handle, kVUID_Core_DrawState_InvalidBuffer,
"%s: Binding memory to buffer 0x%" PRIx64
" but vkGetBufferMemoryRequirements() has not been called on that buffer.",
api_name, buffer_handle);
// Make the call for them so we can verify the state
lock.unlock();
dev_data->dispatch_table.GetBufferMemoryRequirements(dev_data->device, buffer, &buffer_state->requirements);
lock.lock();
}
// Validate bound memory range information
const auto mem_info = GetMemObjInfo(dev_data, mem);
if (mem_info) {
skip |= ValidateInsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements, api_name);
skip |= ValidateMemoryTypes(dev_data, mem_info, buffer_state->requirements.memoryTypeBits, api_name,
"VUID-vkBindBufferMemory-memory-01035");
}
// Validate memory requirements alignment
if (SafeModulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
buffer_handle, "VUID-vkBindBufferMemory-memoryOffset-01036",
"%s: memoryOffset is 0x%" PRIxLEAST64
" but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
", returned from a call to vkGetBufferMemoryRequirements with buffer.",
api_name, memoryOffset, buffer_state->requirements.alignment);
}
if (mem_info) {
// Validate memory requirements size
if (buffer_state->requirements.size > (mem_info->alloc_info.allocationSize - memoryOffset)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
buffer_handle, "VUID-vkBindBufferMemory-size-01037",
"%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
" but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
", returned from a call to vkGetBufferMemoryRequirements with buffer.",
api_name, mem_info->alloc_info.allocationSize - memoryOffset, buffer_state->requirements.size);
}
// Validate dedicated allocation
if (mem_info->is_dedicated && ((mem_info->dedicated_buffer != buffer) || (memoryOffset != 0))) {
// TODO: Add vkBindBufferMemory2KHR error message when added to spec.
auto validation_error = kVUIDUndefined;
if (strcmp(api_name, "vkBindBufferMemory()") == 0) {
validation_error = "VUID-vkBindBufferMemory-memory-01508";
}
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
buffer_handle, validation_error,
"%s: for dedicated memory allocation 0x%" PRIxLEAST64
", VkMemoryDedicatedAllocateInfoKHR::buffer 0x%" PRIXLEAST64 " must be equal to buffer 0x%" PRIxLEAST64
" and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
api_name, HandleToUint64(mem), HandleToUint64(mem_info->dedicated_buffer), buffer_handle, memoryOffset);
}
}
}
return skip;
}
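// Record a successful buffer binding: insert the bound memory range and link the buffer to the memory object.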
static void PostCallRecordBindBufferMemory(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VkDeviceMemory mem,
VkDeviceSize memoryOffset, const char *api_name) {
if (buffer_state) {
unique_lock_t lock(global_lock);
// Track bound memory range information
auto mem_info = GetMemObjInfo(dev_data, mem);
if (mem_info) {
InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements);
}
// Track objects tied to memory
uint64_t buffer_handle = HandleToUint64(buffer);
SetMemBinding(dev_data, mem, buffer_state, memoryOffset, buffer_handle, kVulkanObjectTypeBuffer, api_name);
}
}
VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
BUFFER_STATE *buffer_state;
{
unique_lock_t lock(global_lock);
buffer_state = GetBufferState(dev_data, buffer);
}
bool skip = PreCallValidateBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset, "vkBindBufferMemory()");
if (!skip) {
result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
if (result == VK_SUCCESS) {
PostCallRecordBindBufferMemory(dev_data, buffer, buffer_state, mem, memoryOffset, "vkBindBufferMemory()");
}
}
return result;
}
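// Batched form of the bind-buffer validation: resolve each buffer's state up front, then run the
// single-bind validation once per pBindInfos element with an indexed api_name for error messages.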
static bool PreCallValidateBindBufferMemory2(layer_data *dev_data, std::vector<BUFFER_STATE *> *buffer_state,
uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR *pBindInfos) {
{
unique_lock_t lock(global_lock);
for (uint32_t i = 0; i < bindInfoCount; i++) {
(*buffer_state)[i] = GetBufferState(dev_data, pBindInfos[i].buffer);
}
}
bool skip = false;
char api_name[64];
for (uint32_t i = 0; i < bindInfoCount; i++) {
snprintf(api_name, sizeof(api_name), "vkBindBufferMemory2() pBindInfos[%u]", i);
skip |= PreCallValidateBindBufferMemory(dev_data, pBindInfos[i].buffer, (*buffer_state)[i], pBindInfos[i].memory,
pBindInfos[i].memoryOffset, api_name);
}
return skip;
}
static void PostCallRecordBindBufferMemory2(layer_data *dev_data, const std::vector<BUFFER_STATE *> &buffer_state,
uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR *pBindInfos) {
for (uint32_t i = 0; i < bindInfoCount; i++) {
PostCallRecordBindBufferMemory(dev_data, pBindInfos[i].buffer, buffer_state[i], pBindInfos[i].memory,
pBindInfos[i].memoryOffset, "vkBindBufferMemory2()");
}
}
VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory2(VkDevice device, uint32_t bindInfoCount,
const VkBindBufferMemoryInfoKHR *pBindInfos) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
std::vector<BUFFER_STATE *> buffer_state(bindInfoCount);
if (!PreCallValidateBindBufferMemory2(dev_data, &buffer_state, bindInfoCount, pBindInfos)) {
result = dev_data->dispatch_table.BindBufferMemory2(device, bindInfoCount, pBindInfos);
if (result == VK_SUCCESS) {
PostCallRecordBindBufferMemory2(dev_data, buffer_state, bindInfoCount, pBindInfos);
}
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount,
const VkBindBufferMemoryInfoKHR *pBindInfos) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
std::vector<BUFFER_STATE *> buffer_state(bindInfoCount);
if (!PreCallValidateBindBufferMemory2(dev_data, &buffer_state, bindInfoCount, pBindInfos)) {
result = dev_data->dispatch_table.BindBufferMemory2KHR(device, bindInfoCount, pBindInfos);
if (result == VK_SUCCESS) {
PostCallRecordBindBufferMemory2(dev_data, buffer_state, bindInfoCount, pBindInfos);
}
}
return result;
}
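// Cache the returned requirements on the buffer state and note that the app has queried them,
// which silences the "requirements not checked" warning at bind time.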
static void PostCallRecordGetBufferMemoryRequirements(layer_data *dev_data, VkBuffer buffer,
VkMemoryRequirements *pMemoryRequirements) {
BUFFER_STATE *buffer_state;
{
unique_lock_t lock(global_lock);
buffer_state = GetBufferState(dev_data, buffer);
}
if (buffer_state) {
buffer_state->requirements = *pMemoryRequirements;
buffer_state->memory_requirements_checked = true;
}
}
VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer,
VkMemoryRequirements *pMemoryRequirements) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
PostCallRecordGetBufferMemoryRequirements(dev_data, buffer, pMemoryRequirements);
}
VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements2(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo,
VkMemoryRequirements2KHR *pMemoryRequirements) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
dev_data->dispatch_table.GetBufferMemoryRequirements2(device, pInfo, pMemoryRequirements);
PostCallRecordGetBufferMemoryRequirements(dev_data, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}
VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements2KHR(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo,
VkMemoryRequirements2KHR *pMemoryRequirements) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
dev_data->dispatch_table.GetBufferMemoryRequirements2KHR(device, pInfo, pMemoryRequirements);
PostCallRecordGetBufferMemoryRequirements(dev_data, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}
static void PostCallRecordGetImageMemoryRequirements(layer_data *dev_data, VkImage image,
VkMemoryRequirements *pMemoryRequirements) {
IMAGE_STATE *image_state;
{
unique_lock_t lock(global_lock);
image_state = GetImageState(dev_data, image);
}
if (image_state) {
image_state->requirements = *pMemoryRequirements;
image_state->memory_requirements_checked = true;
}
}
VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
dev_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
PostCallRecordGetImageMemoryRequirements(dev_data, image, pMemoryRequirements);
}
VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2KHR *pInfo,
VkMemoryRequirements2KHR *pMemoryRequirements) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
dev_data->dispatch_table.GetImageMemoryRequirements2(device, pInfo, pMemoryRequirements);
PostCallRecordGetImageMemoryRequirements(dev_data, pInfo->image, &pMemoryRequirements->memoryRequirements);
}
VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2KHR *pInfo,
VkMemoryRequirements2KHR *pMemoryRequirements) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
dev_data->dispatch_table.GetImageMemoryRequirements2KHR(device, pInfo, pMemoryRequirements);
PostCallRecordGetImageMemoryRequirements(dev_data, pInfo->image, &pMemoryRequirements->memoryRequirements);
}
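// Cache the sparse requirements on the image state and note whether any aspect requires
// metadata, which later sparse-binding validation depends on.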
static void PostCallRecordGetImageSparseMemoryRequirements(IMAGE_STATE *image_state, uint32_t req_count,
VkSparseImageMemoryRequirements *reqs) {
image_state->get_sparse_reqs_called = true;
image_state->sparse_requirements.resize(req_count);
if (reqs) {
std::copy(reqs, reqs + req_count, image_state->sparse_requirements.begin());
}
for (const auto &req : image_state->sparse_requirements) {
if (req.formatProperties.aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) {
image_state->sparse_metadata_required = true;
}
}
}
VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements(VkDevice device, VkImage image, uint32_t *pSparseMemoryRequirementCount,
VkSparseImageMemoryRequirements *pSparseMemoryRequirements) {
// TODO : Implement tracking here, just passthrough initially
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
dev_data->dispatch_table.GetImageSparseMemoryRequirements(device, image, pSparseMemoryRequirementCount,
pSparseMemoryRequirements);
unique_lock_t lock(global_lock);
auto image_state = GetImageState(dev_data, image);
PostCallRecordGetImageSparseMemoryRequirements(image_state, *pSparseMemoryRequirementCount, pSparseMemoryRequirements);
}
static void PostCallRecordGetImageSparseMemoryRequirements2(IMAGE_STATE *image_state, uint32_t req_count,
VkSparseImageMemoryRequirements2KHR *reqs) {
// If reqs is null the caller is only querying the count, so there is nothing to copy.
if (reqs == nullptr) {
return;
}
std::vector<VkSparseImageMemoryRequirements> sparse_reqs(req_count);
// Migrate to old struct type for common handling with GetImageSparseMemoryRequirements()
for (uint32_t i = 0; i < req_count; ++i) {
assert(!reqs[i].pNext); // TODO: If an extension is ever added here we need to handle it
sparse_reqs[i] = reqs[i].memoryRequirements;
}
PostCallRecordGetImageSparseMemoryRequirements(image_state, req_count, sparse_reqs.data());
}
VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements2(VkDevice device, const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
uint32_t *pSparseMemoryRequirementCount,
VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
// TODO : Implement tracking here, just passthrough initially
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
dev_data->dispatch_table.GetImageSparseMemoryRequirements2(device, pInfo, pSparseMemoryRequirementCount,
pSparseMemoryRequirements);
unique_lock_t lock(global_lock);
auto image_state = GetImageState(dev_data, pInfo->image);
PostCallRecordGetImageSparseMemoryRequirements2(image_state, *pSparseMemoryRequirementCount, pSparseMemoryRequirements);
}
VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements2KHR(VkDevice device,
const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
uint32_t *pSparseMemoryRequirementCount,
VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
// TODO : Implement tracking here, just passthrough initially
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
dev_data->dispatch_table.GetImageSparseMemoryRequirements2KHR(device, pInfo, pSparseMemoryRequirementCount,
pSparseMemoryRequirements);
unique_lock_t lock(global_lock);
auto image_state = GetImageState(dev_data, pInfo->image);
PostCallRecordGetImageSparseMemoryRequirements2(image_state, *pSparseMemoryRequirementCount, pSparseMemoryRequirements);
}
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format,
VkImageType type, VkSampleCountFlagBits samples,
VkImageUsageFlags usage, VkImageTiling tiling,
uint32_t *pPropertyCount,
VkSparseImageFormatProperties *pProperties) {
// TODO : Implement this intercept, track sparse image format properties and make sure they are obeyed.
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
instance_data->dispatch_table.GetPhysicalDeviceSparseImageFormatProperties(physicalDevice, format, type, samples, usage, tiling,
pPropertyCount, pProperties);
}
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties2(
VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2KHR *pFormatInfo, uint32_t *pPropertyCount,
VkSparseImageFormatProperties2KHR *pProperties) {
// TODO : Implement this intercept, track sparse image format properties and make sure they are obeyed.
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
instance_data->dispatch_table.GetPhysicalDeviceSparseImageFormatProperties2(physicalDevice, pFormatInfo, pPropertyCount,
pProperties);
}
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties2KHR(
VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2KHR *pFormatInfo, uint32_t *pPropertyCount,
VkSparseImageFormatProperties2KHR *pProperties) {
// TODO : Implement this intercept, track sparse image format properties and make sure they are obeyed.
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
instance_data->dispatch_table.GetPhysicalDeviceSparseImageFormatProperties2KHR(physicalDevice, pFormatInfo, pPropertyCount,
pProperties);
}
VKAPI_ATTR void VKAPI_CALL DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
// Common data objects used pre & post call
IMAGE_VIEW_STATE *image_view_state = nullptr;
VK_OBJECT obj_struct;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroyImageView(dev_data, imageView, &image_view_state, &obj_struct);
if (!skip) {
if (imageView != VK_NULL_HANDLE) {
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroyImageView(dev_data, imageView, image_view_state, obj_struct);
}
lock.unlock();
dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
}
}
static void PreCallRecordDestroyShaderModule(layer_data *dev_data, VkShaderModule shaderModule) {
dev_data->shaderModuleMap.erase(shaderModule);
}
VKAPI_ATTR void VKAPI_CALL DestroyShaderModule(VkDevice device, VkShaderModule shaderModule,
const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroyShaderModule(dev_data, shaderModule);
lock.unlock();
dev_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
}
static bool PreCallValidateDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE **pipeline_state,
VK_OBJECT *obj_struct) {
*pipeline_state = GetPipelineState(dev_data, pipeline);
*obj_struct = {HandleToUint64(pipeline), kVulkanObjectTypePipeline};
if (dev_data->instance_data->disabled.destroy_pipeline) return false;
bool skip = false;
if (*pipeline_state) {
skip |= ValidateObjectNotInUse(dev_data, *pipeline_state, *obj_struct, "vkDestroyPipeline",
"VUID-vkDestroyPipeline-pipeline-00765");
}
return skip;
}
static void PreCallRecordDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE *pipeline_state,
VK_OBJECT obj_struct) {
// Any bound cmd buffers are now invalid
InvalidateCommandBuffers(dev_data, pipeline_state->cb_bindings, obj_struct);
dev_data->pipelineMap.erase(pipeline);
}
VKAPI_ATTR void VKAPI_CALL DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
PIPELINE_STATE *pipeline_state = nullptr;
VK_OBJECT obj_struct;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroyPipeline(dev_data, pipeline, &pipeline_state, &obj_struct);
if (!skip) {
if (pipeline != VK_NULL_HANDLE) {
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroyPipeline(dev_data, pipeline, pipeline_state, obj_struct);
}
lock.unlock();
dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
}
}
static void PreCallRecordDestroyPipelineLayout(layer_data *dev_data, VkPipelineLayout pipelineLayout) {
dev_data->pipelineLayoutMap.erase(pipelineLayout);
}
VKAPI_ATTR void VKAPI_CALL DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout,
const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroyPipelineLayout(dev_data, pipelineLayout);
lock.unlock();
dev_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
}
static bool PreCallValidateDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE **sampler_state,
VK_OBJECT *obj_struct) {
*sampler_state = GetSamplerState(dev_data, sampler);
*obj_struct = {HandleToUint64(sampler), kVulkanObjectTypeSampler};
if (dev_data->instance_data->disabled.destroy_sampler) return false;
bool skip = false;
if (*sampler_state) {
skip |= ValidateObjectNotInUse(dev_data, *sampler_state, *obj_struct, "vkDestroySampler",
"VUID-vkDestroySampler-sampler-01082");
}
return skip;
}
static void PreCallRecordDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE *sampler_state,
VK_OBJECT obj_struct) {
// Any bound cmd buffers are now invalid
if (sampler_state) InvalidateCommandBuffers(dev_data, sampler_state->cb_bindings, obj_struct);
dev_data->samplerMap.erase(sampler);
}
VKAPI_ATTR void VKAPI_CALL DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
SAMPLER_STATE *sampler_state = nullptr;
VK_OBJECT obj_struct;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroySampler(dev_data, sampler, &sampler_state, &obj_struct);
if (!skip) {
if (sampler != VK_NULL_HANDLE) {
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroySampler(dev_data, sampler, sampler_state, obj_struct);
}
lock.unlock();
dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
}
}
static void PreCallRecordDestroyDescriptorSetLayout(layer_data *dev_data, VkDescriptorSetLayout ds_layout) {
auto layout_it = dev_data->descriptorSetLayoutMap.find(ds_layout);
if (layout_it != dev_data->descriptorSetLayoutMap.end()) {
layout_it->second.get()->MarkDestroyed();
dev_data->descriptorSetLayoutMap.erase(layout_it);
}
}
VKAPI_ATTR void VKAPI_CALL DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout,
const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
{
lock_guard_t lock(global_lock);
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroyDescriptorSetLayout(dev_data, descriptorSetLayout);
}
dev_data->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
}
static bool PreCallValidateDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool pool,
DESCRIPTOR_POOL_STATE **desc_pool_state, VK_OBJECT *obj_struct) {
*desc_pool_state = GetDescriptorPoolState(dev_data, pool);
*obj_struct = {HandleToUint64(pool), kVulkanObjectTypeDescriptorPool};
if (dev_data->instance_data->disabled.destroy_descriptor_pool) return false;
bool skip = false;
if (*desc_pool_state) {
skip |= ValidateObjectNotInUse(dev_data, *desc_pool_state, *obj_struct, "vkDestroyDescriptorPool",
"VUID-vkDestroyDescriptorPool-descriptorPool-00303");
}
return skip;
}
static void PreCallRecordDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool descriptorPool,
DESCRIPTOR_POOL_STATE *desc_pool_state, VK_OBJECT obj_struct) {
if (desc_pool_state) {
// Any bound cmd buffers are now invalid
InvalidateCommandBuffers(dev_data, desc_pool_state->cb_bindings, obj_struct);
// Free sets that were in this pool
for (auto ds : desc_pool_state->sets) {
FreeDescriptorSet(dev_data, ds);
}
dev_data->descriptorPoolMap.erase(descriptorPool);
delete desc_pool_state;
}
}
VKAPI_ATTR void VKAPI_CALL DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
DESCRIPTOR_POOL_STATE *desc_pool_state = nullptr;
VK_OBJECT obj_struct;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroyDescriptorPool(dev_data, descriptorPool, &desc_pool_state, &obj_struct);
if (!skip) {
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroyDescriptorPool(dev_data, descriptorPool, desc_pool_state, obj_struct);
lock.unlock();
dev_data->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
}
}
// Verify that the command buffer in the given cb_node is not in use (in flight), and return the skip result
// This check is only valid at a point when the command buffer is being reset or freed
static bool CheckCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action,
std::string error_code) {
bool skip = false;
if (cb_node->in_use.load()) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), error_code,
"Attempt to %s command buffer (0x%" PRIx64 ") which is in use.", action,
HandleToUint64(cb_node->commandBuffer));
}
return skip;
}
// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
static bool CheckCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action,
std::string error_code) {
bool skip = false;
for (auto cmd_buffer : pPool->commandBuffers) {
skip |= CheckCommandBufferInFlight(dev_data, GetCBNode(dev_data, cmd_buffer), action, error_code);
}
return skip;
}
// Free all command buffers in given list, removing all references/links to them using ResetCommandBufferState
static void FreeCommandBufferStates(layer_data *dev_data, COMMAND_POOL_NODE *pool_state, const uint32_t command_buffer_count,
const VkCommandBuffer *command_buffers) {
for (uint32_t i = 0; i < command_buffer_count; i++) {
auto cb_state = GetCBNode(dev_data, command_buffers[i]);
// Remove references to command buffer's state and delete
if (cb_state) {
// reset prior to delete, removing various references to it.
// TODO: fix this, it's insane.
ResetCommandBufferState(dev_data, cb_state->commandBuffer);
// Remove the cb_state's references from layer_data and COMMAND_POOL_NODE
dev_data->commandBufferMap.erase(cb_state->commandBuffer);
pool_state->commandBuffers.erase(command_buffers[i]);
delete cb_state;
}
}
}
static bool PreCallValidateFreeCommandBuffers(layer_data *dev_data, uint32_t commandBufferCount,
const VkCommandBuffer *pCommandBuffers) {
bool skip = false;
for (uint32_t i = 0; i < commandBufferCount; i++) {
auto cb_node = GetCBNode(dev_data, pCommandBuffers[i]);
// Delete CB information structure, and remove from commandBufferMap
if (cb_node) {
skip |= CheckCommandBufferInFlight(dev_data, cb_node, "free", "VUID-vkFreeCommandBuffers-pCommandBuffers-00047");
}
}
return skip;
}
static void PreCallRecordFreeCommandBuffers(layer_data *dev_data, VkCommandPool commandPool, uint32_t commandBufferCount,
const VkCommandBuffer *pCommandBuffers) {
auto pPool = GetCommandPoolNode(dev_data, commandPool);
FreeCommandBufferStates(dev_data, pPool, commandBufferCount, pCommandBuffers);
}
VKAPI_ATTR void VKAPI_CALL FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
const VkCommandBuffer *pCommandBuffers) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateFreeCommandBuffers(dev_data, commandBufferCount, pCommandBuffers);
if (skip) return;
PreCallRecordFreeCommandBuffers(dev_data, commandPool, commandBufferCount, pCommandBuffers);
lock.unlock();
dev_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
}
static void PostCallRecordCreateCommandPool(layer_data *dev_data, const VkCommandPoolCreateInfo *pCreateInfo,
VkCommandPool *pCommandPool) {
dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
if (VK_SUCCESS == result) {
lock_guard_t lock(global_lock);
PostCallRecordCreateCommandPool(dev_data, pCreateInfo, pCommandPool);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
if (!dev_data->enabled_features.core.pipelineStatisticsQuery) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
"VUID-VkQueryPoolCreateInfo-queryType-00791",
"Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device with "
"VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE.");
}
}
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
if (!skip) {
result = dev_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
}
if (result == VK_SUCCESS) {
lock_guard_t lock(global_lock);
QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
qp_node->createInfo = *pCreateInfo;
}
return result;
}
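// A command pool may only be destroyed when none of its command buffers are in flight.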
static bool PreCallValidateDestroyCommandPool(layer_data *dev_data, VkCommandPool pool) {
COMMAND_POOL_NODE *cp_state = GetCommandPoolNode(dev_data, pool);
if (dev_data->instance_data->disabled.destroy_command_pool) return false;
bool skip = false;
if (cp_state) {
// Verify that command buffers in pool are complete (not in-flight)
skip |= CheckCommandBuffersInFlight(dev_data, cp_state, "destroy command pool with",
"VUID-vkDestroyCommandPool-commandPool-00041");
}
return skip;
}
static void PreCallRecordDestroyCommandPool(layer_data *dev_data, VkCommandPool pool) {
COMMAND_POOL_NODE *cp_state = GetCommandPoolNode(dev_data, pool);
// Remove cmdpool from cmdpoolmap, after freeing layer data for the command buffers
// "When a pool is destroyed, all command buffers allocated from the pool are freed."
if (cp_state) {
// Create a vector, as FreeCommandBufferStates deletes from cp_state->commandBuffers during iteration.
std::vector<VkCommandBuffer> cb_vec{cp_state->commandBuffers.begin(), cp_state->commandBuffers.end()};
FreeCommandBufferStates(dev_data, cp_state, static_cast<uint32_t>(cb_vec.size()), cb_vec.data());
dev_data->commandPoolMap.erase(pool);
}
}
// Destroy commandPool along with all of the commandBuffers allocated from that pool
VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroyCommandPool(dev_data, commandPool);
if (!skip) {
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroyCommandPool(dev_data, commandPool);
lock.unlock();
dev_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
}
}
static bool PreCallValidateResetCommandPool(layer_data *dev_data, COMMAND_POOL_NODE *pPool) {
return CheckCommandBuffersInFlight(dev_data, pPool, "reset command pool with", "VUID-vkResetCommandPool-commandPool-00040");
}
static void PostCallRecordResetCommandPool(layer_data *dev_data, COMMAND_POOL_NODE *pPool) {
for (auto cmdBuffer : pPool->commandBuffers) {
ResetCommandBufferState(dev_data, cmdBuffer);
}
}
VKAPI_ATTR VkResult VKAPI_CALL ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
auto pPool = GetCommandPoolNode(dev_data, commandPool);
bool skip = PreCallValidateResetCommandPool(dev_data, pPool);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.ResetCommandPool(device, commandPool, flags);
// Reset all of the CBs allocated from this pool
if (VK_SUCCESS == result) {
lock.lock();
PostCallRecordResetCommandPool(dev_data, pPool);
lock.unlock();
}
return result;
}
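// Fences that are still in flight (submitted but not yet retired) must not be reset.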
static bool PreCallValidateResetFences(layer_data *dev_data, uint32_t fenceCount, const VkFence *pFences) {
bool skip = false;
for (uint32_t i = 0; i < fenceCount; ++i) {
auto pFence = GetFenceNode(dev_data, pFences[i]);
if (pFence && pFence->scope == kSyncScopeInternal && pFence->state == FENCE_INFLIGHT) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
HandleToUint64(pFences[i]), "VUID-vkResetFences-pFences-01123", "Fence 0x%" PRIx64 " is in use.",
HandleToUint64(pFences[i]));
}
}
return skip;
}
static void PostCallRecordResetFences(layer_data *dev_data, uint32_t fenceCount, const VkFence *pFences) {
for (uint32_t i = 0; i < fenceCount; ++i) {
auto pFence = GetFenceNode(dev_data, pFences[i]);
if (pFence) {
if (pFence->scope == kSyncScopeInternal) {
pFence->state = FENCE_UNSIGNALED;
} else if (pFence->scope == kSyncScopeExternalTemporary) {
pFence->scope = kSyncScopeInternal;
}
}
}
}
VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateResetFences(dev_data, fenceCount, pFences);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.ResetFences(device, fenceCount, pFences);
if (result == VK_SUCCESS) {
lock.lock();
PostCallRecordResetFences(dev_data, fenceCount, pFences);
lock.unlock();
}
return result;
}
// For given cb_nodes, invalidate them and track object causing invalidation
void InvalidateCommandBuffers(const layer_data *dev_data, std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes, VK_OBJECT obj) {
for (auto cb_node : cb_nodes) {
if (cb_node->state == CB_RECORDING) {
log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), kVUID_Core_DrawState_InvalidCommandBuffer,
"Invalidating a command buffer that's currently being recorded: 0x%" PRIx64 ".",
HandleToUint64(cb_node->commandBuffer));
cb_node->state = CB_INVALID_INCOMPLETE;
} else if (cb_node->state == CB_RECORDED) {
cb_node->state = CB_INVALID_COMPLETE;
}
cb_node->broken_bindings.push_back(obj);
// If secondary, propagate the invalidation to the primaries that will call us.
if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
InvalidateCommandBuffers(dev_data, cb_node->linkedCommandBuffers, obj);
}
}
}
static bool PreCallValidateDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer,
FRAMEBUFFER_STATE **framebuffer_state, VK_OBJECT *obj_struct) {
*framebuffer_state = GetFramebufferState(dev_data, framebuffer);
*obj_struct = {HandleToUint64(framebuffer), kVulkanObjectTypeFramebuffer};
if (dev_data->instance_data->disabled.destroy_framebuffer) return false;
bool skip = false;
if (*framebuffer_state) {
skip |= ValidateObjectNotInUse(dev_data, *framebuffer_state, *obj_struct, "vkDestroyFramebuffer",
"VUID-vkDestroyFramebuffer-framebuffer-00892");
}
return skip;
}
static void PreCallRecordDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer, FRAMEBUFFER_STATE *framebuffer_state,
VK_OBJECT obj_struct) {
InvalidateCommandBuffers(dev_data, framebuffer_state->cb_bindings, obj_struct);
dev_data->frameBufferMap.erase(framebuffer);
}
VKAPI_ATTR void VKAPI_CALL DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
FRAMEBUFFER_STATE *framebuffer_state = nullptr;
VK_OBJECT obj_struct;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroyFramebuffer(dev_data, framebuffer, &framebuffer_state, &obj_struct);
if (!skip) {
if (framebuffer != VK_NULL_HANDLE) {
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroyFramebuffer(dev_data, framebuffer, framebuffer_state, obj_struct);
}
lock.unlock();
dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
}
}
static bool PreCallValidateDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE **rp_state,
VK_OBJECT *obj_struct) {
*rp_state = GetRenderPassState(dev_data, render_pass);
*obj_struct = {HandleToUint64(render_pass), kVulkanObjectTypeRenderPass};
if (dev_data->instance_data->disabled.destroy_renderpass) return false;
bool skip = false;
if (*rp_state) {
skip |= ValidateObjectNotInUse(dev_data, *rp_state, *obj_struct, "vkDestroyRenderPass",
"VUID-vkDestroyRenderPass-renderPass-00873");
}
return skip;
}
static void PreCallRecordDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE *rp_state,
VK_OBJECT obj_struct) {
InvalidateCommandBuffers(dev_data, rp_state->cb_bindings, obj_struct);
dev_data->renderPassMap.erase(render_pass);
}
VKAPI_ATTR void VKAPI_CALL DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
RENDER_PASS_STATE *rp_state = nullptr;
VK_OBJECT obj_struct;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroyRenderPass(dev_data, renderPass, &rp_state, &obj_struct);
if (!skip) {
if (renderPass != VK_NULL_HANDLE) {
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroyRenderPass(dev_data, renderPass, rp_state, obj_struct);
}
lock.unlock();
dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
}
}
VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCreateBuffer(dev_data, pCreateInfo);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
if (VK_SUCCESS == result) {
lock.lock();
PostCallRecordCreateBuffer(dev_data, pCreateInfo, pBuffer);
lock.unlock();
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
if (VK_SUCCESS == result) {
lock.lock();
PostCallRecordCreateBufferView(dev_data, pCreateInfo, pView);
lock.unlock();
}
return result;
}
// Access helper functions for external modules
VkFormatProperties GetFormatProperties(const core_validation::layer_data *device_data, const VkFormat format) {
VkFormatProperties format_properties;
instance_layer_data *instance_data =
GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format, &format_properties);
return format_properties;
}
VkResult GetImageFormatProperties(core_validation::layer_data *device_data, const VkImageCreateInfo *image_ci,
VkImageFormatProperties *pImageFormatProperties) {
instance_layer_data *instance_data =
GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
return instance_data->dispatch_table.GetPhysicalDeviceImageFormatProperties(
device_data->physical_device, image_ci->format, image_ci->imageType, image_ci->tiling, image_ci->usage, image_ci->flags,
pImageFormatProperties);
}
const debug_report_data *GetReportData(const core_validation::layer_data *device_data) { return device_data->report_data; }
const VkPhysicalDeviceProperties *GetPhysicalDeviceProperties(const core_validation::layer_data *device_data) {
return &device_data->phys_dev_props;
}
const CHECK_DISABLED *GetDisables(core_validation::layer_data *device_data) { return &device_data->instance_data->disabled; }
std::unordered_map<VkImage, std::unique_ptr<IMAGE_STATE>> *GetImageMap(core_validation::layer_data *device_data) {
return &device_data->imageMap;
}
std::unordered_map<VkImage, std::vector<ImageSubresourcePair>> *GetImageSubresourceMap(core_validation::layer_data *device_data) {
return &device_data->imageSubresourceMap;
}
std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> *GetImageLayoutMap(layer_data *device_data) {
return &device_data->imageLayoutMap;
}
std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> const *GetImageLayoutMap(layer_data const *device_data) {
return &device_data->imageLayoutMap;
}
std::unordered_map<VkBuffer, std::unique_ptr<BUFFER_STATE>> *GetBufferMap(layer_data *device_data) {
return &device_data->bufferMap;
}
std::unordered_map<VkBufferView, std::unique_ptr<BUFFER_VIEW_STATE>> *GetBufferViewMap(layer_data *device_data) {
return &device_data->bufferViewMap;
}
std::unordered_map<VkImageView, std::unique_ptr<IMAGE_VIEW_STATE>> *GetImageViewMap(layer_data *device_data) {
return &device_data->imageViewMap;
}
const PHYS_DEV_PROPERTIES_NODE *GetPhysDevProperties(const layer_data *device_data) { return &device_data->phys_dev_properties; }
const DeviceFeatures *GetEnabledFeatures(const layer_data *device_data) { return &device_data->enabled_features; }
const DeviceExtensions *GetDeviceExtensions(const layer_data *device_data) { return &device_data->extensions; }
uint32_t GetApiVersion(const layer_data *device_data) { return device_data->api_version; }
VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = PreCallValidateCreateImage(dev_data, pCreateInfo, pAllocator, pImage);
if (!skip) {
result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);
}
if (VK_SUCCESS == result) {
lock_guard_t lock(global_lock);
PostCallRecordCreateImage(dev_data, pCreateInfo, pImage);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCreateImageView(dev_data, pCreateInfo);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView);
if (VK_SUCCESS == result) {
lock.lock();
PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
lock.unlock();
}
return result;
}
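// Shadow fence state on creation; a fence created with VK_FENCE_CREATE_SIGNALED_BIT starts out retired rather than unsignaled.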
VKAPI_ATTR VkResult VKAPI_CALL CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence);
if (VK_SUCCESS == result) {
lock_guard_t lock(global_lock);
auto &fence_node = dev_data->fenceMap[*pFence];
fence_node.fence = *pFence;
fence_node.createInfo = *pCreateInfo;
fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
}
return result;
}
// TODO handle pipeline caches
VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
return result;
}
VKAPI_ATTR void VKAPI_CALL DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache,
const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
// Pre-record to avoid Destroy/Create race (if/when implemented)
dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator);
}
VKAPI_ATTR VkResult VKAPI_CALL GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize,
void *pData) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount,
const VkPipelineCache *pSrcCaches) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
return result;
}
// Validation cache:
// CV is the bottommost implementor of this extension. Don't pass calls down.
VKAPI_ATTR VkResult VKAPI_CALL CreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkValidationCacheEXT *pValidationCache) {
*pValidationCache = ValidationCache::Create(pCreateInfo);
return *pValidationCache ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED;
}
VKAPI_ATTR void VKAPI_CALL DestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache,
const VkAllocationCallbacks *pAllocator) {
delete (ValidationCache *)validationCache;
}
VKAPI_ATTR VkResult VKAPI_CALL GetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t *pDataSize,
void *pData) {
size_t inSize = *pDataSize;
((ValidationCache *)validationCache)->Write(pDataSize, pData);
return (pData && *pDataSize != inSize) ? VK_INCOMPLETE : VK_SUCCESS;
}
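// A validation cache must not appear in its own merge sources; once that error is reported, no further sources are merged.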
VKAPI_ATTR VkResult VKAPI_CALL MergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount,
const VkValidationCacheEXT *pSrcCaches) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
auto dst = (ValidationCache *)dstCache;
auto src = (ValidationCache const *const *)pSrcCaches;
VkResult result = VK_SUCCESS;
for (uint32_t i = 0; i < srcCacheCount; i++) {
if (src[i] == dst) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT,
0, "VUID-vkMergeValidationCachesEXT-dstCache-01536",
"vkMergeValidationCachesEXT: dstCache (0x%" PRIx64 ") must not appear in pSrcCaches array.",
HandleToUint64(dstCache));
result = VK_ERROR_VALIDATION_FAILED_EXT;
}
if (!skip) {
dst->Merge(src[i]);
}
}
return result;
}
// utility function to set collective state for pipeline
void SetPipelineState(PIPELINE_STATE *pPipe) {
// If any attachment used by this pipeline has blendEnable, set top-level blendEnable
if (pPipe->graphicsPipelineCI.pColorBlendState) {
for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
if (VK_TRUE == pPipe->attachments[i].blendEnable) {
if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
(pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
(pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
(pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
(pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
pPipe->blendConstantsEnabled = true;
}
}
}
}
}
VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkGraphicsPipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
// The order of operations here is a little convoluted but gets the job done
// 1. Pipeline create state is first shadowed into PIPELINE_STATE struct
// 2. Create state is then validated (which uses flags setup during shadowing)
// 3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
bool skip = false;
vector<std::unique_ptr<PIPELINE_STATE>> pipe_state;
pipe_state.reserve(count);
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
uint32_t i = 0;
unique_lock_t lock(global_lock);
for (i = 0; i < count; i++) {
pipe_state.push_back(std::unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE));
pipe_state[i]->initGraphicsPipeline(&pCreateInfos[i], GetRenderPassStateSharedPtr(dev_data, pCreateInfos[i].renderPass));
pipe_state[i]->pipeline_layout = *GetPipelineLayout(dev_data, pCreateInfos[i].layout);
}
for (i = 0; i < count; i++) {
skip |= ValidatePipelineLocked(dev_data, pipe_state, i);
}
lock.unlock();
for (i = 0; i < count; i++) {
skip |= ValidatePipelineUnlocked(dev_data, pipe_state, i);
}
if (skip) {
for (i = 0; i < count; i++) {
pPipelines[i] = VK_NULL_HANDLE;
}
return VK_ERROR_VALIDATION_FAILED_EXT;
}
auto result =
dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
lock.lock();
for (i = 0; i < count; i++) {
if (pPipelines[i] != VK_NULL_HANDLE) {
pipe_state[i]->pipeline = pPipelines[i];
dev_data->pipelineMap[pPipelines[i]] = std::move(pipe_state[i]);
}
}
return result;
}
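// Mirrors CreateGraphicsPipelines: shadow the create info, validate it, then create the pipelines and track them on success.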
VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkComputePipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
bool skip = false;
vector<std::unique_ptr<PIPELINE_STATE>> pPipeState;
pPipeState.reserve(count);
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
uint32_t i = 0;
unique_lock_t lock(global_lock);
for (i = 0; i < count; i++) {
// Create and initialize internal tracking data structure
pPipeState.push_back(unique_ptr<PIPELINE_STATE>(new PIPELINE_STATE));
pPipeState[i]->initComputePipeline(&pCreateInfos[i]);
pPipeState[i]->pipeline_layout = *GetPipelineLayout(dev_data, pCreateInfos[i].layout);
// TODO: Add Compute Pipeline Verification
skip |= ValidateComputePipeline(dev_data, pPipeState[i].get());
}
if (skip) {
for (i = 0; i < count; i++) {
pPipelines[i] = VK_NULL_HANDLE;
}
return VK_ERROR_VALIDATION_FAILED_EXT;
}
lock.unlock();
auto result =
dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
lock.lock();
for (i = 0; i < count; i++) {
if (pPipelines[i] != VK_NULL_HANDLE) {
pPipeState[i]->pipeline = pPipelines[i];
dev_data->pipelineMap[pPipelines[i]] = std::move(pPipeState[i]);
}
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler);
if (VK_SUCCESS == result) {
lock_guard_t lock(global_lock);
dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo));
}
return result;
}
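// Descriptor set layout validation and state tracking are delegated to the cvdescriptorset module.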
static bool PreCallValidateCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info) {
if (dev_data->instance_data->disabled.create_descriptor_set_layout) return false;
return cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo(
dev_data->report_data, create_info, dev_data->extensions.vk_khr_push_descriptor,
dev_data->phys_dev_ext_props.max_push_descriptors, dev_data->extensions.vk_ext_descriptor_indexing,
&dev_data->enabled_features.descriptor_indexing);
}
static void PostCallRecordCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info,
VkDescriptorSetLayout set_layout) {
dev_data->descriptorSetLayoutMap[set_layout] = std::make_shared<cvdescriptorset::DescriptorSetLayout>(create_info, set_layout);
}
VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorSetLayout *pSetLayout) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCreateDescriptorSetLayout(dev_data, pCreateInfo);
if (!skip) {
lock.unlock();
result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
if (VK_SUCCESS == result) {
lock.lock();
PostCallRecordCreateDescriptorSetLayout(dev_data, pCreateInfo, *pSetLayout);
}
}
return result;
}
// Used by CreatePipelineLayout and CmdPushConstants.
// Note that the index argument is optional and only used by CreatePipelineLayout.
static bool ValidatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
const char *caller_name, uint32_t index = 0) {
if (dev_data->instance_data->disabled.push_constant_range) return false;
uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
bool skip = false;
// Check that offset + size don't exceed the max.
    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
// This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
if (offset >= maxPushConstantsSize) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPushConstantRange-offset-00294",
"%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantSize of %u.",
caller_name, index, offset, maxPushConstantsSize);
}
if (size > maxPushConstantsSize - offset) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPushConstantRange-size-00298",
"%s call has push constants index %u with offset %u and size %u that exceeds this device's "
"maxPushConstantSize of %u.",
caller_name, index, offset, size, maxPushConstantsSize);
}
} else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
if (offset >= maxPushConstantsSize) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-vkCmdPushConstants-offset-00370",
"%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantSize of %u.",
caller_name, index, offset, maxPushConstantsSize);
}
if (size > maxPushConstantsSize - offset) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-vkCmdPushConstants-size-00371",
"%s call has push constants index %u with offset %u and size %u that exceeds this device's "
"maxPushConstantSize of %u.",
caller_name, index, offset, size, maxPushConstantsSize);
}
} else {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name);
}
}
// size needs to be non-zero and a multiple of 4.
if ((size == 0) || ((size & 0x3) != 0)) {
if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
if (size == 0) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPushConstantRange-size-00296",
"%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name,
index, size);
}
if (size & 0x3) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPushConstantRange-size-00297",
"%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name,
index, size);
}
} else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
if (size == 0) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-vkCmdPushConstants-size-arraylength",
"%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name,
index, size);
}
if (size & 0x3) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-vkCmdPushConstants-size-00369",
"%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name,
index, size);
}
} else {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name);
}
}
// offset needs to be a multiple of 4.
if ((offset & 0x3) != 0) {
if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPushConstantRange-offset-00295",
"%s call has push constants index %u with offset %u. Offset must be a multiple of 4.", caller_name,
index, offset);
} else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-vkCmdPushConstants-offset-00368",
"%s call has push constants with offset %u. Offset must be a multiple of 4.", caller_name, offset);
} else {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name);
}
}
return skip;
}
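// Descriptor type groupings used when checking pipeline layouts against the per-stage descriptor limits.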
enum DSL_DESCRIPTOR_GROUPS {
DSL_TYPE_SAMPLERS = 0,
DSL_TYPE_UNIFORM_BUFFERS,
DSL_TYPE_STORAGE_BUFFERS,
DSL_TYPE_SAMPLED_IMAGES,
DSL_TYPE_STORAGE_IMAGES,
DSL_TYPE_INPUT_ATTACHMENTS,
DSL_NUM_DESCRIPTOR_GROUPS
};
// Used by PreCallValidateCreatePipelineLayout.
// Returns an array of size DSL_NUM_DESCRIPTOR_GROUPS of the maximum number of descriptors used in any single pipeline stage.
std::valarray<uint32_t> GetDescriptorCountMaxPerStage(
    const layer_data *dev_data, const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts,
bool skip_update_after_bind) {
// Identify active pipeline stages
std::vector<VkShaderStageFlags> stage_flags = {VK_SHADER_STAGE_VERTEX_BIT, VK_SHADER_STAGE_FRAGMENT_BIT,
VK_SHADER_STAGE_COMPUTE_BIT};
if (dev_data->enabled_features.core.geometryShader) {
stage_flags.push_back(VK_SHADER_STAGE_GEOMETRY_BIT);
}
if (dev_data->enabled_features.core.tessellationShader) {
stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
}
// Allow iteration over enum values
std::vector<DSL_DESCRIPTOR_GROUPS> dsl_groups = {DSL_TYPE_SAMPLERS, DSL_TYPE_UNIFORM_BUFFERS, DSL_TYPE_STORAGE_BUFFERS,
DSL_TYPE_SAMPLED_IMAGES, DSL_TYPE_STORAGE_IMAGES, DSL_TYPE_INPUT_ATTACHMENTS};
// Sum by layouts per stage, then pick max of stages per type
std::valarray<uint32_t> max_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // max descriptor sum among all pipeline stages
for (auto stage : stage_flags) {
std::valarray<uint32_t> stage_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // per-stage sums
for (auto dsl : set_layouts) {
if (skip_update_after_bind &&
(dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) {
continue;
}
for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
if (0 != (stage & binding->stageFlags)) {
switch (binding->descriptorType) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
stage_sum[DSL_TYPE_UNIFORM_BUFFERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
stage_sum[DSL_TYPE_STORAGE_BUFFERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
stage_sum[DSL_TYPE_STORAGE_IMAGES] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
stage_sum[DSL_TYPE_INPUT_ATTACHMENTS] += binding->descriptorCount;
break;
default:
break;
}
}
}
}
for (auto type : dsl_groups) {
max_sum[type] = std::max(stage_sum[type], max_sum[type]);
}
}
return max_sum;
}
// Used by PreCallValidateCreatePipelineLayout.
// Returns an array of size VK_DESCRIPTOR_TYPE_RANGE_SIZE of the summed descriptors by type.
// Note: descriptors only count against the limit once even if used by multiple stages.
std::valarray<uint32_t> GetDescriptorSum(
const layer_data *dev_data, const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts,
bool skip_update_after_bind) {
std::valarray<uint32_t> sum_by_type(0U, VK_DESCRIPTOR_TYPE_RANGE_SIZE);
for (auto dsl : set_layouts) {
if (skip_update_after_bind && (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) {
continue;
}
for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
sum_by_type[binding->descriptorType] += binding->descriptorCount;
}
}
return sum_by_type;
}
static bool PreCallValidateCreatePipelineLayout(const layer_data *dev_data, const VkPipelineLayoutCreateInfo *pCreateInfo) {
bool skip = false;
// Validate layout count against device physical limit
if (pCreateInfo->setLayoutCount > dev_data->phys_dev_props.limits.maxBoundDescriptorSets) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-setLayoutCount-00286",
"vkCreatePipelineLayout(): setLayoutCount (%d) exceeds physical device maxBoundDescriptorSets limit (%d).",
pCreateInfo->setLayoutCount, dev_data->phys_dev_props.limits.maxBoundDescriptorSets);
}
// Validate Push Constant ranges
uint32_t i, j;
for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
skip |= ValidatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPushConstantRange-stageFlags-requiredbitmask",
"vkCreatePipelineLayout() call has no stageFlags set.");
}
}
// As of 1.0.28, there is a VU that states that a stage flag cannot appear more than once in the list of push constant ranges.
for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
if (0 != (pCreateInfo->pPushConstantRanges[i].stageFlags & pCreateInfo->pPushConstantRanges[j].stageFlags)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pPushConstantRanges-00292",
"vkCreatePipelineLayout() Duplicate stage flags found in ranges %d and %d.", i, j);
}
}
}
// Early-out
if (skip) return skip;
std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts(pCreateInfo->setLayoutCount, nullptr);
unsigned int push_descriptor_set_count = 0;
{
unique_lock_t lock(global_lock); // Lock while accessing global state
for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
set_layouts[i] = GetDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
if (set_layouts[i]->IsPushDescriptor()) ++push_descriptor_set_count;
}
} // Unlock
if (push_descriptor_set_count > 1) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00293",
"vkCreatePipelineLayout() Multiple push descriptor sets found.");
}
// Max descriptors by type, within a single pipeline stage
std::valarray<uint32_t> max_descriptors_per_stage = GetDescriptorCountMaxPerStage(dev_data, set_layouts, true);
// Samplers
if (max_descriptors_per_stage[DSL_TYPE_SAMPLERS] > dev_data->phys_dev_props.limits.maxPerStageDescriptorSamplers) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00287",
"vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
"maxPerStageDescriptorSamplers limit (%d).",
max_descriptors_per_stage[DSL_TYPE_SAMPLERS], dev_data->phys_dev_props.limits.maxPerStageDescriptorSamplers);
}
// Uniform buffers
if (max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS] > dev_data->phys_dev_props.limits.maxPerStageDescriptorUniformBuffers) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288",
"vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorUniformBuffers limit (%d).",
max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS],
dev_data->phys_dev_props.limits.maxPerStageDescriptorUniformBuffers);
}
// Storage buffers
if (max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS] > dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageBuffers) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289",
"vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorStorageBuffers limit (%d).",
max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS],
dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageBuffers);
}
// Sampled images
if (max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES] > dev_data->phys_dev_props.limits.maxPerStageDescriptorSampledImages) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290",
"vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
"maxPerStageDescriptorSampledImages limit (%d).",
max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES],
dev_data->phys_dev_props.limits.maxPerStageDescriptorSampledImages);
}
// Storage images
if (max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES] > dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageImages) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00291",
"vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
"maxPerStageDescriptorStorageImages limit (%d).",
max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES],
dev_data->phys_dev_props.limits.maxPerStageDescriptorStorageImages);
}
// Input attachments
if (max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS] >
dev_data->phys_dev_props.limits.maxPerStageDescriptorInputAttachments) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01676",
"vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
"maxPerStageDescriptorInputAttachments limit (%d).",
max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS],
dev_data->phys_dev_props.limits.maxPerStageDescriptorInputAttachments);
}
// Total descriptors by type
//
std::valarray<uint32_t> sum_all_stages = GetDescriptorSum(dev_data, set_layouts, true);
// Samplers
uint32_t sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER];
if (sum > dev_data->phys_dev_props.limits.maxDescriptorSetSamplers) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677",
"vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
"maxDescriptorSetSamplers limit (%d).",
sum, dev_data->phys_dev_props.limits.maxDescriptorSetSamplers);
}
// Uniform buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] > dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffers) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01678",
"vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUniformBuffers limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER],
dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffers);
}
// Dynamic uniform buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] >
dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01679",
"vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUniformBuffersDynamic limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
dev_data->phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic);
}
// Storage buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] > dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffers) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01680",
"vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetStorageBuffers limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER],
dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffers);
}
// Dynamic storage buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] >
dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01681",
"vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetStorageBuffersDynamic limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
dev_data->phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic);
}
// Sampled images
sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER];
if (sum > dev_data->phys_dev_props.limits.maxDescriptorSetSampledImages) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682",
"vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
"maxDescriptorSetSampledImages limit (%d).",
sum, dev_data->phys_dev_props.limits.maxDescriptorSetSampledImages);
}
// Storage images
sum = sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER];
if (sum > dev_data->phys_dev_props.limits.maxDescriptorSetStorageImages) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01683",
"vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
"maxDescriptorSetStorageImages limit (%d).",
sum, dev_data->phys_dev_props.limits.maxDescriptorSetStorageImages);
}
// Input attachments
if (sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] > dev_data->phys_dev_props.limits.maxDescriptorSetInputAttachments) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01684",
"vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
"maxDescriptorSetInputAttachments limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT],
dev_data->phys_dev_props.limits.maxDescriptorSetInputAttachments);
}
if (dev_data->extensions.vk_ext_descriptor_indexing) {
// XXX TODO: replace with correct VU messages
// Max descriptors by type, within a single pipeline stage
std::valarray<uint32_t> max_descriptors_per_stage_update_after_bind =
GetDescriptorCountMaxPerStage(dev_data, set_layouts, false);
// Samplers
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS] >
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSamplers) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03022",
"vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindSamplers limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS],
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSamplers);
}
// Uniform buffers
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS] >
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindUniformBuffers) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03023",
"vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindUniformBuffers limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS],
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindUniformBuffers);
}
// Storage buffers
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS] >
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageBuffers) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03024",
"vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindStorageBuffers limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS],
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageBuffers);
}
// Sampled images
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES] >
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSampledImages) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03025",
"vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindSampledImages limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES],
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindSampledImages);
}
// Storage images
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES] >
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageImages) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03026",
"vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindStorageImages limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES],
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindStorageImages);
}
// Input attachments
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS] >
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindInputAttachments) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03027",
"vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindInputAttachments limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS],
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxPerStageDescriptorUpdateAfterBindInputAttachments);
}
// Total descriptors by type, summed across all pipeline stages
//
std::valarray<uint32_t> sum_all_stages_update_after_bind = GetDescriptorSum(dev_data, set_layouts, false);
// Samplers
sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLER] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER];
if (sum > dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSamplers) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03036",
"vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindSamplers limit (%d).",
sum, dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSamplers);
}
// Uniform buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] >
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffers) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03037",
"vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindUniformBuffers limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER],
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffers);
}
// Dynamic uniform buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] >
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03038",
"vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindUniformBuffersDynamic limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic);
}
// Storage buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] >
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffers) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03039",
"vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindStorageBuffers limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER],
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffers);
}
// Dynamic storage buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] >
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03040",
"vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindStorageBuffersDynamic limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic);
}
// Sampled images
sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER];
if (sum > dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSampledImages) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03041",
"vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindSampledImages limit (%d).",
sum, dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindSampledImages);
}
// Storage images
sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER];
if (sum > dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageImages) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03042",
"vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindStorageImages limit (%d).",
sum, dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindStorageImages);
}
// Input attachments
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] >
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindInputAttachments) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03043",
"vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindInputAttachments limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT],
dev_data->phys_dev_ext_props.descriptor_indexing_props.maxDescriptorSetUpdateAfterBindInputAttachments);
}
}
return skip;
}
// For repeatable sorting, not very useful for "memory in range" search
struct PushConstantRangeCompare {
bool operator()(const VkPushConstantRange *lhs, const VkPushConstantRange *rhs) const {
if (lhs->offset == rhs->offset) {
if (lhs->size == rhs->size) {
// The comparison is arbitrary, but avoids false aliasing by comparing all fields.
return lhs->stageFlags < rhs->stageFlags;
}
// If the offsets are the same then sorting by the end of range is useful for validation
return lhs->size < rhs->size;
}
return lhs->offset < rhs->offset;
}
};
static PushConstantRangesDict push_constant_ranges_dict;
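// Canonicalize a pipeline layout's push constant ranges: equivalent ranges declared in any order map to the same ID,
// so later "compatible for set" comparisons reduce to a cheap ID equality check.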
PushConstantRangesId GetCanonicalId(const VkPipelineLayoutCreateInfo *info) {
if (!info->pPushConstantRanges) {
// Hand back the empty entry (creating as needed)...
return push_constant_ranges_dict.look_up(PushConstantRanges());
}
// Sort the input ranges to ensure equivalent ranges map to the same id
std::set<const VkPushConstantRange *, PushConstantRangeCompare> sorted;
for (uint32_t i = 0; i < info->pushConstantRangeCount; i++) {
sorted.insert(info->pPushConstantRanges + i);
}
    PushConstantRanges ranges;
    ranges.reserve(sorted.size());
for (const auto range : sorted) {
ranges.emplace_back(*range);
}
return push_constant_ranges_dict.look_up(std::move(ranges));
}
// Dictionary of canonical form of the pipeline layout's list of descriptor set layouts
static PipelineLayoutSetLayoutsDict pipeline_layout_set_layouts_dict;
// Dictionary of canonical form of the "compatible for set" records
static PipelineLayoutCompatDict pipeline_layout_compat_dict;
static PipelineLayoutCompatId GetCanonicalId(const uint32_t set_index, const PushConstantRangesId pcr_id,
const PipelineLayoutSetLayoutsId set_layouts_id) {
return pipeline_layout_compat_dict.look_up(PipelineLayoutCompatDef(set_index, pcr_id, set_layouts_id));
}
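// Shadow the new pipeline layout, recording the canonical IDs of its push constant ranges and set layouts that are
// used for pipeline layout compatibility checks.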
static void PostCallRecordCreatePipelineLayout(layer_data *dev_data, const VkPipelineLayoutCreateInfo *pCreateInfo,
const VkPipelineLayout *pPipelineLayout) {
unique_lock_t lock(global_lock); // Lock while accessing state
PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
plNode.layout = *pPipelineLayout;
plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
PipelineLayoutSetLayoutsDef set_layouts(pCreateInfo->setLayoutCount);
for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; ++i) {
plNode.set_layouts[i] = GetDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
set_layouts[i] = plNode.set_layouts[i]->GetLayoutId();
}
// Get canonical form IDs for the "compatible for set" contents
plNode.push_constant_ranges = GetCanonicalId(pCreateInfo);
auto set_layouts_id = pipeline_layout_set_layouts_dict.look_up(set_layouts);
plNode.compat_for_set.reserve(pCreateInfo->setLayoutCount);
    // Create table of "compatible for set N" canonical forms for trivial accept validation
for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; ++i) {
plNode.compat_for_set.emplace_back(GetCanonicalId(i, plNode.push_constant_ranges, set_layouts_id));
}
// Implicit unlock
}
VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = PreCallValidateCreatePipelineLayout(dev_data, pCreateInfo);
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
if (VK_SUCCESS == result) {
PostCallRecordCreatePipelineLayout(dev_data, pCreateInfo, pPipelineLayout);
}
return result;
}
static bool PostCallValidateCreateDescriptorPool(layer_data *dev_data, VkDescriptorPool *pDescriptorPool) {
return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
HandleToUint64(*pDescriptorPool), kVUID_Core_DrawState_OutOfMemory,
"Out of memory while attempting to allocate DESCRIPTOR_POOL_STATE in vkCreateDescriptorPool()");
}
static void PostCallRecordCreateDescriptorPool(layer_data *dev_data, DESCRIPTOR_POOL_STATE *pNewNode,
VkDescriptorPool *pDescriptorPool) {
dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
if (VK_SUCCESS == result) {
DESCRIPTOR_POOL_STATE *pNewNode = new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo);
lock_guard_t lock(global_lock);
if (NULL == pNewNode) {
bool skip = PostCallValidateCreateDescriptorPool(dev_data, pDescriptorPool);
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
} else {
PostCallRecordCreateDescriptorPool(dev_data, pNewNode, pDescriptorPool);
}
} else {
// Need to do anything if pool create fails?
}
return result;
}
// Validate that given pool does not store any descriptor sets used by an in-flight CmdBuffer
// pool stores the descriptor sets to be validated
// Return false if no errors occur
// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
static bool PreCallValidateResetDescriptorPool(layer_data *dev_data, VkDescriptorPool descriptorPool) {
if (dev_data->instance_data->disabled.idle_descriptor_set) return false;
bool skip = false;
DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(dev_data, descriptorPool);
if (pPool != nullptr) {
for (auto ds : pPool->sets) {
if (ds && ds->in_use.load()) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
HandleToUint64(descriptorPool), "VUID-vkResetDescriptorPool-descriptorPool-00313",
"It is invalid to call vkResetDescriptorPool() with descriptor sets in use by a command buffer.");
if (skip) break;
}
}
}
return skip;
}
static void PostCallRecordResetDescriptorPool(layer_data *dev_data, VkDevice device, VkDescriptorPool descriptorPool,
VkDescriptorPoolResetFlags flags) {
DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(dev_data, descriptorPool);
// TODO: validate flags
// For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
for (auto ds : pPool->sets) {
FreeDescriptorSet(dev_data, ds);
}
pPool->sets.clear();
// Reset available count for each type and available sets for this pool
for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
}
pPool->availableSets = pPool->maxSets;
}
VKAPI_ATTR VkResult VKAPI_CALL ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
VkDescriptorPoolResetFlags flags) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
// Make sure sets being destroyed are not currently in-use
bool skip = PreCallValidateResetDescriptorPool(dev_data, descriptorPool);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
if (VK_SUCCESS == result) {
lock.lock();
PostCallRecordResetDescriptorPool(dev_data, device, descriptorPool, flags);
lock.unlock();
}
return result;
}
// Ensure the pool contains enough descriptors and descriptor sets to satisfy
// an allocation request. Fills common_data with the total number of descriptors of each type required,
// as well as DescriptorSetLayout ptrs used for later update.
static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
cvdescriptorset::AllocateDescriptorSetsData *common_data) {
// Always update common data
cvdescriptorset::UpdateAllocateDescriptorSetsData(dev_data, pAllocateInfo, common_data);
if (dev_data->instance_data->disabled.allocate_descriptor_sets) return false;
// All state checks for AllocateDescriptorSets is done in single function
return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data, pAllocateInfo, common_data);
}
// Allocation state was good and call down chain was made so update state based on allocating descriptor sets
static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
VkDescriptorSet *pDescriptorSets,
const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
// All the updates are contained in a single cvdescriptorset function
cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
&dev_data->setMap, dev_data);
}
// TODO: PostCallRecord routine is dependent on data generated in PreCallValidate -- needs to be moved out
VKAPI_ATTR VkResult VKAPI_CALL AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
VkDescriptorSet *pDescriptorSets) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
bool skip = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
if (VK_SUCCESS == result) {
lock.lock();
PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
lock.unlock();
}
return result;
}
// Verify state before freeing DescriptorSets
static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
const VkDescriptorSet *descriptor_sets) {
if (dev_data->instance_data->disabled.free_descriptor_sets) return false;
bool skip = false;
// First make sure sets being destroyed are not currently in-use
for (uint32_t i = 0; i < count; ++i) {
if (descriptor_sets[i] != VK_NULL_HANDLE) {
skip |= ValidateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
}
}
DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
// Can't Free from a NON_FREE pool
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
HandleToUint64(pool), "VUID-vkFreeDescriptorSets-descriptorPool-00312",
"It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
"VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
}
return skip;
}
// Sets are being returned to the pool so update the pool state
static void PreCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
const VkDescriptorSet *descriptor_sets) {
DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(dev_data, pool);
// Update available descriptor sets in pool
pool_state->availableSets += count;
// For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
for (uint32_t i = 0; i < count; ++i) {
if (descriptor_sets[i] != VK_NULL_HANDLE) {
auto descriptor_set = dev_data->setMap[descriptor_sets[i]];
uint32_t type_index = 0, descriptor_count = 0;
for (uint32_t j = 0; j < descriptor_set->GetBindingCount(); ++j) {
type_index = static_cast<uint32_t>(descriptor_set->GetTypeFromIndex(j));
descriptor_count = descriptor_set->GetDescriptorCountFromIndex(j);
pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
}
FreeDescriptorSet(dev_data, descriptor_set);
pool_state->sets.erase(descriptor_set);
}
}
}
VKAPI_ATTR VkResult VKAPI_CALL FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
const VkDescriptorSet *pDescriptorSets) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
// Make sure that no sets being destroyed are in-flight
unique_lock_t lock(global_lock);
bool skip = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
VkResult result;
if (skip) {
result = VK_ERROR_VALIDATION_FAILED_EXT;
} else {
// A race here is invalid (descriptorPool should be externally sync'd), but code defensively against an invalid race
PreCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
lock.unlock();
result = dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
}
return result;
}
// TODO : This is a Proof-of-concept for core validation architecture
// Really we'll want to break out these functions to separate files but
// keeping it all together here to prove out design
// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
const VkCopyDescriptorSet *pDescriptorCopies) {
if (dev_data->instance_data->disabled.update_descriptor_sets) return false;
    // First thing to do is perform map look-ups.
    // NOTE: UpdateDescriptorSets is somewhat unique in that it operates on a number of DescriptorSets at once,
    // so we can't do a single map look-up up-front; the functions below look each set up individually.
    // Now make call(s) that validate state, but don't perform state updates in this function.
    // Note that we don't have DescriptorSet instances at this level, so we use a helper function in the
    // cvdescriptorset namespace that parses the params and makes calls into the specific class instances.
return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
descriptorCopyCount, pDescriptorCopies);
}
// PreCallRecord* handles recording state updates prior to the call down the chain to UpdateDescriptorSets()
static void PreCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
const VkCopyDescriptorSet *pDescriptorCopies) {
cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
pDescriptorCopies);
}
VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
const VkCopyDescriptorSet *pDescriptorCopies) {
// Only map look-up at top level is for device-level layer_data
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
pDescriptorCopies);
if (!skip) {
// Since UpdateDescriptorSets() is void, nothing to check prior to updating state & we can update before call down chain
PreCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
pDescriptorCopies);
lock.unlock();
dev_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
pDescriptorCopies);
}
}
static void PostCallRecordAllocateCommandBuffers(layer_data *dev_data, VkDevice device,
const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
auto pPool = GetCommandPoolNode(dev_data, pCreateInfo->commandPool);
if (pPool) {
for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
// Add command buffer to its commandPool map
pPool->commandBuffers.insert(pCommandBuffer[i]);
GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
// Add command buffer to map
dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
ResetCommandBufferState(dev_data, pCommandBuffer[i]);
pCB->createInfo = *pCreateInfo;
pCB->device = device;
}
}
}
VKAPI_ATTR VkResult VKAPI_CALL AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo,
VkCommandBuffer *pCommandBuffer) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
if (VK_SUCCESS == result) {
unique_lock_t lock(global_lock);
PostCallRecordAllocateCommandBuffers(dev_data, device, pCreateInfo, pCommandBuffer);
lock.unlock();
}
return result;
}
// Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) {
AddCommandBufferBinding(&fb_state->cb_bindings, {HandleToUint64(fb_state->framebuffer), kVulkanObjectTypeFramebuffer},
cb_state);
for (auto attachment : fb_state->attachments) {
auto view_state = attachment.view_state;
if (view_state) {
AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
}
}
}
VKAPI_ATTR VkResult VKAPI_CALL BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
// Validate command buffer level
GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
if (cb_node) {
// This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
if (cb_node->in_use.load()) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00049",
"Calling vkBeginCommandBuffer() on active command buffer %" PRIx64
" before it has completed. You must check command buffer fence before this call.",
HandleToUint64(commandBuffer));
}
ClearCmdBufAndMemReferences(dev_data, cb_node);
if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
// Secondary Command Buffer
const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
if (!pInfo) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00051",
"vkBeginCommandBuffer(): Secondary Command Buffer (0x%" PRIx64 ") must have inheritance info.",
HandleToUint64(commandBuffer));
} else {
if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
assert(pInfo->renderPass);
string errorString = "";
auto framebuffer = GetFramebufferState(dev_data, pInfo->framebuffer);
if (framebuffer) {
if (framebuffer->createInfo.renderPass != pInfo->renderPass) {
// renderPass that framebuffer was created with must be compatible with local renderPass
skip |= ValidateRenderPassCompatibility(
dev_data, "framebuffer", framebuffer->rp_state.get(), "command buffer",
GetRenderPassState(dev_data, pInfo->renderPass), "vkBeginCommandBuffer()",
"VUID-VkCommandBufferBeginInfo-flags-00055");
}
// Connect this framebuffer and its children to this cmdBuffer
AddFramebufferBinding(dev_data, cb_node, framebuffer);
}
}
if ((pInfo->occlusionQueryEnable == VK_FALSE ||
dev_data->enabled_features.core.occlusionQueryPrecise == VK_FALSE) &&
(pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
"VUID-vkBeginCommandBuffer-commandBuffer-00052",
"vkBeginCommandBuffer(): Secondary Command Buffer (0x%" PRIx64
") must not have VK_QUERY_CONTROL_PRECISE_BIT if occulusionQuery is disabled or the device "
"does not support precise occlusion queries.",
HandleToUint64(commandBuffer));
}
}
if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
auto renderPass = GetRenderPassState(dev_data, pInfo->renderPass);
if (renderPass) {
if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
"VUID-VkCommandBufferBeginInfo-flags-00054",
"vkBeginCommandBuffer(): Secondary Command Buffers (0x%" PRIx64
") must have a subpass index (%d) that is less than the number of subpasses (%d).",
HandleToUint64(commandBuffer), pInfo->subpass, renderPass->createInfo.subpassCount);
}
}
}
}
if (CB_RECORDING == cb_node->state) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00049",
"vkBeginCommandBuffer(): Cannot call Begin on command buffer (0x%" PRIx64
") in the RECORDING state. Must first call vkEndCommandBuffer().",
HandleToUint64(commandBuffer));
} else if (CB_RECORDED == cb_node->state || CB_INVALID_COMPLETE == cb_node->state) {
VkCommandPool cmdPool = cb_node->createInfo.commandPool;
auto pPool = GetCommandPoolNode(dev_data, cmdPool);
if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkBeginCommandBuffer-commandBuffer-00050",
"Call to vkBeginCommandBuffer() on command buffer (0x%" PRIx64
") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIx64
") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
HandleToUint64(commandBuffer), HandleToUint64(cmdPool));
}
ResetCommandBufferState(dev_data, commandBuffer);
}
// Set updated state here in case implicit reset occurs above
cb_node->state = CB_RECORDING;
cb_node->beginInfo = *pBeginInfo;
if (cb_node->beginInfo.pInheritanceInfo) {
cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo);
cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo;
            // If we are a secondary command buffer and inheriting, update the items we should inherit.
if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
(cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
cb_node->activeRenderPass = GetRenderPassState(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass);
cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass;
cb_node->activeFramebuffer = cb_node->beginInfo.pInheritanceInfo->framebuffer;
cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer);
}
}
}
lock.unlock();
if (skip) {
return VK_ERROR_VALIDATION_FAILED_EXT;
}
VkResult result = dev_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo);
return result;
}
static void PostCallRecordEndCommandBuffer(layer_data *dev_data, GLOBAL_CB_NODE *cb_state) {
// Cached validation is specific to a specific recording of a specific command buffer.
for (auto descriptor_set : cb_state->validated_descriptor_sets) {
descriptor_set->ClearCachedValidation(cb_state);
}
cb_state->validated_descriptor_sets.clear();
}
VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) ||
!(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
            // This needs spec clarification to update valid usage; see the comments in issue:
// https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/165
skip |= InsideRenderPass(dev_data, pCB, "vkEndCommandBuffer()", "VUID-vkEndCommandBuffer-commandBuffer-00060");
}
skip |= ValidateCmd(dev_data, pCB, CMD_ENDCOMMANDBUFFER, "vkEndCommandBuffer()");
for (auto query : pCB->activeQueries) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkEndCommandBuffer-commandBuffer-00061",
"Ending command buffer with in progress query: queryPool 0x%" PRIx64 ", index %d.",
HandleToUint64(query.pool), query.index);
}
}
if (!skip) {
lock.unlock();
auto result = dev_data->dispatch_table.EndCommandBuffer(commandBuffer);
lock.lock();
PostCallRecordEndCommandBuffer(dev_data, pCB);
if (VK_SUCCESS == result) {
pCB->state = CB_RECORDED;
}
return result;
} else {
return VK_ERROR_VALIDATION_FAILED_EXT;
}
}
static bool PreCallValidateResetCommandBuffer(layer_data *dev_data, VkCommandBuffer commandBuffer) {
bool skip = false;
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
VkCommandPool cmdPool = pCB->createInfo.commandPool;
auto pPool = GetCommandPoolNode(dev_data, cmdPool);
if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkResetCommandBuffer-commandBuffer-00046",
"Attempt to reset command buffer (0x%" PRIx64 ") created from command pool (0x%" PRIx64
") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
HandleToUint64(commandBuffer), HandleToUint64(cmdPool));
}
skip |= CheckCommandBufferInFlight(dev_data, pCB, "reset", "VUID-vkResetCommandBuffer-commandBuffer-00045");
return skip;
}
static void PostCallRecordResetCommandBuffer(layer_data *dev_data, VkCommandBuffer commandBuffer) {
ResetCommandBufferState(dev_data, commandBuffer);
}
VKAPI_ATTR VkResult VKAPI_CALL ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateResetCommandBuffer(dev_data, commandBuffer);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags);
if (VK_SUCCESS == result) {
lock.lock();
PostCallRecordResetCommandBuffer(dev_data, commandBuffer);
lock.unlock();
}
return result;
}
static bool PreCallValidateCmdBindPipeline(layer_data *dev_data, GLOBAL_CB_NODE *cb_state) {
bool skip = ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdBindPipeline()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdBindPipeline-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
// TODO: "VUID-vkCmdBindPipeline-pipelineBindPoint-00777" "VUID-vkCmdBindPipeline-pipelineBindPoint-00779" -- using
// ValidatePipelineBindPoint
return skip;
}
static void PreCallRecordCmdBindPipeline(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint pipelineBindPoint,
VkPipeline pipeline) {
auto pipe_state = GetPipelineState(dev_data, pipeline);
if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
cb_state->status &= ~cb_state->static_status;
cb_state->static_status = MakeStaticStateMask(pipe_state->graphicsPipelineCI.ptr()->pDynamicState);
cb_state->status |= cb_state->static_status;
}
cb_state->lastBound[pipelineBindPoint].pipeline_state = pipe_state;
SetPipelineState(pipe_state);
AddCommandBufferBinding(&pipe_state->cb_bindings, {HandleToUint64(pipeline), kVulkanObjectTypePipeline}, cb_state);
}
VKAPI_ATTR void VKAPI_CALL CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
VkPipeline pipeline) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
if (cb_state) {
skip |= PreCallValidateCmdBindPipeline(dev_data, cb_state);
PreCallRecordCmdBindPipeline(dev_data, cb_state, pipelineBindPoint, pipeline);
}
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
}
static bool PreCallValidateCmdSetViewport(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer) {
bool skip = ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdSetViewport()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetViewport-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, cb_state, CMD_SETVIEWPORT, "vkCmdSetViewport()");
if (cb_state->static_status & CBSTATUS_VIEWPORT_SET) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetViewport-None-01221",
"vkCmdSetViewport(): pipeline was created without VK_DYNAMIC_STATE_VIEWPORT flag..");
}
return skip;
}
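// Marks viewports [firstViewport, firstViewport + viewportCount) as set; e.g. firstViewport=1 with
// viewportCount=2 sets mask bits 0b110.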
static void PreCallRecordCmdSetViewport(GLOBAL_CB_NODE *cb_state, uint32_t firstViewport, uint32_t viewportCount) {
cb_state->viewportMask |= ((1u << viewportCount) - 1u) << firstViewport;
cb_state->status |= CBSTATUS_VIEWPORT_SET;
}
VKAPI_ATTR void VKAPI_CALL CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
const VkViewport *pViewports) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdSetViewport(dev_data, pCB, commandBuffer);
if (!skip) {
PreCallRecordCmdSetViewport(pCB, firstViewport, viewportCount);
}
}
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
}
static bool PreCallValidateCmdSetScissor(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer) {
bool skip = ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdSetScissor()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetScissor-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, cb_state, CMD_SETSCISSOR, "vkCmdSetScissor()");
if (cb_state->static_status & CBSTATUS_SCISSOR_SET) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetScissor-None-00590",
"vkCmdSetScissor(): pipeline was created without VK_DYNAMIC_STATE_SCISSOR flag..");
}
return skip;
}
static void PreCallRecordCmdSetScissor(GLOBAL_CB_NODE *cb_state, uint32_t firstScissor, uint32_t scissorCount) {
cb_state->scissorMask |= ((1u << scissorCount) - 1u) << firstScissor;
cb_state->status |= CBSTATUS_SCISSOR_SET;
}
VKAPI_ATTR void VKAPI_CALL CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
const VkRect2D *pScissors) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdSetScissor(dev_data, pCB, commandBuffer);
if (!skip) {
PreCallRecordCmdSetScissor(pCB, firstScissor, scissorCount);
}
}
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
}
static bool PreCallValidateCmdSetLineWidth(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer) {
bool skip = ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdSetLineWidth()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetLineWidth-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, cb_state, CMD_SETLINEWIDTH, "vkCmdSetLineWidth()");
if (cb_state->static_status & CBSTATUS_LINE_WIDTH_SET) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetLineWidth-None-00787",
"vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH flag.");
}
return skip;
}
static void PreCallRecordCmdSetLineWidth(GLOBAL_CB_NODE *cb_state) { cb_state->status |= CBSTATUS_LINE_WIDTH_SET; }
VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdSetLineWidth(dev_data, pCB, commandBuffer);
if (!skip) {
PreCallRecordCmdSetLineWidth(pCB);
}
}
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth);
}
static bool PreCallValidateCmdSetDepthBias(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer,
float depthBiasClamp) {
bool skip = ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdSetDepthBias()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetDepthBias-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, cb_state, CMD_SETDEPTHBIAS, "vkCmdSetDepthBias()");
if (cb_state->static_status & CBSTATUS_DEPTH_BIAS_SET) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetDepthBias-None-00789",
"vkCmdSetDepthBias(): pipeline was created without VK_DYNAMIC_STATE_DEPTH_BIAS flag..");
}
if ((depthBiasClamp != 0.0) && (!dev_data->enabled_features.core.depthBiasClamp)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetDepthBias-depthBiasClamp-00790",
"vkCmdSetDepthBias(): the depthBiasClamp device feature is disabled: the depthBiasClamp parameter must "
"be set to 0.0.");
}
return skip;
}
static void PreCallRecordCmdSetDepthBias(GLOBAL_CB_NODE *cb_state) { cb_state->status |= CBSTATUS_DEPTH_BIAS_SET; }
VKAPI_ATTR void VKAPI_CALL CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
float depthBiasSlopeFactor) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdSetDepthBias(dev_data, pCB, commandBuffer, depthBiasClamp);
if (!skip) {
PreCallRecordCmdSetDepthBias(pCB);
}
}
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
}
}
static bool PreCallValidateCmdSetBlendConstants(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer) {
bool skip = ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdSetBlendConstants()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetBlendConstants-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, cb_state, CMD_SETBLENDCONSTANTS, "vkCmdSetBlendConstants()");
if (cb_state->static_status & CBSTATUS_BLEND_CONSTANTS_SET) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetBlendConstants-None-00612",
"vkCmdSetBlendConstants(): pipeline was created without VK_DYNAMIC_STATE_BLEND_CONSTANTS flag..");
}
return skip;
}
static void PreCallRecordCmdSetBlendConstants(GLOBAL_CB_NODE *cb_state) { cb_state->status |= CBSTATUS_BLEND_CONSTANTS_SET; }
VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdSetBlendConstants(dev_data, pCB, commandBuffer);
if (!skip) {
PreCallRecordCmdSetBlendConstants(pCB);
}
}
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants);
}
static bool PreCallValidateCmdSetDepthBounds(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer) {
bool skip = ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdSetDepthBounds()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetDepthBounds-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, cb_state, CMD_SETDEPTHBOUNDS, "vkCmdSetDepthBounds()");
if (cb_state->static_status & CBSTATUS_DEPTH_BOUNDS_SET) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetDepthBounds-None-00599",
"vkCmdSetDepthBounds(): pipeline was created without VK_DYNAMIC_STATE_DEPTH_BOUNDS flag..");
}
return skip;
}
static void PreCallRecordCmdSetDepthBounds(GLOBAL_CB_NODE *cb_state) { cb_state->status |= CBSTATUS_DEPTH_BOUNDS_SET; }
VKAPI_ATTR void VKAPI_CALL CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdSetDepthBounds(dev_data, pCB, commandBuffer);
if (!skip) {
PreCallRecordCmdSetDepthBounds(pCB);
}
}
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
}
static bool PreCallValidateCmdSetStencilCompareMask(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer) {
bool skip = ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdSetStencilCompareMask()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetStencilCompareMask-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, cb_state, CMD_SETSTENCILCOMPAREMASK, "vkCmdSetStencilCompareMask()");
if (cb_state->static_status & CBSTATUS_STENCIL_READ_MASK_SET) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetStencilCompareMask-None-00602",
"vkCmdSetStencilCompareMask(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK flag..");
}
return skip;
}
static void PreCallRecordCmdSetStencilCompareMask(GLOBAL_CB_NODE *cb_state) { cb_state->status |= CBSTATUS_STENCIL_READ_MASK_SET; }
VKAPI_ATTR void VKAPI_CALL CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
uint32_t compareMask) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdSetStencilCompareMask(dev_data, pCB, commandBuffer);
if (!skip) {
PreCallRecordCmdSetStencilCompareMask(pCB);
}
}
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
}
static bool PreCallValidateCmdSetStencilWriteMask(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer) {
bool skip = ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdSetStencilWriteMask()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetStencilWriteMask-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, cb_state, CMD_SETSTENCILWRITEMASK, "vkCmdSetStencilWriteMask()");
if (cb_state->static_status & CBSTATUS_STENCIL_WRITE_MASK_SET) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetStencilWriteMask-None-00603",
"vkCmdSetStencilWriteMask(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_WRITE_MASK flag..");
}
return skip;
}
static void PreCallRecordCmdSetStencilWriteMask(GLOBAL_CB_NODE *cb_state) { cb_state->status |= CBSTATUS_STENCIL_WRITE_MASK_SET; }
VKAPI_ATTR void VKAPI_CALL CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdSetStencilWriteMask(dev_data, pCB, commandBuffer);
if (!skip) {
PreCallRecordCmdSetStencilWriteMask(pCB);
}
}
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
}
static bool PreCallValidateCmdSetStencilReference(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer) {
bool skip = ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdSetStencilReference()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetStencilReference-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, cb_state, CMD_SETSTENCILREFERENCE, "vkCmdSetStencilReference()");
if (cb_state->static_status & CBSTATUS_STENCIL_REFERENCE_SET) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdSetStencilReference-None-00604",
"vkCmdSetStencilReference(): pipeline was created without VK_DYNAMIC_STATE_STENCIL_REFERENCE flag..");
}
return skip;
}
static void PreCallRecordCmdSetStencilReference(GLOBAL_CB_NODE *cb_state) { cb_state->status |= CBSTATUS_STENCIL_REFERENCE_SET; }
VKAPI_ATTR void VKAPI_CALL CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdSetStencilReference(dev_data, pCB, commandBuffer);
if (!skip) {
PreCallRecordCmdSetStencilReference(pCB);
}
}
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference);
}
// Update pipeline_layout bind points applying the "Pipeline Layout Compatibility" rules
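// The general scheme (as implemented below): each set index in a pipeline layout has a canonical "compat id"
// describing layout compatibility up to that index. Previously bound sets whose recorded compat id differs
// from the newly bound layout's id at the same index are "disturbed" and must be invalidated, both below
// first_set and past the end of the updated range; undisturbed bindings are left intact.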
static void UpdateLastBoundDescriptorSets(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
VkPipelineBindPoint pipeline_bind_point, const PIPELINE_LAYOUT_NODE *pipeline_layout,
uint32_t first_set, uint32_t set_count,
                                          const std::vector<cvdescriptorset::DescriptorSet *> &descriptor_sets,
uint32_t dynamic_offset_count, const uint32_t *p_dynamic_offsets) {
// Defensive
assert(set_count);
if (0 == set_count) return;
assert(pipeline_layout);
if (!pipeline_layout) return;
uint32_t required_size = first_set + set_count;
const uint32_t last_binding_index = required_size - 1;
assert(last_binding_index < pipeline_layout->compat_for_set.size());
// Some useful shorthand
auto &last_bound = cb_state->lastBound[pipeline_bind_point];
auto &bound_sets = last_bound.boundDescriptorSets;
auto &dynamic_offsets = last_bound.dynamicOffsets;
auto &bound_compat_ids = last_bound.compat_id_for_set;
auto &pipe_compat_ids = pipeline_layout->compat_for_set;
const uint32_t current_size = static_cast<uint32_t>(bound_sets.size());
assert(current_size == dynamic_offsets.size());
assert(current_size == bound_compat_ids.size());
// We need this three times in this function, but nowhere else
auto push_descriptor_cleanup = [&last_bound](const cvdescriptorset::DescriptorSet *ds) -> bool {
if (ds && ds->IsPushDescriptor()) {
assert(ds == last_bound.push_descriptor_set.get());
last_bound.push_descriptor_set = nullptr;
return true;
}
return false;
};
// Clean up the "disturbed" before and after the range to be set
if (required_size < current_size) {
if (bound_compat_ids[last_binding_index] != pipe_compat_ids[last_binding_index]) {
            // We're disturbing those after last; we'll shrink below, but first need to check for and clean up the push_descriptor
for (auto set_idx = required_size; set_idx < current_size; ++set_idx) {
if (push_descriptor_cleanup(bound_sets[set_idx])) break;
}
} else {
// We're not disturbing past last, so leave the upper binding data alone.
required_size = current_size;
}
}
// We resize if we need more set entries or if those past "last" are disturbed
if (required_size != current_size) {
// TODO: put these size tied things in a struct (touches many lines)
bound_sets.resize(required_size);
dynamic_offsets.resize(required_size);
bound_compat_ids.resize(required_size);
}
// For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
for (uint32_t set_idx = 0; set_idx < first_set; ++set_idx) {
if (bound_compat_ids[set_idx] != pipe_compat_ids[set_idx]) {
push_descriptor_cleanup(bound_sets[set_idx]);
bound_sets[set_idx] = nullptr;
dynamic_offsets[set_idx].clear();
bound_compat_ids[set_idx] = pipe_compat_ids[set_idx];
}
}
// Now update the bound sets with the input sets
const uint32_t *input_dynamic_offsets = p_dynamic_offsets; // "read" pointer for dynamic offset data
for (uint32_t input_idx = 0; input_idx < set_count; input_idx++) {
auto set_idx = input_idx + first_set; // set_idx is index within layout, input_idx is index within input descriptor sets
cvdescriptorset::DescriptorSet *descriptor_set = descriptor_sets[input_idx];
// Record binding (or push)
push_descriptor_cleanup(bound_sets[set_idx]);
bound_sets[set_idx] = descriptor_set;
bound_compat_ids[set_idx] = pipe_compat_ids[set_idx]; // compat ids are canonical *per* set index
if (descriptor_set) {
auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
// TODO: Add logic for tracking push_descriptor offsets (here or in caller)
if (set_dynamic_descriptor_count && input_dynamic_offsets) {
const uint32_t *end_offset = input_dynamic_offsets + set_dynamic_descriptor_count;
dynamic_offsets[set_idx] = std::vector<uint32_t>(input_dynamic_offsets, end_offset);
input_dynamic_offsets = end_offset;
assert(input_dynamic_offsets <= (p_dynamic_offsets + dynamic_offset_count));
} else {
dynamic_offsets[set_idx].clear();
}
if (!descriptor_set->IsPushDescriptor()) {
// Can't cache validation of push_descriptors
cb_state->validated_descriptor_sets.insert(descriptor_set);
}
}
}
}
// Update the bound state for the bind point, including the effects of incompatible pipeline layouts
static void PreCallRecordCmdBindDescriptorSets(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet,
uint32_t setCount, const VkDescriptorSet *pDescriptorSets,
uint32_t dynamicOffsetCount, const uint32_t *pDynamicOffsets) {
auto pipeline_layout = GetPipelineLayout(device_data, layout);
std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets;
descriptor_sets.reserve(setCount);
    // Construct a list of the descriptor sets
bool found_non_null = false;
for (uint32_t i = 0; i < setCount; i++) {
cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(device_data, pDescriptorSets[i]);
descriptor_sets.emplace_back(descriptor_set);
found_non_null |= descriptor_set != nullptr;
}
if (found_non_null) { // which implies setCount > 0
UpdateLastBoundDescriptorSets(device_data, cb_state, pipelineBindPoint, pipeline_layout, firstSet, setCount,
descriptor_sets, dynamicOffsetCount, pDynamicOffsets);
cb_state->lastBound[pipelineBindPoint].pipeline_layout = layout;
}
}
static bool PreCallValidateCmdBindDescriptorSets(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet,
uint32_t setCount, const VkDescriptorSet *pDescriptorSets,
uint32_t dynamicOffsetCount, const uint32_t *pDynamicOffsets) {
bool skip = false;
skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdBindDescriptorSets()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdBindDescriptorSets-commandBuffer-cmdpool");
skip |= ValidateCmd(device_data, cb_state, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
// Track total count of dynamic descriptor types to make sure we have an offset for each one
uint32_t total_dynamic_descriptors = 0;
string error_string = "";
uint32_t last_set_index = firstSet + setCount - 1;
if (last_set_index >= cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
cb_state->lastBound[pipelineBindPoint].boundDescriptorSets.resize(last_set_index + 1);
cb_state->lastBound[pipelineBindPoint].dynamicOffsets.resize(last_set_index + 1);
cb_state->lastBound[pipelineBindPoint].compat_id_for_set.resize(last_set_index + 1);
}
auto pipeline_layout = GetPipelineLayout(device_data, layout);
for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(device_data, pDescriptorSets[set_idx]);
if (descriptor_set) {
if (!descriptor_set->IsUpdated() && (descriptor_set->GetTotalDescriptorCount() != 0)) {
skip |= log_msg(
device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
HandleToUint64(pDescriptorSets[set_idx]), kVUID_Core_DrawState_DescriptorSetNotUpdated,
"Descriptor Set 0x%" PRIx64 " bound but it was never updated. You may want to either update it or not bind it.",
HandleToUint64(pDescriptorSets[set_idx]));
}
// Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
if (!VerifySetLayoutCompatibility(descriptor_set, pipeline_layout, set_idx + firstSet, error_string)) {
skip |=
log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
HandleToUint64(pDescriptorSets[set_idx]), "VUID-vkCmdBindDescriptorSets-pDescriptorSets-00358",
"descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout at index %u of "
"pipelineLayout 0x%" PRIx64 " due to: %s.",
set_idx, set_idx + firstSet, HandleToUint64(layout), error_string.c_str());
}
auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
if (set_dynamic_descriptor_count) {
// First make sure we won't overstep bounds of pDynamicOffsets array
if ((total_dynamic_descriptors + set_dynamic_descriptor_count) > dynamicOffsetCount) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(pDescriptorSets[set_idx]),
kVUID_Core_DrawState_InvalidDynamicOffsetCount,
"descriptorSet #%u (0x%" PRIx64
") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets array. "
"There must be one dynamic offset for each dynamic descriptor being bound.",
set_idx, HandleToUint64(pDescriptorSets[set_idx]), descriptor_set->GetDynamicDescriptorCount(),
(dynamicOffsetCount - total_dynamic_descriptors));
} else { // Validate dynamic offsets and Dynamic Offset Minimums
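                    // Each dynamic uniform/storage descriptor consumes one pDynamicOffsets entry in
                    // binding order; e.g. with a minUniformBufferOffsetAlignment of 256, each such
                    // offset must be a multiple of 256.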
uint32_t cur_dyn_offset = total_dynamic_descriptors;
for (uint32_t d = 0; d < descriptor_set->GetTotalDescriptorCount(); d++) {
if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
if (SafeModulo(pDynamicOffsets[cur_dyn_offset],
device_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) !=
0) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
"VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01971",
"vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
"device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64 ".",
cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
device_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
}
cur_dyn_offset++;
} else if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
if (SafeModulo(pDynamicOffsets[cur_dyn_offset],
device_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) !=
0) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
"VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01972",
"vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
"device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64 ".",
cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
device_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
}
cur_dyn_offset++;
}
}
// Keep running total of dynamic descriptor count to verify at the end
total_dynamic_descriptors += set_dynamic_descriptor_count;
}
}
} else {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
HandleToUint64(pDescriptorSets[set_idx]), kVUID_Core_DrawState_InvalidSet,
"Attempt to bind descriptor set 0x%" PRIx64 " that doesn't exist!",
HandleToUint64(pDescriptorSets[set_idx]));
}
}
// dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
if (total_dynamic_descriptors != dynamicOffsetCount) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359",
"Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount is %u. It should "
"exactly match the number of dynamic descriptors.",
setCount, total_dynamic_descriptors, dynamicOffsetCount);
}
return skip;
}
VKAPI_ATTR void VKAPI_CALL CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
const uint32_t *pDynamicOffsets) {
bool skip = false;
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
skip = PreCallValidateCmdBindDescriptorSets(device_data, cb_state, pipelineBindPoint, layout, firstSet, setCount,
pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
if (!skip) {
PreCallRecordCmdBindDescriptorSets(device_data, cb_state, pipelineBindPoint, layout, firstSet, setCount, pDescriptorSets,
dynamicOffsetCount, pDynamicOffsets);
lock.unlock();
device_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
} else {
lock.unlock();
}
}
// Validates that the supplied bind point is supported for the command buffer (vis. the command pool)
// Takes array of error codes as some of the VUID's (e.g. vkCmdBindPipeline) are written per bindpoint
// TODO add vkCmdBindPipeline bind_point validation using this call.
bool ValidatePipelineBindPoint(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
const char *func_name,
const std::array<std::string, VK_PIPELINE_BIND_POINT_RANGE_SIZE> &bind_errors) {
bool skip = false;
auto pool = GetCommandPoolNode(device_data, cb_state->createInfo.commandPool);
if (pool) { // The loss of a pool in a recording cmd is reported in DestroyCommandPool
static const VkQueueFlags flag_mask[VK_PIPELINE_BIND_POINT_RANGE_SIZE] = {VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT};
        const auto bind_point_index = bind_point - VK_PIPELINE_BIND_POINT_BEGIN_RANGE;  // enum's underlying type is unspecified, so use auto
const auto &qfp = GetPhysDevProperties(device_data)->queue_family_properties[pool->queueFamilyIndex];
if (0 == (qfp.queueFlags & flag_mask[bind_point_index])) {
const std::string error = bind_errors[bind_point_index];
auto cb_u64 = HandleToUint64(cb_state->commandBuffer);
auto cp_u64 = HandleToUint64(cb_state->createInfo.commandPool);
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
cb_u64, error,
"%s: CommandBuffer 0x%" PRIxLEAST64 " was allocated from VkCommandPool 0x%" PRIxLEAST64
" that does not support bindpoint %s.",
func_name, cb_u64, cp_u64, string_VkPipelineBindPoint(bind_point));
}
}
return skip;
}
static bool PreCallValidateCmdPushDescriptorSetKHR(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
const VkPipelineBindPoint bind_point, const VkPipelineLayout layout,
const uint32_t set, const uint32_t descriptor_write_count,
const VkWriteDescriptorSet *descriptor_writes, const char *func_name) {
bool skip = false;
skip |= ValidateCmd(device_data, cb_state, CMD_PUSHDESCRIPTORSETKHR, func_name);
skip |= ValidateCmdQueueFlags(device_data, cb_state, func_name, (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT),
"VUID-vkCmdPushDescriptorSetKHR-commandBuffer-cmdpool");
skip |= ValidatePipelineBindPoint(
device_data, cb_state, bind_point, func_name,
{{"VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363", "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"}});
auto layout_data = GetPipelineLayout(device_data, layout);
// Validate the set index points to a push descriptor set and is in range
if (layout_data) {
const auto &set_layouts = layout_data->set_layouts;
const auto layout_u64 = HandleToUint64(layout);
if (set < set_layouts.size()) {
const auto *dsl = set_layouts[set].get();
if (dsl && (0 == (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR))) {
                skip |=
log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, layout_u64, "VUID-vkCmdPushDescriptorSetKHR-set-00365",
"%s: Set index %" PRIu32
" does not match push descriptor set layout index for VkPipelineLayout 0x%" PRIxLEAST64 ".",
func_name, set, layout_u64);
}
} else {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,
layout_u64, "VUID-vkCmdPushDescriptorSetKHR-set-00364",
"%s: Set index %" PRIu32 " is outside of range for VkPipelineLayout 0x%" PRIxLEAST64 " (set < %" PRIu32
").",
func_name, set, layout_u64, static_cast<uint32_t>(set_layouts.size()));
}
}
return skip;
}
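// Unlike pool-allocated sets, the push descriptor set below is owned by the command buffer state via a
// unique_ptr; the push_descriptor_cleanup lambda in UpdateLastBoundDescriptorSets releases it whenever
// the binding is disturbed.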
static void PreCallRecordCmdPushDescriptorSetKHR(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set,
uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites) {
const auto &pipeline_layout = GetPipelineLayout(device_data, layout);
if (!pipeline_layout) return;
std::unique_ptr<cvdescriptorset::DescriptorSet> new_desc{
new cvdescriptorset::DescriptorSet(0, 0, pipeline_layout->set_layouts[set], 0, device_data)};
std::vector<cvdescriptorset::DescriptorSet *> descriptor_sets = {new_desc.get()};
UpdateLastBoundDescriptorSets(device_data, cb_state, pipelineBindPoint, pipeline_layout, set, 1, descriptor_sets, 0, nullptr);
cb_state->lastBound[pipelineBindPoint].push_descriptor_set = std::move(new_desc);
cb_state->lastBound[pipelineBindPoint].pipeline_layout = layout;
}
VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
auto cb_state = GetCBNode(device_data, commandBuffer);
bool skip = PreCallValidateCmdPushDescriptorSetKHR(device_data, cb_state, pipelineBindPoint, layout, set, descriptorWriteCount,
pDescriptorWrites, "vkCmdPushDescriptorSetKHR()");
if (!skip) {
PreCallRecordCmdPushDescriptorSetKHR(device_data, cb_state, pipelineBindPoint, layout, set, descriptorWriteCount,
pDescriptorWrites);
lock.unlock();
device_data->dispatch_table.CmdPushDescriptorSetKHR(commandBuffer, pipelineBindPoint, layout, set, descriptorWriteCount,
pDescriptorWrites);
}
}
static VkDeviceSize GetIndexAlignment(VkIndexType indexType) {
switch (indexType) {
case VK_INDEX_TYPE_UINT16:
return 2;
case VK_INDEX_TYPE_UINT32:
return 4;
default:
// Not a real index type. Express no alignment requirement here; we expect upper layer
// to have already picked up on the enum being nonsense.
return 1;
}
}
static bool PreCallValidateCmdBindIndexBuffer(layer_data *dev_data, BUFFER_STATE *buffer_state, GLOBAL_CB_NODE *cb_node,
VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkIndexType indexType) {
bool skip = ValidateBufferUsageFlags(dev_data, buffer_state, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, true,
"VUID-vkCmdBindIndexBuffer-buffer-00433", "vkCmdBindIndexBuffer()",
"VK_BUFFER_USAGE_INDEX_BUFFER_BIT");
skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBindIndexBuffer()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdBindIndexBuffer-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
skip |=
ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindIndexBuffer()", "VUID-vkCmdBindIndexBuffer-buffer-00434");
auto offset_align = GetIndexAlignment(indexType);
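    // e.g. VK_INDEX_TYPE_UINT32 requires 4-byte alignment, so an offset of 6 would fail the check below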
if (offset % offset_align) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdBindIndexBuffer-offset-00432",
"vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.", offset,
string_VkIndexType(indexType));
}
return skip;
}
static void PreCallRecordCmdBindIndexBuffer(BUFFER_STATE *buffer_state, GLOBAL_CB_NODE *cb_node, VkBuffer buffer,
VkDeviceSize offset, VkIndexType indexType) {
cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
cb_node->index_buffer_binding.buffer = buffer;
cb_node->index_buffer_binding.size = buffer_state->createInfo.size;
cb_node->index_buffer_binding.offset = offset;
cb_node->index_buffer_binding.index_type = indexType;
}
VKAPI_ATTR void VKAPI_CALL CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkIndexType indexType) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
auto buffer_state = GetBufferState(dev_data, buffer);
auto cb_node = GetCBNode(dev_data, commandBuffer);
assert(cb_node);
assert(buffer_state);
    skip |= PreCallValidateCmdBindIndexBuffer(dev_data, buffer_state, cb_node, commandBuffer, buffer, offset, indexType);
if (skip) return;
PreCallRecordCmdBindIndexBuffer(buffer_state, cb_node, buffer, offset, indexType);
lock.unlock();
dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
}
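// Each draw appends a snapshot of the currently bound vertex buffers to draw_data so that resource usage
// can be validated on a per-draw basis later on.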
static inline void UpdateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->draw_data.push_back(pCB->current_draw_data); }
static bool PreCallValidateCmdBindVertexBuffers(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, uint32_t bindingCount,
const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
bool skip = ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdBindVertexBuffers()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdBindVertexBuffers-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, cb_state, CMD_BINDVERTEXBUFFERS, "vkCmdBindVertexBuffers()");
for (uint32_t i = 0; i < bindingCount; ++i) {
auto buffer_state = GetBufferState(dev_data, pBuffers[i]);
assert(buffer_state);
skip |= ValidateBufferUsageFlags(dev_data, buffer_state, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true,
"VUID-vkCmdBindVertexBuffers-pBuffers-00627", "vkCmdBindVertexBuffers()",
"VK_BUFFER_USAGE_VERTEX_BUFFER_BIT");
skip |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindVertexBuffers()",
"VUID-vkCmdBindVertexBuffers-pBuffers-00628");
if (pOffsets[i] >= buffer_state->createInfo.size) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(buffer_state->buffer), "VUID-vkCmdBindVertexBuffers-pOffsets-00626",
"vkCmdBindVertexBuffers() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer.", pOffsets[i]);
}
}
return skip;
}
static void PreCallRecordCmdBindVertexBuffers(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount,
const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
uint32_t end = firstBinding + bindingCount;
if (pCB->current_draw_data.vertex_buffer_bindings.size() < end) {
pCB->current_draw_data.vertex_buffer_bindings.resize(end);
}
for (uint32_t i = 0; i < bindingCount; ++i) {
auto &vertex_buffer_binding = pCB->current_draw_data.vertex_buffer_bindings[i + firstBinding];
vertex_buffer_binding.buffer = pBuffers[i];
vertex_buffer_binding.offset = pOffsets[i];
}
}
VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
auto cb_node = GetCBNode(dev_data, commandBuffer);
assert(cb_node);
skip |= PreCallValidateCmdBindVertexBuffers(dev_data, cb_node, bindingCount, pBuffers, pOffsets);
if (skip) return;
PreCallRecordCmdBindVertexBuffers(cb_node, firstBinding, bindingCount, pBuffers, pOffsets);
lock.unlock();
dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
}
// Generic function to handle validation for all CmdDraw* type functions
static bool ValidateCmdDrawType(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
CMD_TYPE cmd_type, GLOBAL_CB_NODE **cb_state, const char *caller, VkQueueFlags queue_flags,
const std::string &queue_flag_code, const std::string &renderpass_msg_code,
const std::string &pipebound_msg_code, const std::string &dynamic_state_msg_code) {
bool skip = false;
*cb_state = GetCBNode(dev_data, cmd_buffer);
if (*cb_state) {
skip |= ValidateCmdQueueFlags(dev_data, *cb_state, caller, queue_flags, queue_flag_code);
skip |= ValidateCmd(dev_data, *cb_state, cmd_type, caller);
skip |= ValidateCmdBufDrawState(dev_data, *cb_state, cmd_type, indexed, bind_point, caller, pipebound_msg_code,
dynamic_state_msg_code);
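        // Graphics draws must be recorded inside an active render pass; compute dispatches must not be.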
skip |= (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
? OutsideRenderPass(dev_data, *cb_state, caller, renderpass_msg_code)
: InsideRenderPass(dev_data, *cb_state, caller, renderpass_msg_code);
}
return skip;
}
// Generic function to handle state update for all CmdDraw* and CmdDispatch* type functions
static void UpdateStateCmdDrawDispatchType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
UpdateDrawState(dev_data, cb_state, bind_point);
}
// Generic function to handle state update for all CmdDraw* type functions
static void UpdateStateCmdDrawType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
UpdateResourceTrackingOnDraw(cb_state);
cb_state->hasDrawCmd = true;
// Add descriptor image/CIS layouts to CB layout map
    auto &desc_sets = cb_state->lastBound[bind_point].boundDescriptorSets;
for (auto &desc : desc_sets) {
if (desc) {
desc->UpdateDSImageLayoutState(cb_state);
}
}
}
static bool PreCallValidateCmdDraw(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
GLOBAL_CB_NODE **cb_state, const char *caller) {
return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAW, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdDraw-commandBuffer-cmdpool", "VUID-vkCmdDraw-renderpass", "VUID-vkCmdDraw-None-00442",
"VUID-vkCmdDraw-None-00443");
}
static void PostCallRecordCmdDraw(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
}
VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
uint32_t firstVertex, uint32_t firstInstance) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = nullptr;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCmdDraw(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, "vkCmdDraw()");
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
lock.lock();
PostCallRecordCmdDraw(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
lock.unlock();
}
}
static bool PreCallValidateCmdDrawIndexed(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller,
uint32_t indexCount, uint32_t firstIndex) {
bool skip =
ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXED, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdDrawIndexed-commandBuffer-cmdpool", "VUID-vkCmdDrawIndexed-renderpass",
"VUID-vkCmdDrawIndexed-None-00461", "VUID-vkCmdDrawIndexed-None-00462");
if (!skip && ((*cb_state)->status & CBSTATUS_INDEX_BUFFER_BOUND)) {
unsigned int index_size = 0;
const auto &index_buffer_binding = (*cb_state)->index_buffer_binding;
if (index_buffer_binding.index_type == VK_INDEX_TYPE_UINT16) {
index_size = 2;
} else if (index_buffer_binding.index_type == VK_INDEX_TYPE_UINT32) {
index_size = 4;
}
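        // Last byte referenced by this draw: (firstIndex + indexCount) indices of index_size bytes
        // each, offset by the binding offset recorded at vkCmdBindIndexBuffer time.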
VkDeviceSize end_offset = (index_size * ((VkDeviceSize)firstIndex + indexCount)) + index_buffer_binding.offset;
if (end_offset > index_buffer_binding.size) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(index_buffer_binding.buffer), "VUID-vkCmdDrawIndexed-indexSize-00463",
"vkCmdDrawIndexed() index size (%d) * (firstIndex (%d) + indexCount (%d)) "
"+ binding offset (%" PRIuLEAST64 ") = an ending offset of %" PRIuLEAST64
" bytes, "
"which is greater than the index buffer size (%" PRIuLEAST64 ").",
index_size, firstIndex, indexCount, index_buffer_binding.offset, end_offset, index_buffer_binding.size);
}
}
return skip;
}
static void PostCallRecordCmdDrawIndexed(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
}
VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = nullptr;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCmdDrawIndexed(dev_data, commandBuffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
"vkCmdDrawIndexed()", indexCount, firstIndex);
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
lock.lock();
PostCallRecordCmdDrawIndexed(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
lock.unlock();
}
}
static bool PreCallValidateCmdDrawIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, BUFFER_STATE **buffer_state,
const char *caller) {
bool skip =
ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDIRECT, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdDrawIndirect-commandBuffer-cmdpool", "VUID-vkCmdDrawIndirect-renderpass",
"VUID-vkCmdDrawIndirect-None-00485", "VUID-vkCmdDrawIndirect-None-00486");
*buffer_state = GetBufferState(dev_data, buffer);
skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, "VUID-vkCmdDrawIndirect-buffer-00474");
// TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
// VkDrawIndirectCommand structures accessed by this command must be 0, which will require access to the contents of 'buffer'.
return skip;
}
static void PostCallRecordCmdDrawIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
BUFFER_STATE *buffer_state) {
UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
}
VKAPI_ATTR void VKAPI_CALL CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
uint32_t stride) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = nullptr;
BUFFER_STATE *buffer_state = nullptr;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCmdDrawIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
&buffer_state, "vkCmdDrawIndirect()");
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
lock.lock();
PostCallRecordCmdDrawIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
lock.unlock();
}
}
static bool PreCallValidateCmdDrawIndexedIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
BUFFER_STATE **buffer_state, const char *caller) {
bool skip = ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXEDINDIRECT, cb_state, caller,
VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdDrawIndexedIndirect-commandBuffer-cmdpool",
"VUID-vkCmdDrawIndexedIndirect-renderpass", "VUID-vkCmdDrawIndexedIndirect-None-00537",
"VUID-vkCmdDrawIndexedIndirect-None-00538");
*buffer_state = GetBufferState(dev_data, buffer);
skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, "VUID-vkCmdDrawIndexedIndirect-buffer-00526");
// TODO: If the drawIndirectFirstInstance feature is not enabled, all the firstInstance members of the
// VkDrawIndexedIndirectCommand structures accessed by this command must be 0, which will require access to the contents of
// 'buffer'.
return skip;
}
static void PostCallRecordCmdDrawIndexedIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
BUFFER_STATE *buffer_state) {
UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
}
VKAPI_ATTR void VKAPI_CALL CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
uint32_t count, uint32_t stride) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = nullptr;
BUFFER_STATE *buffer_state = nullptr;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCmdDrawIndexedIndirect(dev_data, commandBuffer, buffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS,
&cb_state, &buffer_state, "vkCmdDrawIndexedIndirect()");
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
lock.lock();
PostCallRecordCmdDrawIndexedIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
lock.unlock();
}
}
static bool PreCallValidateCmdDispatch(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCH, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdDispatch-commandBuffer-cmdpool", "VUID-vkCmdDispatch-renderpass",
"VUID-vkCmdDispatch-None-00391", kVUIDUndefined);
}
static void PostCallRecordCmdDispatch(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
}
VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = nullptr;
unique_lock_t lock(global_lock);
bool skip =
PreCallValidateCmdDispatch(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_COMPUTE, &cb_state, "vkCmdDispatch()");
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
lock.lock();
PostCallRecordCmdDispatch(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE);
lock.unlock();
}
}
static bool PreCallValidateCmdDispatchIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
BUFFER_STATE **buffer_state, const char *caller) {
bool skip =
ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCHINDIRECT, cb_state, caller, VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdDispatchIndirect-commandBuffer-cmdpool", "VUID-vkCmdDispatchIndirect-renderpass",
"VUID-vkCmdDispatchIndirect-None-00404", kVUIDUndefined);
*buffer_state = GetBufferState(dev_data, buffer);
skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, "VUID-vkCmdDispatchIndirect-buffer-00401");
return skip;
}
static void PostCallRecordCmdDispatchIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
BUFFER_STATE *buffer_state) {
UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
}
VKAPI_ATTR void VKAPI_CALL CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = nullptr;
BUFFER_STATE *buffer_state = nullptr;
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCmdDispatchIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_COMPUTE,
&cb_state, &buffer_state, "vkCmdDispatchIndirect()");
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
lock.lock();
PostCallRecordCmdDispatchIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE, buffer_state);
lock.unlock();
}
}
VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
uint32_t regionCount, const VkBufferCopy *pRegions) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
auto cb_node = GetCBNode(device_data, commandBuffer);
auto src_buffer_state = GetBufferState(device_data, srcBuffer);
auto dst_buffer_state = GetBufferState(device_data, dstBuffer);
if (cb_node && src_buffer_state && dst_buffer_state) {
bool skip = PreCallValidateCmdCopyBuffer(device_data, cb_node, src_buffer_state, dst_buffer_state);
if (!skip) {
PreCallRecordCmdCopyBuffer(device_data, cb_node, src_buffer_state, dst_buffer_state);
lock.unlock();
device_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
}
} else {
lock.unlock();
assert(0);
}
}
VKAPI_ATTR void VKAPI_CALL CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageCopy *pRegions) {
bool skip = false;
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
auto cb_node = GetCBNode(device_data, commandBuffer);
auto src_image_state = GetImageState(device_data, srcImage);
auto dst_image_state = GetImageState(device_data, dstImage);
if (cb_node && src_image_state && dst_image_state) {
skip = PreCallValidateCmdCopyImage(device_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions,
srcImageLayout, dstImageLayout);
if (!skip) {
PreCallRecordCmdCopyImage(device_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, srcImageLayout,
dstImageLayout);
lock.unlock();
device_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
pRegions);
}
} else {
lock.unlock();
assert(0);
}
}
// Validate that an image's sampleCount matches the requirement for a specific API call
bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
const char *location, const std::string &msgCode) {
bool skip = false;
if (image_state->createInfo.samples != sample_count) {
skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), msgCode,
"%s for image 0x%" PRIx64 " was created with a sample count of %s but must be %s.", location,
HandleToUint64(image_state->image), string_VkSampleCountFlagBits(image_state->createInfo.samples),
string_VkSampleCountFlagBits(sample_count));
}
return skip;
}
VKAPI_ATTR void VKAPI_CALL CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageBlit *pRegions, VkFilter filter) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
auto cb_node = GetCBNode(dev_data, commandBuffer);
auto src_image_state = GetImageState(dev_data, srcImage);
auto dst_image_state = GetImageState(dev_data, dstImage);
bool skip = PreCallValidateCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions,
srcImageLayout, dstImageLayout, filter);
if (!skip) {
PreCallRecordCmdBlitImage(dev_data, cb_node, src_image_state, dst_image_state, regionCount, pRegions, srcImageLayout,
dstImageLayout);
lock.unlock();
dev_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
pRegions, filter);
}
}
VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
VkImageLayout dstImageLayout, uint32_t regionCount,
const VkBufferImageCopy *pRegions) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = false;
auto cb_node = GetCBNode(device_data, commandBuffer);
auto src_buffer_state = GetBufferState(device_data, srcBuffer);
auto dst_image_state = GetImageState(device_data, dstImage);
if (cb_node && src_buffer_state && dst_image_state) {
skip = PreCallValidateCmdCopyBufferToImage(device_data, dstImageLayout, cb_node, src_buffer_state, dst_image_state,
regionCount, pRegions, "vkCmdCopyBufferToImage()");
} else {
lock.unlock();
assert(0);
// TODO: report VU01244 here, or put in object tracker?
}
if (!skip) {
PreCallRecordCmdCopyBufferToImage(device_data, cb_node, src_buffer_state, dst_image_state, regionCount, pRegions,
dstImageLayout);
lock.unlock();
device_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
}
}
VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
bool skip = false;
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
auto cb_node = GetCBNode(device_data, commandBuffer);
auto src_image_state = GetImageState(device_data, srcImage);
auto dst_buffer_state = GetBufferState(device_data, dstBuffer);
if (cb_node && src_image_state && dst_buffer_state) {
skip = PreCallValidateCmdCopyImageToBuffer(device_data, srcImageLayout, cb_node, src_image_state, dst_buffer_state,
regionCount, pRegions, "vkCmdCopyImageToBuffer()");
} else {
lock.unlock();
assert(0);
// TODO: report VU01262 here, or put in object tracker?
}
if (!skip) {
PreCallRecordCmdCopyImageToBuffer(device_data, cb_node, src_image_state, dst_buffer_state, regionCount, pRegions,
srcImageLayout);
lock.unlock();
device_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
}
}
static bool PreCallCmdUpdateBuffer(layer_data *device_data, const GLOBAL_CB_NODE *cb_state, const BUFFER_STATE *dst_buffer_state) {
bool skip = false;
skip |= ValidateMemoryIsBoundToBuffer(device_data, dst_buffer_state, "vkCmdUpdateBuffer()",
"VUID-vkCmdUpdateBuffer-dstBuffer-00035");
// Validate that DST buffer has correct usage flags set
skip |= ValidateBufferUsageFlags(device_data, dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
"VUID-vkCmdUpdateBuffer-dstBuffer-00034", "vkCmdUpdateBuffer()",
"VK_BUFFER_USAGE_TRANSFER_DST_BIT");
skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdUpdateBuffer()",
VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdUpdateBuffer-commandBuffer-cmdpool");
skip |= ValidateCmd(device_data, cb_state, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
skip |= InsideRenderPass(device_data, cb_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-renderpass");
return skip;
}
static void PostCallRecordCmdUpdateBuffer(layer_data *device_data, GLOBAL_CB_NODE *cb_state, BUFFER_STATE *dst_buffer_state) {
// Update bindings between buffer and cmd buffer
AddCommandBufferBindingBuffer(device_data, cb_state, dst_buffer_state);
}
VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize dataSize, const uint32_t *pData) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
auto cb_state = GetCBNode(dev_data, commandBuffer);
assert(cb_state);
auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
assert(dst_buff_state);
skip |= PreCallCmdUpdateBuffer(dev_data, cb_state, dst_buff_state);
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
lock.lock();
PostCallRecordCmdUpdateBuffer(dev_data, cb_state, dst_buff_state);
lock.unlock();
}
}
VKAPI_ATTR void VKAPI_CALL CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize size, uint32_t data) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
auto cb_node = GetCBNode(device_data, commandBuffer);
auto buffer_state = GetBufferState(device_data, dstBuffer);
if (cb_node && buffer_state) {
bool skip = PreCallValidateCmdFillBuffer(device_data, cb_node, buffer_state);
if (!skip) {
PreCallRecordCmdFillBuffer(device_data, cb_node, buffer_state);
lock.unlock();
device_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
}
} else {
lock.unlock();
assert(0);
}
}
VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
const VkClearAttachment *pAttachments, uint32_t rectCount,
const VkClearRect *pRects) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
{
lock_guard_t lock(global_lock);
skip = PreCallValidateCmdClearAttachments(dev_data, commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
}
if (!skip) dev_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
}
VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearColorValue *pColor, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCmdClearColorImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
if (!skip) {
PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
lock.unlock();
dev_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
}
}
VKAPI_ATTR void VKAPI_CALL CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCmdClearDepthStencilImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
if (!skip) {
PreCallRecordCmdClearImage(dev_data, commandBuffer, image, imageLayout, rangeCount, pRanges);
lock.unlock();
dev_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
}
}
VKAPI_ATTR void VKAPI_CALL CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageResolve *pRegions) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
auto cb_node = GetCBNode(dev_data, commandBuffer);
auto src_image_state = GetImageState(dev_data, srcImage);
auto dst_image_state = GetImageState(dev_data, dstImage);
bool skip = PreCallValidateCmdResolveImage(dev_data, cb_node, src_image_state, srcImageLayout, dst_image_state, dstImageLayout,
regionCount, pRegions);
if (!skip) {
PreCallRecordCmdResolveImage(dev_data, cb_node, src_image_state, dst_image_state);
lock.unlock();
dev_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
pRegions);
}
}
VKAPI_ATTR void VKAPI_CALL GetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource,
VkSubresourceLayout *pLayout) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetImageSubresourceLayout(device_data, image, pSubresource);
if (!skip) {
lock.unlock();
device_data->dispatch_table.GetImageSubresourceLayout(device, image, pSubresource, pLayout);
}
}
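// Submit-time callback queued by CmdSetEvent/CmdResetEvent: records the signaled stage mask on
// both the command buffer and the submitting queue. Always returns false (it never skips the call).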
bool SetEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
pCB->eventToStageMap[event] = stageMask;
}
auto queue_data = dev_data->queueMap.find(queue);
if (queue_data != dev_data->queueMap.end()) {
queue_data->second.eventToStageMap[event] = stageMask;
}
return false;
}
static bool PreCallValidateCmdSetEvent(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineStageFlags stageMask) {
bool skip = ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdSetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdSetEvent-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, cb_state, CMD_SETEVENT, "vkCmdSetEvent()");
skip |= InsideRenderPass(dev_data, cb_state, "vkCmdSetEvent()", "VUID-vkCmdSetEvent-renderpass");
skip |= ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdSetEvent()", "VUID-vkCmdSetEvent-stageMask-01150",
"VUID-vkCmdSetEvent-stageMask-01151");
return skip;
}
static void PreCallRecordCmdSetEvent(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer, VkEvent event,
VkPipelineStageFlags stageMask) {
auto event_state = GetEventNode(dev_data, event);
if (event_state) {
AddCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, cb_state);
event_state->cb_bindings.insert(cb_state);
}
cb_state->events.push_back(event);
if (!cb_state->waitedEvents.count(event)) {
cb_state->writeEventsBeforeWait.push_back(event);
}
cb_state->eventUpdates.emplace_back([=](VkQueue q) { return SetEventStageMask(q, commandBuffer, event, stageMask); });
}
VKAPI_ATTR void VKAPI_CALL CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdSetEvent(dev_data, pCB, stageMask);
PreCallRecordCmdSetEvent(dev_data, pCB, commandBuffer, event, stageMask);
}
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask);
}
static bool PreCallValidateCmdResetEvent(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineStageFlags stageMask) {
bool skip = ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdResetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdResetEvent-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, cb_state, CMD_RESETEVENT, "vkCmdResetEvent()");
skip |= InsideRenderPass(dev_data, cb_state, "vkCmdResetEvent()", "VUID-vkCmdResetEvent-renderpass");
skip |= ValidateStageMaskGsTsEnables(dev_data, stageMask, "vkCmdResetEvent()", "VUID-vkCmdResetEvent-stageMask-01154",
"VUID-vkCmdResetEvent-stageMask-01155");
return skip;
}
static void PreCallRecordCmdResetEvent(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer,
VkEvent event) {
auto event_state = GetEventNode(dev_data, event);
if (event_state) {
AddCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(event), kVulkanObjectTypeEvent}, cb_state);
event_state->cb_bindings.insert(cb_state);
}
cb_state->events.push_back(event);
if (!cb_state->waitedEvents.count(event)) {
cb_state->writeEventsBeforeWait.push_back(event);
}
// TODO : Add check for "VUID-vkResetEvent-event-01148"
cb_state->eventUpdates.emplace_back(
[=](VkQueue q) { return SetEventStageMask(q, commandBuffer, event, VkPipelineStageFlags(0)); });
}
VKAPI_ATTR void VKAPI_CALL CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdResetEvent(dev_data, pCB, stageMask);
PreCallRecordCmdResetEvent(dev_data, pCB, commandBuffer, event);
}
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask);
}
// Return input pipeline stage flags, expanded for individual bits if VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT is set
static VkPipelineStageFlags ExpandPipelineStageFlags(VkPipelineStageFlags inflags) {
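    // Nothing to expand unless the ALL_GRAPHICS meta-bit is present.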
if (~inflags & VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT) return inflags;
return (inflags & ~VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT) |
(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
}
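// Example: ExpandPipelineStageFlags(VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT)
// keeps the transfer bit and replaces ALL_GRAPHICS with every individual graphics stage listed above.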
// Verify image barrier image state, and that the barrier's image is consistent with the framebuffer image
static bool ValidateImageBarrierImage(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE const *cb_state,
VkFramebuffer framebuffer, uint32_t active_subpass, const safe_VkSubpassDescription &sub_desc,
uint64_t rp_handle, uint32_t img_index, const VkImageMemoryBarrier &img_barrier) {
bool skip = false;
const auto &fb_state = GetFramebufferState(device_data, framebuffer);
assert(fb_state);
const auto img_bar_image = img_barrier.image;
bool image_match = false;
    bool sub_image_found = false;  // Tracks whether the active subpass references the matched attachment
VkImageLayout sub_image_layout = VK_IMAGE_LAYOUT_UNDEFINED;
uint32_t attach_index = 0;
uint32_t index_count = 0;
// Verify that a framebuffer image matches barrier image
for (const auto &fb_attach : fb_state->attachments) {
if (img_bar_image == fb_attach.image) {
image_match = true;
attach_index = index_count;
break;
}
index_count++;
}
if (image_match) { // Make sure subpass is referring to matching attachment
if (sub_desc.pDepthStencilAttachment && sub_desc.pDepthStencilAttachment->attachment == attach_index) {
sub_image_layout = sub_desc.pDepthStencilAttachment->layout;
sub_image_found = true;
} else {
for (uint32_t j = 0; j < sub_desc.colorAttachmentCount; ++j) {
if (sub_desc.pColorAttachments && sub_desc.pColorAttachments[j].attachment == attach_index) {
sub_image_layout = sub_desc.pColorAttachments[j].layout;
sub_image_found = true;
break;
} else if (sub_desc.pResolveAttachments && sub_desc.pResolveAttachments[j].attachment == attach_index) {
sub_image_layout = sub_desc.pResolveAttachments[j].layout;
sub_image_found = true;
break;
}
}
}
if (!sub_image_found) {
skip |= log_msg(
device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle,
"VUID-vkCmdPipelineBarrier-image-01179",
"%s: Barrier pImageMemoryBarriers[%d].image (0x%" PRIx64
") is not referenced by the VkSubpassDescription for active subpass (%d) of current renderPass (0x%" PRIx64 ").",
funcName, img_index, HandleToUint64(img_bar_image), active_subpass, rp_handle);
}
} else { // !image_match
auto const fb_handle = HandleToUint64(fb_state->framebuffer);
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
fb_handle, "VUID-vkCmdPipelineBarrier-image-01179",
"%s: Barrier pImageMemoryBarriers[%d].image (0x%" PRIx64
") does not match an image from the current framebuffer (0x%" PRIx64 ").",
funcName, img_index, HandleToUint64(img_bar_image), fb_handle);
}
if (img_barrier.oldLayout != img_barrier.newLayout) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-oldLayout-01181",
"%s: As the Image Barrier for image 0x%" PRIx64
" is being executed within a render pass instance, oldLayout must equal newLayout yet they are %s and %s.",
funcName, HandleToUint64(img_barrier.image), string_VkImageLayout(img_barrier.oldLayout),
string_VkImageLayout(img_barrier.newLayout));
} else {
if (sub_image_found && sub_image_layout != img_barrier.oldLayout) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle, "VUID-vkCmdPipelineBarrier-oldLayout-01180",
"%s: Barrier pImageMemoryBarriers[%d].image (0x%" PRIx64
") is referenced by the VkSubpassDescription for active subpass (%d) of current renderPass (0x%" PRIx64
") as having layout %s, but image barrier has layout %s.",
funcName, img_index, HandleToUint64(img_bar_image), active_subpass, rp_handle,
string_VkImageLayout(img_barrier.oldLayout), string_VkImageLayout(sub_image_layout));
}
}
return skip;
}
// Validate image barriers within a renderPass
static bool ValidateRenderPassImageBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
uint32_t active_subpass, const safe_VkSubpassDescription &sub_desc, uint64_t rp_handle,
const VkSubpassDependency *dependencies, const std::vector<uint32_t> &self_dependencies,
uint32_t image_mem_barrier_count, const VkImageMemoryBarrier *image_barriers) {
bool skip = false;
for (uint32_t i = 0; i < image_mem_barrier_count; ++i) {
const auto &img_barrier = image_barriers[i];
const auto &img_src_access_mask = img_barrier.srcAccessMask;
const auto &img_dst_access_mask = img_barrier.dstAccessMask;
bool access_mask_match = false;
for (const auto self_dep_index : self_dependencies) {
const auto &sub_dep = dependencies[self_dep_index];
access_mask_match = (img_src_access_mask == (sub_dep.srcAccessMask & img_src_access_mask)) &&
(img_dst_access_mask == (sub_dep.dstAccessMask & img_dst_access_mask));
if (access_mask_match) break;
}
if (!access_mask_match) {
std::stringstream self_dep_ss;
stream_join(self_dep_ss, ", ", self_dependencies);
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02024",
"%s: Barrier pImageMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency "
"srcAccessMask of subpass %d of renderPass 0x%" PRIx64
". Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, i, img_src_access_mask, active_subpass, rp_handle, self_dep_ss.str().c_str());
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02024",
"%s: Barrier pImageMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency "
"dstAccessMask of subpass %d of renderPass 0x%" PRIx64
". Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, i, img_dst_access_mask, active_subpass, rp_handle, self_dep_ss.str().c_str());
}
if (VK_QUEUE_FAMILY_IGNORED != img_barrier.srcQueueFamilyIndex ||
VK_QUEUE_FAMILY_IGNORED != img_barrier.dstQueueFamilyIndex) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle, "VUID-vkCmdPipelineBarrier-srcQueueFamilyIndex-01182",
"%s: Barrier pImageMemoryBarriers[%d].srcQueueFamilyIndex is %d and "
"pImageMemoryBarriers[%d].dstQueueFamilyIndex is %d but both must be VK_QUEUE_FAMILY_IGNORED.",
funcName, i, img_barrier.srcQueueFamilyIndex, i, img_barrier.dstQueueFamilyIndex);
}
// Secondary CBs can have null framebuffer so queue up validation in that case 'til FB is known
if (VK_NULL_HANDLE == cb_state->activeFramebuffer) {
assert(VK_COMMAND_BUFFER_LEVEL_SECONDARY == cb_state->createInfo.level);
            // Secondary CB recorded without a framebuffer: defer validation until the framebuffer is known
cb_state->cmd_execute_commands_functions.emplace_back([=](GLOBAL_CB_NODE *primary_cb, VkFramebuffer fb) {
return ValidateImageBarrierImage(device_data, funcName, cb_state, fb, active_subpass, sub_desc, rp_handle, i,
img_barrier);
});
} else {
skip |= ValidateImageBarrierImage(device_data, funcName, cb_state, cb_state->activeFramebuffer, active_subpass,
sub_desc, rp_handle, i, img_barrier);
}
}
return skip;
}
// Validate VUs for Pipeline Barriers that are within a renderPass
// Pre: cb_state->activeRenderPass must be a pointer to valid renderPass state
static bool ValidateRenderPassPipelineBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask,
VkDependencyFlags dependency_flags, uint32_t mem_barrier_count,
const VkMemoryBarrier *mem_barriers, uint32_t buffer_mem_barrier_count,
const VkBufferMemoryBarrier *buffer_mem_barriers, uint32_t image_mem_barrier_count,
const VkImageMemoryBarrier *image_barriers) {
bool skip = false;
const auto rp_state = cb_state->activeRenderPass;
const auto active_subpass = cb_state->activeSubpass;
auto rp_handle = HandleToUint64(rp_state->renderPass);
const auto &self_dependencies = rp_state->self_dependencies[active_subpass];
const auto &dependencies = rp_state->createInfo.pDependencies;
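    // A pipeline barrier recorded inside a render pass is only legal if the active subpass declares
    // a self-dependency (a VkSubpassDependency with srcSubpass == dstSubpass == active_subpass).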
if (self_dependencies.size() == 0) {
skip |=
log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle,
"VUID-vkCmdPipelineBarrier-pDependencies-02024",
"%s: Barriers cannot be set during subpass %d of renderPass 0x%" PRIx64 " with no self-dependency specified.",
funcName, active_subpass, rp_handle);
} else {
// Grab ref to current subpassDescription up-front for use below
const auto &sub_desc = rp_state->createInfo.pSubpasses[active_subpass];
// Look for matching mask in any self-dependency
bool stage_mask_match = false;
for (const auto self_dep_index : self_dependencies) {
const auto &sub_dep = dependencies[self_dep_index];
const auto &sub_src_stage_mask = ExpandPipelineStageFlags(sub_dep.srcStageMask);
const auto &sub_dst_stage_mask = ExpandPipelineStageFlags(sub_dep.dstStageMask);
stage_mask_match = ((sub_src_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
(src_stage_mask == (sub_src_stage_mask & src_stage_mask))) &&
((sub_dst_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
(dst_stage_mask == (sub_dst_stage_mask & dst_stage_mask)));
if (stage_mask_match) break;
}
if (!stage_mask_match) {
std::stringstream self_dep_ss;
stream_join(self_dep_ss, ", ", self_dependencies);
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02024",
"%s: Barrier srcStageMask(0x%X) is not a subset of VkSubpassDependency srcStageMask of any "
"self-dependency of subpass %d of renderPass 0x%" PRIx64
" for which dstStageMask is also a subset. "
"Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, src_stage_mask, active_subpass, rp_handle, self_dep_ss.str().c_str());
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02024",
"%s: Barrier dstStageMask(0x%X) is not a subset of VkSubpassDependency dstStageMask of any "
"self-dependency of subpass %d of renderPass 0x%" PRIx64
" for which srcStageMask is also a subset. "
"Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, dst_stage_mask, active_subpass, rp_handle, self_dep_ss.str().c_str());
}
if (0 != buffer_mem_barrier_count) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle, "VUID-vkCmdPipelineBarrier-bufferMemoryBarrierCount-01178",
"%s: bufferMemoryBarrierCount is non-zero (%d) for subpass %d of renderPass 0x%" PRIx64 ".", funcName,
buffer_mem_barrier_count, active_subpass, rp_handle);
}
for (uint32_t i = 0; i < mem_barrier_count; ++i) {
const auto &mb_src_access_mask = mem_barriers[i].srcAccessMask;
const auto &mb_dst_access_mask = mem_barriers[i].dstAccessMask;
bool access_mask_match = false;
for (const auto self_dep_index : self_dependencies) {
const auto &sub_dep = dependencies[self_dep_index];
access_mask_match = (mb_src_access_mask == (sub_dep.srcAccessMask & mb_src_access_mask)) &&
(mb_dst_access_mask == (sub_dep.dstAccessMask & mb_dst_access_mask));
if (access_mask_match) break;
}
if (!access_mask_match) {
std::stringstream self_dep_ss;
stream_join(self_dep_ss, ", ", self_dependencies);
skip |= log_msg(
device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle,
"VUID-vkCmdPipelineBarrier-pDependencies-02024",
"%s: Barrier pMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency srcAccessMask "
"for any self-dependency of subpass %d of renderPass 0x%" PRIx64
" for which dstAccessMask is also a subset. "
"Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, i, mb_src_access_mask, active_subpass, rp_handle, self_dep_ss.str().c_str());
skip |= log_msg(
device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, rp_handle,
"VUID-vkCmdPipelineBarrier-pDependencies-02024",
"%s: Barrier pMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency dstAccessMask "
"for any self-dependency of subpass %d of renderPass 0x%" PRIx64
" for which srcAccessMask is also a subset. "
"Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, i, mb_dst_access_mask, active_subpass, rp_handle, self_dep_ss.str().c_str());
}
}
skip |= ValidateRenderPassImageBarriers(device_data, funcName, cb_state, active_subpass, sub_desc, rp_handle, dependencies,
self_dependencies, image_mem_barrier_count, image_barriers);
bool flag_match = false;
for (const auto self_dep_index : self_dependencies) {
const auto &sub_dep = dependencies[self_dep_index];
flag_match = sub_dep.dependencyFlags == dependency_flags;
if (flag_match) break;
}
if (!flag_match) {
std::stringstream self_dep_ss;
stream_join(self_dep_ss, ", ", self_dependencies);
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02024",
"%s: dependencyFlags param (0x%X) does not equal VkSubpassDependency dependencyFlags value for any "
"self-dependency of subpass %d of renderPass 0x%" PRIx64
". Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, dependency_flags, cb_state->activeSubpass, rp_handle, self_dep_ss.str().c_str());
}
}
return skip;
}
// Array to mask individual accessMask to corresponding stageMask
// accessMask active bit position (0-18) maps to index
const static VkPipelineStageFlags AccessMaskToPipeStage[20] = {
// VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
// VK_ACCESS_INDEX_READ_BIT = 1
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
// VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 2
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
// VK_ACCESS_UNIFORM_READ_BIT = 3
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
// VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 4
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
// VK_ACCESS_SHADER_READ_BIT = 5
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
// VK_ACCESS_SHADER_WRITE_BIT = 6
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
// VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 7
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
// VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 8
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
// VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 9
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
// VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 10
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
// VK_ACCESS_TRANSFER_READ_BIT = 11
VK_PIPELINE_STAGE_TRANSFER_BIT,
// VK_ACCESS_TRANSFER_WRITE_BIT = 12
VK_PIPELINE_STAGE_TRANSFER_BIT,
// VK_ACCESS_HOST_READ_BIT = 13
VK_PIPELINE_STAGE_HOST_BIT,
// VK_ACCESS_HOST_WRITE_BIT = 14
VK_PIPELINE_STAGE_HOST_BIT,
// VK_ACCESS_MEMORY_READ_BIT = 15
VK_ACCESS_FLAG_BITS_MAX_ENUM, // Always match
// VK_ACCESS_MEMORY_WRITE_BIT = 16
VK_ACCESS_FLAG_BITS_MAX_ENUM, // Always match
// VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX = 17
VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
// VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX = 18
VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
};
// Verify that all bits of access_mask are supported by the src_stage_mask
static bool ValidateAccessMaskPipelineStage(VkAccessFlags access_mask, VkPipelineStageFlags stage_mask) {
    // Early out if ALL_COMMANDS is set, or access_mask is zero
if ((stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) || (0 == access_mask)) return true;
stage_mask = ExpandPipelineStageFlags(stage_mask);
int index = 0;
// for each of the set bits in access_mask, make sure that supporting stage mask bit(s) are set
while (access_mask) {
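        // u_ffs returns the 1-based position of the lowest set bit, so subtract one for the table index.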
index = (u_ffs(access_mask) - 1);
assert(index >= 0);
// Must have "!= 0" compare to prevent warning from MSVC
if ((AccessMaskToPipeStage[index] & stage_mask) == 0) return false; // early out
access_mask &= ~(1 << index); // Mask off bit that's been checked
}
return true;
}
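// Example: access_mask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT with stage_mask = VK_PIPELINE_STAGE_TRANSFER_BIT
// fails: table entry 2 maps vertex attribute reads to VERTEX_INPUT, which the stage mask does not include.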
namespace barrier_queue_families {
enum VuIndex {
kSrcOrDstMustBeIgnore,
kSpecialOrIgnoreOnly,
kSrcIgnoreRequiresDstIgnore,
kDstValidOrSpecialIfNotIgnore,
kSrcValidOrSpecialIfNotIgnore,
kSrcAndDestMustBeIgnore,
kBothIgnoreOrBothValid,
kSubmitQueueMustMatchSrcOrDst
};
static const char *vu_summary[] = {"Source or destination queue family must be ignored.",
"Source or destination queue family must be special or ignored.",
"Destination queue family must be ignored if source queue family is.",
"Destination queue family must be valid, ignored, or special.",
"Source queue family must be valid, ignored, or special.",
"Source and destination queue family must both be ignored.",
"Source and destination queue family must both be ignore or both valid.",
"Source or destination queue family must match submit queue family, if not ignored."};
static const std::string image_error_codes[] = {
"VUID-VkImageMemoryBarrier-image-01381", // kSrcOrDstMustBeIgnore
"VUID-VkImageMemoryBarrier-image-01766", // kSpecialOrIgnoreOnly
"VUID-VkImageMemoryBarrier-image-01201", // kSrcIgnoreRequiresDstIgnore
"VUID-VkImageMemoryBarrier-image-01768", // kDstValidOrSpecialIfNotIgnore
"VUID-VkImageMemoryBarrier-image-01767", // kSrcValidOrSpecialIfNotIgnore
"VUID-VkImageMemoryBarrier-image-01199", // kSrcAndDestMustBeIgnore
"VUID-VkImageMemoryBarrier-image-01200", // kBothIgnoreOrBothValid
"VUID-VkImageMemoryBarrier-image-01205", // kSubmitQueueMustMatchSrcOrDst
};
static const std::string buffer_error_codes[] = {
"VUID-VkBufferMemoryBarrier-buffer-01191", // kSrcOrDstMustBeIgnore
"VUID-VkBufferMemoryBarrier-buffer-01763", // kSpecialOrIgnoreOnly
"VUID-VkBufferMemoryBarrier-buffer-01193", // kSrcIgnoreRequiresDstIgnore
"VUID-VkBufferMemoryBarrier-buffer-01765", // kDstValidOrSpecialIfNotIgnore
"VUID-VkBufferMemoryBarrier-buffer-01764", // kSrcValidOrSpecialIfNotIgnore
"VUID-VkBufferMemoryBarrier-buffer-01190", // kSrcAndDestMustBeIgnore
"VUID-VkBufferMemoryBarrier-buffer-01192", // kBothIgnoreOrBothValid
"VUID-VkBufferMemoryBarrier-buffer-01196", // kSubmitQueueMustMatchSrcOrDst
};
class ValidatorState {
public:
ValidatorState(const layer_data *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
const uint64_t barrier_handle64, const VkSharingMode sharing_mode, const VulkanObjectType object_type,
const std::string *val_codes)
: report_data_(device_data->report_data),
func_name_(func_name),
cb_handle64_(HandleToUint64(cb_state->commandBuffer)),
barrier_handle64_(barrier_handle64),
sharing_mode_(sharing_mode),
object_type_(object_type),
val_codes_(val_codes),
limit_(static_cast<uint32_t>(device_data->phys_dev_properties.queue_family_properties.size())),
mem_ext_(device_data->extensions.vk_khr_external_memory) {}
    // Create a validator state from an image state, reducing the image-specific data to the generic form.
ValidatorState(const layer_data *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
const VkImageMemoryBarrier *barrier, const IMAGE_STATE *state)
: ValidatorState(device_data, func_name, cb_state, HandleToUint64(barrier->image), state->createInfo.sharingMode,
kVulkanObjectTypeImage, image_error_codes) {}
    // Create a validator state from a buffer state, reducing the buffer-specific data to the generic form.
    ValidatorState(const layer_data *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
                   const VkBufferMemoryBarrier *barrier, const BUFFER_STATE *state)
        : ValidatorState(device_data, func_name, cb_state, HandleToUint64(barrier->buffer), state->createInfo.sharingMode,
                         kVulkanObjectTypeBuffer, buffer_error_codes) {}
    // Log the message using boilerplate from the object state plus VU-specific information for the given vu_index.
    // One- and two-family overloads; in the single-family overload, param_name names the offending parameter.
bool LogMsg(VuIndex vu_index, uint32_t family, const char *param_name) const {
const std::string val_code = val_codes_[vu_index];
const char *annotation = GetFamilyAnnotation(family);
return log_msg(report_data_, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_handle64_,
val_code, "%s: Barrier using %s 0x%" PRIx64 " created with sharingMode %s, has %s %u%s. %s", func_name_,
GetTypeString(), barrier_handle64_, GetModeString(), param_name, family, annotation, vu_summary[vu_index]);
}
bool LogMsg(VuIndex vu_index, uint32_t src_family, uint32_t dst_family) const {
const std::string val_code = val_codes_[vu_index];
const char *src_annotation = GetFamilyAnnotation(src_family);
const char *dst_annotation = GetFamilyAnnotation(dst_family);
return log_msg(report_data_, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, cb_handle64_,
val_code,
"%s: Barrier using %s 0x%" PRIx64
" created with sharingMode %s, has srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
func_name_, GetTypeString(), barrier_handle64_, GetModeString(), src_family, src_annotation, dst_family,
dst_annotation, vu_summary[vu_index]);
}
// This abstract Vu can only be tested at submit time, thus we need a callback from the closure containing the needed
    // data. Note that the barrier state is copied into the closure, since the lambda's lifespan exceeds the
    // guarantees of validity for application input.
static bool ValidateAtQueueSubmit(const VkQueue queue, const layer_data *device_data, uint32_t src_family, uint32_t dst_family,
const ValidatorState &val) {
auto queue_data_it = device_data->queueMap.find(queue);
if (queue_data_it == device_data->queueMap.end()) return false;
uint32_t queue_family = queue_data_it->second.queueFamilyIndex;
if ((src_family != queue_family) && (dst_family != queue_family)) {
const std::string val_code = val.val_codes_[kSubmitQueueMustMatchSrcOrDst];
const char *src_annotation = val.GetFamilyAnnotation(src_family);
const char *dst_annotation = val.GetFamilyAnnotation(dst_family);
return log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
HandleToUint64(queue), val_code,
"%s: Barrier submitted to queue with family index %u, using %s 0x%" PRIx64
" created with sharingMode %s, has srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
"vkQueueSubmit", queue_family, val.GetTypeString(), val.barrier_handle64_, val.GetModeString(),
src_family, src_annotation, dst_family, dst_annotation, vu_summary[kSubmitQueueMustMatchSrcOrDst]);
}
return false;
}
// Logical helpers for semantic clarity
inline bool KhrExternalMem() const { return mem_ext_; }
inline bool IsValid(uint32_t queue_family) const { return (queue_family < limit_); }
inline bool IsValidOrSpecial(uint32_t queue_family) const {
return IsValid(queue_family) || (mem_ext_ && IsSpecial(queue_family));
}
inline bool IsIgnored(uint32_t queue_family) const { return queue_family == VK_QUEUE_FAMILY_IGNORED; }
// Helpers for LogMsg (and log_msg)
const char *GetModeString() const { return string_VkSharingMode(sharing_mode_); }
// Descriptive text for the various types of queue family index
const char *GetFamilyAnnotation(uint32_t family) const {
const char *external = " (VK_QUEUE_FAMILY_EXTERNAL_KHR)";
const char *foreign = " (VK_QUEUE_FAMILY_FOREIGN_EXT)";
const char *ignored = " (VK_QUEUE_FAMILY_IGNORED)";
const char *valid = " (VALID)";
const char *invalid = " (INVALID)";
switch (family) {
case VK_QUEUE_FAMILY_EXTERNAL_KHR:
return external;
case VK_QUEUE_FAMILY_FOREIGN_EXT:
return foreign;
case VK_QUEUE_FAMILY_IGNORED:
return ignored;
default:
if (IsValid(family)) {
return valid;
}
return invalid;
};
}
const char *GetTypeString() const { return object_string[object_type_]; }
VkSharingMode GetSharingMode() const { return sharing_mode_; }
protected:
const debug_report_data *const report_data_;
const char *const func_name_;
const uint64_t cb_handle64_;
const uint64_t barrier_handle64_;
const VkSharingMode sharing_mode_;
const VulkanObjectType object_type_;
const std::string *val_codes_;
const uint32_t limit_;
const bool mem_ext_;
};
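// Core decision table for barrier queue family validation. With VK_KHR_external_memory enabled,
// CONCURRENT sharing requires src or dst to be IGNORED and the other to be IGNORED or a special
// family; EXCLUSIVE sharing allows IGNORED, valid, or special families with the pairings checked
// below. Without the extension, CONCURRENT requires both families to be IGNORED, and EXCLUSIVE
// requires both IGNORED or both valid.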
bool Validate(const layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state, const ValidatorState &val,
const uint32_t src_queue_family, const uint32_t dst_queue_family) {
bool skip = false;
const bool mode_concurrent = val.GetSharingMode() == VK_SHARING_MODE_CONCURRENT;
const bool src_ignored = val.IsIgnored(src_queue_family);
const bool dst_ignored = val.IsIgnored(dst_queue_family);
if (val.KhrExternalMem()) {
if (mode_concurrent) {
if (!(src_ignored || dst_ignored)) {
skip |= val.LogMsg(kSrcOrDstMustBeIgnore, src_queue_family, dst_queue_family);
}
if ((src_ignored && !(dst_ignored || IsSpecial(dst_queue_family))) ||
(dst_ignored && !(src_ignored || IsSpecial(src_queue_family)))) {
skip |= val.LogMsg(kSpecialOrIgnoreOnly, src_queue_family, dst_queue_family);
}
} else {
// VK_SHARING_MODE_EXCLUSIVE
if (src_ignored && !dst_ignored) {
skip |= val.LogMsg(kSrcIgnoreRequiresDstIgnore, src_queue_family, dst_queue_family);
}
if (!dst_ignored && !val.IsValidOrSpecial(dst_queue_family)) {
skip |= val.LogMsg(kDstValidOrSpecialIfNotIgnore, dst_queue_family, "dstQueueFamilyIndex");
}
if (!src_ignored && !val.IsValidOrSpecial(src_queue_family)) {
skip |= val.LogMsg(kSrcValidOrSpecialIfNotIgnore, src_queue_family, "srcQueueFamilyIndex");
}
}
} else {
// No memory extension
if (mode_concurrent) {
if (!src_ignored || !dst_ignored) {
skip |= val.LogMsg(kSrcAndDestMustBeIgnore, src_queue_family, dst_queue_family);
}
} else {
// VK_SHARING_MODE_EXCLUSIVE
if (!((src_ignored && dst_ignored) || (val.IsValid(src_queue_family) && val.IsValid(dst_queue_family)))) {
skip |= val.LogMsg(kBothIgnoreOrBothValid, src_queue_family, dst_queue_family);
}
}
}
if (!mode_concurrent && !src_ignored && !dst_ignored) {
// Only enqueue submit time check if it is needed. If more submit time checks are added, change the criteria
// TODO create a better named list, or rename the submit time lists to something that matches the broader usage...
// Note: if we want to create a semantic that separates state lookup, validation, and state update this should go
// to a local queue of update_state_actions or something.
cb_state->eventUpdates.emplace_back([device_data, src_queue_family, dst_queue_family, val](VkQueue queue) {
return ValidatorState::ValidateAtQueueSubmit(queue, device_data, src_queue_family, dst_queue_family, val);
});
}
return skip;
}
} // namespace barrier_queue_families
// Type specific wrapper for image barriers
bool ValidateBarrierQueueFamilies(const layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state,
const VkImageMemoryBarrier *barrier, const IMAGE_STATE *state_data) {
// State data is required
if (!state_data) {
return false;
}
// Create the validator state from the image state
barrier_queue_families::ValidatorState val(device_data, func_name, cb_state, barrier, state_data);
const uint32_t src_queue_family = barrier->srcQueueFamilyIndex;
const uint32_t dst_queue_family = barrier->dstQueueFamilyIndex;
return barrier_queue_families::Validate(device_data, func_name, cb_state, val, src_queue_family, dst_queue_family);
}
// Type specific wrapper for buffer barriers
bool ValidateBarrierQueueFamilies(const layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state,
const VkBufferMemoryBarrier *barrier, const BUFFER_STATE *state_data) {
// State data is required
if (!state_data) {
return false;
}
// Create the validator state from the buffer state
barrier_queue_families::ValidatorState val(device_data, func_name, cb_state, barrier, state_data);
const uint32_t src_queue_family = barrier->srcQueueFamilyIndex;
const uint32_t dst_queue_family = barrier->dstQueueFamilyIndex;
return barrier_queue_families::Validate(device_data, func_name, cb_state, val, src_queue_family, dst_queue_family);
}
static bool ValidateBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask, uint32_t memBarrierCount,
const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
const VkImageMemoryBarrier *pImageMemBarriers) {
bool skip = false;
for (uint32_t i = 0; i < memBarrierCount; ++i) {
const auto &mem_barrier = pMemBarriers[i];
if (!ValidateAccessMaskPipelineStage(mem_barrier.srcAccessMask, src_stage_mask)) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01184",
"%s: pMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
mem_barrier.srcAccessMask, src_stage_mask);
}
if (!ValidateAccessMaskPipelineStage(mem_barrier.dstAccessMask, dst_stage_mask)) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01185",
"%s: pMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
mem_barrier.dstAccessMask, dst_stage_mask);
}
}
for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
auto mem_barrier = &pImageMemBarriers[i];
if (!ValidateAccessMaskPipelineStage(mem_barrier->srcAccessMask, src_stage_mask)) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01184",
"%s: pImageMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
mem_barrier->srcAccessMask, src_stage_mask);
}
if (!ValidateAccessMaskPipelineStage(mem_barrier->dstAccessMask, dst_stage_mask)) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01185",
"%s: pImageMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
mem_barrier->dstAccessMask, dst_stage_mask);
}
auto image_data = GetImageState(device_data, mem_barrier->image);
skip |= ValidateBarrierQueueFamilies(device_data, funcName, cb_state, mem_barrier, image_data);
if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-VkImageMemoryBarrier-newLayout-01198",
"%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.", funcName);
}
if (image_data) {
// There is no VUID for this, but there is blanket text:
// "Non-sparse resources must be bound completely and contiguously to a single VkDeviceMemory object before
// recording commands in a command buffer."
// TODO: Update this when VUID is defined
skip |= ValidateMemoryIsBoundToImage(device_data, image_data, funcName, kVUIDUndefined);
auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
skip |= ValidateImageAspectMask(device_data, image_data->image, image_data->createInfo.format, aspect_mask, funcName);
std::string param_name = "pImageMemoryBarriers[" + std::to_string(i) + "].subresourceRange";
skip |= ValidateImageBarrierSubresourceRange(device_data, image_data, mem_barrier->subresourceRange, funcName,
param_name.c_str());
}
}
for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
auto mem_barrier = &pBufferMemBarriers[i];
if (!mem_barrier) continue;
if (!ValidateAccessMaskPipelineStage(mem_barrier->srcAccessMask, src_stage_mask)) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01184",
"%s: pBufferMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
mem_barrier->srcAccessMask, src_stage_mask);
}
if (!ValidateAccessMaskPipelineStage(mem_barrier->dstAccessMask, dst_stage_mask)) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01185",
"%s: pBufferMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
mem_barrier->dstAccessMask, dst_stage_mask);
}
// Validate buffer barrier queue family indices
auto buffer_state = GetBufferState(device_data, mem_barrier->buffer);
skip |= ValidateBarrierQueueFamilies(device_data, funcName, cb_state, mem_barrier, buffer_state);
if (buffer_state) {
// There is no VUID for this, but there is blanket text:
// "Non-sparse resources must be bound completely and contiguously to a single VkDeviceMemory object before
// recording commands in a command buffer"
// TODO: Update this when VUID is defined
skip |= ValidateMemoryIsBoundToBuffer(device_data, buffer_state, funcName, kVUIDUndefined);
auto buffer_size = buffer_state->createInfo.size;
if (mem_barrier->offset >= buffer_size) {
skip |= log_msg(
device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-VkBufferMemoryBarrier-offset-01187",
"%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
funcName, HandleToUint64(mem_barrier->buffer), HandleToUint64(mem_barrier->offset),
HandleToUint64(buffer_size));
} else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
skip |=
log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-VkBufferMemoryBarrier-size-01189",
"%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
" whose sum is greater than total size 0x%" PRIx64 ".",
funcName, HandleToUint64(mem_barrier->buffer), HandleToUint64(mem_barrier->offset),
HandleToUint64(mem_barrier->size), HandleToUint64(buffer_size));
}
}
}
skip |= ValidateBarriersQFOTransferUniqueness(device_data, funcName, cb_state, bufferBarrierCount, pBufferMemBarriers,
imageMemBarrierCount, pImageMemBarriers);
return skip;
}
bool ValidateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
VkPipelineStageFlags sourceStageMask) {
bool skip = false;
VkPipelineStageFlags stageMask = 0;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
for (uint32_t i = 0; i < eventCount; ++i) {
auto event = pCB->events[firstEventIndex + i];
auto queue_data = dev_data->queueMap.find(queue);
if (queue_data == dev_data->queueMap.end()) return false;
auto event_data = queue_data->second.eventToStageMap.find(event);
if (event_data != queue_data->second.eventToStageMap.end()) {
stageMask |= event_data->second;
} else {
auto global_event_data = GetEventNode(dev_data, event);
if (!global_event_data) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
HandleToUint64(event), kVUID_Core_DrawState_InvalidEvent,
"Event 0x%" PRIx64 " cannot be waited on if it has never been set.", HandleToUint64(event));
} else {
stageMask |= global_event_data->stageMask;
}
}
}
// TODO: Need to validate that host_bit is only set if set event is called
// but set event can be called at any time.
if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), "VUID-vkCmdWaitEvents-srcStageMask-parameter",
"Submitting cmdbuffer with call to VkCmdWaitEvents using srcStageMask 0x%X which must be the bitwise OR of "
"the stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with "
"vkSetEvent but instead is 0x%X.",
sourceStageMask, stageMask);
}
return skip;
}
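// Example of the check above: if every waited event was set with vkCmdSetEvent using
// VK_PIPELINE_STAGE_TRANSFER_BIT, then vkCmdWaitEvents must pass srcStageMask == VK_PIPELINE_STAGE_TRANSFER_BIT,
// optionally OR'd with VK_PIPELINE_STAGE_HOST_BIT when vkSetEvent was also used from the host.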
// Note that we only check bits that HAVE required queueflags -- don't care entries are skipped
static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = {
{VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
{VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
{VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT},
{VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT},
{VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}};
static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT};
bool CheckStageMaskQueueCompatibility(layer_data *dev_data, VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
VkQueueFlags queue_flags, const char *function, const char *src_or_dest,
std::string error_code) {
bool skip = false;
// Lookup each bit in the stagemask and check for overlap between its table bits and queue_flags
for (const auto &item : stage_flag_bit_array) {
if (stage_mask & item) {
if ((supported_pipeline_stages_table[item] & queue_flags) == 0) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(command_buffer), error_code,
"%s(): %s flag %s is not compatible with the queue family properties of this command buffer.",
function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item)));
}
}
}
return skip;
}
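// For instance, recording a barrier with srcStageMask = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT on a command buffer
// whose pool belongs to a compute-only family (queueFlags == VK_QUEUE_COMPUTE_BIT) fails here, since the table
// requires VK_QUEUE_GRAPHICS_BIT for that stage.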
// Check if all barriers are of a given operation type.
template <typename Barrier, typename OpCheck>
static bool AllTransferOp(const COMMAND_POOL_NODE *pool, OpCheck &op_check, uint32_t count, const Barrier *barriers) {
if (!pool) return false;
for (uint32_t b = 0; b < count; b++) {
if (!op_check(pool, barriers + b)) return false;
}
return true;
}
enum BarrierOperationsType {
kAllAcquire, // All Barrier operations are "ownership acquire" operations
kAllRelease, // All Barrier operations are "ownership release" operations
kGeneral, // Either no ownership operations or a mix of ownership operation types and/or non-ownership operations
};
// Look at the barriers to see if they are all release or all acquire; the result impacts queue properties validation
BarrierOperationsType ComputeBarrierOperationsType(layer_data *device_data, GLOBAL_CB_NODE *cb_state, uint32_t buffer_barrier_count,
const VkBufferMemoryBarrier *buffer_barriers, uint32_t image_barrier_count,
const VkImageMemoryBarrier *image_barriers) {
auto pool = GetCommandPoolNode(device_data, cb_state->createInfo.commandPool);
BarrierOperationsType op_type = kGeneral;
// Look at the barrier details only if they exist
// Note: AllTransferOp returns true for count == 0
if ((buffer_barrier_count + image_barrier_count) != 0) {
if (AllTransferOp(pool, IsReleaseOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
AllTransferOp(pool, IsReleaseOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
op_type = kAllRelease;
} else if (AllTransferOp(pool, IsAcquireOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
AllTransferOp(pool, IsAcquireOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
op_type = kAllAcquire;
}
}
return op_type;
}
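// Classification relies on the IsReleaseOp/IsAcquireOp helpers defined elsewhere in this file; informally, a
// release transfers ownership away from the pool's queue family and an acquire transfers it in. A command that
// mixes the two, or includes any non-ownership barrier, falls back to kGeneral so both stage masks are validated.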
bool ValidateStageMasksAgainstQueueCapabilities(layer_data *dev_data, GLOBAL_CB_NODE const *cb_state,
VkPipelineStageFlags source_stage_mask, VkPipelineStageFlags dest_stage_mask,
BarrierOperationsType barrier_op_type, const char *function,
std::string error_code) {
bool skip = false;
uint32_t queue_family_index = dev_data->commandPoolMap[cb_state->createInfo.commandPool].queueFamilyIndex;
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(dev_data->physical_device), instance_layer_data_map);
auto physical_device_state = GetPhysicalDeviceState(instance_data, dev_data->physical_device);
// Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
// specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
// that commandBuffer was allocated from, as specified in the table of supported pipeline stages.
if (queue_family_index < physical_device_state->queue_family_properties.size()) {
VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags;
// Only check the source stage mask if any barriers aren't "acquire ownership"
if ((barrier_op_type != kAllAcquire) && (source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, source_stage_mask, specified_queue_flags,
function, "srcStageMask", error_code);
}
// Only check the dest stage mask if any barriers aren't "release ownership"
if ((barrier_op_type != kAllRelease) && (dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, dest_stage_mask, specified_queue_flags,
function, "dstStageMask", error_code);
}
}
return skip;
}
static bool PreCallValidateCmdEventCount(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineStageFlags sourceStageMask,
VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount,
const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount,
const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers) {
auto barrier_op_type = ComputeBarrierOperationsType(dev_data, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
imageMemoryBarrierCount, pImageMemoryBarriers);
bool skip = ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, sourceStageMask, dstStageMask, barrier_op_type,
"vkCmdWaitEvents", "VUID-vkCmdWaitEvents-srcStageMask-01164");
skip |= ValidateStageMaskGsTsEnables(dev_data, sourceStageMask, "vkCmdWaitEvents()", "VUID-vkCmdWaitEvents-srcStageMask-01159",
"VUID-vkCmdWaitEvents-srcStageMask-01161");
skip |= ValidateStageMaskGsTsEnables(dev_data, dstStageMask, "vkCmdWaitEvents()", "VUID-vkCmdWaitEvents-dstStageMask-01160",
"VUID-vkCmdWaitEvents-dstStageMask-01162");
skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWaitEvents()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdWaitEvents-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
skip |= ValidateBarriersToImages(dev_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdWaitEvents()");
skip |= ValidateBarriers(dev_data, "vkCmdWaitEvents()", cb_state, sourceStageMask, dstStageMask, memoryBarrierCount,
pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
pImageMemoryBarriers);
return skip;
}
static void PreCallRecordCmdWaitEvents(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, uint32_t eventCount, const VkEvent *pEvents,
VkPipelineStageFlags sourceStageMask, uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers) {
auto first_event_index = cb_state->events.size();
for (uint32_t i = 0; i < eventCount; ++i) {
auto event_state = GetEventNode(dev_data, pEvents[i]);
if (event_state) {
AddCommandBufferBinding(&event_state->cb_bindings, {HandleToUint64(pEvents[i]), kVulkanObjectTypeEvent}, cb_state);
event_state->cb_bindings.insert(cb_state);
}
cb_state->waitedEvents.insert(pEvents[i]);
cb_state->events.push_back(pEvents[i]);
}
cb_state->eventUpdates.emplace_back(
[=](VkQueue q) { return ValidateEventStageMask(q, cb_state, eventCount, first_event_index, sourceStageMask); });
TransitionImageLayouts(dev_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
}
static void PostCallRecordCmdWaitEvents(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, uint32_t bufferMemoryBarrierCount,
const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers) {
RecordBarriersQFOTransfers(dev_data, "vkCmdWaitEvents()", cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
imageMemoryBarrierCount, pImageMemoryBarriers);
}
VKAPI_ATTR void VKAPI_CALL CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
if (cb_state) {
skip |= PreCallValidateCmdEventCount(dev_data, cb_state, sourceStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers,
bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
pImageMemoryBarriers);
if (!skip) {
PreCallRecordCmdWaitEvents(dev_data, cb_state, eventCount, pEvents, sourceStageMask, imageMemoryBarrierCount,
pImageMemoryBarriers);
}
}
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
imageMemoryBarrierCount, pImageMemoryBarriers);
}
    lock.lock();
    if (cb_state) {
        PostCallRecordCmdWaitEvents(dev_data, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
                                    pImageMemoryBarriers);
    }
}
static bool PreCallValidateCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
bool skip = false;
auto barrier_op_type = ComputeBarrierOperationsType(device_data, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
imageMemoryBarrierCount, pImageMemoryBarriers);
skip |= ValidateStageMasksAgainstQueueCapabilities(device_data, cb_state, srcStageMask, dstStageMask, barrier_op_type,
"vkCmdPipelineBarrier", "VUID-vkCmdPipelineBarrier-srcStageMask-01183");
skip |= ValidateCmdQueueFlags(device_data, cb_state, "vkCmdPipelineBarrier()",
VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdPipelineBarrier-commandBuffer-cmdpool");
skip |= ValidateCmd(device_data, cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
skip |= ValidateStageMaskGsTsEnables(device_data, srcStageMask, "vkCmdPipelineBarrier()",
"VUID-vkCmdPipelineBarrier-srcStageMask-01168",
"VUID-vkCmdPipelineBarrier-srcStageMask-01170");
skip |= ValidateStageMaskGsTsEnables(device_data, dstStageMask, "vkCmdPipelineBarrier()",
"VUID-vkCmdPipelineBarrier-dstStageMask-01169",
"VUID-vkCmdPipelineBarrier-dstStageMask-01171");
if (cb_state->activeRenderPass) {
skip |= ValidateRenderPassPipelineBarriers(device_data, "vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask,
dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
if (skip) return true; // Early return to avoid redundant errors from below calls
}
skip |=
ValidateBarriersToImages(device_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdPipelineBarrier()");
skip |= ValidateBarriers(device_data, "vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask, memoryBarrierCount,
pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
pImageMemoryBarriers);
return skip;
}
static void PreCallRecordCmdPipelineBarrier(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer,
uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
RecordBarriersQFOTransfers(device_data, "vkCmdPipelineBarrier()", cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
imageMemoryBarrierCount, pImageMemoryBarriers);
TransitionImageLayouts(device_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
}
VKAPI_ATTR void VKAPI_CALL CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
bool skip = false;
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
if (cb_state) {
skip |= PreCallValidateCmdPipelineBarrier(device_data, cb_state, srcStageMask, dstStageMask, dependencyFlags,
memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
if (!skip) {
PreCallRecordCmdPipelineBarrier(device_data, cb_state, commandBuffer, bufferMemoryBarrierCount, pBufferMemoryBarriers,
imageMemoryBarrierCount, pImageMemoryBarriers);
}
} else {
assert(0);
}
lock.unlock();
if (!skip) {
device_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}
}
static bool SetQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
pCB->queryToStateMap[object] = value;
}
auto queue_data = dev_data->queueMap.find(queue);
if (queue_data != dev_data->queueMap.end()) {
queue_data->second.queryToStateMap[object] = value;
}
return false;
}
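// SetQueryState runs at queue submit time via the queryUpdates lambdas recorded below; it only updates state and
// never flags an error, hence the constant false return required by the lambda's bool (skip) signature.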
static bool PreCallValidateCmdBeginQuery(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
bool skip = ValidateCmdQueueFlags(dev_data, pCB, "vkCmdBeginQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdBeginQuery-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
return skip;
}
static void PostCallRecordCmdBeginQuery(layer_data *dev_data, VkQueryPool queryPool, uint32_t slot, GLOBAL_CB_NODE *pCB) {
QueryObject query = {queryPool, slot};
pCB->activeQueries.insert(query);
pCB->startedQueries.insert(query);
AddCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
{HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, pCB);
}
VKAPI_ATTR void VKAPI_CALL CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
        skip |= PreCallValidateCmdBeginQuery(dev_data, pCB);
}
lock.unlock();
if (skip) return;
dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags);
lock.lock();
if (pCB) {
PostCallRecordCmdBeginQuery(dev_data, queryPool, slot, pCB);
}
}
static bool PreCallValidateCmdEndQuery(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const QueryObject &query,
VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
bool skip = false;
if (!cb_state->activeQueries.count(query)) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdEndQuery-None-01923",
"Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d.", HandleToUint64(queryPool), slot);
}
skip |= ValidateCmdQueueFlags(dev_data, cb_state, "VkCmdEndQuery()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdEndQuery-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, cb_state, CMD_ENDQUERY, "VkCmdEndQuery()");
return skip;
}
static void PostCallRecordCmdEndQuery(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const QueryObject &query,
VkCommandBuffer commandBuffer, VkQueryPool queryPool) {
cb_state->activeQueries.erase(query);
cb_state->queryUpdates.emplace_back([=](VkQueue q) { return SetQueryState(q, commandBuffer, query, true); });
AddCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
{HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
}
VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
QueryObject query = {queryPool, slot};
GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
if (cb_state) {
skip |= PreCallValidateCmdEndQuery(dev_data, cb_state, query, commandBuffer, queryPool, slot);
}
lock.unlock();
if (skip) return;
dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot);
lock.lock();
if (cb_state) {
PostCallRecordCmdEndQuery(dev_data, cb_state, query, commandBuffer, queryPool);
}
}
static bool PreCallValidateCmdResetQueryPool(layer_data *dev_data, GLOBAL_CB_NODE *cb_state) {
bool skip = InsideRenderPass(dev_data, cb_state, "vkCmdResetQueryPool()", "VUID-vkCmdResetQueryPool-renderpass");
    skip |= ValidateCmd(dev_data, cb_state, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
    skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdResetQueryPool()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdResetQueryPool-commandBuffer-cmdpool");
return skip;
}
static void PostCallRecordCmdResetQueryPool(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer,
VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
for (uint32_t i = 0; i < queryCount; i++) {
QueryObject query = {queryPool, firstQuery + i};
cb_state->waitedEventsBeforeQueryReset[query] = cb_state->waitedEvents;
cb_state->queryUpdates.emplace_back([=](VkQueue q) { return SetQueryState(q, commandBuffer, query, false); });
}
AddCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
{HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
}
VKAPI_ATTR void VKAPI_CALL CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
    bool skip = false;
    if (cb_state) {
        skip = PreCallValidateCmdResetQueryPool(dev_data, cb_state);
    }
lock.unlock();
if (skip) return;
dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
lock.lock();
    if (cb_state) {
        PostCallRecordCmdResetQueryPool(dev_data, cb_state, commandBuffer, queryPool, firstQuery, queryCount);
    }
}
static bool IsQueryInvalid(layer_data *dev_data, QUEUE_STATE *queue_data, VkQueryPool queryPool, uint32_t queryIndex) {
QueryObject query = {queryPool, queryIndex};
auto query_data = queue_data->queryToStateMap.find(query);
if (query_data != queue_data->queryToStateMap.end()) {
if (!query_data->second) return true;
} else {
auto it = dev_data->queryToStateMap.find(query);
if (it == dev_data->queryToStateMap.end() || !it->second) return true;
}
return false;
}
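// A query counts as invalid for copying when its most recent recorded state is "unset": the per-queue map is
// consulted first, then the device-wide map, and an entry missing from both also counts as invalid.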
static bool ValidateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
auto queue_data = GetQueueState(dev_data, queue);
if (!queue_data) return false;
for (uint32_t i = 0; i < queryCount; i++) {
if (IsQueryInvalid(dev_data, queue_data, queryPool, firstQuery + i)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidQuery,
"Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
HandleToUint64(queryPool), firstQuery + i);
}
}
return skip;
}
static bool PreCallValidateCmdCopyQueryPoolResults(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, BUFFER_STATE *dst_buff_state) {
bool skip = ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyQueryPoolResults()",
"VUID-vkCmdCopyQueryPoolResults-dstBuffer-00826");
// Validate that DST buffer has correct usage flags set
skip |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
"VUID-vkCmdCopyQueryPoolResults-dstBuffer-00825", "vkCmdCopyQueryPoolResults()",
"VK_BUFFER_USAGE_TRANSFER_DST_BIT");
skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdCopyQueryPoolResults()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdCopyQueryPoolResults-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, cb_state, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
skip |= InsideRenderPass(dev_data, cb_state, "vkCmdCopyQueryPoolResults()", "VUID-vkCmdCopyQueryPoolResults-renderpass");
return skip;
}
static void PostCallRecordCmdCopyQueryPoolResults(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, BUFFER_STATE *dst_buff_state,
VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
AddCommandBufferBindingBuffer(dev_data, cb_state, dst_buff_state);
cb_state->queryUpdates.emplace_back([=](VkQueue q) { return ValidateQuery(q, cb_state, queryPool, firstQuery, queryCount); });
AddCommandBufferBinding(&GetQueryPoolNode(dev_data, queryPool)->cb_bindings,
{HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
}
VKAPI_ATTR void VKAPI_CALL CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize stride, VkQueryResultFlags flags) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
auto cb_node = GetCBNode(dev_data, commandBuffer);
auto dst_buff_state = GetBufferState(dev_data, dstBuffer);
if (cb_node && dst_buff_state) {
skip |= PreCallValidateCmdCopyQueryPoolResults(dev_data, cb_node, dst_buff_state);
}
lock.unlock();
if (skip) return;
dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset, stride,
flags);
lock.lock();
if (cb_node && dst_buff_state) {
PostCallRecordCmdCopyQueryPoolResults(dev_data, cb_node, dst_buff_state, queryPool, firstQuery, queryCount);
}
}
static bool PreCallValidateCmdPushConstants(layer_data *dev_data, VkCommandBuffer commandBuffer, VkPipelineLayout layout,
VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size) {
bool skip = false;
GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
if (cb_state) {
skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdPushConstants()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdPushConstants-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, cb_state, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
}
skip |= ValidatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
if (0 == stageFlags) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdPushConstants-stageFlags-requiredbitmask",
"vkCmdPushConstants() call has no stageFlags set.");
}
// Check if pipeline_layout VkPushConstantRange(s) overlapping offset, size have stageFlags set for each stage in the command
// stageFlags argument, *and* that the command stageFlags argument has bits set for the stageFlags in each overlapping range.
if (!skip) {
const auto &ranges = *GetPipelineLayout(dev_data, layout)->push_constant_ranges;
VkShaderStageFlags found_stages = 0;
for (const auto &range : ranges) {
if ((offset >= range.offset) && (offset + size <= range.offset + range.size)) {
VkShaderStageFlags matching_stages = range.stageFlags & stageFlags;
if (matching_stages != range.stageFlags) {
// "VUID-vkCmdPushConstants-offset-01796" VUID-vkCmdPushConstants-offset-01796
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
"VUID-vkCmdPushConstants-offset-01796",
"vkCmdPushConstants(): stageFlags (0x%" PRIx32 ", offset (%" PRIu32 "), and size (%" PRIu32
"), "
"must contain all stages in overlapping VkPushConstantRange stageFlags (0x%" PRIx32
"), offset (%" PRIu32 "), and size (%" PRIu32 ") in pipeline layout 0x%" PRIx64 ".",
(uint32_t)stageFlags, offset, size, (uint32_t)range.stageFlags, range.offset, range.size,
HandleToUint64(layout));
}
// Accumulate all stages we've found
found_stages = matching_stages | found_stages;
}
}
if (found_stages != stageFlags) {
// "VUID-vkCmdPushConstants-offset-01795" VUID-vkCmdPushConstants-offset-01795
uint32_t missing_stages = ~found_stages & stageFlags;
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdPushConstants-offset-01795",
"vkCmdPushConstants(): stageFlags = 0x%" PRIx32 ", VkPushConstantRange in pipeline layout 0x%" PRIx64
" overlapping offset = %d and size = %d, do not contain stageFlags 0x%" PRIx32 ".",
(uint32_t)stageFlags, HandleToUint64(layout), offset, size, missing_stages);
}
}
return skip;
}
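// Sketch of the overlap rule above (layout and values are hypothetical): given a pipeline layout declaring a
// single VkPushConstantRange { VK_SHADER_STAGE_VERTEX_BIT, /*offset*/ 0, /*size*/ 64 }:
//
//     vkCmdPushConstants(cb, layout, VK_SHADER_STAGE_VERTEX_BIT, 0, 64, data);   // OK: stages match exactly
//     vkCmdPushConstants(cb, layout, VK_SHADER_STAGE_VERTEX_BIT |
//                        VK_SHADER_STAGE_FRAGMENT_BIT, 0, 64, data);             // 01795: fragment stage uncovered
//
// Every overlapped range's stageFlags must be fully named (01796), and every named stage must be covered by some
// overlapping range (01795).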
VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags,
uint32_t offset, uint32_t size, const void *pValues) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
skip |= PreCallValidateCmdPushConstants(dev_data, commandBuffer, layout, stageFlags, offset, size);
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
}
static bool PreCallValidateCmdWriteTimestamp(layer_data *dev_data, GLOBAL_CB_NODE *cb_state) {
bool skip = ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdWriteTimestamp()",
VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
"VUID-vkCmdWriteTimestamp-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
return skip;
}
static void PostCallRecordCmdWriteTimestamp(GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer, VkQueryPool queryPool,
uint32_t slot) {
QueryObject query = {queryPool, slot};
cb_state->queryUpdates.emplace_back([=](VkQueue q) { return SetQueryState(q, commandBuffer, query, true); });
}
VKAPI_ATTR void VKAPI_CALL CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
VkQueryPool queryPool, uint32_t slot) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
if (cb_state) {
skip |= PreCallValidateCmdWriteTimestamp(dev_data, cb_state);
}
lock.unlock();
if (skip) return;
dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
lock.lock();
if (cb_state) PostCallRecordCmdWriteTimestamp(cb_state, commandBuffer, queryPool, slot);
}
static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag, std::string error_code) {
bool skip = false;
for (uint32_t attach = 0; attach < count; attach++) {
if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
// Attachment counts are verified elsewhere, but prevent an invalid access
if (attachments[attach].attachment < fbci->attachmentCount) {
const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
auto view_state = GetImageViewState(dev_data, *image_view);
if (view_state) {
const VkImageCreateInfo *ici = &GetImageState(dev_data, view_state->create_info.image)->createInfo;
if (ici != nullptr) {
if ((ici->usage & usage_flag) == 0) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, error_code,
"vkCreateFramebuffer: Framebuffer Attachment (%d) conflicts with the image's "
"IMAGE_USAGE flags (%s).",
attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
}
}
}
}
}
}
return skip;
}
// Validate VkFramebufferCreateInfo which includes:
// 1. attachmentCount equals renderPass attachmentCount
// 2. corresponding framebuffer and renderpass attachments have matching formats
// 3. corresponding framebuffer and renderpass attachments have matching sample counts
// 4. fb attachments only have a single mip level
// 5. fb attachment dimensions are each at least as large as the fb
//  6. fb attachments use identity swizzle
// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
// 8. fb dimensions are within physical device limits
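// For instance (hypothetical numbers), a framebuffer requested at width 1024 whose attachment view selects a
// 512-wide mip level fails check 5, and a view created with VK_COMPONENT_SWIZZLE_B in the r channel fails check 6.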
static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
bool skip = false;
auto rp_state = GetRenderPassState(dev_data, pCreateInfo->renderPass);
if (rp_state) {
const VkRenderPassCreateInfo *rpci = rp_state->createInfo.ptr();
if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pCreateInfo->renderPass), "VUID-VkFramebufferCreateInfo-attachmentCount-00876",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount "
"of %u of renderPass (0x%" PRIx64 ") being used to create Framebuffer.",
pCreateInfo->attachmentCount, rpci->attachmentCount, HandleToUint64(pCreateInfo->renderPass));
} else {
// attachmentCounts match, so make sure corresponding attachment details line up
const VkImageView *image_views = pCreateInfo->pAttachments;
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
                auto view_state = GetImageViewState(dev_data, image_views[i]);
                if (!view_state) continue;  // skip attachments with no tracked view state rather than dereferencing null
                auto &ivci = view_state->create_info;
if (ivci.format != rpci->pAttachments[i].format) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pCreateInfo->renderPass), "VUID-VkFramebufferCreateInfo-pAttachments-00880",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not "
"match the format of %s used by the corresponding attachment for renderPass (0x%" PRIx64 ").",
i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
HandleToUint64(pCreateInfo->renderPass));
}
const VkImageCreateInfo *ici = &GetImageState(dev_data, ivci.image)->createInfo;
if (ici->samples != rpci->pAttachments[i].samples) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pCreateInfo->renderPass), "VUID-VkFramebufferCreateInfo-pAttachments-00881",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match the %s "
"samples used by the corresponding attachment for renderPass (0x%" PRIx64 ").",
i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
HandleToUint64(pCreateInfo->renderPass));
}
// Verify that view only has a single mip level
if (ivci.subresourceRange.levelCount != 1) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
0, "VUID-VkFramebufferCreateInfo-pAttachments-00883",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u but "
"only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
i, ivci.subresourceRange.levelCount);
}
const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
(mip_height < pCreateInfo->height)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
0, "VUID-VkFramebufferCreateInfo-pAttachments-00882",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions "
"smaller than the corresponding framebuffer dimensions. Here are the respective dimensions for "
"attachment #%u, framebuffer:\n"
"width: %u, %u\n"
"height: %u, %u\n"
"layerCount: %u, %u\n",
i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
}
if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
0, "VUID-VkFramebufferCreateInfo-pAttachments-00884",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identy swizzle. All "
"framebuffer attachments must have been created with the identity swizzle. Here are the actual "
"swizzle values:\n"
"r swizzle = %s\n"
"g swizzle = %s\n"
"b swizzle = %s\n"
"a swizzle = %s\n",
i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a));
}
}
}
// Verify correct attachment usage flags
for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
// Verify input attachments:
skip |=
MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount, rpci->pSubpasses[subpass].pInputAttachments,
pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00879");
// Verify color attachments:
skip |=
MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount, rpci->pSubpasses[subpass].pColorAttachments,
pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00877");
// Verify depth/stencil attachments:
if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
skip |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00878");
}
}
}
// Verify FB dimensions are within physical device limits
if (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-width-00886",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. Requested "
"width: %u, device max: %u\n",
pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth);
}
if (pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-height-00888",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. Requested "
"height: %u, device max: %u\n",
pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight);
}
if (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-layers-00890",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. Requested "
"layers: %u, device max: %u\n",
pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers);
}
// Verify FB dimensions are greater than zero
if (pCreateInfo->width <= 0) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-width-00885",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width must be greater than zero.");
}
if (pCreateInfo->height <= 0) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-height-00887",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height must be greater than zero.");
}
if (pCreateInfo->layers <= 0) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkFramebufferCreateInfo-layers-00889",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers must be greater than zero.");
}
return skip;
}
// Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object
// Return true if an error is encountered and callback returns true to skip call down chain
// false indicates that call down chain should proceed
static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
// TODO : Verify that renderPass FB is created with is compatible with FB
bool skip = false;
skip |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
return skip;
}
// CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
// Shadow create info and store in map
std::unique_ptr<FRAMEBUFFER_STATE> fb_state(
new FRAMEBUFFER_STATE(fb, pCreateInfo, GetRenderPassStateSharedPtr(dev_data, pCreateInfo->renderPass)));
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
VkImageView view = pCreateInfo->pAttachments[i];
auto view_state = GetImageViewState(dev_data, view);
if (!view_state) {
continue;
}
MT_FB_ATTACHMENT_INFO fb_info;
fb_info.view_state = view_state;
fb_info.image = view_state->create_info.image;
fb_state->attachments.push_back(fb_info);
}
dev_data->frameBufferMap[fb] = std::move(fb_state);
}
VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
if (VK_SUCCESS == result) {
lock.lock();
PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
lock.unlock();
}
return result;
}
static bool FindDependency(const uint32_t index, const uint32_t dependent, const std::vector<DAGNode> &subpass_to_node,
std::unordered_set<uint32_t> &processed_nodes) {
    // If we have already checked this node, we have not found a dependency path, so return false.
if (processed_nodes.count(index)) return false;
processed_nodes.insert(index);
const DAGNode &node = subpass_to_node[index];
// Look for a dependency path. If one exists return true else recurse on the previous nodes.
if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
for (auto elem : node.prev) {
if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) return true;
}
} else {
return true;
}
return false;
}
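// Example: with declared dependencies 0 -> 1 and 1 -> 2 in subpass_to_node, FindDependency(2, 0, ...) walks the
// prev links 2 -> 1 -> 0 and returns true, so subpasses 0 and 2 are transitively ordered without a direct edge.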
static bool CheckDependencyExists(const layer_data *dev_data, const uint32_t subpass,
const std::vector<uint32_t> &dependent_subpasses, const std::vector<DAGNode> &subpass_to_node,
bool &skip) {
bool result = true;
// Loop through all subpasses that share the same attachment and make sure a dependency exists
for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
if (static_cast<uint32_t>(subpass) == dependent_subpasses[k]) continue;
const DAGNode &node = subpass_to_node[subpass];
// Check for a specified dependency between the two nodes. If one exists we are done.
auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no dependency exists, an implicit dependency still might. If not, throw an error.
std::unordered_set<uint32_t> processed_nodes;
if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidRenderpass,
"A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
dependent_subpasses[k]);
result = false;
}
}
}
return result;
}
static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip) {
const DAGNode &node = subpass_to_node[index];
// If this node writes to the attachment return true as next nodes need to preserve the attachment.
const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
if (attachment == subpass.pColorAttachments[j].attachment) return true;
}
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
if (attachment == subpass.pInputAttachments[j].attachment) return true;
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
if (attachment == subpass.pDepthStencilAttachment->attachment) return true;
}
bool result = false;
// Loop through previous nodes and see if any of them write to the attachment.
for (auto elem : node.prev) {
result |= CheckPreserved(dev_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip);
}
    // If the attachment was written to by a previous node then this node needs to preserve it.
if (result && depth > 0) {
bool has_preserved = false;
for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
if (subpass.pPreserveAttachments[j] == attachment) {
has_preserved = true;
break;
}
}
if (!has_preserved) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidRenderpass,
"Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
}
}
return result;
}
template <class T>
bool IsRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    // Two half-open ranges [offset, offset + size) overlap iff each begins before the other ends
    return (offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1));
}
bool IsRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
return (IsRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
IsRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
}
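// Example: mip ranges [0, 2) and [1, 3) share level 1, so if the layer ranges also intersect, the two subresource
// ranges alias the same memory and both attachments must set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT (checked
// below in ValidateDependencies).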
static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer,
RENDER_PASS_STATE const *renderPass) {
bool skip = false;
auto const pFramebufferInfo = framebuffer->createInfo.ptr();
auto const pCreateInfo = renderPass->createInfo.ptr();
auto const &subpass_to_node = renderPass->subpassToNode;
std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
// Find overlapping attachments
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
VkImageView viewi = pFramebufferInfo->pAttachments[i];
VkImageView viewj = pFramebufferInfo->pAttachments[j];
if (viewi == viewj) {
overlapping_attachments[i].push_back(j);
overlapping_attachments[j].push_back(i);
continue;
}
auto view_state_i = GetImageViewState(dev_data, viewi);
auto view_state_j = GetImageViewState(dev_data, viewj);
if (!view_state_i || !view_state_j) {
continue;
}
auto view_ci_i = view_state_i->create_info;
auto view_ci_j = view_state_j->create_info;
if (view_ci_i.image == view_ci_j.image && IsRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
overlapping_attachments[i].push_back(j);
overlapping_attachments[j].push_back(i);
continue;
}
auto image_data_i = GetImageState(dev_data, view_ci_i.image);
auto image_data_j = GetImageState(dev_data, view_ci_j.image);
if (!image_data_i || !image_data_j) {
continue;
}
if (image_data_i->binding.mem == image_data_j->binding.mem &&
IsRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
image_data_j->binding.size)) {
overlapping_attachments[i].push_back(j);
overlapping_attachments[j].push_back(i);
}
}
}
for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
uint32_t attachment = i;
for (auto other_attachment : overlapping_attachments[i]) {
if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
HandleToUint64(framebuffer->framebuffer), "VUID-VkRenderPassCreateInfo-attachment-00833",
"Attachment %d aliases attachment %d but doesn't set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
attachment, other_attachment);
}
if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT,
HandleToUint64(framebuffer->framebuffer), "VUID-VkRenderPassCreateInfo-attachment-00833",
"Attachment %d aliases attachment %d but doesn't set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
other_attachment, attachment);
}
}
}
    // For each attachment, find the subpasses that use it.
unordered_set<uint32_t> attachmentIndices;
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
attachmentIndices.clear();
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
uint32_t attachment = subpass.pInputAttachments[j].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
input_attachment_to_subpass[attachment].push_back(i);
for (auto overlapping_attachment : overlapping_attachments[attachment]) {
input_attachment_to_subpass[overlapping_attachment].push_back(i);
}
}
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
uint32_t attachment = subpass.pColorAttachments[j].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
output_attachment_to_subpass[attachment].push_back(i);
for (auto overlapping_attachment : overlapping_attachments[attachment]) {
output_attachment_to_subpass[overlapping_attachment].push_back(i);
}
attachmentIndices.insert(attachment);
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
output_attachment_to_subpass[attachment].push_back(i);
for (auto overlapping_attachment : overlapping_attachments[attachment]) {
output_attachment_to_subpass[overlapping_attachment].push_back(i);
}
if (attachmentIndices.count(attachment)) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidRenderpass,
"Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
}
}
}
    // If a dependency is needed, make sure one exists.
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        // If the attachment is an input, then all subpasses that write to it must have a dependency relationship.
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
uint32_t attachment = subpass.pInputAttachments[j].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
}
// If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
uint32_t attachment = subpass.pColorAttachments[j].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip);
CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip);
}
}
    // Walk the implicit dependencies: if a subpass reads an attachment, make sure the attachment is preserved by every
    // intervening subpass between the one that wrote it and the read.
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip);
}
}
return skip;
}
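// Build the subpass dependency DAG for the render pass: record prev/next edges and self-dependencies per subpass, and
// flag dependencies where both subpasses are external or where an earlier subpass depends on a later one.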
static bool CreatePassDAG(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, RENDER_PASS_STATE *render_pass) {
// Shorthand...
auto &subpass_to_node = render_pass->subpassToNode;
subpass_to_node.resize(pCreateInfo->subpassCount);
auto &self_dependencies = render_pass->self_dependencies;
self_dependencies.resize(pCreateInfo->subpassCount);
bool skip = false;
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
subpass_to_node[i].pass = i;
self_dependencies[i].clear();
}
for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
if (dependency.srcSubpass == dependency.dstSubpass) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidRenderpass, "The src and dest subpasses cannot both be external.");
}
} else if (dependency.srcSubpass > dependency.dstSubpass) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidRenderpass,
"Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
} else if (dependency.srcSubpass == dependency.dstSubpass) {
self_dependencies[dependency.srcSubpass].push_back(i);
} else {
subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
}
}
return skip;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool spirv_valid;
if (PreCallValidateCreateShaderModule(dev_data, pCreateInfo, &spirv_valid)) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult res = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
if (res == VK_SUCCESS) {
lock_guard_t lock(global_lock);
unique_ptr<shader_module> new_shader_module(spirv_valid ? new shader_module(pCreateInfo, *pShaderModule)
: new shader_module());
dev_data->shaderModuleMap[*pShaderModule] = std::move(new_shader_module);
}
return res;
}
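// An attachment reference index must either be VK_ATTACHMENT_UNUSED or less than the render pass's attachment count.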
static bool ValidateAttachmentIndex(const layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
bool skip = false;
if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkRenderPassCreateInfo-attachment-00834",
"CreateRenderPass: %s attachment %d must be less than the total number of attachments %d.", type,
attachment, attachment_count);
}
return skip;
}
static bool IsPowerOfTwo(unsigned x) { return x && !(x & (x - 1)); }
enum AttachmentType {
ATTACHMENT_COLOR = 1,
ATTACHMENT_DEPTH = 2,
ATTACHMENT_INPUT = 4,
ATTACHMENT_PRESERVE = 8,
ATTACHMENT_RESOLVE = 16,
};
char const *StringAttachmentType(uint8_t type) {
switch (type) {
case ATTACHMENT_COLOR:
return "color";
case ATTACHMENT_DEPTH:
return "depth";
case ATTACHMENT_INPUT:
return "input";
case ATTACHMENT_PRESERVE:
return "preserve";
case ATTACHMENT_RESOLVE:
return "resolve";
default:
return "(multiple)";
}
}
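// Record a use of 'attachment' by 'subpass', flagging duplicate uses of the same kind, invalid combinations of uses,
// and conflicting layouts for the same attachment within a single subpass.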
static bool AddAttachmentUse(const layer_data *dev_data, uint32_t subpass, std::vector<uint8_t> &attachment_uses,
std::vector<VkImageLayout> &attachment_layouts, uint32_t attachment, uint8_t new_use,
VkImageLayout new_layout) {
if (attachment >= attachment_uses.size()) return false; /* out of range, but already reported */
bool skip = false;
auto &uses = attachment_uses[attachment];
if (uses & new_use) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidRenderpass,
"vkCreateRenderPass(): subpass %u already uses attachment %u as a %s attachment.", subpass, attachment,
StringAttachmentType(new_use));
} else if (uses & ~ATTACHMENT_INPUT || (uses && (new_use == ATTACHMENT_RESOLVE || new_use == ATTACHMENT_PRESERVE))) {
/* Note: input attachments are assumed to be done first. */
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescription-pPreserveAttachments-00854",
"vkCreateRenderPass(): subpass %u uses attachment %u as both %s and %s attachment.", subpass, attachment,
StringAttachmentType(uses), StringAttachmentType(new_use));
} else if (uses && attachment_layouts[attachment] != new_layout) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescription-layout-00855",
"vkCreateRenderPass(): subpass %u uses attachment %u with conflicting layouts: input uses %s, but %s "
"attachment uses %s.",
subpass, attachment, string_VkImageLayout(attachment_layouts[attachment]), StringAttachmentType(new_use),
string_VkImageLayout(new_layout));
} else {
attachment_layouts[attachment] = new_layout;
uses |= new_use;
}
return skip;
}
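// Validate the attachment references of every subpass: index bounds, use/layout consistency, resolve-attachment rules,
// and (absent VK_AMD_mixed_attachment_samples) consistent sample counts across a subpass's attachments.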
static bool ValidateRenderpassAttachmentUsage(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
bool skip = false;
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
std::vector<uint8_t> attachment_uses(pCreateInfo->attachmentCount);
std::vector<VkImageLayout> attachment_layouts(pCreateInfo->attachmentCount);
if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescription-pipelineBindPoint-00844",
"vkCreateRenderPass(): Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i);
}
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
auto const &attachment_ref = subpass.pInputAttachments[j];
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
skip |= ValidateAttachmentIndex(dev_data, attachment_ref.attachment, pCreateInfo->attachmentCount, "Input");
skip |= AddAttachmentUse(dev_data, i, attachment_uses, attachment_layouts, attachment_ref.attachment,
ATTACHMENT_INPUT, attachment_ref.layout);
}
}
for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
uint32_t attachment = subpass.pPreserveAttachments[j];
if (attachment == VK_ATTACHMENT_UNUSED) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescription-attachment-00853",
"vkCreateRenderPass(): Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", j);
} else {
skip |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
skip |= AddAttachmentUse(dev_data, i, attachment_uses, attachment_layouts, attachment, ATTACHMENT_PRESERVE,
VkImageLayout(0) /* preserve doesn't have any layout */);
}
}
unsigned sample_count = 0;
bool subpass_performs_resolve = false;
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
if (subpass.pResolveAttachments) {
auto const &attachment_ref = subpass.pResolveAttachments[j];
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
skip |= ValidateAttachmentIndex(dev_data, attachment_ref.attachment, pCreateInfo->attachmentCount, "Resolve");
skip |= AddAttachmentUse(dev_data, i, attachment_uses, attachment_layouts, attachment_ref.attachment,
ATTACHMENT_RESOLVE, attachment_ref.layout);
subpass_performs_resolve = true;
if (!skip && pCreateInfo->pAttachments[attachment_ref.attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
0, "VUID-VkSubpassDescription-pResolveAttachments-00849",
"vkCreateRenderPass(): Subpass %u requests multisample resolve into attachment %u, which must "
"have VK_SAMPLE_COUNT_1_BIT but has %s.",
i, attachment_ref.attachment,
string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment_ref.attachment].samples));
}
}
}
}
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
auto const &attachment_ref = subpass.pColorAttachments[j];
skip |= ValidateAttachmentIndex(dev_data, attachment_ref.attachment, pCreateInfo->attachmentCount, "Color");
if (!skip && attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
skip |= AddAttachmentUse(dev_data, i, attachment_uses, attachment_layouts, attachment_ref.attachment,
ATTACHMENT_COLOR, attachment_ref.layout);
sample_count |= (unsigned)pCreateInfo->pAttachments[attachment_ref.attachment].samples;
if (subpass_performs_resolve &&
pCreateInfo->pAttachments[attachment_ref.attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
0, "VUID-VkSubpassDescription-pResolveAttachments-00848",
"vkCreateRenderPass(): Subpass %u requests multisample resolve from attachment %u which has "
"VK_SAMPLE_COUNT_1_BIT.",
i, attachment_ref.attachment);
}
if (dev_data->extensions.vk_amd_mixed_attachment_samples && subpass.pDepthStencilAttachment &&
subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const auto depth_stencil_sample_count =
pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples;
if (pCreateInfo->pAttachments[attachment_ref.attachment].samples > depth_stencil_sample_count) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDescription-pColorAttachments-01506",
"vkCreateRenderPass(): Subpass %u pColorAttachments[%u] has %s which is larger than "
"depth/stencil attachment %s.",
i, j, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment_ref.attachment].samples),
string_VkSampleCountFlagBits(depth_stencil_sample_count));
}
}
}
if (!skip && subpass_performs_resolve && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED) {
if (attachment_ref.attachment == VK_ATTACHMENT_UNUSED) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
0, "VUID-VkSubpassDescription-pResolveAttachments-00847",
"vkCreateRenderPass(): Subpass %u requests multisample resolve from attachment %u which has "
"attachment=VK_ATTACHMENT_UNUSED.",
i, attachment_ref.attachment);
} else {
const auto &color_desc = pCreateInfo->pAttachments[attachment_ref.attachment];
const auto &resolve_desc = pCreateInfo->pAttachments[subpass.pResolveAttachments[j].attachment];
if (color_desc.format != resolve_desc.format) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
0, "VUID-VkSubpassDescription-pResolveAttachments-00850",
"vkCreateRenderPass(): Subpass %u pColorAttachments[%u] resolves to an attachment with a "
"different format. color format: %u, resolve format: %u.",
i, j, color_desc.format, resolve_desc.format);
}
}
}
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
auto const &attachment_ref = *subpass.pDepthStencilAttachment;
skip |= ValidateAttachmentIndex(dev_data, attachment_ref.attachment, pCreateInfo->attachmentCount, "Depth stencil");
if (!skip && attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
skip |= AddAttachmentUse(dev_data, i, attachment_uses, attachment_layouts, attachment_ref.attachment,
ATTACHMENT_DEPTH, attachment_ref.layout);
sample_count |= (unsigned)pCreateInfo->pAttachments[attachment_ref.attachment].samples;
}
}
if (!dev_data->extensions.vk_amd_mixed_attachment_samples && sample_count && !IsPowerOfTwo(sample_count)) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkAttachmentDescription-samples-parameter",
"vkCreateRenderPass(): Subpass %u attempts to render to attachments with inconsistent sample counts.", i);
}
}
return skip;
}
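// Record whether the first use of an attachment over the whole render pass is a read or a write.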
static void MarkAttachmentFirstUse(RENDER_PASS_STATE *render_pass, uint32_t index, bool is_read) {
if (index == VK_ATTACHMENT_UNUSED) return;
if (!render_pass->attachment_first_read.count(index)) render_pass->attachment_first_read[index] = is_read;
}
static bool PreCallValidateCreateRenderPass(const layer_data *dev_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
RENDER_PASS_STATE *render_pass) {
bool skip = false;
// TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
// ValidateLayouts.
skip |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
render_pass->renderPass = VK_NULL_HANDLE;
skip |= CreatePassDAG(dev_data, pCreateInfo, render_pass);
for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
auto const &dependency = pCreateInfo->pDependencies[i];
skip |= ValidateStageMaskGsTsEnables(dev_data, dependency.srcStageMask, "vkCreateRenderPass()",
"VUID-VkSubpassDependency-srcStageMask-00860",
"VUID-VkSubpassDependency-srcStageMask-00862");
skip |= ValidateStageMaskGsTsEnables(dev_data, dependency.dstStageMask, "vkCreateRenderPass()",
"VUID-VkSubpassDependency-dstStageMask-00861",
"VUID-VkSubpassDependency-dstStageMask-00863");
if (!ValidateAccessMaskPipelineStage(dependency.srcAccessMask, dependency.srcStageMask)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDependency-srcAccessMask-00868",
"CreateRenderPass: pDependencies[%u].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", i,
dependency.srcAccessMask, dependency.srcStageMask);
}
if (!ValidateAccessMaskPipelineStage(dependency.dstAccessMask, dependency.dstStageMask)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkSubpassDependency-dstAccessMask-00869",
"CreateRenderPass: pDependencies[%u].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", i,
dependency.dstAccessMask, dependency.dstStageMask);
}
}
if (!skip) {
skip |= ValidateLayouts(dev_data, device, pCreateInfo);
}
return skip;
}
// Style note:
// Use of an rvalue reference exceeds the recommended usage of rvalue refs in the Google style guide, but it
// intentionally forces the caller to move or copy. This is clearer than passing a pointer to a shared_ptr and avoids
// the atomic increment/decrement of shared_ptr copy construction or assignment.
static void PostCallRecordCreateRenderPass(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo,
const VkRenderPass render_pass_handle,
std::shared_ptr<RENDER_PASS_STATE> &&render_pass) {
render_pass->renderPass = render_pass_handle;
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
MarkAttachmentFirstUse(render_pass.get(), subpass.pColorAttachments[j].attachment, false);
// resolve attachments are considered to be written
if (subpass.pResolveAttachments) {
MarkAttachmentFirstUse(render_pass.get(), subpass.pResolveAttachments[j].attachment, false);
}
}
if (subpass.pDepthStencilAttachment) {
MarkAttachmentFirstUse(render_pass.get(), subpass.pDepthStencilAttachment->attachment, false);
}
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
MarkAttachmentFirstUse(render_pass.get(), subpass.pInputAttachments[j].attachment, true);
}
}
    // Even though render_pass is an rvalue-ref parameter, we must still std::move it so that move assignment is invoked.
dev_data->renderPassMap[render_pass_handle] = std::move(render_pass);
}
VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
// If we fail, this will act like a unique_ptr and auto-cleanup, as we aren't saving it anywhere
auto render_pass = std::make_shared<RENDER_PASS_STATE>(pCreateInfo);
unique_lock_t lock(global_lock);
skip = PreCallValidateCreateRenderPass(dev_data, device, pCreateInfo, render_pass.get());
lock.unlock();
if (skip) {
return VK_ERROR_VALIDATION_FAILED_EXT;
}
VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
if (VK_SUCCESS == result) {
lock.lock();
PostCallRecordCreateRenderPass(dev_data, pCreateInfo, *pRenderPass, std::move(render_pass));
}
return result;
}
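// Verify that a command restricted to primary command buffers is not being recorded into a secondary one.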
static bool ValidatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, char const *cmd_name,
std::string error_code) {
bool skip = false;
if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), error_code, "Cannot execute command %s on a secondary command buffer.",
cmd_name);
}
return skip;
}
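// Verify that VkRenderPassBeginInfo::renderArea lies entirely within the bounds of the framebuffer.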
static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
bool skip = false;
const safe_VkFramebufferCreateInfo *pFramebufferInfo =
&GetFramebufferState(dev_data, pRenderPassBegin->framebuffer)->createInfo;
if (pRenderPassBegin->renderArea.offset.x < 0 ||
(pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
pRenderPassBegin->renderArea.offset.y < 0 ||
(pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
skip |= static_cast<bool>(log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidRenderArea,
"Cannot execute a render pass with renderArea not within the bound of the framebuffer. RenderArea: x %d, y %d, width "
"%d, height %d. Framebuffer: width %d, height %d.",
pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
}
return skip;
}
// If this is a stencil-only format, the stencil [load|store] op is the one that must be checked; for a depth/color
// attachment it is the [load|store] op; combined depth/stencil formats must check both.
// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
template <typename T>
static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
if (color_depth_op != op && stencil_op != op) {
return false;
}
bool check_color_depth_load_op = !FormatIsStencilOnly(format);
bool check_stencil_load_op = FormatIsDepthAndStencil(format) || !check_color_depth_load_op;
return ((check_color_depth_load_op && (color_depth_op == op)) || (check_stencil_load_op && (stencil_op == op)));
}
VKAPI_ATTR void VKAPI_CALL CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
VkSubpassContents contents) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *cb_node = GetCBNode(dev_data, commandBuffer);
auto render_pass_state = pRenderPassBegin ? GetRenderPassState(dev_data, pRenderPassBegin->renderPass) : nullptr;
auto framebuffer = pRenderPassBegin ? GetFramebufferState(dev_data, pRenderPassBegin->framebuffer) : nullptr;
if (cb_node) {
if (render_pass_state) {
uint32_t clear_op_size = 0; // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
cb_node->activeFramebuffer = pRenderPassBegin->framebuffer;
for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) {
auto pAttachment = &render_pass_state->createInfo.pAttachments[i];
if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp, pAttachment->stencilLoadOp,
VK_ATTACHMENT_LOAD_OP_CLEAR)) {
clear_op_size = static_cast<uint32_t>(i) + 1;
}
}
if (clear_op_size > pRenderPassBegin->clearValueCount) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                                HandleToUint64(render_pass_state->renderPass), "VUID-VkRenderPassBeginInfo-clearValueCount-00902",
                                "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u, but there "
                                "must be at least %u entries in the pClearValues array to account for the highest-indexed attachment "
                                "in renderPass 0x%" PRIx64
                                " that uses VK_ATTACHMENT_LOAD_OP_CLEAR, which is attachment %u. Note that the pClearValues array is "
                                "indexed by attachment number, so even if some pClearValues entries between 0 and %u correspond to "
                                "attachments that aren't cleared, they will be ignored.",
                                pRenderPassBegin->clearValueCount, clear_op_size, HandleToUint64(render_pass_state->renderPass),
                                clear_op_size - 1, clear_op_size - 1);
}
skip |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
skip |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin,
GetFramebufferState(dev_data, pRenderPassBegin->framebuffer));
if (framebuffer->rp_state->renderPass != render_pass_state->renderPass) {
skip |= ValidateRenderPassCompatibility(dev_data, "render pass", render_pass_state, "framebuffer",
framebuffer->rp_state.get(), "vkCmdBeginRenderPass()",
"VUID-VkRenderPassBeginInfo-renderPass-00904");
}
skip |= InsideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass()", "VUID-vkCmdBeginRenderPass-renderpass");
skip |= ValidateDependencies(dev_data, framebuffer, render_pass_state);
skip |=
ValidatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass()", "VUID-vkCmdBeginRenderPass-bufferlevel");
skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdBeginRenderPass()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdBeginRenderPass-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
cb_node->activeRenderPass = render_pass_state;
// This is a shallow copy as that is all that is needed for now
cb_node->activeRenderPassBeginInfo = *pRenderPassBegin;
cb_node->activeSubpass = 0;
cb_node->activeSubpassContents = contents;
cb_node->framebuffers.insert(pRenderPassBegin->framebuffer);
// Connect this framebuffer and its children to this cmdBuffer
AddFramebufferBinding(dev_data, cb_node, framebuffer);
// Connect this RP to cmdBuffer
AddCommandBufferBinding(&render_pass_state->cb_bindings,
{HandleToUint64(render_pass_state->renderPass), kVulkanObjectTypeRenderPass}, cb_node);
// transition attachments to the correct layouts for beginning of renderPass and first subpass
TransitionBeginRenderPassLayouts(dev_data, cb_node, render_pass_state, framebuffer);
}
}
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
}
}
static bool PreCallValidateCmdNextSubpass(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer) {
bool skip = ValidatePrimaryCommandBuffer(dev_data, cb_state, "vkCmdNextSubpass()", "VUID-vkCmdNextSubpass-bufferlevel");
skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdNextSubpass()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdNextSubpass-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, cb_state, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
skip |= OutsideRenderPass(dev_data, cb_state, "vkCmdNextSubpass()", "VUID-vkCmdNextSubpass-renderpass");
    if (cb_state->activeRenderPass) {
        auto subpassCount = cb_state->activeRenderPass->createInfo.subpassCount;
        if (cb_state->activeSubpass == subpassCount - 1) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            HandleToUint64(commandBuffer), "VUID-vkCmdNextSubpass-None-00909",
                            "vkCmdNextSubpass(): Attempted to advance beyond final subpass.");
        }
    }
return skip;
}
static void PostCallRecordCmdNextSubpass(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkSubpassContents contents) {
cb_node->activeSubpass++;
cb_node->activeSubpassContents = contents;
TransitionSubpassLayouts(dev_data, cb_node, cb_node->activeRenderPass, cb_node->activeSubpass,
GetFramebufferState(dev_data, cb_node->activeRenderPassBeginInfo.framebuffer));
}
VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdNextSubpass(dev_data, pCB, commandBuffer);
}
lock.unlock();
if (skip) return;
dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents);
if (pCB) {
lock.lock();
PostCallRecordCmdNextSubpass(dev_data, pCB, contents);
}
}
static bool PreCallValidateCmdEndRenderPass(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkCommandBuffer commandBuffer) {
bool skip = false;
RENDER_PASS_STATE *rp_state = cb_state->activeRenderPass;
if (rp_state) {
if (cb_state->activeSubpass != rp_state->createInfo.subpassCount - 1) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdEndRenderPass-None-00910",
"vkCmdEndRenderPass(): Called before reaching final subpass.");
}
}
    skip |= OutsideRenderPass(dev_data, cb_state, "vkCmdEndRenderPass()", "VUID-vkCmdEndRenderPass-renderpass");
skip |= ValidatePrimaryCommandBuffer(dev_data, cb_state, "vkCmdEndRenderPass()", "VUID-vkCmdEndRenderPass-bufferlevel");
skip |= ValidateCmdQueueFlags(dev_data, cb_state, "vkCmdEndRenderPass()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdEndRenderPass-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, cb_state, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
return skip;
}
static void PostCallRecordCmdEndRenderPass(layer_data *dev_data, GLOBAL_CB_NODE *cb_state) {
FRAMEBUFFER_STATE *framebuffer = GetFramebufferState(dev_data, cb_state->activeFramebuffer);
TransitionFinalSubpassLayouts(dev_data, cb_state, &cb_state->activeRenderPassBeginInfo, framebuffer);
cb_state->activeRenderPass = nullptr;
cb_state->activeSubpass = 0;
cb_state->activeFramebuffer = VK_NULL_HANDLE;
}
VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
auto pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
skip |= PreCallValidateCmdEndRenderPass(dev_data, pCB, commandBuffer);
}
lock.unlock();
if (skip) return;
dev_data->dispatch_table.CmdEndRenderPass(commandBuffer);
if (pCB) {
lock.lock();
PostCallRecordCmdEndRenderPass(dev_data, pCB);
}
}
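// If the secondary command buffer's inheritance info names a framebuffer, it must be a known framebuffer object and
// must match the primary command buffer's active framebuffer.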
static bool ValidateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB, const char *caller) {
bool skip = false;
if (!pSubCB->beginInfo.pInheritanceInfo) {
return skip;
}
VkFramebuffer primary_fb = pCB->activeFramebuffer;
VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
if (secondary_fb != VK_NULL_HANDLE) {
if (primary_fb != secondary_fb) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(primaryBuffer), "VUID-vkCmdExecuteCommands-pCommandBuffers-00099",
"vkCmdExecuteCommands() called w/ invalid secondary command buffer 0x%" PRIx64
" which has a framebuffer 0x%" PRIx64
" that is not the same as the primary command buffer's current active framebuffer 0x%" PRIx64 ".",
HandleToUint64(secondaryBuffer), HandleToUint64(secondary_fb), HandleToUint64(primary_fb));
}
auto fb = GetFramebufferState(dev_data, secondary_fb);
if (!fb) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(primaryBuffer), kVUID_Core_DrawState_InvalidSecondaryCommandBuffer,
"vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%" PRIx64
" which has invalid framebuffer 0x%" PRIx64 ".",
HandleToUint64(secondaryBuffer), HandleToUint64(secondary_fb));
return skip;
}
}
return skip;
}
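// Cross-check query and pool state between a primary command buffer and a secondary it executes: inherited pipeline
// statistics, query types already active on the primary, and matching queue families for their command pools.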
static bool ValidateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
bool skip = false;
unordered_set<int> activeTypes;
for (auto queryObject : pCB->activeQueries) {
auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
if (queryPoolData != dev_data->queryPoolMap.end()) {
if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
pSubCB->beginInfo.pInheritanceInfo) {
VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), "VUID-vkCmdExecuteCommands-commandBuffer-00104",
"vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%" PRIx64
" which has invalid active query pool 0x%" PRIx64
". Pipeline statistics is being queried so the command buffer must have all bits set on the queryPool.",
HandleToUint64(pCB->commandBuffer), HandleToUint64(queryPoolData->first));
}
}
activeTypes.insert(queryPoolData->second.createInfo.queryType);
}
}
for (auto queryObject : pSubCB->startedQueries) {
auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidSecondaryCommandBuffer,
"vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%" PRIx64
" which has invalid active query pool 0x%" PRIx64
" of type %d but a query of that type has been started on secondary Cmd Buffer 0x%" PRIx64 ".",
HandleToUint64(pCB->commandBuffer), HandleToUint64(queryPoolData->first),
queryPoolData->second.createInfo.queryType, HandleToUint64(pSubCB->commandBuffer));
}
}
auto primary_pool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
auto secondary_pool = GetCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pSubCB->commandBuffer), kVUID_Core_DrawState_InvalidQueueFamily,
"vkCmdExecuteCommands(): Primary command buffer 0x%" PRIx64
" created in queue family %d has secondary command buffer 0x%" PRIx64 " created in queue family %d.",
HandleToUint64(pCB->commandBuffer), primary_pool->queueFamilyIndex, HandleToUint64(pSubCB->commandBuffer),
secondary_pool->queueFamilyIndex);
}
return skip;
}
VKAPI_ATTR void VKAPI_CALL CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
const VkCommandBuffer *pCommandBuffers) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
GLOBAL_CB_NODE *pSubCB = NULL;
for (uint32_t i = 0; i < commandBuffersCount; i++) {
pSubCB = GetCBNode(dev_data, pCommandBuffers[i]);
assert(pSubCB);
if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCommandBuffers[i]), "VUID-vkCmdExecuteCommands-pCommandBuffers-00088",
"vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%" PRIx64
" in element %u of pCommandBuffers array. All cmd buffers in pCommandBuffers array must be secondary.",
HandleToUint64(pCommandBuffers[i]), i);
            } else if (pCB->activeRenderPass) {  // A secondary CB executed within a render pass must have *CONTINUE_BIT set
if (pSubCB->beginInfo.pInheritanceInfo != nullptr) {
auto secondary_rp_state = GetRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCommandBuffers[i]),
"VUID-vkCmdExecuteCommands-pCommandBuffers-00096",
"vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIx64
") executed within render pass (0x%" PRIx64
") must have had vkBeginCommandBuffer() called w/ "
"VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
HandleToUint64(pCommandBuffers[i]), HandleToUint64(pCB->activeRenderPass->renderPass));
} else {
// Make sure render pass is compatible with parent command buffer pass if has continue
if (pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) {
skip |= ValidateRenderPassCompatibility(
dev_data, "primary command buffer", pCB->activeRenderPass, "secondary command buffer",
secondary_rp_state, "vkCmdExecuteCommands()", "VUID-vkCmdExecuteCommands-pInheritanceInfo-00098");
}
// If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
skip |=
ValidateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB, "vkCmdExecuteCommands()");
if (!pSubCB->cmd_execute_commands_functions.empty()) {
                        // Inherit primary's activeFramebuffer while running the validate functions
for (auto &function : pSubCB->cmd_execute_commands_functions) {
skip |= function(pCB, pCB->activeFramebuffer);
}
}
}
}
}
// TODO(mlentine): Move more logic into this method
skip |= ValidateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
skip |= ValidateCommandBufferState(dev_data, pSubCB, "vkCmdExecuteCommands()", 0,
"VUID-vkCmdExecuteCommands-pCommandBuffers-00089");
if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
if (pSubCB->in_use.load() || pCB->linkedCommandBuffers.count(pSubCB)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer),
"VUID-vkCmdExecuteCommands-pCommandBuffers-00090",
"Attempt to simultaneously execute command buffer 0x%" PRIx64
" without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set!",
HandleToUint64(pCB->commandBuffer));
}
if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
// Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCommandBuffers[i]),
kVUID_Core_DrawState_InvalidCommandBufferSimultaneousUse,
"vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIx64
") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary "
"command buffer (0x%" PRIx64
") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set, even "
"though it does.",
HandleToUint64(pCommandBuffers[i]), HandleToUint64(pCB->commandBuffer));
pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
}
}
if (!pCB->activeQueries.empty() && !dev_data->enabled_features.core.inheritedQueries) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCommandBuffers[i]), "VUID-vkCmdExecuteCommands-commandBuffer-00101",
"vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIx64
") cannot be submitted with a query in flight and inherited queries not supported on this device.",
HandleToUint64(pCommandBuffers[i]));
}
// TODO: separate validate from update! This is very tangled.
// Propagate layout transitions to the primary cmd buffer
// Novel Valid usage: "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001"
// initial layout usage of secondary command buffers resources must match parent command buffer
for (const auto &ilm_entry : pSubCB->imageLayoutMap) {
auto cb_entry = pCB->imageLayoutMap.find(ilm_entry.first);
if (cb_entry != pCB->imageLayoutMap.end()) {
// For exact matches ImageSubresourcePair matches, validate and update the parent entry
if ((VK_IMAGE_LAYOUT_UNDEFINED != ilm_entry.second.initialLayout) &&
(cb_entry->second.layout != ilm_entry.second.initialLayout)) {
const VkImageSubresource &subresource = ilm_entry.first.subresource;
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCommandBuffers[i]),
"UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001",
"%s: Cannot execute cmd buffer using image (0x%" PRIx64
") [sub-resource: aspectMask 0x%X "
"array layer %u, mip level %u], with current layout %s when first use is %s.",
"vkCmdExecuteCommands():", HandleToUint64(ilm_entry.first.image), subresource.aspectMask,
subresource.arrayLayer, subresource.mipLevel, string_VkImageLayout(cb_entry->second.layout),
string_VkImageLayout(ilm_entry.second.initialLayout));
}
cb_entry->second.layout = ilm_entry.second.layout;
} else {
// Look for partial matches (in aspectMask), and update or create parent map entry in SetLayout
assert(ilm_entry.first.hasSubresource);
IMAGE_CMD_BUF_LAYOUT_NODE node;
if (!FindCmdBufLayout(dev_data, pCB, ilm_entry.first.image, ilm_entry.first.subresource, node)) {
node.initialLayout = ilm_entry.second.initialLayout;
} else if ((VK_IMAGE_LAYOUT_UNDEFINED != ilm_entry.second.initialLayout) &&
(node.layout != ilm_entry.second.initialLayout)) {
const VkImageSubresource &subresource = ilm_entry.first.subresource;
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCommandBuffers[i]),
"UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001",
"%s: Cannot execute cmd buffer using image (0x%" PRIx64
") [sub-resource: aspectMask 0x%X "
"array layer %u, mip level %u], with current layout %s when first use is %s.",
"vkCmdExecuteCommands():", HandleToUint64(ilm_entry.first.image), subresource.aspectMask,
subresource.arrayLayer, subresource.mipLevel, string_VkImageLayout(node.layout),
string_VkImageLayout(ilm_entry.second.initialLayout));
}
node.layout = ilm_entry.second.layout;
SetLayout(dev_data, pCB, ilm_entry.first, node);
}
}
pSubCB->primaryCommandBuffer = pCB->commandBuffer;
pCB->linkedCommandBuffers.insert(pSubCB);
pSubCB->linkedCommandBuffers.insert(pCB);
for (auto &function : pSubCB->queryUpdates) {
pCB->queryUpdates.push_back(function);
}
for (auto &function : pSubCB->queue_submit_functions) {
pCB->queue_submit_functions.push_back(function);
}
}
skip |= ValidatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands()", "VUID-vkCmdExecuteCommands-bufferlevel");
skip |= ValidateCmdQueueFlags(dev_data, pCB, "vkCmdExecuteCommands()",
VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdExecuteCommands-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
}
lock.unlock();
if (!skip) dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
}
static bool PreCallValidateMapMemory(layer_data *dev_data, VkDevice device, VkDeviceMemory mem, VkDeviceSize offset,
VkDeviceSize size) {
bool skip = false;
DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
if (mem_info) {
auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
skip |= ValidateMapImageLayouts(dev_data, device, mem_info, offset, end_offset);
if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem), "VUID-vkMapMemory-memory-00682",
"Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIx64 ".",
HandleToUint64(mem));
}
}
skip |= ValidateMapMemRange(dev_data, mem, offset, size);
return skip;
}
static void PostCallRecordMapMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
void **ppData) {
// TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
StoreMemRanges(dev_data, mem, offset, size);
InitializeAndTrackMemory(dev_data, mem, offset, size, ppData);
}
VKAPI_ATTR VkResult VKAPI_CALL MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
void **ppData) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateMapMemory(dev_data, device, mem, offset, size);
lock.unlock();
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
if (!skip) {
result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData);
if (VK_SUCCESS == result) {
lock.lock();
PostCallRecordMapMemory(dev_data, mem, offset, size, ppData);
lock.unlock();
}
}
return result;
}
VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
unique_lock_t lock(global_lock);
skip |= DeleteMemRanges(dev_data, mem);
lock.unlock();
if (!skip) {
dev_data->dispatch_table.UnmapMemory(device, mem);
}
}
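// Verify that each range passed to vkFlush/InvalidateMappedMemoryRanges lies within the currently mapped range of its
// memory object.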
static bool ValidateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
const VkMappedMemoryRange *pMemRanges) {
bool skip = false;
for (uint32_t i = 0; i < memRangeCount; ++i) {
auto mem_info = GetMemObjInfo(dev_data, pMemRanges[i].memory);
if (mem_info) {
if (pMemRanges[i].size == VK_WHOLE_SIZE) {
if (mem_info->mem_range.offset > pMemRanges[i].offset) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(pMemRanges[i].memory), "VUID-VkMappedMemoryRange-size-00686",
"%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER
") is less than Memory Object's offset (" PRINTF_SIZE_T_SPECIFIER ").",
funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_info->mem_range.offset));
}
} else {
const uint64_t data_end = (mem_info->mem_range.size == VK_WHOLE_SIZE)
? mem_info->alloc_info.allocationSize
: (mem_info->mem_range.offset + mem_info->mem_range.size);
if ((mem_info->mem_range.offset > pMemRanges[i].offset) ||
(data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(pMemRanges[i].memory), "VUID-VkMappedMemoryRange-size-00685",
"%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER
") exceed the Memory Object's upper-bound (" PRINTF_SIZE_T_SPECIFIER ").",
funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end));
}
}
}
}
return skip;
}
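// For memory objects that the layer shadows, check the guard bands around the shadow copy for out-of-bounds writes,
// then copy the shadow contents to the driver's mapping so the flush picks them up.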
static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t mem_range_count,
const VkMappedMemoryRange *mem_ranges) {
bool skip = false;
for (uint32_t i = 0; i < mem_range_count; ++i) {
auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
if (mem_info) {
if (mem_info->shadow_copy) {
VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
? mem_info->mem_range.size
: (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
char *data = static_cast<char *>(mem_info->shadow_copy);
for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
if (data[j] != NoncoherentMemoryFillValue) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_ranges[i].memory),
kVUID_Core_MemTrack_InvalidMap, "Memory underflow was detected on mem obj 0x%" PRIx64,
HandleToUint64(mem_ranges[i].memory));
}
}
for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
if (data[j] != NoncoherentMemoryFillValue) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, HandleToUint64(mem_ranges[i].memory),
kVUID_Core_MemTrack_InvalidMap, "Memory overflow was detected on mem obj 0x%" PRIx64,
HandleToUint64(mem_ranges[i].memory));
}
}
memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
}
}
}
return skip;
}
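// After an invalidate, refresh the layer's shadow copy from the driver's mapping.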
static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) {
for (uint32_t i = 0; i < mem_range_count; ++i) {
auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
if (mem_info && mem_info->shadow_copy) {
VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
? mem_info->mem_range.size
: (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
char *data = static_cast<char *>(mem_info->shadow_copy);
memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
}
}
}
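// Enforce VkPhysicalDeviceLimits::nonCoherentAtomSize alignment on the offset and (unless it is VK_WHOLE_SIZE or
// reaches the end of the allocation) the size of each mapped memory range.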
static bool ValidateMappedMemoryRangeDeviceLimits(layer_data *dev_data, const char *func_name, uint32_t mem_range_count,
const VkMappedMemoryRange *mem_ranges) {
bool skip = false;
for (uint32_t i = 0; i < mem_range_count; ++i) {
uint64_t atom_size = dev_data->phys_dev_properties.properties.limits.nonCoherentAtomSize;
if (SafeModulo(mem_ranges[i].offset, atom_size) != 0) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem_ranges->memory), "VUID-VkMappedMemoryRange-offset-00687",
"%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
func_name, i, mem_ranges[i].offset, atom_size);
}
auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
        if (mem_info && (mem_ranges[i].size != VK_WHOLE_SIZE) &&
(mem_ranges[i].size + mem_ranges[i].offset != mem_info->alloc_info.allocationSize) &&
(SafeModulo(mem_ranges[i].size, atom_size) != 0)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem_ranges->memory), "VUID-VkMappedMemoryRange-size-01390",
"%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
func_name, i, mem_ranges[i].size, atom_size);
}
}
return skip;
}
static bool PreCallValidateFlushMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
const VkMappedMemoryRange *mem_ranges) {
bool skip = false;
lock_guard_t lock(global_lock);
skip |= ValidateMappedMemoryRangeDeviceLimits(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges);
skip |= ValidateAndCopyNoncoherentMemoryToDriver(dev_data, mem_range_count, mem_ranges);
skip |= ValidateMemoryIsMapped(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges);
return skip;
}
VKAPI_ATTR VkResult VKAPI_CALL FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
const VkMappedMemoryRange *pMemRanges) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!PreCallValidateFlushMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
result = dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
}
return result;
}
static bool PreCallValidateInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
const VkMappedMemoryRange *mem_ranges) {
bool skip = false;
lock_guard_t lock(global_lock);
skip |= ValidateMappedMemoryRangeDeviceLimits(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges);
skip |= ValidateMemoryIsMapped(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges);
return skip;
}
static void PostCallRecordInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
const VkMappedMemoryRange *mem_ranges) {
lock_guard_t lock(global_lock);
// Update our shadow copy with modified driver data
CopyNoncoherentMemoryFromDriver(dev_data, mem_range_count, mem_ranges);
}
VKAPI_ATTR VkResult VKAPI_CALL InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
const VkMappedMemoryRange *pMemRanges) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!PreCallValidateInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
if (result == VK_SUCCESS) {
PostCallRecordInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges);
}
}
return result;
}
static bool PreCallValidateBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
VkDeviceSize memoryOffset, const char *api_name) {
bool skip = false;
if (image_state) {
unique_lock_t lock(global_lock);
// Track objects tied to memory
uint64_t image_handle = HandleToUint64(image);
skip = ValidateSetMemBinding(dev_data, mem, image_handle, kVulkanObjectTypeImage, api_name);
if (!image_state->memory_requirements_checked) {
// There's not an explicit requirement in the spec to call vkGetImageMemoryRequirements() prior to calling
// BindImageMemory but it's implied in that memory being bound must conform with VkMemoryRequirements from
// vkGetImageMemoryRequirements()
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
image_handle, kVUID_Core_DrawState_InvalidImage,
"%s: Binding memory to image 0x%" PRIx64
" but vkGetImageMemoryRequirements() has not been called on that image.",
                            api_name, image_handle);
// Make the call for them so we can verify the state
lock.unlock();
dev_data->dispatch_table.GetImageMemoryRequirements(dev_data->device, image, &image_state->requirements);
lock.lock();
}
// Validate bound memory range information
auto mem_info = GetMemObjInfo(dev_data, mem);
if (mem_info) {
skip |= ValidateInsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR, api_name);
skip |= ValidateMemoryTypes(dev_data, mem_info, image_state->requirements.memoryTypeBits, api_name,
"VUID-vkBindImageMemory-memory-01047");
}
// Validate memory requirements alignment
if (SafeModulo(memoryOffset, image_state->requirements.alignment) != 0) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
image_handle, "VUID-vkBindImageMemory-memoryOffset-01048",
"%s: memoryOffset is 0x%" PRIxLEAST64
" but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
", returned from a call to vkGetImageMemoryRequirements with image.",
api_name, memoryOffset, image_state->requirements.alignment);
}
if (mem_info) {
// Validate memory requirements size
if (image_state->requirements.size > mem_info->alloc_info.allocationSize - memoryOffset) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
image_handle, "VUID-vkBindImageMemory-size-01049",
"%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
" but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
", returned from a call to vkGetImageMemoryRequirements with image.",
api_name, mem_info->alloc_info.allocationSize - memoryOffset, image_state->requirements.size);
}
// Validate dedicated allocation
if (mem_info->is_dedicated && ((mem_info->dedicated_image != image) || (memoryOffset != 0))) {
// TODO: Add vkBindImageMemory2KHR error message when added to spec.
auto validation_error = kVUIDUndefined;
if (strcmp(api_name, "vkBindImageMemory()") == 0) {
validation_error = "VUID-vkBindImageMemory-memory-01509";
}
skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                        image_handle, validation_error,
                        "%s: for dedicated memory allocation 0x%" PRIxLEAST64
                        ", VkMemoryDedicatedAllocateInfoKHR::image 0x%" PRIxLEAST64 " must be equal to image 0x%" PRIxLEAST64
" and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
api_name, HandleToUint64(mem), HandleToUint64(mem_info->dedicated_image), image_handle, memoryOffset);
}
}
}
return skip;
}
static void PostCallRecordBindImageMemory(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VkDeviceMemory mem,
VkDeviceSize memoryOffset, const char *api_name) {
if (image_state) {
unique_lock_t lock(global_lock);
// Track bound memory range information
auto mem_info = GetMemObjInfo(dev_data, mem);
if (mem_info) {
InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
}
// Track objects tied to memory
uint64_t image_handle = HandleToUint64(image);
SetMemBinding(dev_data, mem, image_state, memoryOffset, image_handle, kVulkanObjectTypeImage, api_name);
}
}
VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
IMAGE_STATE *image_state;
{
unique_lock_t lock(global_lock);
image_state = GetImageState(dev_data, image);
}
bool skip = PreCallValidateBindImageMemory(dev_data, image, image_state, mem, memoryOffset, "vkBindImageMemory()");
if (!skip) {
result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
if (result == VK_SUCCESS) {
PostCallRecordBindImageMemory(dev_data, image, image_state, mem, memoryOffset, "vkBindImageMemory()");
}
}
return result;
}
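// A minimal app-side sketch of the call order the checks above expect for image memory binding.
// Illustrative only; `device`, `image`, `memory`, and `allocationSize` are hypothetical values the
// application owns (allocationSize being the VkMemoryAllocateInfo::allocationSize of `memory`):
//
//     VkMemoryRequirements reqs;
//     vkGetImageMemoryRequirements(device, image, &reqs);  // query first, so the offset/size checks have data
//     VkDeviceSize offset = 0;                             // must be a multiple of reqs.alignment
//     assert(reqs.size <= allocationSize - offset);        // memory must be large enough past the offset
//     vkBindImageMemory(device, image, memory, offset);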
static bool PreCallValidateBindImageMemory2(layer_data *dev_data, std::vector<IMAGE_STATE *> *image_state, uint32_t bindInfoCount,
const VkBindImageMemoryInfoKHR *pBindInfos) {
{
unique_lock_t lock(global_lock);
for (uint32_t i = 0; i < bindInfoCount; i++) {
(*image_state)[i] = GetImageState(dev_data, pBindInfos[i].image);
}
}
bool skip = false;
char api_name[128];
for (uint32_t i = 0; i < bindInfoCount; i++) {
        snprintf(api_name, sizeof(api_name), "vkBindImageMemory2() pBindInfos[%u]", i);
skip |= PreCallValidateBindImageMemory(dev_data, pBindInfos[i].image, (*image_state)[i], pBindInfos[i].memory,
pBindInfos[i].memoryOffset, api_name);
}
return skip;
}
static void PostCallRecordBindImageMemory2(layer_data *dev_data, const std::vector<IMAGE_STATE *> &image_state,
uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR *pBindInfos) {
for (uint32_t i = 0; i < bindInfoCount; i++) {
PostCallRecordBindImageMemory(dev_data, pBindInfos[i].image, image_state[i], pBindInfos[i].memory,
pBindInfos[i].memoryOffset, "vkBindImageMemory2()");
}
}
VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory2(VkDevice device, uint32_t bindInfoCount,
const VkBindImageMemoryInfoKHR *pBindInfos) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
std::vector<IMAGE_STATE *> image_state(bindInfoCount);
if (!PreCallValidateBindImageMemory2(dev_data, &image_state, bindInfoCount, pBindInfos)) {
result = dev_data->dispatch_table.BindImageMemory2(device, bindInfoCount, pBindInfos);
if (result == VK_SUCCESS) {
PostCallRecordBindImageMemory2(dev_data, image_state, bindInfoCount, pBindInfos);
}
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount,
const VkBindImageMemoryInfoKHR *pBindInfos) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
std::vector<IMAGE_STATE *> image_state(bindInfoCount);
if (!PreCallValidateBindImageMemory2(dev_data, &image_state, bindInfoCount, pBindInfos)) {
result = dev_data->dispatch_table.BindImageMemory2KHR(device, bindInfoCount, pBindInfos);
if (result == VK_SUCCESS) {
PostCallRecordBindImageMemory2(dev_data, image_state, bindInfoCount, pBindInfos);
}
}
return result;
}
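// Sketch of batching several bindings through vkBindImageMemory2(); each element is run through the
// same per-image validation as vkBindImageMemory() above. Handles and arrays are hypothetical:
//
//     VkBindImageMemoryInfoKHR infos[2] = {};
//     for (uint32_t i = 0; i < 2; ++i) {
//         infos[i].sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR;
//         infos[i].image = images[i];
//         infos[i].memory = memories[i];
//         infos[i].memoryOffset = offsets[i];  // subject to the alignment/size checks above
//     }
//     vkBindImageMemory2(device, 2, infos);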
static bool PreCallValidateSetEvent(layer_data *dev_data, VkEvent event) {
bool skip = false;
auto event_state = GetEventNode(dev_data, event);
if (event_state) {
event_state->needsSignaled = false;
event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
if (event_state->write_in_use) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
HandleToUint64(event), kVUID_Core_DrawState_QueueForwardProgress,
"Cannot call vkSetEvent() on event 0x%" PRIx64 " that is already in use by a command buffer.",
HandleToUint64(event));
}
}
return skip;
}
static void PreCallRecordSetEvent(layer_data *dev_data, VkEvent event) {
// Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
// TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
// ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
for (auto queue_data : dev_data->queueMap) {
auto event_entry = queue_data.second.eventToStageMap.find(event);
if (event_entry != queue_data.second.eventToStageMap.end()) {
event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
}
}
}
VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateSetEvent(dev_data, event);
PreCallRecordSetEvent(dev_data, event);
lock.unlock();
if (!skip) result = dev_data->dispatch_table.SetEvent(device, event);
return result;
}
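// Usage note (hedged): vkSetEvent() signals from the host, which is why the record step above folds
// VK_PIPELINE_STAGE_HOST_BIT into every queue's view of the event. A device-side signal would instead
// be recorded into a command buffer, e.g. (hypothetical handles):
//
//     vkCmdSetEvent(commandBuffer, event, VK_PIPELINE_STAGE_TRANSFER_BIT);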
static bool PreCallValidateQueueBindSparse(layer_data *dev_data, VkQueue queue, uint32_t bindInfoCount,
const VkBindSparseInfo *pBindInfo, VkFence fence) {
auto pFence = GetFenceNode(dev_data, fence);
bool skip = ValidateFenceForSubmit(dev_data, pFence);
if (skip) {
return true;
}
unordered_set<VkSemaphore> signaled_semaphores;
unordered_set<VkSemaphore> unsignaled_semaphores;
unordered_set<VkSemaphore> internal_semaphores;
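    // These sets simulate semaphore state across the whole submission: a semaphore moves between
    // "signaled" and "unsignaled" as the binds are walked in order, so a wait with no prior or pending
    // signal (and a double-signal without an intervening wait) can be flagged below.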
for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
std::vector<SEMAPHORE_WAIT> semaphore_waits;
std::vector<VkSemaphore> semaphore_signals;
for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
if (pSemaphore && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
if (unsignaled_semaphores.count(semaphore) ||
(!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled))) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
"Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
HandleToUint64(queue), HandleToUint64(semaphore));
} else {
signaled_semaphores.erase(semaphore);
unsignaled_semaphores.insert(semaphore);
}
}
if (pSemaphore && pSemaphore->scope == kSyncScopeExternalTemporary) {
internal_semaphores.insert(semaphore);
}
}
for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
if (pSemaphore && pSemaphore->scope == kSyncScopeInternal) {
if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
"Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
" that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
HandleToUint64(queue), HandleToUint64(semaphore), HandleToUint64(pSemaphore->signaler.first));
} else {
unsignaled_semaphores.erase(semaphore);
signaled_semaphores.insert(semaphore);
}
}
}
        // Track sparse-bound image states so that, once all binds are processed, we can verify that
        // every image requiring a metadata aspect actually has it bound
std::unordered_set<IMAGE_STATE *> sparse_images;
// If we're binding sparse image memory make sure reqs were queried and note if metadata is required and bound
for (uint32_t i = 0; i < bindInfo.imageBindCount; ++i) {
const auto &image_bind = bindInfo.pImageBinds[i];
auto image_state = GetImageState(dev_data, image_bind.image);
if (!image_state)
continue; // Param/Object validation should report image_bind.image handles being invalid, so just skip here.
sparse_images.insert(image_state);
if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) {
// For now just warning if sparse image binding occurs without calling to get reqs first
return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), kVUID_Core_MemTrack_InvalidState,
"vkQueueBindSparse(): Binding sparse memory to image 0x%" PRIx64
" without first calling vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.",
HandleToUint64(image_state->image));
}
for (uint32_t j = 0; j < image_bind.bindCount; ++j) {
if (image_bind.pBinds[j].flags & VK_IMAGE_ASPECT_METADATA_BIT) {
image_state->sparse_metadata_bound = true;
}
}
}
for (uint32_t i = 0; i < bindInfo.imageOpaqueBindCount; ++i) {
auto image_state = GetImageState(dev_data, bindInfo.pImageOpaqueBinds[i].image);
if (!image_state)
                continue;  // Param/Object validation should report pImageOpaqueBinds[i].image handles being invalid, so just skip here.
sparse_images.insert(image_state);
if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) {
// For now just warning if sparse image binding occurs without calling to get reqs first
return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), kVUID_Core_MemTrack_InvalidState,
"vkQueueBindSparse(): Binding opaque sparse memory to image 0x%" PRIx64
" without first calling vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.",
HandleToUint64(image_state->image));
}
}
for (const auto &sparse_image_state : sparse_images) {
if (sparse_image_state->sparse_metadata_required && !sparse_image_state->sparse_metadata_bound) {
// Warn if sparse image binding metadata required for image with sparse binding, but metadata not bound
return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(sparse_image_state->image), kVUID_Core_MemTrack_InvalidState,
"vkQueueBindSparse(): Binding sparse memory to image 0x%" PRIx64
" which requires a metadata aspect but no binding with VK_IMAGE_ASPECT_METADATA_BIT set was made.",
HandleToUint64(sparse_image_state->image));
}
}
}
return skip;
}
static void PostCallRecordQueueBindSparse(layer_data *dev_data, VkQueue queue, uint32_t bindInfoCount,
const VkBindSparseInfo *pBindInfo, VkFence fence) {
uint64_t early_retire_seq = 0;
auto pFence = GetFenceNode(dev_data, fence);
auto pQueue = GetQueueState(dev_data, queue);
if (pFence) {
if (pFence->scope == kSyncScopeInternal) {
SubmitFence(pQueue, pFence, std::max(1u, bindInfoCount));
if (!bindInfoCount) {
// No work to do, just dropping a fence in the queue by itself.
pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(),
std::vector<VkSemaphore>(), std::vector<VkSemaphore>(), fence);
}
} else {
// Retire work up until this fence early, we will not see the wait that corresponds to this signal
early_retire_seq = pQueue->seq + pQueue->submissions.size();
if (!dev_data->external_sync_warning) {
dev_data->external_sync_warning = true;
log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
HandleToUint64(fence), kVUID_Core_DrawState_QueueForwardProgress,
"vkQueueBindSparse(): Signaling external fence 0x%" PRIx64 " on queue 0x%" PRIx64
" will disable validation of preceding command buffer lifecycle states and the in-use status of associated "
"objects.",
HandleToUint64(fence), HandleToUint64(queue));
}
}
}
for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
// Track objects tied to memory
for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k];
SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
HandleToUint64(bindInfo.pBufferBinds[j].buffer), kVulkanObjectTypeBuffer);
}
}
for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k];
SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
HandleToUint64(bindInfo.pImageOpaqueBinds[j].image), kVulkanObjectTypeImage);
}
}
for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k];
// TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data
VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4;
SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, size},
HandleToUint64(bindInfo.pImageBinds[j].image), kVulkanObjectTypeImage);
}
}
std::vector<SEMAPHORE_WAIT> semaphore_waits;
std::vector<VkSemaphore> semaphore_signals;
std::vector<VkSemaphore> semaphore_externals;
for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
if (pSemaphore) {
if (pSemaphore->scope == kSyncScopeInternal) {
if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
pSemaphore->in_use.fetch_add(1);
}
pSemaphore->signaler.first = VK_NULL_HANDLE;
pSemaphore->signaled = false;
} else {
semaphore_externals.push_back(semaphore);
pSemaphore->in_use.fetch_add(1);
if (pSemaphore->scope == kSyncScopeExternalTemporary) {
pSemaphore->scope = kSyncScopeInternal;
}
}
}
}
for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
if (pSemaphore) {
if (pSemaphore->scope == kSyncScopeInternal) {
pSemaphore->signaler.first = queue;
pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
pSemaphore->signaled = true;
pSemaphore->in_use.fetch_add(1);
semaphore_signals.push_back(semaphore);
} else {
// Retire work up until this submit early, we will not see the wait that corresponds to this signal
early_retire_seq = std::max(early_retire_seq, pQueue->seq + pQueue->submissions.size() + 1);
if (!dev_data->external_sync_warning) {
dev_data->external_sync_warning = true;
log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), kVUID_Core_DrawState_QueueForwardProgress,
"vkQueueBindSparse(): Signaling external semaphore 0x%" PRIx64 " on queue 0x%" PRIx64
" will disable validation of preceding command buffer lifecycle states and the in-use status of "
"associated objects.",
HandleToUint64(semaphore), HandleToUint64(queue));
}
}
}
}
pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), semaphore_waits, semaphore_signals, semaphore_externals,
bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
}
if (early_retire_seq) {
RetireWorkOnQueue(dev_data, pQueue, early_retire_seq);
}
}
VKAPI_ATTR VkResult VKAPI_CALL QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
VkFence fence) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateQueueBindSparse(dev_data, queue, bindInfoCount, pBindInfo, fence);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
lock.lock();
PostCallRecordQueueBindSparse(dev_data, queue, bindInfoCount, pBindInfo, fence);
lock.unlock();
return result;
}
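// A minimal sketch of an opaque sparse bind submission (hypothetical handles; wait/signal semaphores
// omitted). Note the validation above warns unless vkGetImageSparseMemoryRequirements[2KHR]() has been
// called on the image first:
//
//     VkSparseMemoryBind bind = {};
//     bind.resourceOffset = 0;
//     bind.size = memReqs.size;  // VkMemoryRequirements::size from vkGetImageMemoryRequirements()
//     bind.memory = memory;
//     VkSparseImageOpaqueMemoryBindInfo opaque = {};
//     opaque.image = image;
//     opaque.bindCount = 1;
//     opaque.pBinds = &bind;
//     VkBindSparseInfo info = {};
//     info.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
//     info.imageOpaqueBindCount = 1;
//     info.pImageOpaqueBinds = &opaque;
//     vkQueueBindSparse(queue, 1, &info, fence);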
VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
if (result == VK_SUCCESS) {
lock_guard_t lock(global_lock);
SEMAPHORE_NODE *sNode = &dev_data->semaphoreMap[*pSemaphore];
sNode->signaler.first = VK_NULL_HANDLE;
sNode->signaler.second = 0;
sNode->signaled = false;
sNode->scope = kSyncScopeInternal;
}
return result;
}
static bool PreCallValidateImportSemaphore(layer_data *dev_data, VkSemaphore semaphore, const char *caller_name) {
SEMAPHORE_NODE *sema_node = GetSemaphoreNode(dev_data, semaphore);
VK_OBJECT obj_struct = {HandleToUint64(semaphore), kVulkanObjectTypeSemaphore};
bool skip = false;
if (sema_node) {
skip |= ValidateObjectNotInUse(dev_data, sema_node, obj_struct, caller_name, kVUIDUndefined);
}
return skip;
}
static void PostCallRecordImportSemaphore(layer_data *dev_data, VkSemaphore semaphore,
VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type, VkSemaphoreImportFlagsKHR flags) {
SEMAPHORE_NODE *sema_node = GetSemaphoreNode(dev_data, semaphore);
if (sema_node && sema_node->scope != kSyncScopeExternalPermanent) {
if ((handle_type == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR || flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR) &&
sema_node->scope == kSyncScopeInternal) {
sema_node->scope = kSyncScopeExternalTemporary;
} else {
sema_node->scope = kSyncScopeExternalPermanent;
}
}
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
VKAPI_ATTR VkResult VKAPI_CALL
ImportSemaphoreWin32HandleKHR(VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip =
PreCallValidateImportSemaphore(dev_data, pImportSemaphoreWin32HandleInfo->semaphore, "vkImportSemaphoreWin32HandleKHR");
if (!skip) {
result = dev_data->dispatch_table.ImportSemaphoreWin32HandleKHR(device, pImportSemaphoreWin32HandleInfo);
}
if (result == VK_SUCCESS) {
PostCallRecordImportSemaphore(dev_data, pImportSemaphoreWin32HandleInfo->semaphore,
pImportSemaphoreWin32HandleInfo->handleType, pImportSemaphoreWin32HandleInfo->flags);
}
return result;
}
#endif
VKAPI_ATTR VkResult VKAPI_CALL ImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = PreCallValidateImportSemaphore(dev_data, pImportSemaphoreFdInfo->semaphore, "vkImportSemaphoreFdKHR");
if (!skip) {
result = dev_data->dispatch_table.ImportSemaphoreFdKHR(device, pImportSemaphoreFdInfo);
}
if (result == VK_SUCCESS) {
PostCallRecordImportSemaphore(dev_data, pImportSemaphoreFdInfo->semaphore, pImportSemaphoreFdInfo->handleType,
pImportSemaphoreFdInfo->flags);
}
return result;
}
static void PostCallRecordGetSemaphore(layer_data *dev_data, VkSemaphore semaphore,
VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type) {
SEMAPHORE_NODE *sema_node = GetSemaphoreNode(dev_data, semaphore);
if (sema_node && handle_type != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR) {
// Cannot track semaphore state once it is exported, except for Sync FD handle types which have copy transference
sema_node->scope = kSyncScopeExternalPermanent;
}
}
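// Summary of the transference model applied above: importing with VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR
// (or any sync-FD import, which always has copy transference) only borrows the payload, so the semaphore
// reverts to internal scope after the next wait; a permanent import or a non-FD export hands the payload
// over for good, and the layer stops tracking its signal state.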
#ifdef VK_USE_PLATFORM_WIN32_KHR
VKAPI_ATTR VkResult VKAPI_CALL GetSemaphoreWin32HandleKHR(VkDevice device,
const VkSemaphoreGetWin32HandleInfoKHR *pGetWin32HandleInfo,
HANDLE *pHandle) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.GetSemaphoreWin32HandleKHR(device, pGetWin32HandleInfo, pHandle);
if (result == VK_SUCCESS) {
PostCallRecordGetSemaphore(dev_data, pGetWin32HandleInfo->semaphore, pGetWin32HandleInfo->handleType);
}
return result;
}
#endif
VKAPI_ATTR VkResult VKAPI_CALL GetSemaphoreFdKHR(VkDevice device, const VkSemaphoreGetFdInfoKHR *pGetFdInfo, int *pFd) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.GetSemaphoreFdKHR(device, pGetFdInfo, pFd);
if (result == VK_SUCCESS) {
PostCallRecordGetSemaphore(dev_data, pGetFdInfo->semaphore, pGetFdInfo->handleType);
}
return result;
}
static bool PreCallValidateImportFence(layer_data *dev_data, VkFence fence, const char *caller_name) {
FENCE_NODE *fence_node = GetFenceNode(dev_data, fence);
bool skip = false;
if (fence_node && fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
HandleToUint64(fence), kVUIDUndefined, "Cannot call %s on fence 0x%" PRIx64 " that is currently in use.",
caller_name, HandleToUint64(fence));
}
return skip;
}
static void PostCallRecordImportFence(layer_data *dev_data, VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type,
VkFenceImportFlagsKHR flags) {
FENCE_NODE *fence_node = GetFenceNode(dev_data, fence);
if (fence_node && fence_node->scope != kSyncScopeExternalPermanent) {
if ((handle_type == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR || flags & VK_FENCE_IMPORT_TEMPORARY_BIT_KHR) &&
fence_node->scope == kSyncScopeInternal) {
fence_node->scope = kSyncScopeExternalTemporary;
} else {
fence_node->scope = kSyncScopeExternalPermanent;
}
}
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
VKAPI_ATTR VkResult VKAPI_CALL ImportFenceWin32HandleKHR(VkDevice device,
const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = PreCallValidateImportFence(dev_data, pImportFenceWin32HandleInfo->fence, "vkImportFenceWin32HandleKHR");
if (!skip) {
result = dev_data->dispatch_table.ImportFenceWin32HandleKHR(device, pImportFenceWin32HandleInfo);
}
if (result == VK_SUCCESS) {
PostCallRecordImportFence(dev_data, pImportFenceWin32HandleInfo->fence, pImportFenceWin32HandleInfo->handleType,
pImportFenceWin32HandleInfo->flags);
}
return result;
}
#endif
VKAPI_ATTR VkResult VKAPI_CALL ImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = PreCallValidateImportFence(dev_data, pImportFenceFdInfo->fence, "vkImportFenceFdKHR");
if (!skip) {
result = dev_data->dispatch_table.ImportFenceFdKHR(device, pImportFenceFdInfo);
}
if (result == VK_SUCCESS) {
PostCallRecordImportFence(dev_data, pImportFenceFdInfo->fence, pImportFenceFdInfo->handleType, pImportFenceFdInfo->flags);
}
return result;
}
static void PostCallRecordGetFence(layer_data *dev_data, VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type) {
FENCE_NODE *fence_node = GetFenceNode(dev_data, fence);
if (fence_node) {
if (handle_type != VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR) {
// Export with reference transference becomes external
fence_node->scope = kSyncScopeExternalPermanent;
} else if (fence_node->scope == kSyncScopeInternal) {
// Export with copy transference has a side effect of resetting the fence
fence_node->state = FENCE_UNSIGNALED;
}
}
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
VKAPI_ATTR VkResult VKAPI_CALL GetFenceWin32HandleKHR(VkDevice device, const VkFenceGetWin32HandleInfoKHR *pGetWin32HandleInfo,
HANDLE *pHandle) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.GetFenceWin32HandleKHR(device, pGetWin32HandleInfo, pHandle);
if (result == VK_SUCCESS) {
PostCallRecordGetFence(dev_data, pGetWin32HandleInfo->fence, pGetWin32HandleInfo->handleType);
}
return result;
}
#endif
VKAPI_ATTR VkResult VKAPI_CALL GetFenceFdKHR(VkDevice device, const VkFenceGetFdInfoKHR *pGetFdInfo, int *pFd) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.GetFenceFdKHR(device, pGetFdInfo, pFd);
if (result == VK_SUCCESS) {
PostCallRecordGetFence(dev_data, pGetFdInfo->fence, pGetFdInfo->handleType);
}
return result;
}
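// Sketch of a sync-FD fence export/import round trip (hypothetical handles). Note the copy-transference
// side effect recorded above: exporting a sync FD resets the source fence to unsignaled:
//
//     VkFenceGetFdInfoKHR get_info = {VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR};
//     get_info.fence = fence;
//     get_info.handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
//     int fd = -1;
//     vkGetFenceFdKHR(device, &get_info, &fd);
//     VkImportFenceFdInfoKHR import_info = {VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR};
//     import_info.fence = other_fence;
//     import_info.flags = VK_FENCE_IMPORT_TEMPORARY_BIT_KHR;
//     import_info.handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
//     import_info.fd = fd;
//     vkImportFenceFdKHR(device, &import_info);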
VKAPI_ATTR VkResult VKAPI_CALL CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent);
if (result == VK_SUCCESS) {
lock_guard_t lock(global_lock);
dev_data->eventMap[*pEvent].needsSignaled = false;
dev_data->eventMap[*pEvent].write_in_use = 0;
dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
}
return result;
}
static bool PreCallValidateCreateSwapchainKHR(layer_data *dev_data, const char *func_name,
VkSwapchainCreateInfoKHR const *pCreateInfo, SURFACE_STATE *surface_state,
SWAPCHAIN_NODE *old_swapchain_state) {
auto most_recent_swapchain = surface_state->swapchain ? surface_state->swapchain : surface_state->old_swapchain;
// TODO: revisit this. some of these rules are being relaxed.
// All physical devices and queue families are required to be able
// to present to any native window on Android; require the
// application to have established support on any other platform.
if (!dev_data->instance_data->extensions.vk_khr_android_surface) {
auto support_predicate = [dev_data](decltype(surface_state->gpu_queue_support)::value_type qs) -> bool {
// TODO: should restrict search only to queue families of VkDeviceQueueCreateInfos, not whole phys. device
return (qs.first.gpu == dev_data->physical_device) && qs.second;
};
const auto &support = surface_state->gpu_queue_support;
bool is_supported = std::any_of(support.begin(), support.end(), support_predicate);
if (!is_supported) {
if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-surface-01270",
"%s: pCreateInfo->surface is not known at this time to be supported for presentation by this device. The "
"vkGetPhysicalDeviceSurfaceSupportKHR() must be called beforehand, and it must return VK_TRUE support with "
"this surface for at least one queue family of this device.",
func_name))
return true;
}
}
if (most_recent_swapchain != old_swapchain_state || (surface_state->old_swapchain && surface_state->swapchain)) {
if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(dev_data->device), kVUID_Core_DrawState_SwapchainAlreadyExists,
"%s: surface has an existing swapchain other than oldSwapchain", func_name))
return true;
}
if (old_swapchain_state && old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pCreateInfo->oldSwapchain), kVUID_Core_DrawState_SwapchainWrongSurface,
"%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name))
return true;
}
if ((pCreateInfo->imageExtent.width == 0) || (pCreateInfo->imageExtent.height == 0)) {
if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-imageExtent-01689",
"%s: pCreateInfo->imageExtent = (%d, %d) which is illegal.", func_name, pCreateInfo->imageExtent.width,
pCreateInfo->imageExtent.height))
return true;
}
auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) {
if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(dev_data->physical_device), kVUID_Core_DrawState_SwapchainCreateBeforeQuery,
"%s: surface capabilities not retrieved for this physical device", func_name))
return true;
} else { // have valid capabilities
auto &capabilities = physical_device_state->surfaceCapabilities;
// Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
if (pCreateInfo->minImageCount < capabilities.minImageCount) {
if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-minImageCount-01271",
"%s called with minImageCount = %d, which is outside the bounds returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount))
return true;
}
if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-minImageCount-01272",
"%s called with minImageCount = %d, which is outside the bounds returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount))
return true;
}
// Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
if ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
(pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
(pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
(pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height)) {
if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-imageExtent-01274",
"%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
"maxImageExtent = (%d,%d).",
func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
capabilities.currentExtent.width, capabilities.currentExtent.height, capabilities.minImageExtent.width,
capabilities.minImageExtent.height, capabilities.maxImageExtent.width, capabilities.maxImageExtent.height))
return true;
}
// pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
// VkSurfaceCapabilitiesKHR::supportedTransforms.
if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
!(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
// This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build
// it up a little at a time, and then log it:
std::string errorString = "";
char str[1024];
// Here's the first part of the message:
sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s). Supported values are:\n", func_name,
string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
errorString += str;
for (int i = 0; i < 32; i++) {
// Build up the rest of the message:
if ((1 << i) & capabilities.supportedTransforms) {
const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1 << i));
sprintf(str, " %s\n", newStr);
errorString += str;
}
}
// Log the message that we've built up:
if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-preTransform-01279", "%s.",
errorString.c_str()))
return true;
}
// pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
// VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
!((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
// This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build
// it up a little at a time, and then log it:
std::string errorString = "";
char str[1024];
// Here's the first part of the message:
sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s). Supported values are:\n",
func_name, string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
errorString += str;
for (int i = 0; i < 32; i++) {
// Build up the rest of the message:
if ((1 << i) & capabilities.supportedCompositeAlpha) {
const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1 << i));
sprintf(str, " %s\n", newStr);
errorString += str;
}
}
// Log the message that we've built up:
if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-compositeAlpha-01280", "%s.",
errorString.c_str()))
return true;
}
// Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
if (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers) {
if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-imageArrayLayers-01275",
"%s called with a non-supported imageArrayLayers (i.e. %d). Maximum value is %d.", func_name,
pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers))
return true;
}
// Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) {
if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-imageUsage-01276",
"%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x). Supported flag bits are 0x%08x.",
func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags))
return true;
}
}
// Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) {
if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(dev_data->device), kVUID_Core_DrawState_SwapchainCreateBeforeQuery,
"%s called before calling vkGetPhysicalDeviceSurfaceFormatsKHR().", func_name))
return true;
} else {
// Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
bool foundFormat = false;
bool foundColorSpace = false;
bool foundMatch = false;
for (auto const &format : physical_device_state->surface_formats) {
if (pCreateInfo->imageFormat == format.format) {
// Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
foundFormat = true;
if (pCreateInfo->imageColorSpace == format.colorSpace) {
foundMatch = true;
break;
}
} else {
if (pCreateInfo->imageColorSpace == format.colorSpace) {
foundColorSpace = true;
}
}
}
if (!foundMatch) {
if (!foundFormat) {
if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273",
"%s called with a non-supported pCreateInfo->imageFormat (i.e. %d).", func_name,
pCreateInfo->imageFormat))
return true;
}
if (!foundColorSpace) {
if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273",
"%s called with a non-supported pCreateInfo->imageColorSpace (i.e. %d).", func_name,
pCreateInfo->imageColorSpace))
return true;
}
}
}
// Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
if (physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS) {
// FIFO is required to always be supported
if (pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR) {
if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(dev_data->device), kVUID_Core_DrawState_SwapchainCreateBeforeQuery,
"%s called before calling vkGetPhysicalDeviceSurfacePresentModesKHR().", func_name))
return true;
}
} else {
// Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
bool foundMatch = std::find(physical_device_state->present_modes.begin(), physical_device_state->present_modes.end(),
pCreateInfo->presentMode) != physical_device_state->present_modes.end();
if (!foundMatch) {
if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-presentMode-01281",
"%s called with a non-supported presentMode (i.e. %s).", func_name,
string_VkPresentModeKHR(pCreateInfo->presentMode)))
return true;
}
}
// Validate state for shared presentable case
if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
if (!dev_data->extensions.vk_khr_shared_presentable_image) {
if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(dev_data->device), kVUID_Core_DrawState_ExtensionNotEnabled,
"%s called with presentMode %s which requires the VK_KHR_shared_presentable_image extension, which has not "
"been enabled.",
func_name, string_VkPresentModeKHR(pCreateInfo->presentMode)))
return true;
} else if (pCreateInfo->minImageCount != 1) {
if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(dev_data->device), "VUID-VkSwapchainCreateInfoKHR-minImageCount-01383",
"%s called with presentMode %s, but minImageCount value is %d. For shared presentable image, minImageCount "
"must be 1.",
func_name, string_VkPresentModeKHR(pCreateInfo->presentMode), pCreateInfo->minImageCount))
return true;
}
}
return false;
}
static void PostCallRecordCreateSwapchainKHR(layer_data *dev_data, VkResult result, const VkSwapchainCreateInfoKHR *pCreateInfo,
VkSwapchainKHR *pSwapchain, SURFACE_STATE *surface_state,
SWAPCHAIN_NODE *old_swapchain_state) {
if (VK_SUCCESS == result) {
lock_guard_t lock(global_lock);
auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
swapchain_state->shared_presentable = true;
}
surface_state->swapchain = swapchain_state.get();
dev_data->swapchainMap[*pSwapchain] = std::move(swapchain_state);
} else {
surface_state->swapchain = nullptr;
}
// Spec requires that even if CreateSwapchainKHR fails, oldSwapchain behaves as replaced.
if (old_swapchain_state) {
old_swapchain_state->replaced = true;
}
surface_state->old_swapchain = old_swapchain_state;
return;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
auto surface_state = GetSurfaceState(dev_data->instance_data, pCreateInfo->surface);
auto old_swapchain_state = GetSwapchainNode(dev_data, pCreateInfo->oldSwapchain);
    if (PreCallValidateCreateSwapchainKHR(dev_data, "vkCreateSwapchainKHR()", pCreateInfo, surface_state, old_swapchain_state)) {
return VK_ERROR_VALIDATION_FAILED_EXT;
}
VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
PostCallRecordCreateSwapchainKHR(dev_data, result, pCreateInfo, pSwapchain, surface_state, old_swapchain_state);
return result;
}
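// A minimal sketch of a create info that satisfies the capability checks above. Values are
// placeholders; a real app clamps them against vkGetPhysicalDeviceSurfaceCapabilitiesKHR() and picks
// a format/present mode from the enumerated lists:
//
//     VkSwapchainCreateInfoKHR ci = {VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR};
//     ci.surface = surface;
//     ci.minImageCount = caps.minImageCount;
//     ci.imageFormat = surface_format.format;
//     ci.imageColorSpace = surface_format.colorSpace;
//     ci.imageExtent = caps.currentExtent;
//     ci.imageArrayLayers = 1;
//     ci.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
//     ci.preTransform = caps.currentTransform;
//     ci.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
//     ci.presentMode = VK_PRESENT_MODE_FIFO_KHR;  // always supported
//     ci.clipped = VK_TRUE;
//     vkCreateSwapchainKHR(device, &ci, nullptr, &swapchain);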
VKAPI_ATTR void VKAPI_CALL DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
unique_lock_t lock(global_lock);
auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
if (swapchain_data) {
// Pre-record to avoid Destroy/Create race
if (swapchain_data->images.size() > 0) {
for (auto swapchain_image : swapchain_data->images) {
auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
if (image_sub != dev_data->imageSubresourceMap.end()) {
for (auto imgsubpair : image_sub->second) {
auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
if (image_item != dev_data->imageLayoutMap.end()) {
dev_data->imageLayoutMap.erase(image_item);
}
}
dev_data->imageSubresourceMap.erase(image_sub);
}
skip = ClearMemoryObjectBindings(dev_data, HandleToUint64(swapchain_image), kVulkanObjectTypeSwapchainKHR);
EraseQFOImageRelaseBarriers(dev_data, swapchain_image);
dev_data->imageMap.erase(swapchain_image);
}
}
auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
if (surface_state) {
if (surface_state->swapchain == swapchain_data) surface_state->swapchain = nullptr;
if (surface_state->old_swapchain == swapchain_data) surface_state->old_swapchain = nullptr;
}
dev_data->swapchainMap.erase(swapchain);
}
lock.unlock();
if (!skip) dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
}
static bool PreCallValidateGetSwapchainImagesKHR(layer_data *device_data, SWAPCHAIN_NODE *swapchain_state, VkDevice device,
uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
bool skip = false;
if (swapchain_state && pSwapchainImages) {
lock_guard_t lock(global_lock);
// Compare the preliminary value of *pSwapchainImageCount with the value this time:
if (swapchain_state->vkGetSwapchainImagesKHRState == UNCALLED) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), kVUID_Core_Swapchain_PriorCount,
"vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImageCount; but no prior positive value has "
"been seen for pSwapchainImages.");
} else if (*pSwapchainImageCount > swapchain_state->get_swapchain_image_count) {
skip |=
log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), kVUID_Core_Swapchain_InvalidCount,
"vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImageCount, and with pSwapchainImages set to a "
"value (%d) that is greater than the value (%d) that was returned when pSwapchainImageCount was NULL.",
*pSwapchainImageCount, swapchain_state->get_swapchain_image_count);
}
}
return skip;
}
static void PostCallRecordGetSwapchainImagesKHR(layer_data *device_data, SWAPCHAIN_NODE *swapchain_state, VkDevice device,
uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
lock_guard_t lock(global_lock);
if (*pSwapchainImageCount > swapchain_state->images.size()) swapchain_state->images.resize(*pSwapchainImageCount);
if (pSwapchainImages) {
if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_DETAILS) {
swapchain_state->vkGetSwapchainImagesKHRState = QUERY_DETAILS;
}
for (uint32_t i = 0; i < *pSwapchainImageCount; ++i) {
if (swapchain_state->images[i] != VK_NULL_HANDLE) continue; // Already retrieved this.
IMAGE_LAYOUT_NODE image_layout_node;
image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
image_layout_node.format = swapchain_state->createInfo.imageFormat;
// Add imageMap entries for each swapchain image
VkImageCreateInfo image_ci = {};
image_ci.flags = 0;
image_ci.imageType = VK_IMAGE_TYPE_2D;
image_ci.format = swapchain_state->createInfo.imageFormat;
image_ci.extent.width = swapchain_state->createInfo.imageExtent.width;
image_ci.extent.height = swapchain_state->createInfo.imageExtent.height;
image_ci.extent.depth = 1;
image_ci.mipLevels = 1;
image_ci.arrayLayers = swapchain_state->createInfo.imageArrayLayers;
image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
image_ci.usage = swapchain_state->createInfo.imageUsage;
image_ci.sharingMode = swapchain_state->createInfo.imageSharingMode;
device_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci));
auto &image_state = device_data->imageMap[pSwapchainImages[i]];
image_state->valid = false;
image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
swapchain_state->images[i] = pSwapchainImages[i];
ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
device_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
device_data->imageLayoutMap[subpair] = image_layout_node;
}
}
if (*pSwapchainImageCount) {
if (swapchain_state->vkGetSwapchainImagesKHRState < QUERY_COUNT) {
swapchain_state->vkGetSwapchainImagesKHRState = QUERY_COUNT;
}
swapchain_state->get_swapchain_image_count = *pSwapchainImageCount;
}
}
VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
VkImage *pSwapchainImages) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
auto swapchain_state = GetSwapchainNode(device_data, swapchain);
bool skip = PreCallValidateGetSwapchainImagesKHR(device_data, swapchain_state, device, pSwapchainImageCount, pSwapchainImages);
if (!skip) {
result = device_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
}
if ((result == VK_SUCCESS || result == VK_INCOMPLETE)) {
PostCallRecordGetSwapchainImagesKHR(device_data, swapchain_state, device, pSwapchainImageCount, pSwapchainImages);
}
return result;
}
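// The standard two-call idiom the validation above expects (hypothetical `device`/`swapchain`): query
// the count first, then fetch the handles:
//
//     uint32_t count = 0;
//     vkGetSwapchainImagesKHR(device, swapchain, &count, nullptr);       // records QUERY_COUNT
//     std::vector<VkImage> images(count);
//     vkGetSwapchainImagesKHR(device, swapchain, &count, images.data()); // records QUERY_DETAILS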
static bool PreCallValidateQueuePresentKHR(layer_data *dev_data, VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
bool skip = false;
lock_guard_t lock(global_lock);
auto queue_state = GetQueueState(dev_data, queue);
for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
auto pSemaphore = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
if (pSemaphore && !pSemaphore->signaled) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
kVUID_Core_DrawState_QueueForwardProgress,
"Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
HandleToUint64(queue), HandleToUint64(pPresentInfo->pWaitSemaphores[i]));
}
}
for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
if (swapchain_data) {
if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pPresentInfo->pSwapchains[i]), kVUID_Core_DrawState_SwapchainInvalidImage,
"vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
} else {
auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
auto image_state = GetImageState(dev_data, image);
if (image_state->shared_presentable) {
image_state->layout_locked = true;
}
if (!image_state->acquired) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pPresentInfo->pSwapchains[i]), kVUID_Core_DrawState_SwapchainImageNotAcquired,
"vkQueuePresentKHR: Swapchain image index %u has not been acquired.", pPresentInfo->pImageIndices[i]);
}
vector<VkImageLayout> layouts;
if (FindLayouts(dev_data, image, layouts)) {
for (auto layout : layouts) {
if ((layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) && (!dev_data->extensions.vk_khr_shared_presentable_image ||
(layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR))) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
HandleToUint64(queue), "VUID-VkPresentInfoKHR-pImageIndices-01296",
"Images passed to present must be in layout VK_IMAGE_LAYOUT_PRESENT_SRC_KHR or "
"VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR but is in %s.",
string_VkImageLayout(layout));
}
}
}
}
// All physical devices and queue families are required to be able to present to any native window on Android; require
// the application to have established support on any other platform.
if (!dev_data->instance_data->extensions.vk_khr_android_surface) {
auto surface_state = GetSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
auto support_it = surface_state->gpu_queue_support.find({dev_data->physical_device, queue_state->queueFamilyIndex});
if (support_it == surface_state->gpu_queue_support.end()) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pPresentInfo->pSwapchains[i]), kVUID_Core_DrawState_SwapchainUnsupportedQueue,
"vkQueuePresentKHR: Presenting image without calling vkGetPhysicalDeviceSurfaceSupportKHR");
} else if (!support_it->second) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pPresentInfo->pSwapchains[i]), "VUID-vkQueuePresentKHR-pSwapchains-01292",
"vkQueuePresentKHR: Presenting image on queue that cannot present to this surface.");
}
}
}
}
if (pPresentInfo && pPresentInfo->pNext) {
// Verify ext struct
const auto *present_regions = lvl_find_in_chain<VkPresentRegionsKHR>(pPresentInfo->pNext);
if (present_regions) {
for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) {
auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
assert(swapchain_data);
VkPresentRegionKHR region = present_regions->pRegions[i];
for (uint32_t j = 0; j < region.rectangleCount; ++j) {
VkRectLayerKHR rect = region.pRectangles[j];
if ((rect.offset.x + rect.extent.width) > swapchain_data->createInfo.imageExtent.width) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]),
"VUID-VkRectLayerKHR-offset-01261",
"vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, "
"pRegion[%i].pRectangles[%i], the sum of offset.x (%i) and extent.width (%i) is greater "
"than the corresponding swapchain's imageExtent.width (%i).",
i, j, rect.offset.x, rect.extent.width, swapchain_data->createInfo.imageExtent.width);
}
if ((rect.offset.y + rect.extent.height) > swapchain_data->createInfo.imageExtent.height) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, HandleToUint64(pPresentInfo->pSwapchains[i]),
"VUID-VkRectLayerKHR-offset-01261",
"vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, "
"pRegion[%i].pRectangles[%i], the sum of offset.y (%i) and extent.height (%i) is greater "
"than the corresponding swapchain's imageExtent.height (%i).",
i, j, rect.offset.y, rect.extent.height, swapchain_data->createInfo.imageExtent.height);
}
if (rect.layer > swapchain_data->createInfo.imageArrayLayers) {
skip |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pPresentInfo->pSwapchains[i]), "VUID-VkRectLayerKHR-layer-01262",
"vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], the layer "
"(%i) is greater than the corresponding swapchain's imageArrayLayers (%i).",
i, j, rect.layer, swapchain_data->createInfo.imageArrayLayers);
}
}
}
}
const auto *present_times_info = lvl_find_in_chain<VkPresentTimesInfoGOOGLE>(pPresentInfo->pNext);
if (present_times_info) {
if (pPresentInfo->swapchainCount != present_times_info->swapchainCount) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pPresentInfo->pSwapchains[0]), "VUID-VkPresentTimesInfoGOOGLE-swapchainCount-01247",
"vkQueuePresentKHR(): VkPresentTimesInfoGOOGLE.swapchainCount is %i but pPresentInfo->swapchainCount "
"is %i. For VkPresentTimesInfoGOOGLE down pNext chain of VkPresentInfoKHR, "
"VkPresentTimesInfoGOOGLE.swapchainCount must equal VkPresentInfoKHR.swapchainCount.",
present_times_info->swapchainCount, pPresentInfo->swapchainCount);
}
}
}
return skip;
}
static void PostCallRecordQueuePresentKHR(layer_data *dev_data, const VkPresentInfoKHR *pPresentInfo, const VkResult &result) {
// Semaphore waits occur before error generation, if the call reached the ICD. (Confirm?)
for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
auto pSemaphore = GetSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
if (pSemaphore) {
pSemaphore->signaler.first = VK_NULL_HANDLE;
pSemaphore->signaled = false;
}
}
for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
        // Note: this is imperfect, in that we can get confused about what did or didn't succeed, but if the app does that, it's
        // confused itself just as much.
auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;
if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR) continue; // this present didn't actually happen.
// Mark the image as having been released to the WSI
auto swapchain_data = GetSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
auto image_state = GetImageState(dev_data, image);
image_state->acquired = false;
}
// Note: even though presentation is directed to a queue, there is no direct ordering between QP and subsequent work, so QP (and
// its semaphore waits) /never/ participate in any completion proof.
}
VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
bool skip = PreCallValidateQueuePresentKHR(dev_data, queue, pPresentInfo);
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);
if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
PostCallRecordQueuePresentKHR(dev_data, pPresentInfo, result);
}
return result;
}
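// Sketch of a present that supplies per-swapchain results, which the record step above prefers over
// the aggregate return value when deciding whether each image was actually released (hypothetical
// handles):
//
//     VkResult results[1];
//     VkPresentInfoKHR present = {VK_STRUCTURE_TYPE_PRESENT_INFO_KHR};
//     present.waitSemaphoreCount = 1;
//     present.pWaitSemaphores = &renderDoneSemaphore;
//     present.swapchainCount = 1;
//     present.pSwapchains = &swapchain;
//     present.pImageIndices = &imageIndex;
//     present.pResults = results;
//     vkQueuePresentKHR(queue, &present);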
static bool PreCallValidateCreateSharedSwapchainsKHR(layer_data *dev_data, uint32_t swapchainCount,
const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
std::vector<SURFACE_STATE *> &surface_state,
std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
if (pCreateInfos) {
lock_guard_t lock(global_lock);
for (uint32_t i = 0; i < swapchainCount; i++) {
surface_state.push_back(GetSurfaceState(dev_data->instance_data, pCreateInfos[i].surface));
old_swapchain_state.push_back(GetSwapchainNode(dev_data, pCreateInfos[i].oldSwapchain));
std::stringstream func_name;
func_name << "vkCreateSharedSwapchainsKHR[" << swapchainCount << "]";
if (PreCallValidateCreateSwapchainKHR(dev_data, func_name.str().c_str(), &pCreateInfos[i], surface_state[i],
old_swapchain_state[i])) {
return true;
}
}
}
return false;
}
static void PostCallRecordCreateSharedSwapchainsKHR(layer_data *dev_data, VkResult result, uint32_t swapchainCount,
const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
std::vector<SURFACE_STATE *> &surface_state,
std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
if (VK_SUCCESS == result) {
for (uint32_t i = 0; i < swapchainCount; i++) {
auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(&pCreateInfos[i], pSwapchains[i]));
if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfos[i].presentMode ||
VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfos[i].presentMode) {
swapchain_state->shared_presentable = true;
}
surface_state[i]->swapchain = swapchain_state.get();
dev_data->swapchainMap[pSwapchains[i]] = std::move(swapchain_state);
}
} else {
for (uint32_t i = 0; i < swapchainCount; i++) {
surface_state[i]->swapchain = nullptr;
}
}
// Spec requires that even if vkCreateSharedSwapchainsKHR fails, oldSwapchain behaves as replaced.
for (uint32_t i = 0; i < swapchainCount; i++) {
if (old_swapchain_state[i]) {
old_swapchain_state[i]->replaced = true;
}
surface_state[i]->old_swapchain = old_swapchain_state[i];
}
return;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
const VkSwapchainCreateInfoKHR *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
std::vector<SURFACE_STATE *> surface_state;
std::vector<SWAPCHAIN_NODE *> old_swapchain_state;
if (PreCallValidateCreateSharedSwapchainsKHR(dev_data, swapchainCount, pCreateInfos, pSwapchains, surface_state,
old_swapchain_state)) {
return VK_ERROR_VALIDATION_FAILED_EXT;
}
VkResult result =
dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
PostCallRecordCreateSharedSwapchainsKHR(dev_data, result, swapchainCount, pCreateInfos, pSwapchains, surface_state,
old_swapchain_state);
return result;
}
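// Validate an acquire: at least one of semaphore/fence must be provided, the semaphore must not already be
// signaled, the fence must be submittable, the swapchain must not be retired, and the app must not hold more
// acquired images than the surface's minImageCount allows.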
static bool PreCallValidateAcquireNextImageKHR(layer_data *dev_data, VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
bool skip = false;
if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-vkAcquireNextImageKHR-semaphore-01780",
"vkAcquireNextImageKHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way to "
"determine the completion of this operation.");
}
auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
if (pSemaphore && pSemaphore->scope == kSyncScopeInternal && pSemaphore->signaled) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(semaphore), "VUID-vkAcquireNextImageKHR-semaphore-01286",
"vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state.");
}
auto pFence = GetFenceNode(dev_data, fence);
if (pFence) {
skip |= ValidateFenceForSubmit(dev_data, pFence);
}
auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
if (swapchain_data->replaced) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(swapchain), "VUID-vkAcquireNextImageKHR-swapchain-01285",
"vkAcquireNextImageKHR: This swapchain has been retired. The application can still present any images it "
"has acquired, but cannot acquire any more.");
}
auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED) {
uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
[=](VkImage image) { return GetImageState(dev_data, image)->acquired; });
if (acquired_images > swapchain_data->images.size() - physical_device_state->surfaceCapabilities.minImageCount) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(swapchain), kVUID_Core_DrawState_SwapchainTooManyImages,
"vkAcquireNextImageKHR: Application has already acquired the maximum number of images (0x%" PRIxLEAST64 ")",
acquired_images);
}
}
if (swapchain_data->images.size() == 0) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(swapchain), kVUID_Core_DrawState_SwapchainImagesNotFound,
"vkAcquireNextImageKHR: No images found to acquire from. Application probably did not call "
"vkGetSwapchainImagesKHR after swapchain creation.");
}
return skip;
}
static void PostCallRecordAcquireNextImageKHR(layer_data *dev_data, VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
auto pFence = GetFenceNode(dev_data, fence);
if (pFence && pFence->scope == kSyncScopeInternal) {
// Treat as inflight since it is valid to wait on this fence, even in cases where it is technically a temporary
// import
pFence->state = FENCE_INFLIGHT;
pFence->signaler.first = VK_NULL_HANDLE; // ANI isn't on a queue, so this can't participate in a completion proof.
}
auto pSemaphore = GetSemaphoreNode(dev_data, semaphore);
if (pSemaphore && pSemaphore->scope == kSyncScopeInternal) {
// Treat as signaled since it is valid to wait on this semaphore, even in cases where it is technically a
// temporary import
pSemaphore->signaled = true;
pSemaphore->signaler.first = VK_NULL_HANDLE;
}
// Mark the image as acquired.
auto swapchain_data = GetSwapchainNode(dev_data, swapchain);
auto image = swapchain_data->images[*pImageIndex];
auto image_state = GetImageState(dev_data, image);
image_state->acquired = true;
image_state->shared_presentable = swapchain_data->shared_presentable;
}
static bool PreCallValidateAcquireNextImage2KHR(layer_data *dev_data, VkDevice device,
const VkAcquireNextImageInfoKHR *pAcquireInfo, uint32_t *pImageIndex) {
// TODO: unify as appropriate with PreCallValidateAcquireNextImageKHR; get
// new VUIDs assigned for AcquireNextImage2KHR-specific cases
bool skip = false;
if (pAcquireInfo->fence == VK_NULL_HANDLE && pAcquireInfo->semaphore == VK_NULL_HANDLE) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), "VUID-vkAcquireNextImageKHR-semaphore-01780",
"vkAcquireNextImage2KHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way to "
"determine the completion of this operation.");
}
auto pSemaphore = GetSemaphoreNode(dev_data, pAcquireInfo->semaphore);
if (pSemaphore && pSemaphore->scope == kSyncScopeInternal && pSemaphore->signaled) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
HandleToUint64(pAcquireInfo->semaphore), "VUID-vkAcquireNextImageKHR-semaphore-01286",
"vkAcquireNextImage2KHR: Semaphore must not be currently signaled or in a wait state.");
}
auto pFence = GetFenceNode(dev_data, pAcquireInfo->fence);
if (pFence) {
skip |= ValidateFenceForSubmit(dev_data, pFence);
}
auto swapchain_data = GetSwapchainNode(dev_data, pAcquireInfo->swapchain);
if (swapchain_data->replaced) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pAcquireInfo->swapchain), "VUID-vkAcquireNextImageKHR-swapchain-01285",
"vkAcquireNextImage2KHR: This swapchain has been retired. The application can still present any images it "
"has acquired, but cannot acquire any more.");
}
auto physical_device_state = GetPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED) {
uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
[=](VkImage image) { return GetImageState(dev_data, image)->acquired; });
if (acquired_images > swapchain_data->images.size() - physical_device_state->surfaceCapabilities.minImageCount) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pAcquireInfo->swapchain), kVUID_Core_DrawState_SwapchainTooManyImages,
"vkAcquireNextImage2KHR: Application has already acquired the maximum number of images (0x%" PRIxLEAST64
")",
acquired_images);
}
}
if (swapchain_data->images.size() == 0) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
HandleToUint64(pAcquireInfo->swapchain), kVUID_Core_DrawState_SwapchainImagesNotFound,
"vkAcquireNextImage2KHR: No images found to acquire from. Application probably did not call "
"vkGetSwapchainImagesKHR after swapchain creation.");
}
return skip;
}
static void PostCallRecordAcquireNextImage2KHR(layer_data *dev_data, VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo,
uint32_t *pImageIndex) {
auto pFence = GetFenceNode(dev_data, pAcquireInfo->fence);
if (pFence && pFence->scope == kSyncScopeInternal) {
// Treat as inflight since it is valid to wait on this fence, even in cases where it is technically a temporary
// import
pFence->state = FENCE_INFLIGHT;
pFence->signaler.first = VK_NULL_HANDLE; // ANI isn't on a queue, so this can't participate in a completion proof.
}
auto pSemaphore = GetSemaphoreNode(dev_data, pAcquireInfo->semaphore);
if (pSemaphore && pSemaphore->scope == kSyncScopeInternal) {
// Treat as signaled since it is valid to wait on this semaphore, even in cases where it is technically a
// temporary import
pSemaphore->signaled = true;
pSemaphore->signaler.first = VK_NULL_HANDLE;
}
// Mark the image as acquired.
auto swapchain_data = GetSwapchainNode(dev_data, pAcquireInfo->swapchain);
auto image = swapchain_data->images[*pImageIndex];
auto image_state = GetImageState(dev_data, image);
image_state->acquired = true;
image_state->shared_presentable = swapchain_data->shared_presentable;
// TODO: unify as appropriate with PostCallRecordAcquireNextImageKHR;
// consider physical device masks
}
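// Illustrative app-side call that the acquire checks above target (a sketch only; acquire_semaphore and
// image_index are hypothetical names, not part of this layer):
//     vkAcquireNextImageKHR(device, swapchain, UINT64_MAX, acquire_semaphore, VK_NULL_HANDLE, &image_index);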
VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateAcquireNextImageKHR(dev_data, device, swapchain, timeout, semaphore, fence, pImageIndex);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
lock.lock();
if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
PostCallRecordAcquireNextImageKHR(dev_data, device, swapchain, timeout, semaphore, fence, pImageIndex);
}
lock.unlock();
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo,
uint32_t *pImageIndex) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateAcquireNextImage2KHR(dev_data, device, pAcquireInfo, pImageIndex);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
VkResult result = dev_data->dispatch_table.AcquireNextImage2KHR(device, pAcquireInfo, pImageIndex);
lock.lock();
if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
PostCallRecordAcquireNextImage2KHR(dev_data, device, pAcquireInfo, pImageIndex);
}
lock.unlock();
return result;
}
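// The warnings below encourage the canonical two-call enumeration idiom (illustrative sketch; `devices` is a
// hypothetical name):
//     uint32_t count = 0;
//     vkEnumeratePhysicalDevices(instance, &count, nullptr);          // first call: query the count
//     std::vector<VkPhysicalDevice> devices(count);
//     vkEnumeratePhysicalDevices(instance, &count, devices.data());   // second call: fetch the handles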
static bool PreCallValidateEnumeratePhysicalDevices(instance_layer_data *instance_data, uint32_t *pPhysicalDeviceCount) {
bool skip = false;
if (UNCALLED == instance_data->vkEnumeratePhysicalDevicesState) {
// Flag warning here. You can call this without having queried the count, but it may not be
// robust on platforms with multiple physical devices.
skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0,
kVUID_Core_DevLimit_MissingQueryCount,
"Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first call "
"vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
} // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
else if (instance_data->physical_devices_count != *pPhysicalDeviceCount) {
// Having actual count match count from app is not a requirement, so this can be a warning
skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, kVUID_Core_DevLimit_CountMismatch,
"Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count supported by "
"this instance is %u.",
*pPhysicalDeviceCount, instance_data->physical_devices_count);
}
return skip;
}
static void PreCallRecordEnumeratePhysicalDevices(instance_layer_data *instance_data) {
instance_data->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
}
static void PostCallRecordEnumeratePhysicalDevices(instance_layer_data *instance_data, const VkResult &result,
uint32_t *pPhysicalDeviceCount, VkPhysicalDevice *pPhysicalDevices) {
if (NULL == pPhysicalDevices) {
instance_data->physical_devices_count = *pPhysicalDeviceCount;
} else if (result == VK_SUCCESS) { // Save physical devices
for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
auto &phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]];
phys_device_state.phys_device = pPhysicalDevices[i];
// Init actual features for each physical device
instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features2.features);
}
}
}
VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
VkPhysicalDevice *pPhysicalDevices) {
bool skip = false;
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
assert(instance_data);
unique_lock_t lock(global_lock);
// For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
if (pPhysicalDevices) {
skip |= PreCallValidateEnumeratePhysicalDevices(instance_data, pPhysicalDeviceCount);
}
PreCallRecordEnumeratePhysicalDevices(instance_data);
lock.unlock();
if (skip) {
return VK_ERROR_VALIDATION_FAILED_EXT;
}
VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
lock.lock();
PostCallRecordEnumeratePhysicalDevices(instance_data, result, pPhysicalDeviceCount, pPhysicalDevices);
return result;
}
// Common function to handle validation for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
static bool ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
PHYSICAL_DEVICE_STATE *pd_state,
uint32_t requested_queue_family_property_count, bool qfp_null,
const char *caller_name) {
bool skip = false;
if (!qfp_null) {
// Verify that for each physical device, this command is called first with NULL pQueueFamilyProperties in order to get count
if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
skip |= log_msg(
instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(pd_state->phys_device), kVUID_Core_DevLimit_MissingQueryCount,
"%s is called with non-NULL pQueueFamilyProperties before obtaining pQueueFamilyPropertyCount. It is recommended "
"to first call %s with NULL pQueueFamilyProperties in order to obtain the maximal pQueueFamilyPropertyCount.",
caller_name, caller_name);
// Then verify that the pCount passed in on the second call matches what was previously returned
} else if (pd_state->queue_family_count != requested_queue_family_property_count) {
skip |= log_msg(
instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(pd_state->phys_device), kVUID_Core_DevLimit_CountMismatch,
"%s is called with non-NULL pQueueFamilyProperties and pQueueFamilyPropertyCount value %" PRIu32
", but the largest previously returned pQueueFamilyPropertyCount for this physicalDevice is %" PRIu32
". It is recommended to instead receive all the properties by calling %s with pQueueFamilyPropertyCount that was "
"previously obtained by calling %s with NULL pQueueFamilyProperties.",
caller_name, requested_queue_family_property_count, pd_state->queue_family_count, caller_name, caller_name);
}
pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
}
return skip;
}
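// The per-physical-device CALL_STATE advances UNCALLED -> QUERY_COUNT -> QUERY_DETAILS; the common validation
// above warns when the details query is issued before the count query, or with a mismatched count.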
static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_layer_data *instance_data,
PHYSICAL_DEVICE_STATE *pd_state,
uint32_t *pQueueFamilyPropertyCount,
VkQueueFamilyProperties *pQueueFamilyProperties) {
return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, *pQueueFamilyPropertyCount,
(nullptr == pQueueFamilyProperties),
"vkGetPhysicalDeviceQueueFamilyProperties()");
}
static bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(instance_layer_data *instance_data,
PHYSICAL_DEVICE_STATE *pd_state,
uint32_t *pQueueFamilyPropertyCount,
VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, pd_state, *pQueueFamilyPropertyCount,
(nullptr == pQueueFamilyProperties),
"vkGetPhysicalDeviceQueueFamilyProperties2[KHR]()");
}
// Common function to update state for GetPhysicalDeviceQueueFamilyProperties & 2KHR version
static void StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
if (!pQueueFamilyProperties) {
if (UNCALLED == pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState)
pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
pd_state->queue_family_count = count;
} else { // Save queue family properties
pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
pd_state->queue_family_count = std::max(pd_state->queue_family_count, count);
pd_state->queue_family_properties.resize(std::max(static_cast<uint32_t>(pd_state->queue_family_properties.size()), count));
for (uint32_t i = 0; i < count; ++i) {
pd_state->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties;
}
}
}
static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
VkQueueFamilyProperties *pQueueFamilyProperties) {
VkQueueFamilyProperties2KHR *pqfp = nullptr;
std::vector<VkQueueFamilyProperties2KHR> qfp;
qfp.resize(count);
if (pQueueFamilyProperties) {
for (uint32_t i = 0; i < count; ++i) {
qfp[i].sType = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR;
qfp[i].pNext = nullptr;
qfp[i].queueFamilyProperties = pQueueFamilyProperties[i];
}
pqfp = qfp.data();
}
StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pqfp);
}
static void PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(PHYSICAL_DEVICE_STATE *pd_state, uint32_t count,
VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(pd_state, count, pQueueFamilyProperties);
}
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
uint32_t *pQueueFamilyPropertyCount,
VkQueueFamilyProperties *pQueueFamilyProperties) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
assert(physical_device_state);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties(instance_data, physical_device_state,
pQueueFamilyPropertyCount, pQueueFamilyProperties);
lock.unlock();
if (skip) return;
instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount,
pQueueFamilyProperties);
lock.lock();
PostCallRecordGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount, pQueueFamilyProperties);
}
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice,
uint32_t *pQueueFamilyPropertyCount,
VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
assert(physical_device_state);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(instance_data, physical_device_state,
pQueueFamilyPropertyCount, pQueueFamilyProperties);
lock.unlock();
if (skip) return;
instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties2(physicalDevice, pQueueFamilyPropertyCount,
pQueueFamilyProperties);
lock.lock();
PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(physical_device_state, *pQueueFamilyPropertyCount,
pQueueFamilyProperties);
}
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
uint32_t *pQueueFamilyPropertyCount,
VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
assert(physical_device_state);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(instance_data, physical_device_state,
pQueueFamilyPropertyCount, pQueueFamilyProperties);
lock.unlock();
if (skip) return;
instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties2KHR(physicalDevice, pQueueFamilyPropertyCount,
pQueueFamilyProperties);
lock.lock();
PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(physical_device_state, *pQueueFamilyPropertyCount,
pQueueFamilyProperties);
}
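// Common helper for all vkCreate*SurfaceKHR entry points: dispatches through the supplied member-function
// pointer and, on success, registers SURFACE_STATE for the new surface.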
template <typename TCreateInfo, typename FPtr>
static VkResult CreateSurface(VkInstance instance, TCreateInfo const *pCreateInfo, VkAllocationCallbacks const *pAllocator,
VkSurfaceKHR *pSurface, FPtr fptr) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
// Call down the call chain:
VkResult result = (instance_data->dispatch_table.*fptr)(instance, pCreateInfo, pAllocator, pSurface);
if (result == VK_SUCCESS) {
unique_lock_t lock(global_lock);
instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface);
lock.unlock();
}
return result;
}
static bool PreCallValidateDestroySurfaceKHR(instance_layer_data *instance_data, VkInstance instance, VkSurfaceKHR surface) {
auto surface_state = GetSurfaceState(instance_data, surface);
bool skip = false;
if ((surface_state) && (surface_state->swapchain)) {
skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
HandleToUint64(instance), "VUID-vkDestroySurfaceKHR-surface-01266",
"vkDestroySurfaceKHR() called before its associated VkSwapchainKHR was destroyed.");
}
return skip;
}
static void PreCallRecordDestroySurfaceKHR(instance_layer_data *instance_data, VkSurfaceKHR surface) {
instance_data->surface_map.erase(surface);
}
VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateDestroySurfaceKHR(instance_data, instance, surface);
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroySurfaceKHR(instance_data, surface);
lock.unlock();
if (!skip) {
instance_data->dispatch_table.DestroySurfaceKHR(instance, surface, pAllocator);
}
}
VKAPI_ATTR VkResult VKAPI_CALL CreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateDisplayPlaneSurfaceKHR);
}
#ifdef VK_USE_PLATFORM_ANDROID_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateAndroidSurfaceKHR);
}
#endif // VK_USE_PLATFORM_ANDROID_KHR
#ifdef VK_USE_PLATFORM_IOS_MVK
VKAPI_ATTR VkResult VKAPI_CALL CreateIOSSurfaceMVK(VkInstance instance, const VkIOSSurfaceCreateInfoMVK *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateIOSSurfaceMVK);
}
#endif // VK_USE_PLATFORM_IOS_MVK
#ifdef VK_USE_PLATFORM_MACOS_MVK
VKAPI_ATTR VkResult VKAPI_CALL CreateMacOSSurfaceMVK(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMacOSSurfaceMVK);
}
#endif // VK_USE_PLATFORM_MACOS_MVK
#ifdef VK_USE_PLATFORM_MIR_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateMirSurfaceKHR(VkInstance instance, const VkMirSurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMirSurfaceKHR);
}
static bool PreCallValidateGetPhysicalDeviceMirPresentationSupportKHR(instance_layer_data *instance_data,
VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex) {
const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
return ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceMirPresentationSupportKHR-queueFamilyIndex-01265",
"vkGetPhysicalDeviceMirPresentationSupportKHR", "queueFamilyIndex");
}
VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceMirPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex, MirConnection *connection) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetPhysicalDeviceMirPresentationSupportKHR(instance_data, physicalDevice, queueFamilyIndex);
lock.unlock();
if (skip) return VK_FALSE;
// Call down the call chain:
VkBool32 result =
instance_data->dispatch_table.GetPhysicalDeviceMirPresentationSupportKHR(physicalDevice, queueFamilyIndex, connection);
return result;
}
#endif // VK_USE_PLATFORM_MIR_KHR
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWaylandSurfaceKHR);
}
static bool PreCallValidateGetPhysicalDeviceWaylandPresentationSupportKHR(instance_layer_data *instance_data,
VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex) {
const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
return ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceWaylandPresentationSupportKHR-queueFamilyIndex-01306",
"vkGetPhysicalDeviceWaylandPresentationSupportKHR", "queueFamilyIndex");
}
VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
struct wl_display *display) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetPhysicalDeviceWaylandPresentationSupportKHR(instance_data, physicalDevice, queueFamilyIndex);
lock.unlock();
if (skip) return VK_FALSE;
// Call down the call chain:
VkBool32 result =
instance_data->dispatch_table.GetPhysicalDeviceWaylandPresentationSupportKHR(physicalDevice, queueFamilyIndex, display);
return result;
}
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWin32SurfaceKHR);
}
static bool PreCallValidateGetPhysicalDeviceWin32PresentationSupportKHR(instance_layer_data *instance_data,
VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex) {
const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
return ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceWin32PresentationSupportKHR-queueFamilyIndex-01309",
"vkGetPhysicalDeviceWin32PresentationSupportKHR", "queueFamilyIndex");
}
VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetPhysicalDeviceWin32PresentationSupportKHR(instance_data, physicalDevice, queueFamilyIndex);
lock.unlock();
if (skip) return VK_FALSE;
// Call down the call chain:
VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceWin32PresentationSupportKHR(physicalDevice, queueFamilyIndex);
return result;
}
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);
}
static bool PreCallValidateGetPhysicalDeviceXcbPresentationSupportKHR(instance_layer_data *instance_data,
VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex) {
const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
return ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceXcbPresentationSupportKHR-queueFamilyIndex-01312",
"vkGetPhysicalDeviceXcbPresentationSupportKHR", "queueFamilyIndex");
}
VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex, xcb_connection_t *connection,
xcb_visualid_t visual_id) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetPhysicalDeviceXcbPresentationSupportKHR(instance_data, physicalDevice, queueFamilyIndex);
lock.unlock();
if (skip) return VK_FALSE;
// Call down the call chain:
VkBool32 result = instance_data->dispatch_table.GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice, queueFamilyIndex,
connection, visual_id);
return result;
}
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXlibSurfaceKHR);
}
static bool PreCallValidateGetPhysicalDeviceXlibPresentationSupportKHR(instance_layer_data *instance_data,
VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex) {
const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
return ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceXlibPresentationSupportKHR-queueFamilyIndex-01315",
"vkGetPhysicalDeviceXlibPresentationSupportKHR", "queueFamilyIndex");
}
VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex, Display *dpy,
VisualID visualID) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateGetPhysicalDeviceXlibPresentationSupportKHR(instance_data, physicalDevice, queueFamilyIndex);
lock.unlock();
if (skip) return VK_FALSE;
// Call down the call chain:
VkBool32 result =
instance_data->dispatch_table.GetPhysicalDeviceXlibPresentationSupportKHR(physicalDevice, queueFamilyIndex, dpy, visualID);
return result;
}
#endif // VK_USE_PLATFORM_XLIB_KHR
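// Cache the queried surface capabilities on the physical-device state; swapchain-creation validation reads
// them back (e.g. minImageCount in the acquire checks above).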
static void PostCallRecordGetPhysicalDeviceSurfaceCapabilitiesKHR(instance_layer_data *instance_data,
VkPhysicalDevice physicalDevice,
VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
physical_device_state->surfaceCapabilities = *pSurfaceCapabilities;
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
auto result =
instance_data->dispatch_table.GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities);
unique_lock_t lock(global_lock);
if (result == VK_SUCCESS) {
PostCallRecordGetPhysicalDeviceSurfaceCapabilitiesKHR(instance_data, physicalDevice, pSurfaceCapabilities);
}
return result;
}
static void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(instance_layer_data *instanceData,
VkPhysicalDevice physicalDevice,
VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
unique_lock_t lock(global_lock);
auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
physicalDeviceState->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
physicalDeviceState->surfaceCapabilities = pSurfaceCapabilities->surfaceCapabilities;
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
VkSurfaceCapabilities2KHR *pSurfaceCapabilities) {
auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
auto result =
instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2KHR(physicalDevice, pSurfaceInfo, pSurfaceCapabilities);
if (result == VK_SUCCESS) {
PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(instanceData, physicalDevice, pSurfaceCapabilities);
}
return result;
}
static void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(instance_layer_data *instanceData,
VkPhysicalDevice physicalDevice,
VkSurfaceCapabilities2EXT *pSurfaceCapabilities) {
unique_lock_t lock(global_lock);
auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
physicalDeviceState->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
physicalDeviceState->surfaceCapabilities.minImageCount = pSurfaceCapabilities->minImageCount;
physicalDeviceState->surfaceCapabilities.maxImageCount = pSurfaceCapabilities->maxImageCount;
physicalDeviceState->surfaceCapabilities.currentExtent = pSurfaceCapabilities->currentExtent;
physicalDeviceState->surfaceCapabilities.minImageExtent = pSurfaceCapabilities->minImageExtent;
physicalDeviceState->surfaceCapabilities.maxImageExtent = pSurfaceCapabilities->maxImageExtent;
physicalDeviceState->surfaceCapabilities.maxImageArrayLayers = pSurfaceCapabilities->maxImageArrayLayers;
physicalDeviceState->surfaceCapabilities.supportedTransforms = pSurfaceCapabilities->supportedTransforms;
physicalDeviceState->surfaceCapabilities.currentTransform = pSurfaceCapabilities->currentTransform;
physicalDeviceState->surfaceCapabilities.supportedCompositeAlpha = pSurfaceCapabilities->supportedCompositeAlpha;
physicalDeviceState->surfaceCapabilities.supportedUsageFlags = pSurfaceCapabilities->supportedUsageFlags;
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
VkSurfaceCapabilities2EXT *pSurfaceCapabilities) {
auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
auto result =
instanceData->dispatch_table.GetPhysicalDeviceSurfaceCapabilities2EXT(physicalDevice, surface, pSurfaceCapabilities);
if (result == VK_SUCCESS) {
PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(instanceData, physicalDevice, pSurfaceCapabilities);
}
return result;
}
static bool PreCallValidateGetPhysicalDeviceSurfaceSupportKHR(instance_layer_data *instance_data,
PHYSICAL_DEVICE_STATE *physical_device_state,
uint32_t queueFamilyIndex) {
return ValidatePhysicalDeviceQueueFamily(instance_data, physical_device_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceSurfaceSupportKHR-queueFamilyIndex-01269",
"vkGetPhysicalDeviceSurfaceSupportKHR", "queueFamilyIndex");
}
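// Record per-(physical device, queue family) presentation support on the surface state for later
// swapchain-creation validation.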
static void PostCallRecordGetPhysicalDeviceSurfaceSupportKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex, VkSurfaceKHR surface,
VkBool32 *pSupported) {
auto surface_state = GetSurfaceState(instance_data, surface);
surface_state->gpu_queue_support[{physicalDevice, queueFamilyIndex}] = (*pSupported == VK_TRUE);
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
VkSurfaceKHR surface, VkBool32 *pSupported) {
bool skip = false;
auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
unique_lock_t lock(global_lock);
const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
skip |= PreCallValidateGetPhysicalDeviceSurfaceSupportKHR(instance_data, pd_state, queueFamilyIndex);
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
auto result =
instance_data->dispatch_table.GetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface, pSupported);
if (result == VK_SUCCESS) {
lock.lock();
PostCallRecordGetPhysicalDeviceSurfaceSupportKHR(instance_data, physicalDevice, queueFamilyIndex, surface, pSupported);
}
return result;
}
static bool PreCallValidateGetPhysicalDeviceSurfacePresentModesKHR(instance_layer_data *instance_data,
PHYSICAL_DEVICE_STATE *physical_device_state,
CALL_STATE &call_state, VkPhysicalDevice physicalDevice,
uint32_t *pPresentModeCount) {
// Compare the preliminary value of *pPresentModeCount with the value this time:
auto prev_mode_count = (uint32_t)physical_device_state->present_modes.size();
bool skip = false;
switch (call_state) {
case UNCALLED:
skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
kVUID_Core_DevLimit_MustQueryCount,
"vkGetPhysicalDeviceSurfacePresentModesKHR() called with non-NULL pPresentModeCount; but no prior "
"positive value has been seen for pPresentModeCount.");
break;
default:
// both query count and query details
if (*pPresentModeCount != prev_mode_count) {
skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
kVUID_Core_DevLimit_CountMismatch,
"vkGetPhysicalDeviceSurfacePresentModesKHR() called with *pPresentModeCount (%u) that differs "
"from the value (%u) that was returned when pPresentModes was NULL.",
*pPresentModeCount, prev_mode_count);
}
break;
}
return skip;
}
static void PostCallRecordGetPhysicalDeviceSurfacePresentModesKHR(PHYSICAL_DEVICE_STATE *physical_device_state,
CALL_STATE &call_state, uint32_t *pPresentModeCount,
VkPresentModeKHR *pPresentModes) {
if (*pPresentModeCount) {
if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
if (*pPresentModeCount > physical_device_state->present_modes.size())
physical_device_state->present_modes.resize(*pPresentModeCount);
}
if (pPresentModes) {
if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
for (uint32_t i = 0; i < *pPresentModeCount; i++) {
physical_device_state->present_modes[i] = pPresentModes[i];
}
}
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
uint32_t *pPresentModeCount,
VkPresentModeKHR *pPresentModes) {
bool skip = false;
auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
unique_lock_t lock(global_lock);
// TODO: this isn't quite right. available modes may differ by surface AND physical device.
auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState;
if (pPresentModes) {
skip |= PreCallValidateGetPhysicalDeviceSurfacePresentModesKHR(instance_data, physical_device_state, call_state,
physicalDevice, pPresentModeCount);
}
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, pPresentModeCount,
pPresentModes);
if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
lock.lock();
PostCallRecordGetPhysicalDeviceSurfacePresentModesKHR(physical_device_state, call_state, pPresentModeCount, pPresentModes);
}
return result;
}
static bool PreCallValidateGetPhysicalDeviceSurfaceFormatsKHR(instance_layer_data *instance_data,
PHYSICAL_DEVICE_STATE *physical_device_state, CALL_STATE &call_state,
VkPhysicalDevice physicalDevice, uint32_t *pSurfaceFormatCount) {
auto prev_format_count = (uint32_t)physical_device_state->surface_formats.size();
bool skip = false;
switch (call_state) {
case UNCALLED:
// Since we haven't recorded a preliminary value of *pSurfaceFormatCount, the application likely didn't
// previously call this function with a NULL pSurfaceFormats to query the count:
skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
kVUID_Core_DevLimit_MustQueryCount,
"vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormatCount; but no prior "
"positive value has been seen for pSurfaceFormats.");
break;
default:
if (prev_format_count != *pSurfaceFormatCount) {
skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, HandleToUint64(physicalDevice),
kVUID_Core_DevLimit_CountMismatch,
"vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormatCount, and with "
"pSurfaceFormats set to a value (%u) that is greater than the value (%u) that was returned "
"when pSurfaceFormatCount was NULL.",
*pSurfaceFormatCount, prev_format_count);
}
break;
}
return skip;
}
static void PostCallRecordGetPhysicalDeviceSurfaceFormatsKHR(PHYSICAL_DEVICE_STATE *physical_device_state, CALL_STATE &call_state,
uint32_t *pSurfaceFormatCount, VkSurfaceFormatKHR *pSurfaceFormats) {
if (*pSurfaceFormatCount) {
if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
if (*pSurfaceFormatCount > physical_device_state->surface_formats.size())
physical_device_state->surface_formats.resize(*pSurfaceFormatCount);
}
if (pSurfaceFormats) {
if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
physical_device_state->surface_formats[i] = pSurfaceFormats[i];
}
}
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
uint32_t *pSurfaceFormatCount,
VkSurfaceFormatKHR *pSurfaceFormats) {
bool skip = false;
auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
unique_lock_t lock(global_lock);
auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;
if (pSurfaceFormats) {
skip |= PreCallValidateGetPhysicalDeviceSurfaceFormatsKHR(instance_data, physical_device_state, call_state, physicalDevice,
pSurfaceFormatCount);
}
lock.unlock();
if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
// Call down the call chain:
auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount,
pSurfaceFormats);
if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
lock.lock();
PostCallRecordGetPhysicalDeviceSurfaceFormatsKHR(physical_device_state, call_state, pSurfaceFormatCount, pSurfaceFormats);
}
return result;
}
static void PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(instance_layer_data *instanceData, VkPhysicalDevice physicalDevice,
uint32_t *pSurfaceFormatCount, VkSurfaceFormat2KHR *pSurfaceFormats) {
unique_lock_t lock(global_lock);
auto physicalDeviceState = GetPhysicalDeviceState(instanceData, physicalDevice);
if (*pSurfaceFormatCount) {
if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_COUNT) {
physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_COUNT;
}
if (*pSurfaceFormatCount > physicalDeviceState->surface_formats.size())
physicalDeviceState->surface_formats.resize(*pSurfaceFormatCount);
}
if (pSurfaceFormats) {
if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_DETAILS) {
physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_DETAILS;
}
for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
physicalDeviceState->surface_formats[i] = pSurfaceFormats[i].surfaceFormat;
}
}
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
uint32_t *pSurfaceFormatCount,
VkSurfaceFormat2KHR *pSurfaceFormats) {
auto instanceData = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
auto result = instanceData->dispatch_table.GetPhysicalDeviceSurfaceFormats2KHR(physicalDevice, pSurfaceInfo,
pSurfaceFormatCount, pSurfaceFormats);
if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(instanceData, physicalDevice, pSurfaceFormatCount, pSurfaceFormats);
}
return result;
}
// VK_EXT_debug_utils commands
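// Track user-provided object names so validation messages can include them; a NULL pObjectName removes the entry.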
static void PreCallRecordSetDebugUtilsObjectNameEXT(layer_data *dev_data, const VkDebugUtilsObjectNameInfoEXT *pNameInfo) {
if (pNameInfo->pObjectName) {
lock_guard_t lock(global_lock);
dev_data->report_data->debugUtilsObjectNameMap->insert(
std::make_pair<uint64_t, std::string>((uint64_t &&) pNameInfo->objectHandle, pNameInfo->pObjectName));
} else {
lock_guard_t lock(global_lock);
dev_data->report_data->debugUtilsObjectNameMap->erase(pNameInfo->objectHandle);
}
}
VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectNameEXT(VkDevice device, const VkDebugUtilsObjectNameInfoEXT *pNameInfo) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = VK_SUCCESS;
PreCallRecordSetDebugUtilsObjectNameEXT(dev_data, pNameInfo);
if (nullptr != dev_data->dispatch_table.SetDebugUtilsObjectNameEXT) {
result = dev_data->dispatch_table.SetDebugUtilsObjectNameEXT(device, pNameInfo);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectTagEXT(VkDevice device, const VkDebugUtilsObjectTagInfoEXT *pTagInfo) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = VK_SUCCESS;
if (nullptr != dev_data->dispatch_table.SetDebugUtilsObjectTagEXT) {
result = dev_data->dispatch_table.SetDebugUtilsObjectTagEXT(device, pTagInfo);
}
return result;
}
static void PreCallRecordQueueBeginDebugUtilsLabelEXT(layer_data *dev_data, VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
BeginQueueDebugUtilsLabel(dev_data->report_data, queue, pLabelInfo);
}
VKAPI_ATTR void VKAPI_CALL QueueBeginDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
std::unique_lock<std::mutex> lock(global_lock);
PreCallRecordQueueBeginDebugUtilsLabelEXT(dev_data, queue, pLabelInfo);
lock.unlock();
if (nullptr != dev_data->dispatch_table.QueueBeginDebugUtilsLabelEXT) {
dev_data->dispatch_table.QueueBeginDebugUtilsLabelEXT(queue, pLabelInfo);
}
}
static void PostCallRecordQueueEndDebugUtilsLabelEXT(layer_data *dev_data, VkQueue queue) {
EndQueueDebugUtilsLabel(dev_data->report_data, queue);
}
VKAPI_ATTR void VKAPI_CALL QueueEndDebugUtilsLabelEXT(VkQueue queue) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
if (nullptr != dev_data->dispatch_table.QueueEndDebugUtilsLabelEXT) {
dev_data->dispatch_table.QueueEndDebugUtilsLabelEXT(queue);
}
lock_guard_t lock(global_lock);
PostCallRecordQueueEndDebugUtilsLabelEXT(dev_data, queue);
}
static void PreCallRecordQueueInsertDebugUtilsLabelEXT(layer_data *dev_data, VkQueue queue,
const VkDebugUtilsLabelEXT *pLabelInfo) {
InsertQueueDebugUtilsLabel(dev_data->report_data, queue, pLabelInfo);
}
VKAPI_ATTR void VKAPI_CALL QueueInsertDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
std::unique_lock<std::mutex> lock(global_lock);
PreCallRecordQueueInsertDebugUtilsLabelEXT(dev_data, queue, pLabelInfo);
lock.unlock();
if (nullptr != dev_data->dispatch_table.QueueInsertDebugUtilsLabelEXT) {
dev_data->dispatch_table.QueueInsertDebugUtilsLabelEXT(queue, pLabelInfo);
}
}
static void PreCallRecordCmdBeginDebugUtilsLabelEXT(layer_data *dev_data, VkCommandBuffer commandBuffer,
const VkDebugUtilsLabelEXT *pLabelInfo) {
BeginCmdDebugUtilsLabel(dev_data->report_data, commandBuffer, pLabelInfo);
}
VKAPI_ATTR void VKAPI_CALL CmdBeginDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
std::unique_lock<std::mutex> lock(global_lock);
PreCallRecordCmdBeginDebugUtilsLabelEXT(dev_data, commandBuffer, pLabelInfo);
lock.unlock();
if (nullptr != dev_data->dispatch_table.CmdBeginDebugUtilsLabelEXT) {
dev_data->dispatch_table.CmdBeginDebugUtilsLabelEXT(commandBuffer, pLabelInfo);
}
}
static void PostCallRecordCmdEndDebugUtilsLabelEXT(layer_data *dev_data, VkCommandBuffer commandBuffer) {
EndCmdDebugUtilsLabel(dev_data->report_data, commandBuffer);
}
VKAPI_ATTR void VKAPI_CALL CmdEndDebugUtilsLabelEXT(VkCommandBuffer commandBuffer) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
if (nullptr != dev_data->dispatch_table.CmdEndDebugUtilsLabelEXT) {
dev_data->dispatch_table.CmdEndDebugUtilsLabelEXT(commandBuffer);
}
lock_guard_t lock(global_lock);
PostCallRecordCmdEndDebugUtilsLabelEXT(dev_data, commandBuffer);
}
static void PreCallRecordCmdInsertDebugUtilsLabelEXT(layer_data *dev_data, VkCommandBuffer commandBuffer,
const VkDebugUtilsLabelEXT *pLabelInfo) {
InsertCmdDebugUtilsLabel(dev_data->report_data, commandBuffer, pLabelInfo);
}
VKAPI_ATTR void VKAPI_CALL CmdInsertDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
std::unique_lock<std::mutex> lock(global_lock);
PreCallRecordCmdInsertDebugUtilsLabelEXT(dev_data, commandBuffer, pLabelInfo);
lock.unlock();
if (nullptr != dev_data->dispatch_table.CmdInsertDebugUtilsLabelEXT) {
dev_data->dispatch_table.CmdInsertDebugUtilsLabelEXT(commandBuffer, pLabelInfo);
}
}
static VkResult PostCallRecordCreateDebugUtilsMessengerEXT(instance_layer_data *instance_data,
const VkDebugUtilsMessengerCreateInfoEXT *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDebugUtilsMessengerEXT *pMessenger) {
return layer_create_messenger_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMessenger);
}
VKAPI_ATTR VkResult VKAPI_CALL CreateDebugUtilsMessengerEXT(VkInstance instance,
const VkDebugUtilsMessengerCreateInfoEXT *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDebugUtilsMessengerEXT *pMessenger) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
VkResult result = instance_data->dispatch_table.CreateDebugUtilsMessengerEXT(instance, pCreateInfo, pAllocator, pMessenger);
if (VK_SUCCESS == result) {
result = PostCallRecordCreateDebugUtilsMessengerEXT(instance_data, pCreateInfo, pAllocator, pMessenger);
}
return result;
}
static void PostCallRecordDestroyDebugUtilsMessengerEXT(instance_layer_data *instance_data, VkDebugUtilsMessengerEXT messenger,
const VkAllocationCallbacks *pAllocator) {
layer_destroy_messenger_callback(instance_data->report_data, messenger, pAllocator);
}
VKAPI_ATTR void VKAPI_CALL DestroyDebugUtilsMessengerEXT(VkInstance instance, VkDebugUtilsMessengerEXT messenger,
const VkAllocationCallbacks *pAllocator) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
instance_data->dispatch_table.DestroyDebugUtilsMessengerEXT(instance, messenger, pAllocator);
PostCallRecordDestroyDebugUtilsMessengerEXT(instance_data, messenger, pAllocator);
}
VKAPI_ATTR void VKAPI_CALL SubmitDebugUtilsMessageEXT(VkInstance instance, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
VkDebugUtilsMessageTypeFlagsEXT messageTypes,
const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
instance_data->dispatch_table.SubmitDebugUtilsMessageEXT(instance, messageSeverity, messageTypes, pCallbackData);
}
// VK_EXT_debug_report commands
static VkResult PostCallRecordCreateDebugReportCallbackEXT(instance_layer_data *instance_data,
const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDebugReportCallbackEXT *pMsgCallback) {
return layer_create_report_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
}
VKAPI_ATTR VkResult VKAPI_CALL CreateDebugReportCallbackEXT(VkInstance instance,
const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDebugReportCallbackEXT *pMsgCallback) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
VkResult res = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
if (VK_SUCCESS == res) {
lock_guard_t lock(global_lock);
res = PostCallRecordCreateDebugReportCallbackEXT(instance_data, pCreateInfo, pAllocator, pMsgCallback);
}
return res;
}
static void PostCallDestroyDebugReportCallbackEXT(instance_layer_data *instance_data, VkDebugReportCallbackEXT msgCallback,
const VkAllocationCallbacks *pAllocator) {
layer_destroy_report_callback(instance_data->report_data, msgCallback, pAllocator);
}
VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
const VkAllocationCallbacks *pAllocator) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
instance_data->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
lock_guard_t lock(global_lock);
PostCallDestroyDebugReportCallbackEXT(instance_data, msgCallback, pAllocator);
}
VKAPI_ATTR void VKAPI_CALL DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
instance_data->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}
VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}
VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
VkLayerProperties *pProperties) {
return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}
VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
VkExtensionProperties *pProperties) {
if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
return VK_ERROR_LAYER_NOT_PRESENT;
}
VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
uint32_t *pCount, VkExtensionProperties *pProperties) {
if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
return util_GetExtensionProperties(1, device_extensions, pCount, pProperties);
assert(physicalDevice);
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
}
static bool PreCallValidateEnumeratePhysicalDeviceGroups(VkInstance instance, uint32_t *pPhysicalDeviceGroupCount,
VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
bool skip = false;
if (instance_data) {
// For this instance, flag when EnumeratePhysicalDeviceGroups goes to QUERY_COUNT and then QUERY_DETAILS.
if (NULL != pPhysicalDeviceGroupProperties) {
if (UNCALLED == instance_data->vkEnumeratePhysicalDeviceGroupsState) {
// Flag warning here. You can call this without having queried the count, but it may not be
// robust on platforms with multiple physical devices.
skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, kVUID_Core_DevLimit_MissingQueryCount,
"Call sequence has vkEnumeratePhysicalDeviceGroups() w/ non-NULL "
"pPhysicalDeviceGroupProperties. You should first call vkEnumeratePhysicalDeviceGroups() w/ "
"NULL pPhysicalDeviceGroupProperties to query pPhysicalDeviceGroupCount.");
} // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
else if (instance_data->physical_device_groups_count != *pPhysicalDeviceGroupCount) {
// Having actual count match count from app is not a requirement, so this can be a warning
skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, kVUID_Core_DevLimit_CountMismatch,
"Call to vkEnumeratePhysicalDeviceGroups() w/ pPhysicalDeviceGroupCount value %u, but actual count "
"supported by this instance is %u.",
*pPhysicalDeviceGroupCount, instance_data->physical_device_groups_count);
}
}
    } else {
        // instance_data is null on this path, so it cannot be dereferenced to log
        // the invalid-instance error; assert instead of risking a null-pointer read.
        assert(instance_data);
    }
return skip;
}
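// For reference, a minimal sketch (an assumption, not layer code) of the
// two-call idiom the validation above expects from applications:
//
//   uint32_t group_count = 0;
//   vkEnumeratePhysicalDeviceGroups(instance, &group_count, nullptr);        // QUERY_COUNT
//   std::vector<VkPhysicalDeviceGroupProperties> groups(group_count);        // sType filled per element
//   vkEnumeratePhysicalDeviceGroups(instance, &group_count, groups.data());  // QUERY_DETAILS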
static void PreCallRecordEnumeratePhysicalDeviceGroups(instance_layer_data *instance_data,
VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
if (instance_data) {
// For this instance, flag when EnumeratePhysicalDeviceGroups goes to QUERY_COUNT and then QUERY_DETAILS.
if (NULL == pPhysicalDeviceGroupProperties) {
instance_data->vkEnumeratePhysicalDeviceGroupsState = QUERY_COUNT;
} else {
instance_data->vkEnumeratePhysicalDeviceGroupsState = QUERY_DETAILS;
}
}
}
static void PostCallRecordEnumeratePhysicalDeviceGroups(instance_layer_data *instance_data, uint32_t *pPhysicalDeviceGroupCount,
VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
if (NULL == pPhysicalDeviceGroupProperties) {
instance_data->physical_device_groups_count = *pPhysicalDeviceGroupCount;
} else { // Save physical devices
for (uint32_t i = 0; i < *pPhysicalDeviceGroupCount; i++) {
for (uint32_t j = 0; j < pPhysicalDeviceGroupProperties[i].physicalDeviceCount; j++) {
VkPhysicalDevice cur_phys_dev = pPhysicalDeviceGroupProperties[i].physicalDevices[j];
auto &phys_device_state = instance_data->physical_device_map[cur_phys_dev];
phys_device_state.phys_device = cur_phys_dev;
// Init actual features for each physical device
instance_data->dispatch_table.GetPhysicalDeviceFeatures(cur_phys_dev, &phys_device_state.features2.features);
}
}
}
}
VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDeviceGroups(VkInstance instance, uint32_t *pPhysicalDeviceGroupCount,
VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
bool skip = false;
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
skip = PreCallValidateEnumeratePhysicalDeviceGroups(instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
if (skip) {
return VK_ERROR_VALIDATION_FAILED_EXT;
}
PreCallRecordEnumeratePhysicalDeviceGroups(instance_data, pPhysicalDeviceGroupProperties);
VkResult result = instance_data->dispatch_table.EnumeratePhysicalDeviceGroups(instance, pPhysicalDeviceGroupCount,
pPhysicalDeviceGroupProperties);
if (result == VK_SUCCESS) {
PostCallRecordEnumeratePhysicalDeviceGroups(instance_data, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDeviceGroupsKHR(
VkInstance instance, uint32_t *pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties) {
bool skip = false;
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
skip = PreCallValidateEnumeratePhysicalDeviceGroups(instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
if (skip) {
return VK_ERROR_VALIDATION_FAILED_EXT;
}
PreCallRecordEnumeratePhysicalDeviceGroups(instance_data, pPhysicalDeviceGroupProperties);
VkResult result = instance_data->dispatch_table.EnumeratePhysicalDeviceGroupsKHR(instance, pPhysicalDeviceGroupCount,
pPhysicalDeviceGroupProperties);
if (result == VK_SUCCESS) {
PostCallRecordEnumeratePhysicalDeviceGroups(instance_data, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
}
return result;
}
static bool PreCallValidateCreateDescriptorUpdateTemplate(const char *func_name, layer_data *device_data,
const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
bool skip = false;
const auto layout = GetDescriptorSetLayout(device_data, pCreateInfo->descriptorSetLayout);
if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET == pCreateInfo->templateType && !layout) {
auto ds_uint = HandleToUint64(pCreateInfo->descriptorSetLayout);
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, ds_uint,
                        "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00350",
"%s: Invalid pCreateInfo->descriptorSetLayout (%" PRIx64 ")", func_name, ds_uint);
} else if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR == pCreateInfo->templateType) {
auto bind_point = pCreateInfo->pipelineBindPoint;
bool valid_bp = (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS) || (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE);
if (!valid_bp) {
skip |=
log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00351",
"%s: Invalid pCreateInfo->pipelineBindPoint (%" PRIu32 ").", func_name, static_cast<uint32_t>(bind_point));
}
const auto pipeline_layout = GetPipelineLayout(device_data, pCreateInfo->pipelineLayout);
if (!pipeline_layout) {
uint64_t pl_uint = HandleToUint64(pCreateInfo->pipelineLayout);
skip |=
log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT,
pl_uint, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00352",
"%s: Invalid pCreateInfo->pipelineLayout (%" PRIx64 ")", func_name, pl_uint);
} else {
const uint32_t pd_set = pCreateInfo->set;
if ((pd_set >= pipeline_layout->set_layouts.size()) || !pipeline_layout->set_layouts[pd_set] ||
!pipeline_layout->set_layouts[pd_set]->IsPushDescriptor()) {
uint64_t pl_uint = HandleToUint64(pCreateInfo->pipelineLayout);
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, pl_uint,
"VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00353",
"%s: pCreateInfo->set (%" PRIu32
") does not refer to the push descriptor set layout for "
"pCreateInfo->pipelineLayout (%" PRIx64 ").",
func_name, pd_set, pl_uint);
}
}
}
return skip;
}
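// A minimal sketch (an assumption, not layer code) of a push-descriptor template
// create info that passes the checks above: the bind point must be GRAPHICS or
// COMPUTE, and `set` must name a push-descriptor set layout within the layout:
//
//   VkDescriptorUpdateTemplateCreateInfoKHR ci = {};
//   ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR;
//   ci.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR;
//   ci.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
//   ci.pipelineLayout = pipeline_layout;  // hypothetical handle
//   ci.set = 0;                           // a set created with the PUSH_DESCRIPTOR flag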
static void PostCallRecordCreateDescriptorUpdateTemplate(layer_data *device_data,
const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
// Shadow template createInfo for later updates
safe_VkDescriptorUpdateTemplateCreateInfo *local_create_info = new safe_VkDescriptorUpdateTemplateCreateInfo(pCreateInfo);
std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_create_info));
device_data->desc_template_map[*pDescriptorUpdateTemplate] = std::move(template_state);
}
VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplate(VkDevice device,
const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCreateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplate()", device_data, pCreateInfo,
pAllocator, pDescriptorUpdateTemplate);
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
if (!skip) {
lock.unlock();
result =
device_data->dispatch_table.CreateDescriptorUpdateTemplate(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
if (VK_SUCCESS == result) {
lock.lock();
PostCallRecordCreateDescriptorUpdateTemplate(device_data, pCreateInfo, pDescriptorUpdateTemplate);
}
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplateKHR(VkDevice device,
const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = PreCallValidateCreateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplateKHR()", device_data, pCreateInfo,
pAllocator, pDescriptorUpdateTemplate);
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
if (!skip) {
lock.unlock();
result = device_data->dispatch_table.CreateDescriptorUpdateTemplateKHR(device, pCreateInfo, pAllocator,
pDescriptorUpdateTemplate);
if (VK_SUCCESS == result) {
lock.lock();
PostCallRecordCreateDescriptorUpdateTemplate(device_data, pCreateInfo, pDescriptorUpdateTemplate);
}
}
return result;
}
static void PreCallRecordDestroyDescriptorUpdateTemplate(layer_data *device_data,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate) {
device_data->desc_template_map.erase(descriptorUpdateTemplate);
}
VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplate(VkDevice device, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
    // Pre-record to avoid Destroy/Create race: if the erase happened after the
    // down-chain call, the driver could recycle this handle for a template created
    // on another thread, and the late erase would then remove the new entry.
PreCallRecordDestroyDescriptorUpdateTemplate(device_data, descriptorUpdateTemplate);
lock.unlock();
device_data->dispatch_table.DestroyDescriptorUpdateTemplate(device, descriptorUpdateTemplate, pAllocator);
}
VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplateKHR(VkDevice device,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
unique_lock_t lock(global_lock);
// Pre-record to avoid Destroy/Create race
PreCallRecordDestroyDescriptorUpdateTemplate(device_data, descriptorUpdateTemplate);
lock.unlock();
device_data->dispatch_table.DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator);
}
// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSetsWithTemplate()
static void PostCallRecordUpdateDescriptorSetWithTemplate(layer_data *device_data, VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
const void *pData) {
auto const template_map_entry = device_data->desc_template_map.find(descriptorUpdateTemplate);
if (template_map_entry == device_data->desc_template_map.end()) {
        assert(0);
        return;  // avoid dereferencing an end() iterator when asserts are compiled out
    }
cvdescriptorset::PerformUpdateDescriptorSetsWithTemplateKHR(device_data, descriptorSet, template_map_entry->second, pData);
}
VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
const void *pData) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
device_data->dispatch_table.UpdateDescriptorSetWithTemplate(device, descriptorSet, descriptorUpdateTemplate, pData);
PostCallRecordUpdateDescriptorSetWithTemplate(device_data, descriptorSet, descriptorUpdateTemplate, pData);
}
VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
const void *pData) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
device_data->dispatch_table.UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, pData);
PostCallRecordUpdateDescriptorSetWithTemplate(device_data, descriptorSet, descriptorUpdateTemplate, pData);
}
static bool PreCallValidateCmdPushDescriptorSetWithTemplateKHR(layer_data *dev_data, GLOBAL_CB_NODE *cb_state) {
return ValidateCmd(dev_data, cb_state, CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, "vkCmdPushDescriptorSetWithTemplateKHR()");
}
VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
VkPipelineLayout layout, uint32_t set, const void *pData) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = false;
GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
// Minimal validation for command buffer state
if (cb_state) {
skip |= PreCallValidateCmdPushDescriptorSetWithTemplateKHR(dev_data, cb_state);
}
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set, pData);
}
}
static void PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_layer_data *instanceData,
VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
void *pProperties) {
unique_lock_t lock(global_lock);
auto physical_device_state = GetPhysicalDeviceState(instanceData, physicalDevice);
if (*pPropertyCount) {
if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_COUNT) {
physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_COUNT;
}
physical_device_state->display_plane_property_count = *pPropertyCount;
}
if (pProperties) {
if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_DETAILS) {
physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_DETAILS;
}
}
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
VkDisplayPlanePropertiesKHR *pProperties) {
VkResult result = VK_SUCCESS;
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
result = instance_data->dispatch_table.GetPhysicalDeviceDisplayPlanePropertiesKHR(physicalDevice, pPropertyCount, pProperties);
if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_data, physicalDevice, pPropertyCount, pProperties);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPlaneProperties2KHR(VkPhysicalDevice physicalDevice,
uint32_t *pPropertyCount,
VkDisplayPlaneProperties2KHR *pProperties) {
VkResult result = VK_SUCCESS;
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
result = instance_data->dispatch_table.GetPhysicalDeviceDisplayPlaneProperties2KHR(physicalDevice, pPropertyCount, pProperties);
if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(instance_data, physicalDevice, pPropertyCount, pProperties);
}
return result;
}
static bool ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_layer_data *instance_data,
VkPhysicalDevice physicalDevice, uint32_t planeIndex,
const char *api_name) {
bool skip = false;
auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState == UNCALLED) {
skip |=
log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(physicalDevice), kVUID_Core_Swapchain_GetSupportedDisplaysWithoutQuery,
"Potential problem with calling %s() without first querying vkGetPhysicalDeviceDisplayPlanePropertiesKHR "
"or vkGetPhysicalDeviceDisplayPlaneProperties2KHR.",
api_name);
} else {
if (planeIndex >= physical_device_state->display_plane_property_count) {
skip |= log_msg(
instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(physicalDevice), "VUID-vkGetDisplayPlaneSupportedDisplaysKHR-planeIndex-01249",
"%s(): planeIndex must be in the range [0, %d] that was returned by vkGetPhysicalDeviceDisplayPlanePropertiesKHR "
"or vkGetPhysicalDeviceDisplayPlaneProperties2KHR. Do you have the plane index hardcoded?",
api_name, physical_device_state->display_plane_property_count - 1);
}
}
return skip;
}
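// Sketch of the call sequence the check above expects (an assumption, not layer
// code): query the plane properties before asking about a specific plane index.
//
//   uint32_t plane_count = 0;
//   vkGetPhysicalDeviceDisplayPlanePropertiesKHR(gpu, &plane_count, nullptr);
//   // ... then use only planeIndex values strictly less than plane_count:
//   vkGetDisplayPlaneSupportedDisplaysKHR(gpu, plane_index, &display_count, nullptr);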
static bool PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
uint32_t planeIndex) {
bool skip = false;
lock_guard_t lock(global_lock);
skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
"vkGetDisplayPlaneSupportedDisplaysKHR");
return skip;
}
VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
bool skip = PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(instance_data, physicalDevice, planeIndex);
if (!skip) {
result =
instance_data->dispatch_table.GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex, pDisplayCount, pDisplays);
}
return result;
}
static bool PreCallValidateGetDisplayPlaneCapabilitiesKHR(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
uint32_t planeIndex) {
bool skip = false;
lock_guard_t lock(global_lock);
skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
"vkGetDisplayPlaneCapabilitiesKHR");
return skip;
}
VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
bool skip = PreCallValidateGetDisplayPlaneCapabilitiesKHR(instance_data, physicalDevice, planeIndex);
if (!skip) {
result = instance_data->dispatch_table.GetDisplayPlaneCapabilitiesKHR(physicalDevice, mode, planeIndex, pCapabilities);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice,
const VkDisplayPlaneInfo2KHR *pDisplayPlaneInfo,
VkDisplayPlaneCapabilities2KHR *pCapabilities) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
bool skip = PreCallValidateGetDisplayPlaneCapabilitiesKHR(instance_data, physicalDevice, pDisplayPlaneInfo->planeIndex);
if (!skip) {
result = instance_data->dispatch_table.GetDisplayPlaneCapabilities2KHR(physicalDevice, pDisplayPlaneInfo, pCapabilities);
}
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectNameEXT(VkDevice device, const VkDebugMarkerObjectNameInfoEXT *pNameInfo) {
unique_lock_t lock(global_lock);
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (pNameInfo->pObjectName) {
device_data->report_data->debugObjectNameMap->insert(
std::make_pair<uint64_t, std::string>((uint64_t &&) pNameInfo->object, pNameInfo->pObjectName));
} else {
device_data->report_data->debugObjectNameMap->erase(pNameInfo->object);
}
lock.unlock();
VkResult result = device_data->dispatch_table.DebugMarkerSetObjectNameEXT(device, pNameInfo);
return result;
}
VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectTagEXT(VkDevice device, VkDebugMarkerObjectTagInfoEXT *pTagInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
VkResult result = device_data->dispatch_table.DebugMarkerSetObjectTagEXT(device, pTagInfo);
return result;
}
static bool PreCallValidateCmdDebugMarkerBeginEXT(layer_data *dev_data, GLOBAL_CB_NODE *cb_state) {
return ValidateCmd(dev_data, cb_state, CMD_DEBUGMARKERBEGINEXT, "vkCmdDebugMarkerBeginEXT()");
}
VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer, VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = false;
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
// Minimal validation for command buffer state
if (cb_state) {
skip |= PreCallValidateCmdDebugMarkerBeginEXT(device_data, cb_state);
}
lock.unlock();
if (!skip) {
device_data->dispatch_table.CmdDebugMarkerBeginEXT(commandBuffer, pMarkerInfo);
}
}
static bool PreCallValidateCmdDebugMarkerEndEXT(layer_data *dev_data, GLOBAL_CB_NODE *cb_state) {
return ValidateCmd(dev_data, cb_state, CMD_DEBUGMARKERENDEXT, "vkCmdDebugMarkerEndEXT()");
}
VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = false;
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
// Minimal validation for command buffer state
if (cb_state) {
skip |= PreCallValidateCmdDebugMarkerEndEXT(device_data, cb_state);
}
lock.unlock();
if (!skip) {
device_data->dispatch_table.CmdDebugMarkerEndEXT(commandBuffer);
}
}
VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerInsertEXT(VkCommandBuffer commandBuffer, VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
device_data->dispatch_table.CmdDebugMarkerInsertEXT(commandBuffer, pMarkerInfo);
}
static bool PreCallValidateCmdSetDiscardRectangleEXT(layer_data *dev_data, GLOBAL_CB_NODE *cb_state) {
return ValidateCmd(dev_data, cb_state, CMD_SETDISCARDRECTANGLEEXT, "vkCmdSetDiscardRectangleEXT()");
}
VKAPI_ATTR void VKAPI_CALL CmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle,
uint32_t discardRectangleCount, const VkRect2D *pDiscardRectangles) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = false;
GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
// Minimal validation for command buffer state
if (cb_state) {
skip |= PreCallValidateCmdSetDiscardRectangleEXT(dev_data, cb_state);
}
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdSetDiscardRectangleEXT(commandBuffer, firstDiscardRectangle, discardRectangleCount,
pDiscardRectangles);
}
}
static bool PreCallValidateCmdSetSampleLocationsEXT(layer_data *dev_data, GLOBAL_CB_NODE *cb_state) {
return ValidateCmd(dev_data, cb_state, CMD_SETSAMPLELOCATIONSEXT, "vkCmdSetSampleLocationsEXT()");
}
VKAPI_ATTR void VKAPI_CALL CmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
const VkSampleLocationsInfoEXT *pSampleLocationsInfo) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
unique_lock_t lock(global_lock);
bool skip = false;
GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, commandBuffer);
// Minimal validation for command buffer state
if (cb_state) {
skip |= PreCallValidateCmdSetSampleLocationsEXT(dev_data, cb_state);
}
lock.unlock();
if (!skip) {
dev_data->dispatch_table.CmdSetSampleLocationsEXT(commandBuffer, pSampleLocationsInfo);
}
}
static bool PreCallValidateCmdDrawIndirectCountKHR(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer,
VkBuffer count_buffer, bool indexed, VkPipelineBindPoint bind_point,
GLOBAL_CB_NODE **cb_state, BUFFER_STATE **buffer_state,
BUFFER_STATE **count_buffer_state, const char *caller) {
bool skip = ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDIRECTCOUNTKHR, cb_state, caller,
VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdDrawIndirectCountKHR-commandBuffer-cmdpool",
"VUID-vkCmdDrawIndirectCountKHR-renderpass", "VUID-vkCmdDrawIndirectCountKHR-None-03119",
"VUID-vkCmdDrawIndirectCountKHR-None-03120");
*buffer_state = GetBufferState(dev_data, buffer);
*count_buffer_state = GetBufferState(dev_data, count_buffer);
skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, "VUID-vkCmdDrawIndirectCountKHR-buffer-03104");
skip |=
ValidateMemoryIsBoundToBuffer(dev_data, *count_buffer_state, caller, "VUID-vkCmdDrawIndirectCountKHR-countBuffer-03106");
return skip;
}
static void PreCallRecordCmdDrawIndirectCountKHR(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
BUFFER_STATE *buffer_state, BUFFER_STATE *count_buffer_state) {
UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
AddCommandBufferBindingBuffer(dev_data, cb_state, count_buffer_state);
}
VKAPI_ATTR void VKAPI_CALL CmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
uint32_t stride) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = nullptr;
BUFFER_STATE *buffer_state = nullptr;
BUFFER_STATE *count_buffer_state = nullptr;
bool skip = false;
if (offset & 3) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdDrawIndirectCountKHR-offset-03108",
"vkCmdDrawIndirectCountKHR() parameter, VkDeviceSize offset (0x%" PRIxLEAST64 "), is not a multiple of 4.",
offset);
}
if (countBufferOffset & 3) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdDrawIndirectCountKHR-countBufferOffset-03109",
"vkCmdDrawIndirectCountKHR() parameter, VkDeviceSize countBufferOffset (0x%" PRIxLEAST64
"), is not a multiple of 4.",
countBufferOffset);
}
if ((stride & 3) || stride < sizeof(VkDrawIndirectCommand)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdDrawIndirectCountKHR-stride-03110",
"vkCmdDrawIndirectCountKHR() parameter, uint32_t stride (0x%" PRIxLEAST32
"), is not a multiple of 4 or smaller than sizeof (VkDrawIndirectCommand).",
stride);
}
unique_lock_t lock(global_lock);
skip |=
PreCallValidateCmdDrawIndirectCountKHR(dev_data, commandBuffer, buffer, countBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS,
&cb_state, &buffer_state, &count_buffer_state, "vkCmdDrawIndirectCountKHR()");
if (!skip) {
PreCallRecordCmdDrawIndirectCountKHR(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state, count_buffer_state);
lock.unlock();
dev_data->dispatch_table.CmdDrawIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
maxDrawCount, stride);
}
}
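// The alignment checks above correspond to usage like this sketch (an
// assumption, not layer code): both offsets must be multiples of 4 and the
// stride at least sizeof(VkDrawIndirectCommand) and itself a multiple of 4.
//
//   vkCmdDrawIndirectCountKHR(cmd, draw_buf, /*offset=*/0, count_buf,
//                             /*countBufferOffset=*/0, /*maxDrawCount=*/max_draws,
//                             /*stride=*/sizeof(VkDrawIndirectCommand));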
static bool PreCallValidateCmdDrawIndexedIndirectCountKHR(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer,
VkBuffer count_buffer, bool indexed, VkPipelineBindPoint bind_point,
GLOBAL_CB_NODE **cb_state, BUFFER_STATE **buffer_state,
BUFFER_STATE **count_buffer_state, const char *caller) {
bool skip = ValidateCmdDrawType(
dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXEDINDIRECTCOUNTKHR, cb_state, caller, VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdDrawIndexedIndirectCountKHR-commandBuffer-cmdpool", "VUID-vkCmdDrawIndexedIndirectCountKHR-renderpass",
"VUID-vkCmdDrawIndexedIndirectCountKHR-None-03151", "VUID-vkCmdDrawIndexedIndirectCountKHR-None-03152");
*buffer_state = GetBufferState(dev_data, buffer);
*count_buffer_state = GetBufferState(dev_data, count_buffer);
skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, "VUID-vkCmdDrawIndexedIndirectCountKHR-buffer-03136");
skip |= ValidateMemoryIsBoundToBuffer(dev_data, *count_buffer_state, caller,
"VUID-vkCmdDrawIndexedIndirectCountKHR-countBuffer-03138");
return skip;
}
static void PreCallRecordCmdDrawIndexedIndirectCountKHR(layer_data *dev_data, GLOBAL_CB_NODE *cb_state,
VkPipelineBindPoint bind_point, BUFFER_STATE *buffer_state,
BUFFER_STATE *count_buffer_state) {
UpdateStateCmdDrawType(dev_data, cb_state, bind_point);
AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
AddCommandBufferBindingBuffer(dev_data, cb_state, count_buffer_state);
}
VKAPI_ATTR void VKAPI_CALL CmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkBuffer countBuffer, VkDeviceSize countBufferOffset,
uint32_t maxDrawCount, uint32_t stride) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = nullptr;
BUFFER_STATE *buffer_state = nullptr;
BUFFER_STATE *count_buffer_state = nullptr;
bool skip = false;
if (offset & 3) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdDrawIndexedIndirectCountKHR-offset-03140",
"vkCmdDrawIndexedIndirectCountKHR() parameter, VkDeviceSize offset (0x%" PRIxLEAST64
"), is not a multiple of 4.",
offset);
}
if (countBufferOffset & 3) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdDrawIndexedIndirectCountKHR-countBufferOffset-03141",
"vkCmdDrawIndexedIndirectCountKHR() parameter, VkDeviceSize countBufferOffset (0x%" PRIxLEAST64
"), is not a multiple of 4.",
countBufferOffset);
}
if ((stride & 3) || stride < sizeof(VkDrawIndexedIndirectCommand)) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdDrawIndexedIndirectCountKHR-stride-03142",
"vkCmdDrawIndexedIndirectCountKHR() parameter, uint32_t stride (0x%" PRIxLEAST32
"), is not a multiple of 4 or smaller than sizeof (VkDrawIndexedIndirectCommand).",
stride);
}
unique_lock_t lock(global_lock);
skip |= PreCallValidateCmdDrawIndexedIndirectCountKHR(dev_data, commandBuffer, buffer, countBuffer, true,
VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, &buffer_state,
&count_buffer_state, "vkCmdDrawIndexedIndirectCountKHR()");
if (!skip) {
PreCallRecordCmdDrawIndexedIndirectCountKHR(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state,
count_buffer_state);
lock.unlock();
dev_data->dispatch_table.CmdDrawIndexedIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset,
maxDrawCount, stride);
}
}
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName);
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName);
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName);
// Map of all APIs to be intercepted by this layer
static const std::unordered_map<std::string, void *> name_to_funcptr_map = {
{"vkGetInstanceProcAddr", (void *)GetInstanceProcAddr},
{"vk_layerGetPhysicalDeviceProcAddr", (void *)GetPhysicalDeviceProcAddr},
{"vkGetDeviceProcAddr", (void *)GetDeviceProcAddr},
{"vkCreateInstance", (void *)CreateInstance},
{"vkCreateDevice", (void *)CreateDevice},
{"vkEnumeratePhysicalDevices", (void *)EnumeratePhysicalDevices},
{"vkGetPhysicalDeviceQueueFamilyProperties", (void *)GetPhysicalDeviceQueueFamilyProperties},
{"vkDestroyInstance", (void *)DestroyInstance},
{"vkEnumerateInstanceLayerProperties", (void *)EnumerateInstanceLayerProperties},
{"vkEnumerateDeviceLayerProperties", (void *)EnumerateDeviceLayerProperties},
{"vkEnumerateInstanceExtensionProperties", (void *)EnumerateInstanceExtensionProperties},
{"vkEnumerateDeviceExtensionProperties", (void *)EnumerateDeviceExtensionProperties},
{"vkCreateDescriptorUpdateTemplate", (void *)CreateDescriptorUpdateTemplate},
{"vkCreateDescriptorUpdateTemplateKHR", (void *)CreateDescriptorUpdateTemplateKHR},
{"vkDestroyDescriptorUpdateTemplate", (void *)DestroyDescriptorUpdateTemplate},
{"vkDestroyDescriptorUpdateTemplateKHR", (void *)DestroyDescriptorUpdateTemplateKHR},
{"vkUpdateDescriptorSetWithTemplate", (void *)UpdateDescriptorSetWithTemplate},
{"vkUpdateDescriptorSetWithTemplateKHR", (void *)UpdateDescriptorSetWithTemplateKHR},
{"vkCmdPushDescriptorSetWithTemplateKHR", (void *)CmdPushDescriptorSetWithTemplateKHR},
{"vkCmdPushDescriptorSetKHR", (void *)CmdPushDescriptorSetKHR},
{"vkCreateSwapchainKHR", (void *)CreateSwapchainKHR},
{"vkDestroySwapchainKHR", (void *)DestroySwapchainKHR},
{"vkGetSwapchainImagesKHR", (void *)GetSwapchainImagesKHR},
{"vkAcquireNextImageKHR", (void *)AcquireNextImageKHR},
{"vkAcquireNextImage2KHR", (void *)AcquireNextImage2KHR},
{"vkQueuePresentKHR", (void *)QueuePresentKHR},
{"vkQueueSubmit", (void *)QueueSubmit},
{"vkWaitForFences", (void *)WaitForFences},
{"vkGetFenceStatus", (void *)GetFenceStatus},
{"vkQueueWaitIdle", (void *)QueueWaitIdle},
{"vkDeviceWaitIdle", (void *)DeviceWaitIdle},
{"vkGetDeviceQueue", (void *)GetDeviceQueue},
{"vkGetDeviceQueue2", (void *)GetDeviceQueue2},
{"vkDestroyDevice", (void *)DestroyDevice},
{"vkDestroyFence", (void *)DestroyFence},
{"vkResetFences", (void *)ResetFences},
{"vkDestroySemaphore", (void *)DestroySemaphore},
{"vkDestroyEvent", (void *)DestroyEvent},
{"vkDestroyQueryPool", (void *)DestroyQueryPool},
{"vkDestroyBuffer", (void *)DestroyBuffer},
{"vkDestroyBufferView", (void *)DestroyBufferView},
{"vkDestroyImage", (void *)DestroyImage},
{"vkDestroyImageView", (void *)DestroyImageView},
{"vkDestroyShaderModule", (void *)DestroyShaderModule},
{"vkDestroyPipeline", (void *)DestroyPipeline},
{"vkDestroyPipelineLayout", (void *)DestroyPipelineLayout},
{"vkDestroySampler", (void *)DestroySampler},
{"vkDestroyDescriptorSetLayout", (void *)DestroyDescriptorSetLayout},
{"vkDestroyDescriptorPool", (void *)DestroyDescriptorPool},
{"vkDestroyFramebuffer", (void *)DestroyFramebuffer},
{"vkDestroyRenderPass", (void *)DestroyRenderPass},
{"vkCreateBuffer", (void *)CreateBuffer},
{"vkCreateBufferView", (void *)CreateBufferView},
{"vkCreateImage", (void *)CreateImage},
{"vkCreateImageView", (void *)CreateImageView},
{"vkCreateFence", (void *)CreateFence},
{"vkCreatePipelineCache", (void *)CreatePipelineCache},
{"vkDestroyPipelineCache", (void *)DestroyPipelineCache},
{"vkGetPipelineCacheData", (void *)GetPipelineCacheData},
{"vkMergePipelineCaches", (void *)MergePipelineCaches},
{"vkCreateGraphicsPipelines", (void *)CreateGraphicsPipelines},
{"vkCreateComputePipelines", (void *)CreateComputePipelines},
{"vkCreateSampler", (void *)CreateSampler},
{"vkCreateDescriptorSetLayout", (void *)CreateDescriptorSetLayout},
{"vkCreatePipelineLayout", (void *)CreatePipelineLayout},
{"vkCreateDescriptorPool", (void *)CreateDescriptorPool},
{"vkResetDescriptorPool", (void *)ResetDescriptorPool},
{"vkAllocateDescriptorSets", (void *)AllocateDescriptorSets},
{"vkFreeDescriptorSets", (void *)FreeDescriptorSets},
{"vkUpdateDescriptorSets", (void *)UpdateDescriptorSets},
{"vkCreateCommandPool", (void *)CreateCommandPool},
{"vkDestroyCommandPool", (void *)DestroyCommandPool},
{"vkResetCommandPool", (void *)ResetCommandPool},
{"vkCreateQueryPool", (void *)CreateQueryPool},
{"vkAllocateCommandBuffers", (void *)AllocateCommandBuffers},
{"vkFreeCommandBuffers", (void *)FreeCommandBuffers},
{"vkBeginCommandBuffer", (void *)BeginCommandBuffer},
{"vkEndCommandBuffer", (void *)EndCommandBuffer},
{"vkResetCommandBuffer", (void *)ResetCommandBuffer},
{"vkCmdBindPipeline", (void *)CmdBindPipeline},
{"vkCmdSetViewport", (void *)CmdSetViewport},
{"vkCmdSetScissor", (void *)CmdSetScissor},
{"vkCmdSetLineWidth", (void *)CmdSetLineWidth},
{"vkCmdSetDepthBias", (void *)CmdSetDepthBias},
{"vkCmdSetBlendConstants", (void *)CmdSetBlendConstants},
{"vkCmdSetDepthBounds", (void *)CmdSetDepthBounds},
{"vkCmdSetStencilCompareMask", (void *)CmdSetStencilCompareMask},
{"vkCmdSetStencilWriteMask", (void *)CmdSetStencilWriteMask},
{"vkCmdSetStencilReference", (void *)CmdSetStencilReference},
{"vkCmdBindDescriptorSets", (void *)CmdBindDescriptorSets},
{"vkCmdBindVertexBuffers", (void *)CmdBindVertexBuffers},
{"vkCmdBindIndexBuffer", (void *)CmdBindIndexBuffer},
{"vkCmdDraw", (void *)CmdDraw},
{"vkCmdDrawIndexed", (void *)CmdDrawIndexed},
{"vkCmdDrawIndirect", (void *)CmdDrawIndirect},
{"vkCmdDrawIndexedIndirect", (void *)CmdDrawIndexedIndirect},
{"vkCmdDispatch", (void *)CmdDispatch},
{"vkCmdDispatchIndirect", (void *)CmdDispatchIndirect},
{"vkCmdCopyBuffer", (void *)CmdCopyBuffer},
{"vkCmdCopyImage", (void *)CmdCopyImage},
{"vkCmdBlitImage", (void *)CmdBlitImage},
{"vkCmdCopyBufferToImage", (void *)CmdCopyBufferToImage},
{"vkCmdCopyImageToBuffer", (void *)CmdCopyImageToBuffer},
{"vkCmdUpdateBuffer", (void *)CmdUpdateBuffer},
{"vkCmdFillBuffer", (void *)CmdFillBuffer},
{"vkCmdClearColorImage", (void *)CmdClearColorImage},
{"vkCmdClearDepthStencilImage", (void *)CmdClearDepthStencilImage},
{"vkCmdClearAttachments", (void *)CmdClearAttachments},
{"vkCmdResolveImage", (void *)CmdResolveImage},
{"vkGetImageSubresourceLayout", (void *)GetImageSubresourceLayout},
{"vkCmdSetEvent", (void *)CmdSetEvent},
{"vkCmdResetEvent", (void *)CmdResetEvent},
{"vkCmdWaitEvents", (void *)CmdWaitEvents},
{"vkCmdPipelineBarrier", (void *)CmdPipelineBarrier},
{"vkCmdBeginQuery", (void *)CmdBeginQuery},
{"vkCmdEndQuery", (void *)CmdEndQuery},
{"vkCmdResetQueryPool", (void *)CmdResetQueryPool},
{"vkCmdCopyQueryPoolResults", (void *)CmdCopyQueryPoolResults},
{"vkCmdPushConstants", (void *)CmdPushConstants},
{"vkCmdWriteTimestamp", (void *)CmdWriteTimestamp},
{"vkCreateFramebuffer", (void *)CreateFramebuffer},
{"vkCreateShaderModule", (void *)CreateShaderModule},
{"vkCreateRenderPass", (void *)CreateRenderPass},
{"vkCmdBeginRenderPass", (void *)CmdBeginRenderPass},
{"vkCmdNextSubpass", (void *)CmdNextSubpass},
{"vkCmdEndRenderPass", (void *)CmdEndRenderPass},
{"vkCmdExecuteCommands", (void *)CmdExecuteCommands},
{"vkCmdDebugMarkerBeginEXT", (void *)CmdDebugMarkerBeginEXT},
{"vkCmdDebugMarkerEndEXT", (void *)CmdDebugMarkerEndEXT},
{"vkCmdDebugMarkerInsertEXT", (void *)CmdDebugMarkerInsertEXT},
{"vkDebugMarkerSetObjectNameEXT", (void *)DebugMarkerSetObjectNameEXT},
{"vkDebugMarkerSetObjectTagEXT", (void *)DebugMarkerSetObjectTagEXT},
{"vkSetEvent", (void *)SetEvent},
{"vkMapMemory", (void *)MapMemory},
{"vkUnmapMemory", (void *)UnmapMemory},
{"vkFlushMappedMemoryRanges", (void *)FlushMappedMemoryRanges},
{"vkInvalidateMappedMemoryRanges", (void *)InvalidateMappedMemoryRanges},
{"vkAllocateMemory", (void *)AllocateMemory},
{"vkFreeMemory", (void *)FreeMemory},
{"vkBindBufferMemory", (void *)BindBufferMemory},
{"vkBindBufferMemory2", (void *)BindBufferMemory2},
{"vkBindBufferMemory2KHR", (void *)BindBufferMemory2KHR},
{"vkGetBufferMemoryRequirements", (void *)GetBufferMemoryRequirements},
{"vkGetBufferMemoryRequirements2", (void *)GetBufferMemoryRequirements2},
{"vkGetBufferMemoryRequirements2KHR", (void *)GetBufferMemoryRequirements2KHR},
{"vkGetImageMemoryRequirements", (void *)GetImageMemoryRequirements},
{"vkGetImageMemoryRequirements2", (void *)GetImageMemoryRequirements2},
{"vkGetImageMemoryRequirements2KHR", (void *)GetImageMemoryRequirements2KHR},
{"vkGetImageSparseMemoryRequirements", (void *)GetImageSparseMemoryRequirements},
{"vkGetImageSparseMemoryRequirements2", (void *)GetImageSparseMemoryRequirements2},
{"vkGetImageSparseMemoryRequirements2KHR", (void *)GetImageSparseMemoryRequirements2KHR},
{"vkGetPhysicalDeviceSparseImageFormatProperties", (void *)GetPhysicalDeviceSparseImageFormatProperties},
{"vkGetPhysicalDeviceSparseImageFormatProperties2", (void *)GetPhysicalDeviceSparseImageFormatProperties2},
{"vkGetPhysicalDeviceSparseImageFormatProperties2KHR", (void *)GetPhysicalDeviceSparseImageFormatProperties2KHR},
{"vkGetQueryPoolResults", (void *)GetQueryPoolResults},
{"vkBindImageMemory", (void *)BindImageMemory},
{"vkBindImageMemory2", (void *)BindImageMemory2},
{"vkBindImageMemory2KHR", (void *)BindImageMemory2KHR},
{"vkQueueBindSparse", (void *)QueueBindSparse},
{"vkCreateSemaphore", (void *)CreateSemaphore},
{"vkCreateEvent", (void *)CreateEvent},
#ifdef VK_USE_PLATFORM_ANDROID_KHR
{"vkCreateAndroidSurfaceKHR", (void *)CreateAndroidSurfaceKHR},
#endif
#ifdef VK_USE_PLATFORM_MIR_KHR
{"vkCreateMirSurfaceKHR", (void *)CreateMirSurfaceKHR},
{"vkGetPhysicalDeviceMirPresentationSupportKHR", (void *)GetPhysicalDeviceMirPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
{"vkCreateWaylandSurfaceKHR", (void *)CreateWaylandSurfaceKHR},
{"vkGetPhysicalDeviceWaylandPresentationSupportKHR", (void *)GetPhysicalDeviceWaylandPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
{"vkCreateWin32SurfaceKHR", (void *)CreateWin32SurfaceKHR},
{"vkGetPhysicalDeviceWin32PresentationSupportKHR", (void *)GetPhysicalDeviceWin32PresentationSupportKHR},
{"vkImportSemaphoreWin32HandleKHR", (void *)ImportSemaphoreWin32HandleKHR},
{"vkGetSemaphoreWin32HandleKHR", (void *)GetSemaphoreWin32HandleKHR},
{"vkImportFenceWin32HandleKHR", (void *)ImportFenceWin32HandleKHR},
{"vkGetFenceWin32HandleKHR", (void *)GetFenceWin32HandleKHR},
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
{"vkCreateXcbSurfaceKHR", (void *)CreateXcbSurfaceKHR},
{"vkGetPhysicalDeviceXcbPresentationSupportKHR", (void *)GetPhysicalDeviceXcbPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
{"vkCreateXlibSurfaceKHR", (void *)CreateXlibSurfaceKHR},
{"vkGetPhysicalDeviceXlibPresentationSupportKHR", (void *)GetPhysicalDeviceXlibPresentationSupportKHR},
#endif
#ifdef VK_USE_PLATFORM_IOS_MVK
{"vkCreateIOSSurfaceMVK", (void *)CreateIOSSurfaceMVK},
#endif
#ifdef VK_USE_PLATFORM_MACOS_MVK
{"vkCreateMacOSSurfaceMVK", (void *)CreateMacOSSurfaceMVK},
#endif
{"vkCreateDisplayPlaneSurfaceKHR", (void *)CreateDisplayPlaneSurfaceKHR},
{"vkDestroySurfaceKHR", (void *)DestroySurfaceKHR},
{"vkGetPhysicalDeviceSurfaceCapabilitiesKHR", (void *)GetPhysicalDeviceSurfaceCapabilitiesKHR},
{"vkGetPhysicalDeviceSurfaceCapabilities2KHR", (void *)GetPhysicalDeviceSurfaceCapabilities2KHR},
{"vkGetPhysicalDeviceSurfaceCapabilities2EXT", (void *)GetPhysicalDeviceSurfaceCapabilities2EXT},
{"vkGetPhysicalDeviceSurfaceSupportKHR", (void *)GetPhysicalDeviceSurfaceSupportKHR},
{"vkGetPhysicalDeviceSurfacePresentModesKHR", (void *)GetPhysicalDeviceSurfacePresentModesKHR},
{"vkGetPhysicalDeviceSurfaceFormatsKHR", (void *)GetPhysicalDeviceSurfaceFormatsKHR},
{"vkGetPhysicalDeviceSurfaceFormats2KHR", (void *)GetPhysicalDeviceSurfaceFormats2KHR},
{"vkGetPhysicalDeviceQueueFamilyProperties2", (void *)GetPhysicalDeviceQueueFamilyProperties2},
{"vkGetPhysicalDeviceQueueFamilyProperties2KHR", (void *)GetPhysicalDeviceQueueFamilyProperties2KHR},
{"vkEnumeratePhysicalDeviceGroups", (void *)EnumeratePhysicalDeviceGroups},
{"vkEnumeratePhysicalDeviceGroupsKHR", (void *)EnumeratePhysicalDeviceGroupsKHR},
{"vkCreateDebugReportCallbackEXT", (void *)CreateDebugReportCallbackEXT},
{"vkDestroyDebugReportCallbackEXT", (void *)DestroyDebugReportCallbackEXT},
{"vkDebugReportMessageEXT", (void *)DebugReportMessageEXT},
{"vkGetPhysicalDeviceDisplayPlanePropertiesKHR", (void *)GetPhysicalDeviceDisplayPlanePropertiesKHR},
{"vkGetPhysicalDeviceDisplayPlaneProperties2KHR", (void *)GetPhysicalDeviceDisplayPlaneProperties2KHR},
{"vkGetDisplayPlaneSupportedDisplaysKHR", (void *)GetDisplayPlaneSupportedDisplaysKHR},
{"vkGetDisplayPlaneCapabilitiesKHR", (void *)GetDisplayPlaneCapabilitiesKHR},
{"vkGetDisplayPlaneCapabilities2KHR", (void *)GetDisplayPlaneCapabilities2KHR},
{"vkImportSemaphoreFdKHR", (void *)ImportSemaphoreFdKHR},
{"vkGetSemaphoreFdKHR", (void *)GetSemaphoreFdKHR},
{"vkImportFenceFdKHR", (void *)ImportFenceFdKHR},
{"vkGetFenceFdKHR", (void *)GetFenceFdKHR},
{"vkCreateValidationCacheEXT", (void *)CreateValidationCacheEXT},
{"vkDestroyValidationCacheEXT", (void *)DestroyValidationCacheEXT},
{"vkGetValidationCacheDataEXT", (void *)GetValidationCacheDataEXT},
{"vkMergeValidationCachesEXT", (void *)MergeValidationCachesEXT},
{"vkCmdSetDiscardRectangleEXT", (void *)CmdSetDiscardRectangleEXT},
{"vkCmdSetSampleLocationsEXT", (void *)CmdSetSampleLocationsEXT},
{"vkSetDebugUtilsObjectNameEXT", (void *)SetDebugUtilsObjectNameEXT},
{"vkSetDebugUtilsObjectTagEXT", (void *)SetDebugUtilsObjectTagEXT},
{"vkQueueBeginDebugUtilsLabelEXT", (void *)QueueBeginDebugUtilsLabelEXT},
{"vkQueueEndDebugUtilsLabelEXT", (void *)QueueEndDebugUtilsLabelEXT},
{"vkQueueInsertDebugUtilsLabelEXT", (void *)QueueInsertDebugUtilsLabelEXT},
{"vkCmdBeginDebugUtilsLabelEXT", (void *)CmdBeginDebugUtilsLabelEXT},
{"vkCmdEndDebugUtilsLabelEXT", (void *)CmdEndDebugUtilsLabelEXT},
{"vkCmdInsertDebugUtilsLabelEXT", (void *)CmdInsertDebugUtilsLabelEXT},
{"vkCreateDebugUtilsMessengerEXT", (void *)CreateDebugUtilsMessengerEXT},
{"vkDestroyDebugUtilsMessengerEXT", (void *)DestroyDebugUtilsMessengerEXT},
{"vkSubmitDebugUtilsMessageEXT", (void *)SubmitDebugUtilsMessageEXT},
{"vkCmdDrawIndirectCountKHR", (void *)CmdDrawIndirectCountKHR},
{"vkCmdDrawIndexedIndirectCountKHR", (void *)CmdDrawIndexedIndirectCountKHR},
};
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) {
assert(device);
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!ApiParentExtensionEnabled(funcName, device_data->extensions.device_extension_set)) {
return nullptr;
}
// Is API to be intercepted by this layer?
const auto &item = name_to_funcptr_map.find(funcName);
if (item != name_to_funcptr_map.end()) {
return reinterpret_cast<PFN_vkVoidFunction>(item->second);
}
auto &table = device_data->dispatch_table;
if (!table.GetDeviceProcAddr) return nullptr;
return table.GetDeviceProcAddr(device, funcName);
}
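// Resolution order in the function above: (1) nullptr when the owning device
// extension is not enabled, (2) this layer's interception table, (3) pass-through
// down the chain. Illustrative lookup (a sketch, not layer code):
//
//   PFN_vkVoidFunction fp = vkGetDeviceProcAddr(device, "vkCmdDraw");
//   // resolves to core_validation::CmdDraw via name_to_funcptr_map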
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
instance_layer_data *instance_data;
// Is API to be intercepted by this layer?
const auto &item = name_to_funcptr_map.find(funcName);
if (item != name_to_funcptr_map.end()) {
return reinterpret_cast<PFN_vkVoidFunction>(item->second);
}
instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
auto &table = instance_data->dispatch_table;
if (!table.GetInstanceProcAddr) return nullptr;
return table.GetInstanceProcAddr(instance, funcName);
}
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
assert(instance);
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
auto &table = instance_data->dispatch_table;
if (!table.GetPhysicalDeviceProcAddr) return nullptr;
return table.GetPhysicalDeviceProcAddr(instance, funcName);
}
} // namespace core_validation
// loader-layer interface v0, just wrappers since there is only a layer
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
VkExtensionProperties *pProperties) {
return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
VkLayerProperties *pProperties) {
return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
VkLayerProperties *pProperties) {
// the layer command handles VK_NULL_HANDLE just fine internally
assert(physicalDevice == VK_NULL_HANDLE);
return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
const char *pLayerName, uint32_t *pCount,
VkExtensionProperties *pProperties) {
// the layer command handles VK_NULL_HANDLE just fine internally
assert(physicalDevice == VK_NULL_HANDLE);
return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
return core_validation::GetDeviceProcAddr(dev, funcName);
}
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
return core_validation::GetInstanceProcAddr(instance, funcName);
}
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance,
const char *funcName) {
return core_validation::GetPhysicalDeviceProcAddr(instance, funcName);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
assert(pVersionStruct != NULL);
assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);
// Fill in the function pointers if our version is at least capable of having the structure contain them.
if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
}
if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
core_validation::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion;
} else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
}
return VK_SUCCESS;
}
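// Loader-side negotiation, sketched for reference (an assumption, not layer code):
//
//   VkNegotiateLayerInterface version_struct = {};
//   version_struct.sType = LAYER_NEGOTIATE_INTERFACE_STRUCT;
//   version_struct.loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
//   vkNegotiateLoaderLayerInterfaceVersion(&version_struct);
//   // On return the struct holds the layer's entry points and the agreed version.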
 | 1 | 8,748 | The original text can be read to say "not waited on by queue ..." meaning that the second queue was supposed to have *waited* for the signal... which is especially confusing when it's the same queue that has signalled the semaphore *twice* without a wait. | KhronosGroup-Vulkan-ValidationLayers | cpp |
@@ -136,8 +136,8 @@ func (le *LookupEngine) Lookup(ctx context.Context, a types.Address) (peer.ID, e
case out := <-ch:
return out.(peer.ID), nil
case <-time.After(time.Second * 10):
- return "", fmt.Errorf("timed out waiting for response")
+ return "", fmt.Errorf("lookup timed out waiting for response")
case <-ctx.Done():
-		return "", fmt.Errorf("context cancelled")
+		return "", fmt.Errorf("lookup failed: context cancelled")
}
} | 1 | package lookup
import (
"context"
"encoding/json"
"fmt"
"sync"
"time"
logging "gx/ipfs/QmRb5jh8z2E8hMGN2tkvs1yHynUanqnZ3UeKwgN1i9P1F8/go-log"
floodsub "gx/ipfs/QmSFihvoND3eDaAYRCeLgLPt62yCPgMZs1NSZmKFEtJQQw/go-libp2p-floodsub"
peer "gx/ipfs/QmZoWKhxUmZ2seW4BzX6fJkNR8hh9PsGModr7q171yq2SS/go-libp2p-peer"
pubsub "gx/ipfs/QmdbxjQWogRCHRaxhhGnYdT1oQJzL9GdqSKzCdqWr85AP2/pubsub"
types "github.com/filecoin-project/go-filecoin/types"
wallet "github.com/filecoin-project/go-filecoin/wallet"
)
var FilLookupTopic = "/fil/lookup/" // nolint: golint
var log = logging.Logger("lookup")
// LookupEngine can be used to map addresses -> peer IDs
type LookupEngine struct { // nolint: golint
lk sync.Mutex
cache map[types.Address]peer.ID
ourPeerID peer.ID
reqPubsub *pubsub.PubSub
ps *floodsub.PubSub
Wallet *wallet.Wallet
}
// NewLookupEngine returns an engine for looking up peer IDs
func NewLookupEngine(ps *floodsub.PubSub, wallet *wallet.Wallet, self peer.ID) (*LookupEngine, error) {
sub, err := ps.Subscribe(FilLookupTopic) // nolint: errcheck
if err != nil {
return nil, err
}
le := &LookupEngine{
ps: ps,
cache: make(map[types.Address]peer.ID),
ourPeerID: self,
reqPubsub: pubsub.New(128),
Wallet: wallet,
}
go le.HandleMessages(sub)
return le, nil
}
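// Usage sketch (an assumption — the surrounding wiring names are hypothetical):
//
//	le, err := NewLookupEngine(ps, wlt, host.ID())
//	if err != nil { /* handle */ }
//	pid, err := le.Lookup(ctx, someAddress) // resolves an address to a peer ID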
type message struct {
Address types.Address
Peer string
Request bool
}
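// On the wire (JSON for now, per the TODOs below) a lookup request and its
// response look roughly like this sketch — the address and peer values are
// hypothetical:
//
//	{"Address":"fcqabc...","Peer":"","Request":true}             // request
//	{"Address":"fcqabc...","Peer":"QmPeerID...","Request":false} // response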
// HandleMessages manages sending and receiving messages
func (le *LookupEngine) HandleMessages(s *floodsub.Subscription) {
defer s.Cancel()
ctx := context.TODO()
for {
msg, err := s.Next(ctx)
if err != nil {
log.Error("from subscription.Next(): ", err)
return
}
if msg.GetFrom() == le.ourPeerID {
continue
}
var m message
// TODO: Replace with cbor
if err := json.Unmarshal(msg.GetData(), &m); err != nil {
log.Error("malformed message: ", err)
continue
}
le.lk.Lock()
if m.Request {
if le.Wallet.HasAddress(m.Address) {
go le.SendMessage(&message{
Address: m.Address,
Peer: le.ourPeerID.Pretty(),
})
}
} else {
pid, err := peer.IDB58Decode(m.Peer)
if err != nil {
log.Error("bad peer ID: ", err)
continue
}
le.cache[m.Address] = pid
le.reqPubsub.Pub(pid, m.Address.String())
}
le.lk.Unlock()
}
}
// SendMessage publishes message m on FilLookupTopic
func (le *LookupEngine) SendMessage(m *message) {
// TODO: Replace with cbor
d, err := json.Marshal(m)
if err != nil {
log.Error("failed to marshal message: ", err)
return
}
if err := le.ps.Publish(FilLookupTopic, d); err != nil { // nolint: errcheck
log.Error("publish failed: ", err)
}
}
// Lookup returns the peer ID associated with address a
func (le *LookupEngine) Lookup(ctx context.Context, a types.Address) (peer.ID, error) {
le.lk.Lock()
v, ok := le.cache[a]
le.lk.Unlock()
if ok {
return v, nil
}
ch := le.reqPubsub.SubOnce(a.String())
le.SendMessage(&message{
Address: a,
Request: true,
})
select {
case out := <-ch:
return out.(peer.ID), nil
case <-time.After(time.Second * 10):
return "", fmt.Errorf("timed out waiting for response")
case <-ctx.Done():
return "", fmt.Errorf("context cancled")
}
}
| 1 | 11,052 | just clarifying the error messages a bit. | filecoin-project-venus | go |
@@ -0,0 +1,18 @@
+// <copyright file="TelemetryRequestTypes.cs" company="Datadog">
+// Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License.
+// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
+// </copyright>
+
+namespace Datadog.Trace.Telemetry
+{
+ internal static class TelemetryRequestTypes
+ {
+ public const string AppStarted = "app-started";
+ public const string AppDependenciesLoaded = "app-dependencies-loaded";
+ public const string AppIntegrationsChanged = "app-integrations-change";
+ public const string AppHeartbeat = "app-heartbeat";
+ public const string AppClosing = "app-closing";
+
+ public const string GenerateMetrics = "generate-metrics";
+ }
+} | 1 | 1 | 25,687 | Have those (and other data like conf...) shared across tracers? | DataDog-dd-trace-dotnet | .cs |
|
@@ -0,0 +1,17 @@
+// +build !windows
+
+// Copyright 2020 Antrea Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package lib | 1 | 1 | 27,372 | could we add this file only when we need one? | antrea-io-antrea | go |
|
@@ -37,4 +37,7 @@ type Context struct {
// ACMEHTTP01SolverImage is the image to use for solving ACME HTTP01
// challenges
ACMEHTTP01SolverImage string
+	// ACMEDNS01ResolvConfFile is an optional custom resolv.conf file to use
+	// when solving ACME DNS01 challenges
+ ACMEDNS01ResolvConfFile string
} | 1 | package issuer
import (
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
clientset "github.com/jetstack/cert-manager/pkg/client/clientset/versioned"
informers "github.com/jetstack/cert-manager/pkg/client/informers/externalversions"
kubeinformers "github.com/jetstack/cert-manager/third_party/k8s.io/client-go/informers"
)
// Context contains various types that are used by Issuer implementations.
// We purposely don't have specific informers/listers here, and instead keep
// a reference to a SharedInformerFactory so that issuer constructors can
// choose themselves which listers are required.
type Context struct {
// Client is a Kubernetes clientset
Client kubernetes.Interface
// CMClient is a cert-manager clientset
CMClient clientset.Interface
// Recorder is an EventRecorder to log events to
Recorder record.EventRecorder
// KubeSharedInformerFactory can be used to obtain shared
// SharedIndexInformer instances for Kubernetes types
KubeSharedInformerFactory kubeinformers.SharedInformerFactory
// SharedInformerFactory can be used to obtain shared SharedIndexInformer
// instances
SharedInformerFactory informers.SharedInformerFactory
// Namespace is a namespace to operate within. This should be used when
// constructing SharedIndexInformers for the informer factory.
Namespace string
// ClusterResourceNamespace is the namespace to store resources created by
// non-namespaced resources (e.g. ClusterIssuer) in.
ClusterResourceNamespace string
// ACMEHTTP01SolverImage is the image to use for solving ACME HTTP01
// challenges
ACMEHTTP01SolverImage string
}
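// Minimal consumer sketch (an assumption for illustration, not actual
// cert-manager code): a DNS01 solver built from this Context could read the
// nameserver list from the custom resolv.conf via github.com/miekg/dns:
//
//	resolvConf := ctx.ACMEDNS01ResolvConfFile
//	if resolvConf == "" {
//		resolvConf = "/etc/resolv.conf"
//	}
//	cfg, err := dns.ClientConfigFromFile(resolvConf)
//	if err != nil {
//		return err
//	}
//	nameservers := cfg.Servers // consulted when verifying DNS01 propagation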
| 1 | 11,176 | This may not even be needed. | jetstack-cert-manager | go |
@@ -70,6 +70,12 @@ var KnownFields = map[string]bool{
"Test.Flakiness": true,
"Test.Results": true, // Recall that unsuccessful test results aren't cached...
+ // Debug fields don't contribute to any hash
+ "Debug": true,
+ "Debug.Command": true,
+ "Debug.tools": true,
+ "Debug.namedTools": true,
+
// These only contribute to the runtime hash, not at build time.
"Data": true,
"namedData": true, | 1 | // Test to make sure that every field in BuildTarget has been thought of
// in the rule hash calculation.
// Not every field necessarily needs to be hashed there (and indeed not
// all should be), this is just a guard against adding new fields and
// forgetting to update that function.
package build
import (
"fmt"
"reflect"
"testing"
"github.com/thought-machine/please/src/core"
)
var KnownFields = map[string]bool{
// These fields are explicitly hashed.
"Label": true,
"dependencies": true,
"Hashes": true,
"Sources": true,
"NamedSources": true,
"IsBinary": true,
"IsFilegroup": true,
"IsTextFile": true,
"FileContent": true,
"IsRemoteFile": true,
"Command": true,
"Commands": true,
"NeedsTransitiveDependencies": true,
"Local": true,
"OptionalOutputs": true,
"OutputIsComplete": true,
"Requires": true,
"PassEnv": true,
"Provides": true,
"PreBuildFunction": true,
"PostBuildFunction": true,
"PreBuildHash": true,
"PostBuildHash": true,
"outputs": true,
"namedOutputs": true,
"Licences": true,
"Sandbox": true,
"Tools": true,
"namedTools": true,
"Secrets": true,
"NamedSecrets": true,
"Stamp": true,
"OutputDirectories": true,
"ExitOnError": true,
"EntryPoints": true,
"Env": true,
// Test fields
"Test": true, // We hash the children of this
// Contribute to the runtime hash
"Test.Sandbox": true,
"Test.Commands": true,
"Test.Command": true,
"Test.tools": true,
"Test.namedTools": true,
"Test.Outputs": true,
// These don't need to be hashed
"Test.NoOutput": true,
"Test.Timeout": true,
"Test.Flakiness": true,
"Test.Results": true, // Recall that unsuccessful test results aren't cached...
// These only contribute to the runtime hash, not at build time.
"Data": true,
"namedData": true,
"ContainerSettings": true,
// These would ideally not contribute to the hash, but we need that at present
// because we don't have a good way to force a recheck of its reverse dependencies.
"Visibility": true,
"TestOnly": true,
"Labels": true,
// These fields we have thought about and decided that they shouldn't contribute to the
// hash because they don't affect the actual output of the target.
"Subrepo": true,
"AddedPostBuild": true,
"BuildTimeout": true,
"state": true,
"completedRuns": true,
"BuildingDescription": true,
"ShowProgress": true,
"Progress": true,
"PassUnsafeEnv": true,
"NeededForSubinclude": true,
"mutex": true,
"dependenciesRegistered": true,
"finishedBuilding": true,
// Used to save the rule hash rather than actually being hashed itself.
"RuleHash": true,
}
func TestAllFieldsArePresentAndAccountedFor(t *testing.T) {
target := &core.BuildTarget{}
val := reflect.ValueOf(target)
typ := val.Elem().Type()
for i := 0; i < typ.NumField(); i++ {
if field := typ.Field(i); !KnownFields[field.Name] {
t.Errorf("Unaccounted field in RuleHash: %s", field.Name)
}
}
}
func TestAllTestFieldsArePresentAndAccountedFor(t *testing.T) {
target := &core.TestFields{}
val := reflect.ValueOf(target)
typ := val.Elem().Type()
for i := 0; i < typ.NumField(); i++ {
if field := typ.Field(i); !KnownFields[fmt.Sprintf("Test.%s", field.Name)] {
t.Errorf("Unaccounted field in RuleHash: Test.%s", field.Name)
}
}
}
| 1 | 10,203 | Capitalisation seems inconsistent here? | thought-machine-please | go |
@@ -1,7 +1,11 @@
-namespace Nevermind.Core
+using System;
+
+namespace Nevermind.Core
{
public interface ILogger
{
void Log(string text);
+ void Debug(string text);
+ void Error(string text, Exception ex = null);
}
}
| 1 | namespace Nevermind.Core
{
public interface ILogger
{
void Log(string text);
}
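	// Sketch of the reviewer's suggestion below (hypothetical, not committed
	// code): replace the optional-parameter Error with explicit overloads.
	//
	//     void Error(string text);
	//     void Error(Exception ex);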
}
| 1 | 22,277 | maybe better to have Error(string text) and Error(Exception ex) as separate overloads? | NethermindEth-nethermind | .cs |
@@ -58,7 +58,7 @@ func matchingTools(config *core.Configuration, prefix string) map[string]string
"jarcat": config.Java.JarCatTool,
"javacworker": config.Java.JavacWorker,
"junitrunner": config.Java.JUnitRunner,
- "langserver": path.Join(config.Please.Location, "build_langserver"),
+ "lps": path.Join(config.Please.Location, "build_langserver"),
"maven": config.Java.PleaseMavenTool,
"pex": config.Python.PexTool,
}
| 1 | // Package tool implements running Please's sub-tools (via "plz tool jarcat" etc).
//
// N.B. This is not how they are invoked during the build; that runs them directly.
// This is only a convenience thing at the command line.
package tool
import (
"os"
"path"
"strings"
"syscall"
"github.com/jessevdk/go-flags"
"gopkg.in/op/go-logging.v1"
"core"
"sort"
)
var log = logging.MustGetLogger("tool")
// A Tool is one of Please's tools; this only exists for facilitating tab-completion for flags.
type Tool string
// Complete suggests completions for a partial tool name.
func (tool Tool) Complete(match string) []flags.Completion {
ret := []flags.Completion{}
for k := range matchingTools(core.DefaultConfiguration(), match) {
ret = append(ret, flags.Completion{Item: k})
}
return ret
}
// Run runs one of the sub-tools.
func Run(config *core.Configuration, tool Tool, args []string) {
tools := matchingTools(config, string(tool))
if len(tools) != 1 {
log.Fatalf("Unknown tool: %s. Must be one of [%s]", tool, strings.Join(allToolNames(config, ""), ", "))
}
target := core.ExpandHomePath(tools[allToolNames(config, string(tool))[0]])
if !core.LooksLikeABuildLabel(target) {
// Hopefully we have an absolute path now, so let's run it.
err := syscall.Exec(target, append([]string{target}, args...), os.Environ())
log.Fatalf("Failed to exec %s: %s", target, err) // Always a failure, exec never returns.
}
// The tool is allowed to be an in-repo target. In that case it's essentially equivalent to "plz run".
// We have to re-exec ourselves in such a case since we don't know enough about it to run it now.
plz, _ := os.Executable()
args = append([]string{os.Args[0], "run", target, "--"}, args...)
err := syscall.Exec(plz, args, os.Environ())
log.Fatalf("Failed to exec %s run %s: %s", plz, target, err) // Always a failure, exec never returns.
}
// matchingTools returns a set of matching tools for a string prefix.
func matchingTools(config *core.Configuration, prefix string) map[string]string {
knownTools := map[string]string{
"gotest": config.Go.TestTool,
"jarcat": config.Java.JarCatTool,
"javacworker": config.Java.JavacWorker,
"junitrunner": config.Java.JUnitRunner,
"langserver": path.Join(config.Please.Location, "build_langserver"),
"maven": config.Java.PleaseMavenTool,
"pex": config.Python.PexTool,
}
ret := map[string]string{}
for k, v := range knownTools {
if strings.HasPrefix(k, prefix) {
ret[k] = v
}
}
return ret
}
// allToolNames returns the names of all available tools.
func allToolNames(config *core.Configuration, prefix string) []string {
ret := []string{}
for k := range matchingTools(config, prefix) {
ret = append(ret, k)
}
sort.Strings(ret)
return ret
}
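// Illustrative behaviour of the prefix matching above (assuming the default
// tool paths from core.DefaultConfiguration()):
//
//	matchingTools(config, "pe") // matches only "pex", so Run will exec it
//	matchingTools(config, "j")  // jarcat, javacworker, junitrunner: ambiguous, Run fails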
| 1 | 8,684 | can you leave the old one in too please? at least for now, otherwise anyone using it now will break. | thought-machine-please | go |
@@ -3501,6 +3501,10 @@ void Player::onAttackedCreature(Creature* target)
{
Creature::onAttackedCreature(target);
+ if (target && target->getZone() == ZONE_PVP) {
+ return;
+ }
+
if (target == this) {
addInFightTicks();
return;
| 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2016 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include <bitset>
#include "bed.h"
#include "chat.h"
#include "combat.h"
#include "configmanager.h"
#include "creatureevent.h"
#include "events.h"
#include "game.h"
#include "iologindata.h"
#include "monster.h"
#include "movement.h"
#include "scheduler.h"
#include "weapons.h"
extern ConfigManager g_config;
extern Game g_game;
extern Chat* g_chat;
extern Vocations g_vocations;
extern MoveEvents* g_moveEvents;
extern Weapons* g_weapons;
extern CreatureEvents* g_creatureEvents;
extern Events* g_events;
MuteCountMap Player::muteCountMap;
uint32_t Player::playerAutoID = 0x10000000;
Player::Player(ProtocolGame_ptr p) :
Creature(), inventory(), client(p), varSkills(), varStats(), inventoryAbilities()
{
isConnecting = false;
accountNumber = 0;
vocation = nullptr;
capacity = 40000;
mana = 0;
manaMax = 0;
manaSpent = 0;
soul = 0;
guildLevel = 0;
guild = nullptr;
level = 1;
levelPercent = 0;
magLevelPercent = 0;
magLevel = 0;
experience = 0;
damageImmunities = 0;
conditionImmunities = 0;
conditionSuppressions = 0;
group = nullptr;
lastLoginSaved = 0;
lastLogout = 0;
lastIP = 0;
lastPing = OTSYS_TIME();
lastPong = lastPing;
MessageBufferTicks = 0;
MessageBufferCount = 0;
nextAction = 0;
windowTextId = 0;
writeItem = nullptr;
maxWriteLen = 0;
editHouse = nullptr;
editListId = 0;
shopOwner = nullptr;
purchaseCallback = -1;
saleCallback = -1;
pzLocked = false;
bloodHitCount = 0;
shieldBlockCount = 0;
lastAttackBlockType = BLOCK_NONE;
addAttackSkillPoint = false;
lastAttack = 0;
blessings = 0;
inMarket = false;
lastDepotId = -1;
chaseMode = CHASEMODE_STANDSTILL;
fightMode = FIGHTMODE_ATTACK;
bedItem = nullptr;
tradePartner = nullptr;
tradeState = TRADE_NONE;
tradeItem = nullptr;
walkTask = nullptr;
walkTaskEvent = 0;
actionTaskEvent = 0;
nextStepEvent = 0;
lastFailedFollow = 0;
lastWalkthroughAttempt = 0;
lastToggleMount = 0;
wasMounted = false;
sex = PLAYERSEX_FEMALE;
town = nullptr;
accountType = ACCOUNT_TYPE_NORMAL;
premiumDays = 0;
idleTime = 0;
skullTicks = 0;
party = nullptr;
bankBalance = 0;
inbox = new Inbox(ITEM_INBOX);
inbox->incrementReferenceCounter();
offlineTrainingSkill = -1;
offlineTrainingTime = 0;
lastStatsTrainingTime = 0;
ghostMode = false;
staminaMinutes = 2520;
lastQuestlogUpdate = 0;
inventoryWeight = 0;
operatingSystem = CLIENTOS_NONE;
secureMode = false;
guid = 0;
}
Player::~Player()
{
for (Item* item : inventory) {
if (item) {
item->setParent(nullptr);
item->decrementReferenceCounter();
}
}
for (const auto& it : depotLockerMap) {
it.second->removeInbox(inbox);
it.second->decrementReferenceCounter();
}
inbox->decrementReferenceCounter();
setWriteItem(nullptr);
setEditHouse(nullptr);
}
bool Player::setVocation(uint16_t vocId)
{
Vocation* voc = g_vocations.getVocation(vocId);
if (!voc) {
return false;
}
vocation = voc;
Condition* condition = getCondition(CONDITION_REGENERATION, CONDITIONID_DEFAULT);
if (condition) {
condition->setParam(CONDITION_PARAM_HEALTHGAIN, vocation->getHealthGainAmount());
condition->setParam(CONDITION_PARAM_HEALTHTICKS, vocation->getHealthGainTicks() * 1000);
condition->setParam(CONDITION_PARAM_MANAGAIN, vocation->getManaGainAmount());
condition->setParam(CONDITION_PARAM_MANATICKS, vocation->getManaGainTicks() * 1000);
}
return true;
}
bool Player::isPushable() const
{
if (hasFlag(PlayerFlag_CannotBePushed)) {
return false;
}
return Creature::isPushable();
}
std::string Player::getDescription(int32_t lookDistance) const
{
std::ostringstream s;
if (lookDistance == -1) {
s << "yourself.";
if (group->access) {
s << " You are " << group->name << '.';
} else if (vocation->getId() != VOCATION_NONE) {
s << " You are " << vocation->getVocDescription() << '.';
} else {
s << " You have no vocation.";
}
} else {
s << name;
if (!group->access) {
s << " (Level " << level << ')';
}
s << '.';
if (sex == PLAYERSEX_FEMALE) {
s << " She";
} else {
s << " He";
}
if (group->access) {
s << " is " << group->name << '.';
} else if (vocation->getId() != VOCATION_NONE) {
s << " is " << vocation->getVocDescription() << '.';
} else {
s << " has no vocation.";
}
}
if (party) {
if (lookDistance == -1) {
s << " Your party has ";
} else if (sex == PLAYERSEX_FEMALE) {
s << " She is in a party with ";
} else {
s << " He is in a party with ";
}
size_t memberCount = party->getMemberCount() + 1;
if (memberCount == 1) {
s << "1 member and ";
} else {
s << memberCount << " members and ";
}
size_t invitationCount = party->getInvitationCount();
if (invitationCount == 1) {
s << "1 pending invitation.";
} else {
s << invitationCount << " pending invitations.";
}
}
if (guild) {
const GuildRank* rank = guild->getRankByLevel(guildLevel);
if (rank) {
if (lookDistance == -1) {
s << " You are ";
} else if (sex == PLAYERSEX_FEMALE) {
s << " She is ";
} else {
s << " He is ";
}
s << rank->name << " of the " << guild->getName();
if (!guildNick.empty()) {
s << " (" << guildNick << ')';
}
size_t memberCount = guild->getMemberCount();
if (memberCount == 1) {
s << ", which has 1 member, " << guild->getMembersOnline().size() << " of them online.";
} else {
s << ", which has " << memberCount << " members, " << guild->getMembersOnline().size() << " of them online.";
}
}
}
return s.str();
}
Item* Player::getInventoryItem(slots_t slot) const
{
if (slot < CONST_SLOT_FIRST || slot > CONST_SLOT_LAST) {
return nullptr;
}
return inventory[slot];
}
void Player::addConditionSuppressions(uint32_t conditions)
{
conditionSuppressions |= conditions;
}
void Player::removeConditionSuppressions(uint32_t conditions)
{
conditionSuppressions &= ~conditions;
}
Item* Player::getWeapon(slots_t slot, bool ignoreAmmo) const
{
Item* item = inventory[slot];
if (!item) {
return nullptr;
}
WeaponType_t weaponType = item->getWeaponType();
if (weaponType == WEAPON_NONE || weaponType == WEAPON_SHIELD || weaponType == WEAPON_AMMO) {
return nullptr;
}
if (!ignoreAmmo && weaponType == WEAPON_DISTANCE) {
const ItemType& it = Item::items[item->getID()];
if (it.ammoType != AMMO_NONE) {
Item* ammoItem = inventory[CONST_SLOT_AMMO];
if (!ammoItem || ammoItem->getAmmoType() != it.ammoType) {
return nullptr;
}
item = ammoItem;
}
}
return item;
}
Item* Player::getWeapon(bool ignoreAmmo/* = false*/) const
{
Item* item = getWeapon(CONST_SLOT_LEFT, ignoreAmmo);
if (item) {
return item;
}
item = getWeapon(CONST_SLOT_RIGHT, ignoreAmmo);
if (item) {
return item;
}
return nullptr;
}
WeaponType_t Player::getWeaponType() const
{
Item* item = getWeapon();
if (!item) {
return WEAPON_NONE;
}
return item->getWeaponType();
}
int32_t Player::getWeaponSkill(const Item* item) const
{
if (!item) {
return getSkillLevel(SKILL_FIST);
}
int32_t attackSkill;
WeaponType_t weaponType = item->getWeaponType();
switch (weaponType) {
case WEAPON_SWORD: {
attackSkill = getSkillLevel(SKILL_SWORD);
break;
}
case WEAPON_CLUB: {
attackSkill = getSkillLevel(SKILL_CLUB);
break;
}
case WEAPON_AXE: {
attackSkill = getSkillLevel(SKILL_AXE);
break;
}
case WEAPON_DISTANCE: {
attackSkill = getSkillLevel(SKILL_DISTANCE);
break;
}
default: {
attackSkill = 0;
break;
}
}
return attackSkill;
}
int32_t Player::getArmor() const
{
int32_t armor = 0;
static const slots_t armorSlots[] = {CONST_SLOT_HEAD, CONST_SLOT_NECKLACE, CONST_SLOT_ARMOR, CONST_SLOT_LEGS, CONST_SLOT_FEET, CONST_SLOT_RING};
for (slots_t slot : armorSlots) {
Item* inventoryItem = inventory[slot];
if (inventoryItem) {
armor += inventoryItem->getArmor();
}
}
return static_cast<int32_t>(armor * vocation->armorMultiplier);
}
void Player::getShieldAndWeapon(const Item*& shield, const Item*& weapon) const
{
shield = nullptr;
weapon = nullptr;
for (uint32_t slot = CONST_SLOT_RIGHT; slot <= CONST_SLOT_LEFT; slot++) {
Item* item = inventory[slot];
if (!item) {
continue;
}
switch (item->getWeaponType()) {
case WEAPON_NONE:
break;
case WEAPON_SHIELD: {
if (!shield || item->getDefense() > shield->getDefense()) {
shield = item;
}
break;
}
default: { // weapons that are not shields
weapon = item;
break;
}
}
}
}
int32_t Player::getDefense() const
{
int32_t baseDefense = 5;
int32_t defenseValue = 0;
int32_t defenseSkill = 0;
int32_t extraDefense = 0;
float defenseFactor = getDefenseFactor();
const Item* weapon;
const Item* shield;
getShieldAndWeapon(shield, weapon);
if (weapon) {
defenseValue = baseDefense + weapon->getDefense();
extraDefense = weapon->getExtraDefense();
defenseSkill = getWeaponSkill(weapon);
}
if (shield && shield->getDefense() >= defenseValue) {
defenseValue = baseDefense + shield->getDefense() + extraDefense;
defenseSkill = getSkillLevel(SKILL_SHIELD);
}
if (defenseSkill == 0) {
return 0;
}
defenseValue = static_cast<int32_t>(defenseValue * vocation->defenseMultiplier);
return static_cast<int32_t>(std::ceil((static_cast<float>(defenseSkill * (defenseValue * 0.015)) + (defenseValue * 0.1)) * defenseFactor));
}
float Player::getAttackFactor() const
{
switch (fightMode) {
case FIGHTMODE_ATTACK: return 1.0f;
case FIGHTMODE_BALANCED: return 1.2f;
case FIGHTMODE_DEFENSE: return 2.0f;
default: return 1.0f;
}
}
float Player::getDefenseFactor() const
{
switch (fightMode) {
case FIGHTMODE_ATTACK: return 1.0f;
case FIGHTMODE_BALANCED: return 1.2f;
case FIGHTMODE_DEFENSE: {
if ((OTSYS_TIME() - lastAttack) < getAttackSpeed()) {
return 1.0f;
}
return 2.0f;
}
default: return 1.0f;
}
}
uint16_t Player::getClientIcons() const
{
uint16_t icons = 0;
for (Condition* condition : conditions) {
if (!isSuppress(condition->getType())) {
icons |= condition->getIcons();
}
}
if (pzLocked) {
icons |= ICON_REDSWORDS;
}
if (_tile->hasFlag(TILESTATE_PROTECTIONZONE)) {
icons |= ICON_PIGEON;
// Don't show ICON_SWORDS if player is in protection zone.
if (hasBitSet(ICON_SWORDS, icons)) {
icons &= ~ICON_SWORDS;
}
}
// Game client debugs with 10 or more icons
// so let's prevent that from happening.
std::bitset<20> icon_bitset(static_cast<uint64_t>(icons));
for (size_t pos = 0, bits_set = icon_bitset.count(); bits_set >= 10; ++pos) {
if (icon_bitset[pos]) {
icon_bitset.reset(pos);
--bits_set;
}
}
return icon_bitset.to_ulong();
}
void Player::updateInventoryWeight()
{
if (hasFlag(PlayerFlag_HasInfiniteCapacity)) {
return;
}
inventoryWeight = 0;
for (int i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; ++i) {
const Item* item = inventory[i];
if (item) {
inventoryWeight += item->getWeight();
}
}
}
int32_t Player::getPlayerInfo(playerinfo_t playerinfo) const
{
switch (playerinfo) {
case PLAYERINFO_LEVELPERCENT: return levelPercent;
case PLAYERINFO_MAGICLEVEL: return std::max<int32_t>(0, magLevel + varStats[STAT_MAGICPOINTS]);
case PLAYERINFO_MAGICLEVELPERCENT: return magLevelPercent;
case PLAYERINFO_HEALTH: return health;
case PLAYERINFO_MAXHEALTH: return std::max<int32_t>(1, healthMax + varStats[STAT_MAXHITPOINTS]);
case PLAYERINFO_MANA: return mana;
case PLAYERINFO_MAXMANA: return std::max<int32_t>(0, manaMax + varStats[STAT_MAXMANAPOINTS]);
default: return 0;
}
}
void Player::addSkillAdvance(skills_t skill, uint64_t count)
{
uint64_t currReqTries = vocation->getReqSkillTries(skill, skills[skill].level);
uint64_t nextReqTries = vocation->getReqSkillTries(skill, skills[skill].level + 1);
if (currReqTries >= nextReqTries) {
//player has reached max skill
return;
}
g_events->eventPlayerOnGainSkillTries(this, skill, count);
if (count == 0) {
return;
}
bool sendUpdateSkills = false;
while ((skills[skill].tries + count) >= nextReqTries) {
count -= nextReqTries - skills[skill].tries;
skills[skill].level++;
skills[skill].tries = 0;
skills[skill].percent = 0;
std::ostringstream ss;
ss << "You advanced to " << getSkillName(skill) << " level " << skills[skill].level << '.';
sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str());
g_creatureEvents->playerAdvance(this, skill, (skills[skill].level - 1), skills[skill].level);
sendUpdateSkills = true;
currReqTries = nextReqTries;
nextReqTries = vocation->getReqSkillTries(skill, skills[skill].level + 1);
if (currReqTries >= nextReqTries) {
count = 0;
break;
}
}
skills[skill].tries += count;
uint32_t newPercent;
if (nextReqTries > currReqTries) {
newPercent = Player::getPercentLevel(skills[skill].tries, nextReqTries);
} else {
newPercent = 0;
}
if (skills[skill].percent != newPercent) {
skills[skill].percent = newPercent;
sendUpdateSkills = true;
}
if (sendUpdateSkills) {
sendSkills();
}
}
void Player::setVarStats(stats_t stat, int32_t modifier)
{
varStats[stat] += modifier;
switch (stat) {
case STAT_MAXHITPOINTS: {
if (getHealth() > getMaxHealth()) {
Creature::changeHealth(getMaxHealth() - getHealth());
} else {
g_game.addCreatureHealth(this);
}
break;
}
case STAT_MAXMANAPOINTS: {
if (getMana() > getMaxMana()) {
Creature::changeMana(getMaxMana() - getMana());
}
break;
}
default: {
break;
}
}
}
int32_t Player::getDefaultStats(stats_t stat) const
{
switch (stat) {
case STAT_MAXHITPOINTS: return healthMax;
case STAT_MAXMANAPOINTS: return manaMax;
case STAT_MAGICPOINTS: return getBaseMagicLevel();
default: return 0;
}
}
void Player::addContainer(uint8_t cid, Container* container)
{
if (cid > 0xF) {
return;
}
if (container->getID() == ITEM_BROWSEFIELD) {
container->incrementReferenceCounter();
}
auto it = openContainers.find(cid);
if (it != openContainers.end()) {
OpenContainer& openContainer = it->second;
Container* oldContainer = openContainer.container;
if (oldContainer->getID() == ITEM_BROWSEFIELD) {
oldContainer->decrementReferenceCounter();
}
openContainer.container = container;
openContainer.index = 0;
} else {
OpenContainer openContainer;
openContainer.container = container;
openContainer.index = 0;
openContainers[cid] = openContainer;
}
}
void Player::closeContainer(uint8_t cid)
{
auto it = openContainers.find(cid);
if (it == openContainers.end()) {
return;
}
OpenContainer openContainer = it->second;
Container* container = openContainer.container;
openContainers.erase(it);
if (container && container->getID() == ITEM_BROWSEFIELD) {
container->decrementReferenceCounter();
}
}
void Player::setContainerIndex(uint8_t cid, uint16_t index)
{
auto it = openContainers.find(cid);
if (it == openContainers.end()) {
return;
}
it->second.index = index;
}
Container* Player::getContainerByID(uint8_t cid)
{
auto it = openContainers.find(cid);
if (it == openContainers.end()) {
return nullptr;
}
return it->second.container;
}
int8_t Player::getContainerID(const Container* container) const
{
for (const auto& it : openContainers) {
if (it.second.container == container) {
return it.first;
}
}
return -1;
}
uint16_t Player::getContainerIndex(uint8_t cid) const
{
auto it = openContainers.find(cid);
if (it == openContainers.end()) {
return 0;
}
return it->second.index;
}
bool Player::canOpenCorpse(uint32_t ownerId) const
{
return getID() == ownerId || (party && party->canOpenCorpse(ownerId));
}
uint16_t Player::getLookCorpse() const
{
if (sex == PLAYERSEX_FEMALE) {
return ITEM_FEMALE_CORPSE;
} else {
return ITEM_MALE_CORPSE;
}
}
void Player::addStorageValue(const uint32_t key, const int32_t value, const bool isLogin/* = false*/)
{
if (IS_IN_KEYRANGE(key, RESERVED_RANGE)) {
if (IS_IN_KEYRANGE(key, OUTFITS_RANGE)) {
outfits.emplace_back(
value >> 16,
value & 0xFF
);
return;
} else if (IS_IN_KEYRANGE(key, MOUNTS_RANGE)) {
// do nothing
} else {
std::cout << "Warning: unknown reserved key: " << key << " player: " << getName() << std::endl;
return;
}
}
if (value != -1) {
int32_t oldValue;
getStorageValue(key, oldValue);
storageMap[key] = value;
if (!isLogin) {
auto currentFrameTime = g_dispatcher.getDispatcherCycle();
if (lastQuestlogUpdate != currentFrameTime && g_game.quests.isQuestStorage(key, value, oldValue)) {
lastQuestlogUpdate = currentFrameTime;
sendTextMessage(MESSAGE_EVENT_ADVANCE, "Your questlog has been updated.");
}
}
} else {
storageMap.erase(key);
}
}
bool Player::getStorageValue(const uint32_t key, int32_t& value) const
{
auto it = storageMap.find(key);
if (it == storageMap.end()) {
value = -1;
return false;
}
value = it->second;
return true;
}
bool Player::canSee(const Position& pos) const
{
if (!client) {
return false;
}
return client->canSee(pos);
}
bool Player::canSeeCreature(const Creature* creature) const
{
if (creature == this) {
return true;
}
if (creature->isInGhostMode() && !group->access) {
return false;
}
if (!creature->getPlayer() && !canSeeInvisibility() && creature->isInvisible()) {
return false;
}
return true;
}
bool Player::canWalkthrough(const Creature* creature) const
{
if (group->access || creature->isInGhostMode()) {
return true;
}
const Player* player = creature->getPlayer();
if (!player) {
return false;
}
const Tile* playerTile = player->getTile();
if (!playerTile || !playerTile->hasFlag(TILESTATE_PROTECTIONZONE)) {
return false;
}
const Item* playerTileGround = playerTile->getGround();
if (!playerTileGround || !playerTileGround->hasWalkStack()) {
return false;
}
Player* thisPlayer = const_cast<Player*>(this);
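	// Walking through requires two attempts on the same, stationary target
	// within two seconds; the first attempt (or any change in the target's
	// position) only records state and returns false.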
if ((OTSYS_TIME() - lastWalkthroughAttempt) > 2000) {
thisPlayer->setLastWalkthroughAttempt(OTSYS_TIME());
return false;
}
if (creature->getPosition() != lastWalkthroughPosition) {
thisPlayer->setLastWalkthroughPosition(creature->getPosition());
return false;
}
thisPlayer->setLastWalkthroughPosition(creature->getPosition());
return true;
}
bool Player::canWalkthroughEx(const Creature* creature) const
{
if (group->access) {
return true;
}
const Player* player = creature->getPlayer();
if (!player) {
return false;
}
const Tile* playerTile = player->getTile();
return playerTile && playerTile->hasFlag(TILESTATE_PROTECTIONZONE);
}
void Player::onReceiveMail() const
{
if (isNearDepotBox()) {
sendTextMessage(MESSAGE_EVENT_ADVANCE, "New mail has arrived.");
}
}
bool Player::isNearDepotBox() const
{
const Position& pos = getPosition();
for (int32_t cx = -1; cx <= 1; ++cx) {
for (int32_t cy = -1; cy <= 1; ++cy) {
Tile* tile = g_game.map.getTile(pos.x + cx, pos.y + cy, pos.z);
if (!tile) {
continue;
}
if (tile->hasFlag(TILESTATE_DEPOT)) {
return true;
}
}
}
return false;
}
DepotChest* Player::getDepotChest(uint32_t depotId, bool autoCreate)
{
auto it = depotChests.find(depotId);
if (it != depotChests.end()) {
return it->second;
}
if (!autoCreate) {
return nullptr;
}
DepotChest* depotChest = new DepotChest(ITEM_DEPOT);
depotChest->incrementReferenceCounter();
depotChest->setMaxDepotItems(getMaxDepotItems());
depotChests[depotId] = depotChest;
return depotChest;
}
DepotLocker* Player::getDepotLocker(uint32_t depotId)
{
auto it = depotLockerMap.find(depotId);
if (it != depotLockerMap.end()) {
inbox->setParent(it->second);
return it->second;
}
DepotLocker* depotLocker = new DepotLocker(ITEM_LOCKER1);
depotLocker->setDepotId(depotId);
depotLocker->internalAddThing(Item::CreateItem(ITEM_MARKET));
depotLocker->internalAddThing(inbox);
depotLocker->internalAddThing(getDepotChest(depotId, true));
depotLockerMap[depotId] = depotLocker;
return depotLocker;
}
void Player::sendCancelMessage(ReturnValue message) const
{
sendCancelMessage(getReturnMessage(message));
}
void Player::sendStats()
{
if (client) {
client->sendStats();
lastStatsTrainingTime = getOfflineTrainingTime() / 60 / 1000;
}
}
void Player::sendPing()
{
int64_t timeNow = OTSYS_TIME();
bool hasLostConnection = false;
if ((timeNow - lastPing) >= 5000) {
lastPing = timeNow;
if (client) {
client->sendPing();
} else {
hasLostConnection = true;
}
}
int64_t noPongTime = timeNow - lastPong;
if ((hasLostConnection || noPongTime >= 7000) && attackedCreature && attackedCreature->getPlayer()) {
setAttackedCreature(nullptr);
}
if (noPongTime >= 60000 && canLogout()) {
if (g_creatureEvents->playerLogout(this)) {
if (client) {
client->logout(true, true);
} else {
g_game.removeCreature(this, true);
}
}
}
}
Item* Player::getWriteItem(uint32_t& _windowTextId, uint16_t& _maxWriteLen)
{
_windowTextId = windowTextId;
_maxWriteLen = maxWriteLen;
return writeItem;
}
void Player::setWriteItem(Item* item, uint16_t _maxWriteLen /*= 0*/)
{
windowTextId++;
if (writeItem) {
writeItem->decrementReferenceCounter();
}
if (item) {
writeItem = item;
maxWriteLen = _maxWriteLen;
writeItem->incrementReferenceCounter();
} else {
writeItem = nullptr;
maxWriteLen = 0;
}
}
House* Player::getEditHouse(uint32_t& _windowTextId, uint32_t& _listId)
{
_windowTextId = windowTextId;
_listId = editListId;
return editHouse;
}
void Player::setEditHouse(House* house, uint32_t listId /*= 0*/)
{
windowTextId++;
editHouse = house;
editListId = listId;
}
void Player::sendHouseWindow(House* house, uint32_t listId) const
{
if (!client) {
return;
}
std::string text;
if (house->getAccessList(listId, text)) {
client->sendHouseWindow(windowTextId, text);
}
}
//container
void Player::sendAddContainerItem(const Container* container, const Item* item)
{
if (!client) {
return;
}
for (const auto& it : openContainers) {
const OpenContainer& openContainer = it.second;
if (openContainer.container != container) {
continue;
}
uint16_t slot = openContainer.index;
if (container->getID() == ITEM_BROWSEFIELD) {
uint16_t containerSize = container->size() - 1;
uint16_t pageEnd = openContainer.index + container->capacity() - 1;
if (containerSize > pageEnd) {
slot = pageEnd;
item = container->getItemByIndex(pageEnd);
} else {
slot = containerSize;
}
} else if (openContainer.index >= container->capacity()) {
item = container->getItemByIndex(openContainer.index - 1);
}
client->sendAddContainerItem(it.first, slot, item);
}
}
void Player::sendUpdateContainerItem(const Container* container, uint16_t slot, const Item* newItem)
{
if (!client) {
return;
}
for (const auto& it : openContainers) {
const OpenContainer& openContainer = it.second;
if (openContainer.container != container) {
continue;
}
if (slot < openContainer.index) {
continue;
}
uint16_t pageEnd = openContainer.index + container->capacity();
if (slot >= pageEnd) {
continue;
}
client->sendUpdateContainerItem(it.first, slot, newItem);
}
}
void Player::sendRemoveContainerItem(const Container* container, uint16_t slot)
{
if (!client) {
return;
}
for (auto& it : openContainers) {
OpenContainer& openContainer = it.second;
if (openContainer.container != container) {
continue;
}
uint16_t& firstIndex = openContainer.index;
if (firstIndex > 0 && firstIndex >= container->size() - 1) {
firstIndex -= container->capacity();
sendContainer(it.first, container, false, firstIndex);
}
client->sendRemoveContainerItem(it.first, std::max<uint16_t>(slot, firstIndex), container->getItemByIndex(container->capacity() + firstIndex));
}
}
void Player::onUpdateTileItem(const Tile* tile, const Position& pos, const Item* oldItem,
const ItemType& oldType, const Item* newItem, const ItemType& newType)
{
Creature::onUpdateTileItem(tile, pos, oldItem, oldType, newItem, newType);
if (oldItem != newItem) {
onRemoveTileItem(tile, pos, oldType, oldItem);
}
if (tradeState != TRADE_TRANSFER) {
if (tradeItem && oldItem == tradeItem) {
g_game.internalCloseTrade(this);
}
}
}
void Player::onRemoveTileItem(const Tile* tile, const Position& pos, const ItemType& iType,
const Item* item)
{
Creature::onRemoveTileItem(tile, pos, iType, item);
if (tradeState != TRADE_TRANSFER) {
checkTradeState(item);
if (tradeItem) {
const Container* container = item->getContainer();
if (container && container->isHoldingItem(tradeItem)) {
g_game.internalCloseTrade(this);
}
}
}
}
void Player::onCreatureAppear(Creature* creature, bool isLogin)
{
Creature::onCreatureAppear(creature, isLogin);
if (isLogin && creature == this) {
for (int32_t slot = CONST_SLOT_FIRST; slot <= CONST_SLOT_LAST; ++slot) {
Item* item = inventory[slot];
if (item) {
item->startDecaying();
g_moveEvents->onPlayerEquip(this, item, static_cast<slots_t>(slot), false);
}
}
for (Condition* condition : storedConditionList) {
addCondition(condition);
}
storedConditionList.clear();
BedItem* bed = g_game.getBedBySleeper(guid);
if (bed) {
bed->wakeUp(this);
}
std::cout << name << " has logged in." << std::endl;
if (guild) {
guild->addMember(this);
}
int32_t offlineTime;
if (getLastLogout() != 0) {
// Not counting more than 21 days to prevent overflow when multiplying with 1000 (for milliseconds).
offlineTime = std::min<int32_t>(time(nullptr) - getLastLogout(), 86400 * 21);
} else {
offlineTime = 0;
}
for (Condition* condition : getMuteConditions()) {
condition->setTicks(condition->getTicks() - (offlineTime * 1000));
if (condition->getTicks() <= 0) {
removeCondition(condition);
}
}
g_game.checkPlayersRecord();
IOLoginData::updateOnlineStatus(guid, true);
}
}
void Player::onAttackedCreatureDisappear(bool isLogout)
{
sendCancelTarget();
if (!isLogout) {
sendTextMessage(MESSAGE_STATUS_SMALL, "Target lost.");
}
}
void Player::onFollowCreatureDisappear(bool isLogout)
{
sendCancelTarget();
if (!isLogout) {
sendTextMessage(MESSAGE_STATUS_SMALL, "Target lost.");
}
}
void Player::onChangeZone(ZoneType_t zone)
{
if (zone == ZONE_PROTECTION) {
if (attackedCreature && !hasFlag(PlayerFlag_IgnoreProtectionZone)) {
setAttackedCreature(nullptr);
onAttackedCreatureDisappear(false);
}
if (!group->access && isMounted()) {
dismount();
g_game.internalCreatureChangeOutfit(this, defaultOutfit);
wasMounted = true;
}
} else {
if (wasMounted) {
toggleMount(true);
wasMounted = false;
}
}
g_game.updateCreatureWalkthrough(this);
sendIcons();
}
void Player::onAttackedCreatureChangeZone(ZoneType_t zone)
{
if (zone == ZONE_PROTECTION) {
if (!hasFlag(PlayerFlag_IgnoreProtectionZone)) {
setAttackedCreature(nullptr);
onAttackedCreatureDisappear(false);
}
} else if (zone == ZONE_NOPVP) {
if (attackedCreature->getPlayer()) {
if (!hasFlag(PlayerFlag_IgnoreProtectionZone)) {
setAttackedCreature(nullptr);
onAttackedCreatureDisappear(false);
}
}
} else if (zone == ZONE_NORMAL) {
//attackedCreature can leave a pvp zone if not pzlocked
if (g_game.getWorldType() == WORLD_TYPE_NO_PVP) {
if (attackedCreature->getPlayer()) {
setAttackedCreature(nullptr);
onAttackedCreatureDisappear(false);
}
}
}
}
void Player::onRemoveCreature(Creature* creature, bool isLogout)
{
Creature::onRemoveCreature(creature, isLogout);
if (creature == this) {
if (isLogout) {
loginPosition = getPosition();
}
lastLogout = time(nullptr);
if (eventWalk != 0) {
setFollowCreature(nullptr);
}
if (tradePartner) {
g_game.internalCloseTrade(this);
}
closeShopWindow();
clearPartyInvitations();
if (party) {
party->leaveParty(this);
}
g_chat->removeUserFromAllChannels(*this);
std::cout << getName() << " has logged out." << std::endl;
if (guild) {
guild->removeMember(this);
}
IOLoginData::updateOnlineStatus(guid, false);
bool saved = false;
for (uint32_t tries = 0; tries < 3; ++tries) {
if (IOLoginData::savePlayer(this)) {
saved = true;
break;
}
}
if (!saved) {
std::cout << "Error while saving player: " << getName() << std::endl;
}
}
}
void Player::openShopWindow(Npc* npc, const std::list<ShopInfo>& shop)
{
shopItemList = shop;
sendShop(npc);
sendSaleItemList();
}
bool Player::closeShopWindow(bool sendCloseShopWindow /*= true*/)
{
//unreference callbacks
int32_t onBuy;
int32_t onSell;
Npc* npc = getShopOwner(onBuy, onSell);
if (!npc) {
shopItemList.clear();
return false;
}
setShopOwner(nullptr, -1, -1);
npc->onPlayerEndTrade(this, onBuy, onSell);
if (sendCloseShopWindow) {
sendCloseShop();
}
shopItemList.clear();
return true;
}
void Player::onWalk(Direction& dir)
{
Creature::onWalk(dir);
setNextActionTask(nullptr);
setNextAction(OTSYS_TIME() + getStepDuration(dir));
}
void Player::onCreatureMove(Creature* creature, const Tile* newTile, const Position& newPos,
const Tile* oldTile, const Position& oldPos, bool teleport)
{
Creature::onCreatureMove(creature, newTile, newPos, oldTile, oldPos, teleport);
if (hasFollowPath && (creature == followCreature || (creature == this && followCreature))) {
isUpdatingPath = false;
g_dispatcher.addTask(createTask(std::bind(&Game::updateCreatureWalk, &g_game, getID())));
}
if (creature != this) {
return;
}
if (tradeState != TRADE_TRANSFER) {
//check if we should close trade
if (tradeItem && !Position::areInRange<1, 1, 0>(tradeItem->getPosition(), getPosition())) {
g_game.internalCloseTrade(this);
}
if (tradePartner && !Position::areInRange<2, 2, 0>(tradePartner->getPosition(), getPosition())) {
g_game.internalCloseTrade(this);
}
}
// close modal windows
if (!modalWindows.empty()) {
// TODO: This shouldn't be hardcoded
for (uint32_t modalWindowId : modalWindows) {
if (modalWindowId == std::numeric_limits<uint32_t>::max()) {
sendTextMessage(MESSAGE_EVENT_ADVANCE, "Offline training aborted.");
break;
}
}
modalWindows.clear();
}
// leave market
if (inMarket) {
inMarket = false;
}
if (party) {
party->updateSharedExperience();
}
if (teleport || oldPos.z != newPos.z) {
int32_t ticks = g_config.getNumber(ConfigManager::STAIRHOP_DELAY);
if (ticks > 0) {
if (Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_PACIFIED, ticks, 0)) {
addCondition(condition);
}
}
}
}
//container
void Player::onAddContainerItem(const Item* item)
{
checkTradeState(item);
}
void Player::onUpdateContainerItem(const Container* container, const Item* oldItem, const Item* newItem)
{
if (oldItem != newItem) {
onRemoveContainerItem(container, oldItem);
}
if (tradeState != TRADE_TRANSFER) {
checkTradeState(oldItem);
}
}
void Player::onRemoveContainerItem(const Container* container, const Item* item)
{
if (tradeState != TRADE_TRANSFER) {
checkTradeState(item);
if (tradeItem) {
if (tradeItem->getParent() != container && container->isHoldingItem(tradeItem)) {
g_game.internalCloseTrade(this);
}
}
}
}
void Player::onCloseContainer(const Container* container)
{
if (!client) {
return;
}
for (const auto& it : openContainers) {
if (it.second.container == container) {
client->sendCloseContainer(it.first);
}
}
}
void Player::onSendContainer(const Container* container)
{
if (!client) {
return;
}
bool hasParent = container->hasParent();
for (const auto& it : openContainers) {
const OpenContainer& openContainer = it.second;
if (openContainer.container == container) {
client->sendContainer(it.first, container, hasParent, openContainer.index);
}
}
}
//inventory
void Player::onUpdateInventoryItem(Item* oldItem, Item* newItem)
{
if (oldItem != newItem) {
onRemoveInventoryItem(oldItem);
}
if (tradeState != TRADE_TRANSFER) {
checkTradeState(oldItem);
}
}
void Player::onRemoveInventoryItem(Item* item)
{
if (tradeState != TRADE_TRANSFER) {
checkTradeState(item);
if (tradeItem) {
const Container* container = item->getContainer();
if (container && container->isHoldingItem(tradeItem)) {
g_game.internalCloseTrade(this);
}
}
}
}
void Player::checkTradeState(const Item* item)
{
if (!tradeItem || tradeState == TRADE_TRANSFER) {
return;
}
if (tradeItem == item) {
g_game.internalCloseTrade(this);
} else {
const Container* container = dynamic_cast<const Container*>(item->getParent());
while (container) {
if (container == tradeItem) {
g_game.internalCloseTrade(this);
break;
}
container = dynamic_cast<const Container*>(container->getParent());
}
}
}
void Player::setNextWalkActionTask(SchedulerTask* task)
{
if (walkTaskEvent != 0) {
g_scheduler.stopEvent(walkTaskEvent);
walkTaskEvent = 0;
}
delete walkTask;
walkTask = task;
}
void Player::setNextWalkTask(SchedulerTask* task)
{
if (nextStepEvent != 0) {
g_scheduler.stopEvent(nextStepEvent);
nextStepEvent = 0;
}
if (task) {
nextStepEvent = g_scheduler.addEvent(task);
resetIdleTime();
}
}
void Player::setNextActionTask(SchedulerTask* task)
{
if (actionTaskEvent != 0) {
g_scheduler.stopEvent(actionTaskEvent);
actionTaskEvent = 0;
}
if (task) {
actionTaskEvent = g_scheduler.addEvent(task);
resetIdleTime();
}
}
uint32_t Player::getNextActionTime() const
{
return std::max<int64_t>(SCHEDULER_MINTICKS, nextAction - OTSYS_TIME());
}
void Player::onThink(uint32_t interval)
{
Creature::onThink(interval);
sendPing();
MessageBufferTicks += interval;
if (MessageBufferTicks >= 1500) {
MessageBufferTicks = 0;
addMessageBuffer();
}
if (!getTile()->hasFlag(TILESTATE_NOLOGOUT) && !isAccessPlayer()) {
idleTime += interval;
const int32_t kickAfterMinutes = g_config.getNumber(ConfigManager::KICK_AFTER_MINUTES);
if (idleTime > (kickAfterMinutes * 60000) + 60000) {
kickPlayer(true);
} else if (client && idleTime == 60000 * kickAfterMinutes) {
std::ostringstream ss;
ss << "You have been idle for " << kickAfterMinutes << " minutes. You will be disconnected in one minute if you are still idle then.";
client->sendTextMessage(TextMessage(MESSAGE_STATUS_WARNING, ss.str()));
}
}
if (g_game.getWorldType() != WORLD_TYPE_PVP_ENFORCED) {
checkSkullTicks(interval);
}
addOfflineTrainingTime(interval);
if (lastStatsTrainingTime != getOfflineTrainingTime() / 60 / 1000) {
sendStats();
}
}
uint32_t Player::isMuted() const
{
if (hasFlag(PlayerFlag_CannotBeMuted)) {
return 0;
}
int32_t muteTicks = 0;
for (Condition* condition : conditions) {
if (condition->getType() == CONDITION_MUTED && condition->getTicks() > muteTicks) {
muteTicks = condition->getTicks();
}
}
return static_cast<uint32_t>(muteTicks) / 1000;
}
void Player::addMessageBuffer()
{
if (MessageBufferCount > 0 && g_config.getNumber(ConfigManager::MAX_MESSAGEBUFFER) != 0 && !hasFlag(PlayerFlag_CannotBeMuted)) {
--MessageBufferCount;
}
}
void Player::removeMessageBuffer()
{
if (hasFlag(PlayerFlag_CannotBeMuted)) {
return;
}
const int32_t maxMessageBuffer = g_config.getNumber(ConfigManager::MAX_MESSAGEBUFFER);
if (maxMessageBuffer != 0 && MessageBufferCount <= maxMessageBuffer + 1) {
if (++MessageBufferCount > maxMessageBuffer) {
uint32_t muteCount = 1;
auto it = muteCountMap.find(guid);
if (it != muteCountMap.end()) {
muteCount = it->second;
}
uint32_t muteTime = 5 * muteCount * muteCount;
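			// Quadratic escalation: the 1st mute lasts 5s, the 2nd 20s, the 3rd 45s, and so on.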
muteCountMap[guid] = muteCount + 1;
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_MUTED, muteTime * 1000, 0);
addCondition(condition);
std::ostringstream ss;
ss << "You are muted for " << muteTime << " seconds.";
sendTextMessage(MESSAGE_STATUS_SMALL, ss.str());
}
}
}
void Player::drainHealth(Creature* attacker, int32_t damage)
{
Creature::drainHealth(attacker, damage);
sendStats();
}
void Player::drainMana(Creature* attacker, int32_t manaLoss)
{
Creature::drainMana(attacker, manaLoss);
sendStats();
}
void Player::addManaSpent(uint64_t amount)
{
if (hasFlag(PlayerFlag_NotGainMana)) {
return;
}
uint64_t currReqMana = vocation->getReqMana(magLevel);
uint64_t nextReqMana = vocation->getReqMana(magLevel + 1);
if (currReqMana >= nextReqMana) {
//player has reached max magic level
return;
}
g_events->eventPlayerOnGainSkillTries(this, SKILL_MAGLEVEL, amount);
if (amount == 0) {
return;
}
bool sendUpdateStats = false;
while ((manaSpent + amount) >= nextReqMana) {
amount -= nextReqMana - manaSpent;
magLevel++;
manaSpent = 0;
std::ostringstream ss;
ss << "You advanced to magic level " << magLevel << '.';
sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str());
g_creatureEvents->playerAdvance(this, SKILL_MAGLEVEL, magLevel - 1, magLevel);
sendUpdateStats = true;
currReqMana = nextReqMana;
nextReqMana = vocation->getReqMana(magLevel + 1);
if (currReqMana >= nextReqMana) {
return;
}
}
manaSpent += amount;
uint8_t oldPercent = magLevelPercent;
if (nextReqMana > currReqMana) {
magLevelPercent = Player::getPercentLevel(manaSpent, nextReqMana);
} else {
magLevelPercent = 0;
}
if (oldPercent != magLevelPercent) {
sendUpdateStats = true;
}
if (sendUpdateStats) {
sendStats();
}
}
void Player::addExperience(Creature* source, uint64_t exp, bool sendText/* = false*/)
{
uint64_t currLevelExp = Player::getExpForLevel(level);
uint64_t nextLevelExp = Player::getExpForLevel(level + 1);
uint64_t rawExp = exp;
if (currLevelExp >= nextLevelExp) {
//player has reached max level
levelPercent = 0;
sendStats();
return;
}
g_events->eventPlayerOnGainExperience(this, source, exp, rawExp);
if (exp == 0) {
return;
}
experience += exp;
if (sendText) {
std::string expString = std::to_string(exp) + (exp != 1 ? " experience points." : " experience point.");
TextMessage message(MESSAGE_EXPERIENCE, "You gained " + expString);
message.position = _position;
message.primary.value = exp;
message.primary.color = TEXTCOLOR_WHITE_EXP;
sendTextMessage(message);
SpectatorVec list;
g_game.map.getSpectators(list, _position, false, true);
list.erase(this);
if (!list.empty()) {
message.type = MESSAGE_EXPERIENCE_OTHERS;
message.text = getName() + " gained " + expString;
for (Creature* spectator : list) {
spectator->getPlayer()->sendTextMessage(message);
}
}
}
uint32_t prevLevel = level;
while (experience >= nextLevelExp) {
++level;
healthMax += vocation->getHPGain();
health += vocation->getHPGain();
manaMax += vocation->getManaGain();
mana += vocation->getManaGain();
capacity += vocation->getCapGain();
currLevelExp = nextLevelExp;
nextLevelExp = Player::getExpForLevel(level + 1);
if (currLevelExp >= nextLevelExp) {
//player has reached max level
break;
}
}
if (prevLevel != level) {
health = healthMax;
mana = manaMax;
updateBaseSpeed();
setBaseSpeed(getBaseSpeed());
g_game.changeSpeed(this, 0);
g_game.addCreatureHealth(this);
if (party) {
party->updateSharedExperience();
}
g_creatureEvents->playerAdvance(this, SKILL_LEVEL, prevLevel, level);
std::ostringstream ss;
ss << "You advanced from Level " << prevLevel << " to Level " << level << '.';
sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str());
}
if (nextLevelExp > currLevelExp) {
levelPercent = Player::getPercentLevel(experience - currLevelExp, nextLevelExp - currLevelExp);
} else {
levelPercent = 0;
}
sendStats();
}
void Player::removeExperience(uint64_t exp, bool sendText/* = false*/)
{
if (experience == 0 || exp == 0) {
return;
}
g_events->eventPlayerOnLoseExperience(this, exp);
if (exp == 0) {
return;
}
uint64_t lostExp = experience;
experience = std::max<int64_t>(0, experience - exp);
if (sendText) {
lostExp -= experience;
std::string expString = std::to_string(lostExp) + (lostExp != 1 ? " experience points." : " experience point.");
TextMessage message(MESSAGE_EXPERIENCE, "You lost " + expString);
message.position = _position;
message.primary.value = lostExp;
message.primary.color = TEXTCOLOR_RED;
sendTextMessage(message);
SpectatorVec list;
g_game.map.getSpectators(list, _position, false, true);
list.erase(this);
if (!list.empty()) {
message.type = MESSAGE_EXPERIENCE_OTHERS;
message.text = getName() + " lost " + expString;
for (Creature* spectator : list) {
spectator->getPlayer()->sendTextMessage(message);
}
}
}
uint32_t oldLevel = level;
uint64_t currLevelExp = Player::getExpForLevel(level);
while (level > 1 && experience < currLevelExp) {
--level;
healthMax = std::max<int32_t>(0, healthMax - vocation->getHPGain());
manaMax = std::max<int32_t>(0, manaMax - vocation->getManaGain());
capacity = std::max<int32_t>(0, capacity - vocation->getCapGain());
currLevelExp = Player::getExpForLevel(level);
}
if (oldLevel != level) {
health = healthMax;
mana = manaMax;
updateBaseSpeed();
setBaseSpeed(getBaseSpeed());
g_game.changeSpeed(this, 0);
g_game.addCreatureHealth(this);
if (party) {
party->updateSharedExperience();
}
std::ostringstream ss;
ss << "You were downgraded from Level " << oldLevel << " to Level " << level << '.';
sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str());
}
uint64_t nextLevelExp = Player::getExpForLevel(level + 1);
if (nextLevelExp > currLevelExp) {
levelPercent = Player::getPercentLevel(experience - currLevelExp, nextLevelExp - currLevelExp);
} else {
levelPercent = 0;
}
sendStats();
}
uint8_t Player::getPercentLevel(uint64_t count, uint64_t nextLevelCount)
{
if (nextLevelCount == 0) {
return 0;
}
uint8_t result = (count * 100) / nextLevelCount;
if (result > 100) {
return 0;
}
return result;
}
void Player::onBlockHit()
{
if (shieldBlockCount > 0) {
--shieldBlockCount;
if (hasShield()) {
addSkillAdvance(SKILL_SHIELD, 1);
}
}
}
void Player::onAttackedCreatureBlockHit(BlockType_t blockType)
{
lastAttackBlockType = blockType;
switch (blockType) {
case BLOCK_NONE: {
addAttackSkillPoint = true;
bloodHitCount = 30;
shieldBlockCount = 30;
break;
}
case BLOCK_DEFENSE:
case BLOCK_ARMOR: {
//need to draw blood every 30 hits
if (bloodHitCount > 0) {
addAttackSkillPoint = true;
--bloodHitCount;
} else {
addAttackSkillPoint = false;
}
break;
}
default: {
addAttackSkillPoint = false;
break;
}
}
}
bool Player::hasShield() const
{
Item* item = inventory[CONST_SLOT_LEFT];
if (item && item->getWeaponType() == WEAPON_SHIELD) {
return true;
}
item = inventory[CONST_SLOT_RIGHT];
if (item && item->getWeaponType() == WEAPON_SHIELD) {
return true;
}
return false;
}
BlockType_t Player::blockHit(Creature* attacker, CombatType_t combatType, int32_t& damage,
bool checkDefense /* = false*/, bool checkArmor /* = false*/, bool field /* = false*/)
{
BlockType_t blockType = Creature::blockHit(attacker, combatType, damage, checkDefense, checkArmor, field);
if (attacker) {
sendCreatureSquare(attacker, SQ_COLOR_BLACK);
}
if (blockType != BLOCK_NONE) {
return blockType;
}
if (damage > 0) {
for (int32_t slot = CONST_SLOT_FIRST; slot <= CONST_SLOT_LAST; ++slot) {
if (!isItemAbilityEnabled(static_cast<slots_t>(slot))) {
continue;
}
Item* item = inventory[slot];
if (!item) {
continue;
}
const ItemType& it = Item::items[item->getID()];
if (it.abilities) {
const int16_t& absorbPercent = it.abilities->absorbPercent[combatTypeToIndex(combatType)];
if (absorbPercent != 0) {
damage -= std::ceil(damage * (absorbPercent / 100.));
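					// e.g. absorbPercent 10 reduces 200 incoming damage by ceil(20) to 180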
uint16_t charges = item->getCharges();
if (charges != 0) {
g_game.transformItem(item, item->getID(), charges - 1);
}
}
if (field) {
const int16_t& fieldAbsorbPercent = it.abilities->fieldAbsorbPercent[combatTypeToIndex(combatType)];
if (fieldAbsorbPercent != 0) {
damage -= std::ceil(damage * (fieldAbsorbPercent / 100.));
uint16_t charges = item->getCharges();
if (charges != 0) {
g_game.transformItem(item, item->getID(), charges - 1);
}
}
}
}
}
if (damage <= 0) {
damage = 0;
blockType = BLOCK_ARMOR;
}
}
return blockType;
}
uint32_t Player::getIP() const
{
if (client) {
return client->getIP();
}
return 0;
}
void Player::death(Creature* _lastHitCreature)
{
loginPosition = town->getTemplePosition();
if (skillLoss) {
uint8_t unfairFightReduction = 100;
if (_lastHitCreature) {
Player* lastHitPlayer = _lastHitCreature->getPlayer();
if (!lastHitPlayer) {
Creature* lastHitMaster = _lastHitCreature->getMaster();
if (lastHitMaster) {
lastHitPlayer = lastHitMaster->getPlayer();
}
}
if (lastHitPlayer) {
uint32_t sumLevels = 0;
uint32_t inFightTicks = g_config.getNumber(ConfigManager::PZ_LOCKED);
for (const auto& it : damageMap) {
CountBlock_t cb = it.second;
if ((OTSYS_TIME() - cb.ticks) <= inFightTicks) {
Player* damageDealer = g_game.getPlayerByID(it.first);
if (damageDealer) {
sumLevels += damageDealer->getLevel();
}
}
}
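			// Worked example: a level 100 victim whose recent attackers total
			// 400 levels gives reduce = 100/400 = 0.25, so unfairFightReduction =
			// max(20, 25) = 25; deathLossPercent below then scales the normal
			// death loss down to 25%.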
if (sumLevels > level) {
double reduce = level / static_cast<double>(sumLevels);
unfairFightReduction = std::max<uint8_t>(20, std::floor((reduce * 100) + 0.5));
}
}
}
//Magic level loss
uint64_t sumMana = 0;
uint64_t lostMana = 0;
//sum up all the mana
for (uint32_t i = 1; i <= magLevel; ++i) {
sumMana += vocation->getReqMana(i);
}
sumMana += manaSpent;
double deathLossPercent = getLostPercent() * (unfairFightReduction / 100.);
lostMana = static_cast<uint64_t>(sumMana * deathLossPercent);
while (lostMana > manaSpent && magLevel > 0) {
lostMana -= manaSpent;
manaSpent = vocation->getReqMana(magLevel);
magLevel--;
}
manaSpent -= lostMana;
uint64_t nextReqMana = vocation->getReqMana(magLevel + 1);
if (nextReqMana > vocation->getReqMana(magLevel)) {
magLevelPercent = Player::getPercentLevel(manaSpent, nextReqMana);
} else {
magLevelPercent = 0;
}
//Skill loss
for (uint8_t i = SKILL_FIRST; i <= SKILL_LAST; ++i) { //for each skill
uint64_t sumSkillTries = 0;
for (uint16_t c = 11; c <= skills[i].level; ++c) { //sum up all required tries for all skill levels
sumSkillTries += vocation->getReqSkillTries(i, c);
}
sumSkillTries += skills[i].tries;
uint32_t lostSkillTries = static_cast<uint32_t>(sumSkillTries * deathLossPercent);
while (lostSkillTries > skills[i].tries) {
lostSkillTries -= skills[i].tries;
if (skills[i].level <= 10) {
skills[i].level = 10;
skills[i].tries = 0;
lostSkillTries = 0;
break;
}
skills[i].tries = vocation->getReqSkillTries(i, skills[i].level);
skills[i].level--;
}
skills[i].tries = std::max<int32_t>(0, skills[i].tries - lostSkillTries);
skills[i].percent = Player::getPercentLevel(skills[i].tries, vocation->getReqSkillTries(i, skills[i].level));
}
//Level loss
uint64_t expLoss = static_cast<uint64_t>(experience * deathLossPercent);
g_events->eventPlayerOnLoseExperience(this, expLoss);
if (expLoss != 0) {
uint32_t oldLevel = level;
if (vocation->getId() == VOCATION_NONE || level > 7) {
experience -= expLoss;
}
while (level > 1 && experience < Player::getExpForLevel(level)) {
--level;
healthMax = std::max<int32_t>(0, healthMax - vocation->getHPGain());
manaMax = std::max<int32_t>(0, manaMax - vocation->getManaGain());
capacity = std::max<int32_t>(0, capacity - vocation->getCapGain());
}
if (oldLevel != level) {
std::ostringstream ss;
ss << "You were downgraded from Level " << oldLevel << " to Level " << level << '.';
sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str());
}
uint64_t currLevelExp = Player::getExpForLevel(level);
uint64_t nextLevelExp = Player::getExpForLevel(level + 1);
if (nextLevelExp > currLevelExp) {
levelPercent = Player::getPercentLevel(experience - currLevelExp, nextLevelExp - currLevelExp);
} else {
levelPercent = 0;
}
}
std::bitset<6> bitset(blessings);
if (bitset[5]) {
Player* lastHitPlayer;
if (_lastHitCreature) {
lastHitPlayer = _lastHitCreature->getPlayer();
if (!lastHitPlayer) {
Creature* lastHitMaster = _lastHitCreature->getMaster();
if (lastHitMaster) {
lastHitPlayer = lastHitMaster->getPlayer();
}
}
} else {
lastHitPlayer = nullptr;
}
if (lastHitPlayer) {
bitset.reset(5);
blessings = bitset.to_ulong();
} else {
blessings = 32;
}
} else {
blessings = 0;
}
sendStats();
sendSkills();
sendReLoginWindow(unfairFightReduction);
if (getSkull() == SKULL_BLACK) {
health = 40;
mana = 0;
} else {
health = healthMax;
mana = manaMax;
}
auto it = conditions.begin(), end = conditions.end();
while (it != end) {
Condition* condition = *it;
if (condition->isPersistent()) {
it = conditions.erase(it);
condition->endCondition(this);
onEndCondition(condition->getType());
delete condition;
} else {
++it;
}
}
} else {
setLossSkill(true);
auto it = conditions.begin(), end = conditions.end();
while (it != end) {
Condition* condition = *it;
if (condition->isPersistent()) {
it = conditions.erase(it);
condition->endCondition(this);
onEndCondition(condition->getType());
delete condition;
} else {
++it;
}
}
health = healthMax;
g_game.internalTeleport(this, getTemplePosition(), true);
g_game.addCreatureHealth(this);
onThink(EVENT_CREATURE_THINK_INTERVAL);
onIdleStatus();
sendStats();
}
}
bool Player::dropCorpse(Creature* _lastHitCreature, Creature* mostDamageCreature, bool lastHitUnjustified, bool mostDamageUnjustified)
{
if (getZone() == ZONE_PVP) {
setDropLoot(true);
return false;
}
return Creature::dropCorpse(_lastHitCreature, mostDamageCreature, lastHitUnjustified, mostDamageUnjustified);
}
Item* Player::getCorpse(Creature* _lastHitCreature, Creature* mostDamageCreature)
{
Item* corpse = Creature::getCorpse(_lastHitCreature, mostDamageCreature);
if (corpse && corpse->getContainer()) {
std::ostringstream ss;
if (_lastHitCreature) {
ss << "You recognize " << getNameDescription() << ". " << (getSex() == PLAYERSEX_FEMALE ? "She" : "He") << " was killed by " << _lastHitCreature->getNameDescription() << '.';
} else {
ss << "You recognize " << getNameDescription() << '.';
}
corpse->setSpecialDescription(ss.str());
}
return corpse;
}
void Player::addInFightTicks(bool pzlock /*= false*/)
{
if (hasFlag(PlayerFlag_NotGainInFight)) {
return;
}
if (pzlock) {
pzLocked = true;
}
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_INFIGHT, g_config.getNumber(ConfigManager::PZ_LOCKED), 0);
addCondition(condition);
}
void Player::removeList()
{
g_game.removePlayer(this);
for (const auto& it : g_game.getPlayers()) {
it.second->notifyStatusChange(this, VIPSTATUS_OFFLINE);
}
}
void Player::addList()
{
for (const auto& it : g_game.getPlayers()) {
it.second->notifyStatusChange(this, VIPSTATUS_ONLINE);
}
g_game.addPlayer(this);
}
void Player::kickPlayer(bool displayEffect)
{
g_creatureEvents->playerLogout(this);
if (client) {
client->logout(displayEffect, true);
} else {
g_game.removeCreature(this);
}
}
void Player::notifyStatusChange(Player* loginPlayer, VipStatus_t status)
{
if (!client) {
return;
}
auto it = VIPList.find(loginPlayer->guid);
if (it == VIPList.end()) {
return;
}
client->sendUpdatedVIPStatus(loginPlayer->guid, status);
if (status == VIPSTATUS_ONLINE) {
client->sendTextMessage(TextMessage(MESSAGE_STATUS_SMALL, loginPlayer->getName() + " has logged in."));
} else if (status == VIPSTATUS_OFFLINE) {
client->sendTextMessage(TextMessage(MESSAGE_STATUS_SMALL, loginPlayer->getName() + " has logged out."));
}
}
bool Player::removeVIP(uint32_t vipGuid)
{
if (VIPList.erase(vipGuid) == 0) {
return false;
}
IOLoginData::removeVIPEntry(accountNumber, vipGuid);
return true;
}
bool Player::addVIP(uint32_t vipGuid, const std::string& vipName, VipStatus_t status)
{
if (guid == vipGuid) {
sendTextMessage(MESSAGE_STATUS_SMALL, "You cannot add yourself.");
return false;
}
if (VIPList.size() >= getMaxVIPEntries() || VIPList.size() == 200) { // max number of buddies is 200 in 9.53
sendTextMessage(MESSAGE_STATUS_SMALL, "You cannot add more buddies.");
return false;
}
auto result = VIPList.insert(vipGuid);
if (!result.second) {
sendTextMessage(MESSAGE_STATUS_SMALL, "This player is already in your list.");
return false;
}
IOLoginData::addVIPEntry(accountNumber, vipGuid, "", 0, false);
if (client) {
client->sendVIP(vipGuid, vipName, "", 0, false, status);
}
return true;
}
bool Player::addVIPInternal(uint32_t vipGuid)
{
if (guid == vipGuid) {
return false;
}
if (VIPList.size() >= getMaxVIPEntries() || VIPList.size() == 200) { // max number of buddies is 200 in 9.53
return false;
}
return VIPList.insert(vipGuid).second;
}
bool Player::editVIP(uint32_t vipGuid, const std::string& description, uint32_t icon, bool notify)
{
auto it = VIPList.find(vipGuid);
if (it == VIPList.end()) {
return false; // player is not in VIP
}
IOLoginData::editVIPEntry(accountNumber, vipGuid, description, icon, notify);
return true;
}
//close container and its child containers
void Player::autoCloseContainers(const Container* container)
{
std::vector<uint32_t> closeList;
for (const auto& it : openContainers) {
Container* tmpContainer = it.second.container;
while (tmpContainer) {
if (tmpContainer->isRemoved() || tmpContainer == container) {
closeList.push_back(it.first);
break;
}
tmpContainer = dynamic_cast<Container*>(tmpContainer->getParent());
}
}
for (uint32_t containerId : closeList) {
closeContainer(containerId);
if (client) {
client->sendCloseContainer(containerId);
}
}
}
bool Player::hasCapacity(const Item* item, uint32_t count) const
{
if (hasFlag(PlayerFlag_CannotPickupItem)) {
return false;
}
if (hasFlag(PlayerFlag_HasInfiniteCapacity) || item->getTopParent() == this) {
return true;
}
uint32_t itemWeight = item->getContainer() != nullptr ? item->getWeight() : item->getBaseWeight();
if (item->isStackable()) {
itemWeight *= count;
}
return itemWeight <= getFreeCapacity();
}
ReturnValue Player::queryAdd(int32_t index, const Thing& thing, uint32_t count, uint32_t flags, Creature*) const
{
const Item* item = thing.getItem();
if (item == nullptr) {
return RETURNVALUE_NOTPOSSIBLE;
}
bool childIsOwner = hasBitSet(FLAG_CHILDISOWNER, flags);
if (childIsOwner) {
		//a child container is querying the player, just check if there is enough capacity
bool skipLimit = hasBitSet(FLAG_NOLIMIT, flags);
if (skipLimit || hasCapacity(item, count)) {
return RETURNVALUE_NOERROR;
}
return RETURNVALUE_NOTENOUGHCAPACITY;
}
if (!item->isPickupable()) {
return RETURNVALUE_CANNOTPICKUP;
}
ReturnValue ret = RETURNVALUE_NOERROR;
const int32_t& slotPosition = item->getSlotPosition();
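	//derive the default answer from the item's slot flags; the switch below
	//lets the slot actually being queried override it with NOERROR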
if ((slotPosition & SLOTP_HEAD) || (slotPosition & SLOTP_NECKLACE) ||
(slotPosition & SLOTP_BACKPACK) || (slotPosition & SLOTP_ARMOR) ||
(slotPosition & SLOTP_LEGS) || (slotPosition & SLOTP_FEET) ||
(slotPosition & SLOTP_RING)) {
ret = RETURNVALUE_CANNOTBEDRESSED;
} else if (slotPosition & SLOTP_TWO_HAND) {
ret = RETURNVALUE_PUTTHISOBJECTINBOTHHANDS;
} else if ((slotPosition & SLOTP_RIGHT) || (slotPosition & SLOTP_LEFT)) {
if (!g_config.getBoolean(ConfigManager::CLASSIC_EQUIPMENT_SLOTS)) {
ret = RETURNVALUE_CANNOTBEDRESSED;
} else {
ret = RETURNVALUE_PUTTHISOBJECTINYOURHAND;
}
}
switch (index) {
case CONST_SLOT_HEAD: {
if (slotPosition & SLOTP_HEAD) {
ret = RETURNVALUE_NOERROR;
}
break;
}
case CONST_SLOT_NECKLACE: {
if (slotPosition & SLOTP_NECKLACE) {
ret = RETURNVALUE_NOERROR;
}
break;
}
case CONST_SLOT_BACKPACK: {
if (slotPosition & SLOTP_BACKPACK) {
ret = RETURNVALUE_NOERROR;
}
break;
}
case CONST_SLOT_ARMOR: {
if (slotPosition & SLOTP_ARMOR) {
ret = RETURNVALUE_NOERROR;
}
break;
}
case CONST_SLOT_RIGHT: {
if (slotPosition & SLOTP_RIGHT) {
if (!g_config.getBoolean(ConfigManager::CLASSIC_EQUIPMENT_SLOTS)) {
if (item->getWeaponType() != WEAPON_SHIELD) {
ret = RETURNVALUE_CANNOTBEDRESSED;
} else {
const Item* leftItem = inventory[CONST_SLOT_LEFT];
if (leftItem) {
if ((leftItem->getSlotPosition() | slotPosition) & SLOTP_TWO_HAND) {
ret = RETURNVALUE_BOTHHANDSNEEDTOBEFREE;
} else {
ret = RETURNVALUE_NOERROR;
}
} else {
ret = RETURNVALUE_NOERROR;
}
}
} else if (slotPosition & SLOTP_TWO_HAND) {
if (inventory[CONST_SLOT_LEFT] && inventory[CONST_SLOT_LEFT] != item) {
ret = RETURNVALUE_BOTHHANDSNEEDTOBEFREE;
} else {
ret = RETURNVALUE_NOERROR;
}
} else if (inventory[CONST_SLOT_LEFT]) {
const Item* leftItem = inventory[CONST_SLOT_LEFT];
WeaponType_t type = item->getWeaponType(), leftType = leftItem->getWeaponType();
if (leftItem->getSlotPosition() & SLOTP_TWO_HAND) {
ret = RETURNVALUE_DROPTWOHANDEDITEM;
} else if (item == leftItem && count == item->getItemCount()) {
ret = RETURNVALUE_NOERROR;
} else if (leftType == WEAPON_SHIELD && type == WEAPON_SHIELD) {
ret = RETURNVALUE_CANONLYUSEONESHIELD;
} else if (leftType == WEAPON_NONE || type == WEAPON_NONE ||
leftType == WEAPON_SHIELD || leftType == WEAPON_AMMO
|| type == WEAPON_SHIELD || type == WEAPON_AMMO) {
ret = RETURNVALUE_NOERROR;
} else {
ret = RETURNVALUE_CANONLYUSEONEWEAPON;
}
} else {
ret = RETURNVALUE_NOERROR;
}
}
break;
}
case CONST_SLOT_LEFT: {
if (slotPosition & SLOTP_LEFT) {
if (!g_config.getBoolean(ConfigManager::CLASSIC_EQUIPMENT_SLOTS)) {
WeaponType_t type = item->getWeaponType();
if (type == WEAPON_NONE || type == WEAPON_SHIELD) {
ret = RETURNVALUE_CANNOTBEDRESSED;
} else if (inventory[CONST_SLOT_RIGHT] && (slotPosition & SLOTP_TWO_HAND)) {
ret = RETURNVALUE_BOTHHANDSNEEDTOBEFREE;
} else {
ret = RETURNVALUE_NOERROR;
}
} else if (slotPosition & SLOTP_TWO_HAND) {
if (inventory[CONST_SLOT_RIGHT] && inventory[CONST_SLOT_RIGHT] != item) {
ret = RETURNVALUE_BOTHHANDSNEEDTOBEFREE;
} else {
ret = RETURNVALUE_NOERROR;
}
} else if (inventory[CONST_SLOT_RIGHT]) {
const Item* rightItem = inventory[CONST_SLOT_RIGHT];
WeaponType_t type = item->getWeaponType(), rightType = rightItem->getWeaponType();
if (rightItem->getSlotPosition() & SLOTP_TWO_HAND) {
ret = RETURNVALUE_DROPTWOHANDEDITEM;
} else if (item == rightItem && count == item->getItemCount()) {
ret = RETURNVALUE_NOERROR;
} else if (rightType == WEAPON_SHIELD && type == WEAPON_SHIELD) {
ret = RETURNVALUE_CANONLYUSEONESHIELD;
} else if (rightType == WEAPON_NONE || type == WEAPON_NONE ||
rightType == WEAPON_SHIELD || rightType == WEAPON_AMMO
|| type == WEAPON_SHIELD || type == WEAPON_AMMO) {
ret = RETURNVALUE_NOERROR;
} else {
ret = RETURNVALUE_CANONLYUSEONEWEAPON;
}
} else {
ret = RETURNVALUE_NOERROR;
}
}
break;
}
case CONST_SLOT_LEGS: {
if (slotPosition & SLOTP_LEGS) {
ret = RETURNVALUE_NOERROR;
}
break;
}
case CONST_SLOT_FEET: {
if (slotPosition & SLOTP_FEET) {
ret = RETURNVALUE_NOERROR;
}
break;
}
case CONST_SLOT_RING: {
if (slotPosition & SLOTP_RING) {
ret = RETURNVALUE_NOERROR;
}
break;
}
case CONST_SLOT_AMMO: {
if ((slotPosition & SLOTP_AMMO) || g_config.getBoolean(ConfigManager::CLASSIC_EQUIPMENT_SLOTS)) {
ret = RETURNVALUE_NOERROR;
}
break;
}
case CONST_SLOT_WHEREEVER:
case -1:
ret = RETURNVALUE_NOTENOUGHROOM;
break;
default:
ret = RETURNVALUE_NOTPOSSIBLE;
break;
}
if (ret == RETURNVALUE_NOERROR || ret == RETURNVALUE_NOTENOUGHROOM) {
//need an exchange with source?
const Item* inventoryItem = getInventoryItem(static_cast<slots_t>(index));
if (inventoryItem && (!inventoryItem->isStackable() || inventoryItem->getID() != item->getID())) {
return RETURNVALUE_NEEDEXCHANGE;
}
//check if enough capacity
if (!hasCapacity(item, count)) {
return RETURNVALUE_NOTENOUGHCAPACITY;
}
if (!g_moveEvents->onPlayerEquip(const_cast<Player*>(this), const_cast<Item*>(item), static_cast<slots_t>(index), true)) {
return RETURNVALUE_CANNOTBEDRESSED;
}
}
return ret;
}
ReturnValue Player::queryMaxCount(int32_t index, const Thing& thing, uint32_t count, uint32_t& maxQueryCount,
uint32_t flags) const
{
const Item* item = thing.getItem();
if (item == nullptr) {
maxQueryCount = 0;
return RETURNVALUE_NOTPOSSIBLE;
}
if (index == INDEX_WHEREEVER) {
uint32_t n = 0;
for (int32_t slotIndex = CONST_SLOT_FIRST; slotIndex <= CONST_SLOT_LAST; ++slotIndex) {
Item* inventoryItem = inventory[slotIndex];
if (inventoryItem) {
if (Container* subContainer = inventoryItem->getContainer()) {
uint32_t queryCount = 0;
subContainer->queryMaxCount(INDEX_WHEREEVER, *item, item->getItemCount(), queryCount, flags);
n += queryCount;
//iterate through all items, including sub-containers (deep search)
for (ContainerIterator it = subContainer->iterator(); it.hasNext(); it.advance()) {
if (Container* tmpContainer = (*it)->getContainer()) {
queryCount = 0;
tmpContainer->queryMaxCount(INDEX_WHEREEVER, *item, item->getItemCount(), queryCount, flags);
n += queryCount;
}
}
} else if (inventoryItem->isStackable() && item->equals(inventoryItem) && inventoryItem->getItemCount() < 100) {
uint32_t remainder = (100 - inventoryItem->getItemCount());
if (queryAdd(slotIndex, *item, remainder, flags) == RETURNVALUE_NOERROR) {
n += remainder;
}
}
} else if (queryAdd(slotIndex, *item, item->getItemCount(), flags) == RETURNVALUE_NOERROR) { //empty slot
if (item->isStackable()) {
n += 100;
} else {
++n;
}
}
}
maxQueryCount = n;
} else {
const Item* destItem = nullptr;
const Thing* destThing = getThing(index);
if (destThing) {
destItem = destThing->getItem();
}
if (destItem) {
if (destItem->isStackable() && item->equals(destItem) && destItem->getItemCount() < 100) {
maxQueryCount = 100 - destItem->getItemCount();
} else {
maxQueryCount = 0;
}
} else if (queryAdd(index, *item, count, flags) == RETURNVALUE_NOERROR) { //empty slot
if (item->isStackable()) {
maxQueryCount = 100;
} else {
maxQueryCount = 1;
}
return RETURNVALUE_NOERROR;
}
}
if (maxQueryCount < count) {
return RETURNVALUE_NOTENOUGHROOM;
} else {
return RETURNVALUE_NOERROR;
}
}
ReturnValue Player::queryRemove(const Thing& thing, uint32_t count, uint32_t flags) const
{
int32_t index = getThingIndex(&thing);
if (index == -1) {
return RETURNVALUE_NOTPOSSIBLE;
}
const Item* item = thing.getItem();
if (item == nullptr) {
return RETURNVALUE_NOTPOSSIBLE;
}
if (count == 0 || (item->isStackable() && count > item->getItemCount())) {
return RETURNVALUE_NOTPOSSIBLE;
}
if (!item->isMoveable() && !hasBitSet(FLAG_IGNORENOTMOVEABLE, flags)) {
return RETURNVALUE_NOTMOVEABLE;
}
return RETURNVALUE_NOERROR;
}
Cylinder* Player::queryDestination(int32_t& index, const Thing& thing, Item** destItem,
uint32_t& flags)
{
if (index == 0 /*drop to capacity window*/ || index == INDEX_WHEREEVER) {
*destItem = nullptr;
const Item* item = thing.getItem();
if (item == nullptr) {
return this;
}
bool autoStack = !((flags & FLAG_IGNOREAUTOSTACK) == FLAG_IGNOREAUTOSTACK);
bool isStackable = item->isStackable();
std::vector<Container*> containers;
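		//first pass over the equipment slots: prefer stacking onto an existing
		//item when auto-stack is on, and queue carried containers for the
		//breadth-first search below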
for (uint32_t slotIndex = CONST_SLOT_FIRST; slotIndex <= CONST_SLOT_LAST; ++slotIndex) {
Item* inventoryItem = inventory[slotIndex];
if (inventoryItem) {
if (inventoryItem == tradeItem) {
continue;
}
if (inventoryItem == item) {
continue;
}
if (autoStack && isStackable) {
					//try to find an existing item to stack with
if (queryAdd(slotIndex, *item, item->getItemCount(), 0) == RETURNVALUE_NOERROR) {
if (inventoryItem->equals(item) && inventoryItem->getItemCount() < 100) {
index = slotIndex;
*destItem = inventoryItem;
return this;
}
}
if (Container* subContainer = inventoryItem->getContainer()) {
containers.push_back(subContainer);
}
} else if (Container* subContainer = inventoryItem->getContainer()) {
containers.push_back(subContainer);
}
} else if (queryAdd(slotIndex, *item, item->getItemCount(), flags) == RETURNVALUE_NOERROR) { //empty slot
index = slotIndex;
*destItem = nullptr;
return this;
}
}
size_t i = 0;
while (i < containers.size()) {
Container* tmpContainer = containers[i++];
if (!autoStack || !isStackable) {
			//for non-stackable items we only need the first free slot in any container, as fast as we can
uint32_t n = tmpContainer->capacity() - tmpContainer->size();
while (n) {
if (tmpContainer->queryAdd(tmpContainer->capacity() - n, *item, item->getItemCount(), flags) == RETURNVALUE_NOERROR) {
index = tmpContainer->capacity() - n;
*destItem = nullptr;
return tmpContainer;
}
n--;
}
for (Item* tmpContainerItem : tmpContainer->getItemList()) {
if (Container* subContainer = tmpContainerItem->getContainer()) {
containers.push_back(subContainer);
}
}
continue;
}
uint32_t n = 0;
for (Item* tmpItem : tmpContainer->getItemList()) {
if (tmpItem == tradeItem) {
continue;
}
if (tmpItem == item) {
continue;
}
			//try to find an existing item to stack with
if (tmpItem->equals(item) && tmpItem->getItemCount() < 100) {
index = n;
*destItem = tmpItem;
return tmpContainer;
}
if (Container* subContainer = tmpItem->getContainer()) {
containers.push_back(subContainer);
}
n++;
}
if (n < tmpContainer->capacity() && tmpContainer->queryAdd(n, *item, item->getItemCount(), flags) == RETURNVALUE_NOERROR) {
index = n;
*destItem = nullptr;
return tmpContainer;
}
}
return this;
}
Thing* destThing = getThing(index);
if (destThing) {
*destItem = destThing->getItem();
}
Cylinder* subCylinder = dynamic_cast<Cylinder*>(destThing);
if (subCylinder) {
index = INDEX_WHEREEVER;
*destItem = nullptr;
return subCylinder;
} else {
return this;
}
}
void Player::addThing(int32_t index, Thing* thing)
{
if (index < CONST_SLOT_FIRST || index > CONST_SLOT_LAST) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
Item* item = thing->getItem();
if (!item) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
item->setParent(this);
inventory[index] = item;
//send to client
sendInventoryItem(static_cast<slots_t>(index), item);
}
void Player::updateThing(Thing* thing, uint16_t itemId, uint32_t count)
{
int32_t index = getThingIndex(thing);
if (index == -1) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
Item* item = thing->getItem();
if (!item) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
item->setID(itemId);
item->setSubType(count);
//send to client
sendInventoryItem(static_cast<slots_t>(index), item);
//event methods
onUpdateInventoryItem(item, item);
}
void Player::replaceThing(uint32_t index, Thing* thing)
{
if (index > CONST_SLOT_LAST) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
Item* oldItem = getInventoryItem(static_cast<slots_t>(index));
if (!oldItem) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
Item* item = thing->getItem();
if (!item) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
//send to client
sendInventoryItem(static_cast<slots_t>(index), item);
//event methods
onUpdateInventoryItem(oldItem, item);
item->setParent(this);
inventory[index] = item;
}
void Player::removeThing(Thing* thing, uint32_t count)
{
Item* item = thing->getItem();
if (!item) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
int32_t index = getThingIndex(thing);
if (index == -1) {
return /*RETURNVALUE_NOTPOSSIBLE*/;
}
if (item->isStackable()) {
if (count == item->getItemCount()) {
//send change to client
sendInventoryItem(static_cast<slots_t>(index), nullptr);
//event methods
onRemoveInventoryItem(item);
item->setParent(nullptr);
inventory[index] = nullptr;
} else {
uint8_t newCount = static_cast<uint8_t>(std::max<int32_t>(0, item->getItemCount() - count));
item->setItemCount(newCount);
//send change to client
sendInventoryItem(static_cast<slots_t>(index), item);
//event methods
onUpdateInventoryItem(item, item);
}
} else {
//send change to client
sendInventoryItem(static_cast<slots_t>(index), nullptr);
//event methods
onRemoveInventoryItem(item);
item->setParent(nullptr);
inventory[index] = nullptr;
}
}
int32_t Player::getThingIndex(const Thing* thing) const
{
for (int i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; ++i) {
if (inventory[i] == thing) {
return i;
}
}
return -1;
}
size_t Player::getFirstIndex() const
{
return CONST_SLOT_FIRST;
}
size_t Player::getLastIndex() const
{
return CONST_SLOT_LAST + 1;
}
uint32_t Player::getItemTypeCount(uint16_t itemId, int32_t subType /*= -1*/) const
{
uint32_t count = 0;
for (int32_t i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; i++) {
Item* item = inventory[i];
if (!item) {
continue;
}
if (item->getID() == itemId) {
count += Item::countByType(item, subType);
}
if (Container* container = item->getContainer()) {
for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) {
if ((*it)->getID() == itemId) {
count += Item::countByType(*it, subType);
}
}
}
}
return count;
}
bool Player::removeItemOfType(uint16_t itemId, uint32_t amount, int32_t subType, bool ignoreEquipped/* = false*/) const
{
if (amount == 0) {
return true;
}
std::vector<Item*> itemList;
uint32_t count = 0;
for (int32_t i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; i++) {
Item* item = inventory[i];
if (!item) {
continue;
}
if (!ignoreEquipped && item->getID() == itemId) {
uint32_t itemCount = Item::countByType(item, subType);
if (itemCount == 0) {
continue;
}
itemList.push_back(item);
count += itemCount;
if (count >= amount) {
g_game.internalRemoveItems(std::move(itemList), amount, Item::items[itemId].stackable);
return true;
}
} else if (Container* container = item->getContainer()) {
for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) {
Item* containerItem = *it;
if (containerItem->getID() == itemId) {
uint32_t itemCount = Item::countByType(containerItem, subType);
if (itemCount == 0) {
continue;
}
itemList.push_back(containerItem);
count += itemCount;
if (count >= amount) {
g_game.internalRemoveItems(std::move(itemList), amount, Item::items[itemId].stackable);
return true;
}
}
}
}
}
return false;
}
std::map<uint32_t, uint32_t>& Player::getAllItemTypeCount(std::map<uint32_t, uint32_t> &countMap) const
{
for (int32_t i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; i++) {
Item* item = inventory[i];
if (!item) {
continue;
}
countMap[item->getID()] += Item::countByType(item, -1);
if (Container* container = item->getContainer()) {
for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) {
countMap[(*it)->getID()] += Item::countByType(*it, -1);
}
}
}
return countMap;
}
Thing* Player::getThing(size_t index) const
{
if (index >= CONST_SLOT_FIRST && index <= CONST_SLOT_LAST) {
return inventory[index];
}
return nullptr;
}
void Player::postAddNotification(Thing* thing, const Cylinder* oldParent, int32_t index, cylinderlink_t link /*= LINK_OWNER*/)
{
if (link == LINK_OWNER) {
//calling movement scripts
g_moveEvents->onPlayerEquip(this, thing->getItem(), static_cast<slots_t>(index), false);
}
bool requireListUpdate = true;
if (link == LINK_OWNER || link == LINK_TOPPARENT) {
const Item* i = (oldParent ? oldParent->getItem() : nullptr);
// Check if we owned the old container too, so we don't need to do anything,
// as the list was updated in postRemoveNotification
assert(i ? i->getContainer() != nullptr : true);
if (i) {
requireListUpdate = i->getContainer()->getHoldingPlayer() != this;
} else {
requireListUpdate = oldParent != this;
}
updateInventoryWeight();
updateItemsLight();
sendStats();
}
if (const Item* item = thing->getItem()) {
if (const Container* container = item->getContainer()) {
onSendContainer(container);
}
if (shopOwner && requireListUpdate) {
updateSaleShopList(item);
}
} else if (const Creature* creature = thing->getCreature()) {
if (creature == this) {
//check containers
std::vector<Container*> containers;
for (const auto& it : openContainers) {
Container* container = it.second.container;
if (!Position::areInRange<1, 1, 0>(container->getPosition(), getPosition())) {
containers.push_back(container);
}
}
for (const Container* container : containers) {
autoCloseContainers(container);
}
}
}
}
void Player::postRemoveNotification(Thing* thing, const Cylinder* newParent, int32_t index, cylinderlink_t link /*= LINK_OWNER*/)
{
if (link == LINK_OWNER) {
//calling movement scripts
g_moveEvents->onPlayerDeEquip(this, thing->getItem(), static_cast<slots_t>(index));
}
bool requireListUpdate = true;
if (link == LINK_OWNER || link == LINK_TOPPARENT) {
const Item* i = (newParent ? newParent->getItem() : nullptr);
		// Check if we own the new container too; if so there is nothing to do here,
		// as the list will be updated in postAddNotification
assert(i ? i->getContainer() != nullptr : true);
if (i) {
requireListUpdate = i->getContainer()->getHoldingPlayer() != this;
} else {
requireListUpdate = newParent != this;
}
updateInventoryWeight();
updateItemsLight();
sendStats();
}
if (const Item* item = thing->getItem()) {
if (const Container* container = item->getContainer()) {
if (container->isRemoved() || !Position::areInRange<1, 1, 0>(getPosition(), container->getPosition())) {
autoCloseContainers(container);
} else if (container->getTopParent() == this) {
onSendContainer(container);
} else if (const Container* topContainer = dynamic_cast<const Container*>(container->getTopParent())) {
if (const DepotChest* depotChest = dynamic_cast<const DepotChest*>(topContainer)) {
bool isOwner = false;
for (const auto& it : depotChests) {
if (it.second == depotChest) {
isOwner = true;
onSendContainer(container);
}
}
if (!isOwner) {
autoCloseContainers(container);
}
} else {
onSendContainer(container);
}
} else {
autoCloseContainers(container);
}
}
if (shopOwner && requireListUpdate) {
updateSaleShopList(item);
}
}
}
bool Player::updateSaleShopList(const Item* item)
{
uint16_t itemId = item->getID();
if (itemId != ITEM_GOLD_COIN && itemId != ITEM_PLATINUM_COIN && itemId != ITEM_CRYSTAL_COIN) {
auto it = std::find_if(shopItemList.begin(), shopItemList.end(), [itemId](const ShopInfo& shopInfo) { return shopInfo.itemId == itemId && shopInfo.sellPrice != 0; });
if (it == shopItemList.end()) {
const Container* container = item->getContainer();
if (!container) {
return false;
}
const auto& items = container->getItemList();
return std::any_of(items.begin(), items.end(), [this](const Item* containerItem) {
return updateSaleShopList(containerItem);
});
}
}
if (client) {
client->sendSaleItemList(shopItemList);
}
return true;
}
bool Player::hasShopItemForSale(uint32_t itemId, uint8_t subType) const
{
const ItemType& itemType = Item::items[itemId];
return std::any_of(shopItemList.begin(), shopItemList.end(), [&](const ShopInfo& shopInfo) {
return shopInfo.itemId == itemId && shopInfo.buyPrice != 0 && (!itemType.isFluidContainer() || shopInfo.subType == subType);
});
}
void Player::internalAddThing(Thing* thing)
{
internalAddThing(0, thing);
}
void Player::internalAddThing(uint32_t index, Thing* thing)
{
Item* item = thing->getItem();
if (!item) {
return;
}
	//index == 0 means we should equip this item at the most appropriate slot (no action required here)
if (index > 0 && index < 11) {
if (inventory[index]) {
return;
}
inventory[index] = item;
item->setParent(this);
}
}
bool Player::setFollowCreature(Creature* creature)
{
if (!Creature::setFollowCreature(creature)) {
setFollowCreature(nullptr);
setAttackedCreature(nullptr);
sendCancelMessage(RETURNVALUE_THEREISNOWAY);
sendCancelTarget();
stopWalk();
return false;
}
return true;
}
bool Player::setAttackedCreature(Creature* creature)
{
if (!Creature::setAttackedCreature(creature)) {
sendCancelTarget();
return false;
}
if (chaseMode == CHASEMODE_FOLLOW && creature) {
if (followCreature != creature) {
//chase opponent
setFollowCreature(creature);
}
} else if (followCreature) {
setFollowCreature(nullptr);
}
if (creature) {
g_dispatcher.addTask(createTask(std::bind(&Game::checkCreatureAttack, &g_game, getID())));
}
return true;
}
void Player::goToFollowCreature()
{
if (!walkTask) {
if ((OTSYS_TIME() - lastFailedFollow) < 2000) {
return;
}
Creature::goToFollowCreature();
if (followCreature && !hasFollowPath) {
lastFailedFollow = OTSYS_TIME();
}
}
}
void Player::getPathSearchParams(const Creature* creature, FindPathParams& fpp) const
{
Creature::getPathSearchParams(creature, fpp);
fpp.fullPathSearch = true;
}
void Player::doAttacking(uint32_t)
{
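	//first swing after login: backdate lastAttack so the attack can fire
	//immediately instead of waiting a full attack-speed interval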
if (lastAttack == 0) {
lastAttack = OTSYS_TIME() - getAttackSpeed() - 1;
}
if (hasCondition(CONDITION_PACIFIED)) {
return;
}
if ((OTSYS_TIME() - lastAttack) >= getAttackSpeed()) {
bool result = false;
Item* tool = getWeapon();
const Weapon* weapon = g_weapons->getWeapon(tool);
if (weapon) {
if (!weapon->interruptSwing()) {
result = weapon->useWeapon(this, tool, attackedCreature);
} else if (!canDoAction()) {
uint32_t delay = getNextActionTime();
SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::checkCreatureAttack,
&g_game, getID()));
setNextActionTask(task);
} else {
result = weapon->useWeapon(this, tool, attackedCreature);
}
} else {
result = Weapon::useFist(this, attackedCreature);
}
if (result) {
lastAttack = OTSYS_TIME();
}
}
}
uint64_t Player::getGainedExperience(Creature* attacker) const
{
if (g_config.getBoolean(ConfigManager::EXPERIENCE_FROM_PLAYERS)) {
Player* attackerPlayer = attacker->getPlayer();
if (attackerPlayer && attackerPlayer != this && skillLoss && std::abs(static_cast<int32_t>(attackerPlayer->getLevel() - level)) <= g_config.getNumber(ConfigManager::EXP_FROM_PLAYERS_LEVEL_RANGE)) {
return std::max<uint64_t>(0, std::floor(getLostExperience() * getDamageRatio(attacker) * 0.75));
}
}
return 0;
}
void Player::onFollowCreature(const Creature* creature)
{
if (!creature) {
stopWalk();
}
}
void Player::setChaseMode(chaseMode_t mode)
{
chaseMode_t prevChaseMode = chaseMode;
chaseMode = mode;
if (prevChaseMode != chaseMode) {
if (chaseMode == CHASEMODE_FOLLOW) {
if (!followCreature && attackedCreature) {
//chase opponent
setFollowCreature(attackedCreature);
}
} else if (attackedCreature) {
setFollowCreature(nullptr);
cancelNextWalk = true;
}
}
}
void Player::onWalkAborted()
{
setNextWalkActionTask(nullptr);
sendCancelWalk();
}
void Player::onWalkComplete()
{
if (walkTask) {
walkTaskEvent = g_scheduler.addEvent(walkTask);
walkTask = nullptr;
}
}
void Player::stopWalk()
{
cancelNextWalk = true;
}
void Player::getCreatureLight(LightInfo& light) const
{
if (internalLight.level > itemsLight.level) {
light = internalLight;
} else {
light = itemsLight;
}
}
void Player::updateItemsLight(bool internal /*=false*/)
{
LightInfo maxLight;
LightInfo curLight;
for (int32_t i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; ++i) {
Item* item = inventory[i];
if (item) {
item->getLight(curLight);
if (curLight.level > maxLight.level) {
maxLight = curLight;
}
}
}
if (itemsLight.level != maxLight.level || itemsLight.color != maxLight.color) {
itemsLight = maxLight;
if (!internal) {
g_game.changeLight(this);
}
}
}
void Player::onAddCondition(ConditionType_t type)
{
Creature::onAddCondition(type);
if (type == CONDITION_OUTFIT && isMounted()) {
dismount();
}
sendIcons();
}
void Player::onAddCombatCondition(ConditionType_t type)
{
switch (type) {
case CONDITION_POISON:
sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are poisoned.");
break;
case CONDITION_DROWN:
sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are drowning.");
break;
case CONDITION_PARALYZE:
sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are paralyzed.");
break;
case CONDITION_DRUNK:
sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are drunk.");
break;
case CONDITION_CURSED:
sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are cursed.");
break;
case CONDITION_FREEZING:
sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are freezing.");
break;
case CONDITION_DAZZLED:
sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are dazzled.");
break;
case CONDITION_BLEEDING:
sendTextMessage(MESSAGE_STATUS_DEFAULT, "You are bleeding.");
break;
default:
break;
}
}
void Player::onEndCondition(ConditionType_t type)
{
Creature::onEndCondition(type);
if (type == CONDITION_INFIGHT) {
onIdleStatus();
pzLocked = false;
clearAttacked();
if (getSkull() != SKULL_RED && getSkull() != SKULL_BLACK) {
setSkull(SKULL_NONE);
}
}
sendIcons();
}
void Player::onCombatRemoveCondition(Condition* condition)
{
//Creature::onCombatRemoveCondition(condition);
if (condition->getId() > 0) {
		//the condition comes from an item; its id encodes the equipment slot
if (g_game.getWorldType() == WORLD_TYPE_PVP_ENFORCED) {
Item* item = getInventoryItem(static_cast<slots_t>(condition->getId()));
if (item) {
//25% chance to destroy the item
if (25 >= uniform_random(1, 100)) {
g_game.internalRemoveItem(item);
}
}
}
} else {
if (!canDoAction()) {
const uint32_t delay = getNextActionTime();
const int32_t ticks = delay - (delay % EVENT_CREATURE_THINK_INTERVAL);
if (ticks < 0) {
removeCondition(condition);
} else {
condition->setTicks(ticks);
}
} else {
removeCondition(condition);
}
}
}
void Player::onAttackedCreature(Creature* target)
{
Creature::onAttackedCreature(target);
if (target == this) {
addInFightTicks();
return;
}
if (hasFlag(PlayerFlag_NotGainInFight)) {
return;
}
Player* targetPlayer = target->getPlayer();
if (targetPlayer && !isPartner(targetPlayer) && !isGuildMate(targetPlayer)) {
if (!pzLocked && g_game.getWorldType() == WORLD_TYPE_PVP_ENFORCED) {
pzLocked = true;
sendIcons();
}
if (getSkull() == SKULL_NONE && getSkullClient(targetPlayer) == SKULL_YELLOW) {
addAttacked(targetPlayer);
targetPlayer->sendCreatureSkull(this);
} else if (!targetPlayer->hasAttacked(this)) {
if (!pzLocked) {
pzLocked = true;
sendIcons();
}
if (!Combat::isInPvpZone(this, targetPlayer) && !isInWar(targetPlayer)) {
addAttacked(targetPlayer);
if (targetPlayer->getSkull() == SKULL_NONE && getSkull() == SKULL_NONE) {
setSkull(SKULL_WHITE);
}
if (getSkull() == SKULL_NONE) {
targetPlayer->sendCreatureSkull(this);
}
}
}
}
addInFightTicks();
}
void Player::onAttacked()
{
Creature::onAttacked();
addInFightTicks();
}
void Player::onIdleStatus()
{
Creature::onIdleStatus();
if (party) {
party->clearPlayerPoints(this);
}
}
void Player::onPlacedCreature()
{
//scripting event - onLogin
if (!g_creatureEvents->playerLogin(this)) {
kickPlayer(true);
}
}
void Player::onAttackedCreatureDrainHealth(Creature* target, int32_t points)
{
Creature::onAttackedCreatureDrainHealth(target, points);
if (target) {
if (party && !Combat::isPlayerCombat(target)) {
Monster* tmpMonster = target->getMonster();
if (tmpMonster && tmpMonster->isHostile()) {
//We have fulfilled a requirement for shared experience
party->updatePlayerTicks(this, points);
}
}
}
}
void Player::onTargetCreatureGainHealth(Creature* target, int32_t points)
{
if (target && party) {
Player* tmpPlayer = nullptr;
if (target->getPlayer()) {
tmpPlayer = target->getPlayer();
} else if (Creature* targetMaster = target->getMaster()) {
if (Player* targetMasterPlayer = targetMaster->getPlayer()) {
tmpPlayer = targetMasterPlayer;
}
}
if (isPartner(tmpPlayer)) {
party->updatePlayerTicks(this, points);
}
}
}
bool Player::onKilledCreature(Creature* target, bool lastHit/* = true*/)
{
bool unjustified = false;
if (hasFlag(PlayerFlag_NotGenerateLoot)) {
target->setDropLoot(false);
}
Creature::onKilledCreature(target, lastHit);
if (Player* targetPlayer = target->getPlayer()) {
		if (targetPlayer->getZone() == ZONE_PVP) {
targetPlayer->setDropLoot(false);
targetPlayer->setLossSkill(false);
} else if (!hasFlag(PlayerFlag_NotGainInFight) && !isPartner(targetPlayer)) {
if (!Combat::isInPvpZone(this, targetPlayer) && hasAttacked(targetPlayer) && !targetPlayer->hasAttacked(this) && !isGuildMate(targetPlayer) && targetPlayer != this) {
if (targetPlayer->getSkull() == SKULL_NONE && !isInWar(targetPlayer)) {
unjustified = true;
addUnjustifiedDead(targetPlayer);
}
if (lastHit && hasCondition(CONDITION_INFIGHT)) {
pzLocked = true;
Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_INFIGHT, g_config.getNumber(ConfigManager::WHITE_SKULL_TIME), 0);
addCondition(condition);
}
}
}
}
return unjustified;
}
void Player::gainExperience(uint64_t gainExp, Creature* source)
{
if (hasFlag(PlayerFlag_NotGainExperience) || gainExp == 0 || staminaMinutes == 0) {
return;
}
addExperience(source, gainExp, true);
}
void Player::onGainExperience(uint64_t gainExp, Creature* target)
{
if (hasFlag(PlayerFlag_NotGainExperience)) {
return;
}
if (target && !target->getPlayer() && party && party->isSharedExperienceActive() && party->isSharedExperienceEnabled()) {
party->shareExperience(gainExp, target);
//We will get a share of the experience through the sharing mechanism
return;
}
Creature::onGainExperience(gainExp, target);
gainExperience(gainExp, target);
}
void Player::onGainSharedExperience(uint64_t gainExp, Creature* source)
{
gainExperience(gainExp, source);
}
bool Player::isImmune(CombatType_t type) const
{
if (hasFlag(PlayerFlag_CannotBeAttacked)) {
return true;
}
return Creature::isImmune(type);
}
bool Player::isImmune(ConditionType_t type) const
{
if (hasFlag(PlayerFlag_CannotBeAttacked)) {
return true;
}
return Creature::isImmune(type);
}
bool Player::isAttackable() const
{
return !hasFlag(PlayerFlag_CannotBeAttacked);
}
void Player::changeHealth(int32_t healthChange, bool sendHealthChange/* = true*/)
{
Creature::changeHealth(healthChange, sendHealthChange);
sendStats();
}
void Player::changeMana(int32_t manaChange)
{
if (!hasFlag(PlayerFlag_HasInfiniteMana)) {
Creature::changeMana(manaChange);
}
sendStats();
}
void Player::changeSoul(int32_t soulChange)
{
if (soulChange > 0) {
soul += std::min<int32_t>(soulChange, vocation->getSoulMax() - soul);
} else {
soul = std::max<int32_t>(0, soul + soulChange);
}
sendStats();
}
bool Player::canWear(uint32_t lookType, uint8_t addons) const
{
if (group->access) {
return true;
}
const Outfit* outfit = Outfits::getInstance()->getOutfitByLookType(sex, lookType);
if (!outfit) {
return false;
}
if (outfit->premium && !isPremium()) {
return false;
}
if (outfit->unlocked && addons == 0) {
return true;
}
for (const OutfitEntry& outfitEntry : outfits) {
if (outfitEntry.lookType != lookType) {
continue;
}
return (outfitEntry.addons & addons) == addons;
}
return false;
}
bool Player::canLogout()
{
if (isConnecting) {
return false;
}
if (getTile()->hasFlag(TILESTATE_NOLOGOUT)) {
return false;
}
if (getTile()->hasFlag(TILESTATE_PROTECTIONZONE)) {
return true;
}
return !isPzLocked() && !hasCondition(CONDITION_INFIGHT);
}
void Player::genReservedStorageRange()
{
//generate outfits range
uint32_t base_key = PSTRG_OUTFITS_RANGE_START;
for (const OutfitEntry& entry : outfits) {
storageMap[++base_key] = (entry.lookType << 16) | entry.addons;
}
}
void Player::addOutfit(uint16_t lookType, uint8_t addons)
{
for (OutfitEntry& outfitEntry : outfits) {
if (outfitEntry.lookType == lookType) {
outfitEntry.addons |= addons;
return;
}
}
outfits.emplace_back(lookType, addons);
}
bool Player::removeOutfit(uint16_t lookType)
{
for (auto it = outfits.begin(), end = outfits.end(); it != end; ++it) {
OutfitEntry& entry = *it;
if (entry.lookType == lookType) {
outfits.erase(it);
return true;
}
}
return false;
}
bool Player::removeOutfitAddon(uint16_t lookType, uint8_t addons)
{
for (OutfitEntry& outfitEntry : outfits) {
if (outfitEntry.lookType == lookType) {
outfitEntry.addons &= ~addons;
return true;
}
}
return false;
}
bool Player::getOutfitAddons(const Outfit& outfit, uint8_t& addons) const
{
if (group->access) {
addons = 3;
return true;
}
if (outfit.premium && !isPremium()) {
return false;
}
for (const OutfitEntry& outfitEntry : outfits) {
if (outfitEntry.lookType != outfit.lookType) {
continue;
}
addons = outfitEntry.addons;
return true;
}
if (!outfit.unlocked) {
return false;
}
addons = 0;
return true;
}
void Player::setSex(PlayerSex_t newSex)
{
sex = newSex;
}
Skulls_t Player::getSkull() const
{
if (hasFlag(PlayerFlag_NotGainInFight)) {
return SKULL_NONE;
}
return skull;
}
Skulls_t Player::getSkullClient(const Creature* creature) const
{
if (!creature || g_game.getWorldType() != WORLD_TYPE_PVP) {
return SKULL_NONE;
}
const Player* player = creature->getPlayer();
if (player && player->getSkull() == SKULL_NONE) {
if (isInWar(player)) {
return SKULL_GREEN;
}
if (!player->getGuildWarList().empty() && guild == player->getGuild()) {
return SKULL_GREEN;
}
if (player->hasAttacked(this)) {
return SKULL_YELLOW;
}
if (isPartner(player)) {
return SKULL_GREEN;
}
}
return Creature::getSkullClient(creature);
}
bool Player::hasAttacked(const Player* attacked) const
{
if (hasFlag(PlayerFlag_NotGainInFight) || !attacked) {
return false;
}
return attackedSet.find(attacked->guid) != attackedSet.end();
}
void Player::addAttacked(const Player* attacked)
{
if (hasFlag(PlayerFlag_NotGainInFight) || !attacked || attacked == this) {
return;
}
attackedSet.insert(attacked->guid);
}
void Player::clearAttacked()
{
attackedSet.clear();
}
void Player::addUnjustifiedDead(const Player* attacked)
{
if (hasFlag(PlayerFlag_NotGainInFight) || attacked == this || g_game.getWorldType() == WORLD_TYPE_PVP_ENFORCED) {
return;
}
sendTextMessage(MESSAGE_EVENT_ADVANCE, "Warning! The murder of " + attacked->getName() + " was not justified.");
skullTicks += g_config.getNumber(ConfigManager::FRAG_TIME);
if (getSkull() != SKULL_BLACK) {
if (g_config.getNumber(ConfigManager::KILLS_TO_BLACK) != 0 && skullTicks > (g_config.getNumber(ConfigManager::KILLS_TO_BLACK) - 1) * static_cast<int64_t>(g_config.getNumber(ConfigManager::FRAG_TIME))) {
setSkull(SKULL_BLACK);
} else if (getSkull() != SKULL_RED && g_config.getNumber(ConfigManager::KILLS_TO_RED) != 0 && skullTicks > (g_config.getNumber(ConfigManager::KILLS_TO_RED) - 1) * static_cast<int64_t>(g_config.getNumber(ConfigManager::FRAG_TIME))) {
setSkull(SKULL_RED);
}
}
}
void Player::checkSkullTicks(int32_t ticks)
{
int32_t newTicks = skullTicks - ticks;
if (newTicks < 0) {
skullTicks = 0;
} else {
skullTicks = newTicks;
}
if ((skull == SKULL_RED || skull == SKULL_BLACK) && skullTicks < 1000 && !hasCondition(CONDITION_INFIGHT)) {
setSkull(SKULL_NONE);
}
}
bool Player::isPromoted() const
{
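	//promoted means the current vocation is a promotion endpoint: it has no
	//further promotion and is not the base VOCATION_NONE itself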
uint16_t promotedVocation = g_vocations.getPromotedVocation(vocation->getId());
return promotedVocation == VOCATION_NONE && vocation->getId() != promotedVocation;
}
double Player::getLostPercent() const
{
int32_t blessingCount = std::bitset<5>(blessings).count();
int32_t deathLosePercent = g_config.getNumber(ConfigManager::DEATH_LOSE_PERCENT);
if (deathLosePercent != -1) {
if (isPromoted()) {
deathLosePercent -= 3;
}
deathLosePercent -= blessingCount;
return std::max<int32_t>(0, deathLosePercent) / 100.;
}
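	//no config override: fall back to the classic formula, where the loss is a
	//cubic of the level divided by total experience, so the percentage shrinks
	//as experience accumulates; promotion and blessings reduce it further below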
double lossPercent;
if (level >= 25) {
double tmpLevel = level + (levelPercent / 100.);
lossPercent = static_cast<double>((tmpLevel + 50) * 50 * ((tmpLevel * tmpLevel) - (5 * tmpLevel) + 8)) / experience;
} else {
lossPercent = 10;
}
if (isPromoted()) {
lossPercent *= 0.7;
}
return lossPercent * pow(0.92, blessingCount) / 100;
}
void Player::learnInstantSpell(const std::string& spellName)
{
if (!hasLearnedInstantSpell(spellName)) {
learnedInstantSpellList.push_front(spellName);
}
}
void Player::forgetInstantSpell(const std::string& spellName)
{
learnedInstantSpellList.remove(spellName);
}
bool Player::hasLearnedInstantSpell(const std::string& spellName) const
{
if (hasFlag(PlayerFlag_CannotUseSpells)) {
return false;
}
if (hasFlag(PlayerFlag_IgnoreSpellCheck)) {
return true;
}
for (const auto& learnedSpellName : learnedInstantSpellList) {
if (strcasecmp(learnedSpellName.c_str(), spellName.c_str()) == 0) {
return true;
}
}
return false;
}
bool Player::isInWar(const Player* player) const
{
if (!player || !guild) {
return false;
}
const Guild* playerGuild = player->getGuild();
if (!playerGuild) {
return false;
}
return isInWarList(playerGuild->getId()) && player->isInWarList(guild->getId());
}
bool Player::isInWarList(uint32_t guildId) const
{
return std::find(guildWarList.begin(), guildWarList.end(), guildId) != guildWarList.end();
}
bool Player::isPremium() const
{
if (g_config.getBoolean(ConfigManager::FREE_PREMIUM) || hasFlag(PlayerFlag_IsAlwaysPremium)) {
return true;
}
return premiumDays > 0;
}
void Player::setPremiumDays(int32_t v)
{
premiumDays = v;
sendBasicData();
}
PartyShields_t Player::getPartyShield(const Player* player) const
{
if (!player) {
return SHIELD_NONE;
}
if (party) {
if (party->getLeader() == player) {
if (party->isSharedExperienceActive()) {
if (party->isSharedExperienceEnabled()) {
return SHIELD_YELLOW_SHAREDEXP;
}
if (party->canUseSharedExperience(player)) {
return SHIELD_YELLOW_NOSHAREDEXP;
}
return SHIELD_YELLOW_NOSHAREDEXP_BLINK;
}
return SHIELD_YELLOW;
}
if (player->party == party) {
if (party->isSharedExperienceActive()) {
if (party->isSharedExperienceEnabled()) {
return SHIELD_BLUE_SHAREDEXP;
}
if (party->canUseSharedExperience(player)) {
return SHIELD_BLUE_NOSHAREDEXP;
}
return SHIELD_BLUE_NOSHAREDEXP_BLINK;
}
return SHIELD_BLUE;
}
if (isInviting(player)) {
return SHIELD_WHITEBLUE;
}
}
if (player->isInviting(this)) {
return SHIELD_WHITEYELLOW;
}
if (player->party) {
return SHIELD_GRAY;
}
return SHIELD_NONE;
}
bool Player::isInviting(const Player* player) const
{
if (!player || !party || party->getLeader() != this) {
return false;
}
return party->isPlayerInvited(player);
}
bool Player::isPartner(const Player* player) const
{
if (!player || !party) {
return false;
}
return party == player->party;
}
bool Player::isGuildMate(const Player* player) const
{
if (!player || !guild) {
return false;
}
return guild == player->guild;
}
void Player::sendPlayerPartyIcons(Player* player)
{
sendCreatureShield(player);
sendCreatureSkull(player);
}
bool Player::addPartyInvitation(Party* party)
{
auto it = std::find(invitePartyList.begin(), invitePartyList.end(), party);
if (it != invitePartyList.end()) {
return false;
}
invitePartyList.push_front(party);
return true;
}
void Player::removePartyInvitation(Party* party)
{
invitePartyList.remove(party);
}
void Player::clearPartyInvitations()
{
for (Party* invitingParty : invitePartyList) {
invitingParty->removeInvite(*this, false);
}
invitePartyList.clear();
}
GuildEmblems_t Player::getGuildEmblem(const Player* player) const
{
if (!player) {
return GUILDEMBLEM_NONE;
}
const Guild* playerGuild = player->getGuild();
if (!playerGuild) {
return GUILDEMBLEM_NONE;
}
if (player->getGuildWarList().empty()) {
if (guild == playerGuild) {
return GUILDEMBLEM_MEMBER;
} else {
return GUILDEMBLEM_OTHER;
}
} else if (guild == playerGuild) {
return GUILDEMBLEM_ALLY;
} else if (isInWar(player)) {
return GUILDEMBLEM_ENEMY;
}
return GUILDEMBLEM_NEUTRAL;
}
uint8_t Player::getCurrentMount() const
{
int32_t value;
if (getStorageValue(PSTRG_MOUNTS_CURRENTMOUNT, value)) {
return value;
}
return 0;
}
void Player::setCurrentMount(uint8_t mount)
{
addStorageValue(PSTRG_MOUNTS_CURRENTMOUNT, mount);
}
bool Player::toggleMount(bool mount)
{
if ((OTSYS_TIME() - lastToggleMount) < 3000 && !wasMounted) {
sendCancelMessage(RETURNVALUE_YOUAREEXHAUSTED);
return false;
}
if (mount) {
if (isMounted()) {
return false;
}
if (!group->access && _tile->hasFlag(TILESTATE_PROTECTIONZONE)) {
sendCancelMessage(RETURNVALUE_ACTIONNOTPERMITTEDINPROTECTIONZONE);
return false;
}
const Outfit* playerOutfit = Outfits::getInstance()->getOutfitByLookType(getSex(), defaultOutfit.lookType);
if (!playerOutfit) {
return false;
}
uint8_t currentMountId = getCurrentMount();
if (currentMountId == 0) {
sendOutfitWindow();
return false;
}
Mount* currentMount = g_game.mounts.getMountByID(currentMountId);
if (!currentMount) {
return false;
}
if (!hasMount(currentMount)) {
setCurrentMount(0);
sendOutfitWindow();
return false;
}
if (currentMount->premium && !isPremium()) {
sendCancelMessage(RETURNVALUE_YOUNEEDPREMIUMACCOUNT);
return false;
}
if (hasCondition(CONDITION_OUTFIT)) {
sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
return false;
}
defaultOutfit.lookMount = currentMount->clientId;
if (currentMount->speed != 0) {
g_game.changeSpeed(this, currentMount->speed);
}
} else {
if (!isMounted()) {
return false;
}
dismount();
}
g_game.internalCreatureChangeOutfit(this, defaultOutfit);
lastToggleMount = OTSYS_TIME();
return true;
}
bool Player::tameMount(uint8_t mountId)
{
if (!g_game.mounts.getMountByID(mountId)) {
return false;
}
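	//mount ids are 1-based; each storage value packs 31 mounts as a bitmask so
	//the value always stays within the positive range of int32_t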
const uint8_t tmpMountId = mountId - 1;
const uint32_t key = PSTRG_MOUNTS_RANGE_START + (tmpMountId / 31);
int32_t value;
if (getStorageValue(key, value)) {
value |= (1 << (tmpMountId % 31));
} else {
value = (1 << (tmpMountId % 31));
}
addStorageValue(key, value);
return true;
}
bool Player::untameMount(uint8_t mountId)
{
if (!g_game.mounts.getMountByID(mountId)) {
return false;
}
const uint8_t tmpMountId = mountId - 1;
const uint32_t key = PSTRG_MOUNTS_RANGE_START + (tmpMountId / 31);
int32_t value;
if (!getStorageValue(key, value)) {
return true;
}
value &= ~(1 << (tmpMountId % 31));
addStorageValue(key, value);
if (getCurrentMount() == mountId) {
if (isMounted()) {
dismount();
g_game.internalCreatureChangeOutfit(this, defaultOutfit);
}
setCurrentMount(0);
}
return true;
}
bool Player::hasMount(const Mount* mount) const
{
if (isAccessPlayer()) {
return true;
}
if (mount->premium && !isPremium()) {
return false;
}
const uint8_t tmpMountId = mount->id - 1;
int32_t value;
if (!getStorageValue(PSTRG_MOUNTS_RANGE_START + (tmpMountId / 31), value)) {
return false;
}
return ((1 << (tmpMountId % 31)) & value) != 0;
}
void Player::dismount()
{
Mount* mount = g_game.mounts.getMountByID(getCurrentMount());
if (mount && mount->speed > 0) {
g_game.changeSpeed(this, -mount->speed);
}
defaultOutfit.lookMount = 0;
}
bool Player::addOfflineTrainingTries(skills_t skill, uint64_t tries)
{
if (tries == 0 || skill == SKILL_LEVEL) {
return false;
}
bool sendUpdate = false;
uint32_t oldSkillValue, newSkillValue;
long double oldPercentToNextLevel, newPercentToNextLevel;
if (skill == SKILL_MAGLEVEL) {
uint64_t currReqMana = vocation->getReqMana(magLevel);
uint64_t nextReqMana = vocation->getReqMana(magLevel + 1);
if (currReqMana >= nextReqMana) {
return false;
}
oldSkillValue = magLevel;
oldPercentToNextLevel = static_cast<long double>(manaSpent * 100) / nextReqMana;
g_events->eventPlayerOnGainSkillTries(this, SKILL_MAGLEVEL, tries);
uint32_t currMagLevel = magLevel;
while ((manaSpent + tries) >= nextReqMana) {
tries -= nextReqMana - manaSpent;
magLevel++;
manaSpent = 0;
g_creatureEvents->playerAdvance(this, SKILL_MAGLEVEL, magLevel - 1, magLevel);
sendUpdate = true;
currReqMana = nextReqMana;
nextReqMana = vocation->getReqMana(magLevel + 1);
if (currReqMana >= nextReqMana) {
tries = 0;
break;
}
}
manaSpent += tries;
if (magLevel != currMagLevel) {
std::ostringstream ss;
ss << "You advanced to magic level " << magLevel << '.';
sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str());
}
uint8_t newPercent;
if (nextReqMana > currReqMana) {
newPercent = Player::getPercentLevel(manaSpent, nextReqMana);
newPercentToNextLevel = static_cast<long double>(manaSpent * 100) / nextReqMana;
} else {
newPercent = 0;
newPercentToNextLevel = 0;
}
if (newPercent != magLevelPercent) {
magLevelPercent = newPercent;
sendUpdate = true;
}
newSkillValue = magLevel;
} else {
uint64_t currReqTries = vocation->getReqSkillTries(skill, skills[skill].level);
uint64_t nextReqTries = vocation->getReqSkillTries(skill, skills[skill].level + 1);
if (currReqTries >= nextReqTries) {
return false;
}
oldSkillValue = skills[skill].level;
oldPercentToNextLevel = static_cast<long double>(skills[skill].tries * 100) / nextReqTries;
g_events->eventPlayerOnGainSkillTries(this, skill, tries);
uint32_t currSkillLevel = skills[skill].level;
while ((skills[skill].tries + tries) >= nextReqTries) {
tries -= nextReqTries - skills[skill].tries;
skills[skill].level++;
skills[skill].tries = 0;
skills[skill].percent = 0;
g_creatureEvents->playerAdvance(this, skill, (skills[skill].level - 1), skills[skill].level);
sendUpdate = true;
currReqTries = nextReqTries;
nextReqTries = vocation->getReqSkillTries(skill, skills[skill].level + 1);
if (currReqTries >= nextReqTries) {
tries = 0;
break;
}
}
skills[skill].tries += tries;
if (currSkillLevel != skills[skill].level) {
std::ostringstream ss;
ss << "You advanced to " << getSkillName(skill) << " level " << skills[skill].level << '.';
sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str());
}
uint8_t newPercent;
if (nextReqTries > currReqTries) {
newPercent = Player::getPercentLevel(skills[skill].tries, nextReqTries);
newPercentToNextLevel = static_cast<long double>(skills[skill].tries * 100) / nextReqTries;
} else {
newPercent = 0;
newPercentToNextLevel = 0;
}
if (skills[skill].percent != newPercent) {
skills[skill].percent = newPercent;
sendUpdate = true;
}
newSkillValue = skills[skill].level;
}
if (sendUpdate) {
sendSkills();
}
std::ostringstream ss;
ss << std::fixed << std::setprecision(2) << "Your " << ucwords(getSkillName(skill)) << " skill changed from level " << oldSkillValue << " (with " << oldPercentToNextLevel << "% progress towards level " << (oldSkillValue + 1) << ") to level " << newSkillValue << " (with " << newPercentToNextLevel << "% progress towards level " << (newSkillValue + 1) << ')';
sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str());
return sendUpdate;
}
bool Player::hasModalWindowOpen(uint32_t modalWindowId) const
{
return find(modalWindows.begin(), modalWindows.end(), modalWindowId) != modalWindows.end();
}
void Player::onModalWindowHandled(uint32_t modalWindowId)
{
modalWindows.remove(modalWindowId);
}
void Player::sendModalWindow(const ModalWindow& modalWindow)
{
if (!client) {
return;
}
modalWindows.push_front(modalWindow.id);
client->sendModalWindow(modalWindow);
}
void Player::clearModalWindows()
{
modalWindows.clear();
}
uint16_t Player::getHelpers() const
{
uint16_t helpers;
if (guild && party) {
std::unordered_set<Player*> helperSet;
const auto& guildMembers = guild->getMembersOnline();
helperSet.insert(guildMembers.begin(), guildMembers.end());
const auto& partyMembers = party->getMembers();
helperSet.insert(partyMembers.begin(), partyMembers.end());
const auto& partyInvitees = party->getInvitees();
helperSet.insert(partyInvitees.begin(), partyInvitees.end());
helperSet.insert(party->getLeader());
helpers = helperSet.size();
} else if (guild) {
helpers = guild->getMembersOnline().size();
} else if (party) {
helpers = party->getMemberCount() + party->getInvitationCount() + 1;
} else {
helpers = 0;
}
return helpers;
}
void Player::sendClosePrivate(uint16_t channelId)
{
if (channelId == CHANNEL_GUILD || channelId == CHANNEL_PARTY) {
g_chat->removeUserFromChannel(*this, channelId);
}
if (client) {
client->sendClosePrivate(channelId);
}
}
uint64_t Player::getMoney() const
{
std::vector<const Container*> containers;
uint64_t moneyCount = 0;
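	//walk the equipped items, queueing containers for a breadth-first scan and
	//summing the worth of everything else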
for (int32_t i = CONST_SLOT_FIRST; i <= CONST_SLOT_LAST; ++i) {
Item* item = inventory[i];
if (!item) {
continue;
}
const Container* container = item->getContainer();
if (container) {
containers.push_back(container);
} else {
moneyCount += item->getWorth();
}
}
size_t i = 0;
while (i < containers.size()) {
const Container* container = containers[i++];
for (const Item* item : container->getItemList()) {
const Container* tmpContainer = item->getContainer();
if (tmpContainer) {
containers.push_back(tmpContainer);
} else {
moneyCount += item->getWorth();
}
}
}
return moneyCount;
}
size_t Player::getMaxVIPEntries() const
{
if (group->maxVipEntries != 0) {
return group->maxVipEntries;
} else if (isPremium()) {
return 100;
}
return 20;
}
size_t Player::getMaxDepotItems() const
{
if (group->maxDepotItems != 0) {
return group->maxDepotItems;
} else if (isPremium()) {
return 2000;
}
return 1000;
}
std::forward_list<Condition*> Player::getMuteConditions() const
{
std::forward_list<Condition*> muteConditions;
for (Condition* condition : conditions) {
if (condition->getTicks() <= 0) {
continue;
}
ConditionType_t type = condition->getType();
if (type != CONDITION_MUTED && type != CONDITION_CHANNELMUTEDTICKS && type != CONDITION_YELLTICKS) {
continue;
}
muteConditions.push_front(condition);
}
return muteConditions;
}
void Player::setGuild(Guild* guild)
{
if (guild == this->guild) {
return;
}
Guild* oldGuild = this->guild;
this->guildNick.clear();
this->guild = nullptr;
this->guildLevel = 0;
if (guild) {
const GuildRank* rank = guild->getRankByLevel(1);
if (!rank) {
return;
}
this->guild = guild;
this->guildLevel = 1;
guild->addMember(this);
}
if (oldGuild) {
oldGuild->removeMember(this);
}
}
| 1 | 12,377 | Trailing tab, remove it in another PR. | otland-forgottenserver | cpp |
@@ -156,6 +156,9 @@ func (w *taskWriter) allocTaskIDs(count int) ([]int64, error) {
if w.taskIDBlock.start > w.taskIDBlock.end {
// we ran out of current allocation block
newBlock, err := w.tlMgr.allocTaskIDBlock(w.taskIDBlock.end)
+ if err != nil && w.tlMgr.errShouldUnload(err) {
+ w.tlMgr.signalFatalProblem(w.tlMgr.taskQueueID)
+ }
if err != nil {
return nil, err
} | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package matching
import (
"sync/atomic"
commonpb "go.temporal.io/api/common/v1"
"go.temporal.io/api/serviceerror"
persistencespb "go.temporal.io/server/api/persistence/v1"
"go.temporal.io/server/common"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/tag"
"go.temporal.io/server/common/persistence"
)
type (
writeTaskResponse struct {
err error
persistenceResponse *persistence.CreateTasksResponse
}
writeTaskRequest struct {
execution *commonpb.WorkflowExecution
taskInfo *persistencespb.TaskInfo
responseCh chan<- *writeTaskResponse
}
taskIDBlock struct {
start int64
end int64
}
// taskWriter writes tasks sequentially to persistence
taskWriter struct {
status int32
tlMgr *taskQueueManagerImpl
config *taskQueueConfig
taskQueueID *taskQueueID
appendCh chan *writeTaskRequest
taskIDBlock taskIDBlock
maxReadLevel int64
logger log.Logger
shutdownChan chan struct{}
}
)
// errShutdown indicates that the task queue is shutting down
var errShutdown = &persistence.ConditionFailedError{Msg: "task queue shutting down"}
func newTaskWriter(
tlMgr *taskQueueManagerImpl,
) *taskWriter {
return &taskWriter{
status: common.DaemonStatusInitialized,
tlMgr: tlMgr,
config: tlMgr.config,
taskQueueID: tlMgr.taskQueueID,
appendCh: make(chan *writeTaskRequest, tlMgr.config.OutstandingTaskAppendsThreshold()),
logger: tlMgr.logger,
shutdownChan: make(chan struct{}),
}
}
func (w *taskWriter) Start(initialBlock taskIDBlock) {
if !atomic.CompareAndSwapInt32(
&w.status,
common.DaemonStatusInitialized,
common.DaemonStatusStarted,
) {
return
}
w.taskIDBlock = initialBlock
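	// maxReadLevel marks the highest task ID guaranteed to be written to
	// persistence; it trails the allocated block until writes complete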
w.maxReadLevel = initialBlock.start - 1
go w.taskWriterLoop()
}
// Stop stops the taskWriter
func (w *taskWriter) Stop() {
if !atomic.CompareAndSwapInt32(
&w.status,
common.DaemonStatusStarted,
common.DaemonStatusStopped,
) {
return
}
close(w.shutdownChan)
}
func (w *taskWriter) appendTask(
execution *commonpb.WorkflowExecution,
taskInfo *persistencespb.TaskInfo,
) (*persistence.CreateTasksResponse, error) {
select {
case <-w.shutdownChan:
return nil, errShutdown
default:
// noop
}
ch := make(chan *writeTaskResponse)
req := &writeTaskRequest{
execution: execution,
taskInfo: taskInfo,
responseCh: ch,
}
select {
case w.appendCh <- req:
select {
case r := <-ch:
return r.persistenceResponse, r.err
case <-w.shutdownChan:
// if we are shutting down, this request will never make
// it to Cassandra; just bail out and fail this request
return nil, errShutdown
}
default: // channel is full, throttle
return nil, serviceerror.NewResourceExhausted("Too many outstanding appends to the TaskQueue")
}
}
func (w *taskWriter) GetMaxReadLevel() int64 {
return atomic.LoadInt64(&w.maxReadLevel)
}
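// allocTaskIDs hands out count sequential task IDs from the writer's current
// ID block, fetching a fresh block from persistence whenever the current one
// is exhausted. Example (illustrative values): with taskIDBlock{start: 9,
// end: 10} and count == 3, IDs 9 and 10 drain the current block and the third
// ID comes from a newly allocated block starting after 10.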
func (w *taskWriter) allocTaskIDs(count int) ([]int64, error) {
result := make([]int64, count)
for i := 0; i < count; i++ {
if w.taskIDBlock.start > w.taskIDBlock.end {
// we ran out of the current allocation block
newBlock, err := w.tlMgr.allocTaskIDBlock(w.taskIDBlock.end)
if err != nil {
return nil, err
}
w.taskIDBlock = newBlock
}
result[i] = w.taskIDBlock.start
w.taskIDBlock.start++
}
return result, nil
}
func (w *taskWriter) appendTasks(
tasks []*persistencespb.AllocatedTaskInfo,
) (*persistence.CreateTasksResponse, error) {
resp, err := w.tlMgr.db.CreateTasks(tasks)
switch err.(type) {
case nil:
return resp, nil
case *persistence.ConditionFailedError:
w.tlMgr.Stop()
return nil, err
default:
w.logger.Error("Persistent store operation failure",
tag.StoreOperationCreateTask,
tag.Error(err),
tag.WorkflowTaskQueueName(w.taskQueueID.name),
tag.WorkflowTaskQueueType(w.taskQueueID.taskType),
)
return nil, err
}
}
func (w *taskWriter) taskWriterLoop() {
writerLoop:
for {
select {
case request := <-w.appendCh:
// read a batch of requests from the channel
reqs := []*writeTaskRequest{request}
reqs = w.getWriteBatch(reqs)
batchSize := len(reqs)
maxReadLevel := int64(0)
taskIDs, err := w.allocTaskIDs(batchSize)
if err != nil {
w.sendWriteResponse(reqs, nil, err)
continue writerLoop
}
var tasks []*persistencespb.AllocatedTaskInfo
for i, req := range reqs {
tasks = append(tasks, &persistencespb.AllocatedTaskInfo{
TaskId: taskIDs[i],
Data: req.taskInfo,
})
maxReadLevel = taskIDs[i]
}
resp, err := w.appendTasks(tasks)
w.sendWriteResponse(reqs, resp, err)
// Update the maxReadLevel after the writes are completed.
if maxReadLevel > 0 {
atomic.StoreInt64(&w.maxReadLevel, maxReadLevel)
}
case <-w.shutdownChan:
return
}
}
}
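// getWriteBatch opportunistically drains whatever requests are already queued
// on the append channel, up to MaxTaskBatchSize, so that one persistence write
// can cover many producers; it never blocks waiting for more work.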
func (w *taskWriter) getWriteBatch(reqs []*writeTaskRequest) []*writeTaskRequest {
readLoop:
for i := 0; i < w.config.MaxTaskBatchSize(); i++ {
select {
case req := <-w.appendCh:
reqs = append(reqs, req)
default: // channel is empty, don't block
break readLoop
}
}
return reqs
}
func (w *taskWriter) sendWriteResponse(
reqs []*writeTaskRequest,
persistenceResponse *persistence.CreateTasksResponse,
err error,
) {
for _, req := range reqs {
resp := &writeTaskResponse{
err: err,
persistenceResponse: persistenceResponse,
}
req.responseCh <- resp
}
}
| 1 | 12,287 | what about merging the error checking logic to within `errShouldUnload`? | temporalio-temporal | go |
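A minimal sketch of the reviewer's suggestion, assuming errShouldUnload and signalFatalProblem keep the signatures implied by the hunk above (the helper name signalIfFatal is hypothetical, not the actual API): fold the error check and the unload signal into one method so each persistence call site stays a single statement.

// hypothetical helper on taskQueueManagerImpl; a sketch, not the real method set
func (c *taskQueueManagerImpl) signalIfFatal(err error) {
	if err != nil && c.errShouldUnload(err) {
		c.signalFatalProblem(c.taskQueueID)
	}
}

With that in place, the added lines in allocTaskIDs reduce to a single w.tlMgr.signalIfFatal(err) before the existing error return.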
@@ -0,0 +1,6 @@
+class ArticlesController < ApplicationController
+ def index
+ @topic = Topic.find_by_slug(params[:id])
+ @articles = @topic.articles.by_published
+ end
+end | 1 | 1 | 6,444 | Probably want Topic.find_by_slug! to handle bogus topics. The next line will try to load articles off of nil otherwise. | thoughtbot-upcase | rb |
|
@@ -34,6 +34,11 @@ public:
static constexpr std::int64_t column_count = 2;
static constexpr std::int64_t element_count = row_count * column_count;
+ bool not_available_on_device() {
+ constexpr bool is_svd = std::is_same_v<Method, pca::method::svd>;
+ return get_policy().is_gpu() && is_svd;
+ }
+
auto get_descriptor() const {
return pca::descriptor<float, Method, pca::task::dim_reduction>{};
} | 1 | /*******************************************************************************
* Copyright 2020-2021 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <array>
#include "oneapi/dal/algo/pca/infer.hpp"
#include "oneapi/dal/algo/pca/train.hpp"
#include "oneapi/dal/table/row_accessor.hpp"
#include "oneapi/dal/test/engine/common.hpp"
#include "oneapi/dal/test/engine/fixtures.hpp"
namespace oneapi::dal::pca::test {
namespace te = dal::test::engine;
template <typename Method>
class pca_badarg_test : public te::algo_fixture {
public:
static constexpr std::int64_t row_count = 8;
static constexpr std::int64_t column_count = 2;
static constexpr std::int64_t element_count = row_count * column_count;
auto get_descriptor() const {
return pca::descriptor<float, Method, pca::task::dim_reduction>{};
}
table get_train_data(std::int64_t override_row_count = row_count,
std::int64_t override_column_count = column_count) const {
ONEDAL_ASSERT(override_row_count * override_column_count <= element_count);
return homogen_table::wrap(train_data_.data(), override_row_count, override_column_count);
}
table get_infer_data(std::int64_t override_row_count = row_count,
std::int64_t override_column_count = column_count) const {
ONEDAL_ASSERT(override_row_count * override_column_count <= element_count);
return homogen_table::wrap(infer_data_.data(), override_row_count, override_column_count);
}
private:
static constexpr std::array<float, element_count> train_data_ = {
1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 1.0, -1.0, -1.0, -1.0, -2.0, -2.0, -1.0, -2.0, -2.0
};
static constexpr std::array<float, element_count> infer_data_ = {
1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 1.0, -1.0, -1.0, -1.0, -2.0, -2.0, -1.0, -2.0, -2.0
};
};
#define PCA_BADARG_TEST(name) \
TEMPLATE_TEST_M(pca_badarg_test, name, "[pca][badarg]", pca::method::cov, pca::method::svd)
PCA_BADARG_TEST("accepts non-negative component_count") {
REQUIRE_NOTHROW(this->get_descriptor().set_component_count(0));
}
PCA_BADARG_TEST("throws if component_count is negative") {
REQUIRE_THROWS_AS(this->get_descriptor().set_component_count(-1), domain_error);
}
PCA_BADARG_TEST("throws if train data is empty") {
const auto pca_desc = this->get_descriptor().set_component_count(2);
REQUIRE_THROWS_AS(this->train(pca_desc, homogen_table{}), domain_error);
}
PCA_BADARG_TEST("throws if train data columns less than component count") {
const auto pca_desc = this->get_descriptor().set_component_count(4);
REQUIRE_THROWS_AS(this->train(pca_desc, this->get_train_data()), invalid_argument);
}
PCA_BADARG_TEST("throws if infer data is empty") {
const auto pca_desc = this->get_descriptor().set_component_count(2);
const auto model = this->train(pca_desc, this->get_train_data()).get_model();
REQUIRE_THROWS_AS(this->infer(pca_desc, model, homogen_table{}), domain_error);
}
PCA_BADARG_TEST("throws if component count neq eigenvector_rows") {
auto pca_desc = this->get_descriptor().set_component_count(2);
const auto model = this->train(pca_desc, this->get_train_data()).get_model();
pca_desc.set_component_count(4);
REQUIRE_THROWS_AS(this->infer(pca_desc, model, this->get_infer_data()), invalid_argument);
}
PCA_BADARG_TEST("throws if infer data column count neq eigenvector columns") {
const auto pca_desc = this->get_descriptor().set_component_count(2);
const auto model = this->train(pca_desc, this->get_train_data()).get_model();
const auto infer_data = this->get_infer_data(4, 4);
REQUIRE_THROWS_AS(this->infer(pca_desc, model, infer_data), invalid_argument);
}
} // namespace oneapi::dal::pca::test
| 1 | 27,196 | Should it really be done on test side? | oneapi-src-oneDAL | cpp |
@@ -350,7 +350,8 @@ func (mtask *managedTask) steadyState() bool {
}
}
-// cleanupCredentials removes credentials for a stopped task
+// cleanupCredentials removes credentials for a stopped task (execution credentials are removed in cleanupTask
+// due to its potential usage in the later phase of the task cleanup such as sending logs)
func (mtask *managedTask) cleanupCredentials() {
taskCredentialsID := mtask.GetCredentialsID()
if taskCredentialsID != "" { | 1 | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package engine
import (
"context"
"fmt"
"io"
"strconv"
"strings"
"sync"
"time"
"github.com/aws/amazon-ecs-agent/agent/engine/execcmd"
"github.com/aws/amazon-ecs-agent/agent/api"
apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container"
apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status"
apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors"
apitask "github.com/aws/amazon-ecs-agent/agent/api/task"
apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/credentials"
"github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi"
"github.com/aws/amazon-ecs-agent/agent/ecscni"
"github.com/aws/amazon-ecs-agent/agent/engine/dependencygraph"
"github.com/aws/amazon-ecs-agent/agent/eventstream"
"github.com/aws/amazon-ecs-agent/agent/logger"
"github.com/aws/amazon-ecs-agent/agent/logger/field"
"github.com/aws/amazon-ecs-agent/agent/statechange"
"github.com/aws/amazon-ecs-agent/agent/taskresource"
resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status"
"github.com/aws/amazon-ecs-agent/agent/utils/retry"
utilsync "github.com/aws/amazon-ecs-agent/agent/utils/sync"
"github.com/aws/amazon-ecs-agent/agent/utils/ttime"
)
const (
// waitForPullCredentialsTimeout is the timeout for the agent to wait for pull
// credentials from ACS; after the timeout it will check the credentials manager
// and start processing the task or start another round of waiting
waitForPullCredentialsTimeout = 1 * time.Minute
systemPingTimeout = 5 * time.Second
defaultTaskSteadyStatePollInterval = 5 * time.Minute
defaultTaskSteadyStatePollIntervalJitter = 30 * time.Second
transitionPollTime = 5 * time.Second
stoppedSentWaitInterval = 30 * time.Second
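// maxStoppedWaitTimes bounds how many stoppedSentWaitInterval polls are made
// while waiting for the STOPPED state to be reported as sent:
// 72h / 30s = 8640 polls.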
maxStoppedWaitTimes = 72 * time.Hour / stoppedSentWaitInterval
taskUnableToTransitionToStoppedReason = "TaskStateError: Agent could not progress task's state to stopped"
)
var (
_stoppedSentWaitInterval = stoppedSentWaitInterval
_maxStoppedWaitTimes = int(maxStoppedWaitTimes)
)
type dockerContainerChange struct {
container *apicontainer.Container
event dockerapi.DockerContainerChangeEvent
}
// resourceStateChange represents the required status change after resource transition
type resourceStateChange struct {
resource taskresource.TaskResource
nextState resourcestatus.ResourceStatus
err error
}
type acsTransition struct {
seqnum int64
desiredStatus apitaskstatus.TaskStatus
}
// containerTransition defines the struct for a container to transition
type containerTransition struct {
nextState apicontainerstatus.ContainerStatus
actionRequired bool
blockedOn *apicontainer.DependsOn
reason error
}
// resourceTransition defines the struct for a resource to transition.
type resourceTransition struct {
// nextState represents the next known status that the resource can move to
nextState resourcestatus.ResourceStatus
// status is the string value of nextState
status string
// actionRequired indicates if the transition function needs to be called for
// the transition to be complete
actionRequired bool
// reason represents the error blocks transition
reason error
}
// managedTask is a type that is meant to manage the lifecycle of a task.
// There should be only one managed task construct for a given task arn and the
// managed task should be the only thing to modify the task's known or desired statuses.
//
// The managedTask should run serially in a single goroutine in which it reads
// messages from the two given channels and acts upon them.
// This design is chosen to allow a safe level of isolation and avoid any race
// conditions around the state of a task.
// The data sources (e.g. docker, acs) that write to the task's channels may
// block and it is expected that the managedTask listen to those channels
// almost constantly.
// The general operation should be:
// 1) Listen to the channels
// 2) On an event, update the status of the task and containers (known/desired)
// 3) Figure out if any action needs to be done. If so, do it
// 4) GOTO 1
// Item '3' obviously might lead to some duration where you are not listening
// to the channels. However, this can be solved by kicking off '3' as a
// goroutine and then only communicating the result back via the channels
// (obviously once you kick off a goroutine you give up the right to write the
// task's statuses yourself)
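//
// A minimal sketch of that loop shape (illustrative names, not the real
// engine API):
//
//	for {
//		select {
//		case t := <-acsMessages: // 1) listen to the channels
//			updateDesiredStatus(t) // 2) update known/desired statuses
//		case c := <-dockerMessages:
//			updateKnownStatus(c)
//		}
//		progress() // 3) act; long actions run as goroutines reporting back via the channels
//	}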
type managedTask struct {
*apitask.Task
ctx context.Context
cancel context.CancelFunc
engine *DockerTaskEngine
cfg *config.Config
credentialsManager credentials.Manager
cniClient ecscni.CNIClient
dockerClient dockerapi.DockerClient
taskStopWG *utilsync.SequentialWaitGroup
acsMessages chan acsTransition
dockerMessages chan dockerContainerChange
resourceStateChangeEvent chan resourceStateChange
stateChangeEvents chan statechange.Event
containerChangeEventStream *eventstream.EventStream
// unexpectedStart is a once that controls stopping a container that
// unexpectedly started one time.
// This exists because a 'start' after a container is meant to be stopped is
// possible under some circumstances (e.g. a timeout). However, if it
// continues to 'start' when we aren't asking it to, let it go through in
// case it's a user trying to debug it or in case we're fighting with another
// thing managing the container.
unexpectedStart sync.Once
_time ttime.Time
_timeOnce sync.Once
// steadyStatePollInterval is the duration that a managed task waits
// once the task gets into steady state before polling the state of all of
// the task's containers to re-evaluate if the task is still in steady state
// This is set to defaultTaskSteadyStatePollInterval in production code.
// This can be used by tests that are looking to ensure that the steady state
// verification logic gets executed to set it to a low interval
steadyStatePollInterval time.Duration
steadyStatePollIntervalJitter time.Duration
}
// newManagedTask is a method on DockerTaskEngine to create a new managedTask.
// This method must only be called when the engine.processTasks write lock is
// already held.
func (engine *DockerTaskEngine) newManagedTask(task *apitask.Task) *managedTask {
ctx, cancel := context.WithCancel(engine.ctx)
t := &managedTask{
ctx: ctx,
cancel: cancel,
Task: task,
acsMessages: make(chan acsTransition),
dockerMessages: make(chan dockerContainerChange),
resourceStateChangeEvent: make(chan resourceStateChange),
engine: engine,
cfg: engine.cfg,
stateChangeEvents: engine.stateChangeEvents,
containerChangeEventStream: engine.containerChangeEventStream,
credentialsManager: engine.credentialsManager,
cniClient: engine.cniClient,
dockerClient: engine.client,
taskStopWG: engine.taskStopGroup,
steadyStatePollInterval: engine.taskSteadyStatePollInterval,
steadyStatePollIntervalJitter: engine.taskSteadyStatePollIntervalJitter,
}
engine.managedTasks[task.Arn] = t
return t
}
// overseeTask is the main goroutine of the managedTask. It runs an infinite
// loop of receiving messages and attempting to take action based on those
// messages.
func (mtask *managedTask) overseeTask() {
// Do a single updatestatus at the beginning to create the container
// `desiredstatus`es which are a construct of the engine used only here,
// not present on the backend
mtask.UpdateStatus()
// If this was a 'state restore', send all unsent statuses
mtask.emitCurrentStatus()
// Wait for host resources required by this task to become available
mtask.waitForHostResources()
// Main infinite loop. This is where we receive messages and dispatch work.
for {
if mtask.shouldExit() {
return
}
// If it's steadyState, just spin until we need to do work
for mtask.steadyState() {
mtask.waitSteady()
}
if mtask.shouldExit() {
return
}
if !mtask.GetKnownStatus().Terminal() {
// If we aren't terminal and we aren't steady state, we should be
// able to move some containers along.
logger.Debug("Task not steady state or terminal; progressing it", logger.Fields{
field.TaskARN: mtask.Arn,
})
mtask.progressTask()
}
if mtask.GetKnownStatus().Terminal() {
break
}
}
// We only break out of the above if this task is known to be stopped. Do
// onetime cleanup here, including removing the task after a timeout
logger.Info("Managed task has reached stopped; waiting for container cleanup", logger.Fields{
field.TaskARN: mtask.Arn,
})
mtask.engine.checkTearDownPauseContainer(mtask.Task)
mtask.cleanupCredentials()
if mtask.StopSequenceNumber != 0 {
logger.Debug("Marking done for this sequence", logger.Fields{
field.TaskARN: mtask.Arn,
field.Sequence: mtask.StopSequenceNumber,
})
mtask.taskStopWG.Done(mtask.StopSequenceNumber)
}
// TODO: make this idempotent on agent restart
go mtask.releaseIPInIPAM()
mtask.cleanupTask(retry.AddJitter(mtask.cfg.TaskCleanupWaitDuration, mtask.cfg.TaskCleanupWaitDurationJitter))
}
// shouldExit checks if the task manager should exit, as the agent is exiting.
func (mtask *managedTask) shouldExit() bool {
select {
case <-mtask.ctx.Done():
return true
default:
return false
}
}
// emitCurrentStatus emits a container event for every container and a task
// event for the task
func (mtask *managedTask) emitCurrentStatus() {
for _, container := range mtask.Containers {
mtask.emitContainerEvent(mtask.Task, container, "")
}
mtask.emitTaskEvent(mtask.Task, "")
}
// waitForHostResources waits for host resources to become available to start
// the task. This involves waiting for previous stops to complete so the
// resources become free.
func (mtask *managedTask) waitForHostResources() {
if mtask.StartSequenceNumber == 0 {
// This is the first transition on this host. No need to wait
return
}
if mtask.GetDesiredStatus().Terminal() {
// Task's desired status is STOPPED. No need to wait in this case either
return
}
logger.Info("Waiting for any previous stops to complete", logger.Fields{
field.TaskARN: mtask.Arn,
field.Sequence: mtask.StartSequenceNumber,
})
othersStoppedCtx, cancel := context.WithCancel(mtask.ctx)
defer cancel()
go func() {
mtask.taskStopWG.Wait(mtask.StartSequenceNumber)
cancel()
}()
for !mtask.waitEvent(othersStoppedCtx.Done()) {
if mtask.GetDesiredStatus().Terminal() {
// If we end up here, that means we received a start then stop for this
// task before a task that was expected to stop before it could
// actually stop
break
}
}
logger.Info("Wait over; ready to move towards desired status", logger.Fields{
field.TaskARN: mtask.Arn,
field.DesiredStatus: mtask.GetDesiredStatus().String(),
})
}
// waitSteady waits for a task to leave steady-state by waiting for a new
// event, or a timeout.
func (mtask *managedTask) waitSteady() {
logger.Info("Managed task at steady state", logger.Fields{
field.TaskARN: mtask.Arn,
field.KnownStatus: mtask.GetKnownStatus().String(),
})
timeoutCtx, cancel := context.WithTimeout(mtask.ctx, retry.AddJitter(mtask.steadyStatePollInterval, mtask.steadyStatePollIntervalJitter))
defer cancel()
timedOut := mtask.waitEvent(timeoutCtx.Done())
if mtask.shouldExit() {
return
}
if timedOut {
logger.Debug("Checking to verify it's still at steady state", logger.Fields{
field.TaskARN: mtask.Arn,
})
go mtask.engine.checkTaskState(mtask.Task)
}
}
// steadyState returns if the task is in a steady state. Steady state is when task's desired
// and known status are both RUNNING
func (mtask *managedTask) steadyState() bool {
select {
case <-mtask.ctx.Done():
logger.Info("Task manager exiting", logger.Fields{
field.TaskARN: mtask.Arn,
})
return false
default:
taskKnownStatus := mtask.GetKnownStatus()
return taskKnownStatus == apitaskstatus.TaskRunning && taskKnownStatus >= mtask.GetDesiredStatus()
}
}
// cleanupCredentials removes credentials for a stopped task
func (mtask *managedTask) cleanupCredentials() {
taskCredentialsID := mtask.GetCredentialsID()
if taskCredentialsID != "" {
mtask.credentialsManager.RemoveCredentials(taskCredentialsID)
}
}
// waitEvent waits for any event to occur. If an event occurs, the appropriate
// handler is called. Generally the stopWaiting arg is the context's Done
// channel. When the Done channel is signalled by the context, waitEvent will
// return true.
func (mtask *managedTask) waitEvent(stopWaiting <-chan struct{}) bool {
logger.Debug("Waiting for task event", logger.Fields{
field.TaskARN: mtask.Arn,
})
select {
case acsTransition := <-mtask.acsMessages:
logger.Info("Managed task got acs event", logger.Fields{
field.TaskARN: mtask.Arn,
})
mtask.handleDesiredStatusChange(acsTransition.desiredStatus, acsTransition.seqnum)
return false
case dockerChange := <-mtask.dockerMessages:
mtask.handleContainerChange(dockerChange)
return false
case resChange := <-mtask.resourceStateChangeEvent:
res := resChange.resource
logger.Info("Managed task got resource", logger.Fields{
field.TaskARN: mtask.Arn,
field.Resource: res.GetName(),
field.Status: res.StatusString(resChange.nextState),
})
mtask.handleResourceStateChange(resChange)
return false
case <-stopWaiting:
return true
}
}
// handleDesiredStatusChange updates the desired status on the task. Updates
// only occur if the new desired status is "compatible" (farther along than the
// current desired state); "redundant" (less-than or equal desired states) are
// ignored and dropped.
func (mtask *managedTask) handleDesiredStatusChange(desiredStatus apitaskstatus.TaskStatus, seqnum int64) {
// Handle acs message changes this task's desired status to whatever
// acs says it should be if it is compatible
logger.Info("New acs transition", logger.Fields{
field.TaskARN: mtask.Arn,
field.DesiredStatus: desiredStatus.String(),
field.Sequence: seqnum,
"StopNumber": mtask.StopSequenceNumber,
})
if desiredStatus <= mtask.GetDesiredStatus() {
logger.Info("Redundant task transition; ignoring", logger.Fields{
field.TaskARN: mtask.Arn,
field.DesiredStatus: desiredStatus.String(),
field.Sequence: seqnum,
"StopNumber": mtask.StopSequenceNumber,
})
return
}
if desiredStatus == apitaskstatus.TaskStopped && seqnum != 0 && mtask.GetStopSequenceNumber() == 0 {
logger.Info("Managed task moving to stopped, adding to stopgroup with sequence number",
logger.Fields{
field.TaskARN: mtask.Arn,
field.Sequence: seqnum,
})
mtask.SetStopSequenceNumber(seqnum)
mtask.taskStopWG.Add(seqnum, 1)
}
mtask.SetDesiredStatus(desiredStatus)
mtask.UpdateDesiredStatus()
mtask.engine.saveTaskData(mtask.Task)
}
// handleContainerChange updates a container's known status. If the message
// contains any interesting information (like exit codes or ports), they are
// propagated.
func (mtask *managedTask) handleContainerChange(containerChange dockerContainerChange) {
// locate the container
container := containerChange.container
runtimeID := container.GetRuntimeID()
event := containerChange.event
containerKnownStatus := container.GetKnownStatus()
if event.Status != containerKnownStatus {
logger.Info("Handling container change event", logger.Fields{
field.TaskARN: mtask.Arn,
field.Container: container.Name,
field.RuntimeID: runtimeID,
field.Status: event.Status.String(),
})
}
found := mtask.isContainerFound(container)
if !found {
logger.Critical("State error; invoked with another task's container!", logger.Fields{
field.TaskARN: mtask.Arn,
field.Container: container.Name,
field.RuntimeID: runtimeID,
field.Status: event.Status.String(),
})
return
}
// If this is a backwards transition stopped->running, the first time set it
// to be known running so it will be stopped. Subsequently ignore these backward transitions
mtask.handleStoppedToRunningContainerTransition(event.Status, container)
if event.Status <= containerKnownStatus {
logger.Debug("Container change is redundant", logger.Fields{
field.TaskARN: mtask.Arn,
field.Container: container.Name,
field.RuntimeID: runtimeID,
field.Status: event.Status.String(),
field.KnownStatus: containerKnownStatus.String(),
})
// Only update container metadata when status stays RUNNING
if event.Status == containerKnownStatus && event.Status == apicontainerstatus.ContainerRunning {
updateContainerMetadata(&event.DockerContainerMetadata, container, mtask.Task)
}
return
}
// Container has progressed its status if we reach here. Make sure to save it to database.
defer mtask.engine.saveContainerData(container)
// Update the container to be known
currentKnownStatus := containerKnownStatus
container.SetKnownStatus(event.Status)
updateContainerMetadata(&event.DockerContainerMetadata, container, mtask.Task)
if event.Error != nil {
proceedAnyway := mtask.handleEventError(containerChange, currentKnownStatus)
if !proceedAnyway {
return
}
}
if execcmd.IsExecEnabledContainer(container) && container.GetKnownStatus() == apicontainerstatus.ContainerStopped {
// if this is an execute-command-enabled container STOPPED event, we should emit a corresponding managedAgent event
mtask.handleManagedAgentStoppedTransition(container, execcmd.ExecuteCommandAgentName)
}
mtask.RecordExecutionStoppedAt(container)
logger.Debug("Sending container change event to tcs", logger.Fields{
field.TaskARN: mtask.Arn,
field.Container: container.Name,
field.RuntimeID: runtimeID,
field.Status: event.Status.String(),
})
err := mtask.containerChangeEventStream.WriteToEventStream(event)
if err != nil {
logger.Warn("Failed to write container change event to tcs event stream",
logger.Fields{
field.TaskARN: mtask.Arn,
field.Container: container.Name,
field.RuntimeID: runtimeID,
field.Error: err,
})
}
mtask.emitContainerEvent(mtask.Task, container, "")
if mtask.UpdateStatus() {
// If knownStatus changed, let it be known
var taskStateChangeReason string
if mtask.GetKnownStatus().Terminal() {
taskStateChangeReason = mtask.Task.GetTerminalReason()
}
mtask.emitTaskEvent(mtask.Task, taskStateChangeReason)
// Save the new task status to database.
mtask.engine.saveTaskData(mtask.Task)
}
}
// handleResourceStateChange attempts to update resource's known status depending on
// the current status and errors during transition
func (mtask *managedTask) handleResourceStateChange(resChange resourceStateChange) {
// locate the resource
res := resChange.resource
if !mtask.isResourceFound(res) {
logger.Critical("State error; invoked with another task's resource",
logger.Fields{
field.TaskARN: mtask.Arn,
field.Resource: res.GetName(),
})
return
}
status := resChange.nextState
err := resChange.err
currentKnownStatus := res.GetKnownStatus()
if status <= currentKnownStatus {
logger.Info("Redundant resource state change", logger.Fields{
field.TaskARN: mtask.Arn,
field.Resource: res.GetName(),
field.Status: res.StatusString(status),
field.KnownStatus: res.StatusString(currentKnownStatus),
})
return
}
// There is a resource state change. Resource is stored as part of the task, so make sure to save the task
// at the end.
defer mtask.engine.saveTaskData(mtask.Task)
// Set known status regardless of error so the applied status can be cleared. If there is error,
// the known status might be set again below (but that won't affect the applied status anymore).
// This follows how container state change is handled.
res.SetKnownStatus(status)
if err == nil {
return
}
if status == res.SteadyState() { // Failed to create resource.
logger.Error("Managed task [%s]: failed to create task resource [%s]: %v", logger.Fields{
field.TaskARN: mtask.Arn,
field.Resource: res.GetName(),
field.Error: err,
})
res.SetKnownStatus(currentKnownStatus) // Set status back to None.
logger.Info("Marking task desired status to STOPPED", logger.Fields{
field.TaskARN: mtask.Arn,
})
mtask.SetDesiredStatus(apitaskstatus.TaskStopped)
mtask.Task.SetTerminalReason(res.GetTerminalReason())
}
}
func (mtask *managedTask) emitResourceChange(change resourceStateChange) {
select {
case <-mtask.ctx.Done():
logger.Info("Unable to emit resource state change due to exit", logger.Fields{
field.TaskARN: mtask.Arn,
})
case mtask.resourceStateChangeEvent <- change:
}
}
func getContainerEventLogFields(c api.ContainerStateChange) logger.Fields {
f := logger.Fields{
"ContainerName": c.ContainerName,
}
if c.ExitCode != nil {
f["Exit"] = strconv.Itoa(*c.ExitCode)
}
if c.Reason != "" {
f["Reason"] = c.Reason
}
if len(c.PortBindings) != 0 {
f["Ports"] = fmt.Sprintf("%v", c.PortBindings)
}
if c.Container != nil {
f["KnownSent"] = c.Container.GetSentStatus().String()
}
return f
}
func (mtask *managedTask) emitTaskEvent(task *apitask.Task, reason string) {
event, err := api.NewTaskStateChangeEvent(task, reason)
if err != nil {
logger.Debug("Managed task [%s]: skipping emitting event for task [%s]: %v", logger.Fields{
field.TaskARN: mtask.Arn,
field.Reason: reason,
field.Error: err,
})
return
}
logger.Debug("Sending task change event", logger.Fields{
field.TaskARN: task.Arn,
field.Status: event.Status.String(),
field.SentStatus: task.GetSentStatus().String(),
field.Event: event.String(),
})
select {
case <-mtask.ctx.Done():
logger.Info("Unable to send task change event due to exit", logger.Fields{
field.TaskARN: task.Arn,
field.Status: event.Status.String(),
field.SentStatus: task.GetSentStatus().String(),
field.Event: event.String(),
})
case mtask.stateChangeEvents <- event:
}
logger.Debug("Sent task change event", logger.Fields{
field.TaskARN: mtask.Arn,
field.Event: event.String(),
})
}
// emitManagedAgentEvent passes a special task event up through the taskEvents channel if there are managed
// agent changes in the container passed as parameter.
// It will omit events the backend would not process
func (mtask *managedTask) emitManagedAgentEvent(task *apitask.Task, cont *apicontainer.Container, managedAgentName string, reason string) {
event, err := api.NewManagedAgentChangeEvent(task, cont, managedAgentName, reason)
if err != nil {
logger.Error("Skipping emitting ManagedAgent event for task", logger.Fields{
field.TaskARN: task.Arn,
field.Reason: reason,
field.Error: err,
})
return
}
logger.Info("Sending ManagedAgent event", logger.Fields{
field.TaskARN: task.Arn,
field.Event: event.String(),
})
select {
case <-mtask.ctx.Done():
logger.Info("Unable to send managed agent event due to exit", logger.Fields{
field.TaskARN: task.Arn,
field.Event: event.String(),
})
case mtask.stateChangeEvents <- event:
}
logger.Info("Sent managed agent event [%s]", logger.Fields{
field.TaskARN: task.Arn,
field.Event: event.String(),
})
}
// emitContainerEvent passes a given event up through the containerEvents channel if necessary.
// It will omit events the backend would not process and will perform best-effort deduplication of events.
func (mtask *managedTask) emitContainerEvent(task *apitask.Task, cont *apicontainer.Container, reason string) {
event, err := api.NewContainerStateChangeEvent(task, cont, reason)
if err != nil {
logger.Debug("Skipping emitting event for container", logger.Fields{
field.TaskARN: task.Arn,
field.Container: cont.Name,
field.Error: err,
})
return
}
mtask.doEmitContainerEvent(event)
}
func (mtask *managedTask) doEmitContainerEvent(event api.ContainerStateChange) {
logger.Debug("Sending container change event", getContainerEventLogFields(event), logger.Fields{
field.TaskARN: mtask.Arn,
})
select {
case <-mtask.ctx.Done():
logger.Info("Unable to send container change event due to exit", getContainerEventLogFields(event), logger.Fields{
field.TaskARN: mtask.Arn,
})
case mtask.stateChangeEvents <- event:
}
logger.Debug("Sent container change event", getContainerEventLogFields(event), logger.Fields{
field.TaskARN: mtask.Arn,
})
}
func (mtask *managedTask) emitDockerContainerChange(change dockerContainerChange) {
select {
case <-mtask.ctx.Done():
logger.Info("Unable to emit docker container change due to exit", logger.Fields{
field.TaskARN: mtask.Arn,
})
case mtask.dockerMessages <- change:
}
}
func (mtask *managedTask) emitACSTransition(transition acsTransition) {
select {
case <-mtask.ctx.Done():
logger.Info("Unable to emit docker container change due to exit", logger.Fields{
field.TaskARN: mtask.Arn,
})
case mtask.acsMessages <- transition:
}
}
func (mtask *managedTask) isContainerFound(container *apicontainer.Container) bool {
found := false
for _, c := range mtask.Containers {
if container == c {
found = true
break
}
}
return found
}
func (mtask *managedTask) isResourceFound(res taskresource.TaskResource) bool {
for _, r := range mtask.GetResources() {
if res.GetName() == r.GetName() {
return true
}
}
return false
}
// releaseIPInIPAM releases the IP address used by the task in awsvpc mode.
func (mtask *managedTask) releaseIPInIPAM() {
if !mtask.IsNetworkModeAWSVPC() {
return
}
logger.Info("IPAM releasing ip for task eni", logger.Fields{
field.TaskARN: mtask.Arn,
})
cfg, err := mtask.BuildCNIConfig(true, &ecscni.Config{
MinSupportedCNIVersion: config.DefaultMinSupportedCNIVersion,
})
if err != nil {
logger.Error("Failed to release ip; unable to build cni configuration", logger.Fields{
field.TaskARN: mtask.Arn,
field.Error: err,
})
return
}
err = mtask.cniClient.ReleaseIPResource(mtask.ctx, cfg, ipamCleanupTmeout)
if err != nil {
logger.Error("Failed to release ip; IPAM error", logger.Fields{
field.TaskARN: mtask.Arn,
field.Error: err,
})
return
}
}
// handleStoppedToRunningContainerTransition detects a "backwards" container
// transition where a known-stopped container is found to be running again and
// handles it.
func (mtask *managedTask) handleStoppedToRunningContainerTransition(status apicontainerstatus.ContainerStatus, container *apicontainer.Container) {
containerKnownStatus := container.GetKnownStatus()
if status > containerKnownStatus {
// Event status is greater than container's known status.
// This is not a backward transition, return
return
}
if containerKnownStatus != apicontainerstatus.ContainerStopped {
// Container's known status is not STOPPED. Nothing to do here.
return
}
if !status.IsRunning() {
// Container's 'to' transition was not either of RUNNING or RESOURCES_PROVISIONED
// states. Nothing to do in this case as well
return
}
// If the container becomes running after we've stopped it (possibly
// because we got an error running it and it ran anyways), the first time
// update it to 'known running' so that it will be driven back to stopped
mtask.unexpectedStart.Do(func() {
logger.Warn("Stopped container came back; re-stopping it once", logger.Fields{
field.TaskARN: mtask.Arn,
field.Container: container.Name,
})
go mtask.engine.transitionContainer(mtask.Task, container, apicontainerstatus.ContainerStopped)
// This will not proceed afterwards because status <= knownstatus below
})
}
// handleManagedAgentStoppedTransition handles a container change event which has a managed agent status;
// we should emit ManagedAgent events for certain container events.
func (mtask *managedTask) handleManagedAgentStoppedTransition(container *apicontainer.Container, managedAgentName string) {
//for now we only have the ExecuteCommandAgent
switch managedAgentName {
case execcmd.ExecuteCommandAgentName:
if !container.UpdateManagedAgentStatus(managedAgentName, apicontainerstatus.ManagedAgentStopped) {
logger.Warn("Cannot find ManagedAgent for container", logger.Fields{
field.TaskARN: mtask.Arn,
field.Container: container.Name,
field.ManagedAgent: managedAgentName,
})
}
mtask.emitManagedAgentEvent(mtask.Task, container, managedAgentName, "Received Container Stopped event")
default:
logger.Warn("Unexpected ManagedAgent in container; unable to process ManagedAgent transition event", logger.Fields{
field.TaskARN: mtask.Arn,
field.Container: container.Name,
field.ManagedAgent: managedAgentName,
})
}
}
// handleEventError handles a container change event error and decides whether
// we should proceed to transition the container
func (mtask *managedTask) handleEventError(containerChange dockerContainerChange, currentKnownStatus apicontainerstatus.ContainerStatus) bool {
container := containerChange.container
event := containerChange.event
if container.ApplyingError == nil {
container.ApplyingError = apierrors.NewNamedError(event.Error)
}
switch event.Status {
// event.Status is the desired container transition from container's known status
// (* -> event.Status)
case apicontainerstatus.ContainerPulled:
// If the agent pull behavior is always or once, we receive the error because
// the image pull fails, the task should fail. If we don't fail task here,
// then the cached image will probably be used for creating container, and we
// don't want to use cached image for both cases.
if mtask.cfg.ImagePullBehavior == config.ImagePullAlwaysBehavior ||
mtask.cfg.ImagePullBehavior == config.ImagePullOnceBehavior {
logger.Error("Error while pulling image; moving task to STOPPED", logger.Fields{
field.TaskARN: mtask.Arn,
field.Image: container.Image,
field.Container: container.Name,
field.Error: event.Error,
})
// The task should be stopped regardless of whether this container is
// essential or non-essential.
mtask.SetDesiredStatus(apitaskstatus.TaskStopped)
return false
}
// If the agent pull behavior is prefer-cached, we receive the error because
// the image pull fails and there is no cached image in local, we don't make
// the task fail here, will let create container handle it instead.
// If the agent pull behavior is default, use local image cache directly,
// assuming it exists.
logger.Error("Error while pulling image; will try to run anyway", logger.Fields{
field.TaskARN: mtask.Arn,
field.Image: container.Image,
field.Container: container.Name,
field.Error: event.Error,
})
// proceed anyway
return true
case apicontainerstatus.ContainerStopped:
// Container's desired transition was to 'STOPPED'
return mtask.handleContainerStoppedTransitionError(event, container, currentKnownStatus)
case apicontainerstatus.ContainerStatusNone:
fallthrough
case apicontainerstatus.ContainerCreated:
// No need to explicitly stop containers if this is a * -> NONE/CREATED transition
logger.Warn("Error creating container; marking its desired status as STOPPED", logger.Fields{
field.TaskARN: mtask.Arn,
field.Container: container.Name,
field.Error: event.Error,
})
container.SetKnownStatus(currentKnownStatus)
container.SetDesiredStatus(apicontainerstatus.ContainerStopped)
return false
default:
// If this is a * -> RUNNING / RESOURCES_PROVISIONED transition, we need to stop
// the container.
logger.Warn("Error starting/provisioning container[%s (Runtime ID: %s)];", logger.Fields{
field.TaskARN: mtask.Arn,
field.Container: container.Name,
field.RuntimeID: container.GetRuntimeID(),
field.Error: event.Error,
})
container.SetKnownStatus(currentKnownStatus)
container.SetDesiredStatus(apicontainerstatus.ContainerStopped)
errorName := event.Error.ErrorName()
errorStr := event.Error.Error()
shouldForceStop := false
if errorName == dockerapi.DockerTimeoutErrorName || errorName == dockerapi.CannotInspectContainerErrorName {
// If there's an error with inspecting the container or in case of timeout error,
// we'll assume that the container has transitioned to RUNNING and issue
// a stop. See #1043 for details
shouldForceStop = true
} else if errorName == dockerapi.CannotStartContainerErrorName && strings.HasSuffix(errorStr, io.EOF.Error()) {
// If we get an EOF error from Docker when starting the container, we don't really know whether the
// container is started anyway. So issuing a stop here as well. See #1708.
shouldForceStop = true
}
if shouldForceStop {
logger.Warn("Forcing container to stop", logger.Fields{
field.TaskARN: mtask.Arn,
field.Container: container.Name,
field.RuntimeID: container.GetRuntimeID(),
})
go mtask.engine.transitionContainer(mtask.Task, container, apicontainerstatus.ContainerStopped)
}
// Container known status not changed, no need for further processing
return false
}
}
// handleContainerStoppedTransitionError handles an error when transitioning a container to
// STOPPED. It returns a boolean indicating whether the task can continue with updating its
// state
func (mtask *managedTask) handleContainerStoppedTransitionError(event dockerapi.DockerContainerChangeEvent,
container *apicontainer.Container,
currentKnownStatus apicontainerstatus.ContainerStatus) bool {
pr := mtask.dockerClient.SystemPing(mtask.ctx, systemPingTimeout)
if pr.Error != nil {
logger.Info("Error stopping the container, but docker seems to be unresponsive; ignoring state change", logger.Fields{
field.TaskARN: mtask.Arn,
field.Container: container.Name,
field.RuntimeID: container.GetRuntimeID(),
"ErrorName": event.Error.ErrorName(),
field.Error: event.Error.Error(),
"SystemPingError": pr.Error,
})
container.SetKnownStatus(currentKnownStatus)
return false
}
// If we were trying to transition to stopped and had an error, we
// clearly can't just continue trying to transition it to stopped
// again and again. In this case, assume it's stopped (or close
// enough) and get on with it
// This can happen in cases where the container we tried to stop
// was already stopped or did not exist at all.
logger.Warn("Error stopping the container; marking it as stopped anyway", logger.Fields{
field.TaskARN: mtask.Arn,
field.Container: container.Name,
field.RuntimeID: container.GetRuntimeID(),
"ErrorName": event.Error.ErrorName(),
field.Error: event.Error.Error(),
})
container.SetKnownStatus(apicontainerstatus.ContainerStopped)
container.SetDesiredStatus(apicontainerstatus.ContainerStopped)
return true
}
// progressTask tries to step forwards all containers and resources that are able to be
// transitioned in the task's current state.
// It will continue listening to events from all channels while it does so, but
// none of those changes will be acted upon until this set of requests to
// docker completes.
// Container changes may also prompt the task status to change as well.
func (mtask *managedTask) progressTask() {
logger.Debug("Progressing containers and resources in task", logger.Fields{
field.TaskARN: mtask.Arn,
})
// buffer the channels to the max number of transitions so writes never
// block on them; if we exit early, in-flight transitions can still exit
// their goroutines and get GC'd eventually
resources := mtask.GetResources()
transitionChange := make(chan struct{}, len(mtask.Containers)+len(resources))
transitionChangeEntity := make(chan string, len(mtask.Containers)+len(resources))
// startResourceTransitions should always be called before startContainerTransitions,
// else it might result in a state where none of the containers can transition and
// task may be moved to stopped.
// anyResourceTransition is set to true when transition function needs to be called or
// known status can be changed
anyResourceTransition, resTransitions := mtask.startResourceTransitions(
func(resource taskresource.TaskResource, nextStatus resourcestatus.ResourceStatus) {
mtask.transitionResource(resource, nextStatus)
transitionChange <- struct{}{}
transitionChangeEntity <- resource.GetName()
})
anyContainerTransition, blockedDependencies, contTransitions, reasons := mtask.startContainerTransitions(
func(container *apicontainer.Container, nextStatus apicontainerstatus.ContainerStatus) {
mtask.engine.transitionContainer(mtask.Task, container, nextStatus)
transitionChange <- struct{}{}
transitionChangeEntity <- container.Name
})
atLeastOneTransitionStarted := anyResourceTransition || anyContainerTransition
blockedByOrderingDependencies := len(blockedDependencies) > 0
// If no transitions happened and we aren't blocked by ordering dependencies, then we are possibly in a state where
its impossible for containers to move forward. We will do an additional check to see if we are waiting for ACS
// execution credentials. If not, then we will abort the task progression.
if !atLeastOneTransitionStarted && !blockedByOrderingDependencies {
if !mtask.isWaitingForACSExecutionCredentials(reasons) {
mtask.handleContainersUnableToTransitionState()
}
return
}
// If no containers are starting and we are blocked on ordering dependencies, we should watch for the task to change
// over time. This will update the containers if they become healthy or stop, which makes it possible for the
// conditions "HEALTHY" and "SUCCESS" to succeed.
if !atLeastOneTransitionStarted && blockedByOrderingDependencies {
go mtask.engine.checkTaskState(mtask.Task)
ctx, cancel := context.WithTimeout(context.Background(), transitionPollTime)
defer cancel()
for timeout := mtask.waitEvent(ctx.Done()); !timeout; {
timeout = mtask.waitEvent(ctx.Done())
}
return
}
// combine the resource and container transitions
transitions := make(map[string]string)
for k, v := range resTransitions {
transitions[k] = v
}
for k, v := range contTransitions {
transitions[k] = v.String()
}
// We've kicked off one or more transitions, wait for them to
// complete, but keep reading events as we do. in fact, we have to for
// transitions to complete
mtask.waitForTransition(transitions, transitionChange, transitionChangeEntity)
// update the task status
if mtask.UpdateStatus() {
logger.Info("Container or resource change also resulted in task change", logger.Fields{
field.TaskARN: mtask.Arn,
})
// If knownStatus changed, let it be known
var taskStateChangeReason string
if mtask.GetKnownStatus().Terminal() {
taskStateChangeReason = mtask.Task.GetTerminalReason()
}
mtask.emitTaskEvent(mtask.Task, taskStateChangeReason)
}
}
// isWaitingForACSExecutionCredentials checks whether the containers that can't be transitioned
// are blocked on ACS execution credentials, and if so starts waiting for them
func (mtask *managedTask) isWaitingForACSExecutionCredentials(reasons []error) bool {
for _, reason := range reasons {
if reason == dependencygraph.CredentialsNotResolvedErr {
logger.Info("Waiting for credentials to pull from ECR", logger.Fields{
field.TaskARN: mtask.Arn,
})
timeoutCtx, timeoutCancel := context.WithTimeout(mtask.ctx, waitForPullCredentialsTimeout)
defer timeoutCancel()
timedOut := mtask.waitEvent(timeoutCtx.Done())
if timedOut {
logger.Info("Timed out waiting for acs credentials message", logger.Fields{
field.TaskARN: mtask.Arn,
})
}
return true
}
}
return false
}
// startContainerTransitions steps through each container in the task and calls
// the passed transition function when a transition should occur.
func (mtask *managedTask) startContainerTransitions(transitionFunc containerTransitionFunc) (bool, map[string]apicontainer.DependsOn, map[string]apicontainerstatus.ContainerStatus, []error) {
anyCanTransition := false
var reasons []error
blocked := make(map[string]apicontainer.DependsOn)
transitions := make(map[string]apicontainerstatus.ContainerStatus)
for _, cont := range mtask.Containers {
transition := mtask.containerNextState(cont)
if transition.reason != nil {
// container can't be transitioned
reasons = append(reasons, transition.reason)
if transition.blockedOn != nil {
blocked[cont.Name] = *transition.blockedOn
}
continue
}
// If the container is already in a transition, skip
if transition.actionRequired && !cont.SetAppliedStatus(transition.nextState) {
// At least one container is able to be moved forwards, so we're not deadlocked
anyCanTransition = true
continue
}
// At least one container is able to be moved forwards, so we're not deadlocked
anyCanTransition = true
if !transition.actionRequired {
// Update the container status without calling any docker API; send it in
// a goroutine so that it won't block here before waitForContainerTransition
// is called after this function. All the events sent to mtask.dockerMessages
// will be handled by handleContainerChange.
go func(cont *apicontainer.Container, status apicontainerstatus.ContainerStatus) {
mtask.dockerMessages <- dockerContainerChange{
container: cont,
event: dockerapi.DockerContainerChangeEvent{
Status: status,
},
}
}(cont, transition.nextState)
continue
}
transitions[cont.Name] = transition.nextState
go transitionFunc(cont, transition.nextState)
}
return anyCanTransition, blocked, transitions, reasons
}
// startResourceTransitions steps through each resource in the task and calls
// the passed transition function when a transition should occur
func (mtask *managedTask) startResourceTransitions(transitionFunc resourceTransitionFunc) (bool, map[string]string) {
anyCanTransition := false
transitions := make(map[string]string)
for _, res := range mtask.GetResources() {
knownStatus := res.GetKnownStatus()
desiredStatus := res.GetDesiredStatus()
if knownStatus >= desiredStatus {
logger.Debug("Resource has already transitioned to or beyond the desired status",
logger.Fields{
field.TaskARN: mtask.Arn,
field.Resource: res.GetName(),
field.KnownStatus: res.StatusString(knownStatus),
field.DesiredStatus: res.StatusString(desiredStatus),
})
continue
}
anyCanTransition = true
transition := mtask.resourceNextState(res)
// If the resource is already in a transition, skip
if transition.actionRequired && !res.SetAppliedStatus(transition.nextState) {
// At least one resource is able to be moved forwards, so we're not deadlocked
continue
}
if !transition.actionRequired {
// no action is required for the transition, just set the known status without
// calling any transition function
go mtask.emitResourceChange(resourceStateChange{
resource: res,
nextState: transition.nextState,
err: nil,
})
continue
}
// At least one resource is able to be moved forwards, so we're not deadlocked
transitions[res.GetName()] = transition.status
go transitionFunc(res, transition.nextState)
}
return anyCanTransition, transitions
}
// transitionResource calls applyResourceState, and then notifies the managed
// task of the change. transitionResource is called by progressTask
func (mtask *managedTask) transitionResource(resource taskresource.TaskResource,
to resourcestatus.ResourceStatus) {
err := mtask.applyResourceState(resource, to)
if mtask.engine.isTaskManaged(mtask.Arn) {
mtask.emitResourceChange(resourceStateChange{
resource: resource,
nextState: to,
err: err,
})
}
}
// applyResourceState moves the resource to the given state by calling the
// function defined in the transitionFunctionMap for the state
func (mtask *managedTask) applyResourceState(resource taskresource.TaskResource,
nextState resourcestatus.ResourceStatus) error {
resName := resource.GetName()
resStatus := resource.StatusString(nextState)
err := resource.ApplyTransition(nextState)
if err != nil {
logger.Info("Error transitioning resource", logger.Fields{
field.TaskARN: mtask.Arn,
field.Resource: resName,
field.FailedStatus: resStatus,
field.Error: err,
})
return err
}
logger.Info("Transitioned resource", logger.Fields{
field.TaskARN: mtask.Arn,
field.Resource: resName,
field.Status: resStatus,
})
return nil
}
type containerTransitionFunc func(container *apicontainer.Container, nextStatus apicontainerstatus.ContainerStatus)
type resourceTransitionFunc func(resource taskresource.TaskResource, nextStatus resourcestatus.ResourceStatus)
// containerNextState determines the next state a container should go to.
// It returns a transition struct including the information:
// * container state it should transition to,
// * a bool indicating whether any action is required
// * an error indicating why this transition can't happen
//
// 'Stopped, false, ""' -> "You can move it to known stopped, but you don't have to call a transition function"
// 'Running, true, ""' -> "You can move it to running and you need to call the transition function"
// 'None, false, containerDependencyNotResolved' -> "This should not be moved; it has unresolved dependencies;"
//
// Next status is determined by whether the known and desired statuses are
// equal, the next numerically greater (iota-wise) status, and whether
// dependencies are fully resolved.
func (mtask *managedTask) containerNextState(container *apicontainer.Container) *containerTransition {
containerKnownStatus := container.GetKnownStatus()
containerDesiredStatus := container.GetDesiredStatus()
if containerKnownStatus == containerDesiredStatus {
logger.Debug("Container at desired status", logger.Fields{
field.TaskARN: mtask.Arn,
field.Container: container.Name,
field.RuntimeID: container.GetRuntimeID(),
field.DesiredStatus: containerDesiredStatus.String(),
})
return &containerTransition{
nextState: apicontainerstatus.ContainerStatusNone,
actionRequired: false,
reason: dependencygraph.ContainerPastDesiredStatusErr,
}
}
if containerKnownStatus > containerDesiredStatus {
logger.Debug("Managed task [%s]: container [%s (Runtime ID: %s)] has already transitioned beyond desired status(%s): %s", logger.Fields{
field.TaskARN: mtask.Arn,
field.Container: container.Name,
field.RuntimeID: container.GetRuntimeID(),
field.KnownStatus: containerKnownStatus.String(),
field.DesiredStatus: containerDesiredStatus.String(),
})
return &containerTransition{
nextState: apicontainerstatus.ContainerStatusNone,
actionRequired: false,
reason: dependencygraph.ContainerPastDesiredStatusErr,
}
}
if blocked, err := dependencygraph.DependenciesAreResolved(container, mtask.Containers,
mtask.Task.GetExecutionCredentialsID(), mtask.credentialsManager, mtask.GetResources(), mtask.cfg); err != nil {
logger.Debug("Can't apply state to container yet due to unresolved dependencies", logger.Fields{
field.TaskARN: mtask.Arn,
field.Container: container.Name,
field.RuntimeID: container.GetRuntimeID(),
field.Error: err,
})
return &containerTransition{
nextState: apicontainerstatus.ContainerStatusNone,
actionRequired: false,
reason: err,
blockedOn: blocked,
}
}
var nextState apicontainerstatus.ContainerStatus
if container.DesiredTerminal() {
nextState = apicontainerstatus.ContainerStopped
// It's not enough to just check if the container is in a steady state here;
// we should really check if its status is >= RUNNING and <= STOPPED
if !container.IsRunning() {
// If the container's AppliedStatus is running, it means the StartContainer
// api call has already been scheduled, we should not mark it as stopped
// directly, because when the stopped container comes back, we will end up
// with either:
// 1. The task is not cleaned up, the handleStoppedToRunningContainerTransition
// function will handle this case, but only once. If there are some
// other stopped containers come back, they will not be stopped by
// Agent.
// 2. The task has already been cleaned up, in this case any stopped container
// will not be stopped by Agent when they come back.
if container.GetAppliedStatus() == apicontainerstatus.ContainerRunning {
nextState = apicontainerstatus.ContainerStatusNone
}
return &containerTransition{
nextState: nextState,
actionRequired: false,
}
}
} else {
nextState = container.GetNextKnownStateProgression()
}
return &containerTransition{
nextState: nextState,
actionRequired: true,
}
}
// resourceNextState determines the next state a resource should go to.
// It returns a transition struct including the information:
// * resource state it should transition to,
// * string presentation of the resource state
// * a bool indicating whether any action is required
// * an error indicating why this transition can't happen
//
// Next status is determined by whether the known and desired statuses are
// equal, the next numerically greater (iota-wise) status, and whether
// dependencies are fully resolved.
func (mtask *managedTask) resourceNextState(resource taskresource.TaskResource) *resourceTransition {
resKnownStatus := resource.GetKnownStatus()
resDesiredStatus := resource.GetDesiredStatus()
if resKnownStatus >= resDesiredStatus {
logger.Debug("Managed task [%s]: task resource [%s] has already transitioned to or beyond desired status(%s): %s", logger.Fields{
field.TaskARN: mtask.Arn,
field.Resource: resource.GetName(),
field.KnownStatus: resource.StatusString(resKnownStatus),
field.DesiredStatus: resource.StatusString(resDesiredStatus),
})
return &resourceTransition{
nextState: resourcestatus.ResourceStatusNone,
status: resource.StatusString(resourcestatus.ResourceStatusNone),
actionRequired: false,
reason: dependencygraph.ResourcePastDesiredStatusErr,
}
}
if err := dependencygraph.TaskResourceDependenciesAreResolved(resource, mtask.Containers); err != nil {
logger.Debug("Can't apply state to resource yet due to unresolved dependencies",
logger.Fields{
field.TaskARN: mtask.Arn,
field.Resource: resource.GetName(),
field.Error: err,
})
return &resourceTransition{
nextState: resourcestatus.ResourceStatusNone,
status: resource.StatusString(resourcestatus.ResourceStatusNone),
actionRequired: false,
reason: err,
}
}
var nextState resourcestatus.ResourceStatus
if resource.DesiredTerminal() {
nextState = resource.TerminalStatus()
return &resourceTransition{
nextState: nextState,
status: resource.StatusString(nextState),
actionRequired: false, // Resource cleanup is done while cleaning up task, so not doing anything here.
}
}
nextState = resource.NextKnownState()
return &resourceTransition{
nextState: nextState,
status: resource.StatusString(nextState),
actionRequired: true,
}
}
func (mtask *managedTask) handleContainersUnableToTransitionState() {
logger.Critical("Task in a bad state; it's not steady state but no containers want to transition", logger.Fields{
field.TaskARN: mtask.Arn,
})
if mtask.GetDesiredStatus().Terminal() {
// Ack, really bad. We want it to stop but the containers don't think
// that's possible. Let's just break out and hope for the best!
logger.Critical("The state is so bad that we're just giving up on it", logger.Fields{
field.TaskARN: mtask.Arn,
})
mtask.SetKnownStatus(apitaskstatus.TaskStopped)
mtask.emitTaskEvent(mtask.Task, taskUnableToTransitionToStoppedReason)
// TODO we should probably panic here
} else {
logger.Critical("Moving task to stopped due to bad state", logger.Fields{
field.TaskARN: mtask.Arn,
})
mtask.handleDesiredStatusChange(apitaskstatus.TaskStopped, 0)
}
}
func (mtask *managedTask) waitForTransition(transitions map[string]string,
transition <-chan struct{},
transitionChangeEntity <-chan string) {
// There could be multiple transitions, but we just need to wait for one of them
// to ensure that there is at least one container or resource that can be processed
// in the next progressTask call. This is done by waiting for one transition/acs/docker message.
if !mtask.waitEvent(transition) {
logger.Debug("Received non-transition events", logger.Fields{
field.TaskARN: mtask.Arn,
})
return
}
transitionedEntity := <-transitionChangeEntity
logger.Debug("Managed task [%s]: transition for [%s] finished", logger.Fields{
field.TaskARN: mtask.Arn,
"TransitionedEntity": transitionedEntity,
})
delete(transitions, transitionedEntity)
logger.Debug("Task still waiting for: %v", logger.Fields{
field.TaskARN: mtask.Arn,
"TransitionedEntity": fmt.Sprintf("%v", transitions),
})
}
func (mtask *managedTask) time() ttime.Time {
mtask._timeOnce.Do(func() {
if mtask._time == nil {
mtask._time = &ttime.DefaultTime{}
}
})
return mtask._time
}
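// A standalone sketch of the once-guarded lazy default used by time() above
// (assumption: illustrative type, not in the original source; the sync and
// ttime packages are assumed to be imported).
type lazyClockSketch struct {
once sync.Once
t    ttime.Time
}

func (c *lazyClockSketch) get() ttime.Time {
c.once.Do(func() {
if c.t == nil {
c.t = &ttime.DefaultTime{} // fall back to the real clock exactly once
}
})
return c.t
}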
func (mtask *managedTask) cleanupTask(taskStoppedDuration time.Duration) {
cleanupTimeDuration := mtask.GetKnownStatusTime().Add(taskStoppedDuration).Sub(ttime.Now())
cleanupTime := make(<-chan time.Time)
if cleanupTimeDuration < 0 {
logger.Info("Cleanup Duration has been exceeded; starting cleanup now", logger.Fields{
field.TaskARN: mtask.Arn,
})
cleanupTime = mtask.time().After(time.Nanosecond)
} else {
cleanupTime = mtask.time().After(cleanupTimeDuration)
}
cleanupTimeBool := make(chan struct{})
go func() {
<-cleanupTime
close(cleanupTimeBool)
}()
// wait for the cleanup time to elapse, signalled by cleanupTimeBool
for !mtask.waitEvent(cleanupTimeBool) {
}
// wait for apitaskstatus.TaskStopped to be sent
ok := mtask.waitForStopReported()
if !ok {
logger.Error("Aborting cleanup for task as it is not reported as stopped", logger.Fields{
field.TaskARN: mtask.Arn,
field.SentStatus: mtask.GetSentStatus().String(),
})
return
}
logger.Info("Cleaning up task's containers and data", logger.Fields{
field.TaskARN: mtask.Arn,
})
// For the duration of this, simply discard any task events; this ensures the
// speedy processing of other events for other tasks
// discard events while the task is being removed from engine state
go mtask.discardEvents()
mtask.engine.sweepTask(mtask.Task)
mtask.engine.deleteTask(mtask.Task)
// The last thing to do here is to cancel the context, which should cancel
// all outstanding go routines associated with this managed task.
mtask.cancel()
}
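// Sketch of the cleanup-delay arithmetic above (assumption: standalone helper,
// not in the original source). For example, if the task became known-stopped at
// 10:00 and taskStoppedDuration is 3h, cleanup fires at 13:00; if that moment
// has already passed, the returned delay is a single nanosecond so cleanup
// starts immediately.
func cleanupDelaySketch(knownStoppedAt time.Time, now time.Time, taskStoppedDuration time.Duration) time.Duration {
delay := knownStoppedAt.Add(taskStoppedDuration).Sub(now)
if delay < 0 {
return time.Nanosecond
}
return delay
}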
func (mtask *managedTask) discardEvents() {
for {
select {
case <-mtask.dockerMessages:
case <-mtask.acsMessages:
case <-mtask.resourceStateChangeEvent:
case <-mtask.ctx.Done():
return
}
}
}
// waitForStopReported will wait for the task to be reported stopped and return true, or will time-out and return false.
// Messages on the mtask.dockerMessages and mtask.acsMessages channels will be handled while this function is waiting.
func (mtask *managedTask) waitForStopReported() bool {
stoppedSentBool := make(chan struct{})
taskStopped := false
go func() {
for i := 0; i < _maxStoppedWaitTimes; i++ {
// ensure that we block until apitaskstatus.TaskStopped is actually sent
sentStatus := mtask.GetSentStatus()
if sentStatus >= apitaskstatus.TaskStopped {
taskStopped = true
break
}
logger.Warn("Blocking cleanup until the task has been reported stopped", logger.Fields{
field.TaskARN: mtask.Arn,
field.SentStatus: sentStatus.String(),
"Attempt": i + 1,
"MaxAttempts": _maxStoppedWaitTimes,
})
mtask.time().Sleep(_stoppedSentWaitInterval)
}
stoppedSentBool <- struct{}{}
close(stoppedSentBool)
}()
// wait for apitaskstatus.TaskStopped to be sent
for !mtask.waitEvent(stoppedSentBool) {
}
return taskStopped
}
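// Generic form of the bounded poll performed by waitForStopReported above
// (assumption: illustrative helper, not in the original source). The worst-case
// wait before giving up is attempts * interval.
func pollUntilSketch(cond func() bool, attempts int, interval time.Duration, clock ttime.Time) bool {
for i := 0; i < attempts; i++ {
if cond() {
return true
}
clock.Sleep(interval)
}
return false
}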
| 1 | 26,417 | Can we elaborate on why? I'm guessing because we need the execution role to call FE stopTask during `cleanupTask`, but would be good if we make it clear here. | aws-amazon-ecs-agent | go |
@@ -62,6 +62,10 @@ func newAgentCommand() *cobra.Command {
if err := opts.validate(args); err != nil {
klog.Fatalf("Failed to validate: %v", err)
}
+ // Not passing args again as they are already validated and are not used in flow exporter config
+ if err := opts.validateFlowExporterConfig(); err != nil {
+ klog.Fatalf("Failed to validate flow exporter config: %v", err)
+ }
if err := run(opts); err != nil {
klog.Fatalf("Error running agent: %v", err)
} | 1 | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package main under directory cmd parses and validates user input,
// instantiates and initializes objects imported from pkg, and runs
// the process.
package main
import (
"flag"
"os"
"github.com/spf13/cobra"
"k8s.io/component-base/logs"
"k8s.io/klog/v2"
"github.com/vmware-tanzu/antrea/pkg/log"
"github.com/vmware-tanzu/antrea/pkg/version"
)
func init() {
log.InitKlog()
}
func main() {
logs.InitLogs()
defer logs.FlushLogs()
command := newAgentCommand()
if err := command.Execute(); err != nil {
logs.FlushLogs()
os.Exit(1)
}
}
func newAgentCommand() *cobra.Command {
opts := newOptions()
cmd := &cobra.Command{
Use: "antrea-agent",
Long: "The Antrea agent runs on each node.",
Run: func(cmd *cobra.Command, args []string) {
if err := log.Klogv2Flags.Parse(os.Args[1:]); err != nil {
klog.Fatalf("Failed to parse: %v", err)
}
klog.Infof("Args: %v", os.Args)
log.InitLogFileLimits(cmd.Flags())
if err := opts.complete(args); err != nil {
klog.Fatalf("Failed to complete: %v", err)
}
if err := opts.validate(args); err != nil {
klog.Fatalf("Failed to validate: %v", err)
}
if err := run(opts); err != nil {
klog.Fatalf("Error running agent: %v", err)
}
},
Version: version.GetFullVersionWithRuntimeInfo(),
}
flags := cmd.Flags()
opts.addFlags(flags)
log.AddFlags(flags)
// Install log flags
flags.AddGoFlagSet(flag.CommandLine)
return cmd
}
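// The review note below asks why the new check is a separate call; a minimal
// sketch of the alternative (assumption: hypothetical refactor, receiver type
// name assumed) would fold it into validate itself so callers keep a single
// validation entry point:
//
//	func (o *Options) validate(args []string) error {
//		// ... existing validation ...
//		return o.validateFlowExporterConfig()
//	}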
| 1 | 19,310 | is there a reason why this is not called from inside `validate`? | antrea-io-antrea | go |
@@ -46,7 +46,12 @@ type clientServer struct {
// New returns a client/server to bidirectionally communicate with the backend.
// The returned struct should have both 'Connect' and 'Serve' called upon it
// before being used.
-func New(url string, cfg *config.Config, credentialProvider *credentials.Credentials, statsEngine stats.Engine, publishMetricsInterval time.Duration) wsclient.ClientServer {
+func New(url string,
+ cfg *config.Config,
+ credentialProvider *credentials.Credentials,
+ statsEngine stats.Engine,
+ publishMetricsInterval time.Duration,
+ rwTimeout time.Duration) wsclient.ClientServer {
cs := &clientServer{
statsEngine: statsEngine,
publishTicker: nil, | 1 | // Copyright 2014-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package tcsclient
import (
"bytes"
"fmt"
"io"
"net/http"
"time"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/stats"
"github.com/aws/amazon-ecs-agent/agent/tcs/model/ecstcs"
"github.com/aws/amazon-ecs-agent/agent/utils"
"github.com/aws/amazon-ecs-agent/agent/wsclient"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/cihub/seelog"
)
// tasksInMessage is the maximum number of tasks that can be sent in a message to the backend
// This is a very conservative estimate assuming max allowed string lengths for all fields.
const tasksInMessage = 10
// clientServer implements wsclient.ClientServer interface for metrics backend.
type clientServer struct {
statsEngine stats.Engine
publishTicker *time.Ticker
endPublish chan struct{}
publishMetricsInterval time.Duration
wsclient.ClientServerImpl
}
// New returns a client/server to bidirectionally communicate with the backend.
// The returned struct should have both 'Connect' and 'Serve' called upon it
// before being used.
func New(url string, cfg *config.Config, credentialProvider *credentials.Credentials, statsEngine stats.Engine, publishMetricsInterval time.Duration) wsclient.ClientServer {
cs := &clientServer{
statsEngine: statsEngine,
publishTicker: nil,
publishMetricsInterval: publishMetricsInterval,
}
cs.URL = url
cs.AgentConfig = cfg
cs.CredentialProvider = credentialProvider
cs.ServiceError = &tcsError{}
cs.RequestHandlers = make(map[string]wsclient.RequestHandler)
cs.TypeDecoder = NewTCSDecoder()
return cs
}
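// Usage sketch (assumption: illustrative values; cfg, credentialProvider and
// statsEngine come from the caller's setup), following the Connect/Serve
// contract described in the comment above:
//
//	cs := New(url, cfg, credentialProvider, statsEngine, publishMetricsInterval)
//	if err := cs.Connect(); err != nil {
//		return err
//	}
//	defer cs.Close()
//	return cs.Serve()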
// Serve begins serving requests using previously registered handlers (see
// AddRequestHandler). All request handlers should be added prior to making this
// call as unhandled requests will be discarded.
func (cs *clientServer) Serve() error {
seelog.Debug("TCS client starting websocket poll loop")
if !cs.IsReady() {
return fmt.Errorf("Websocket not ready for connections")
}
if cs.statsEngine == nil {
return fmt.Errorf("uninitialized stats engine")
}
// Start the timer function to publish metrics to the backend.
cs.publishTicker = time.NewTicker(cs.publishMetricsInterval)
cs.endPublish = make(chan struct{})
go cs.publishMetrics()
return cs.ConsumeMessages()
}
// MakeRequest makes a request using the given input. Note, the input *MUST* be
// a pointer to a valid backend type that this client recognises
func (cs *clientServer) MakeRequest(input interface{}) error {
payload, err := cs.CreateRequestMessage(input)
if err != nil {
return err
}
seelog.Debugf("TCS client sending payload: %s", string(payload))
data := cs.signRequest(payload)
// Over the wire we send something like
// {"type":"AckRequest","message":{"messageId":"xyz"}}
return cs.WriteMessage(data)
}
func (cs *clientServer) signRequest(payload []byte) []byte {
reqBody := bytes.NewBuffer(payload)
// NewRequest never returns an error if the url parses and we just verified
// it did above
request, _ := http.NewRequest("GET", cs.URL, reqBody)
utils.SignHTTPRequest(request, cs.AgentConfig.AWSRegion, "ecs", cs.CredentialProvider, aws.ReadSeekCloser(reqBody))
request.Header.Add("Host", request.Host)
var dataBuffer bytes.Buffer
request.Header.Write(&dataBuffer)
io.WriteString(&dataBuffer, "\r\n")
data := dataBuffer.Bytes()
data = append(data, payload...)
return data
}
// Close closes the underlying connection.
func (cs *clientServer) Close() error {
if cs.publishTicker != nil {
cs.publishTicker.Stop()
cs.endPublish <- struct{}{}
}
return cs.Disconnect()
}
// publishMetrics invokes the PublishMetricsRequest on the clientserver object.
func (cs *clientServer) publishMetrics() {
if cs.publishTicker == nil {
seelog.Debug("Skipping publishing metrics. Publish ticker is uninitialized")
return
}
// Publish metrics immediately after we connect and wait for ticks. This makes
// sure that there is no data loss when a scheduled metrics publishing fails
// due to a connection reset.
err := cs.publishMetricsOnce()
if err != nil && err != stats.EmptyMetricsError {
seelog.Warnf("Error publishing metrics: %v", err)
}
// don't simply range over the ticker since its channel doesn't ever get closed
for {
select {
case <-cs.publishTicker.C:
err := cs.publishMetricsOnce()
if err != nil {
seelog.Warnf("Error publishing metrics: %v", err)
}
case <-cs.endPublish:
return
}
}
}
// publishMetricsOnce is invoked by the ticker to periodically publish metrics to backend.
func (cs *clientServer) publishMetricsOnce() error {
// Get the list of objects to send to backend.
requests, err := cs.metricsToPublishMetricRequests()
if err != nil {
return err
}
// Make the publish metrics request to the backend.
for _, request := range requests {
err = cs.MakeRequest(request)
if err != nil {
return err
}
}
return nil
}
// metricsToPublishMetricRequests gets task metrics and converts them to a list of PublishMetricRequest
// objects.
func (cs *clientServer) metricsToPublishMetricRequests() ([]*ecstcs.PublishMetricsRequest, error) {
metadata, taskMetrics, err := cs.statsEngine.GetInstanceMetrics()
if err != nil {
return nil, err
}
var requests []*ecstcs.PublishMetricsRequest
if *metadata.Idle {
metadata.Fin = aws.Bool(true)
// Idle instance, we have only one request to send to backend.
requests = append(requests, ecstcs.NewPublishMetricsRequest(metadata, taskMetrics))
return requests, nil
}
var messageTaskMetrics []*ecstcs.TaskMetric
numTasks := len(taskMetrics)
for i, taskMetric := range taskMetrics {
messageTaskMetrics = append(messageTaskMetrics, taskMetric)
var requestMetadata *ecstcs.MetricsMetadata
if (i + 1) == numTasks {
// If this is the last task to send, set fin to true
requestMetadata = copyMetricsMetadata(metadata, true)
} else {
requestMetadata = copyMetricsMetadata(metadata, false)
}
if (i+1)%tasksInMessage == 0 {
// Construct payload with tasksInMessage number of task metrics and send to backend.
requests = append(requests, ecstcs.NewPublishMetricsRequest(requestMetadata, copyTaskMetrics(messageTaskMetrics)))
messageTaskMetrics = messageTaskMetrics[:0]
}
}
if len(messageTaskMetrics) > 0 {
// Create the new metadata object and set fin to true as this is the last message in the payload.
requestMetadata := copyMetricsMetadata(metadata, true)
// Create a request with remaining task metrics.
requests = append(requests, ecstcs.NewPublishMetricsRequest(requestMetadata, messageTaskMetrics))
}
return requests, nil
}
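// Sketch (assumption: standalone helper, not in the original source) of the
// batch count the chunking loop above produces: one request per full group of
// tasksInMessage task metrics, plus one for any remainder. With 25 task metrics
// this yields 3 requests, and only the last request carries Fin=true.
func batchCountSketch(numTaskMetrics int) int {
if numTaskMetrics == 0 {
return 0
}
return (numTaskMetrics + tasksInMessage - 1) / tasksInMessage
}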
// copyMetricsMetadata creates a new MetricsMetadata object from a given MetricsMetadata object.
// It copies all the fields from the source object to the new object and sets the 'Fin' field
// as specified by the argument.
func copyMetricsMetadata(metadata *ecstcs.MetricsMetadata, fin bool) *ecstcs.MetricsMetadata {
return &ecstcs.MetricsMetadata{
Cluster: aws.String(*metadata.Cluster),
ContainerInstance: aws.String(*metadata.ContainerInstance),
Idle: aws.Bool(*metadata.Idle),
MessageId: aws.String(*metadata.MessageId),
Fin: aws.Bool(fin),
}
}
// copyTaskMetrics copies a slice of TaskMetric objects to another slice. This is needed as we
// reset the source slice after creating a new PublishMetricsRequest object.
func copyTaskMetrics(from []*ecstcs.TaskMetric) []*ecstcs.TaskMetric {
to := make([]*ecstcs.TaskMetric, len(from))
copy(to, from)
return to
}
| 1 | 17,589 | :+1: I much prefer this style for functions with more than a few arguments. | aws-amazon-ecs-agent | go |
@@ -762,8 +762,7 @@ class CommandLine
}
}
}
-
- if (hasQualifiers)
+ else if (hasQualifiers)
{
sb.AppendLine().Append(" Actions:").AppendLine();
| 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using System;
using System.Diagnostics;
using System.Text;
using System.Collections.Generic;
using System.IO;
using Microsoft.DotNet.Execute;
using System.Reflection;
namespace Microsoft.Fx.CommandLine
{
// See code:#Overview to get started.
/// <summary>
/// #Overview
///
/// The code:CommandLineParser is a utility for parsing command lines. Command lines consist of three basic
/// entities. A command can have any (or none) of the following (separated by whitespace of any size).
///
/// * PARAMETERS - these are non-space strings. They are positional (logically they are numbered). Strings
/// with spaces can be specified by enclosing them in double quotes.
///
/// * QUALIFIERS - Qualifiers are name-value pairs. The following syntax is supported.
/// * -QUALIFIER
/// * -QUALIFIER:VALUE
/// * -QUALIFIER=VALUE
/// * -QUALIFIER VALUE
///
/// The end of a value is delimited by space. Again, values with spaces can be encoded by enclosing
/// the value (or the whole qualifier-value string) in double quotes. The first form (where a value is
/// not specified) is only available for boolean qualifiers, and boolean values can not use the form where
/// the qualifier and value are separated by space. The '/' character can also be used instead of the '-'
/// to begin a qualifier.
///
/// Unlike parameters, qualifiers are NOT ordered. They may occur in any order with respect to the
/// parameters or other qualifiers and THAT ORDER IS NOT COMMUNICATED THROUGH THE PARSER. Thus it is not
/// possible to have qualifiers that only apply to specific parameters.
///
/// * PARAMETER SET SPECIFIER - A parameter set specifier is an optional argument that looks like a boolean qualifier
/// (however if NoDashOnParameterSets is set the dash is not needed, so it looks like a parameter),
/// that is special in that it decides what qualifiers and positional parameters are allowed. See
/// code:#ParameterSets for more
///
/// #ParameterSets
///
/// Parameter sets are an OPTIONAL facility of code:CommandLineParser that allow more complex command lines
/// to be specified accurately. It not uncommon for a EXE to have several 'commands' that are logically
/// independent of one another. For example a for example For example a program might have 'checkin'
/// 'checkout' 'list' commands, and each of these commands has a different set of parameters that are needed
/// and qualifiers that are allowed. (for example checkout will take a list of file names, list needs nothing,
/// and checkin needs a comment). Additionally some qualifiers (like say -dataBaseName can apply to any of hte
/// commands). Thus You would like to say that the following command lines are legal
///
/// * EXE -checkout MyFile1 MyFile -dataBaseName:MyDatabase
/// * EXE -dataBaseName:MyDatabase -list
/// * EXE -comment "Specifying the comment first" -checkin
/// * EXE -checkin -comment "Specifying the comment afterward"
///
/// But the following are not
///
/// * EXE -checkout
/// * EXE -checkout -comment "hello"
/// * EXE -list MyFile
///
/// You do this by specifying 'checkout', 'list' and 'checkin' as parameter sets. On the command line they
/// look like boolean qualifiers, however they have additional semantics. They must come before any
/// positional parameters (because they affect whether the parameters are allowed and what they are named),
/// and they are mutually exclusive. Each parameter set gets its own set of parameter definitions, and
/// qualifiers can either be associated with a particular parameter set (like -comment) or global to all
/// parameter sets (like -dataBaseName) .
///
/// By default parameter set specifiers look like a boolean specifier (begin with a '-' or '/'), however
/// because it is common practice to NOT have a dash for commands, there is a property
/// code:CommandLineParser.NoDashOnParameterSets that indicates that the dash is not used. If this was
/// specified then the following command would be legal.
///
/// * EXE checkout MyFile1 MyFile -dataBaseName:MyDatabase
///
/// #DefaultParameterSet
///
/// One parameter set (which has the empty string name) is special in that it is used when no other
/// parameter set is matched. This is the default parameter set. For example, if -checkout was defined to be
/// the default parameter set, then the following would be legal.
///
/// * EXE Myfile1 Myfile
///
/// And would implicitly mean 'checkout' Myfile1, Myfile2
///
/// If no parameter sets are defined, then all qualifiers and parameters are in the default parameter set.
///
/// -------------------------------------------------------------------------
/// #Syntactic ambiguities
///
/// Because whitespace can separate a qualifier from its value AND qualifiers from each other, and because
/// parameter values might start with a dash (and thus look like qualifiers), the syntax is ambiguous. It is
/// disambiguated with the following rules.
/// * The command line is parsed into 'arguments' that are separated by whitespace. Any string enclosed
/// in "" will be a single argument even if it has embedded whitespace. Double quote characters can
/// be specified by \" (and a \" literal can be specified by \\" etc).
/// * Arguments are parsed into qualifiers. This parsing stops if a '--' argument is found. Thus all
/// qualifiers must come before any '--' argument but parameters that begin with - can be specified by
/// placing them after the '--' argument.
/// * Qualifiers are parsed. Because spaces can be used to separate a qualifier from its value, the type of
/// the qualifier must be known to parse it. Boolean values never consume an additional parameter, and
/// non-boolean qualifiers ALWAYS consume the next argument (if there is no : or =). If the empty
/// string is to be specified, it must use the ':' or '=' form. Moreover it is illegal for the values
/// that begin with '-' to use space as a separator. They must instead use the ':' or '=' form. This
/// is because it is too confusing for humans to parse (values look like qualifiers).
/// * Parameters are parsed. Whatever arguments that were not used by qualifiers are parameters.
///
/// --------------------------------------------------------------------------------------------
/// #DefiningParametersAndQualifiers
///
/// The following example shows the steps for defining the parameters and qualifiers for the example. Note
/// that the order is important. Qualifiers that apply to all commands must be specified first, then each
/// parameter set, then finally the default parameter set. Most steps are optional.
#if EXAMPLE1
class CommandLineParserExample1
{
enum Command { checkout, checkin, list };
static void Main()
{
string dataBaseName = "myDefaultDataBase";
string comment = String.Empty;
Command command = Command.checkout;
string[] fileNames = null;
// Step 1 define the parser.
CommandLineParser commandLineParser = new CommandLineParser(Environment.CommandLine); // parse the current command line
// Step 2 (optional) define qualifiers that apply to all parameter sets.
commandLineParser.DefineOptionalParameter("dataBaseName", ref dataBaseName, "Help for database.");
// Step 3A define the checkin command this includes all parameters and qualifiers specific to this command
commandLineParser.DefineParameterSet("checkin", ref command, Command.checkin, "Help for checkin.");
commandLineParser.DefineOptionalQualifiers("comment", ref comment, "Help for -comment.");
// Step 3B define the list command this includes all parameters and qualifiers specific to this command
commandLineParser.DefineParameterSet("list", ref command, Command.list, "Help for list.");
// Step 4 (optional) define the default parameter set (in this case checkout).
commandLineParser.DefineDefaultParameterSet("checkout", ref command, Command.checkout, "Help for checkout.");
commandLineParser.DefineParamter("fileNames", ref fileNames, "Help for fileNames.");
// Step 5, do final validation (look for undefined qualifiers, extra parameters ...
commandLineParser.CompleteValidation();
// Step 6 use the parsed values
Console.WriteLine("Got {0} {1} {2} {3} {4}", dataBaseName, command, comment, string.Join(',', fileNames));
}
}
#endif
/// #RequiredAndOptional
///
/// Parameters and qualifiers can be specified as required (the default), or optional. Making the default
/// required was chosen to make any mistakes 'obvious', since the parser will fail if a required parameter is
/// not present (if the default was optional, it would be easy to make what should have been a required
/// qualifier optional, leading to business logic failure).
///
/// #ParsedValues
///
/// The class was designed to maximize programmer convenience. For each parameter, only one call is needed to
/// both define the parameter, its help message, and retrieve its (strongly typed) value. For example
///
/// * int count = 5;
/// * parser.DefineOptionalQualifier("count", ref count, "help for optional debugs qualifier");
///
/// Defines a qualifier 'count' and will place its value in the local variable 'count' as an integer. Default
/// values are supported by doing nothing, so in the example above the default value will be 5.
///
/// Types supported: The parser will support any type that has a static method called 'Parse' taking one
/// string argument and returning that type. This is true for all primitive types, DateTime, Enumerations, and
/// any user defined type that follows this convention.
///
/// Array types: The parser has special knowledge of arrays. If the type of a qualifier is an array, then the
/// string value is assumed to be a ',' separated list of strings which will be parsed as the element type of
/// the array. In addition to the ',' syntax, it is also legal to specify the qualifier more than once. For
/// example, given the definition
///
/// * int[] counts;
/// * parser.DefineOptionalQualifier("counts", ref counts, "help for optional counts qualifier");
///
/// The command line
///
/// * EXE -counts 5 SomeArg -counts 6 -counts:7
///
/// Is the same as
///
/// * EXE -counts:5,6,7 SomeArg
///
/// If a qualifier or parameter is an array type and is required, then the array must have at least one
/// element. If it is optional, then the array can be empty (but in all cases, the array is created, thus
/// null is never returned by the command line parser).
///
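/// A minimal sketch of the array behavior just described (a hypothetical
/// EXAMPLE_ARRAYS block, written against the five-argument
/// DefineOptionalQualifier overload defined below):
#if EXAMPLE_ARRAYS
class CommandLineParserArrayExample
{
static void Main()
{
int[] counts = new int[0];
// The single-string constructor is used here; typically you would pass Environment.CommandLine.
CommandLineParser parser = new CommandLineParser(Environment.CommandLine);
parser.DefineOptionalQualifier("counts", ref counts, "Help for counts.", null, null);
parser.CompleteValidation();
// "-counts 5 -counts 6 -counts:7" and "-counts:5,6,7" both yield { 5, 6, 7 }.
Console.WriteLine(string.Join(",", counts));
}
}
#endif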
/// By default it is illegal for a non-array qualifier to be specified more than once. It is however
/// possible to override this behavior by setting the LastQualifierWins property before defining the qualifier.
///
/// -------------------------------------------------------------------------
/// #Misc
///
/// Qualifiers can have more than one string form (typically a long and a short form). These are specified
/// with the code:CommandLineParser.DefineAliases method.
///
/// After defining all the qualifiers and parameters, it is necessary to call the parser to check for the user
/// specifying a qualifier (or parameter) that does not exist. This is the purpose of the
/// code:CommandLineParser.CompleteValidation method.
///
/// When an error is detected at runtime an instance of code:CommandLineParserException is thrown. The error
/// message in this exception was designed to be suitable to print to the user directly.
///
/// #CommandLineHelp
///
/// The parser also can generate help that is correct for the qualifier and parameter definitions. This can be
/// accessed from the code:CommandLineParser.GetHelp method. It is also possible to get the help for just a
/// particular Parameter set with code:CommandLineParser.GetHelpForParameterSet. This help includes command
/// line syntax, whether the qualifier or parameter is optional or a list, the types of the qualifiers and
/// parameters, the help text, and default values. The help text comes from the 'Define' Methods, and is
/// properly word-wrapped. Newlines in the help text indicate new paragraphs.
///
/// #AutomaticExceptionProcessingAndHelp
///
/// In the CommandLineParserExample1, while the command line parser did a lot of the work, there is still work
/// needed to make the application user friendly, work that pretty much all applications need. These include
///
/// * Call the code:CommandLineParser constructor and code:CommandLineParser.CompleteValidation
/// * Catch any code:CommandLineParserException and print a friendly message
/// * Define a -? qualifier and wire it up to print the help.
///
/// Since this is stuff that all applications will likely need, the
/// code:CommandLineParser.ParseForConsoleApplication method was created to do all of this for you, thus making it
/// super-easy to make a production quality parser (and concentrate on your application logic instead
/// of command line parsing). Here is an example which defines a 'Ping' command. You will notice there are
/// very few lines of code that are not expressing something very specific to this application. This is how
/// it should be!
#if EXAMPLE2
class CommandLineParserExample2
{
static void Main(string[] args)
{
// Step 1: Initialize to the defaults
string Host = null;
int Timeout = 1000;
bool Forever = false;
// Step 2: Define the parameters, in this case there is only the default parameter set.
CommandLineParser.ParseForConsoleApplication(args, delegate(CommandLineParser parser)
{
parser.DefineOptionalQualifier("Timeout", ref Timeout, "Timeout in milliseconds to wait for each reply.");
parser.DefineOptionalQualifier("Forever", ref Forever, "Ping forever.");
parser.DefineDefaultParameterSet("Ping sends a network request to a host to reply to the message (to check for liveness).");
parser.DefineParameter("Host", ref Host, "The Host to send a ping message to.");
});
// Step 3, use the parameters
Console.WriteLine("Got {0} {1} {2} {3}", Host, Timeout, Forever);
}
}
#endif
/// Using local variables for the parsed arguments is fine when the program is not complex and the values
/// don't need to be passed around to many routines. In general, however it is often a better idea to
/// create a class whose sole purpose is to act as a repository for the parsed arguments. This also nicely
/// separates all command line processing into a single class. This is how the ping example would look in
/// that style. Notice that the main program no longer holds any command line processing logic. and that
/// 'commandLine' can be passed to other routines in bulk easily.
#if EXAMPLE3
class CommandLineParserExample3
{
static void Main(string[] args)
{
CommandLine commandLine = new CommandLine(args);
Console.WriteLine("Got {0} {1} {2} {3}", commandLine.Host, commandLine.Timeout, commandLine.Forever);
}
}
class CommandLine
{
public CommandLine(string[] args)
{
CommandLineParser.ParseForConsoleApplication(args, delegate(CommandLineParser parser)
{
parser.DefineOptionalQualifier("Timeout", ref Timeout, "Timeout in milliseconds to wait for each reply.");
parser.DefineOptionalQualifier("Forever", ref Forever, "Ping forever.");
parser.DefineDefaultParameterSet("Ping sends a network request to a host to reply to the message (to check for liveness).");
parser.DefineParameter("Host", ref Host, "The Host to send a ping message to.");
});
}
public string Host = null;
public int Timeout = 1000;
public bool Forever = false;
};
#endif
/// <summary>
/// see code:#Overview for more
/// </summary>
public class CommandLineParser
{
/// <summary>
/// If you are building a console application, there is a common structure to parsing arguments. You want
/// the text formatted and output for console windows, and you want /? to be wired up to do this. All
/// errors should be caught and displayed in a nice way. This routine does this 'boilerplate'.
/// </summary>
/// <param name="parseBody">parseBody is the body of the parsing that this outer shell does not provide.
/// In this delegate, you should be defining all the command line parameters using calls to Define* methods.
/// </param>
public static bool ParseForConsoleApplication(Action<CommandLineParser> parseBody, string[] args, Setup setupContent)
{
return Parse(parseBody, parser =>
{
var parameterSetTofocusOn = parser.HelpRequested;
string helpString;
if (parameterSetTofocusOn.Length == 0)
{
parameterSetTofocusOn = null;
//helpString = parser.GetHelp(GetConsoleWidth() - 1, parameterSetTofocusOn, true);
}
helpString = parser.GetIntroTextForHelp(GetConsoleWidth() - 1).ToString();
helpString += parser.GetHelp(GetConsoleWidth() - 1, parameterSetTofocusOn, true);
DisplayStringToConsole(helpString);
}, (parser, ex) =>
{
Console.Error.WriteLine("Error: " + ex.Message);
Console.ForegroundColor = ConsoleColor.DarkYellow;
Console.WriteLine("Our dev workflow has changed! Use -? for help in the new options we have and how to pass parameters now.");
Console.ResetColor();
},
setupContent, args);
}
public static bool Parse(Action<CommandLineParser> parseBody, Action<CommandLineParser> helpHandler, Action<CommandLineParser, Exception> errorHandler, Setup setupContent, string[] args)
{
var help = false;
CommandLineParser parser = new CommandLineParser(args, setupContent);
parser.DefineOptionalQualifier("?", ref help, "Print this help guide.", null, null);
try
{
// RawPrint the help.
if (parser.HelpRequested != null)
{
parser._skipParameterSets = true;
parser._skipDefinitions = true;
}
parseBody(parser);
if (parser.HelpRequested != null)
{
helpHandler(parser);
return false;
}
parser.CompleteValidation();
return true;
}
catch (CommandLineParserException e)
{
errorHandler(parser, e);
return false;
}
}
/// <summary>
/// Qualifiers are command line parameters of the form -NAME:VALUE where NAME is an alphanumeric name and
/// VALUE is a string. The parser also accepts -NAME: VALUE and -NAME VALUE but not -NAME : VALUE For
/// boolean parameters, the VALUE can be dropped (which means true), and an empty string VALUE means false.
/// Thus -NAME means the same as -NAME:true and -NAME: means the same as -NAME:false (and boolean
/// qualifiers DON'T allow -NAME true or -NAME false).
///
/// The types that are supported are any type that has a static 'Parse' function that takes a string
/// (this includes all primitive types as well as DateTime, and Enumerations, as well as arrays of
/// parsable types (values are comma separated without space).
///
/// Qualifiers that are defined BEFORE any parameter sets apply to ALL parameter sets. Qualifiers that
/// are defined AFTER a parameter set will apply only to the parameter set that precedes them.
///
/// See code:#DefiningParametersAndQualifiers
/// See code:#Overview
/// <param name="name">The name of the qualifier.</param>
/// <param name="retVal">The place to put the parsed value</param>
/// <param name="helpText">Text to print for this qualifier. It will be word-wrapped. Newlines indicate
/// new paragraphs.</param>
/// </summary>
public void DefineOptionalQualifier<T>(string name, ref T retVal, string helpText, string defaultValue, List<string> legalValues)
{
object obj = DefineQualifier(name, typeof(T), retVal, helpText, false, defaultValue, legalValues);
if (obj != null)
retVal = (T)obj;
}
/// <summary>
/// Like code:DefineOptionalQualifier except it is an error if this parameter is not on the command line.
/// <param name="name">The name of the qualifer.</param>
/// <param name="retVal">The place to put the parsed value</param>
/// <param name="helpText">Text to print for this qualifer. It will be word-wrapped. Newlines indicate
/// new paragraphs.</param>
/// </summary>
public void DefineQualifier<T>(string name, ref T retVal, string helpText, string defaultValue, List<string> legalValues)
{
object obj = DefineQualifier(name, typeof(T), retVal, helpText, true, defaultValue, legalValues);
if (obj != null)
retVal = (T)obj;
}
/// <summary>
/// Specify additional aliases for a qualifier. This call must come BEFORE the call to
/// Define*Qualifier, since the definition needs to know about its aliases to do its job.
/// </summary>
public void DefineAliases(string officialName, params string[] aliases)
{
// TODO assert that aliases are defined before the Definition.
// TODO confirm no ambiguities (same alias used again).
if (_aliasDefinitions != null && _aliasDefinitions.ContainsKey(officialName))
throw new CommandLineParserDesignerException("Named parameter " + officialName + " already has been given aliases.");
if (_aliasDefinitions == null)
_aliasDefinitions = new Dictionary<string, string[]>();
_aliasDefinitions.Add(officialName, aliases);
}
/// <summary>
/// DefineParameter declares an unnamed mandatory parameter (basically any parameter that is not a
/// qualifier). These are given ordinal numbers (starting at 0). You should declare the parameter in the
/// desired order.
///
///
/// See code:#DefiningParametersAndQualifiers
/// See code:#Overview
/// <param name="name">The name of the parameter.</param>
/// <param name="retVal">The place to put the parsed value</param>
/// <param name="helpText">Text to print for this qualifer. It will be word-wrapped. Newlines indicate
/// new paragraphs.</param>
/// </summary>
public void DefineParameter<T>(string name, ref T retVal, string helpText)
{
object obj = DefineParameter(name, typeof(T), retVal, helpText, true);
if (obj != null)
retVal = (T)obj;
}
/// <summary>
/// Like code:DefineParameter except the parameter is optional.
/// These must come after non-optional (required) parameters.
/// </summary>
/// <param name="name">The name of the parameter</param>
/// <param name="retVal">The location to place the parsed value.</param>
/// <param name="helpText">Text to print for this qualifer. It will be word-wrapped. Newlines indicate
/// new paragraphs.</param>
public void DefineOptionalParameter<T>(string name, ref T retVal, string helpText)
{
object obj = DefineParameter(name, typeof(T), retVal, helpText, false);
if (obj != null)
retVal = (T)obj;
}
/// <summary>
/// A parameter set defines one of a set of 'commands' that decides how to parse the rest of the command
/// line. If this 'command' is present on the command line then 'val' is assigned to 'retVal'.
/// Typically 'retVal' is a variable of a enumerated type (one for each command), and 'val' is one
/// specific value of that enumeration.
///
/// * See code:#ParameterSets
/// * See code:#DefiningParametersAndQualifiers
/// * See code:#Overview
/// <param name="name">The name of the parameter set.</param>
/// <param name="retVal">The place to put the parsed value</param>
/// <param name="val">The value to place into 'retVal' if this parameter set is indicated</param>
/// <param name="helpText">Text to print for this qualifer. It will be word-wrapped. Newlines indicate
/// new paragraphs.</param>
/// </summary>
public void DefineParameterSet<T>(string name, ref T retVal, T val, string helpText)
{
if (DefineParameterSet(name, helpText))
retVal = val;
}
/// <summary>
/// There is one special parameter set called the default parameter set (whose name is empty) which is
/// used when a command line does not have one of the defined parameter sets. It is always present, even if
/// this method is not called, so calling this method is optional, however, by calling this method you can
/// add help text for this case. If present this call must be AFTER all other parameter set
/// definitions.
///
/// * See code:#DefaultParameterSet
/// * See code:#DefiningParametersAndQualifiers
/// * See code:#Overview
/// <param name="helpText">Text to print for this qualifer. It will be word-wrapped. Newlines indicate
/// new paragraphs.</param>
/// </summary>
public void DefineDefaultParameterSet(string helpText)
{
DefineParameterSet(String.Empty, helpText);
}
/// <summary>
/// This variation of DefineDefaultParameterSet has 'retVal' and 'val' parameters. If the command
/// line does not match any of the other parameter set definitions, then 'val' is assigned to 'retVal'.
/// Typically 'retVal' is a variable of a enumerated type and 'val' is a value of that type.
///
/// * See code:DefineDefaultParameterSet for more.
/// <param name="retVal">The place to put the parsed value</param>
/// <param name="val">The value to place into 'retVal' if this parameter set is indicated</param>
/// <param name="helpText">Text to print for this qualifer. It will be word-wrapped. Newlines indicate
/// new paragraphs.</param>
/// </summary>
public void DefineDefaultParameterSet<T>(ref T retVal, T val, string helpText)
{
if (DefineParameterSet(String.Empty, helpText))
retVal = val;
}
// You can influence details of command line parsing by setting the following properties.
// These should be set before the first call to Define* routines and should not change
// thereafter.
/// <summary>
/// By default parameter set specifiers must look like a qualifier (begin with a -), however setting
/// code:NoDashOnParameterSets will define a parameter set marker not to have any special prefix (just
/// the name itself).
/// </summary>
public bool NoDashOnParameterSets
{
get { return _noDashOnParameterSets; }
set
{
if (_noDashOnParameterSets != value)
ThrowIfNotFirst("NoDashOnParameterSets");
_noDashOnParameterSets = value;
}
}
/// <summary>
/// By default, the syntax (-Qualifier:Value) and (-Qualifier Value) are both allowed. However
/// this makes it impossible to use -Qualifier to specify that a qualifier is present but uses
/// a default value (you have to use (-Qualifier: )). Specifying code:NoSpaceOnQualifierValues
/// indicates that the syntax (-Qualifier Value) is not allowed, which allows this.
/// </summary>
/// TODO decide if we should keep this...
public bool NoSpaceOnQualifierValues
{
get { return _noSpaceOnQualifierValues; }
set
{
if (_noSpaceOnQualifierValues != value)
ThrowIfNotFirst("NoSpaceOnQualifierValues");
_noSpaceOnQualifierValues = value;
}
}
/// <summary>
/// If the positional parameters might look like named parameters (typically happens when the tail of the
/// command line is literal text), it is useful to stop the search for named parameters at the first
/// positional parameter.
///
/// Because some parameter sets might want this behavior and some might not, you specify the list of
/// parameter sets that you want to opt in.
///
/// The expectation is you do something like
/// * commandLine.ParameterSetsWhereQualifiersMustBeFirst = new string[] { "parameterSet1" };
///
/// The empty string is a wildcard that indicates all parameter sets have the qualifiersMustBeFirst
/// attribute. This is the only way to get this attribute on the default parameter set.
/// </summary>
public string[] ParameterSetsWhereQualifiersMustBeFirst
{
get { return _parameterSetsWhereQualifiersMustBeFirst; }
set
{
ThrowIfNotFirst("ParameterSetsWhereQualifiersMustBeFirst");
NoSpaceOnQualifierValues = true;
_parameterSetsWhereQualifiersMustBeFirst = value;
}
}
/// <summary>
/// By default qualifiers may begin with a '-' or a '/' character. Setting code:QualifiersUseOnlyDash will
/// make '/' an invalid qualifier marker (only '-' can be used).
/// </summary>
public bool QualifiersUseOnlyDash
{
get { return _qualifiersUseOnlyDash; }
set
{
if (_qualifiersUseOnlyDash != value)
ThrowIfNotFirst("OnlyDashForQualifiers");
_qualifiersUseOnlyDash = value;
}
}
// TODO remove? Not clear it is useful. Can be useful for a CMD.EXE alias (which provides a default), but a later use may override it.
/// <summary>
/// By default, a non-list qualifier can not be specified more than once (since one or the other will
/// have to be ignored). Normally an error is thrown. Setting code:LastQualifierWins makes it legal, and
/// the last qualifier is the one that is used.
/// </summary>
public bool LastQualifierWins
{
get { return _lastQualifierWins; }
set { _lastQualifierWins = value; }
}
public static string ExtraParameters
{
get { return _extraparameters; }
set { _extraparameters = value; }
}
// These routines are typically not needed because ParseForConsoleApplication does the work.
public CommandLineParser(string commandLine)
{
ParseWords(commandLine);
}
public CommandLineParser(string[] args, Setup setupContent)
{
_args = new List<string>(args);
_setupContent = setupContent;
}
/// <summary>
/// Check for any parameters that the user specified but that were not defined by a Define*Parameter call
/// and throw an exception if any are found.
///
/// Returns true if validation was completed. It can return false (rather than throwing) if the user
/// requested help (/?). Thus if this routine returns false, the 'GetHelp' should be called.
/// </summary>
public bool CompleteValidation()
{
// Check if there are any undefined parameters or ones specified twice.
foreach (int encodedPos in _dashedParameterEncodedPositions.Values)
{
throw new CommandLineParserException("Unexpected qualifier: " + _args[GetPosition(encodedPos)] + ".");
}
// Find any 'unused' parameters;
List<string> unusedParameters = new List<string>();
foreach(string param in _args)
{
if(!string.IsNullOrEmpty(param))
{
unusedParameters.Add(param);
}
}
if (unusedParameters.Count > 0)
{
//_extraparameters = string.Join(" ", unusedParameters);
throw new CommandLineParserException("Parameter not recognized: " + unusedParameters[0] + ".");
}
// TODO we should null out data structures we no longer need, to save space.
// Not critical because in the common case, the parser as a whole becomes dead.
if (_helpRequestedFor != null)
return false;
if (!_defaultParamSetEncountered)
{
if (_paramSetEncountered && _parameterSetName == null)
{
if (_noDashOnParameterSets && _curPosition < _args.Count)
throw new CommandLineParserException("Unrecognised command: " + _args[_curPosition]);
else
throw new CommandLineParserException("No command given.");
}
}
return true;
}
/// <summary>
/// Return the string representing the help for a single parameter set. If displayGlobalQualifiers is
/// true then qualifiers that apply to all parameter sets are also included, otherwise it is just the
/// parameters and qualifiers that are specific to that parameters set.
///
/// If parameterSetName is null, then the help for the entire program (all parameter
/// sets) is returned. If parameterSetName is not null (empty string is default parameter set),
/// then it generates help for the parameter set specified on the command line.
///
/// Since most of the time you don't need help, help information is NOT collected during the Define* calls
/// unless HelpRequested is true. If /? is seen on the command line first, then this works. You can
/// also force this by setting HelpRequested to True before calling all the Define* APIs.
///
/// </summary>
public string GetHelp(int maxLineWidth, string parameterSetName = null, bool displayGlobalQualifiers = true)
{
Debug.Assert(_mustParseHelpStrings);
if (!_mustParseHelpStrings) // Backup for retail code.
return null;
if (parameterSetName == null)
return GetFullHelp(maxLineWidth);
// Find the beginning of the parameter set parameters, as well as the end of the global parameters
// (which come before any parameter set).
int parameterSetBody = 0; // Points at body of the parameter set (parameters after the parameter set)
CommandLineParameter parameterSetParameter = null;
for (int i = 0; i < _parameterDescriptions.Count; i++)
{
CommandLineParameter parameter = _parameterDescriptions[i];
if (parameter.IsParameterSet)
{
parameterSetParameter = parameter;
if (string.Compare(parameterSetParameter.Name, parameterSetName, StringComparison.OrdinalIgnoreCase) == 0)
{
parameterSetBody = i + 1;
break;
}
}
}
if (parameterSetBody == 0 && parameterSetName != String.Empty)
return String.Empty;
// At this point parameterSetBody is properly set. Start generating strings.
StringBuilder sb = new StringBuilder();
// Create the 'Usage' line;
string appName = GetEntryAssemblyName();
string command = parameterSetParameter.Syntax();
sb.Append(command).AppendLine().AppendLine();
sb.Append("Usage: ").Append(appName);
if (parameterSetName.Length > 0)
{
sb.Append(' ');
sb.Append(command);
sb.Append(" [Action] (global settings)");
}
bool hasQualifiers = false;
bool hasParameters = false;
for (int i = parameterSetBody; i < _parameterDescriptions.Count; i++)
{
CommandLineParameter parameter = _parameterDescriptions[i];
if (parameter.IsParameterSet)
break;
if (parameter.IsPositional)
{
hasParameters = true;
sb.Append(' ').Append(parameter.Syntax());
}
else
{
hasQualifiers = true;
break;
}
}
sb.AppendLine();
// Print the help for the command itself.
/*if (parameterSetParameter != null && !string.IsNullOrEmpty(parameterSetParameter.HelpText))
{
sb.AppendLine();
Wrap(sb.Append(" "), parameterSetParameter.HelpText, 2, " ", maxLineWidth);
}*/
if (hasParameters)
{
sb.AppendLine().Append(" Parameters:").AppendLine();
for (int i = parameterSetBody; i < _parameterDescriptions.Count; i++)
{
CommandLineParameter parameter = _parameterDescriptions[i];
if (parameter.IsParameterSet)
break;
if (parameter.IsPositional)
{
ParameterHelp(parameter, sb, QualifierSyntaxWidth, maxLineWidth);
}
}
}
if (hasQualifiers)
{
sb.AppendLine().Append(" Actions:").AppendLine();
for (int i = parameterSetBody; i < _parameterDescriptions.Count; i++)
{
CommandLineParameter parameter = _parameterDescriptions[i];
if (parameter.IsParameterSet)
break;
if (parameter.IsNamed)
{
ParameterHelp(parameter, sb, QualifierSyntaxWidth, maxLineWidth);
string commandSettingsHelp = _setupContent.GetHelpCommand(parameterSetParameter.Name, parameter.Name);
Wrap(sb, commandSettingsHelp, QualifierSyntaxWidth + 7, new string(' ', QualifierSyntaxWidth + 7), maxLineWidth, false);
}
}
}
string globalQualifiers = null;
if (displayGlobalQualifiers)
globalQualifiers = GetHelpGlobalQualifiers(maxLineWidth);
if(!string.IsNullOrEmpty(globalQualifiers))
{
sb.AppendLine().Append('-', maxLineWidth - 1).AppendLine();
sb.Append("Global Settings:").AppendLine();
sb.AppendLine();
sb.Append(globalQualifiers);
}
return sb.ToString();
}
/// <summary>
/// Returns non-null if the user specified /? on the command line. It returns the word after /? (which may be the empty string).
/// </summary>
public string HelpRequested
{
get { return _helpRequestedFor; }
set { _helpRequestedFor = value; _mustParseHelpStrings = true; }
}
#region private
/// <summary>
/// CommandLineParameter contains the 'full' information for a parameter or qualifier used for generating help.
/// Most of the time we don't actually generate instances of this class (see mustParseHelpStrings).
/// </summary>
private class CommandLineParameter
{
public string Name { get { return _name; } }
public Type Type { get { return _type; } }
public string DefaultValue { get { return _defaultValue; } }
public List<string> LegalValues { get { return _legalValues; } }
public object Value { get { return _value; } }
public bool IsRequired { get { return _isRequired; } }
public bool IsPositional { get { return _isPositional; } }
public bool IsNamed { get { return !_isPositional; } }
public bool IsParameterSet { get { return _isParameterSet; } }
public string HelpText { get { return _helpText; } }
public override string ToString()
{
return "<CommandLineParameter " +
"Name=\"" + _name + "\" " +
"Type=\"" + _type + "\" " +
"DefaultValue=\"" + _defaultValue + "\" " +
"IsRequired=\"" + IsRequired + "\" " +
"IsPositional=\"" + IsPositional + "\" " +
"HelpText=\"" + HelpText + "\"/>";
}
public string Syntax()
{
string ret = _name;
if (IsNamed)
ret = "-" + ret;
if (Type.IsArray)
ret = ret + (IsNamed ? "," : " ") + "...";
if (!IsRequired)
ret = "[" + ret + "]";
return ret;
}
#region private
internal CommandLineParameter(string Name, object value, string helpText, Type type,
bool isRequired, bool isPositional, bool isParameterSet, List<string> legalvalues, string defaultValue = null)
{
_name = Name;
_value = value;
_defaultValue = defaultValue;
_type = type;
_helpText = helpText;
_isRequired = isRequired;
_isPositional = isPositional;
_isParameterSet = isParameterSet;
if(legalvalues != null)
{
_legalValues = new List<string>(legalvalues);
}
else
{
_legalValues = new List<string>();
}
}
private string _name;
private object _value;
private string _defaultValue;
private List<string> _legalValues;
private string _helpText;
private Type _type;
private bool _isRequired;
private bool _isPositional;
private bool _isParameterSet;
#endregion
}
private void ThrowIfNotFirst(string propertyName)
{
if (_qualiferEncountered || _positionalArgEncountered || _paramSetEncountered)
throw new CommandLineParserDesignerException("The property " + propertyName + " can only be set before any calls to Define* Methods.");
}
private static bool IsDash(char c)
{
// can be any of ASCII dash (0x002d), en dash (0x2013), em dash (0x2014) or horizontal bar (0x2015).
return c == '-' || ('\x2013' <= c && c <= '\x2015');
}
/// <summary>
/// Returns true if 'arg' is a qualifier (begins with / or -).
/// </summary>
private bool IsQualifier(string arg)
{
if (arg.Length < 1)
return false;
if (IsDash(arg[0]))
return true;
if (!_qualifiersUseOnlyDash && arg[0] == '/')
return true;
return false;
}
/// <summary>
/// Qualifiers have the syntax -/NAME=Value. This returns the NAME
/// </summary>
private string ParseParameterName(string arg)
{
string ret = null;
if (arg != null && IsQualifier(arg))
{
int endName = arg.IndexOfAny(s_separators);
if (endName < 0)
endName = arg.Length;
ret = arg.Substring(1, endName - 1);
}
return ret;
}
/// <summary>
/// Parses 'commandLine' into words (space separated items). Handles quoting (using double quotes)
/// and handles escapes of double quotes and backslashes with the \" and \\ syntax.
/// In theory this mimics the behavior of the parsing done before Main to parse the command line into
/// the string[].
/// </summary>
/// <param name="commandLine"></param>
private void ParseWords(string commandLine)
{
// TODO review this carefully.
_args = new List<string>();
int wordStartIndex = -1;
bool hasEscapedQuotes = false;
bool isResponseFile = false;
int numQuotes = 0;
// Loop to <=length to reuse the same logic for AddWord on spaces and the end of the string
for (int i = 0; i <= commandLine.Length; i++)
{
char c = (i < commandLine.Length ? commandLine[i] : '\0');
if (c == '"')
{
numQuotes++;
if (wordStartIndex < 0)
wordStartIndex = i;
i++;
for (;;)
{
if (i >= commandLine.Length)
throw new CommandLineParserException("Unmatched quote at position " + i + ".");
c = commandLine[i];
if (c == '"')
{
if (i > 0 && commandLine[i - 1] == '\\')
hasEscapedQuotes = true;
else
break;
}
i++;
}
}
else if (c == ' ' || c == '\t' || c == '\0')
{
if (wordStartIndex >= 0)
{
AddWord(ref commandLine, wordStartIndex, i, numQuotes, hasEscapedQuotes, isResponseFile);
wordStartIndex = -1;
hasEscapedQuotes = false;
numQuotes = 0;
isResponseFile = false;
}
}
else if (c == '@')
{
isResponseFile = true;
}
else
{
if (wordStartIndex < 0)
wordStartIndex = i;
}
}
}
private void AddWord(ref string commandLine, int wordStartIndex, int wordEndIndex, int numQuotes, bool hasEscapedQuotes, bool isResponseFile)
{
if (!_seenExeArg)
{
_seenExeArg = true;
return;
}
string word;
if (numQuotes > 0)
{
// Common case, the whole word is quoted, and no escaping happened.
if (!hasEscapedQuotes && numQuotes == 1 && commandLine[wordStartIndex] == '"' && commandLine[wordEndIndex - 1] == '"')
word = commandLine.Substring(wordStartIndex + 1, wordEndIndex - wordStartIndex - 2);
else
{
// Remove "" (except for quoted quotes!)
StringBuilder sb = new StringBuilder();
for (int i = wordStartIndex; i < wordEndIndex;)
{
char c = commandLine[i++];
if (c != '"')
{
if (c == '\\' && i < wordEndIndex && commandLine[i] == '"')
{
sb.Append('"');
i++;
}
else
sb.Append(c);
}
}
word = sb.ToString();
}
}
else
word = commandLine.Substring(wordStartIndex, wordEndIndex - wordStartIndex);
if (isResponseFile)
{
string responseFile = word;
if (!File.Exists(responseFile))
throw new CommandLineParserException("Response file '" + responseFile + "' does not exist!");
// Replace the commandline with the contents from the text file.
StringBuilder sb = new StringBuilder();
foreach (string line in File.ReadAllLines(responseFile))
{
string cleanLine = line.Trim();
if (string.IsNullOrEmpty(cleanLine))
continue;
sb.Append(cleanLine);
sb.Append(' ');
}
if (sb.Length > 0)
commandLine += " " + sb.ToString().Trim();
}
else
{
_args.Add(word);
}
}
/// <summary>
/// Returns the index in the 'args' array of the next instance of the 'name' qualifier
/// (or of one of its 'aliases'). Returns -1 if there is no next instance of the qualifier.
/// </summary>
private int GetNextOccurrenceQualifier(string name, string[] aliases)
{
Debug.Assert(_args != null);
Debug.Assert(_dashedParameterEncodedPositions != null);
int ret = -1;
string match = null;
int encodedPos;
if (_dashedParameterEncodedPositions.TryGetValue(name, out encodedPos))
{
match = name;
ret = GetPosition(encodedPos);
}
if (aliases != null)
{
foreach (string alias in aliases)
{
int aliasEncodedPos;
if (_dashedParameterEncodedPositions.TryGetValue(alias, out aliasEncodedPos))
{
int aliasPos = GetPosition(aliasEncodedPos);
// if this alias occurs before the officialName or if no officialName was found
// choose the alias as a match
if (aliasPos < ret || ret == -1)
{
name = alias;
encodedPos = aliasEncodedPos;
ret = aliasPos;
match = name;
}
}
}
}
if (match != null)
{
if (!IsMultiple(encodedPos))
_dashedParameterEncodedPositions.Remove(match);
else
{
int nextPos = -1;
for (int i = ret + 1; i < _args.Count; i++)
{
if (string.Compare(ParseParameterName(_args[i]), name, StringComparison.OrdinalIgnoreCase) == 0)
{
nextPos = i;
break;
}
}
if (nextPos >= 0)
_dashedParameterEncodedPositions[name] = SetMultiple(nextPos);
else
_dashedParameterEncodedPositions.Remove(name);
}
}
return ret;
}
// Phase 2 parsing: work out which words look like qualifiers (but without knowledge of which qualifiers the command supports)
/// <summary>
/// Find the locations of all arguments that look like named parameters.
/// </summary>
private void ParseWordsIntoQualifiers()
{
_dashedParameterEncodedPositions = new Dictionary<string, int>(StringComparer.OrdinalIgnoreCase);
bool paramSetEncountered = false;
string parameterSet = string.Empty;
for (int i = 0; i < _args.Count; i++)
{
string arg = _args[i];
if (arg == null)
continue;
string name = ParseParameterName(arg);
if (name != null)
{
if (IsDash(name[0]))
{
_args[i] = null;
i++;
string[] extraP = new string[_args.Count - i];
for (int j = 0; i < _args.Count; i++, j++)
{
extraP[j] = _args[i];
_args[i] = null;
}
_extraparameters = string.Join(" ", extraP);
break;
}
if (name == "?") // Did the user request help?
{
_args[i] = null;
if(paramSetEncountered)
{
_helpRequestedFor = parameterSet;
}
else
{
_helpRequestedFor = String.Empty;
}
/*if (i + 1 < _args.Count)
{
i++;
_helpRequestedFor = _args[i];
_args[i] = null;
}*/
_mustParseHelpStrings = true;
break;
}
int position = i;
if (_dashedParameterEncodedPositions.TryGetValue(name, out position))
position = SetMultiple(position);
else
position = i;
_dashedParameterEncodedPositions[name] = position;
if (!paramSetEncountered && !_noDashOnParameterSets && IsParameterSetWhereQualifiersMustBeFirst(name))
break;
}
else
{
if (!paramSetEncountered)
{
if (_noDashOnParameterSets && IsParameterSetWhereQualifiersMustBeFirst(arg))
break;
else if (IsParameterSetWhereQualifiersMustBeFirst(String.Empty)) // Then we are the default parameter set
break;
paramSetEncountered = true; // If we have hit a parameter, we must have hit a parameter set.
parameterSet = arg;
}
}
}
}
private bool IsParameterSetWhereQualifiersMustBeFirst(string name)
{
if (_parameterSetsWhereQualifiersMustBeFirst != null)
{
foreach (string parameterSetName in _parameterSetsWhereQualifiersMustBeFirst)
{
if (string.Compare(name, parameterSetName, StringComparison.OrdinalIgnoreCase) == 0)
return true;
}
}
return false;
}
// Phase 3, processing user definitions of qualifiers, parameter sets, etc.
/// <summary>
/// Defines a named qualifier called 'name' of the given 'type', consuming every occurrence
/// of it (and its aliases) from the argument list and returning the parsed value
/// (or null if the qualifier is not present on the command line).
/// </summary>
private object DefineQualifier(string name, Type type, object qualifierValue, string helpText, bool isRequired, string defaultValue, List<string> legalValues)
{
Debug.Assert(_args != null);
if (_dashedParameterEncodedPositions == null)
ParseWordsIntoQualifiers();
_qualifierEncountered = true;
if (_mustParseHelpStrings)
AddHelp(new CommandLineParameter(name, qualifierValue, helpText, type, isRequired, false, false, legalValues, defaultValue));
if (_skipDefinitions)
return null;
if (_positionalArgEncountered && !_noSpaceOnQualifierValues)
throw new CommandLineParserDesignerException("Definitions of Named parameters must come before definitions of positional parameters");
object ret = null;
string[] aliases = null;
if (_aliasDefinitions != null)
_aliasDefinitions.TryGetValue(name, out aliases);
int occurrenceCount = 0;
List<Array> arrayValues = null;
for (;;)
{
int position = GetNextOccurrenceQualifier(name, aliases);
if (position < 0)
break;
string parameterStr = _args[position];
_args[position] = null;
string value = null;
int colonIdx = parameterStr.IndexOfAny(s_separators);
if (colonIdx >= 0)
value = parameterStr.Substring(colonIdx + 1);
if (type == typeof(bool) || type == typeof(bool?))
{
if (value == null)
value = "true";
else if (value == String.Empty)
value = "false";
}
else if (value == null)
{
/*int valuePos = position + 1;
if (_noSpaceOnQualifierValues || valuePos >= _args.Count || _args[valuePos] == null)
{
string message = "Parameter " + name + " is missing a value.";
if (_noSpaceOnQualifierValues)
message += " The syntax -" + name + ":<value> must be used.";
throw new CommandLineParserException(message);
}
value = _args[valuePos];
// Note that I don't absolutely need to exclude values that look like qualifiers, but it does
// complicate the code, and also leads to confusing error messages when we parse the command
// in a very different way then the user is expecting. Since you can provide values that
// begin with a '-' by doing -qualifier:-value instead of -qualifier -value I force the issue
// by excluding it here. TODO: this makes negative numbers harder...
if (value.Length > 0 && IsQualifier(value))
throw new CommandLineParserException("Use " + name + ":" + value + " if " + value +
" is meant to be value rather than a named parameter");
_args[valuePos] = null;*/
value = string.IsNullOrEmpty(defaultValue) ? "true" : defaultValue;
}
ret = ParseValue(value, type, name);
if (type.IsArray)
{
if (arrayValues == null)
arrayValues = new List<Array>();
arrayValues.Add((Array)ret);
ret = null;
}
else if (occurrenceCount > 0 && !_lastQualifierWins)
throw new CommandLineParserException("Parameter " + name + " specified more than once.");
occurrenceCount++;
}
if (occurrenceCount == 0 && isRequired)
throw new CommandLineParserException("Required named parameter " + name + " not present.");
if (arrayValues != null)
ret = ConcatenateArrays(type, arrayValues);
return ret;
}
private object DefineParameter(string name, Type type, object parameterValue, string helpText, bool isRequired)
{
Debug.Assert(_args != null);
if (_dashedParameterEncodedPositions == null)
ParseWordsIntoQualifiers();
if (!isRequired)
_optionalPositionalArgEncountered = true;
else if (_optionalPositionalArgEncountered)
throw new CommandLineParserDesignerException("Optional positional parameters can't preceed required positional parameters");
_positionalArgEncountered = true;
if (_mustParseHelpStrings)
AddHelp(new CommandLineParameter(name, parameterValue, helpText, type, isRequired, true, false, null));
if (_skipDefinitions)
return null;
// Skip any nulled out args (things that used to be named parameters)
while (_curPosition < _args.Count && _args[_curPosition] == null)
_curPosition++;
object ret = null;
if (type.IsArray)
{
// Pass 1, Get the count
int count = 0;
int argPosition = _curPosition;
while (argPosition < _args.Count)
if (_args[argPosition++] != null)
count++;
if (count == 0 && isRequired)
throw new CommandLineParserException("Required positional parameter " + name + " not present.");
Type elementType = type.GetElementType();
Array array = Array.CreateInstance(elementType, count);
argPosition = _curPosition;
int index = 0;
while (argPosition < _args.Count)
{
string arg = _args[argPosition++];
if (arg != null)
array.SetValue(ParseValue(arg, elementType, name), index++);
}
_curPosition = _args.Count;
ret = array;
}
else if (_curPosition < _args.Count) // A 'normal' positional parameter with a value
{
ret = ParseValue(_args[_curPosition++], type, name);
}
else // No value
{
if (isRequired)
throw new CommandLineParserException("Required positional parameter " + name + " not present.");
}
return ret;
}
private bool DefineParameterSet(string name, string helpText)
{
Debug.Assert(_args != null);
if (_dashedParameterEncodedPositions == null)
ParseWordsIntoQualifiers();
if (!_paramSetEncountered && _positionalArgEncountered)
throw new CommandLineParserDesignerException("Positional parameters must not preceed parameter set definitions.");
_paramSetEncountered = true;
_positionalArgEncountered = false; // each parameter set gets a new arg set
_optionalPositionalArgEncountered = false;
if (_defaultParamSetEncountered)
throw new CommandLineParserDesignerException("The default parameter set must be defined last.");
bool isDefaultParameterSet = (name.Length == 0);
if (isDefaultParameterSet)
_defaultParamSetEncountered = true;
if (_mustParseHelpStrings)
AddHelp(new CommandLineParameter(name, null, helpText, typeof(bool), true, _noDashOnParameterSets, true, null));
if (_skipParameterSets)
return false;
// Have we just finished with the parameter set that was actually on the command line?
if (_parameterSetName != null)
{
_skipDefinitions = true;
_skipParameterSets = true; // if yes, we are done, ignore all parameter set definitions.
return false;
}
bool ret = isDefaultParameterSet;
if (!isDefaultParameterSet)
{
for (int i = 0; i < _args.Count; i++)
{
string arg = _args[i];
if (arg == null)
continue;
if (IsQualifier(arg))
{
if (!_noDashOnParameterSets &&
string.Compare(arg, 1, name, 0, int.MaxValue, StringComparison.OrdinalIgnoreCase) == 0)
{
_dashedParameterEncodedPositions.Remove(name);
_args[i] = null;
ret = true;
_parameterSetName = name;
break;
}
}
else
{
if (_noDashOnParameterSets && (string.Compare(arg, name, StringComparison.OrdinalIgnoreCase) == 0))
{
_args[i] = null;
ret = true;
_parameterSetName = name;
}
break;
}
}
}
_skipDefinitions = !((_parameterSetName != null) || isDefaultParameterSet);
// To avoid errors when users ask for help, skip any parsing once we have found a parameter set.
if (_helpRequestedFor != null && ret)
{
_skipDefinitions = true;
_skipParameterSets = true;
}
return ret;
}
private Array ConcatenateArrays(Type arrayType, List<Array> arrays)
{
int totalCount = 0;
for (int i = 0; i < arrays.Count; i++)
totalCount += arrays[i].Length;
Type elementType = arrayType.GetElementType();
Array ret = Array.CreateInstance(elementType, totalCount);
int pos = 0;
for (int i = 0; i < arrays.Count; i++)
{
Array source = arrays[i];
for (int j = 0; j < source.Length; j++)
ret.SetValue(source.GetValue(j), pos++);
}
return ret;
}
// Phase 3A, parsing qualifier strings into .NET types (int, enums, ...)
private object ParseValue(string valueString, Type type, string parameterName)
{
try
{
if (type == typeof(string))
return valueString;
else if (type == typeof(bool))
return bool.Parse(valueString);
else if (type == typeof(int))
{
if (valueString.Length > 2 && valueString[0] == '0' && (valueString[1] == 'x' || valueString[1] == 'X'))
return int.Parse(valueString.Substring(2), System.Globalization.NumberStyles.AllowHexSpecifier);
else
return int.Parse(valueString);
}
else if (type.GetTypeInfo().IsEnum)
return ParseCompositeEnumValue(valueString, type, parameterName);
else if (type == typeof(string[]))
return valueString.Split(',');
else if (type.IsArray)
{
// TODO I need some way of handling strings with ',' in them.
Type elementType = type.GetElementType();
string[] elementStrings = valueString.Split(',');
Array array = Array.CreateInstance(elementType, elementStrings.Length);
for (int i = 0; i < elementStrings.Length; i++)
array.SetValue(ParseValue(elementStrings[i], elementType, parameterName), i);
return array;
}
else if (type.GetTypeInfo().IsGenericType && type.GetGenericTypeDefinition() == typeof(Nullable<>))
{
if (valueString.Length == 0)
return null;
return ParseValue(valueString, type.GetGenericArguments()[0], parameterName);
}
else
{
System.Reflection.MethodInfo parseMethod = type.GetMethod("Parse", new Type[] { typeof(string) });
if (parseMethod == null)
throw new CommandLineParserException("Could not find a parser for type " + type.Name + " for parameter " + parameterName);
return parseMethod.Invoke(null, new object[] { valueString });
}
}
catch (CommandLineParserException)
{
throw;
}
catch (Exception e)
{
if (e is System.Reflection.TargetInvocationException)
e = e.InnerException;
string paramStr = String.Empty;
if (parameterName != null)
paramStr = " for parameter " + parameterName;
if (e is FormatException)
throw new CommandLineParserException("The value '" + valueString + "' can not be parsed to a " + type.Name + paramStr + ".");
else
throw new CommandLineParserException("Failure while converting '" + valueString + "' to a " + type.Name + paramStr + ".");
}
}
/// <summary>
/// Enums that are bitfields can have multiple values. Supports + and - (for ORing and removing bits).
/// Returns the final enum value for 'valueString', which is a string form of 'type'
/// for the parameter 'parameterName'.
/// </summary>
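/// <remarks>
/// For example, given a hypothetical [Flags] enum with members A=1, B=2, C=4, the
/// string "A+C" yields A|C, and "A+C-A" yields just C.
/// </remarks>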
private object ParseCompositeEnumValue(string valueString, Type type, string parameterName)
{
bool knownToBeFlagsEnum = false;
long retValue = 0;
int curIdx = 0;
bool negate = false;
while (curIdx < valueString.Length)
{
int nextIdx = valueString.IndexOfAny(new char[] { ',', '+', '-' }, curIdx);
if (nextIdx < 0)
nextIdx = valueString.Length;
object nextValue = ParseSimpleEnumValue(valueString.Substring(curIdx, nextIdx - curIdx), type, parameterName);
if (curIdx == 0 && nextIdx == valueString.Length)
return nextValue;
if (!knownToBeFlagsEnum)
{
if (!type.GetTypeInfo().IsDefined(typeof(FlagsAttribute)))
{
string paramStr = String.Empty;
if (parameterName != null)
paramStr = " for parameter " + parameterName;
throw new CommandLineParserException("The value '" + valueString + paramStr + " can't have the + or - operators.");
}
knownToBeFlagsEnum = true;
}
long newValue;
if (Enum.GetUnderlyingType(type) == typeof(long))
newValue = (long)nextValue;
else
newValue = (int)nextValue;
if (negate)
retValue &= ~newValue;
else
retValue |= newValue;
negate = (nextIdx < valueString.Length && valueString[nextIdx] == '-');
curIdx = nextIdx + 1;
}
return Enum.ToObject(type, retValue);
}
private object ParseSimpleEnumValue(string valueString, Type type, string parameterName)
{
try
{
if (valueString.StartsWith("0x"))
{
if (Enum.GetUnderlyingType(type) == typeof(long))
return long.Parse(valueString.Substring(2), System.Globalization.NumberStyles.HexNumber);
return int.Parse(valueString.Substring(2), System.Globalization.NumberStyles.HexNumber);
}
return Enum.Parse(type, valueString, ignoreCase: true);
}
catch (ArgumentException)
{
string paramStr = String.Empty;
if (parameterName != null)
paramStr = " for parameter " + parameterName;
StringBuilder sb = new StringBuilder();
sb.Append("The value '").Append(valueString).Append("'").Append(paramStr).Append(" is not a member of the enumeration ").Append(type.Name).Append(".").AppendLine();
sb.Append("The legal values are either a decimal integer, 0x and a hex integer or").AppendLine();
foreach (string name in Enum.GetNames(type))
sb.Append(" ").Append(name).AppendLine();
if (type.GetTypeInfo().IsDefined(typeof(FlagsAttribute)))
sb.Append("The + and - operators can be used to combine the values.").AppendLine();
throw new CommandLineParserException(sb.ToString());
}
}
private StringBuilder GetIntroTextForHelp(int maxLineWidth)
{
string appName = GetEntryAssemblyName();
StringBuilder sb = new StringBuilder();
sb.AppendLine();
string text = "The Run Command Tool is now in charge of running the dev workflow steps. Each step has its own command and set of actions that are listed below. " +
"You could also pass Global Settings to the commands.";
Wrap(sb, text, 0, String.Empty, maxLineWidth, true);
text = "To pass additional parameters that are not described in the Global Settings section, use `--`. After this command, the Run Command Tool will stop processing arguments and will pass all the information as it is to the selected command.";
Wrap(sb, text, 0, String.Empty, maxLineWidth, true);
text = "The information comes from a config.json file. By default the file is in the root of the repo. Otherwise the first parameter should be the path to the config.json file.";
Wrap(sb, text, 0, String.Empty, maxLineWidth, true);
text = "For more information about the Run Command Tool: https://github.com/dotnet/buildtools/blob/master/Documentation/RunCommand.md";
Wrap(sb, text, 0, String.Empty, maxLineWidth, true);
sb.AppendLine().AppendLine().Append("Syntax: run [Command] [Action] (global settings)");
sb.AppendLine().Append('-', maxLineWidth - 1).AppendLine();
return sb;
}
/// <summary>
/// Return a string giving the help for the command, word wrapped at 'maxLineWidth'
/// </summary>
private string GetFullHelp(int maxLineWidth)
{
// Do we have non-default parameter sets?
bool hasParamSets = false;
foreach (CommandLineParameter parameter in _parameterDescriptions)
{
if (parameter.IsParameterSet && parameter.Name != String.Empty)
{
hasParamSets = true;
break;
}
}
if (!hasParamSets)
return GetHelp(maxLineWidth, String.Empty, true);
StringBuilder sb = new StringBuilder();
// Always print the default parameter set first.
if (_defaultParamSetEncountered)
{
sb.Append(GetHelp(maxLineWidth, String.Empty, false));
}
foreach (CommandLineParameter parameter in _parameterDescriptions)
{
if (parameter.IsParameterSet && parameter.Name.Length != 0)
{
sb.Append(GetHelp(maxLineWidth, parameter.Name, false));
}
}
string globalQualifiers = GetHelpGlobalQualifiers(maxLineWidth);
if (globalQualifiers.Length > 0)
{
sb.AppendLine().Append('-', maxLineWidth - 1).AppendLine();
sb.Append("Global settings to all commands:").AppendLine();
sb.AppendLine();
sb.Append(globalQualifiers);
}
return sb.ToString();
}
/// <summary>
/// Prints a string to the console in a nice way. In particular it
/// displays a screen full of data and then asks the user to type a space to continue.
/// </summary>
/// <param name="helpString"></param>
private static void DisplayStringToConsole(string helpString)
{
// TODO we do paging, but this is not what we want when output is redirected.
bool first = true;
for (int pos = 0; ;)
{
int nextPos = pos;
int numLines = (first ? GetConsoleHeight() - 2 : GetConsoleHeight() * 3 / 4) - 1;
first = false;
for (int j = 0; j < numLines; j++)
{
int search = helpString.IndexOf("\r\n", nextPos) + 2;
if (search >= 2)
nextPos = search;
else
nextPos = helpString.Length;
}
Console.Write(helpString.Substring(pos, nextPos - pos));
if (nextPos == helpString.Length)
break;
Console.Write("[Press space to continue...]");
Console.Read();
Console.Write("\r \r");
pos = nextPos;
}
}
private void AddHelp(CommandLineParameter parameter)
{
if (_parameterDescriptions == null)
_parameterDescriptions = new List<CommandLineParameter>();
_parameterDescriptions.Add(parameter);
}
private static void ParameterHelp(CommandLineParameter parameter, StringBuilder sb, int firstColumnWidth, int maxLineWidth)
{
// TODO alias information.
sb.Append(" ").Append(parameter.Syntax().PadRight(firstColumnWidth)).Append(' ');
string helpText = parameter.HelpText;
string defValue = string.Empty;
// Bool type is implied on named parameters, so don't echo a default value for them.
bool shouldPrint = !(parameter.IsNamed && parameter.Type == typeof(bool));
if (shouldPrint)
{
if (!string.IsNullOrEmpty(parameter.DefaultValue))
{
helpText += "\n => Default value: " + parameter.DefaultValue.ToString();
}
}
if (parameter.LegalValues.Count > 0)
helpText = helpText + "\n => Legal values: [" + string.Join(", ", parameter.LegalValues) + "].";
Wrap(sb, helpText, firstColumnWidth + 5, new string(' ', firstColumnWidth + 5), maxLineWidth, true);
}
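/// <summary>
/// Word-wraps 'text' into 'sb' so lines stay under 'maxLineWidth' columns, starting at
/// 'startColumn' and prefixing continuation lines with 'linePrefix'. Embedded newlines
/// in 'text' start new paragraphs.
/// </summary>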
private static void Wrap(StringBuilder sb, string text, int startColumn, string linePrefix, int maxLineWidth, bool first)
{
if (text != null)
{
//bool first = true;
int column = startColumn;
string previousWord = String.Empty;
string[] paragraphs = text.Split(new char[] { '\n' }, StringSplitOptions.RemoveEmptyEntries);
foreach (string paragraph in paragraphs)
{
if (!first)
{
column = 0;
linePrefix = new string(' ', startColumn);
}
string[] words = paragraph.Split((char[])null, StringSplitOptions.RemoveEmptyEntries); // Split on whitespace
foreach (string word in words)
{
if (column + word.Length >= maxLineWidth)
{
sb.AppendLine();
column = 0;
}
if (column == 0)
{
sb.Append(linePrefix);
column = linePrefix.Length;
}
else if (!first)
{
// add an extra space at the end of sentences.
if (previousWord.Length > 0 && previousWord[previousWord.Length - 1] == '.')
{
sb.Append(' ');
column++;
}
sb.Append(' ');
column++;
}
sb.Append(word);
previousWord = word;
column += word.Length;
first = false;
}
sb.AppendLine();
}
}
sb.AppendLine();
}
private string GetHelpGlobalQualifiers(int maxLineWidth)
{
if (!_paramSetEncountered)
return String.Empty;
StringBuilder sb = new StringBuilder();
for (int i = 0; i < _parameterDescriptions.Count; i++)
{
CommandLineParameter parameter = _parameterDescriptions[i];
if (parameter.IsParameterSet)
break;
if (parameter.IsNamed)
ParameterHelp(parameter, sb, QualifierSyntaxWidth, maxLineWidth);
}
return sb.ToString();
}
private static int GetConsoleWidth()
{
return 120; // Can't retrieve console width in .NET Core
}
private static int GetConsoleHeight()
{
return 100; // Can't retrieve console height in .NET Core
}
private static string GetEntryAssemblyName()
{
Assembly entryAssembly = Assembly.GetEntryAssembly();
if (entryAssembly != null)
{
return Path.GetFileNameWithoutExtension(entryAssembly.ManifestModule.Name);
}
else
{
return string.Empty;
}
}
// TODO expose the ability to change this?
private static char[] s_separators = new char[] { ':', '=' };
// tweaks the user can specify
private bool _noDashOnParameterSets = true;
private bool _noSpaceOnQualifierValues;
private string[] _parameterSetsWhereQualifiersMustBeFirst;
private bool _qualifiersUseOnlyDash = true;
private bool _lastQualifierWins;
private static string _extraparameters;
// In order to produce help, we need to remember everything useful about all the parameters. This list
// does this. It is only done when help is needed, so it is not here in the common scenario.
private List<CommandLineParameter> _parameterDescriptions;
private int _qualifierSyntaxWidth; // When printing help, how much to indent any line that wraps.
public int QualifierSyntaxWidth
{
get
{
// Find the optimal size for the 'first column' of the help text.
if (_qualifierSyntaxWidth == 0)
{
_qualifierSyntaxWidth = 35;
if (_parameterDescriptions != null)
{
int maxSyntaxWidth = 0;
foreach (CommandLineParameter parameter in _parameterDescriptions)
maxSyntaxWidth = Math.Max(maxSyntaxWidth, parameter.Syntax().Length);
_qualifierSyntaxWidth = Math.Max(8, maxSyntaxWidth + 1); // +1 leaves an extra space
}
}
return _qualifierSyntaxWidth;
}
}
/// <summary>
/// Qualifiers can have aliases (e.g. for short names). This holds these aliases.
/// </summary>
private Dictionary<string, string[]> _aliasDefinitions; // Often null if no aliases are defined.
// Because we do all the parsing for a single parameter at once, it is useful to know quickly if the
// parameter even exists, exists once, or exists more than once. This dictionary holds this. It is
// initialized in code:ParseWordsIntoQualifiers. The value in this dictionary is an encoded position
// which encodes both the position and whether this qualifier occurs multiple times (see SetMultiple,
// GetPosition, IsMultiple methods).
private Dictionary<string, int> _dashedParameterEncodedPositions;
// We steal the top bit to represent whether the parameter occurs more than once.
private const int MultiplePositions = unchecked((int)0x80000000);
private static int SetMultiple(int encodedPos) { return encodedPos | MultiplePositions; }
private static int GetPosition(int encodedPos) { return encodedPos & ~MultiplePositions; }
private static bool IsMultiple(int encodedPos) { return (encodedPos & MultiplePositions) != 0; }
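// For example, an encoded value of (3 | MultiplePositions) means the qualifier first
// occurs at argument index 3 and also occurs again later on the command line.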
// As we parse qualifiers we remove them from the command line by nulling them out, so the list
// ultimately ends up with only the positional parameters being non-null.
private List<string> _args;
private Setup _setupContent;
private int _curPosition; // All arguments before this position have been processed.
private bool _skipParameterSets; // Have we found the parameter set qualifier, so we don't look at any others.
private bool _skipDefinitions; // Should we skip all subsequent definitions (typically until the next parameter set def)
private string _parameterSetName; // if we matched a parameter set, this is it.
private bool _mustParseHelpStrings; // we have to go to the overhead of parsing the help strings (because user wants help)
private string _helpRequestedFor; // Set when the user specifies /? on the command line. This is the word after the /?; it may be empty.
private bool _seenExeArg; // Used in AddWord, indicates we have seen the exe itself (before the args) on the command line
private bool _paramSetEncountered;
private bool _defaultParamSetEncountered;
private bool _positionalArgEncountered;
private bool _optionalPositionalArgEncountered;
private bool _qualifierEncountered;
#endregion
}
/// <summary>
/// Run-time parsing errors throw this exception. These are expected to be caught and the error message
/// printed out to the user. Thus the messages should be 'user friendly'.
/// </summary>
public class CommandLineParserException : Exception
{
public CommandLineParserException(string message) : base(message) { }
}
/// <summary>
/// This exception represents a compile-time error in the command line parsing. These should not happen in
/// correctly written programs.
/// </summary>
public class CommandLineParserDesignerException : Exception
{
public CommandLineParserDesignerException(string message) : base(message) { }
}
}
| 1 | 10,739 | Are these mutually exclusive options (parameters vs qualifiers)? If parameters aren't supported, may want to consider throwing an exception for "hasParameters". If parameters and qualifiers are both legit options, then maybe change this to `if (hasQualifiers) { ... } if(!hasQualifiers && !hasParameters)` | dotnet-buildtools | .cs |
@@ -43,7 +43,6 @@ public class InvocationStartProcessingEventListener implements EventListener {
if (InvocationType.PRODUCER.equals(event.getInvocationType())) {
ProducerInvocationMonitor monitor = registryMonitor.getProducerInvocationMonitor(event.getOperationName());
monitor.getWaitInQueue().increment(-1);
- monitor.getLifeTimeInQueue().update(event.getInQueueNanoTime());
}
}
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicecomb.metrics.core.event;
import org.apache.servicecomb.core.metrics.InvocationStartProcessingEvent;
import org.apache.servicecomb.foundation.common.event.Event;
import org.apache.servicecomb.foundation.common.event.EventListener;
import org.apache.servicecomb.metrics.core.monitor.ProducerInvocationMonitor;
import org.apache.servicecomb.metrics.core.monitor.RegistryMonitor;
import org.apache.servicecomb.swagger.invocation.InvocationType;
public class InvocationStartProcessingEventListener implements EventListener {
private final RegistryMonitor registryMonitor;
public InvocationStartProcessingEventListener(RegistryMonitor registryMonitor) {
this.registryMonitor = registryMonitor;
}
@Override
public Class<? extends Event> getConcernedEvent() {
return InvocationStartProcessingEvent.class;
}
@Override
public void process(Event data) {
InvocationStartProcessingEvent event = (InvocationStartProcessingEvent) data;
if (InvocationType.PRODUCER.equals(event.getInvocationType())) {
ProducerInvocationMonitor monitor = registryMonitor.getProducerInvocationMonitor(event.getOperationName());
monitor.getWaitInQueue().increment(-1);
monitor.getLifeTimeInQueue().update(event.getInQueueNanoTime());
}
}
}
| 1 | 8,790 | Why did you remove this line? | apache-servicecomb-java-chassis | java |
@@ -80,6 +80,7 @@ MSG(object_does_not_provide_read_access_to_columns,
MSG(object_does_not_provide_write_access_to_columns,
"Given object does not provide write access to columns")
MSG(unsupported_conversion_types, "Unsupported conversion types")
+MSG(invalid_indices, "Invalid indices")
/* Ranges */
MSG(invalid_range_of_rows, "Invalid range of rows") | 1 | /*******************************************************************************
* Copyright 2020-2021 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include "oneapi/dal/detail/error_messages.hpp"
#define MSG(id, text) \
const char* error_messages::id() noexcept { \
return text; \
}
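// For example, MSG(file_not_found, "File not found") expands to:
// const char* error_messages::file_not_found() noexcept { return "File not found"; }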
namespace oneapi::dal::detail {
namespace v1 {
/* Common */
MSG(array_does_not_contain_mutable_data, "Array does not contain mutable data")
MSG(feature_index_is_out_of_range, "Feature index is out of range")
MSG(incompatible_array_reinterpret_cast_types,
"Cannot reinterpret array to provided type, "
"because resulting array size would not match source array size")
MSG(only_homogen_table_is_supported, "Only homogen table is supported")
MSG(overflow_found_in_multiplication_of_two_values,
"Overflow found in multiplication of two values")
MSG(overflow_found_in_sum_of_two_values, "Overflow found in sum of two values")
MSG(unknown_status_code, "Unknown status code")
MSG(unsupported_data_layout, "Unsupported data layout")
MSG(unsupported_data_type, "Requested data type is not supported")
MSG(unsupported_device_type, "Requested device type is not supported")
MSG(small_data_block, "Data block size is smaller than expected")
MSG(invalid_data_block_size, "Invalid data block size")
MSG(method_not_implemented, "Method is not implemented")
MSG(unsupported_feature_type, "Feature type is not supported")
MSG(unknown_memcpy_error, "Unknown error during memory copying")
MSG(unknown_usm_pointer_type, "USM pointer type is unknown in the current context")
MSG(queues_in_different_contexts, "Provided queues are in different contexts")
MSG(unsupported_usm_alloc, "Requested USM alloc type is not supported")
/* Primitives */
MSG(invalid_number_of_elements_to_process, "Invalid number of elements to process")
MSG(invalid_number_of_elements_to_sort, "Invalid number of elements to sort")
MSG(failed_to_compute_eigenvectors, "Failed to compute eigenvectors")
/* Tables */
MSG(allocated_memory_size_is_not_enough_to_copy_data,
"Allocated memory size is not enough to copy the data")
MSG(cannot_get_data_type_from_empty_metadata, "Cannot get data type from empty metadata")
MSG(cannot_get_feature_type_from_empty_metadata, "Cannot get feature type from empty metadata")
MSG(element_count_in_data_type_and_feature_type_arrays_does_not_match,
"Element count in data type and feature type array does not match")
MSG(pulling_column_is_not_supported_for_dpc, "Pulling column is not supported for DPC++")
MSG(pulling_column_is_not_supported, "Pulling column is not supported")
MSG(pulling_rows_is_not_supported_for_dpc, "Pulling rows is not supported for DPC++")
MSG(pulling_rows_is_not_supported, "Pulling rows is not supported")
MSG(pushing_column_is_not_supported_for_dpc, "Pushing column is not supported for DPC++")
MSG(pushing_column_is_not_supported, "Pushing column is not supported")
MSG(pushing_rows_is_not_supported_for_dpc, "Pushing rows is not supported for DPC++")
MSG(pushing_rows_is_not_supported, "Pushing rows is not supported")
MSG(rc_and_cc_do_not_match_element_count_in_array,
"Row count and column count do not match element count in array")
MSG(rc_leq_zero, "Row count is lower than or equal to zero")
MSG(cc_leq_zero, "Column count is lower than or equal to zero")
MSG(object_does_not_provide_read_access_to_rows,
"Given object does not provide read access to rows")
MSG(object_does_not_provide_write_access_to_rows,
"Given object does not provide write access to rows")
MSG(object_does_not_provide_read_access_to_columns,
"Given object does not provide read access to columns")
MSG(object_does_not_provide_write_access_to_columns,
"Given object does not provide write access to columns")
MSG(unsupported_conversion_types, "Unsupported conversion types")
/* Ranges */
MSG(invalid_range_of_rows, "Invalid range of rows")
MSG(invalid_range_of_columns, "Invalid range of columns")
MSG(column_index_out_of_range, "Column index out of range")
/* Graphs */
MSG(vertex_index_out_of_range_expect_from_zero_to_vertex_count,
"Vertex index is out of range, expect index in [0, vertex_count)")
MSG(negative_vertex_id, "Negative vertex ID")
MSG(unimplemented_sorting_procedure, "Unimplemented sorting procedure")
/* General algorithms */
MSG(accuracy_threshold_lt_zero, "Accuracy_threshold is lower than zero")
MSG(class_count_leq_one, "Class count is lower than or equal to one")
MSG(input_data_is_empty, "Input data is empty")
MSG(input_data_rc_neq_input_labels_rc,
"Input data row count is not equal to input labels row count")
MSG(input_data_rc_neq_input_weights_rc,
"Input data row count is not equal to input weights row count")
MSG(input_labels_are_empty, "Labels are empty")
MSG(input_labels_contain_only_one_unique_value_expect_two,
"Input labels contain only one unique value, two unique values are expected")
MSG(input_labels_contain_wrong_unique_values_count_expect_two,
"Input labels contain wrong number of unique values, two unique values are expected")
MSG(input_labels_table_has_wrong_cc_expect_one,
"Input labels table has wrong column count, one column is expected")
MSG(iteration_count_lt_zero, "Iteration count is lower than zero")
MSG(max_iteration_count_leq_zero, "Max iteration count lower than or equal to zero")
MSG(max_iteration_count_lt_zero, "Max iteration count lower than zero")
/* IO */
MSG(file_not_found, "File not found")
/* K-Means */
MSG(cluster_count_leq_zero, "Cluster count is lower than or equal to zero")
MSG(input_initial_centroids_are_empty, "Input initial centroids are empty")
MSG(input_initial_centroids_cc_neq_input_data_cc,
"Input initial centroids column count is not equal to input data column count")
MSG(input_initial_centroids_rc_neq_desc_cluster_count,
"Input initial centroids row count is not equal to descriptor cluster count")
MSG(input_model_centroids_are_empty, "Input model centroids are empty")
MSG(input_model_centroids_cc_neq_input_data_cc,
"Input model centroids column count is not equal to input data column count")
MSG(input_model_centroids_rc_neq_desc_cluster_count,
"Input model centroids row count is not equal to descriptor cluster count")
MSG(kmeans_init_parallel_plus_dense_method_is_not_implemented_for_gpu,
"K-Means init++ parallel dense method is not implemented for GPU")
MSG(kmeans_init_plus_plus_dense_method_is_not_implemented_for_gpu,
"K-Means init++ dense method is not implemented for GPU")
MSG(objective_function_value_lt_zero, "Objective function value is lower than zero")
/* k-NN */
MSG(knn_brute_force_method_is_not_implemented_for_cpu,
"k-NN brute force method is not implemented for CPU")
MSG(knn_kd_tree_method_is_not_implemented_for_gpu,
"k-NN k-d tree method is not implemented for GPU")
MSG(neighbor_count_lt_one, "Neighbor count lower than one")
/* Jaccard */
MSG(column_begin_gt_column_end, "Column begin is greater than column end")
MSG(empty_edge_list, "Empty edge list")
MSG(interval_gt_vertex_count, "Interval is greater than vertex count")
MSG(negative_interval, "Negative interval")
MSG(row_begin_gt_row_end, "Row begin is greater than row end")
MSG(range_idx_gt_max_int32, "Range indexes are greater than max of int32")
/* PCA */
MSG(component_count_lt_zero, "Component count is lower than zero")
MSG(input_data_cc_lt_desc_component_count,
"Input data column count is lower than component count provided in descriptor")
MSG(input_model_eigenvectors_cc_neq_input_data_cc,
"Input model eigenvectors column count is not equal to input data column count")
MSG(input_model_eigenvectors_rc_neq_desc_component_count,
"Eigenvectors' row count in input model is not equal to component count provided in descriptor")
MSG(input_model_eigenvectors_rc_neq_input_data_cc,
"Eigenvectors' row count in input model is not equal to input data column count")
MSG(pca_svd_based_method_is_not_implemented_for_gpu,
"PCA SVD-based method is not implemented for GPU")
/* SVM */
MSG(c_leq_zero, "C is lower than or equal to zero")
MSG(cache_size_lt_zero, "Cache size is lower than zero")
MSG(degree_lt_zero, "Degree lower than zero")
MSG(input_model_coeffs_are_empty, "Input model coeffs are empty")
MSG(input_model_coeffs_rc_neq_input_model_support_vector_count,
"Input model coeffs row count is not equal to support vector count provided in input model")
MSG(input_model_does_not_match_kernel_function, "Input model does not match kernel function type")
MSG(input_model_support_vectors_are_empty, "Input model support vectors are empty")
MSG(input_model_support_vectors_cc_neq_input_data_cc,
"Input model support vectors column count is not equal to input data column count")
MSG(input_model_support_vectors_rc_neq_input_model_support_vector_count,
"Support vectors row count is not equal to support vector count in input model")
MSG(polynomial_kernel_is_not_implemented_for_gpu, "Polynomial kernel is not implemented for GPU")
MSG(sigma_leq_zero, "Sigma lower than or equal to zero")
MSG(svm_smo_method_is_not_implemented_for_gpu, "SVM SMO method is not implemented for GPU")
MSG(svm_regression_task_is_not_implemented_for_gpu, "Regression SVM is not implemented for GPU")
MSG(svm_multiclass_not_implemented_for_gpu,
"SVM with multiclass support is not implemented for GPU")
MSG(tau_leq_zero, "Tau is lower than or equal to zero")
MSG(epsilon_lt_zero, "Epsilon is lower than zero")
MSG(unknown_kernel_function_type, "Unknown kernel function type")
/* Kernel Functions */
MSG(input_x_cc_neq_y_cc, "Input x column count is not equal to y column count")
MSG(input_x_is_empty, "Input x is empty")
MSG(input_y_is_empty, "Input y is empty")
/* Decision Forest */
MSG(bootstrap_is_incompatible_with_error_metric,
"Values of bootstrap and error metric parameters provided "
"in descriptor are incompatible to each other")
MSG(bootstrap_is_incompatible_with_variable_importance_mode,
"Values of bootstrap and variable importance mode parameters provided "
"in descriptor are incompatible to each other")
MSG(decision_forest_train_dense_method_is_not_implemented_for_gpu,
"Decision forest train dense method is not implemented for GPU")
MSG(decision_forest_train_hist_method_is_not_implemented_for_cpu,
"Decision forest train hist method is not implemented for CPU")
MSG(input_model_is_not_initialized, "Input model is not initialized")
} // namespace v1
} // namespace oneapi::dal::detail
| 1 | 28,586 | Very unclear error message. Remember that these are messages that your users see. Please be more specific | oneapi-src-oneDAL | cpp |
@@ -39,6 +39,12 @@ const (
DisablePublicIP = "DISABLED"
PublicSubnetsPlacement = "PublicSubnets"
PrivateSubnetsPlacement = "PrivateSubnets"
+
+ // RuntimePlatform configuration.
+ linuxOS = "LINUX"
+ // windowsCoreOS = "WINDOWS_SERVER_2019_CORE"
+ // windowsFullOS = "WINDOWS_SERVER_2019_FULL"
+ x86Arch = "X86_64"
)
// Constants for ARN options. | 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package template
import (
"bytes"
"fmt"
"text/template"
"github.com/google/uuid"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ecs"
)
// Constants for template paths.
const (
// Paths of workload cloudformation templates under templates/workloads/.
fmtWkldCFTemplatePath = "workloads/%s/%s/cf.yml"
fmtWkldPartialsCFTemplatePath = "workloads/partials/cf/%s.yml"
// Directories under templates/workloads/.
servicesDirName = "services"
jobDirName = "jobs"
// Names of workload templates.
lbWebSvcTplName = "lb-web"
rdWebSvcTplName = "rd-web"
backendSvcTplName = "backend"
workerSvcTplName = "worker"
scheduledJobTplName = "scheduled-job"
)
// Constants for workload options.
const (
// AWS VPC networking configuration.
EnablePublicIP = "ENABLED"
DisablePublicIP = "DISABLED"
PublicSubnetsPlacement = "PublicSubnets"
PrivateSubnetsPlacement = "PrivateSubnets"
)
// Constants for ARN options.
const (
snsARNPattern = "arn:%s:sns:%s:%s:%s-%s-%s-%s"
)
var (
// Template names under "workloads/partials/cf/".
partialsWorkloadCFTemplateNames = []string{
"loggroup",
"envvars",
"secrets",
"executionrole",
"taskrole",
"workload-container",
"fargate-taskdef-base-properties",
"service-base-properties",
"servicediscovery",
"addons",
"sidecars",
"logconfig",
"autoscaling",
"eventrule",
"state-machine",
"state-machine-definition.json",
"efs-access-point",
"env-controller",
"mount-points",
"volumes",
"image-overrides",
"instancerole",
"accessrole",
"publish",
"subscribe",
}
)
// WorkloadNestedStackOpts holds configuration that's needed if the workload stack has a nested stack.
type WorkloadNestedStackOpts struct {
StackName string
VariableOutputs []string
SecretOutputs []string
PolicyOutputs []string
SecurityGroupOutputs []string
}
// SidecarOpts holds configuration that's needed if the service has sidecar containers.
type SidecarOpts struct {
Name *string
Image *string
Essential *bool
Port *string
Protocol *string
CredsParam *string
Variables map[string]string
Secrets map[string]string
MountPoints []*MountPoint
DockerLabels map[string]string
DependsOn map[string]string
EntryPoint []string
Command []string
}
// StorageOpts holds data structures for rendering Volumes and Mount Points
type StorageOpts struct {
Ephemeral *int
Volumes []*Volume
MountPoints []*MountPoint
EFSPerms []*EFSPermission
ManagedVolumeInfo *ManagedVolumeCreationInfo // Used for delegating CreationInfo for Copilot-managed EFS.
}
// requiresEFSCreation returns true if managed volume information is specified; false otherwise.
func (s *StorageOpts) requiresEFSCreation() bool {
return s.ManagedVolumeInfo != nil
}
// EFSPermission holds information needed to render an IAM policy statement.
type EFSPermission struct {
FilesystemID *string
Write bool
AccessPointID *string
}
// MountPoint holds information needed to render a MountPoint in a containerdefinition.
type MountPoint struct {
ContainerPath *string
ReadOnly *bool
SourceVolume *string
}
// Volume contains fields that render a volume, its name, and EFSVolumeConfiguration
type Volume struct {
Name *string
EFS *EFSVolumeConfiguration
}
// ManagedVolumeCreationInfo holds information about how to create Copilot-managed access points.
type ManagedVolumeCreationInfo struct {
Name *string
DirName *string
UID *uint32
GID *uint32
}
// EFSVolumeConfiguration contains information about how to specify externally managed file systems.
type EFSVolumeConfiguration struct {
// EFSVolumeConfiguration
Filesystem *string
RootDirectory *string // "/" or empty are equivalent
// Authorization Config
AccessPointID *string
IAM *string // ENABLED or DISABLED
}
// LogConfigOpts holds configuration that's needed if the service is configured with Firelens to route
// its logs.
type LogConfigOpts struct {
Image *string
Destination map[string]string
EnableMetadata *string
SecretOptions map[string]string
ConfigFile *string
}
// HTTPHealthCheckOpts holds configuration that's needed for HTTP Health Check.
type HTTPHealthCheckOpts struct {
HealthCheckPath string
SuccessCodes string
HealthyThreshold *int64
UnhealthyThreshold *int64
Interval *int64
Timeout *int64
DeregistrationDelay *int64
GracePeriod *int64
}
// AdvancedCount holds configuration for autoscaling and capacity provider
// parameters.
type AdvancedCount struct {
Spot *int
Autoscaling *AutoscalingOpts
Cps []*CapacityProviderStrategy
}
// CapacityProviderStrategy holds the configuration needed for a
// CapacityProviderStrategyItem on a Service
type CapacityProviderStrategy struct {
Base *int
Weight *int
CapacityProvider string
}
// AutoscalingOpts holds configuration that's needed for Auto Scaling.
type AutoscalingOpts struct {
MinCapacity *int
MaxCapacity *int
CPU *float64
Memory *float64
Requests *float64
ResponseTime *float64
}
// ExecuteCommandOpts holds configuration that's needed for ECS Execute Command.
type ExecuteCommandOpts struct{}
// StateMachineOpts holds configuration needed for State Machine retries and timeout.
type StateMachineOpts struct {
Timeout *int
Retries *int
}
// PublishOpts holds configuration needed if the service has publishers.
type PublishOpts struct {
Topics []*Topic
}
// Topic holds information needed to render an SNS Topic in a container definition.
type Topic struct {
Name *string
AllowedWorkers []string
Region string
Partition string
AccountID string
App string
Env string
Svc string
}
// SubscribeOpts holds configuration needed if the service has subscriptions.
type SubscribeOpts struct {
Topics []*TopicSubscription
Queue *SQSQueue
}
// TopicSubscription holds information needed to render an SNS Topic Subscription in a container definition.
type TopicSubscription struct {
Name *string
Service *string
Queue *SQSQueue
}
// SQSQueue holds information needed to render an SQS Queue in a container definition.
type SQSQueue struct {
Retention *int64
Delay *int64
Timeout *int64
DeadLetter *DeadLetterQueue
FIFO *FIFOQueue
}
// DeadLetterQueue holds information needed to render a dead-letter SQS Queue in a container definition.
type DeadLetterQueue struct {
Tries *uint16
}
// FIFOQueue holds information needed to specify an SQS Queue as FIFO in a container definition.
type FIFOQueue struct {
HighThroughput bool
}
// NetworkOpts holds AWS networking configuration for the workloads.
type NetworkOpts struct {
AssignPublicIP string
SubnetsType string
SecurityGroups []string
}
func defaultNetworkOpts() *NetworkOpts {
return &NetworkOpts{
AssignPublicIP: EnablePublicIP,
SubnetsType: PublicSubnetsPlacement,
}
}
// WorkloadOpts holds optional data that can be provided to enable features in a workload stack template.
type WorkloadOpts struct {
// Additional options that are common between **all** workload templates.
Variables map[string]string
Secrets map[string]string
Aliases []string
Tags map[string]string // Used by App Runner workloads to tag App Runner service resources
NestedStack *WorkloadNestedStackOpts // Outputs from nested stacks such as the addons stack.
Sidecars []*SidecarOpts
LogConfig *LogConfigOpts
Autoscaling *AutoscalingOpts
CapacityProviders []*CapacityProviderStrategy
DesiredCountOnSpot *int
Storage *StorageOpts
Network *NetworkOpts
ExecuteCommand *ExecuteCommandOpts
EntryPoint []string
Command []string
DomainAlias string
DockerLabels map[string]string
DependsOn map[string]string
Publish *PublishOpts
ServiceDiscoveryEndpoint string
// Additional options for service templates.
WorkloadType string
HealthCheck *ecs.HealthCheck
HTTPHealthCheck HTTPHealthCheckOpts
DeregistrationDelay *int64
AllowedSourceIps []string
RulePriorityLambda string
DesiredCountLambda string
EnvControllerLambda string
CredentialsParameter string
// Additional options for job templates.
ScheduleExpression string
StateMachine *StateMachineOpts
// Additional options for worker service templates.
Subscribe *SubscribeOpts
}
// ParseRequestDrivenWebServiceInput holds data that can be provided to enable features for a request-driven web service stack.
type ParseRequestDrivenWebServiceInput struct {
Variables map[string]string
Tags map[string]string // Used by App Runner workloads to tag App Runner service resources
NestedStack *WorkloadNestedStackOpts // Outputs from nested stacks such as the addons stack.
EnableHealthCheck bool
EnvControllerLambda string
Publish *PublishOpts
// Input needed for the custom resource that adds a custom domain to the service.
Alias *string
ScriptBucketName *string
CustomDomainLambda *string
AWSSDKLayer *string
AppDNSDelegationRole *string
AppDNSName *string
}
// ParseLoadBalancedWebService parses a load balanced web service's CloudFormation template
// with the specified data object and returns its content.
func (t *Template) ParseLoadBalancedWebService(data WorkloadOpts) (*Content, error) {
if data.Network == nil {
data.Network = defaultNetworkOpts()
}
return t.parseSvc(lbWebSvcTplName, data, withSvcParsingFuncs())
}
// ParseRequestDrivenWebService parses a request-driven web service's CloudFormation template
// with the specified data object and returns its content.
func (t *Template) ParseRequestDrivenWebService(data ParseRequestDrivenWebServiceInput) (*Content, error) {
return t.parseSvc(rdWebSvcTplName, data, withSvcParsingFuncs())
}
// ParseBackendService parses a backend service's CloudFormation template with the specified data object and returns its content.
func (t *Template) ParseBackendService(data WorkloadOpts) (*Content, error) {
if data.Network == nil {
data.Network = defaultNetworkOpts()
}
return t.parseSvc(backendSvcTplName, data, withSvcParsingFuncs())
}
// ParseWorkerService parses a worker service's CloudFormation template with the specified data object and returns its content.
func (t *Template) ParseWorkerService(data WorkloadOpts) (*Content, error) {
if data.Network == nil {
data.Network = defaultNetworkOpts()
}
return t.parseSvc(workerSvcTplName, data, withSvcParsingFuncs())
}
// ParseScheduledJob parses a scheduled job's Cloudformation Template
func (t *Template) ParseScheduledJob(data WorkloadOpts) (*Content, error) {
if data.Network == nil {
data.Network = defaultNetworkOpts()
}
return t.parseJob(scheduledJobTplName, data, withSvcParsingFuncs())
}
// parseSvc parses a service's CloudFormation template with the specified data object and returns its content.
func (t *Template) parseSvc(name string, data interface{}, options ...ParseOption) (*Content, error) {
return t.parseWkld(name, servicesDirName, data, options...)
}
// parseJob parses a job's Cloudformation template with the specified data object and returns its content.
func (t *Template) parseJob(name string, data interface{}, options ...ParseOption) (*Content, error) {
return t.parseWkld(name, jobDirName, data, options...)
}
func (t *Template) parseWkld(name, wkldDirName string, data interface{}, options ...ParseOption) (*Content, error) {
tpl, err := t.parse("base", fmt.Sprintf(fmtWkldCFTemplatePath, wkldDirName, name), options...)
if err != nil {
return nil, err
}
for _, templateName := range partialsWorkloadCFTemplateNames {
nestedTpl, err := t.parse(templateName, fmt.Sprintf(fmtWkldPartialsCFTemplatePath, templateName), options...)
if err != nil {
return nil, err
}
_, err = tpl.AddParseTree(templateName, nestedTpl.Tree)
if err != nil {
return nil, fmt.Errorf("add parse tree of %s to base template: %w", templateName, err)
}
}
buf := &bytes.Buffer{}
if err := tpl.Execute(buf, data); err != nil {
return nil, fmt.Errorf("execute template %s with data %v: %w", name, data, err)
}
return &Content{buf}, nil
}
func withSvcParsingFuncs() ParseOption {
return func(t *template.Template) *template.Template {
return t.Funcs(map[string]interface{}{
"toSnakeCase": ToSnakeCaseFunc,
"hasSecrets": hasSecrets,
"fmtSlice": FmtSliceFunc,
"quoteSlice": QuotePSliceFunc,
"randomUUID": randomUUIDFunc,
"jsonMountPoints": generateMountPointJSON,
"jsonSNSTopics": generateSNSJSON,
"jsonQueueURIs": generateQueueURIJSON,
"envControllerParams": envControllerParameters,
"logicalIDSafe": StripNonAlphaNumFunc,
})
}
}
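// hasSecrets reports whether the workload declares any secrets, either directly or
// through the secret outputs of its addons nested stack.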
func hasSecrets(opts WorkloadOpts) bool {
if len(opts.Secrets) > 0 {
return true
}
if opts.NestedStack != nil && (len(opts.NestedStack.SecretOutputs) > 0) {
return true
}
return false
}
func randomUUIDFunc() (string, error) {
id, err := uuid.NewRandom()
if err != nil {
return "", fmt.Errorf("generate random uuid: %w", err)
}
return id.String(), err
}
// envControllerParameters determines which parameters to include in the EnvController template.
func envControllerParameters(o WorkloadOpts) []string {
parameters := []string{}
if o.WorkloadType == "Load Balanced Web Service" {
parameters = append(parameters, []string{"ALBWorkloads,", "Aliases,"}...) // YAML needs the comma separator; resolved in EnvContr.
}
if o.Network.SubnetsType == PrivateSubnetsPlacement {
parameters = append(parameters, "NATWorkloads,") // YAML needs the comma separator; resolved in EnvContr.
}
if o.Storage != nil && o.Storage.requiresEFSCreation() {
parameters = append(parameters, "EFSWorkloads,")
}
return parameters
}
// ARN determines the ARN for a topic using the SNS topic name and account information.
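// With hypothetical values (partition "aws", region "us-west-2", account "123456789012",
// app "demo", env "test", service "api", topic "events") the pattern yields:
// arn:aws:sns:us-west-2:123456789012:demo-test-api-events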
func (t Topic) ARN() string {
return fmt.Sprintf(snsARNPattern, t.Partition, t.Region, t.AccountID, t.App, t.Env, t.Svc, aws.StringValue(t.Name))
}
| 1 | 18,788 | Can we add these when we need them | aws-copilot-cli | go |
@@ -460,12 +460,13 @@ class EDS extends DefaultRecord
*/
public function getPrimaryAuthors()
{
- return array_unique(
- $this->extractEbscoDataFromRecordInfo(
- 'BibRecord/BibRelationships/HasContributorRelationships/*/'
+ $authors = $this->extractEbscoDataFromRecordInfo(
+ 'BibRecord/BibRelationships/HasContributorRelationships/*/'
. 'PersonEntity/Name/NameFull'
- )
);
+ return array_unique(array_filter($authors, function ($a) {
+ return !empty($a);
+ }));
}
/** | 1 | <?php
/**
* Model for EDS records.
*
* PHP version 7
*
* Copyright (C) Villanova University 2010.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category VuFind
* @package RecordDrivers
* @author Demian Katz <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development:plugins:record_drivers Wiki
*/
namespace VuFind\RecordDriver;
/**
* Model for EDS records.
*
* @category VuFind
* @package RecordDrivers
* @author Demian Katz <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development:plugins:record_drivers Wiki
*/
class EDS extends DefaultRecord
{
/**
* Document types that are treated as ePub links.
*
* @var array
*/
protected $epubTypes = ['ebook-epub'];
/**
* Document types that are treated as PDF links.
*
* @var array
*/
protected $pdfTypes = ['ebook-pdf', 'pdflink'];
/**
* Return the unique identifier of this record within the Solr index;
* useful for retrieving additional information (like tags and user
* comments) from the external MySQL database.
*
* @return string Unique identifier.
*/
public function getUniqueID()
{
$dbid = $this->fields['Header']['DbId'];
$an = $this->fields['Header']['An'];
return $dbid . ',' . $an;
}
/**
* Get the short (pre-subtitle) title of the record.
*
* @return string
*/
public function getShortTitle()
{
$title = $this->getTitle();
if (null == $title) {
return '';
}
$parts = explode(':', $title);
return trim(current($parts));
}
/**
* Get the subtitle (if any) of the record.
*
* @return string
*/
public function getSubtitle()
{
$title = $this->getTitle();
if (null == $title) {
return '';
}
$parts = explode(':', $title, 2);
return count($parts) > 1 ? trim(array_pop($parts)) : '';
}
/**
* Get the abstract (summary) of the record.
*
* @return string
*/
public function getItemsAbstract()
{
$abstract = $this->getItems(null, null, 'Ab');
return $abstract[0]['Data'] ?? '';
}
/**
* Get the access level of the record.
*
* @return string
*/
public function getAccessLevel()
{
return $this->fields['Header']['AccessLevel'] ?? '';
}
/**
* Get the authors of the record
*
* @return string
*/
public function getItemsAuthors()
{
$authors = $this->getItemsAuthorsArray();
return empty($authors) ? '' : implode('; ', $authors);
}
/**
     * Obtain an array of authors indicated on the record
*
* @return array
*/
protected function getItemsAuthorsArray()
{
return array_map(
function ($data) {
return $data['Data'];
}, $this->getItems(null, null, 'Au')
);
}
/**
* Get the custom links of the record.
*
* @return array
*/
public function getCustomLinks()
{
return $this->fields['CustomLinks'] ?? [];
}
/**
* Get the full text custom links of the record.
*
* @return array
*/
public function getFTCustomLinks()
{
return $this->fields['FullText']['CustomLinks'] ?? [];
}
/**
* Get the database label of the record.
*
* @return string
*/
public function getDbLabel()
{
return $this->fields['Header']['DbLabel'] ?? '';
}
/**
* Get the full text of the record.
*
* @return string
*/
public function getHTMLFullText()
{
return $this->toHTML($this->fields['FullText']['Text']['Value'] ?? '');
}
/**
* Get the full text availability of the record.
*
* @return bool
*/
public function hasHTMLFullTextAvailable()
{
return '1' == ($this->fields['FullText']['Text']['Availability'] ?? '0');
}
/**
* Support method for getItems, used to apply filters.
*
* @param array $item Item to check
* @param string $context The context in which items are being retrieved
* (used for context-sensitive filtering)
*
* @return bool
*/
protected function itemIsExcluded($item, $context)
{
// Create a list of config sections to check, based on context:
$sections = ['ItemGlobalFilter'];
switch ($context) {
case 'result-list':
$sections[] = 'ItemResultListFilter';
break;
case 'core':
$sections[] = 'ItemCoreFilter';
break;
}
// Check to see if anything is filtered:
foreach ($sections as $section) {
$currentConfig = isset($this->recordConfig->$section)
? $this->recordConfig->$section->toArray() : [];
$badLabels = (array)($currentConfig['excludeLabel'] ?? []);
$badGroups = (array)($currentConfig['excludeGroup'] ?? []);
if (in_array($item['Label'], $badLabels)
|| in_array($item['Group'], $badGroups)
) {
return true;
}
}
// If we got this far, no filter was applied:
return false;
}
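    /*
     * Config sketch (assumed EDS.ini shape, not shipped defaults): the filter
     * sections read above are plain exclude lists, e.g.
     *
     *   [ItemGlobalFilter]
     *   excludeLabel[] = "Availability"
     *
     *   [ItemResultListFilter]
     *   excludeGroup[] = "URL"
     *
     * Any item whose Label or Group matches an entry is dropped for the
     * corresponding context.
     */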
/**
* Get the items of the record.
*
* @param string $context The context in which items are being retrieved
* (used for context-sensitive filtering)
* @param string $labelFilter A specific label to retrieve (filter out others;
* null for no filter)
* @param string $groupFilter A specific group to retrieve (filter out others;
* null for no filter)
* @param string $nameFilter A specific name to retrieve (filter out others;
* null for no filter)
*
* @return array
*/
public function getItems($context = null, $labelFilter = null,
$groupFilter = null, $nameFilter = null
) {
$items = [];
foreach ($this->fields['Items'] ?? [] as $item) {
$nextItem = [
'Label' => $item['Label'] ?? '',
'Group' => $item['Group'] ?? '',
'Name' => $item['Name'] ?? '',
'Data' => isset($item['Data'])
? $this->toHTML($item['Data'], $item['Group']) : ''
];
if (!$this->itemIsExcluded($nextItem, $context)
&& ($labelFilter === null || $nextItem['Label'] === $labelFilter)
&& ($groupFilter === null || $nextItem['Group'] === $groupFilter)
&& ($nameFilter === null || $nextItem['Name'] === $nameFilter)
) {
$items[] = $nextItem;
}
}
return $items;
}
/**
* Get the full text url of the record.
*
* @return string
*/
public function getPLink()
{
return $this->fields['PLink'] ?? '';
}
/**
* Get the publication type of the record.
*
* @return string
*/
public function getPubType()
{
return $this->fields['Header']['PubType'] ?? '';
}
/**
* Get the publication type id of the record.
*
* @return string
*/
public function getPubTypeId()
{
return $this->fields['Header']['PubTypeId'] ?? '';
}
/**
* Get the ebook availability of the record.
*
* @param array $types Types that we are interested in checking for
*
* @return bool
*/
protected function hasEbookAvailable(array $types)
{
foreach ($this->fields['FullText']['Links'] ?? [] as $link) {
if (in_array($link['Type'] ?? '', $types)) {
return true;
}
}
return false;
}
/**
* Get the PDF availability of the record.
*
* @return bool
*/
public function hasPdfAvailable()
{
return $this->hasEbookAvailable($this->pdfTypes);
}
/**
* Get the ePub availability of the record.
*
* @return bool
*/
public function hasEpubAvailable()
{
return $this->hasEbookAvailable($this->epubTypes);
}
/**
* Get the linked full text availability of the record.
*
* @return bool
*/
public function hasLinkedFullTextAvailable()
{
return $this->hasEbookAvailable(['other']);
}
/**
* Get the ebook url of the record. If missing, return false
*
* @param array $types Types that we are interested in checking for
*
* @return string
*/
public function getEbookLink(array $types)
{
foreach ($this->fields['FullText']['Links'] ?? [] as $link) {
if (!empty($link['Type']) && !empty($link['Url'])
&& in_array($link['Type'], $types)
) {
return $link['Url'];
}
}
return false;
}
/**
* Get the PDF url of the record. If missing, return false
*
* @return string
*/
public function getPdfLink()
{
return $this->getEbookLink($this->pdfTypes);
}
/**
* Get the ePub url of the record. If missing, return false
*
* @return string
*/
public function getEpubLink()
{
return $this->getEbookLink($this->epubTypes);
}
/**
* Get the linked full text url of the record. If missing, return false
*
* @return string
*/
public function getLinkedFullTextLink()
{
return $this->getEbookLink(['other']);
}
/**
* Get the subject data of the record.
*
* @return string
*/
public function getItemsSubjects()
{
$subjects = array_map(
function ($data) {
return $data['Data'];
}, $this->getItems(null, null, 'Su')
);
return empty($subjects) ? '' : implode(', ', $subjects);
}
/**
* Return a URL to a thumbnail preview of the record, if available; false
* otherwise.
*
* @param string $size Size of thumbnail (small, medium or large -- small is
* default).
*
* @return string
*/
public function getThumbnail($size = 'small')
{
foreach ($this->fields['ImageInfo'] ?? [] as $image) {
if ($size == ($image['Size'] ?? '')) {
return $image['Target'] ?? '';
}
}
return false;
}
/**
* Get the title of the record.
*
* @return string
*/
public function getItemsTitle()
{
$title = $this->getItems(null, null, 'Ti');
return $title[0]['Data'] ?? '';
}
/**
* Obtain the title of the record from the record info section
*
* @return string
*/
public function getTitle()
{
$list = $this->extractEbscoDataFromRecordInfo('BibRecord/BibEntity/Titles');
foreach ($list as $titleRecord) {
if ('main' == ($titleRecord['Type'] ?? '')) {
return $titleRecord['TitleFull'];
}
}
return '';
}
/**
     * Obtain the authors of a record from the RecordInfo section
*
* @return array
*/
public function getPrimaryAuthors()
{
return array_unique(
$this->extractEbscoDataFromRecordInfo(
'BibRecord/BibRelationships/HasContributorRelationships/*/'
. 'PersonEntity/Name/NameFull'
)
);
}
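    /*
     * Editorial note on the diff above: array_filter() with no callback
     * already removes empty/falsy entries, so the patched version could be
     * reduced to
     *
     *   return array_unique(array_filter($authors));
     *
     * with identical results here -- the attached review suggests exactly
     * this simplification.
     */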
/**
* Get the source of the record.
*
* @return string
*/
public function getItemsTitleSource()
{
$title = $this->getItems(null, null, 'Src');
return $title[0]['Data'] ?? '';
}
/**
* Performs a regex and replaces any url's with links containing themselves
* as the text
*
* @param string $string String to process
*
* @return string HTML string
*/
public function linkUrls($string)
{
$linkedString = preg_replace_callback(
"/\b(https?):\/\/([-A-Z0-9+&@#\/%?=~_|!:,.;]*[-A-Z0-9+&@#\/%=~_|]*)\b/i",
function ($matches) {
return "<a href='" . $matches[0] . "'>"
. htmlentities($matches[0]) . "</a>";
},
$string
);
return $linkedString;
}
/**
* Parse a SimpleXml element and
     * return its inner XML as an HTML string
*
* @param SimpleXml $data A SimpleXml DOM
* @param string $group Group identifier
*
* @return string The HTML string
*/
protected function toHTML($data, $group = null)
{
// Map xml tags to the HTML tags
// This is just a small list, the total number of xml tags is far greater
// Any group can be added here, but we only use Au (Author)
// Other groups, not present here, won't be transformed to HTML links
$allowed_searchlink_groups = ['au','su'];
$xml_to_html_tags = [
'<jsection' => '<section',
'</jsection' => '</section',
'<highlight' => '<span class="highlight"',
'<highligh' => '<span class="highlight"', // Temporary bug fix
'</highlight>' => '</span>', // Temporary bug fix
'</highligh' => '</span>',
'<text' => '<div',
'</text' => '</div',
'<title' => '<h2',
'</title' => '</h2',
'<anid' => '<p',
'</anid' => '</p',
'<aug' => '<p class="aug"',
'</aug' => '</p',
'<hd' => '<h3',
'</hd' => '</h3',
'<linebr' => '<br',
'</linebr' => '',
'<olist' => '<ol',
'</olist' => '</ol',
'<reflink' => '<a',
'</reflink' => '</a',
'<blist' => '<p class="blist"',
'</blist' => '</p',
'<bibl' => '<a',
'</bibl' => '</a',
'<bibtext' => '<span',
'</bibtext' => '</span',
'<ref' => '<div class="ref"',
'</ref' => '</div',
'<ulink' => '<a',
'</ulink' => '</a',
'<superscript' => '<sup',
'</superscript' => '</sup',
'<relatesTo' => '<sup',
'</relatesTo' => '</sup'
];
        // The XML data is escaped, let's unescape html entities (e.g. &lt; => <)
$data = html_entity_decode($data, ENT_QUOTES, "utf-8");
// Start parsing the xml data
if (!empty($data)) {
// Replace the XML tags with HTML tags
$search = array_keys($xml_to_html_tags);
$replace = array_values($xml_to_html_tags);
$data = str_replace($search, $replace, $data);
// Temporary : fix unclosed tags
$data = preg_replace('/<\/highlight/', '</span>', $data);
$data = preg_replace('/<\/span>>/', '</span>', $data);
$data = preg_replace('/<\/searchLink/', '</searchLink>', $data);
$data = preg_replace('/<\/searchLink>>/', '</searchLink>', $data);
//$searchBase = $this->url('eds-search');
// Parse searchLinks
if (!empty($group)) {
$group = strtolower($group);
if (in_array($group, $allowed_searchlink_groups)) {
$type = strtoupper($group);
$link_xml = '/<searchLink fieldCode="([^\"]*)" '
. 'term="%22([^\"]*)%22">/';
$link_html
= "<a href=\"../EDS/Search?lookfor=$2&type={$type}\">";
$data = preg_replace($link_xml, $link_html, $data);
$data = str_replace('</searchLink>', '</a>', $data);
}
}
// Replace the rest of searchLinks with simple spans
$link_xml = '/<searchLink fieldCode="([^\"]*)" term="%22([^\"]*)%22">/';
$link_html = '<span>';
$data = preg_replace($link_xml, $link_html, $data);
$data = str_replace('</searchLink>', '</span>', $data);
// Parse bibliography (anchors and links)
$data = preg_replace('/<a idref="([^\"]*)"/', '<a href="#$1"', $data);
$data = preg_replace(
'/<a id="([^\"]*)" idref="([^\"]*)" type="([^\"]*)"/',
'<a id="$1" href="#$2"', $data
);
$data = $this->replaceBRWithCommas($data, $group);
}
return $data;
}
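    /*
     * Transform sketch: with $group = 'au', the input
     *   <searchLink fieldCode="AU" term="%22Smith%22">Smith</searchLink>
     * is rewritten by the regex above into
     *   <a href="../EDS/Search?lookfor=Smith&type=AU">Smith</a>
     * while searchLinks in non-allowed groups collapse to plain <span>s.
     */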
/**
* Replace <br> tags that are embedded in data to commas
*
* @param string $data Data to process
* @param string $group Group identifier
*
* @return string
*/
protected function replaceBRWithCommas($data, $group)
{
$groupsToReplace = ['au','su'];
if (in_array($group, $groupsToReplace)) {
$br = '/<br \/>/';
$comma = ', ';
return preg_replace($br, $comma, $data);
}
return $data;
}
/**
* Return the first valid DOI found in the record (false if none).
*
* @return mixed
*/
public function getCleanDOI()
{
$doi = $this->getItems(null, null, null, 'DOI');
if (isset($doi[0]['Data'])) {
return $doi[0]['Data'];
}
$dois = $this->getFilteredIdentifiers(['doi']);
return $dois[0] ?? false;
}
/**
* Get record languages
*
* @return array
*/
public function getLanguages()
{
return $this->extractEbscoData(
[
'RecordInfo:BibRecord/BibEntity/Languages/*/Text',
'Items:Languages',
'Items:Language',
]
);
}
/**
* Retrieve identifiers from the EBSCO record and retrieve values filtered by
* type.
*
* @param array $filter Type values to retrieve.
*
* @return array
*/
protected function getFilteredIdentifiers($filter)
{
$raw = array_merge(
$this->extractEbscoDataFromRecordInfo(
'BibRecord/BibRelationships/IsPartOfRelationships/*'
. '/BibEntity/Identifiers'
),
$this->extractEbscoDataFromRecordInfo(
'BibRecord/BibEntity/Identifiers'
)
);
$ids = [];
foreach ($raw as $data) {
$type = strtolower($data['Type'] ?? '');
if (isset($data['Value']) && in_array($type, $filter)) {
$ids[] = $data['Value'];
}
}
return $ids;
}
/**
* Get ISSNs (of containing record)
*
* @return array
*/
public function getISSNs()
{
return $this->getFilteredIdentifiers(['issn-print', 'issn-electronic']);
}
/**
* Get an array of ISBNs
*
* @return array
*/
public function getISBNs()
{
return $this->getFilteredIdentifiers(['isbn-print', 'isbn-electronic']);
}
/**
* Get title of containing record
*
* @return string
*/
public function getContainerTitle()
{
// If there is no source, we don't want to identify a container
// (in this situation, it is likely redundant data):
if (count($this->extractEbscoDataFromItems('Source')) === 0) {
return '';
}
$data = $this->extractEbscoDataFromRecordInfo(
'BibRecord/BibRelationships/IsPartOfRelationships/0'
. '/BibEntity/Titles/0/TitleFull'
);
return $data[0] ?? '';
}
/**
* Extract numbering data of a particular type.
*
* @param string $type Numbering type to return, if present.
*
* @return string
*/
protected function getFilteredNumbering($type)
{
$numbering = $this->extractEbscoDataFromRecordInfo(
'BibRecord/BibRelationships/IsPartOfRelationships/*/BibEntity/Numbering'
);
foreach ($numbering as $data) {
if (strtolower($data['Type'] ?? '') == $type
&& !empty($data['Value'])
) {
return $data['Value'];
}
}
return '';
}
/**
* Get issue of containing record
*
* @return string
*/
public function getContainerIssue()
{
return $this->getFilteredNumbering('issue');
}
/**
* Get volume of containing record
*
* @return string
*/
public function getContainerVolume()
{
return $this->getFilteredNumbering('volume');
}
/**
* Get the publication dates of the record. See also getDateSpan().
*
* @return array
*/
public function getPublicationDates()
{
$pubDates = array_map(
function ($data) {
return $data->getDate();
}, $this->getRawEDSPublicationDetails()
);
return !empty($pubDates) ? $pubDates : $this->extractEbscoDataFromRecordInfo(
'BibRecord/BibRelationships/IsPartOfRelationships/0/BibEntity/Dates/0/Y'
);
}
/**
     * Get the start page of the item that contains this record.
*
* @return string
*/
public function getContainerStartPage()
{
$pagination = $this->extractEbscoDataFromRecordInfo(
'BibRecord/BibEntity/PhysicalDescription/Pagination'
);
return $pagination['StartPage'] ?? '';
}
/**
* Get the end page of the item that contains this record.
*
* @return string
*/
public function getContainerEndPage()
{
// EBSCO doesn't make this information readily available, but in some
        // cases we can extract it from an OpenURL.
$startPage = $this->getContainerStartPage();
if (!empty($startPage)) {
$regex = "/&pages={$startPage}-(\d+)/";
foreach ($this->getFTCustomLinks() as $link) {
if (preg_match($regex, $link['Url'] ?? '', $matches)) {
if (isset($matches[1])) {
return $matches[1];
}
}
}
}
return '';
}
/**
* Returns an array of formats based on publication type.
*
* @return array
*/
public function getFormats()
{
$formats = [];
$pubType = $this->getPubType();
switch (strtolower($pubType)) {
case 'academic journal':
case 'periodical':
case 'report':
// Add "article" format for better OpenURL generation
$formats[] = $pubType;
$formats[] = 'Article';
break;
case 'ebook':
// Treat eBooks as both "Books" and "Electronic" items
$formats[] = 'Book';
$formats[] = 'Electronic';
break;
case 'dissertation/thesis':
// Simplify wording for consistency with other drivers
$formats[] = 'Thesis';
break;
default:
$formats[] = $pubType;
}
return $formats;
}
/**
* Get the publishers of the record.
*
* @return array
*/
public function getPublishers()
{
return array_map(
function ($data) {
return $data->getName();
}, $this->getRawEDSPublicationDetails()
);
}
/**
* Get the item's place of publication.
*
* @return array
*/
public function getPlacesOfPublication()
{
return array_map(
function ($data) {
return $data->getPlace();
}, $this->getRawEDSPublicationDetails()
);
}
/**
* Get an array of publication detail lines combining information from
* getPublicationDates(), getPublishers() and getPlacesOfPublication().
*
* @return array
*/
public function getPublicationDetails()
{
$details = $this->getRawEDSPublicationDetails();
return !empty($details) ? $details : parent::getPublicationDetails();
}
/**
* Attempt to build up publication details from raw EDS data.
*
* @return array
*/
protected function getRawEDSPublicationDetails()
{
$details = [];
foreach ($this->getItems(null, 'Publication Information') as $pub) {
// Try to extract place, publisher and date:
if (preg_match('/^(.+):(.*)\.\s*(\d{4})$/', $pub['Data'], $matches)) {
$placeParts = explode('.', $matches[1]);
list($place, $pub, $date)
= [trim($matches[1]), trim($matches[2]), $matches[3]];
} elseif (preg_match('/^(.+):(.*)$/', $pub['Data'], $matches)) {
list($place, $pub, $date)
= [trim($matches[1]), trim($matches[2]), ''];
} else {
list($place, $pub, $date) = ['', $pub['Data'], ''];
}
// In some cases, the place may have noise on the front that needs
// to be removed...
$placeParts = explode('.', $place);
$shortPlace = array_pop($placeParts);
$details[] = new Response\PublicationDetails(
strlen($shortPlace) > 5 ? $shortPlace : $place, $pub, $date
);
}
return $details;
}
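    /*
     * Worked example (illustrative input): "Boston, MA : Example Press. 2019"
     * matches the first regex, yielding place "Boston, MA", publisher
     * "Example Press" and date "2019". The trailing explode('.') pass strips
     * leading noise from longer place strings, falling back to the full
     * string when the last fragment is 5 characters or fewer.
     */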
/**
* Extract data from EBSCO API response using a prioritized list of selectors.
* Selectors can be of the form Items:Label to invoke extractEbscoDataFromItems,
* or RecordInfo:Path/To/Data/Element to invoke extractEbscoDataFromRecordInfo.
*
* @param array $selectors Array of selector strings for extracting data.
*
* @return array
*/
protected function extractEbscoData($selectors)
{
$result = [];
foreach ($selectors as $selector) {
list($method, $params) = explode(':', $selector, 2);
$fullMethod = 'extractEbscoDataFrom' . ucwords($method);
if (!is_callable([$this, $fullMethod])) {
throw new \Exception('Undefined method: ' . $fullMethod);
}
$result = $this->$fullMethod($params);
if (!empty($result)) {
break;
}
}
return $result;
}
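    // Usage sketch (mirrors getLanguages() above): selectors are tried in
    // order until one returns data, e.g.
    //
    //   $this->extractEbscoData([
    //       'RecordInfo:BibRecord/BibEntity/Languages/*/Text',
    //       'Items:Languages',
    //   ]);
    //
    // "RecordInfo:" routes to extractEbscoDataFromRecordInfo() with the path;
    // "Items:" routes to extractEbscoDataFromItems() with the label.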
/**
* Extract data from the record's "Items" array, based on a label.
*
* @param string $label Label to filter on.
*
* @return array
*/
protected function extractEbscoDataFromItems($label)
{
$items = $this->getItems(null, $label);
$output = [];
foreach ($items as $item) {
$output[] = $item['Data'];
}
return $output;
}
/**
* Extract data from the record's "RecordInfo" array, based on a path.
*
* @param string $path Path to select with (slash-separated element names,
* with special * selector to iterate through all children).
*
* @return array
*/
protected function extractEbscoDataFromRecordInfo($path)
{
return (array)$this->recurseIntoRecordInfo(
$this->fields['RecordInfo'] ?? [],
explode('/', $path)
);
}
/**
* Recursive support method for extractEbscoDataFromRecordInfo().
*
* @param array $data Data to recurse into
* @param array $path Array representing path into data
*
* @return array
*/
protected function recurseIntoRecordInfo($data, $path)
{
$nextField = array_shift($path);
$keys = $nextField === '*' ? array_keys($data) : [$nextField];
$values = [];
foreach ($keys as $key) {
if (isset($data[$key])) {
$values[] = empty($path)
? $data[$key]
: $this->recurseIntoRecordInfo($data[$key], $path);
}
}
return count($values) == 1 ? $values[0] : $values;
}
}
| 1 | 30,550 | I believe you can simplify this to simply `return array_unique(array_filter($authors));` -- the default behavior of array_filter is to filter out empty elements, so the callback should not be required in this situation. Do you mind giving it a try and updating the PR if it works? | vufind-org-vufind | php |
@@ -305,6 +305,10 @@ class UIAHandler(COMObject):
return
import NVDAObjects.UIA
obj=NVDAObjects.UIA.UIA(UIAElement=sender)
+ if not obj:
+ # Sometimes notification events can be fired on a UIAElement that has no windowHandle and does not connect through parents back to the desktop.
+ # There is nothing we can do with these.
+ return
eventHandler.queueEvent("UIA_notification",obj, notificationKind=NotificationKind, notificationProcessing=NotificationProcessing, displayString=displayString, activityId=activityId)
def _isUIAWindowHelper(self,hwnd): | 1 | #_UIAHandler.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2011-2018 NV Access Limited, Joseph Lee, Babbage B.V.
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
from ctypes import *
from ctypes.wintypes import *
import comtypes.client
from comtypes.automation import VT_EMPTY
from comtypes import *
import weakref
import threading
import time
import config
import api
import appModuleHandler
import queueHandler
import controlTypes
import NVDAHelper
import winKernel
import winUser
import eventHandler
from logHandler import log
import UIAUtils
from comtypes.gen.UIAutomationClient import *
#Some new win8 UIA constants that could be missing
UIA_StyleIdAttributeId=40034
UIA_AnnotationAnnotationTypeIdPropertyId=30113
UIA_AnnotationTypesAttributeId=40031
AnnotationType_SpellingError=60001
UIA_AnnotationObjectsAttributeId=40032
StyleId_Heading1=70001
StyleId_Heading9=70009
ItemIndex_Property_GUID=GUID("{92A053DA-2969-4021-BF27-514CFC2E4A69}")
ItemCount_Property_GUID=GUID("{ABBF5C45-5CCC-47b7-BB4E-87CB87BBD162}")
UIA_FullDescriptionPropertyId=30159
UIA_LevelPropertyId=30154
UIA_PositionInSetPropertyId=30152
UIA_SizeOfSetPropertyId=30153
UIA_LocalizedLandmarkTypePropertyId=30158
UIA_LandmarkTypePropertyId=30157
HorizontalTextAlignment_Left=0
HorizontalTextAlignment_Centered=1
HorizontalTextAlignment_Right=2
HorizontalTextAlignment_Justified=3
# The name of the WDAG (Windows Defender Application Guard) process
WDAG_PROCESS_NAME=u'hvsirdpclient'
goodUIAWindowClassNames=[
# A WDAG (Windows Defender Application Guard) Window is always native UIA, even if it doesn't report as such.
'RAIL_WINDOW',
]
badUIAWindowClassNames=[
"SysTreeView32",
"WuDuiListView",
"ComboBox",
"msctls_progress32",
"Edit",
"CommonPlacesWrapperWndClass",
"SysMonthCal32",
"SUPERGRID", #Outlook 2010 message list
"RichEdit",
"RichEdit20",
"RICHEDIT50W",
"SysListView32",
"EXCEL7",
"Button",
# #7497: Windows 10 Fall Creators Update has an incomplete UIA implementation for console windows, therefore for now we should ignore it.
# It does not implement caret/selection, and probably has no new text events.
"ConsoleWindowClass",
]
NVDAUnitsToUIAUnits={
"character":TextUnit_Character,
"word":TextUnit_Word,
"line":TextUnit_Line,
"paragraph":TextUnit_Paragraph,
"readingChunk":TextUnit_Line,
}
UIAControlTypesToNVDARoles={
UIA_ButtonControlTypeId:controlTypes.ROLE_BUTTON,
UIA_CalendarControlTypeId:controlTypes.ROLE_CALENDAR,
UIA_CheckBoxControlTypeId:controlTypes.ROLE_CHECKBOX,
UIA_ComboBoxControlTypeId:controlTypes.ROLE_COMBOBOX,
UIA_EditControlTypeId:controlTypes.ROLE_EDITABLETEXT,
UIA_HyperlinkControlTypeId:controlTypes.ROLE_LINK,
UIA_ImageControlTypeId:controlTypes.ROLE_GRAPHIC,
UIA_ListItemControlTypeId:controlTypes.ROLE_LISTITEM,
UIA_ListControlTypeId:controlTypes.ROLE_LIST,
UIA_MenuControlTypeId:controlTypes.ROLE_POPUPMENU,
UIA_MenuBarControlTypeId:controlTypes.ROLE_MENUBAR,
UIA_MenuItemControlTypeId:controlTypes.ROLE_MENUITEM,
UIA_ProgressBarControlTypeId:controlTypes.ROLE_PROGRESSBAR,
UIA_RadioButtonControlTypeId:controlTypes.ROLE_RADIOBUTTON,
UIA_ScrollBarControlTypeId:controlTypes.ROLE_SCROLLBAR,
UIA_SliderControlTypeId:controlTypes.ROLE_SLIDER,
UIA_SpinnerControlTypeId:controlTypes.ROLE_SPINBUTTON,
UIA_StatusBarControlTypeId:controlTypes.ROLE_STATUSBAR,
UIA_TabControlTypeId:controlTypes.ROLE_TABCONTROL,
UIA_TabItemControlTypeId:controlTypes.ROLE_TAB,
UIA_TextControlTypeId:controlTypes.ROLE_STATICTEXT,
UIA_ToolBarControlTypeId:controlTypes.ROLE_TOOLBAR,
UIA_ToolTipControlTypeId:controlTypes.ROLE_TOOLTIP,
UIA_TreeControlTypeId:controlTypes.ROLE_TREEVIEW,
UIA_TreeItemControlTypeId:controlTypes.ROLE_TREEVIEWITEM,
UIA_CustomControlTypeId:controlTypes.ROLE_UNKNOWN,
UIA_GroupControlTypeId:controlTypes.ROLE_GROUPING,
UIA_ThumbControlTypeId:controlTypes.ROLE_THUMB,
UIA_DataGridControlTypeId:controlTypes.ROLE_DATAGRID,
UIA_DataItemControlTypeId:controlTypes.ROLE_DATAITEM,
UIA_DocumentControlTypeId:controlTypes.ROLE_DOCUMENT,
UIA_SplitButtonControlTypeId:controlTypes.ROLE_SPLITBUTTON,
UIA_WindowControlTypeId:controlTypes.ROLE_WINDOW,
UIA_PaneControlTypeId:controlTypes.ROLE_PANE,
UIA_HeaderControlTypeId:controlTypes.ROLE_HEADER,
UIA_HeaderItemControlTypeId:controlTypes.ROLE_HEADERITEM,
UIA_TableControlTypeId:controlTypes.ROLE_TABLE,
UIA_TitleBarControlTypeId:controlTypes.ROLE_TITLEBAR,
UIA_SeparatorControlTypeId:controlTypes.ROLE_SEPARATOR,
}
UIAPropertyIdsToNVDAEventNames={
UIA_NamePropertyId:"nameChange",
UIA_HelpTextPropertyId:"descriptionChange",
UIA_ExpandCollapseExpandCollapseStatePropertyId:"stateChange",
UIA_ToggleToggleStatePropertyId:"stateChange",
UIA_IsEnabledPropertyId:"stateChange",
UIA_ValueValuePropertyId:"valueChange",
UIA_RangeValueValuePropertyId:"valueChange",
UIA_ControllerForPropertyId:"UIA_controllerFor",
}
UIAEventIdsToNVDAEventNames={
UIA_LiveRegionChangedEventId:"liveRegionChange",
#UIA_Text_TextChangedEventId:"textChanged",
UIA_SelectionItem_ElementSelectedEventId:"UIA_elementSelected",
UIA_MenuOpenedEventId:"gainFocus",
UIA_SelectionItem_ElementAddedToSelectionEventId:"stateChange",
UIA_SelectionItem_ElementRemovedFromSelectionEventId:"stateChange",
#UIA_MenuModeEndEventId:"menuModeEnd",
#UIA_Text_TextSelectionChangedEventId:"caret",
UIA_ToolTipOpenedEventId:"UIA_toolTipOpened",
#UIA_AsyncContentLoadedEventId:"documentLoadComplete",
#UIA_ToolTipClosedEventId:"hide",
UIA_Window_WindowOpenedEventId:"UIA_window_windowOpen",
UIA_SystemAlertEventId:"UIA_systemAlert",
}
class UIAHandler(COMObject):
_com_interfaces_=[IUIAutomationEventHandler,IUIAutomationFocusChangedEventHandler,IUIAutomationPropertyChangedEventHandler,IUIAutomationNotificationEventHandler]
def __init__(self):
super(UIAHandler,self).__init__()
self.MTAThreadInitEvent=threading.Event()
self.MTAThreadStopEvent=threading.Event()
self.MTAThreadInitException=None
self.MTAThread=threading.Thread(target=self.MTAThreadFunc)
self.MTAThread.daemon=True
self.MTAThread.start()
self.MTAThreadInitEvent.wait(2)
if self.MTAThreadInitException:
raise self.MTAThreadInitException
def terminate(self):
MTAThreadHandle=HANDLE(windll.kernel32.OpenThread(winKernel.SYNCHRONIZE,False,self.MTAThread.ident))
self.MTAThreadStopEvent.set()
#Wait for the MTA thread to die (while still message pumping)
if windll.user32.MsgWaitForMultipleObjects(1,byref(MTAThreadHandle),False,200,0)!=0:
log.debugWarning("Timeout or error while waiting for UIAHandler MTA thread")
windll.kernel32.CloseHandle(MTAThreadHandle)
del self.MTAThread
def MTAThreadFunc(self):
try:
oledll.ole32.CoInitializeEx(None,comtypes.COINIT_MULTITHREADED)
isUIA8=False
try:
self.clientObject=CoCreateInstance(CUIAutomation8._reg_clsid_,interface=IUIAutomation,clsctx=CLSCTX_INPROC_SERVER)
isUIA8=True
except (COMError,WindowsError,NameError):
self.clientObject=CoCreateInstance(CUIAutomation._reg_clsid_,interface=IUIAutomation,clsctx=CLSCTX_INPROC_SERVER)
if isUIA8:
# #8009: use appropriate interface based on highest supported interface.
for interface in (IUIAutomation5, IUIAutomation4, IUIAutomation3, IUIAutomation2):
try:
self.clientObject=self.clientObject.QueryInterface(interface)
break
except COMError:
pass
log.info("UIAutomation: %s"%self.clientObject.__class__.__mro__[1].__name__)
self.windowTreeWalker=self.clientObject.createTreeWalker(self.clientObject.CreateNotCondition(self.clientObject.CreatePropertyCondition(UIA_NativeWindowHandlePropertyId,0)))
self.windowCacheRequest=self.clientObject.CreateCacheRequest()
self.windowCacheRequest.AddProperty(UIA_NativeWindowHandlePropertyId)
self.UIAWindowHandleCache={}
self.baseTreeWalker=self.clientObject.RawViewWalker
self.baseCacheRequest=self.windowCacheRequest.Clone()
import UIAHandler
self.ItemIndex_PropertyId=NVDAHelper.localLib.registerUIAProperty(byref(ItemIndex_Property_GUID),u"ItemIndex",1)
self.ItemCount_PropertyId=NVDAHelper.localLib.registerUIAProperty(byref(ItemCount_Property_GUID),u"ItemCount",1)
for propertyId in (UIA_FrameworkIdPropertyId,UIA_AutomationIdPropertyId,UIA_ClassNamePropertyId,UIA_ControlTypePropertyId,UIA_ProviderDescriptionPropertyId,UIA_ProcessIdPropertyId,UIA_IsTextPatternAvailablePropertyId,UIA_IsContentElementPropertyId,UIA_IsControlElementPropertyId):
self.baseCacheRequest.addProperty(propertyId)
self.baseCacheRequest.addPattern(UIA_TextPatternId)
self.rootElement=self.clientObject.getRootElementBuildCache(self.baseCacheRequest)
self.reservedNotSupportedValue=self.clientObject.ReservedNotSupportedValue
self.ReservedMixedAttributeValue=self.clientObject.ReservedMixedAttributeValue
self.clientObject.AddFocusChangedEventHandler(self.baseCacheRequest,self)
self.clientObject.AddPropertyChangedEventHandler(self.rootElement,TreeScope_Subtree,self.baseCacheRequest,self,UIAPropertyIdsToNVDAEventNames.keys())
for x in UIAEventIdsToNVDAEventNames.iterkeys():
self.clientObject.addAutomationEventHandler(x,self.rootElement,TreeScope_Subtree,self.baseCacheRequest,self)
# #7984: add support for notification event (IUIAutomation5, part of Windows 10 build 16299 and later).
if isinstance(self.clientObject, IUIAutomation5):
self.clientObject.AddNotificationEventHandler(self.rootElement,TreeScope_Subtree,self.baseCacheRequest,self)
except Exception as e:
self.MTAThreadInitException=e
finally:
self.MTAThreadInitEvent.set()
self.MTAThreadStopEvent.wait()
self.clientObject.RemoveAllEventHandlers()
def IUIAutomationEventHandler_HandleAutomationEvent(self,sender,eventID):
if not self.MTAThreadInitEvent.isSet():
# UIAHandler hasn't finished initialising yet, so just ignore this event.
return
if eventID==UIA_MenuOpenedEventId and eventHandler.isPendingEvents("gainFocus"):
# We don't need the menuOpened event if focus has been fired,
# as focus should be more correct.
return
NVDAEventName=UIAEventIdsToNVDAEventNames.get(eventID,None)
if not NVDAEventName:
return
if not self.isNativeUIAElement(sender):
return
window=self.getNearestWindowHandle(sender)
if window and not eventHandler.shouldAcceptEvent(NVDAEventName,windowHandle=window):
return
import NVDAObjects.UIA
obj=NVDAObjects.UIA.UIA(UIAElement=sender)
if (
not obj
or (NVDAEventName=="gainFocus" and not obj.shouldAllowUIAFocusEvent)
or (NVDAEventName=="liveRegionChange" and not obj._shouldAllowUIALiveRegionChangeEvent)
):
return
focus=api.getFocusObject()
if obj==focus:
obj=focus
eventHandler.queueEvent(NVDAEventName,obj)
def IUIAutomationFocusChangedEventHandler_HandleFocusChangedEvent(self,sender):
if not self.MTAThreadInitEvent.isSet():
# UIAHandler hasn't finished initialising yet, so just ignore this event.
return
if not self.isNativeUIAElement(sender):
return
import NVDAObjects.UIA
if isinstance(eventHandler.lastQueuedFocusObject,NVDAObjects.UIA.UIA):
lastFocus=eventHandler.lastQueuedFocusObject.UIAElement
# Ignore duplicate focus events.
# It seems that it is possible for compareElements to return True, even though the objects are different.
# Therefore, don't ignore the event if the last focus object has lost its hasKeyboardFocus state.
if self.clientObject.compareElements(sender,lastFocus) and lastFocus.currentHasKeyboardFocus:
return
window=self.getNearestWindowHandle(sender)
if window and not eventHandler.shouldAcceptEvent("gainFocus",windowHandle=window):
return
obj=NVDAObjects.UIA.UIA(UIAElement=sender)
if not obj or not obj.shouldAllowUIAFocusEvent:
return
eventHandler.queueEvent("gainFocus",obj)
def IUIAutomationPropertyChangedEventHandler_HandlePropertyChangedEvent(self,sender,propertyId,newValue):
# #3867: For now manually force this VARIANT type to empty to get around a nasty double free in comtypes/ctypes.
# We also don't use the value in this callback.
newValue.vt=VT_EMPTY
if not self.MTAThreadInitEvent.isSet():
# UIAHandler hasn't finished initialising yet, so just ignore this event.
return
NVDAEventName=UIAPropertyIdsToNVDAEventNames.get(propertyId,None)
if not NVDAEventName:
return
if not self.isNativeUIAElement(sender):
return
window=self.getNearestWindowHandle(sender)
if window and not eventHandler.shouldAcceptEvent(NVDAEventName,windowHandle=window):
return
import NVDAObjects.UIA
obj=NVDAObjects.UIA.UIA(UIAElement=sender)
if not obj:
return
focus=api.getFocusObject()
if obj==focus:
obj=focus
eventHandler.queueEvent(NVDAEventName,obj)
def IUIAutomationNotificationEventHandler_HandleNotificationEvent(self,sender,NotificationKind,NotificationProcessing,displayString,activityId):
if not self.MTAThreadInitEvent.isSet():
# UIAHandler hasn't finished initialising yet, so just ignore this event.
return
import NVDAObjects.UIA
obj=NVDAObjects.UIA.UIA(UIAElement=sender)
eventHandler.queueEvent("UIA_notification",obj, notificationKind=NotificationKind, notificationProcessing=NotificationProcessing, displayString=displayString, activityId=activityId)
def _isUIAWindowHelper(self,hwnd):
# UIA in NVDA's process freezes in Windows 7 and below
processID=winUser.getWindowThreadProcessID(hwnd)[0]
if windll.kernel32.GetCurrentProcessId()==processID:
return False
import NVDAObjects.window
windowClass=NVDAObjects.window.Window.normalizeWindowClassName(winUser.getClassName(hwnd))
# For certain window classes, we always want to use UIA.
if windowClass in goodUIAWindowClassNames:
return True
# allow the appModule for the window to also choose if this window is good
# An appModule should be able to override bad UIA class names as prescribed by core
appModule=appModuleHandler.getAppModuleFromProcessID(processID)
if appModule and appModule.isGoodUIAWindow(hwnd):
return True
# There are certain window classes that just had bad UIA implementations
if windowClass in badUIAWindowClassNames:
return False
if windowClass=="NetUIHWND":
parentHwnd=winUser.getAncestor(hwnd,winUser.GA_ROOT)
# #2816: Outlook 2010 auto complete does not fire enough UIA events, IAccessible is better.
# #4056: Combo boxes in Office 2010 Options dialogs don't expose a name via UIA, but do via MSAA.
if winUser.getClassName(parentHwnd) in {"Net UI Tool Window","NUIDialog"}:
return False
# allow the appModule for the window to also choose if this window is bad
if appModule and appModule.isBadUIAWindow(hwnd):
return False
# Ask the window if it supports UIA natively
res=windll.UIAutomationCore.UiaHasServerSideProvider(hwnd)
if res:
# the window does support UIA natively, but
# Microsoft Word should not use UIA unless we can't inject or the user explicitly chose to use UIA with Microsoft word
if windowClass=="_WwG" and not (config.conf['UIA']['useInMSWordWhenAvailable'] or not appModule.helperLocalBindingHandle):
return False
return bool(res)
def isUIAWindow(self,hwnd):
now=time.time()
v=self.UIAWindowHandleCache.get(hwnd,None)
if not v or (now-v[1])>0.5:
v=self._isUIAWindowHelper(hwnd),now
self.UIAWindowHandleCache[hwnd]=v
return v[0]
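	# Performance note (editorial): results are memoised per window handle with
	# a 0.5 second TTL, so a burst of UIA events on one window pays for
	# _isUIAWindowHelper (including the out-of-process UiaHasServerSideProvider
	# call) at most roughly twice per second.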
def getNearestWindowHandle(self,UIAElement):
if hasattr(UIAElement,"_nearestWindowHandle"):
# Called previously. Use cached result.
return UIAElement._nearestWindowHandle
try:
processID=UIAElement.cachedProcessID
except COMError:
return None
appModule=appModuleHandler.getAppModuleFromProcessID(processID)
# WDAG (Windows Defender application Guard) UIA elements should be treated as being from a remote machine, and therefore their window handles are completely invalid on this machine.
# Therefore, jump all the way up to the root of the WDAG process and use that window handle as it is local to this machine.
if appModule.appName==WDAG_PROCESS_NAME:
condition=UIAUtils.createUIAMultiPropertyCondition({UIA_ClassNamePropertyId:[u'ApplicationFrameWindow',u'CabinetWClass']})
walker=self.clientObject.createTreeWalker(condition)
else:
# Not WDAG, just walk up to the nearest valid windowHandle
walker=self.windowTreeWalker
try:
new=walker.NormalizeElementBuildCache(UIAElement,self.windowCacheRequest)
except COMError:
return None
try:
window=new.cachedNativeWindowHandle
except COMError:
window=None
# Cache for future use to improve performance.
UIAElement._nearestWindowHandle=window
return window
def isNativeUIAElement(self,UIAElement):
#Due to issues dealing with UIA elements coming from the same process, we do not class these UIA elements as usable.
		#It seems to be safe enough to retrieve the cached processID, but using tree walkers or fetching other properties causes a freeze.
try:
processID=UIAElement.cachedProcessId
except COMError:
return False
if processID==windll.kernel32.GetCurrentProcessId():
return False
# Whether this is a native element depends on whether its window natively supports UIA.
windowHandle=self.getNearestWindowHandle(UIAElement)
if windowHandle:
if self.isUIAWindow(windowHandle):
return True
if winUser.getClassName(windowHandle)=="DirectUIHWND" and "IEFRAME.dll" in UIAElement.cachedProviderDescription and UIAElement.currentClassName in ("DownloadBox", "accessiblebutton", "DUIToolbarButton", "PushButton"):
# This is the IE 9 downloads list.
# #3354: UiaHasServerSideProvider returns false for the IE 9 downloads list window,
# so we'd normally use MSAA for this control.
# However, its MSAA implementation is broken (fires invalid events) if UIA is initialised,
# whereas its UIA implementation works correctly.
# Therefore, we must use UIA here.
return True
return False
| 1 | 22,294 | Would it make sense to log a debug warning here? | nvaccess-nvda | py |
@@ -35,14 +35,6 @@ var loginCmd = &cobra.Command{
os.Exit(1)
}
- if err := ks.SetDefault(addr); err != nil {
- cmd.Printf("Given address is not present into keystore.\r\nAvailable addresses:\r\n")
- for _, addr := range ks.List() {
- cmd.Println(addr.Address.Hex())
- }
- return
- }
-
// ask for password for default key
pass, err := accounts.NewInteractivePassPhraser().GetPassPhrase()
if err != nil { | 1 | package commands
import (
"os"
"github.com/ethereum/go-ethereum/crypto"
"github.com/sonm-io/core/accounts"
"github.com/sonm-io/core/util"
"github.com/spf13/cobra"
)
var loginCmd = &cobra.Command{
Use: "login [addr]",
Short: "Open or generate Ethereum keys",
Run: func(cmd *cobra.Command, args []string) {
ks, err := initKeystore()
if err != nil {
showError(cmd, "Cannot init keystore", err)
os.Exit(1)
}
keydir := keystorePath()
cmd.Printf("Keystore path: %s\n", keydir)
if len(args) > 0 { // have a key
if len(ks.List()) == 0 {
showError(cmd, "Cannot switch default address: keystore is empty", nil)
os.Exit(1)
}
// check if valid
addr, err := util.HexToAddress(args[0])
if err != nil {
showError(cmd, err.Error(), nil)
os.Exit(1)
}
if err := ks.SetDefault(addr); err != nil {
cmd.Printf("Given address is not present into keystore.\r\nAvailable addresses:\r\n")
for _, addr := range ks.List() {
cmd.Println(addr.Address.Hex())
}
return
}
// ask for password for default key
pass, err := accounts.NewInteractivePassPhraser().GetPassPhrase()
if err != nil {
showError(cmd, "Cannot read pass phrase", err)
os.Exit(1)
}
// try to decrypt default key with given pass
if _, err := ks.GetKeyWithPass(addr, pass); err != nil {
showError(cmd, "Cannot decrypt default key with given pass", err)
os.Exit(1)
}
cfg.Eth.Passphrase = pass
cfg.Eth.Keystore = keydir
cfg.Save()
cmd.Printf("Set \"%s\" as default keystore address\r\n", addr.Hex())
} else { // no keys
ls := ks.List()
if len(ls) == 0 {
// generate new key
cmd.Println("Keystore is empty, generating new key...")
// ask for password for default key
				pass, err := accounts.NewInteractivePassPhraser().GetPassPhrase()
				if err != nil {
					showError(cmd, "Cannot read pass phrase", err)
					os.Exit(1)
				}
				newKey, err := ks.GenerateWithPassword(pass)
if err != nil {
showError(cmd, "Cannot generate new key", err)
os.Exit(1)
}
cmd.Printf("Generated key %s set as default\r\n", crypto.PubkeyToAddress(newKey.PublicKey).Hex())
cfg.Eth.Passphrase = pass
cfg.Eth.Keystore = keydir
cfg.Save()
return
}
defaultAddr, err := ks.GetDefaultAddress()
if err != nil {
cmd.Printf("No default address for account, select one from list and use `sonmcli login [addr]`\r\n")
} else {
cmd.Printf("Default key: %s\r\n", defaultAddr.Hex())
}
cmd.Println("Keystore contains following keys:")
for _, acc := range ls {
cmd.Printf(" %s\r\n", acc.Address.Hex())
}
}
},
}
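// Behaviour sketch (editorial summary of the command above): with an empty
// keystore, `sonmcli login` prompts for a passphrase and generates a default
// key; `sonmcli login <addr>` asks for the passphrase, verifies it can decrypt
// the given key, and persists that address and passphrase in the config.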
| 1 | 7,142 | "in the keystore", but nevermind | sonm-io-core | go |
@@ -337,7 +337,7 @@ void nano::network::republish_vote (std::shared_ptr<nano::vote> vote_a)
void nano::network::broadcast_confirm_req (std::shared_ptr<nano::block> block_a)
{
auto list (std::make_shared<std::vector<nano::peer_information>> (node.peers.representatives (std::numeric_limits<size_t>::max ())));
- if (list->empty () || node.peers.total_weight () < node.config.online_weight_minimum.number ())
+ if (list->empty () || node.peers.total_weight () < node.delta ())
{
// broadcast request to all peers
list = std::make_shared<std::vector<nano::peer_information>> (node.peers.list_vector (100)); | 1 | #include <nano/node/node.hpp>
#include <nano/lib/interface.h>
#include <nano/lib/utility.hpp>
#include <nano/node/common.hpp>
#include <nano/node/rpc.hpp>
#include <algorithm>
#include <cstdlib>
#include <future>
#include <sstream>
#include <boost/polymorphic_cast.hpp>
#include <boost/property_tree/json_parser.hpp>
double constexpr nano::node::price_max;
double constexpr nano::node::free_cutoff;
std::chrono::seconds constexpr nano::node::period;
std::chrono::seconds constexpr nano::node::cutoff;
std::chrono::seconds constexpr nano::node::syn_cookie_cutoff;
std::chrono::minutes constexpr nano::node::backup_interval;
std::chrono::seconds constexpr nano::node::search_pending_interval;
int constexpr nano::port_mapping::mapping_timeout;
int constexpr nano::port_mapping::check_timeout;
unsigned constexpr nano::active_transactions::announce_interval_ms;
size_t constexpr nano::active_transactions::max_broadcast_queue;
size_t constexpr nano::block_arrival::arrival_size_min;
std::chrono::seconds constexpr nano::block_arrival::arrival_time_min;
namespace nano
{
extern unsigned char nano_bootstrap_weights[];
extern size_t nano_bootstrap_weights_size;
}
nano::network::network (nano::node & node_a, uint16_t port) :
buffer_container (node_a.stats, nano::network::buffer_size, 4096), // 2Mb receive buffer
socket (node_a.io_ctx, nano::endpoint (boost::asio::ip::address_v6::any (), port)),
resolver (node_a.io_ctx),
node (node_a),
on (true)
{
boost::thread::attributes attrs;
nano::thread_attributes::set (attrs);
for (size_t i = 0; i < node.config.network_threads; ++i)
{
packet_processing_threads.push_back (boost::thread (attrs, [this]() {
nano::thread_role::set (nano::thread_role::name::packet_processing);
try
{
process_packets ();
}
catch (boost::system::error_code & ec)
{
BOOST_LOG (this->node.log) << FATAL_LOG_PREFIX << ec.message ();
release_assert (false);
}
catch (std::error_code & ec)
{
BOOST_LOG (this->node.log) << FATAL_LOG_PREFIX << ec.message ();
release_assert (false);
}
catch (std::runtime_error & err)
{
BOOST_LOG (this->node.log) << FATAL_LOG_PREFIX << err.what ();
release_assert (false);
}
catch (...)
{
BOOST_LOG (this->node.log) << FATAL_LOG_PREFIX << "Unknown exception";
release_assert (false);
}
if (this->node.config.logging.network_packet_logging ())
{
BOOST_LOG (this->node.log) << "Exiting packet processing thread";
}
}));
}
}
nano::network::~network ()
{
for (auto & thread : packet_processing_threads)
{
thread.join ();
}
}
void nano::network::start ()
{
for (size_t i = 0; i < node.config.io_threads; ++i)
{
receive ();
}
}
void nano::network::receive ()
{
if (node.config.logging.network_packet_logging ())
{
BOOST_LOG (node.log) << "Receiving packet";
}
std::unique_lock<std::mutex> lock (socket_mutex);
auto data (buffer_container.allocate ());
socket.async_receive_from (boost::asio::buffer (data->buffer, nano::network::buffer_size), data->endpoint, [this, data](boost::system::error_code const & error, size_t size_a) {
if (!error && this->on)
{
data->size = size_a;
this->buffer_container.enqueue (data);
this->receive ();
}
else
{
this->buffer_container.release (data);
if (error)
{
if (this->node.config.logging.network_logging ())
{
BOOST_LOG (this->node.log) << boost::str (boost::format ("UDP Receive error: %1%") % error.message ());
}
}
if (this->on)
{
this->node.alarm.add (std::chrono::steady_clock::now () + std::chrono::seconds (5), [this]() { this->receive (); });
}
}
});
}
void nano::network::process_packets ()
{
while (on)
{
auto data (buffer_container.dequeue ());
if (data == nullptr)
{
break;
}
//std::cerr << data->endpoint.address ().to_string ();
receive_action (data);
buffer_container.release (data);
}
}
void nano::network::stop ()
{
on = false;
socket.close ();
resolver.cancel ();
buffer_container.stop ();
}
void nano::network::send_keepalive (nano::endpoint const & endpoint_a)
{
assert (endpoint_a.address ().is_v6 ());
nano::keepalive message;
node.peers.random_fill (message.peers);
auto bytes = message.to_bytes ();
if (node.config.logging.network_keepalive_logging ())
{
BOOST_LOG (node.log) << boost::str (boost::format ("Keepalive req sent to %1%") % endpoint_a);
}
std::weak_ptr<nano::node> node_w (node.shared ());
send_buffer (bytes->data (), bytes->size (), endpoint_a, [bytes, node_w, endpoint_a](boost::system::error_code const & ec, size_t) {
if (auto node_l = node_w.lock ())
{
if (ec && node_l->config.logging.network_keepalive_logging ())
{
BOOST_LOG (node_l->log) << boost::str (boost::format ("Error sending keepalive to %1%: %2%") % endpoint_a % ec.message ());
}
else
{
node_l->stats.inc (nano::stat::type::message, nano::stat::detail::keepalive, nano::stat::dir::out);
}
}
});
}
void nano::node::keepalive (std::string const & address_a, uint16_t port_a)
{
auto node_l (shared_from_this ());
network.resolver.async_resolve (boost::asio::ip::udp::resolver::query (address_a, std::to_string (port_a)), [node_l, address_a, port_a](boost::system::error_code const & ec, boost::asio::ip::udp::resolver::iterator i_a) {
if (!ec)
{
for (auto i (i_a), n (boost::asio::ip::udp::resolver::iterator{}); i != n; ++i)
{
node_l->send_keepalive (nano::map_endpoint_to_v6 (i->endpoint ()));
}
}
else
{
BOOST_LOG (node_l->log) << boost::str (boost::format ("Error resolving address: %1%:%2%: %3%") % address_a % port_a % ec.message ());
}
});
}
void nano::network::send_node_id_handshake (nano::endpoint const & endpoint_a, boost::optional<nano::uint256_union> const & query, boost::optional<nano::uint256_union> const & respond_to)
{
assert (endpoint_a.address ().is_v6 ());
boost::optional<std::pair<nano::account, nano::signature>> response (boost::none);
if (respond_to)
{
response = std::make_pair (node.node_id.pub, nano::sign_message (node.node_id.prv, node.node_id.pub, *respond_to));
assert (!nano::validate_message (response->first, *respond_to, response->second));
}
nano::node_id_handshake message (query, response);
auto bytes = message.to_bytes ();
if (node.config.logging.network_node_id_handshake_logging ())
{
BOOST_LOG (node.log) << boost::str (boost::format ("Node ID handshake sent with node ID %1% to %2%: query %3%, respond_to %4% (signature %5%)") % node.node_id.pub.to_account () % endpoint_a % (query ? query->to_string () : std::string ("[none]")) % (respond_to ? respond_to->to_string () : std::string ("[none]")) % (response ? response->second.to_string () : std::string ("[none]")));
}
node.stats.inc (nano::stat::type::message, nano::stat::detail::node_id_handshake, nano::stat::dir::out);
std::weak_ptr<nano::node> node_w (node.shared ());
send_buffer (bytes->data (), bytes->size (), endpoint_a, [bytes, node_w, endpoint_a](boost::system::error_code const & ec, size_t) {
if (auto node_l = node_w.lock ())
{
if (ec && node_l->config.logging.network_node_id_handshake_logging ())
{
BOOST_LOG (node_l->log) << boost::str (boost::format ("Error sending node ID handshake to %1% %2%") % endpoint_a % ec.message ());
}
}
});
}
void nano::network::republish (nano::block_hash const & hash_a, std::shared_ptr<std::vector<uint8_t>> buffer_a, nano::endpoint endpoint_a)
{
if (node.config.logging.network_publish_logging ())
{
BOOST_LOG (node.log) << boost::str (boost::format ("Publishing %1% to %2%") % hash_a.to_string () % endpoint_a);
}
std::weak_ptr<nano::node> node_w (node.shared ());
send_buffer (buffer_a->data (), buffer_a->size (), endpoint_a, [buffer_a, node_w, endpoint_a](boost::system::error_code const & ec, size_t size) {
if (auto node_l = node_w.lock ())
{
if (ec && node_l->config.logging.network_logging ())
{
BOOST_LOG (node_l->log) << boost::str (boost::format ("Error sending publish to %1%: %2%") % endpoint_a % ec.message ());
}
else
{
node_l->stats.inc (nano::stat::type::message, nano::stat::detail::publish, nano::stat::dir::out);
}
}
});
}
template <typename T>
bool confirm_block (nano::transaction const & transaction_a, nano::node & node_a, T & list_a, std::shared_ptr<nano::block> block_a, bool also_publish)
{
bool result (false);
if (node_a.config.enable_voting)
{
node_a.wallets.foreach_representative (transaction_a, [&result, &block_a, &list_a, &node_a, &transaction_a, also_publish](nano::public_key const & pub_a, nano::raw_key const & prv_a) {
result = true;
auto hash (block_a->hash ());
auto vote (node_a.store.vote_generate (transaction_a, pub_a, prv_a, std::vector<nano::block_hash> (1, hash)));
nano::confirm_ack confirm (vote);
auto vote_bytes = confirm.to_bytes ();
nano::publish publish (block_a);
std::shared_ptr<std::vector<uint8_t>> publish_bytes;
if (also_publish)
{
publish_bytes = publish.to_bytes ();
}
for (auto j (list_a.begin ()), m (list_a.end ()); j != m; ++j)
{
node_a.network.confirm_send (confirm, vote_bytes, *j);
if (also_publish)
{
node_a.network.republish (hash, publish_bytes, *j);
}
}
});
}
return result;
}
bool confirm_block (nano::transaction const & transaction_a, nano::node & node_a, nano::endpoint & peer_a, std::shared_ptr<nano::block> block_a, bool also_publish)
{
std::array<nano::endpoint, 1> endpoints;
endpoints[0] = peer_a;
auto result (confirm_block (transaction_a, node_a, endpoints, std::move (block_a), also_publish));
return result;
}
void nano::network::republish_block (std::shared_ptr<nano::block> block)
{
auto hash (block->hash ());
auto list (node.peers.list_fanout ());
nano::publish message (block);
auto bytes = message.to_bytes ();
for (auto i (list.begin ()), n (list.end ()); i != n; ++i)
{
republish (hash, bytes, *i);
}
if (node.config.logging.network_logging ())
{
BOOST_LOG (node.log) << boost::str (boost::format ("Block %1% was republished to peers") % hash.to_string ());
}
}
void nano::network::republish_block_batch (std::deque<std::shared_ptr<nano::block>> blocks_a, unsigned delay_a)
{
auto block (blocks_a.front ());
blocks_a.pop_front ();
republish_block (block);
if (!blocks_a.empty ())
{
std::weak_ptr<nano::node> node_w (node.shared ());
node.alarm.add (std::chrono::steady_clock::now () + std::chrono::milliseconds (delay_a + std::rand () % delay_a), [node_w, blocks_a, delay_a]() {
if (auto node_l = node_w.lock ())
{
node_l->network.republish_block_batch (blocks_a, delay_a);
}
});
}
}
// In order to rate limit network traffic we republish:
// 1) Only if they are a non-replay vote of a block that's actively settling. Settling blocks are limited by block PoW
// 2) The rep has a weight > Y to prevent creating a lot of small-weight accounts to send out votes
// 3) Only if a vote for this block from this representative hasn't been received in the previous X second.
// This prevents rapid publishing of votes with increasing sequence numbers.
//
// These rules are implemented by the caller, not this function.
void nano::network::republish_vote (std::shared_ptr<nano::vote> vote_a)
{
nano::confirm_ack confirm (vote_a);
auto bytes = confirm.to_bytes ();
auto list (node.peers.list_fanout ());
for (auto j (list.begin ()), m (list.end ()); j != m; ++j)
{
node.network.confirm_send (confirm, bytes, *j);
}
}
void nano::network::broadcast_confirm_req (std::shared_ptr<nano::block> block_a)
{
auto list (std::make_shared<std::vector<nano::peer_information>> (node.peers.representatives (std::numeric_limits<size_t>::max ())));
if (list->empty () || node.peers.total_weight () < node.config.online_weight_minimum.number ())
{
// broadcast request to all peers
list = std::make_shared<std::vector<nano::peer_information>> (node.peers.list_vector (100));
}
/*
* In either case (broadcasting to all representatives, or broadcasting to
* all peers because there are not enough connected representatives),
* limit each instance to a single random up-to-32 selection. The invoker
* of "broadcast_confirm_req" will be responsible for calling it again
* if the votes for a block have not arrived in time.
*/
const size_t max_endpoints = 32;
std::random_shuffle (list->begin (), list->end ());
if (list->size () > max_endpoints)
{
list->erase (list->begin () + max_endpoints, list->end ());
}
broadcast_confirm_req_base (block_a, list, 0);
}
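// Editorial sketch of the fanout policy above: requests normally go to a random
// sample of up to 32 known representatives; only while connected representative
// weight sits below the quorum threshold (node.delta () after the diff above,
// previously config.online_weight_minimum) does the node fall back to sampling
// up to 32 of 100 arbitrary peers.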
void nano::network::broadcast_confirm_req_base (std::shared_ptr<nano::block> block_a, std::shared_ptr<std::vector<nano::peer_information>> endpoints_a, unsigned delay_a, bool resumption)
{
const size_t max_reps = 10;
if (!resumption && node.config.logging.network_logging ())
{
BOOST_LOG (node.log) << boost::str (boost::format ("Broadcasting confirm req for block %1% to %2% representatives") % block_a->hash ().to_string () % endpoints_a->size ());
}
auto count (0);
while (!endpoints_a->empty () && count < max_reps)
{
send_confirm_req (endpoints_a->back ().endpoint, block_a);
endpoints_a->pop_back ();
count++;
}
if (!endpoints_a->empty ())
{
delay_a += std::rand () % broadcast_interval_ms;
std::weak_ptr<nano::node> node_w (node.shared ());
node.alarm.add (std::chrono::steady_clock::now () + std::chrono::milliseconds (delay_a), [node_w, block_a, endpoints_a, delay_a]() {
if (auto node_l = node_w.lock ())
{
node_l->network.broadcast_confirm_req_base (block_a, endpoints_a, delay_a, true);
}
});
}
}
void nano::network::broadcast_confirm_req_batch (std::deque<std::pair<std::shared_ptr<nano::block>, std::shared_ptr<std::vector<nano::peer_information>>>> deque_a, unsigned delay_a)
{
auto pair (deque_a.front ());
deque_a.pop_front ();
auto block (pair.first);
// confirm_req to representatives
auto endpoints (pair.second);
if (!endpoints->empty ())
{
broadcast_confirm_req_base (block, endpoints, delay_a);
}
/* Continue while blocks remain
Broadcast with random delay between delay_a & 2*delay_a */
if (!deque_a.empty ())
{
std::weak_ptr<nano::node> node_w (node.shared ());
node.alarm.add (std::chrono::steady_clock::now () + std::chrono::milliseconds (delay_a + std::rand () % delay_a), [node_w, deque_a, delay_a]() {
if (auto node_l = node_w.lock ())
{
node_l->network.broadcast_confirm_req_batch (deque_a, delay_a);
}
});
}
}
void nano::network::send_confirm_req (nano::endpoint const & endpoint_a, std::shared_ptr<nano::block> block)
{
nano::confirm_req message (block);
auto bytes = message.to_bytes ();
if (node.config.logging.network_message_logging ())
{
BOOST_LOG (node.log) << boost::str (boost::format ("Sending confirm req to %1%") % endpoint_a);
}
std::weak_ptr<nano::node> node_w (node.shared ());
node.stats.inc (nano::stat::type::message, nano::stat::detail::confirm_req, nano::stat::dir::out);
send_buffer (bytes->data (), bytes->size (), endpoint_a, [bytes, node_w](boost::system::error_code const & ec, size_t size) {
if (auto node_l = node_w.lock ())
{
if (ec && node_l->config.logging.network_logging ())
{
BOOST_LOG (node_l->log) << boost::str (boost::format ("Error sending confirm request: %1%") % ec.message ());
}
}
});
}
template <typename T>
void rep_query (nano::node & node_a, T const & peers_a)
{
auto transaction (node_a.store.tx_begin_read ());
std::shared_ptr<nano::block> block (node_a.store.block_random (transaction));
auto hash (block->hash ());
node_a.rep_crawler.add (hash);
for (auto i (peers_a.begin ()), n (peers_a.end ()); i != n; ++i)
{
node_a.peers.rep_request (*i);
node_a.network.send_confirm_req (*i, block);
}
std::weak_ptr<nano::node> node_w (node_a.shared ());
node_a.alarm.add (std::chrono::steady_clock::now () + std::chrono::seconds (5), [node_w, hash]() {
if (auto node_l = node_w.lock ())
{
node_l->rep_crawler.remove (hash);
}
});
}
void rep_query (nano::node & node_a, nano::endpoint const & peers_a)
{
std::array<nano::endpoint, 1> peers;
peers[0] = peers_a;
rep_query (node_a, peers);
}
namespace
{
class network_message_visitor : public nano::message_visitor
{
public:
network_message_visitor (nano::node & node_a, nano::endpoint const & sender_a) :
node (node_a),
sender (sender_a)
{
}
virtual ~network_message_visitor () = default;
void keepalive (nano::keepalive const & message_a) override
{
if (node.config.logging.network_keepalive_logging ())
{
BOOST_LOG (node.log) << boost::str (boost::format ("Received keepalive message from %1%") % sender);
}
node.stats.inc (nano::stat::type::message, nano::stat::detail::keepalive, nano::stat::dir::in);
if (node.peers.contacted (sender, message_a.header.version_using))
{
auto endpoint_l (nano::map_endpoint_to_v6 (sender));
auto cookie (node.peers.assign_syn_cookie (endpoint_l));
if (cookie)
{
node.network.send_node_id_handshake (endpoint_l, *cookie, boost::none);
}
}
node.network.merge_peers (message_a.peers);
}
void publish (nano::publish const & message_a) override
{
if (node.config.logging.network_message_logging ())
{
BOOST_LOG (node.log) << boost::str (boost::format ("Publish message from %1% for %2%") % sender % message_a.block->hash ().to_string ());
}
node.stats.inc (nano::stat::type::message, nano::stat::detail::publish, nano::stat::dir::in);
node.peers.contacted (sender, message_a.header.version_using);
node.process_active (message_a.block);
node.active.publish (message_a.block);
}
void confirm_req (nano::confirm_req const & message_a) override
{
if (node.config.logging.network_message_logging ())
{
BOOST_LOG (node.log) << boost::str (boost::format ("Confirm_req message from %1% for %2%") % sender % message_a.block->hash ().to_string ());
}
node.stats.inc (nano::stat::type::message, nano::stat::detail::confirm_req, nano::stat::dir::in);
node.peers.contacted (sender, message_a.header.version_using);
// Don't add load to nodes that have voting disabled
if (node.config.enable_voting)
{
auto transaction (node.store.tx_begin_read ());
auto successor (node.ledger.successor (transaction, message_a.block->root ()));
if (successor != nullptr)
{
auto same_block (successor->hash () == message_a.block->hash ());
confirm_block (transaction, node, sender, std::move (successor), !same_block);
}
}
}
void confirm_ack (nano::confirm_ack const & message_a) override
{
if (node.config.logging.network_message_logging ())
{
BOOST_LOG (node.log) << boost::str (boost::format ("Received confirm_ack message from %1% for %2%sequence %3%") % sender % message_a.vote->hashes_string () % std::to_string (message_a.vote->sequence));
}
node.stats.inc (nano::stat::type::message, nano::stat::detail::confirm_ack, nano::stat::dir::in);
node.peers.contacted (sender, message_a.header.version_using);
for (auto & vote_block : message_a.vote->blocks)
{
if (!vote_block.which ())
{
auto block (boost::get<std::shared_ptr<nano::block>> (vote_block));
node.process_active (block);
node.active.publish (block);
}
}
node.vote_processor.vote (message_a.vote, sender);
}
void bulk_pull (nano::bulk_pull const &) override
{
assert (false);
}
void bulk_pull_account (nano::bulk_pull_account const &) override
{
assert (false);
}
void bulk_pull_blocks (nano::bulk_pull_blocks const &) override
{
assert (false);
}
void bulk_push (nano::bulk_push const &) override
{
assert (false);
}
void frontier_req (nano::frontier_req const &) override
{
assert (false);
}
void node_id_handshake (nano::node_id_handshake const & message_a) override
{
if (node.config.logging.network_node_id_handshake_logging ())
{
BOOST_LOG (node.log) << boost::str (boost::format ("Received node_id_handshake message from %1% with query %2% and response account %3%") % sender % (message_a.query ? message_a.query->to_string () : std::string ("[none]")) % (message_a.response ? message_a.response->first.to_account () : std::string ("[none]")));
}
node.stats.inc (nano::stat::type::message, nano::stat::detail::node_id_handshake, nano::stat::dir::in);
auto endpoint_l (nano::map_endpoint_to_v6 (sender));
boost::optional<nano::uint256_union> out_query;
boost::optional<nano::uint256_union> out_respond_to;
if (message_a.query)
{
out_respond_to = message_a.query;
}
auto validated_response (false);
if (message_a.response)
{
if (!node.peers.validate_syn_cookie (endpoint_l, message_a.response->first, message_a.response->second))
{
validated_response = true;
if (message_a.response->first != node.node_id.pub)
{
node.peers.insert (endpoint_l, message_a.header.version_using);
}
}
else if (node.config.logging.network_node_id_handshake_logging ())
{
BOOST_LOG (node.log) << boost::str (boost::format ("Failed to validate syn cookie signature %1% by %2%") % message_a.response->second.to_string () % message_a.response->first.to_account ());
}
}
if (!validated_response && !node.peers.known_peer (endpoint_l))
{
out_query = node.peers.assign_syn_cookie (endpoint_l);
}
if (out_query || out_respond_to)
{
node.network.send_node_id_handshake (sender, out_query, out_respond_to);
}
}
nano::node & node;
nano::endpoint sender;
};
}
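// Entry point for every received UDP datagram: drop packets from ourselves or from
// reserved addresses, otherwise parse the message and dispatch it through
// network_message_visitor, recording a detailed stat for every parse failure.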
void nano::network::receive_action (nano::udp_data * data_a)
{
auto allowed_sender (true);
if (data_a->endpoint == endpoint ())
{
allowed_sender = false;
}
else if (nano::reserved_address (data_a->endpoint, false) && !node.config.allow_local_peers)
{
allowed_sender = false;
}
if (allowed_sender)
{
network_message_visitor visitor (node, data_a->endpoint);
nano::message_parser parser (node.block_uniquer, node.vote_uniquer, visitor, node.work);
parser.deserialize_buffer (data_a->buffer, data_a->size);
if (parser.status != nano::message_parser::parse_status::success)
{
node.stats.inc (nano::stat::type::error);
switch (parser.status)
{
case nano::message_parser::parse_status::insufficient_work:
// We've already incremented the error count, so update the detail only
node.stats.inc_detail_only (nano::stat::type::error, nano::stat::detail::insufficient_work);
break;
case nano::message_parser::parse_status::invalid_magic:
node.stats.inc (nano::stat::type::udp, nano::stat::detail::invalid_magic);
break;
case nano::message_parser::parse_status::invalid_network:
node.stats.inc (nano::stat::type::udp, nano::stat::detail::invalid_network);
break;
case nano::message_parser::parse_status::invalid_header:
node.stats.inc (nano::stat::type::udp, nano::stat::detail::invalid_header);
break;
case nano::message_parser::parse_status::invalid_message_type:
node.stats.inc (nano::stat::type::udp, nano::stat::detail::invalid_message_type);
break;
case nano::message_parser::parse_status::invalid_keepalive_message:
node.stats.inc (nano::stat::type::udp, nano::stat::detail::invalid_keepalive_message);
break;
case nano::message_parser::parse_status::invalid_publish_message:
node.stats.inc (nano::stat::type::udp, nano::stat::detail::invalid_publish_message);
break;
case nano::message_parser::parse_status::invalid_confirm_req_message:
node.stats.inc (nano::stat::type::udp, nano::stat::detail::invalid_confirm_req_message);
break;
case nano::message_parser::parse_status::invalid_confirm_ack_message:
node.stats.inc (nano::stat::type::udp, nano::stat::detail::invalid_confirm_ack_message);
break;
case nano::message_parser::parse_status::invalid_node_id_handshake_message:
node.stats.inc (nano::stat::type::udp, nano::stat::detail::invalid_node_id_handshake_message);
break;
case nano::message_parser::parse_status::outdated_version:
node.stats.inc (nano::stat::type::udp, nano::stat::detail::outdated_version);
break;
case nano::message_parser::parse_status::success:
/* Already checked, unreachable */
break;
}
if (node.config.logging.network_logging ())
{
BOOST_LOG (node.log) << "Could not parse message. Error: " << parser.status_string ();
}
}
else
{
node.stats.add (nano::stat::type::traffic, nano::stat::dir::in, data_a->size);
}
}
else
{
if (node.config.logging.network_logging ())
{
BOOST_LOG (node.log) << boost::str (boost::format ("Reserved sender %1%") % data_a->endpoint.address ().to_string ());
}
node.stats.inc_detail_only (nano::stat::type::error, nano::stat::detail::bad_sender);
}
}
// Send keepalives to all the peers we've been notified of
void nano::network::merge_peers (std::array<nano::endpoint, 8> const & peers_a)
{
for (auto i (peers_a.begin ()), j (peers_a.end ()); i != j; ++i)
{
if (!node.peers.reachout (*i))
{
send_keepalive (*i);
}
}
}
bool nano::operation::operator> (nano::operation const & other_a) const
{
return wakeup > other_a.wakeup;
}
nano::alarm::alarm (boost::asio::io_context & io_ctx_a) :
io_ctx (io_ctx_a),
thread ([this]() {
nano::thread_role::set (nano::thread_role::name::alarm);
run ();
})
{
}
nano::alarm::~alarm ()
{
add (std::chrono::steady_clock::now (), nullptr);
thread.join ();
}
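// Alarm worker loop. Operations are kept in a priority queue ordered by wakeup time;
// a null function acts as the shutdown sentinel (pushed by the destructor).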
void nano::alarm::run ()
{
std::unique_lock<std::mutex> lock (mutex);
auto done (false);
while (!done)
{
if (!operations.empty ())
{
auto & operation (operations.top ());
if (operation.function)
{
if (operation.wakeup <= std::chrono::steady_clock::now ())
{
io_ctx.post (operation.function);
operations.pop ();
}
else
{
auto wakeup (operation.wakeup);
condition.wait_until (lock, wakeup);
}
}
else
{
done = true;
}
}
else
{
condition.wait (lock);
}
}
}
void nano::alarm::add (std::chrono::steady_clock::time_point const & wakeup_a, std::function<void()> const & operation)
{
{
std::lock_guard<std::mutex> lock (mutex);
operations.push (nano::operation ({ wakeup_a, operation }));
}
condition.notify_all ();
}
nano::node_init::node_init () :
block_store_init (false),
wallet_init (false)
{
}
bool nano::node_init::error ()
{
return block_store_init || wallet_init;
}
nano::vote_processor::vote_processor (nano::node & node_a) :
node (node_a),
started (false),
stopped (false),
active (false),
thread ([this]() {
nano::thread_role::set (nano::thread_role::name::vote_processing);
process_loop ();
})
{
std::unique_lock<std::mutex> lock (mutex);
while (!started)
{
condition.wait (lock);
}
}
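// Drain queued votes in batches: verify signatures in bulk first, then apply each
// vote while holding the active_transactions mutex, releasing it every 100 votes,
// presumably to limit contention with other consumers.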
void nano::vote_processor::process_loop ()
{
std::chrono::steady_clock::time_point start_time, end_time;
std::chrono::steady_clock::duration elapsed_time;
std::chrono::milliseconds elapsed_time_ms;
uint64_t elapsed_time_ms_int;
bool log_this_iteration;
std::unique_lock<std::mutex> lock (mutex);
started = true;
lock.unlock ();
condition.notify_all ();
lock.lock ();
while (!stopped)
{
if (!votes.empty ())
{
std::deque<std::pair<std::shared_ptr<nano::vote>, nano::endpoint>> votes_l;
votes_l.swap (votes);
log_this_iteration = false;
if (node.config.logging.network_logging () && votes_l.size () > 50)
{
/*
* Only log the timing information for this iteration if
* there are a sufficient number of items for it to be relevant
*/
log_this_iteration = true;
start_time = std::chrono::steady_clock::now ();
}
active = true;
lock.unlock ();
verify_votes (votes_l);
{
std::unique_lock<std::mutex> active_single_lock (node.active.mutex);
auto transaction (node.store.tx_begin_read ());
uint64_t count (1);
for (auto & i : votes_l)
{
vote_blocking (transaction, i.first, i.second, true);
// Release the active_transactions mutex every 100 processed votes
if (count % 100 == 0)
{
active_single_lock.unlock ();
active_single_lock.lock ();
}
count++;
}
}
lock.lock ();
active = false;
lock.unlock ();
condition.notify_all ();
lock.lock ();
if (log_this_iteration)
{
end_time = std::chrono::steady_clock::now ();
elapsed_time = end_time - start_time;
elapsed_time_ms = std::chrono::duration_cast<std::chrono::milliseconds> (elapsed_time);
elapsed_time_ms_int = elapsed_time_ms.count ();
if (elapsed_time_ms_int >= 100)
{
/*
* Iterations shorter than 100 ms are probably not
* representative, so don't spam the logs with them.
*/
BOOST_LOG (node.log) << boost::str (boost::format ("Processed %1% votes in %2% milliseconds (rate of %3% votes per second)") % votes_l.size () % elapsed_time_ms_int % ((votes_l.size () * 1000ULL) / elapsed_time_ms_int));
}
}
}
else
{
condition.wait (lock);
}
}
}
void nano::vote_processor::vote (std::shared_ptr<nano::vote> vote_a, nano::endpoint endpoint_a)
{
assert (endpoint_a.address ().is_v6 ());
std::unique_lock<std::mutex> lock (mutex);
if (!stopped)
{
bool process (false);
/* Random early detection levels
Always process votes for the test network (process = true)
Stop processing entirely above a maximum of 144 * 1024 queued votes */
if (nano::nano_network != nano::nano_networks::nano_test_network)
{
// Level 0 (< 0.1%)
if (votes.size () < 96 * 1024)
{
process = true;
}
// Level 1 (0.1-1%)
else if (votes.size () < 112 * 1024)
{
process = (representatives_1.find (vote_a->account) != representatives_1.end ());
}
// Level 2 (1-5%)
else if (votes.size () < 128 * 1024)
{
process = (representatives_2.find (vote_a->account) != representatives_2.end ());
}
// Level 3 (> 5%)
else if (votes.size () < 144 * 1024)
{
process = (representatives_3.find (vote_a->account) != representatives_3.end ());
}
}
else
{
// Process for test network
process = true;
}
if (process)
{
votes.push_back (std::make_pair (vote_a, endpoint_a));
lock.unlock ();
condition.notify_all ();
lock.lock ();
}
else
{
node.stats.inc (nano::stat::type::vote, nano::stat::detail::vote_overflow);
if (node.config.logging.vote_logging ())
{
BOOST_LOG (node.log) << "Votes overflow";
}
}
}
}
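// Batch-verify the signatures of the queued votes through the signature checker
// thread and keep only the votes whose signature verified.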
void nano::vote_processor::verify_votes (std::deque<std::pair<std::shared_ptr<nano::vote>, nano::endpoint>> & votes_a)
{
auto size (votes_a.size ());
std::vector<unsigned char const *> messages;
messages.reserve (size);
std::vector<nano::uint256_union> hashes;
hashes.reserve (size);
std::vector<size_t> lengths (size, sizeof (nano::uint256_union));
std::vector<unsigned char const *> pub_keys;
pub_keys.reserve (size);
std::vector<unsigned char const *> signatures;
signatures.reserve (size);
std::vector<int> verifications;
verifications.resize (size);
for (auto & vote : votes_a)
{
hashes.push_back (vote.first->hash ());
messages.push_back (hashes.back ().bytes.data ());
pub_keys.push_back (vote.first->account.bytes.data ());
signatures.push_back (vote.first->signature.bytes.data ());
}
std::promise<void> promise;
nano::signature_check_set check = { size, messages.data (), lengths.data (), pub_keys.data (), signatures.data (), verifications.data (), &promise };
node.checker.add (check);
promise.get_future ().wait ();
std::remove_reference_t<decltype (votes_a)> result;
auto i (0);
for (auto & vote : votes_a)
{
assert (verifications[i] == 1 || verifications[i] == 0);
if (verifications[i] == 1)
{
result.push_back (vote);
}
++i;
}
votes_a.swap (result);
}
// node.active.mutex lock required
nano::vote_code nano::vote_processor::vote_blocking (nano::transaction const & transaction_a, std::shared_ptr<nano::vote> vote_a, nano::endpoint endpoint_a, bool validated)
{
assert (endpoint_a.address ().is_v6 ());
assert (!node.active.mutex.try_lock ());
auto result (nano::vote_code::invalid);
if (validated || !vote_a->validate ())
{
auto max_vote (node.store.vote_max (transaction_a, vote_a));
result = nano::vote_code::replay;
if (!node.active.vote (vote_a, true))
{
result = nano::vote_code::vote;
}
switch (result)
{
case nano::vote_code::vote:
node.observers.vote.notify (transaction_a, vote_a, endpoint_a);
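// No break: fall through so freshly counted votes also get the replay assistance below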
case nano::vote_code::replay:
// This tries to assist rep nodes that have lost track of their highest sequence number by replaying our highest known vote back to them
// Only do this if the sequence number is significantly different to account for network reordering
// Amplify attack considerations: We're sending out a confirm_ack in response to a confirm_ack for no net traffic increase
if (max_vote->sequence > vote_a->sequence + 10000)
{
nano::confirm_ack confirm (max_vote);
node.network.confirm_send (confirm, confirm.to_bytes (), endpoint_a);
}
break;
case nano::vote_code::invalid:
assert (false);
break;
}
}
std::string status;
switch (result)
{
case nano::vote_code::invalid:
status = "Invalid";
node.stats.inc (nano::stat::type::vote, nano::stat::detail::vote_invalid);
break;
case nano::vote_code::replay:
status = "Replay";
node.stats.inc (nano::stat::type::vote, nano::stat::detail::vote_replay);
break;
case nano::vote_code::vote:
status = "Vote";
node.stats.inc (nano::stat::type::vote, nano::stat::detail::vote_valid);
break;
}
if (node.config.logging.vote_logging ())
{
BOOST_LOG (node.log) << boost::str (boost::format ("Vote from: %1% sequence: %2% block(s): %3%status: %4%") % vote_a->account.to_account () % std::to_string (vote_a->sequence) % vote_a->hashes_string () % status);
}
return result;
}
void nano::vote_processor::stop ()
{
{
std::lock_guard<std::mutex> lock (mutex);
stopped = true;
}
condition.notify_all ();
if (thread.joinable ())
{
thread.join ();
}
}
void nano::vote_processor::flush ()
{
std::unique_lock<std::mutex> lock (mutex);
while (active || !votes.empty ())
{
condition.wait (lock);
}
}
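// Rebuild the three representative tiers (>= 0.1%, >= 1% and >= 5% of online stake)
// that the random early detection logic in vote () consults when the queue fills up.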
void nano::vote_processor::calculate_weights ()
{
std::unique_lock<std::mutex> lock (mutex);
if (!stopped)
{
representatives_1.clear ();
representatives_2.clear ();
representatives_3.clear ();
auto supply (node.online_reps.online_stake ());
auto transaction (node.store.tx_begin_read ());
for (auto i (node.store.representation_begin (transaction)), n (node.store.representation_end ()); i != n; ++i)
{
nano::account representative (i->first);
auto weight (node.ledger.weight (transaction, representative));
if (weight > supply / 1000) // 0.1% or above (level 1)
{
representatives_1.insert (representative);
if (weight > supply / 100) // 1% or above (level 2)
{
representatives_2.insert (representative);
if (weight > supply / 20) // 5% or above (level 3)
{
representatives_3.insert (representative);
}
}
}
}
}
}
void nano::rep_crawler::add (nano::block_hash const & hash_a)
{
std::lock_guard<std::mutex> lock (mutex);
active.insert (hash_a);
}
void nano::rep_crawler::remove (nano::block_hash const & hash_a)
{
std::lock_guard<std::mutex> lock (mutex);
active.erase (hash_a);
}
bool nano::rep_crawler::exists (nano::block_hash const & hash_a)
{
std::lock_guard<std::mutex> lock (mutex);
return active.count (hash_a) != 0;
}
nano::signature_checker::signature_checker () :
started (false),
stopped (false),
thread ([this]() { run (); })
{
std::unique_lock<std::mutex> lock (mutex);
while (!started)
{
condition.wait (lock);
}
}
nano::signature_checker::~signature_checker ()
{
stop ();
}
void nano::signature_checker::add (nano::signature_check_set & check_a)
{
{
std::lock_guard<std::mutex> lock (mutex);
checks.push_back (check_a);
}
condition.notify_all ();
}
void nano::signature_checker::stop ()
{
std::unique_lock<std::mutex> lock (mutex);
stopped = true;
lock.unlock ();
condition.notify_all ();
if (thread.joinable ())
{
thread.join ();
}
}
void nano::signature_checker::flush ()
{
std::unique_lock<std::mutex> lock (mutex);
while (!stopped && !checks.empty ())
{
condition.wait (lock);
}
}
void nano::signature_checker::verify (nano::signature_check_set & check_a)
{
/* Verifications is a vector of signature check results
validate_message_batch returns "true" if there is at least one invalid signature */
auto code (nano::validate_message_batch (check_a.messages, check_a.message_lengths, check_a.pub_keys, check_a.signatures, check_a.size, check_a.verifications));
(void)code;
release_assert (std::all_of (check_a.verifications, check_a.verifications + check_a.size, [](int verification) { return verification == 0 || verification == 1; }));
check_a.promise->set_value ();
}
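// Signature checking worker: pop batches off the deque and verify each one,
// fulfilling the batch's promise when done.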
void nano::signature_checker::run ()
{
nano::thread_role::set (nano::thread_role::name::signature_checking);
std::unique_lock<std::mutex> lock (mutex);
started = true;
lock.unlock ();
condition.notify_all ();
lock.lock ();
while (!stopped)
{
if (!checks.empty ())
{
auto check (checks.front ());
checks.pop_front ();
lock.unlock ();
verify (check);
condition.notify_all ();
lock.lock ();
}
else
{
condition.wait (lock);
}
}
}
nano::block_processor::block_processor (nano::node & node_a) :
stopped (false),
active (false),
next_log (std::chrono::steady_clock::now ()),
node (node_a),
generator (node_a, nano::nano_network == nano::nano_networks::nano_test_network ? std::chrono::milliseconds (10) : std::chrono::milliseconds (500))
{
}
nano::block_processor::~block_processor ()
{
stop ();
}
void nano::block_processor::stop ()
{
generator.stop ();
{
std::lock_guard<std::mutex> lock (mutex);
stopped = true;
}
condition.notify_all ();
}
void nano::block_processor::flush ()
{
node.checker.flush ();
std::unique_lock<std::mutex> lock (mutex);
while (!stopped && (have_blocks () || active))
{
condition.wait (lock);
}
}
bool nano::block_processor::full ()
{
std::unique_lock<std::mutex> lock (mutex);
return (blocks.size () + state_blocks.size ()) > 16384;
}
void nano::block_processor::add (std::shared_ptr<nano::block> block_a, std::chrono::steady_clock::time_point origination)
{
if (!nano::work_validate (block_a->root (), block_a->block_work ()))
{
{
std::lock_guard<std::mutex> lock (mutex);
if (blocks_hashes.find (block_a->hash ()) == blocks_hashes.end ())
{
if (block_a->type () == nano::block_type::state && !node.ledger.is_epoch_link (block_a->link ()))
{
state_blocks.push_back (std::make_pair (block_a, origination));
}
else
{
blocks.push_back (std::make_pair (block_a, origination));
}
}
condition.notify_all ();
}
}
else
{
BOOST_LOG (node.log) << "nano::block_processor::add called for hash " << block_a->hash ().to_string () << " with invalid work " << nano::to_string_hex (block_a->block_work ());
assert (false && "nano::block_processor::add called with invalid work");
}
}
void nano::block_processor::force (std::shared_ptr<nano::block> block_a)
{
{
std::lock_guard<std::mutex> lock (mutex);
forced.push_back (block_a);
}
condition.notify_all ();
}
void nano::block_processor::process_blocks ()
{
std::unique_lock<std::mutex> lock (mutex);
while (!stopped)
{
if (have_blocks ())
{
active = true;
lock.unlock ();
process_receive_many (lock);
lock.lock ();
active = false;
}
else
{
lock.unlock ();
condition.notify_all ();
lock.lock ();
condition.wait (lock);
}
}
}
bool nano::block_processor::should_log (bool first_time)
{
auto result (false);
auto now (std::chrono::steady_clock::now ());
if (first_time || next_log < now)
{
next_log = now + std::chrono::seconds (15);
result = true;
}
return result;
}
bool nano::block_processor::have_blocks ()
{
assert (!mutex.try_lock ());
return !blocks.empty () || !forced.empty () || !state_blocks.empty ();
}
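// Move up to max_count state blocks out of the queue, batch-verify their signatures
// with the lock released, then requeue the blocks that passed onto the main deque.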
void nano::block_processor::verify_state_blocks (std::unique_lock<std::mutex> & lock_a, size_t max_count)
{
assert (!mutex.try_lock ());
auto start_time (std::chrono::steady_clock::now ());
std::deque<std::pair<std::shared_ptr<nano::block>, std::chrono::steady_clock::time_point>> items;
if (max_count == std::numeric_limits<size_t>::max () || max_count >= state_blocks.size ())
{
items.swap (state_blocks);
}
else
{
auto keep_size (state_blocks.size () - max_count);
items.resize (keep_size);
std::swap_ranges (state_blocks.end () - keep_size, state_blocks.end (), items.begin ());
state_blocks.resize (max_count);
items.swap (state_blocks);
}
lock_a.unlock ();
auto size (items.size ());
std::vector<nano::uint256_union> hashes;
hashes.reserve (size);
std::vector<unsigned char const *> messages;
messages.reserve (size);
std::vector<size_t> lengths;
lengths.reserve (size);
std::vector<unsigned char const *> pub_keys;
pub_keys.reserve (size);
std::vector<unsigned char const *> signatures;
signatures.reserve (size);
std::vector<int> verifications;
verifications.resize (size, 0);
for (size_t i (0); i < size; ++i)
{
auto & block (static_cast<nano::state_block &> (*items[i].first));
hashes.push_back (block.hash ());
messages.push_back (hashes.back ().bytes.data ());
lengths.push_back (sizeof (decltype (hashes)::value_type));
pub_keys.push_back (block.hashables.account.bytes.data ());
signatures.push_back (block.signature.bytes.data ());
}
std::promise<void> promise;
nano::signature_check_set check = { size, messages.data (), lengths.data (), pub_keys.data (), signatures.data (), verifications.data (), &promise };
node.checker.add (check);
promise.get_future ().wait ();
lock_a.lock ();
for (size_t i (0); i < size; ++i)
{
assert (verifications[i] == 1 || verifications[i] == 0);
if (verifications[i] == 1)
{
blocks.push_back (items.front ());
}
items.pop_front ();
}
if (node.config.logging.timing_logging ())
{
auto end_time (std::chrono::steady_clock::now ());
auto elapsed_time_ms (std::chrono::duration_cast<std::chrono::milliseconds> (end_time - start_time));
auto elapsed_time_ms_int (elapsed_time_ms.count ());
BOOST_LOG (node.log) << boost::str (boost::format ("Batch verified %1% state blocks in %2% milliseconds") % size % elapsed_time_ms_int);
}
}
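// Main processing pass: pre-verify pending state blocks (bounded to roughly 2 seconds),
// then process queued and forced blocks inside a single write transaction, bounded by
// block_processor_batch_max_time.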
void nano::block_processor::process_receive_many (std::unique_lock<std::mutex> & lock_a)
{
lock_a.lock ();
auto start_time (std::chrono::steady_clock::now ());
// Limit state blocks verification time
while (!state_blocks.empty () && std::chrono::steady_clock::now () - start_time < std::chrono::seconds (2))
{
verify_state_blocks (lock_a, 2048);
}
lock_a.unlock ();
auto transaction (node.store.tx_begin_write ());
start_time = std::chrono::steady_clock::now ();
lock_a.lock ();
// Processing blocks
auto first_time (true);
unsigned number_of_blocks_processed (0), number_of_forced_processed (0);
while ((!blocks.empty () || !forced.empty ()) && std::chrono::steady_clock::now () - start_time < node.config.block_processor_batch_max_time)
{
auto log_this_record (false);
if (node.config.logging.timing_logging ())
{
if (should_log (first_time))
{
log_this_record = true;
}
}
else
{
if (((blocks.size () + state_blocks.size () + forced.size ()) > 64 && should_log (false)))
{
log_this_record = true;
}
}
if (log_this_record)
{
first_time = false;
BOOST_LOG (node.log) << boost::str (boost::format ("%1% blocks (+ %2% state blocks) (+ %3% forced) in processing queue") % blocks.size () % state_blocks.size () % forced.size ());
}
std::pair<std::shared_ptr<nano::block>, std::chrono::steady_clock::time_point> block;
bool force (false);
if (forced.empty ())
{
block = blocks.front ();
blocks.pop_front ();
blocks_hashes.erase (block.first->hash ());
}
else
{
block = std::make_pair (forced.front (), std::chrono::steady_clock::now ());
forced.pop_front ();
force = true;
number_of_forced_processed++;
}
lock_a.unlock ();
auto hash (block.first->hash ());
if (force)
{
auto successor (node.ledger.successor (transaction, block.first->root ()));
if (successor != nullptr && successor->hash () != hash)
{
// Replace our block with the winner and roll back any dependent blocks
BOOST_LOG (node.log) << boost::str (boost::format ("Rolling back %1% and replacing with %2%") % successor->hash ().to_string () % hash.to_string ());
node.ledger.rollback (transaction, successor->hash ());
}
}
/* Forced state blocks are not validated in the verify_state_blocks () function
Because of that we should set validated_state_block to "false" for forced state blocks (!force) */
bool validated_state_block (!force && block.first->type () == nano::block_type::state);
auto process_result (process_receive_one (transaction, block.first, block.second, validated_state_block));
number_of_blocks_processed++;
(void)process_result;
lock_a.lock ();
/* Verify more state blocks if the blocks deque is empty
Because verification is a long process, avoid verifying a large deque inside the write transaction */
if (blocks.empty () && !state_blocks.empty ())
{
verify_state_blocks (lock_a, 256);
}
}
lock_a.unlock ();
if (node.config.logging.timing_logging ())
{
auto end_time (std::chrono::steady_clock::now ());
auto elapsed_time_ms (std::chrono::duration_cast<std::chrono::milliseconds> (end_time - start_time));
auto elapsed_time_ms_int (elapsed_time_ms.count ());
BOOST_LOG (node.log) << boost::str (boost::format ("Processed %1% blocks (%2% blocks were forced) in %3% milliseconds") % number_of_blocks_processed % number_of_forced_processed % elapsed_time_ms_int);
}
}
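// Apply a single block to the ledger and react to the result code: start an election
// on progress, store gap blocks as unchecked, and hand forks older than 15 seconds to
// fork resolution.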
nano::process_return nano::block_processor::process_receive_one (nano::transaction const & transaction_a, std::shared_ptr<nano::block> block_a, std::chrono::steady_clock::time_point origination, bool validated_state_block)
{
nano::process_return result;
auto hash (block_a->hash ());
result = node.ledger.process (transaction_a, *block_a, validated_state_block);
switch (result.code)
{
case nano::process_result::progress:
{
if (node.config.logging.ledger_logging ())
{
std::string block;
block_a->serialize_json (block);
BOOST_LOG (node.log) << boost::str (boost::format ("Processing block %1%: %2%") % hash.to_string () % block);
}
if (node.block_arrival.recent (hash))
{
node.active.start (block_a);
if (node.config.enable_voting)
{
generator.add (hash);
}
}
queue_unchecked (transaction_a, hash);
break;
}
case nano::process_result::gap_previous:
{
if (node.config.logging.ledger_logging ())
{
BOOST_LOG (node.log) << boost::str (boost::format ("Gap previous for: %1%") % hash.to_string ());
}
node.store.unchecked_put (transaction_a, block_a->previous (), block_a);
node.gap_cache.add (transaction_a, block_a);
break;
}
case nano::process_result::gap_source:
{
if (node.config.logging.ledger_logging ())
{
BOOST_LOG (node.log) << boost::str (boost::format ("Gap source for: %1%") % hash.to_string ());
}
node.store.unchecked_put (transaction_a, node.ledger.block_source (transaction_a, *block_a), block_a);
node.gap_cache.add (transaction_a, block_a);
break;
}
case nano::process_result::old:
{
if (node.config.logging.ledger_duplicate_logging ())
{
BOOST_LOG (node.log) << boost::str (boost::format ("Old for: %1%") % block_a->hash ().to_string ());
}
queue_unchecked (transaction_a, hash);
node.active.update_difficulty (*block_a);
break;
}
case nano::process_result::bad_signature:
{
if (node.config.logging.ledger_logging ())
{
BOOST_LOG (node.log) << boost::str (boost::format ("Bad signature for: %1%") % hash.to_string ());
}
break;
}
case nano::process_result::negative_spend:
{
if (node.config.logging.ledger_logging ())
{
BOOST_LOG (node.log) << boost::str (boost::format ("Negative spend for: %1%") % hash.to_string ());
}
break;
}
case nano::process_result::unreceivable:
{
if (node.config.logging.ledger_logging ())
{
BOOST_LOG (node.log) << boost::str (boost::format ("Unreceivable for: %1%") % hash.to_string ());
}
break;
}
case nano::process_result::fork:
{
if (origination < std::chrono::steady_clock::now () - std::chrono::seconds (15))
{
// Only let the bootstrap attempt know about forked blocks that did not originate recently.
node.process_fork (transaction_a, block_a);
}
if (node.config.logging.ledger_logging ())
{
BOOST_LOG (node.log) << boost::str (boost::format ("Fork for: %1% root: %2%") % hash.to_string () % block_a->root ().to_string ());
}
break;
}
case nano::process_result::opened_burn_account:
{
BOOST_LOG (node.log) << boost::str (boost::format ("*** Rejecting open block for burn account ***: %1%") % hash.to_string ());
break;
}
case nano::process_result::balance_mismatch:
{
if (node.config.logging.ledger_logging ())
{
BOOST_LOG (node.log) << boost::str (boost::format ("Balance mismatch for: %1%") % hash.to_string ());
}
break;
}
case nano::process_result::representative_mismatch:
{
if (node.config.logging.ledger_logging ())
{
BOOST_LOG (node.log) << boost::str (boost::format ("Representative mismatch for: %1%") % hash.to_string ());
}
break;
}
case nano::process_result::block_position:
{
if (node.config.logging.ledger_logging ())
{
BOOST_LOG (node.log) << boost::str (boost::format ("Block %1% cannot follow predecessor %2%") % hash.to_string () % block_a->previous ().to_string ());
}
break;
}
}
return result;
}
void nano::block_processor::queue_unchecked (nano::transaction const & transaction_a, nano::block_hash const & hash_a)
{
auto cached (node.store.unchecked_get (transaction_a, hash_a));
for (auto i (cached.begin ()), n (cached.end ()); i != n; ++i)
{
node.store.unchecked_del (transaction_a, nano::unchecked_key (hash_a, (*i)->hash ()));
add (*i, std::chrono::steady_clock::time_point ());
}
std::lock_guard<std::mutex> lock (node.gap_cache.mutex);
node.gap_cache.blocks.get<1> ().erase (hash_a);
}
nano::node::node (nano::node_init & init_a, boost::asio::io_context & io_ctx_a, uint16_t peering_port_a, boost::filesystem::path const & application_path_a, nano::alarm & alarm_a, nano::logging const & logging_a, nano::work_pool & work_a) :
node (init_a, io_ctx_a, application_path_a, alarm_a, nano::node_config (peering_port_a, logging_a), work_a)
{
}
nano::node::node (nano::node_init & init_a, boost::asio::io_context & io_ctx_a, boost::filesystem::path const & application_path_a, nano::alarm & alarm_a, nano::node_config const & config_a, nano::work_pool & work_a) :
io_ctx (io_ctx_a),
config (config_a),
alarm (alarm_a),
work (work_a),
store_impl (std::make_unique<nano::mdb_store> (init_a.block_store_init, config.logging, application_path_a / "data.ldb", config_a.lmdb_max_dbs)),
store (*store_impl),
gap_cache (*this),
ledger (store, stats, config.epoch_block_link, config.epoch_block_signer),
active (*this),
network (*this, config.peering_port),
bootstrap_initiator (*this),
bootstrap (io_ctx_a, config.peering_port, *this),
peers (network.endpoint ()),
application_path (application_path_a),
wallets (init_a.block_store_init, *this),
port_mapping (*this),
vote_processor (*this),
warmed_up (0),
block_processor (*this),
block_processor_thread ([this]() {
nano::thread_role::set (nano::thread_role::name::block_processing);
this->block_processor.process_blocks ();
}),
online_reps (*this),
stats (config.stat_config),
vote_uniquer (block_uniquer)
{
wallets.observer = [this](bool active) {
observers.wallet.notify (active);
};
peers.peer_observer = [this](nano::endpoint const & endpoint_a) {
observers.endpoint.notify (endpoint_a);
};
peers.disconnect_observer = [this]() {
observers.disconnect.notify ();
};
if (!config.callback_address.empty ())
{
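// HTTP JSON callback for confirmed blocks: resolve the configured address, then for
// each resolved endpoint connect, POST the block event and read the response,
// counting any failure in the error stats.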
observers.blocks.add ([this](std::shared_ptr<nano::block> block_a, nano::account const & account_a, nano::amount const & amount_a, bool is_state_send_a) {
if (this->block_arrival.recent (block_a->hash ()))
{
auto node_l (shared_from_this ());
background ([node_l, block_a, account_a, amount_a, is_state_send_a]() {
boost::property_tree::ptree event;
event.add ("account", account_a.to_account ());
event.add ("hash", block_a->hash ().to_string ());
std::string block_text;
block_a->serialize_json (block_text);
event.add ("block", block_text);
event.add ("amount", amount_a.to_string_dec ());
if (is_state_send_a)
{
event.add ("is_send", is_state_send_a);
}
std::stringstream ostream;
boost::property_tree::write_json (ostream, event);
ostream.flush ();
auto body (std::make_shared<std::string> (ostream.str ()));
auto address (node_l->config.callback_address);
auto port (node_l->config.callback_port);
auto target (std::make_shared<std::string> (node_l->config.callback_target));
auto resolver (std::make_shared<boost::asio::ip::tcp::resolver> (node_l->io_ctx));
resolver->async_resolve (boost::asio::ip::tcp::resolver::query (address, std::to_string (port)), [node_l, address, port, target, body, resolver](boost::system::error_code const & ec, boost::asio::ip::tcp::resolver::iterator i_a) {
if (!ec)
{
for (auto i (i_a), n (boost::asio::ip::tcp::resolver::iterator{}); i != n; ++i)
{
auto sock (std::make_shared<boost::asio::ip::tcp::socket> (node_l->io_ctx));
sock->async_connect (i->endpoint (), [node_l, target, body, sock, address, port](boost::system::error_code const & ec) {
if (!ec)
{
auto req (std::make_shared<boost::beast::http::request<boost::beast::http::string_body>> ());
req->method (boost::beast::http::verb::post);
req->target (*target);
req->version (11);
req->insert (boost::beast::http::field::host, address);
req->insert (boost::beast::http::field::content_type, "application/json");
req->body () = *body;
req->prepare_payload ();
boost::beast::http::async_write (*sock, *req, [node_l, sock, address, port, req](boost::system::error_code const & ec, size_t bytes_transferred) {
if (!ec)
{
auto sb (std::make_shared<boost::beast::flat_buffer> ());
auto resp (std::make_shared<boost::beast::http::response<boost::beast::http::string_body>> ());
boost::beast::http::async_read (*sock, *sb, *resp, [node_l, sb, resp, sock, address, port](boost::system::error_code const & ec, size_t bytes_transferred) {
if (!ec)
{
if (resp->result () == boost::beast::http::status::ok)
{
node_l->stats.inc (nano::stat::type::http_callback, nano::stat::detail::initiate, nano::stat::dir::out);
}
else
{
if (node_l->config.logging.callback_logging ())
{
BOOST_LOG (node_l->log) << boost::str (boost::format ("Callback to %1%:%2% failed with status: %3%") % address % port % resp->result ());
}
node_l->stats.inc (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out);
}
}
else
{
if (node_l->config.logging.callback_logging ())
{
BOOST_LOG (node_l->log) << boost::str (boost::format ("Unable to complete callback: %1%:%2%: %3%") % address % port % ec.message ());
}
node_l->stats.inc (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out);
}
});
}
else
{
if (node_l->config.logging.callback_logging ())
{
BOOST_LOG (node_l->log) << boost::str (boost::format ("Unable to send callback: %1%:%2%: %3%") % address % port % ec.message ());
}
node_l->stats.inc (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out);
}
});
}
else
{
if (node_l->config.logging.callback_logging ())
{
BOOST_LOG (node_l->log) << boost::str (boost::format ("Unable to connect to callback address: %1%:%2%: %3%") % address % port % ec.message ());
}
node_l->stats.inc (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out);
}
});
}
}
else
{
if (node_l->config.logging.callback_logging ())
{
BOOST_LOG (node_l->log) << boost::str (boost::format ("Error resolving callback: %1%:%2%: %3%") % address % port % ec.message ());
}
node_l->stats.inc (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out);
}
});
});
}
});
}
observers.endpoint.add ([this](nano::endpoint const & endpoint_a) {
this->network.send_keepalive (endpoint_a);
rep_query (*this, endpoint_a);
});
observers.vote.add ([this](nano::transaction const & transaction, std::shared_ptr<nano::vote> vote_a, nano::endpoint const & endpoint_a) {
assert (endpoint_a.address ().is_v6 ());
this->gap_cache.vote (vote_a);
this->online_reps.vote (vote_a);
nano::uint128_t rep_weight;
nano::uint128_t min_rep_weight;
{
rep_weight = ledger.weight (transaction, vote_a->account);
min_rep_weight = online_reps.online_stake () / 1000;
}
if (rep_weight > min_rep_weight)
{
bool rep_crawler_exists (false);
for (auto hash : *vote_a)
{
if (this->rep_crawler.exists (hash))
{
rep_crawler_exists = true;
break;
}
}
if (rep_crawler_exists)
{
// We see a valid non-replay vote for a block we requested; this node is probably a representative
if (this->peers.rep_response (endpoint_a, vote_a->account, rep_weight))
{
BOOST_LOG (log) << boost::str (boost::format ("Found a representative at %1%") % endpoint_a);
// Rebroadcast confirm_req for all active blocks to the new representative
auto blocks (this->active.list_blocks (true));
for (auto i (blocks.begin ()), n (blocks.end ()); i != n; ++i)
{
if (*i != nullptr)
{
this->network.send_confirm_req (endpoint_a, *i);
}
}
}
}
}
});
BOOST_LOG (log) << "Node starting, version: " << NANO_VERSION_MAJOR << "." << NANO_VERSION_MINOR;
BOOST_LOG (log) << boost::str (boost::format ("Work pool running %1% threads") % work.threads.size ());
if (!init_a.error ())
{
if (config.logging.node_lifetime_tracing ())
{
BOOST_LOG (log) << "Constructing node";
}
nano::genesis genesis;
auto transaction (store.tx_begin_write ());
if (store.latest_begin (transaction) == store.latest_end ())
{
// Store was empty, meaning we just created it; add the genesis block
store.initialize (transaction, genesis);
}
if (!store.block_exists (transaction, genesis.hash ()))
{
BOOST_LOG (log) << "Genesis block not found. Make sure the node network ID is correct.";
std::exit (1);
}
node_id = nano::keypair (store.get_node_id (transaction));
BOOST_LOG (log) << "Node ID: " << node_id.pub.to_account ();
}
peers.online_weight_minimum = config.online_weight_minimum.number ();
if (nano::nano_network == nano::nano_networks::nano_live_network || nano::nano_network == nano::nano_networks::nano_beta_network)
{
nano::bufferstream weight_stream ((const uint8_t *)nano_bootstrap_weights, nano_bootstrap_weights_size);
nano::uint128_union block_height;
if (!nano::read (weight_stream, block_height))
{
auto max_blocks = (uint64_t)block_height.number ();
auto transaction (store.tx_begin_read ());
if (ledger.store.block_count (transaction).sum () < max_blocks)
{
ledger.bootstrap_weight_max_blocks = max_blocks;
while (true)
{
nano::account account;
if (nano::read (weight_stream, account.bytes))
{
break;
}
nano::amount weight;
if (nano::read (weight_stream, weight.bytes))
{
break;
}
BOOST_LOG (log) << "Using bootstrap rep weight: " << account.to_account () << " -> " << weight.format_balance (Mxrb_ratio, 0, true) << " XRB";
ledger.bootstrap_weights[account] = weight.number ();
}
}
}
}
}
nano::node::~node ()
{
if (config.logging.node_lifetime_tracing ())
{
BOOST_LOG (log) << "Destructing node";
}
stop ();
}
bool nano::node::copy_with_compaction (boost::filesystem::path const & destination_file)
{
return !mdb_env_copy2 (boost::polymorphic_downcast<nano::mdb_store *> (store_impl.get ())->env.environment, destination_file.string ().c_str (), MDB_CP_COMPACT);
}
void nano::node::send_keepalive (nano::endpoint const & endpoint_a)
{
network.send_keepalive (nano::map_endpoint_to_v6 (endpoint_a));
}
void nano::node::process_fork (nano::transaction const & transaction_a, std::shared_ptr<nano::block> block_a)
{
auto root (block_a->root ());
if (!store.block_exists (transaction_a, block_a->type (), block_a->hash ()) && store.root_exists (transaction_a, block_a->root ()))
{
std::shared_ptr<nano::block> ledger_block (ledger.forked_block (transaction_a, *block_a));
if (ledger_block)
{
std::weak_ptr<nano::node> this_w (shared_from_this ());
if (!active.start (ledger_block, [this_w, root](std::shared_ptr<nano::block>) {
if (auto this_l = this_w.lock ())
{
auto attempt (this_l->bootstrap_initiator.current_attempt ());
if (attempt && !attempt->lazy_mode)
{
auto transaction (this_l->store.tx_begin_read ());
auto account (this_l->ledger.store.frontier_get (transaction, root));
if (!account.is_zero ())
{
attempt->requeue_pull (nano::pull_info (account, root, root));
}
else if (this_l->ledger.store.account_exists (transaction, root))
{
attempt->requeue_pull (nano::pull_info (root, nano::block_hash (0), nano::block_hash (0)));
}
}
}
}))
{
BOOST_LOG (log) << boost::str (boost::format ("Resolving fork between our block: %1% and block %2% both with root %3%") % ledger_block->hash ().to_string () % block_a->hash ().to_string () % block_a->root ().to_string ());
network.broadcast_confirm_req (ledger_block);
}
}
}
}
nano::gap_cache::gap_cache (nano::node & node_a) :
node (node_a)
{
}
void nano::gap_cache::add (nano::transaction const & transaction_a, std::shared_ptr<nano::block> block_a)
{
auto hash (block_a->hash ());
std::lock_guard<std::mutex> lock (mutex);
auto existing (blocks.get<1> ().find (hash));
if (existing != blocks.get<1> ().end ())
{
blocks.get<1> ().modify (existing, [](nano::gap_information & info) {
info.arrival = std::chrono::steady_clock::now ();
});
}
else
{
blocks.insert ({ std::chrono::steady_clock::now (), hash, std::unordered_set<nano::account> () });
if (blocks.size () > max)
{
blocks.get<0> ().erase (blocks.get<0> ().begin ());
}
}
}
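// Tally the voting weight behind a block we are missing; once enough online stake has
// voted for it, schedule a lazy (or legacy) bootstrap for that hash after a short
// grace period, unless the block has arrived in the meantime.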
void nano::gap_cache::vote (std::shared_ptr<nano::vote> vote_a)
{
std::lock_guard<std::mutex> lock (mutex);
auto transaction (node.store.tx_begin_read ());
for (auto hash : *vote_a)
{
auto existing (blocks.get<1> ().find (hash));
if (existing != blocks.get<1> ().end ())
{
auto is_new (false);
blocks.get<1> ().modify (existing, [&](nano::gap_information & info) { is_new = info.voters.insert (vote_a->account).second; });
if (is_new)
{
nano::uint128_t tally (0);
for (auto & voter : existing->voters)
{
tally += node.ledger.weight (transaction, voter);
}
bool start_bootstrap (false);
if (!node.flags.disable_lazy_bootstrap)
{
if (tally >= node.config.online_weight_minimum.number ())
{
start_bootstrap = true;
}
}
else if (!node.flags.disable_legacy_bootstrap && tally > bootstrap_threshold (transaction))
{
start_bootstrap = true;
}
if (start_bootstrap)
{
auto node_l (node.shared ());
auto now (std::chrono::steady_clock::now ());
node.alarm.add (nano::nano_network == nano::nano_networks::nano_test_network ? now + std::chrono::milliseconds (5) : now + std::chrono::seconds (5), [node_l, hash]() {
auto transaction (node_l->store.tx_begin_read ());
if (!node_l->store.block_exists (transaction, hash))
{
if (!node_l->bootstrap_initiator.in_progress ())
{
BOOST_LOG (node_l->log) << boost::str (boost::format ("Missing block %1% which has enough votes to warrant lazy bootstrapping it") % hash.to_string ());
}
if (!node_l->flags.disable_lazy_bootstrap)
{
node_l->bootstrap_initiator.bootstrap_lazy (hash);
}
else if (!node_l->flags.disable_legacy_bootstrap)
{
node_l->bootstrap_initiator.bootstrap ();
}
}
});
}
}
}
}
}
nano::uint128_t nano::gap_cache::bootstrap_threshold (nano::transaction const & transaction_a)
{
auto result ((node.online_reps.online_stake () / 256) * node.config.bootstrap_fraction_numerator);
return result;
}
void nano::network::confirm_send (nano::confirm_ack const & confirm_a, std::shared_ptr<std::vector<uint8_t>> bytes_a, nano::endpoint const & endpoint_a)
{
if (node.config.logging.network_publish_logging ())
{
BOOST_LOG (node.log) << boost::str (boost::format ("Sending confirm_ack for block(s) %1%to %2% sequence %3%") % confirm_a.vote->hashes_string () % endpoint_a % std::to_string (confirm_a.vote->sequence));
}
std::weak_ptr<nano::node> node_w (node.shared ());
node.network.send_buffer (bytes_a->data (), bytes_a->size (), endpoint_a, [bytes_a, node_w, endpoint_a](boost::system::error_code const & ec, size_t size_a) {
if (auto node_l = node_w.lock ())
{
if (ec && node_l->config.logging.network_logging ())
{
BOOST_LOG (node_l->log) << boost::str (boost::format ("Error broadcasting confirm_ack to %1%: %2%") % endpoint_a % ec.message ());
}
else
{
node_l->stats.inc (nano::stat::type::message, nano::stat::detail::confirm_ack, nano::stat::dir::out);
}
}
});
}
void nano::node::process_active (std::shared_ptr<nano::block> incoming)
{
block_arrival.add (incoming->hash ());
block_processor.add (incoming, std::chrono::steady_clock::now ());
}
nano::process_return nano::node::process (nano::block const & block_a)
{
auto transaction (store.tx_begin_write ());
auto result (ledger.process (transaction, block_a));
return result;
}
void nano::node::start ()
{
network.start ();
ongoing_keepalive ();
ongoing_syn_cookie_cleanup ();
if (!flags.disable_legacy_bootstrap)
{
ongoing_bootstrap ();
}
ongoing_store_flush ();
ongoing_rep_crawl ();
ongoing_rep_calculation ();
if (!flags.disable_bootstrap_listener)
{
bootstrap.start ();
}
if (!flags.disable_backup)
{
backup_wallet ();
}
search_pending ();
online_reps.recalculate_stake ();
port_mapping.start ();
add_initial_peers ();
}
void nano::node::stop ()
{
BOOST_LOG (log) << "Node stopping";
block_processor.stop ();
if (block_processor_thread.joinable ())
{
block_processor_thread.join ();
}
vote_processor.stop ();
active.stop ();
network.stop ();
bootstrap_initiator.stop ();
bootstrap.stop ();
port_mapping.stop ();
checker.stop ();
wallets.stop ();
}
void nano::node::keepalive_preconfigured (std::vector<std::string> const & peers_a)
{
for (auto i (peers_a.begin ()), n (peers_a.end ()); i != n; ++i)
{
keepalive (*i, nano::network::node_port);
}
}
nano::block_hash nano::node::latest (nano::account const & account_a)
{
auto transaction (store.tx_begin_read ());
return ledger.latest (transaction, account_a);
}
nano::uint128_t nano::node::balance (nano::account const & account_a)
{
auto transaction (store.tx_begin_read ());
return ledger.account_balance (transaction, account_a);
}
std::shared_ptr<nano::block> nano::node::block (nano::block_hash const & hash_a)
{
auto transaction (store.tx_begin_read ());
return store.block_get (transaction, hash_a);
}
std::pair<nano::uint128_t, nano::uint128_t> nano::node::balance_pending (nano::account const & account_a)
{
std::pair<nano::uint128_t, nano::uint128_t> result;
auto transaction (store.tx_begin_read ());
result.first = ledger.account_balance (transaction, account_a);
result.second = ledger.account_pending (transaction, account_a);
return result;
}
nano::uint128_t nano::node::weight (nano::account const & account_a)
{
auto transaction (store.tx_begin_read ());
return ledger.weight (transaction, account_a);
}
nano::account nano::node::representative (nano::account const & account_a)
{
auto transaction (store.tx_begin_read ());
nano::account_info info;
nano::account result (0);
if (!store.account_get (transaction, account_a, info))
{
result = info.rep_block;
}
return result;
}
void nano::node::ongoing_keepalive ()
{
keepalive_preconfigured (config.preconfigured_peers);
auto peers_l (peers.purge_list (std::chrono::steady_clock::now () - cutoff));
for (auto i (peers_l.begin ()), j (peers_l.end ()); i != j && std::chrono::steady_clock::now () - i->last_attempt > period; ++i)
{
network.send_keepalive (i->endpoint);
}
std::weak_ptr<nano::node> node_w (shared_from_this ());
alarm.add (std::chrono::steady_clock::now () + period, [node_w]() {
if (auto node_l = node_w.lock ())
{
node_l->ongoing_keepalive ();
}
});
}
void nano::node::ongoing_syn_cookie_cleanup ()
{
peers.purge_syn_cookies (std::chrono::steady_clock::now () - syn_cookie_cutoff);
std::weak_ptr<nano::node> node_w (shared_from_this ());
alarm.add (std::chrono::steady_clock::now () + (syn_cookie_cutoff * 2), [node_w]() {
if (auto node_l = node_w.lock ())
{
node_l->ongoing_syn_cookie_cleanup ();
}
});
}
void nano::node::ongoing_rep_crawl ()
{
auto now (std::chrono::steady_clock::now ());
auto peers_l (peers.rep_crawl ());
rep_query (*this, peers_l);
if (network.on)
{
std::weak_ptr<nano::node> node_w (shared_from_this ());
alarm.add (now + std::chrono::seconds (4), [node_w]() {
if (auto node_l = node_w.lock ())
{
node_l->ongoing_rep_crawl ();
}
});
}
}
void nano::node::ongoing_rep_calculation ()
{
auto now (std::chrono::steady_clock::now ());
vote_processor.calculate_weights ();
std::weak_ptr<nano::node> node_w (shared_from_this ());
alarm.add (now + std::chrono::minutes (10), [node_w]() {
if (auto node_l = node_w.lock ())
{
node_l->ongoing_rep_calculation ();
}
});
}
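// Periodic bootstrap: every 300 seconds in steady state, but every 5 seconds for the
// first few attempts after startup while the node warms up.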
void nano::node::ongoing_bootstrap ()
{
auto next_wakeup (300);
if (warmed_up < 3)
{
// Re-attempt bootstrapping more aggressively on startup
next_wakeup = 5;
if (!bootstrap_initiator.in_progress () && !peers.empty ())
{
++warmed_up;
}
}
bootstrap_initiator.bootstrap ();
std::weak_ptr<nano::node> node_w (shared_from_this ());
alarm.add (std::chrono::steady_clock::now () + std::chrono::seconds (next_wakeup), [node_w]() {
if (auto node_l = node_w.lock ())
{
node_l->ongoing_bootstrap ();
}
});
}
void nano::node::ongoing_store_flush ()
{
{
auto transaction (store.tx_begin_write ());
store.flush (transaction);
}
std::weak_ptr<nano::node> node_w (shared_from_this ());
alarm.add (std::chrono::steady_clock::now () + std::chrono::seconds (5), [node_w]() {
if (auto node_l = node_w.lock ())
{
node_l->ongoing_store_flush ();
}
});
}
void nano::node::backup_wallet ()
{
auto transaction (store.tx_begin_read ());
for (auto i (wallets.items.begin ()), n (wallets.items.end ()); i != n; ++i)
{
boost::system::error_code error_chmod;
auto backup_path (application_path / "backup");
boost::filesystem::create_directories (backup_path);
nano::set_secure_perm_directory (backup_path, error_chmod);
i->second->store.write_backup (transaction, backup_path / (i->first.to_string () + ".json"));
}
auto this_l (shared ());
alarm.add (std::chrono::steady_clock::now () + backup_interval, [this_l]() {
this_l->backup_wallet ();
});
}
void nano::node::search_pending ()
{
wallets.search_pending_all ();
auto this_l (shared ());
alarm.add (std::chrono::steady_clock::now () + search_pending_interval, [this_l]() {
this_l->search_pending ();
});
}
int nano::node::price (nano::uint128_t const & balance_a, int amount_a)
{
assert (balance_a >= amount_a * nano::Gxrb_ratio);
auto balance_l (balance_a);
double result (0.0);
for (auto i (0); i < amount_a; ++i)
{
balance_l -= nano::Gxrb_ratio;
auto balance_scaled ((balance_l / nano::Mxrb_ratio).convert_to<double> ());
auto units (balance_scaled / 1000.0);
auto unit_price (((free_cutoff - units) / free_cutoff) * price_max);
result += std::min (std::max (0.0, unit_price), price_max);
}
return static_cast<int> (result * 100.0);
}
namespace
{
class work_request
{
public:
work_request (boost::asio::io_context & io_ctx_a, boost::asio::ip::address address_a, uint16_t port_a) :
address (address_a),
port (port_a),
socket (io_ctx_a)
{
}
boost::asio::ip::address address;
uint16_t port;
boost::beast::flat_buffer buffer;
boost::beast::http::response<boost::beast::http::string_body> response;
boost::asio::ip::tcp::socket socket;
};
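// Fans a work_generate request out to every configured work peer over HTTP. The first
// valid answer wins (set_once) and cancels the remaining peers; if all peers fail, the
// node falls back to local generation or retries with exponential backoff.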
class distributed_work : public std::enable_shared_from_this<distributed_work>
{
public:
distributed_work (std::shared_ptr<nano::node> const & node_a, nano::block_hash const & root_a, std::function<void(uint64_t)> callback_a, uint64_t difficulty_a) :
distributed_work (1, node_a, root_a, callback_a, difficulty_a)
{
assert (node_a != nullptr);
}
distributed_work (unsigned int backoff_a, std::shared_ptr<nano::node> const & node_a, nano::block_hash const & root_a, std::function<void(uint64_t)> callback_a, uint64_t difficulty_a) :
callback (callback_a),
backoff (backoff_a),
node (node_a),
root (root_a),
need_resolve (node_a->config.work_peers),
difficulty (difficulty_a)
{
assert (node_a != nullptr);
completed.clear ();
}
void start ()
{
if (need_resolve.empty ())
{
start_work ();
}
else
{
auto current (need_resolve.back ());
need_resolve.pop_back ();
auto this_l (shared_from_this ());
boost::system::error_code ec;
auto parsed_address (boost::asio::ip::address_v6::from_string (current.first, ec));
if (!ec)
{
outstanding[parsed_address] = current.second;
start ();
}
else
{
node->network.resolver.async_resolve (boost::asio::ip::udp::resolver::query (current.first, std::to_string (current.second)), [current, this_l](boost::system::error_code const & ec, boost::asio::ip::udp::resolver::iterator i_a) {
if (!ec)
{
for (auto i (i_a), n (boost::asio::ip::udp::resolver::iterator{}); i != n; ++i)
{
auto endpoint (i->endpoint ());
this_l->outstanding[endpoint.address ()] = endpoint.port ();
}
}
else
{
BOOST_LOG (this_l->node->log) << boost::str (boost::format ("Error resolving work peer: %1%:%2%: %3%") % current.first % current.second % ec.message ());
}
this_l->start ();
});
}
}
}
void start_work ()
{
if (!outstanding.empty ())
{
auto this_l (shared_from_this ());
std::lock_guard<std::mutex> lock (mutex);
for (auto const & i : outstanding)
{
auto host (i.first);
auto service (i.second);
node->background ([this_l, host, service]() {
auto connection (std::make_shared<work_request> (this_l->node->io_ctx, host, service));
connection->socket.async_connect (nano::tcp_endpoint (host, service), [this_l, connection](boost::system::error_code const & ec) {
if (!ec)
{
std::string request_string;
{
boost::property_tree::ptree request;
request.put ("action", "work_generate");
request.put ("hash", this_l->root.to_string ());
std::stringstream ostream;
boost::property_tree::write_json (ostream, request);
request_string = ostream.str ();
}
auto request (std::make_shared<boost::beast::http::request<boost::beast::http::string_body>> ());
request->method (boost::beast::http::verb::post);
request->target ("/");
request->version (11);
request->body () = request_string;
request->prepare_payload ();
boost::beast::http::async_write (connection->socket, *request, [this_l, connection, request](boost::system::error_code const & ec, size_t bytes_transferred) {
if (!ec)
{
boost::beast::http::async_read (connection->socket, connection->buffer, connection->response, [this_l, connection](boost::system::error_code const & ec, size_t bytes_transferred) {
if (!ec)
{
if (connection->response.result () == boost::beast::http::status::ok)
{
this_l->success (connection->response.body (), connection->address);
}
else
{
BOOST_LOG (this_l->node->log) << boost::str (boost::format ("Work peer responded with an error %1% %2%: %3%") % connection->address % connection->port % connection->response.result ());
this_l->failure (connection->address);
}
}
else
{
BOOST_LOG (this_l->node->log) << boost::str (boost::format ("Unable to read from work_peer %1% %2%: %3% (%4%)") % connection->address % connection->port % ec.message () % ec.value ());
this_l->failure (connection->address);
}
});
}
else
{
BOOST_LOG (this_l->node->log) << boost::str (boost::format ("Unable to write to work_peer %1% %2%: %3% (%4%)") % connection->address % connection->port % ec.message () % ec.value ());
this_l->failure (connection->address);
}
});
}
else
{
BOOST_LOG (this_l->node->log) << boost::str (boost::format ("Unable to connect to work_peer %1% %2%: %3% (%4%)") % connection->address % connection->port % ec.message () % ec.value ());
this_l->failure (connection->address);
}
});
});
}
}
else
{
handle_failure (true);
}
}
void stop ()
{
auto this_l (shared_from_this ());
std::lock_guard<std::mutex> lock (mutex);
for (auto const & i : outstanding)
{
auto host (i.first);
auto service (i.second);
node->background ([this_l, host, service]() {
std::string request_string;
{
boost::property_tree::ptree request;
request.put ("action", "work_cancel");
request.put ("hash", this_l->root.to_string ());
std::stringstream ostream;
boost::property_tree::write_json (ostream, request);
request_string = ostream.str ();
}
// Keep the request alive in a shared_ptr until the asynchronous write has completed
auto request (std::make_shared<boost::beast::http::request<boost::beast::http::string_body>> ());
request->method (boost::beast::http::verb::post);
request->target ("/");
request->version (11);
request->body () = request_string;
request->prepare_payload ();
auto socket (std::make_shared<boost::asio::ip::tcp::socket> (this_l->node->io_ctx));
// Connect before writing; cancellation is best-effort so errors are ignored
socket->async_connect (nano::tcp_endpoint (host, service), [socket, request](boost::system::error_code const & ec) {
if (!ec)
{
boost::beast::http::async_write (*socket, *request, [socket, request](boost::system::error_code const & ec, size_t bytes_transferred) {
});
}
});
});
}
outstanding.clear ();
}
void success (std::string const & body_a, boost::asio::ip::address const & address)
{
auto last (remove (address));
std::stringstream istream (body_a);
try
{
boost::property_tree::ptree result;
boost::property_tree::read_json (istream, result);
auto work_text (result.get<std::string> ("work"));
uint64_t work;
if (!nano::from_string_hex (work_text, work))
{
if (!nano::work_validate (root, work))
{
set_once (work);
stop ();
}
else
{
BOOST_LOG (node->log) << boost::str (boost::format ("Incorrect work response from %1% for root %2%: %3%") % address % root.to_string () % work_text);
handle_failure (last);
}
}
else
{
BOOST_LOG (node->log) << boost::str (boost::format ("Work response from %1% wasn't a number: %2%") % address % work_text);
handle_failure (last);
}
}
catch (...)
{
BOOST_LOG (node->log) << boost::str (boost::format ("Work response from %1% wasn't parsable: %2%") % address % body_a);
handle_failure (last);
}
}
void set_once (uint64_t work_a)
{
if (!completed.test_and_set ())
{
callback (work_a);
}
}
void failure (boost::asio::ip::address const & address)
{
auto last (remove (address));
handle_failure (last);
}
void handle_failure (bool last)
{
if (last)
{
if (!completed.test_and_set ())
{
if (node->config.work_threads != 0 || node->work.opencl)
{
auto callback_l (callback);
node->work.generate (root, [callback_l](boost::optional<uint64_t> const & work_a) {
callback_l (work_a.value ());
},
difficulty);
}
else
{
if (backoff == 1 && node->config.logging.work_generation_time ())
{
BOOST_LOG (node->log) << "Work peer(s) failed to generate work for root " << root.to_string () << ", retrying...";
}
auto now (std::chrono::steady_clock::now ());
auto root_l (root);
auto callback_l (callback);
std::weak_ptr<nano::node> node_w (node);
auto next_backoff (std::min (backoff * 2, (unsigned int)60 * 5));
node->alarm.add (now + std::chrono::seconds (backoff), [ node_w, root_l, callback_l, next_backoff, difficulty = difficulty ] {
if (auto node_l = node_w.lock ())
{
auto work_generation (std::make_shared<distributed_work> (next_backoff, node_l, root_l, callback_l, difficulty));
work_generation->start ();
}
});
}
}
}
}
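	// Retry schedule note: each failed round doubles the delay via
	// std::min (backoff * 2, 60 * 5); assuming the initial distributed_work is
	// created with backoff = 1 (the backoff == 1 log check above suggests this),
	// retries run at 1, 2, 4, 8, ... seconds, capped at 5 minutes.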
bool remove (boost::asio::ip::address const & address)
{
std::lock_guard<std::mutex> lock (mutex);
outstanding.erase (address);
return outstanding.empty ();
}
std::function<void(uint64_t)> callback;
unsigned int backoff; // in seconds
std::shared_ptr<nano::node> node;
nano::block_hash root;
std::mutex mutex;
std::map<boost::asio::ip::address, uint16_t> outstanding;
std::vector<std::pair<std::string, uint16_t>> need_resolve;
std::atomic_flag completed;
uint64_t difficulty;
};
}
void nano::node::work_generate_blocking (nano::block & block_a, uint64_t difficulty_a)
{
block_a.block_work_set (work_generate_blocking (block_a.root (), difficulty_a));
}
void nano::node::work_generate (nano::uint256_union const & hash_a, std::function<void(uint64_t)> callback_a, uint64_t difficulty_a)
{
auto work_generation (std::make_shared<distributed_work> (shared (), hash_a, callback_a, difficulty_a));
work_generation->start ();
}
uint64_t nano::node::work_generate_blocking (nano::uint256_union const & hash_a, uint64_t difficulty_a)
{
std::promise<uint64_t> promise;
work_generate (hash_a, [&promise](uint64_t work_a) {
promise.set_value (work_a);
},
difficulty_a);
return promise.get_future ().get ();
}
void nano::node::add_initial_peers ()
{
}
void nano::node::block_confirm (std::shared_ptr<nano::block> block_a)
{
active.start (block_a);
network.broadcast_confirm_req (block_a);
}
nano::uint128_t nano::node::delta ()
{
auto result ((online_reps.online_stake () / 100) * config.online_weight_quorum);
return result;
}
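// Worked example (illustrative numbers only): with an online stake of 1000 raw
// and online_weight_quorum = 50, delta () returns 500 raw -- the margin by which
// an election's leading block must exceed the runner-up in
// nano::election::have_quorum below.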
namespace
{
class confirmed_visitor : public nano::block_visitor
{
public:
confirmed_visitor (nano::transaction const & transaction_a, nano::node & node_a, std::shared_ptr<nano::block> block_a, nano::block_hash const & hash_a) :
transaction (transaction_a),
node (node_a),
block (block_a),
hash (hash_a)
{
}
virtual ~confirmed_visitor () = default;
void scan_receivable (nano::account const & account_a)
{
for (auto i (node.wallets.items.begin ()), n (node.wallets.items.end ()); i != n; ++i)
{
auto wallet (i->second);
if (wallet->store.exists (transaction, account_a))
{
nano::account representative;
nano::pending_info pending;
representative = wallet->store.representative (transaction);
auto error (node.store.pending_get (transaction, nano::pending_key (account_a, hash), pending));
if (!error)
{
auto node_l (node.shared ());
auto amount (pending.amount.number ());
wallet->receive_async (block, representative, amount, [](std::shared_ptr<nano::block>) {});
}
else
{
if (!node.store.block_exists (transaction, hash))
{
BOOST_LOG (node.log) << boost::str (boost::format ("Confirmed block is missing: %1%") % hash.to_string ());
assert (false && "Confirmed block is missing");
}
else
{
BOOST_LOG (node.log) << boost::str (boost::format ("Block %1% has already been received") % hash.to_string ());
}
}
}
}
}
void state_block (nano::state_block const & block_a) override
{
scan_receivable (block_a.hashables.link);
}
void send_block (nano::send_block const & block_a) override
{
scan_receivable (block_a.hashables.destination);
}
void receive_block (nano::receive_block const &) override
{
}
void open_block (nano::open_block const &) override
{
}
void change_block (nano::change_block const &) override
{
}
nano::transaction const & transaction;
nano::node & node;
std::shared_ptr<nano::block> block;
nano::block_hash const & hash;
};
}
void nano::node::process_confirmed (std::shared_ptr<nano::block> block_a)
{
auto hash (block_a->hash ());
bool exists (ledger.block_exists (block_a->type (), hash));
// Attempt to process confirmed block if it's not in ledger yet
if (!exists)
{
auto transaction (store.tx_begin_write ());
block_processor.process_receive_one (transaction, block_a);
exists = store.block_exists (transaction, block_a->type (), hash);
}
if (exists)
{
auto transaction (store.tx_begin_read ());
confirmed_visitor visitor (transaction, *this, block_a, hash);
block_a->visit (visitor);
auto account (ledger.account (transaction, hash));
auto amount (ledger.amount (transaction, hash));
bool is_state_send (false);
nano::account pending_account (0);
if (auto state = dynamic_cast<nano::state_block *> (block_a.get ()))
{
is_state_send = ledger.is_send (transaction, *state);
pending_account = state->hashables.link;
}
if (auto send = dynamic_cast<nano::send_block *> (block_a.get ()))
{
pending_account = send->hashables.destination;
}
observers.blocks.notify (block_a, account, amount, is_state_send);
if (amount > 0)
{
observers.account_balance.notify (account, false);
if (!pending_account.is_zero ())
{
observers.account_balance.notify (pending_account, true);
}
}
}
}
void nano::node::process_message (nano::message & message_a, nano::endpoint const & sender_a)
{
network_message_visitor visitor (*this, sender_a);
message_a.visit (visitor);
}
nano::endpoint nano::network::endpoint ()
{
boost::system::error_code ec;
auto port (socket.local_endpoint (ec).port ());
if (ec)
{
BOOST_LOG (node.log) << "Unable to retrieve port: " << ec.message ();
}
return nano::endpoint (boost::asio::ip::address_v6::loopback (), port);
}
bool nano::block_arrival::add (nano::block_hash const & hash_a)
{
std::lock_guard<std::mutex> lock (mutex);
auto now (std::chrono::steady_clock::now ());
auto inserted (arrival.insert (nano::block_arrival_info{ now, hash_a }));
auto result (!inserted.second);
return result;
}
bool nano::block_arrival::recent (nano::block_hash const & hash_a)
{
std::lock_guard<std::mutex> lock (mutex);
auto now (std::chrono::steady_clock::now ());
while (arrival.size () > arrival_size_min && arrival.begin ()->arrival + arrival_time_min < now)
{
arrival.erase (arrival.begin ());
}
return arrival.get<1> ().find (hash_a) != arrival.get<1> ().end ();
}
nano::online_reps::online_reps (nano::node & node) :
node (node)
{
}
void nano::online_reps::vote (std::shared_ptr<nano::vote> const & vote_a)
{
auto rep (vote_a->account);
std::lock_guard<std::mutex> lock (mutex);
auto now (std::chrono::steady_clock::now ());
auto transaction (node.store.tx_begin_read ());
auto current (reps.begin ());
while (current != reps.end () && current->last_heard + std::chrono::seconds (nano::node::cutoff) < now)
{
auto old_stake (online_stake_total);
online_stake_total -= node.ledger.weight (transaction, current->representative);
if (online_stake_total > old_stake)
{
// underflow
online_stake_total = 0;
}
current = reps.erase (current);
}
auto rep_it (reps.get<1> ().find (rep));
auto info (nano::rep_last_heard_info{ now, rep });
if (rep_it == reps.get<1> ().end ())
{
auto old_stake (online_stake_total);
online_stake_total += node.ledger.weight (transaction, rep);
if (online_stake_total < old_stake)
{
// overflow
online_stake_total = std::numeric_limits<nano::uint128_t>::max ();
}
reps.insert (info);
}
else
{
reps.get<1> ().replace (rep_it, info);
}
}
void nano::online_reps::recalculate_stake ()
{
std::lock_guard<std::mutex> lock (mutex);
online_stake_total = 0;
auto transaction (node.store.tx_begin_read ());
for (auto it : reps)
{
online_stake_total += node.ledger.weight (transaction, it.representative);
}
auto now (std::chrono::steady_clock::now ());
std::weak_ptr<nano::node> node_w (node.shared ());
node.alarm.add (now + std::chrono::minutes (5), [node_w]() {
if (auto node_l = node_w.lock ())
{
node_l->online_reps.recalculate_stake ();
}
});
}
nano::uint128_t nano::online_reps::online_stake ()
{
std::lock_guard<std::mutex> lock (mutex);
return std::max (online_stake_total, node.config.online_weight_minimum.number ());
}
std::vector<nano::account> nano::online_reps::list ()
{
std::vector<nano::account> result;
std::lock_guard<std::mutex> lock (mutex);
for (auto i (reps.begin ()), n (reps.end ()); i != n; ++i)
{
result.push_back (i->representative);
}
return result;
}
namespace
{
boost::asio::ip::address_v6 mapped_from_v4_bytes (unsigned long address_a)
{
return boost::asio::ip::address_v6::v4_mapped (boost::asio::ip::address_v4 (address_a));
}
}
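// Example: mapped_from_v4_bytes (0x7f000001ul) yields the IPv4-mapped IPv6
// address ::ffff:127.0.0.1, which is why the IPv4 ranges below can be compared
// directly against endpoint_a.address ().to_v6 ().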
bool nano::reserved_address (nano::endpoint const & endpoint_a, bool blacklist_loopback)
{
assert (endpoint_a.address ().is_v6 ());
auto bytes (endpoint_a.address ().to_v6 ());
auto result (false);
static auto const rfc1700_min (mapped_from_v4_bytes (0x00000000ul));
static auto const rfc1700_max (mapped_from_v4_bytes (0x00fffffful));
static auto const ipv4_loopback_min (mapped_from_v4_bytes (0x7f000000ul));
static auto const ipv4_loopback_max (mapped_from_v4_bytes (0x7ffffffful));
static auto const rfc1918_1_min (mapped_from_v4_bytes (0x0a000000ul));
static auto const rfc1918_1_max (mapped_from_v4_bytes (0x0afffffful));
static auto const rfc1918_2_min (mapped_from_v4_bytes (0xac100000ul));
static auto const rfc1918_2_max (mapped_from_v4_bytes (0xac1ffffful));
static auto const rfc1918_3_min (mapped_from_v4_bytes (0xc0a80000ul));
static auto const rfc1918_3_max (mapped_from_v4_bytes (0xc0a8fffful));
static auto const rfc6598_min (mapped_from_v4_bytes (0x64400000ul));
static auto const rfc6598_max (mapped_from_v4_bytes (0x647ffffful));
static auto const rfc5737_1_min (mapped_from_v4_bytes (0xc0000200ul));
static auto const rfc5737_1_max (mapped_from_v4_bytes (0xc00002fful));
static auto const rfc5737_2_min (mapped_from_v4_bytes (0xc6336400ul));
static auto const rfc5737_2_max (mapped_from_v4_bytes (0xc63364fful));
static auto const rfc5737_3_min (mapped_from_v4_bytes (0xcb007100ul));
static auto const rfc5737_3_max (mapped_from_v4_bytes (0xcb0071fful));
static auto const ipv4_multicast_min (mapped_from_v4_bytes (0xe0000000ul));
static auto const ipv4_multicast_max (mapped_from_v4_bytes (0xeffffffful));
static auto const rfc6890_min (mapped_from_v4_bytes (0xf0000000ul));
static auto const rfc6890_max (mapped_from_v4_bytes (0xfffffffful));
static auto const rfc6666_min (boost::asio::ip::address_v6::from_string ("100::"));
static auto const rfc6666_max (boost::asio::ip::address_v6::from_string ("100::ffff:ffff:ffff:ffff"));
static auto const rfc3849_min (boost::asio::ip::address_v6::from_string ("2001:db8::"));
static auto const rfc3849_max (boost::asio::ip::address_v6::from_string ("2001:db8:ffff:ffff:ffff:ffff:ffff:ffff"));
static auto const rfc4193_min (boost::asio::ip::address_v6::from_string ("fc00::"));
static auto const rfc4193_max (boost::asio::ip::address_v6::from_string ("fd00:ffff:ffff:ffff:ffff:ffff:ffff:ffff"));
static auto const ipv6_multicast_min (boost::asio::ip::address_v6::from_string ("ff00::"));
static auto const ipv6_multicast_max (boost::asio::ip::address_v6::from_string ("ff00:ffff:ffff:ffff:ffff:ffff:ffff:ffff"));
if (bytes >= rfc1700_min && bytes <= rfc1700_max)
{
result = true;
}
else if (bytes >= rfc5737_1_min && bytes <= rfc5737_1_max)
{
result = true;
}
else if (bytes >= rfc5737_2_min && bytes <= rfc5737_2_max)
{
result = true;
}
else if (bytes >= rfc5737_3_min && bytes <= rfc5737_3_max)
{
result = true;
}
else if (bytes >= ipv4_multicast_min && bytes <= ipv4_multicast_max)
{
result = true;
}
else if (bytes >= rfc6890_min && bytes <= rfc6890_max)
{
result = true;
}
else if (bytes >= rfc6666_min && bytes <= rfc6666_max)
{
result = true;
}
else if (bytes >= rfc3849_min && bytes <= rfc3849_max)
{
result = true;
}
else if (bytes >= ipv6_multicast_min && bytes <= ipv6_multicast_max)
{
result = true;
}
else if (blacklist_loopback && bytes.is_loopback ())
{
result = true;
}
else if (blacklist_loopback && bytes >= ipv4_loopback_min && bytes <= ipv4_loopback_max)
{
result = true;
}
else if (nano::nano_network == nano::nano_networks::nano_live_network)
{
if (bytes >= rfc1918_1_min && bytes <= rfc1918_1_max)
{
result = true;
}
else if (bytes >= rfc1918_2_min && bytes <= rfc1918_2_max)
{
result = true;
}
else if (bytes >= rfc1918_3_min && bytes <= rfc1918_3_max)
{
result = true;
}
else if (bytes >= rfc6598_min && bytes <= rfc6598_max)
{
result = true;
}
else if (bytes >= rfc4193_min && bytes <= rfc4193_max)
{
result = true;
}
}
return result;
}
void nano::network::send_buffer (uint8_t const * data_a, size_t size_a, nano::endpoint const & endpoint_a, std::function<void(boost::system::error_code const &, size_t)> callback_a)
{
std::unique_lock<std::mutex> lock (socket_mutex);
if (node.config.logging.network_packet_logging ())
{
BOOST_LOG (node.log) << "Sending packet";
}
socket.async_send_to (boost::asio::buffer (data_a, size_a), endpoint_a, [this, callback_a](boost::system::error_code const & ec, size_t size_a) {
callback_a (ec, size_a);
this->node.stats.add (nano::stat::type::traffic, nano::stat::dir::out, size_a);
if (ec == boost::system::errc::host_unreachable)
{
this->node.stats.inc (nano::stat::type::error, nano::stat::detail::unreachable_host, nano::stat::dir::out);
}
if (this->node.config.logging.network_packet_logging ())
{
BOOST_LOG (this->node.log) << "Packet send complete";
}
});
}
std::shared_ptr<nano::node> nano::node::shared ()
{
return shared_from_this ();
}
nano::election_vote_result::election_vote_result () :
replay (false),
processed (false)
{
}
nano::election_vote_result::election_vote_result (bool replay_a, bool processed_a)
{
replay = replay_a;
processed = processed_a;
}
nano::election::election (nano::node & node_a, std::shared_ptr<nano::block> block_a, std::function<void(std::shared_ptr<nano::block>)> const & confirmation_action_a) :
confirmation_action (confirmation_action_a),
node (node_a),
root (block_a->root ()),
election_start (std::chrono::steady_clock::now ()),
status ({ block_a, 0 }),
confirmed (false),
stopped (false),
announcements (0)
{
last_votes.insert (std::make_pair (nano::not_an_account, nano::vote_info{ std::chrono::steady_clock::now (), 0, block_a->hash () }));
blocks.insert (std::make_pair (block_a->hash (), block_a));
}
void nano::election::compute_rep_votes (nano::transaction const & transaction_a)
{
if (node.config.enable_voting)
{
node.wallets.foreach_representative (transaction_a, [this, &transaction_a](nano::public_key const & pub_a, nano::raw_key const & prv_a) {
auto vote (this->node.store.vote_generate (transaction_a, pub_a, prv_a, status.winner));
this->node.vote_processor.vote (vote, this->node.network.endpoint ());
});
}
}
void nano::election::confirm_once (nano::transaction const & transaction_a)
{
if (!confirmed.exchange (true))
{
status.election_end = std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::system_clock::now ().time_since_epoch ());
status.election_duration = std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::steady_clock::now () - election_start);
auto winner_l (status.winner);
auto node_l (node.shared ());
auto confirmation_action_l (confirmation_action);
node.background ([node_l, winner_l, confirmation_action_l]() {
node_l->process_confirmed (winner_l);
confirmation_action_l (winner_l);
});
confirm_back (transaction_a);
}
}
void nano::election::confirm_back (nano::transaction const & transaction_a)
{
std::vector<nano::block_hash> hashes = { status.winner->previous (), status.winner->source (), status.winner->link () };
for (auto & hash : hashes)
{
if (!hash.is_zero () && !node.ledger.is_epoch_link (hash))
{
auto existing (node.active.blocks.find (hash));
if (existing != node.active.blocks.end () && !existing->second->confirmed && !existing->second->stopped && existing->second->blocks.size () == 1)
{
existing->second->confirm_once (transaction_a);
}
}
}
}
void nano::election::stop ()
{
stopped = true;
}
bool nano::election::have_quorum (nano::tally_t const & tally_a, nano::uint128_t tally_sum)
{
bool result = false;
if (tally_sum >= node.config.online_weight_minimum.number ())
{
auto i (tally_a.begin ());
auto first (i->first);
++i;
auto second (i != tally_a.end () ? i->first : 0);
auto delta_l (node.delta ());
result = tally_a.begin ()->first > (second + delta_l);
}
return result;
}
nano::tally_t nano::election::tally (nano::transaction const & transaction_a)
{
std::unordered_map<nano::block_hash, nano::uint128_t> block_weights;
for (auto vote_info : last_votes)
{
block_weights[vote_info.second.hash] += node.ledger.weight (transaction_a, vote_info.first);
}
last_tally = block_weights;
nano::tally_t result;
for (auto item : block_weights)
{
auto block (blocks.find (item.first));
if (block != blocks.end ())
{
result.insert (std::make_pair (item.second, block->second));
}
}
return result;
}
void nano::election::confirm_if_quorum (nano::transaction const & transaction_a)
{
auto tally_l (tally (transaction_a));
assert (tally_l.size () > 0);
auto winner (tally_l.begin ());
auto block_l (winner->second);
status.tally = winner->first;
nano::uint128_t sum (0);
for (auto & i : tally_l)
{
sum += i.first;
}
if (sum >= node.config.online_weight_minimum.number () && block_l->hash () != status.winner->hash ())
{
auto node_l (node.shared ());
node_l->block_processor.force (block_l);
status.winner = block_l;
}
if (have_quorum (tally_l, sum))
{
if (node.config.logging.vote_logging () || blocks.size () > 1)
{
log_votes (tally_l);
}
confirm_once (transaction_a);
}
}
void nano::election::log_votes (nano::tally_t const & tally_a)
{
std::stringstream tally;
tally << boost::str (boost::format ("\nVote tally for root %1%") % status.winner->root ().to_string ());
for (auto i (tally_a.begin ()), n (tally_a.end ()); i != n; ++i)
{
tally << boost::str (boost::format ("\nBlock %1% weight %2%") % i->second->hash ().to_string () % i->first.convert_to<std::string> ());
}
for (auto i (last_votes.begin ()), n (last_votes.end ()); i != n; ++i)
{
tally << boost::str (boost::format ("\n%1% %2%") % i->first.to_account () % i->second.hash.to_string ());
}
BOOST_LOG (node.log) << tally.str ();
}
nano::election_vote_result nano::election::vote (nano::account rep, uint64_t sequence, nano::block_hash block_hash)
{
// see republish_vote documentation for an explanation of these rules
auto transaction (node.store.tx_begin_read ());
auto replay (false);
auto supply (node.online_reps.online_stake ());
auto weight (node.ledger.weight (transaction, rep));
auto should_process (false);
if (nano::nano_network == nano::nano_networks::nano_test_network || weight > supply / 1000) // 0.1% or above
{
unsigned int cooldown;
if (weight < supply / 100) // 0.1% to 1%
{
cooldown = 15;
}
else if (weight < supply / 20) // 1% to 5%
{
cooldown = 5;
}
else // 5% or above
{
cooldown = 1;
}
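		// Illustrative example: with 1000 raw of online stake, a rep holding
		// 5 raw (0.5%) gets a 15 second cooldown, 30 raw (3%) gets 5 seconds,
		// and 100 raw (10%) gets 1 second between processed votes.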
auto last_vote_it (last_votes.find (rep));
if (last_vote_it == last_votes.end ())
{
should_process = true;
}
else
{
auto last_vote (last_vote_it->second);
if (last_vote.sequence < sequence || (last_vote.sequence == sequence && last_vote.hash < block_hash))
{
if (last_vote.time <= std::chrono::steady_clock::now () - std::chrono::seconds (cooldown))
{
should_process = true;
}
}
else
{
replay = true;
}
}
if (should_process)
{
last_votes[rep] = { std::chrono::steady_clock::now (), sequence, block_hash };
if (!confirmed)
{
confirm_if_quorum (transaction);
}
}
}
return nano::election_vote_result (replay, should_process);
}
bool nano::node::validate_block_by_previous (nano::transaction const & transaction, std::shared_ptr<nano::block> block_a)
{
bool result (false);
nano::account account;
if (!block_a->previous ().is_zero ())
{
if (store.block_exists (transaction, block_a->previous ()))
{
account = ledger.account (transaction, block_a->previous ());
}
else
{
result = true;
}
}
else
{
account = block_a->root ();
}
if (!result && block_a->type () == nano::block_type::state)
{
std::shared_ptr<nano::state_block> block_l (std::static_pointer_cast<nano::state_block> (block_a));
nano::amount prev_balance (0);
if (!block_l->hashables.previous.is_zero ())
{
if (store.block_exists (transaction, block_l->hashables.previous))
{
prev_balance = ledger.balance (transaction, block_l->hashables.previous);
}
else
{
result = true;
}
}
if (!result)
{
if (block_l->hashables.balance == prev_balance && !ledger.epoch_link.is_zero () && ledger.is_epoch_link (block_l->hashables.link))
{
account = ledger.epoch_signer;
}
}
}
if (!result && (account.is_zero () || nano::validate_message (account, block_a->hash (), block_a->block_signature ())))
{
result = true;
}
return result;
}
bool nano::election::publish (std::shared_ptr<nano::block> block_a)
{
auto result (false);
if (blocks.size () >= 10)
{
if (last_tally[block_a->hash ()] < node.online_reps.online_stake () / 10)
{
result = true;
}
}
if (!result)
{
auto transaction (node.store.tx_begin_read ());
result = node.validate_block_by_previous (transaction, block_a);
if (!result)
{
if (blocks.find (block_a->hash ()) == blocks.end ())
{
blocks.insert (std::make_pair (block_a->hash (), block_a));
confirm_if_quorum (transaction);
node.network.republish_block (block_a);
}
}
}
return result;
}
void nano::active_transactions::announce_votes (std::unique_lock<std::mutex> & lock_a)
{
std::unordered_set<nano::block_hash> inactive;
auto transaction (node.store.tx_begin_read ());
unsigned unconfirmed_count (0);
unsigned unconfirmed_announcements (0);
std::deque<std::shared_ptr<nano::block>> rebroadcast_bundle;
std::deque<std::pair<std::shared_ptr<nano::block>, std::shared_ptr<std::vector<nano::peer_information>>>> confirm_req_bundle;
auto roots_size (roots.size ());
for (auto i (roots.get<1> ().begin ()), n (roots.get<1> ().end ()); i != n; ++i)
{
lock_a.unlock ();
auto election_l (i->election);
if ((election_l->confirmed || election_l->stopped) && i->election->announcements >= announcement_min - 1)
{
if (election_l->confirmed)
{
confirmed.push_back (i->election->status);
if (confirmed.size () > election_history_size)
{
confirmed.pop_front ();
}
}
inactive.insert (election_l->root);
}
else
{
if (i->election->announcements > announcement_long)
{
++unconfirmed_count;
unconfirmed_announcements += i->election->announcements;
// Log votes for very long unconfirmed elections
if (i->election->announcements % 50 == 1)
{
auto tally_l (election_l->tally (transaction));
election_l->log_votes (tally_l);
}
				/* Escalation for long unconfirmed elections
				Start new elections for the previous block & source
				if there are fewer than 100 active elections */
if (i->election->announcements % announcement_long == 1 && roots_size < 100 && nano::nano_network != nano::nano_networks::nano_test_network)
{
std::shared_ptr<nano::block> previous;
auto previous_hash (election_l->status.winner->previous ());
if (!previous_hash.is_zero ())
{
previous = node.store.block_get (transaction, previous_hash);
if (previous != nullptr)
{
add (std::move (previous));
}
}
				/* If the previous block doesn't exist or isn't committed yet, block_source can cause a segfault for state blocks,
				so the source check can only be done if previous != nullptr or previous is 0 (open account) */
if (previous_hash.is_zero () || previous != nullptr)
{
auto source_hash (node.ledger.block_source (transaction, *election_l->status.winner));
if (!source_hash.is_zero ())
{
auto source (node.store.block_get (transaction, source_hash));
if (source != nullptr)
{
add (std::move (source));
}
}
}
}
}
if (i->election->announcements < announcement_long || i->election->announcements % announcement_long == 1)
{
if (node.ledger.could_fit (transaction, *election_l->status.winner))
{
// Broadcast winner
if (rebroadcast_bundle.size () < max_broadcast_queue)
{
rebroadcast_bundle.push_back (election_l->status.winner);
}
}
else
{
if (i->election->announcements != 0)
{
election_l->stop ();
}
}
}
if (i->election->announcements % 4 == 1)
{
auto reps (std::make_shared<std::vector<nano::peer_information>> (node.peers.representatives (std::numeric_limits<size_t>::max ())));
std::unordered_set<nano::account> probable_reps;
nano::uint128_t total_weight (0);
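				// Each probable rep's weight is tallied once per account; reps that
				// already voted on this election are then removed via swap-and-pop so
				// confirm_reqs are only re-sent to reps that have stayed silent.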
for (auto j (reps->begin ()), m (reps->end ()); j != m;)
{
auto & rep_votes (i->election->last_votes);
auto rep_acct (j->probable_rep_account);
					// Count each representative's weight only once, even if it is recorded under several IP addresses
if (probable_reps.find (rep_acct) == probable_reps.end ())
{
total_weight = total_weight + j->rep_weight.number ();
probable_reps.insert (rep_acct);
}
if (rep_votes.find (rep_acct) != rep_votes.end ())
{
if (j + 1 == reps->end ())
{
reps->pop_back ();
break;
}
std::swap (*j, reps->back ());
reps->pop_back ();
m = reps->end ();
}
else
{
++j;
if (node.config.logging.vote_logging ())
{
BOOST_LOG (node.log) << "Representative did not respond to confirm_req, retrying: " << rep_acct.to_account ();
}
}
}
if ((!reps->empty () && total_weight > node.config.online_weight_minimum.number ()) || roots_size > 5)
{
if (confirm_req_bundle.size () < max_broadcast_queue)
{
confirm_req_bundle.push_back (std::make_pair (i->election->status.winner, reps));
}
}
else
{
// broadcast request to all peers
confirm_req_bundle.push_back (std::make_pair (i->election->status.winner, std::make_shared<std::vector<nano::peer_information>> (node.peers.list_vector (100))));
}
}
}
++election_l->announcements;
lock_a.lock ();
}
// Rebroadcast unconfirmed blocks
if (!rebroadcast_bundle.empty ())
{
node.network.republish_block_batch (rebroadcast_bundle);
}
	// confirm_req broadcast
if (!confirm_req_bundle.empty ())
{
node.network.broadcast_confirm_req_batch (confirm_req_bundle);
}
for (auto i (inactive.begin ()), n (inactive.end ()); i != n; ++i)
{
auto root_it (roots.find (*i));
assert (root_it != roots.end ());
for (auto & block : root_it->election->blocks)
{
auto erased (blocks.erase (block.first));
(void)erased;
assert (erased == 1);
}
roots.erase (*i);
}
if (unconfirmed_count > 0)
{
BOOST_LOG (node.log) << boost::str (boost::format ("%1% blocks have been unconfirmed averaging %2% announcements") % unconfirmed_count % (unconfirmed_announcements / unconfirmed_count));
}
}
void nano::active_transactions::announce_loop ()
{
std::unique_lock<std::mutex> lock (mutex);
started = true;
lock.unlock ();
condition.notify_all ();
lock.lock ();
while (!stopped)
{
announce_votes (lock);
unsigned extra_delay (std::min (roots.size (), max_broadcast_queue) * node.network.broadcast_interval_ms * 2);
condition.wait_for (lock, std::chrono::milliseconds (announce_interval_ms + extra_delay));
}
}
void nano::active_transactions::stop ()
{
std::unique_lock<std::mutex> lock (mutex);
while (!started)
{
condition.wait (lock);
}
stopped = true;
lock.unlock ();
condition.notify_all ();
if (thread.joinable ())
{
thread.join ();
}
lock.lock ();
roots.clear ();
}
bool nano::active_transactions::start (std::shared_ptr<nano::block> block_a, std::function<void(std::shared_ptr<nano::block>)> const & confirmation_action_a)
{
std::lock_guard<std::mutex> lock (mutex);
return add (block_a, confirmation_action_a);
}
bool nano::active_transactions::add (std::shared_ptr<nano::block> block_a, std::function<void(std::shared_ptr<nano::block>)> const & confirmation_action_a)
{
auto error (true);
if (!stopped)
{
auto root (block_a->root ());
auto existing (roots.find (root));
if (existing == roots.end ())
{
auto election (std::make_shared<nano::election> (node, block_a, confirmation_action_a));
uint64_t difficulty (0);
auto error (nano::work_validate (*block_a, &difficulty));
release_assert (!error);
roots.insert (nano::conflict_info{ root, difficulty, election });
blocks.insert (std::make_pair (block_a->hash (), election));
}
error = existing != roots.end ();
}
return error;
}
// Validate a vote and apply it to the current election if one exists
bool nano::active_transactions::vote (std::shared_ptr<nano::vote> vote_a, bool single_lock)
{
std::shared_ptr<nano::election> election;
bool replay (false);
bool processed (false);
{
std::unique_lock<std::mutex> lock;
if (!single_lock)
{
lock = std::unique_lock<std::mutex> (mutex);
}
for (auto vote_block : vote_a->blocks)
{
nano::election_vote_result result;
if (vote_block.which ())
{
auto block_hash (boost::get<nano::block_hash> (vote_block));
auto existing (blocks.find (block_hash));
if (existing != blocks.end ())
{
result = existing->second->vote (vote_a->account, vote_a->sequence, block_hash);
}
}
else
{
auto block (boost::get<std::shared_ptr<nano::block>> (vote_block));
auto existing (roots.find (block->root ()));
if (existing != roots.end ())
{
result = existing->election->vote (vote_a->account, vote_a->sequence, block->hash ());
}
}
replay = replay || result.replay;
processed = processed || result.processed;
}
}
if (processed)
{
node.network.republish_vote (vote_a);
}
return replay;
}
bool nano::active_transactions::active (nano::block const & block_a)
{
std::lock_guard<std::mutex> lock (mutex);
return roots.find (block_a.root ()) != roots.end ();
}
void nano::active_transactions::update_difficulty (nano::block const & block_a)
{
std::lock_guard<std::mutex> lock (mutex);
auto existing (roots.find (block_a.root ()));
if (existing != roots.end ())
{
uint64_t difficulty;
auto error (nano::work_validate (block_a, &difficulty));
assert (!error);
roots.modify (existing, [difficulty](nano::conflict_info & info_a) {
info_a.difficulty = difficulty;
});
}
}
// List of active blocks in elections
std::deque<std::shared_ptr<nano::block>> nano::active_transactions::list_blocks (bool single_lock)
{
std::deque<std::shared_ptr<nano::block>> result;
std::unique_lock<std::mutex> lock;
if (!single_lock)
{
lock = std::unique_lock<std::mutex> (mutex);
}
for (auto i (roots.begin ()), n (roots.end ()); i != n; ++i)
{
result.push_back (i->election->status.winner);
}
return result;
}
void nano::active_transactions::erase (nano::block const & block_a)
{
std::lock_guard<std::mutex> lock (mutex);
if (roots.find (block_a.root ()) != roots.end ())
{
roots.erase (block_a.root ());
BOOST_LOG (node.log) << boost::str (boost::format ("Election erased for block block %1% root %2%") % block_a.hash ().to_string () % block_a.root ().to_string ());
}
}
nano::active_transactions::active_transactions (nano::node & node_a) :
node (node_a),
started (false),
stopped (false),
thread ([this]() {
nano::thread_role::set (nano::thread_role::name::announce_loop);
announce_loop ();
})
{
std::unique_lock<std::mutex> lock (mutex);
while (!started)
{
condition.wait (lock);
}
}
nano::active_transactions::~active_transactions ()
{
stop ();
}
bool nano::active_transactions::publish (std::shared_ptr<nano::block> block_a)
{
std::lock_guard<std::mutex> lock (mutex);
auto existing (roots.find (block_a->root ()));
auto result (true);
if (existing != roots.end ())
{
result = existing->election->publish (block_a);
if (!result)
{
blocks.insert (std::make_pair (block_a->hash (), existing->election));
}
}
return result;
}
int nano::node::store_version ()
{
auto transaction (store.tx_begin_read ());
return store.version_get (transaction);
}
nano::thread_runner::thread_runner (boost::asio::io_context & io_ctx_a, unsigned service_threads_a)
{
boost::thread::attributes attrs;
nano::thread_attributes::set (attrs);
for (auto i (0); i < service_threads_a; ++i)
{
threads.push_back (boost::thread (attrs, [&io_ctx_a]() {
nano::thread_role::set (nano::thread_role::name::io);
try
{
io_ctx_a.run ();
}
catch (...)
{
#ifndef NDEBUG
/*
* In a release build, catch and swallow the
* io_context exception, in debug mode pass it
* on
*/
throw;
#endif
}
}));
}
}
nano::thread_runner::~thread_runner ()
{
join ();
}
void nano::thread_runner::join ()
{
for (auto & i : threads)
{
if (i.joinable ())
{
i.join ();
}
}
}
nano::inactive_node::inactive_node (boost::filesystem::path const & path, uint16_t peering_port_a) :
path (path),
io_context (std::make_shared<boost::asio::io_context> ()),
alarm (*io_context),
work (1, nullptr),
peering_port (peering_port_a)
{
boost::system::error_code error_chmod;
/*
* @warning May throw a filesystem exception
*/
boost::filesystem::create_directories (path);
nano::set_secure_perm_directory (path, error_chmod);
logging.max_size = std::numeric_limits<std::uintmax_t>::max ();
logging.init (path);
node = std::make_shared<nano::node> (init, *io_context, peering_port, path, alarm, logging, work);
}
nano::inactive_node::~inactive_node ()
{
node->stop ();
}
nano::udp_buffer::udp_buffer (nano::stat & stats, size_t size, size_t count) :
stats (stats),
free (count),
full (count),
slab (size * count),
entries (count),
stopped (false)
{
assert (count > 0);
assert (size > 0);
auto slab_data (slab.data ());
auto entry_data (entries.data ());
for (auto i (0); i < count; ++i, ++entry_data)
{
*entry_data = { slab_data + i * size, 0, nano::endpoint () };
free.push_back (entry_data);
}
}
nano::udp_data * nano::udp_buffer::allocate ()
{
std::unique_lock<std::mutex> lock (mutex);
while (!stopped && free.empty () && full.empty ())
{
stats.inc (nano::stat::type::udp, nano::stat::detail::blocking, nano::stat::dir::in);
condition.wait (lock);
}
nano::udp_data * result (nullptr);
if (!free.empty ())
{
result = free.front ();
free.pop_front ();
}
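	// No free entry available: reuse the oldest unprocessed entry instead of
	// blocking, counting the dropped packet as an overflow.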
if (result == nullptr)
{
result = full.front ();
full.pop_front ();
stats.inc (nano::stat::type::udp, nano::stat::detail::overflow, nano::stat::dir::in);
}
return result;
}
void nano::udp_buffer::enqueue (nano::udp_data * data_a)
{
assert (data_a != nullptr);
{
std::lock_guard<std::mutex> lock (mutex);
full.push_back (data_a);
}
condition.notify_all ();
}
nano::udp_data * nano::udp_buffer::dequeue ()
{
std::unique_lock<std::mutex> lock (mutex);
while (!stopped && full.empty ())
{
condition.wait (lock);
}
nano::udp_data * result (nullptr);
if (!full.empty ())
{
result = full.front ();
full.pop_front ();
}
return result;
}
void nano::udp_buffer::release (nano::udp_data * data_a)
{
assert (data_a != nullptr);
{
std::lock_guard<std::mutex> lock (mutex);
free.push_back (data_a);
}
condition.notify_all ();
}
void nano::udp_buffer::stop ()
{
{
std::lock_guard<std::mutex> lock (mutex);
stopped = true;
}
condition.notify_all ();
}
| 1 | 14,714 | This should be done in a separate PR. | nanocurrency-nano-node | cpp |
@@ -138,8 +138,9 @@ public class DropPartyPlugin extends Plugin
{
continue;
}
- if (Text.standardize(player.getName()).equalsIgnoreCase(playerName))
+ if (player.getName().equalsIgnoreCase(playerName))
{
+ log.error("found running player");
runningPlayer = player;
break;
} | 1 | /*
* Copyright (c) 2017, Adam <[email protected]>
* All rights reserved.
*
*
* Modified by farhan1666
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package net.runelite.client.plugins.dropparty;
import com.google.inject.Provides;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import net.runelite.api.Client;
import net.runelite.api.Player;
import net.runelite.api.coords.WorldPoint;
import net.runelite.api.events.ConfigChanged;
import net.runelite.api.events.GameTick;
import net.runelite.api.util.Text;
import net.runelite.client.Notifier;
import net.runelite.client.config.ConfigManager;
import net.runelite.client.eventbus.EventBus;
import net.runelite.client.plugins.Plugin;
import net.runelite.client.plugins.PluginDescriptor;
import net.runelite.client.plugins.PluginType;
import net.runelite.client.ui.overlay.OverlayManager;
import javax.inject.Inject;
import javax.inject.Singleton;
import java.awt.*;
import java.util.ArrayList;
import java.util.List;
@PluginDescriptor(
name = "Drop Party",
description = "Marks where a user ran, for drop partys",
tags = {"Drop", "Party", "marker", "player"},
type = PluginType.UTILITY,
enabledByDefault = false
)
@Singleton
@Slf4j
public class DropPartyPlugin extends Plugin
{
@Inject
private DropPartyConfig config;
@Getter(AccessLevel.PACKAGE)
private List<WorldPoint> playerPath = new ArrayList<>();
@Getter(AccessLevel.PACKAGE)
private String playerName = "";
@Getter(AccessLevel.PACKAGE)
private int showAmmount = 0;
@Getter(AccessLevel.PACKAGE)
private int MAXPATHSIZE = 100;
private Player runningPlayer;
@Getter(AccessLevel.PACKAGE)
private Color overlayColor;
@Inject
private Notifier notifier;
@Inject
private OverlayManager overlayManager;
@Inject
private DropPartyOverlay coreOverlay;
@Inject
private EventBus eventbus;
@Inject
private Client client;
@Getter(AccessLevel.PACKAGE)
private int fontStyle;
@Getter(AccessLevel.PACKAGE)
private int textSize;
@Provides
DropPartyConfig getConfig(ConfigManager configManager)
{
return configManager.getConfig(DropPartyConfig.class);
}
@Override
protected void startUp()
{
updateConfig();
addSubscriptions();
overlayManager.add(coreOverlay);
reset();
}
@Override
protected void shutDown()
{
overlayManager.remove(coreOverlay);
reset();
eventbus.unregister(this);
}
private void addSubscriptions()
{
eventbus.subscribe(ConfigChanged.class, this, this::onConfigChanged);
eventbus.subscribe(GameTick.class, this, this::onGameTick);
}
private void onGameTick(GameTick event)
{
shuffleList();
if (playerName.equalsIgnoreCase(""))
{
return;
}
runningPlayer = null;
for (Player player : client.getPlayers())
{
if (player.getName() == null)
{
continue;
}
if (Text.standardize(player.getName()).equalsIgnoreCase(playerName))
{
runningPlayer = player;
break;
}
}
if (runningPlayer == null)
{
cordsError();
return;
}
addCords();
}
private void cordsError()
{
playerPath.add(null);
}
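	// The path behaves as a fixed-size sliding window: shuffleList() drops the
	// oldest entry once the list reaches MAXPATHSIZE, while addCords() pads the
	// list with nulls until it is full, so the newest location is always at the
	// end of the list.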
private void shuffleList()
{
if (playerPath.size() > MAXPATHSIZE - 1)
{
playerPath.remove(0);
}
}
private void addCords()
{
while (true)
{
if (playerPath.size() >= MAXPATHSIZE)
{
playerPath.add(runningPlayer.getWorldLocation());
break;
}
playerPath.add(null);
}
}
private void onConfigChanged(ConfigChanged event)
{
if (!event.getGroup().equals("drop"))
{
return;
}
updateConfig();
}
private void reset()
{
playerPath.clear();
}
private void updateConfig()
{
this.playerName = config.playerName();
this.showAmmount = config.showAmmount();
this.overlayColor = config.overlayColor();
this.fontStyle = config.fontStyle().getFont();
this.textSize = config.textSize();
}
}
| 1 | 15,768 | Rather than this maybe `Text.sanitize` would be better here | open-osrs-runelite | java |
@@ -82,7 +82,7 @@ public class NeighborsMsgSerializer : DiscoveryMsgSerializerBase, IMessageSerial
}
ReadOnlySpan<byte> id = ctx.DecodeByteArraySpan();
- return new Node(new PublicKey(id), address);
+ return new Node(new PublicKey(id), address, false);
});
}
} | 1 | // Copyright (c) 2021 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System.Net;
using Nethermind.Core.Crypto;
using Nethermind.Crypto;
using Nethermind.Network.Discovery.Messages;
using Nethermind.Serialization.Rlp;
using Nethermind.Stats.Model;
namespace Nethermind.Network.Discovery.Serializers;
public class NeighborsMsgSerializer : DiscoveryMsgSerializerBase, IMessageSerializer<NeighborsMsg>
{
public NeighborsMsgSerializer(IEcdsa ecdsa,
IPrivateKeyGenerator nodeKey,
INodeIdResolver nodeIdResolver) : base(ecdsa, nodeKey, nodeIdResolver)
{
}
public byte[] Serialize(NeighborsMsg msg)
{
Rlp[]? nodes = null;
if (msg.Nodes.Any())
{
nodes = new Rlp[msg.Nodes.Length];
for (int i = 0; i < msg.Nodes.Length; i++)
{
Node node = msg.Nodes[i];
Rlp serializedNode = SerializeNode(node.Address, node.Id.Bytes);
nodes[i] = serializedNode;
}
}
byte[] data = Rlp.Encode(
nodes == null ? Rlp.OfEmptySequence : Rlp.Encode(nodes),
Rlp.Encode(msg.ExpirationTime)
).Bytes;
byte[] serializedMsg = Serialize((byte) msg.MsgType, data);
return serializedMsg;
}
public NeighborsMsg Deserialize(byte[] msgBytes)
{
(PublicKey FarPublicKey, byte[] Mdc, byte[] Data) results = PrepareForDeserialization(msgBytes);
RlpStream rlp = results.Data.AsRlpStream();
rlp.ReadSequenceLength();
Node[] nodes = DeserializeNodes(rlp) as Node[];
long expirationTime = rlp.DecodeLong();
NeighborsMsg msg = new(results.FarPublicKey, expirationTime, nodes);
return msg;
}
private static Node?[] DeserializeNodes(RlpStream rlpStream)
{
return rlpStream.DecodeArray(ctx =>
{
int lastPosition = ctx.ReadSequenceLength() + ctx.Position;
int count = ctx.ReadNumberOfItemsRemaining(lastPosition);
ReadOnlySpan<byte> ip = ctx.DecodeByteArraySpan();
IPEndPoint address = GetAddress(ip, ctx.DecodeInt());
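            // A node record appears to be [ip, udp-port, (tcp-port), id]; when a
            // fourth item is present it is the TCP port, skipped here because only
            // the UDP endpoint is needed for discovery.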
if (count > 3)
{
ctx.DecodeInt();
}
ReadOnlySpan<byte> id = ctx.DecodeByteArraySpan();
return new Node(new PublicKey(id), address);
});
}
}
| 1 | 26,529 | lot of places setting default 'false' to static value - noise in PR | NethermindEth-nethermind | .cs |
@@ -361,11 +361,19 @@ class Image extends BaseI18nLoop implements PropelSearchLoopInterface
// Dispatch image processing event
$this->dispatcher->dispatch(TheliaEvents::IMAGE_PROCESS, $event);
+ $originalImageSize = getimagesize($sourceFilePath);
+
+ $imageSize = getimagesize($event->getCacheFilepath());
+
$loopResultRow
->set("IMAGE_URL", $event->getFileUrl())
->set("ORIGINAL_IMAGE_URL", $event->getOriginalFileUrl())
->set("IMAGE_PATH", $event->getCacheFilepath())
->set("PROCESSING_ERROR", false)
+ ->set("IMAGE_WIDH", $imageSize[0])
+ ->set("IMAGE_HEIGHT", $imageSize[1])
+ ->set("ORIGINAL_IMAGE_WIDH", $originalImageSize[0])
+ ->set("ORIGINAL_IMAGE_HEIGHT", $originalImageSize[1])
;
} catch (\Exception $ex) {
// Ignore the result and log an error | 1 | <?php
/*************************************************************************************/
/* This file is part of the Thelia package. */
/* */
/* Copyright (c) OpenStudio */
/* email : [email protected] */
/* web : http://www.thelia.net */
/* */
/* For the full copyright and license information, please view the LICENSE.txt */
/* file that was distributed with this source code. */
/*************************************************************************************/
namespace Thelia\Core\Template\Loop;
use Propel\Runtime\ActiveQuery\ModelCriteria;
use Thelia\Core\Template\Element\BaseI18nLoop;
use Thelia\Core\Template\Element\PropelSearchLoopInterface;
use Thelia\Core\Template\Loop\Argument\Argument;
use Thelia\Core\Event\Image\ImageEvent;
use Thelia\Core\Event\TheliaEvents;
use Thelia\Core\Template\Loop\Argument\ArgumentCollection;
use Thelia\Model\ProductDocumentQuery;
use Thelia\Model\ProductImage;
use Thelia\Type\BooleanOrBothType;
use Thelia\Type\TypeCollection;
use Thelia\Type\EnumListType;
use Propel\Runtime\ActiveQuery\Criteria;
use Thelia\Model\ConfigQuery;
use Thelia\Core\Template\Element\LoopResultRow;
use Thelia\Core\Template\Element\LoopResult;
use Thelia\Type\EnumType;
use Thelia\Log\Tlog;
/**
* The image loop
*
* @author Franck Allimant <[email protected]>
*
* {@inheritdoc}
* @method int[] getId()
* @method bool|string getVisible()
* @method int[] getExclude()
* @method int getWidth()
* @method int getHeight()
* @method int getRotation()
* @method string getBackgroundColor()
* @method int getQuality()
* @method string getEffects()
* @method int getCategory()
* @method int getProduct()
* @method int getFolder()
* @method int getContent()
* @method string getSource()
* @method int getSourceId()
* @method string getQueryNamespace()
* @method bool getAllowZoom()
* @method bool getIgnoreProcessingErrors()
* @method string getResizeMode()
* @method string[] getOrder()
*/
class Image extends BaseI18nLoop implements PropelSearchLoopInterface
{
protected $objectType;
protected $objectId;
protected $timestampable = true;
/**
* @var array Possible standard image sources
*/
protected $possible_sources = array('category', 'product', 'folder', 'content', 'module', 'brand');
/**
* @return \Thelia\Core\Template\Loop\Argument\ArgumentCollection
*/
protected function getArgDefinitions()
{
$collection = new ArgumentCollection(
Argument::createIntListTypeArgument('id'),
Argument::createIntListTypeArgument('exclude'),
Argument::createBooleanOrBothTypeArgument('visible', 1),
new Argument(
'order',
new TypeCollection(
new EnumListType(array('alpha', 'alpha-reverse', 'manual', 'manual-reverse', 'random'))
),
'manual'
),
Argument::createIntTypeArgument('width'),
Argument::createIntTypeArgument('height'),
Argument::createIntTypeArgument('rotation', 0),
Argument::createAnyTypeArgument('background_color'),
Argument::createIntTypeArgument('quality'),
new Argument(
'resize_mode',
new TypeCollection(
new EnumType(array('crop', 'borders', 'none'))
),
'none'
),
Argument::createAnyTypeArgument('effects'),
Argument::createIntTypeArgument('category'),
Argument::createIntTypeArgument('product'),
Argument::createIntTypeArgument('folder'),
Argument::createIntTypeArgument('content'),
Argument::createAnyTypeArgument('source'),
Argument::createIntTypeArgument('source_id'),
Argument::createBooleanTypeArgument('force_return', true),
Argument::createBooleanTypeArgument('ignore_processing_errors', true),
Argument::createAnyTypeArgument('query_namespace', 'Thelia\\Model'),
Argument::createBooleanTypeArgument('allow_zoom', false)
);
// Add possible image sources
foreach ($this->possible_sources as $source) {
$collection->addArgument(Argument::createIntTypeArgument($source));
}
return $collection;
}
/**
* Dynamically create the search query, and set the proper filter and order
*
* @param string $source a valid source identifier (@see $possible_sources)
* @param int $object_id the source object ID
* @return ModelCriteria the propel Query object
*/
protected function createSearchQuery($source, $object_id)
{
$object = ucfirst($source);
$ns = $this->getQueryNamespace();
if ('\\' !== $ns[0]) {
$ns = '\\'.$ns;
}
$queryClass = sprintf("%s\\%sImageQuery", $ns, $object);
$filterMethod = sprintf("filterBy%sId", $object);
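        // For example, with $source = 'product' and the default query namespace,
        // the reflection calls below amount to:
        // \Thelia\Model\ProductImageQuery::create()->filterByProductId($object_id)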
// xxxImageQuery::create()
$method = new \ReflectionMethod($queryClass, 'create');
$search = $method->invoke(null); // Static !
// $query->filterByXXX(id)
if (! is_null($object_id)) {
$method = new \ReflectionMethod($queryClass, $filterMethod);
$method->invoke($search, $object_id);
}
$orders = $this->getOrder();
// Results ordering
foreach ($orders as $order) {
switch ($order) {
case "alpha":
$search->addAscendingOrderByColumn('i18n_TITLE');
break;
case "alpha-reverse":
$search->addDescendingOrderByColumn('i18n_TITLE');
break;
case "manual-reverse":
$search->orderByPosition(Criteria::DESC);
break;
case "manual":
$search->orderByPosition(Criteria::ASC);
break;
case "random":
$search->clearOrderByColumns();
$search->addAscendingOrderByColumn('RAND()');
                    break(2);
}
}
return $search;
}
/**
     * Select the proper search query to use, based on the loop arguments
     *
     * @param string $objectType (returned) a valid source identifier (@see $possible_sources)
* @param string $objectId (returned) the ID of the source object
* @return ModelCriteria the propel Query object
*/
protected function getSearchQuery(&$objectType, &$objectId)
{
$search = null;
// Check form source="product" source_id="123" style arguments
$source = $this->getSource();
if (! is_null($source)) {
$sourceId = $this->getSourceId();
$id = $this->getId();
if (is_null($sourceId) && is_null($id)) {
throw new \InvalidArgumentException(
"If 'source' argument is specified, 'id' or 'source_id' argument should be specified"
);
}
$search = $this->createSearchQuery($source, $sourceId);
$objectType = $source;
$objectId = $sourceId;
} else {
// Check for product="id" folder="id", etc. style arguments
foreach ($this->possible_sources as $source) {
$argValue = $this->getArgValue($source);
if (! empty($argValue)) {
$argValue = intval($argValue);
$search = $this->createSearchQuery($source, $argValue);
$objectType = $source;
$objectId = $argValue;
break;
}
}
}
if ($search == null) {
throw new \InvalidArgumentException(
sprintf("Unable to find image source. Valid sources are %s", implode(',', $this->possible_sources))
);
}
return $search;
}
public function buildModelCriteria()
{
// Select the proper query to use, and get the object type
$this->objectType = $this->objectId = null;
/** @var ProductDocumentQuery $search */
$search = $this->getSearchQuery($this->objectType, $this->objectId);
/* manage translations */
$this->configureI18nProcessing($search);
$id = $this->getId();
if (! is_null($id)) {
$search->filterById($id, Criteria::IN);
}
$exclude = $this->getExclude();
if (!is_null($exclude)) {
$search->filterById($exclude, Criteria::NOT_IN);
}
$visible = $this->getVisible();
if ($visible !== BooleanOrBothType::ANY) {
$search->filterByVisible($visible ? 1 : 0);
}
return $search;
}
public function parseResults(LoopResult $loopResult)
{
// Create image processing event
$event = new ImageEvent($this->request);
        // Prepare transformations
$width = $this->getWidth();
$height = $this->getHeight();
$rotation = $this->getRotation();
$background_color = $this->getBackgroundColor();
$quality = $this->getQuality();
$effects = $this->getEffects();
$event->setAllowZoom($this->getAllowZoom());
if (! is_null($effects)) {
$effects = explode(',', $effects);
}
switch ($this->getResizeMode()) {
case 'crop':
$resizeMode = \Thelia\Action\Image::EXACT_RATIO_WITH_CROP;
break;
case 'borders':
$resizeMode = \Thelia\Action\Image::EXACT_RATIO_WITH_BORDERS;
break;
case 'none':
default:
$resizeMode = \Thelia\Action\Image::KEEP_IMAGE_RATIO;
}
$baseSourceFilePath = ConfigQuery::read('images_library_path');
if ($baseSourceFilePath === null) {
$baseSourceFilePath = THELIA_LOCAL_DIR . 'media' . DS . 'images';
} else {
$baseSourceFilePath = THELIA_ROOT . $baseSourceFilePath;
}
/** @var ProductImage $result */
foreach ($loopResult->getResultDataCollection() as $result) {
// Setup required transformations
if (! is_null($width)) {
$event->setWidth($width);
}
if (! is_null($height)) {
$event->setHeight($height);
}
$event->setResizeMode($resizeMode);
if (! is_null($rotation)) {
$event->setRotation($rotation);
}
if (! is_null($background_color)) {
$event->setBackgroundColor($background_color);
}
if (! is_null($quality)) {
$event->setQuality($quality);
}
if (! is_null($effects)) {
$event->setEffects($effects);
}
// Put source image file path
$sourceFilePath = sprintf(
'%s/%s/%s',
$baseSourceFilePath,
$this->objectType,
$result->getFile()
);
$event->setSourceFilepath($sourceFilePath);
$event->setCacheSubdirectory($this->objectType);
$loopResultRow = new LoopResultRow($result);
$loopResultRow
->set("ID", $result->getId())
->set("LOCALE", $this->locale)
->set("ORIGINAL_IMAGE_PATH", $sourceFilePath)
->set("TITLE", $result->getVirtualColumn('i18n_TITLE'))
->set("CHAPO", $result->getVirtualColumn('i18n_CHAPO'))
->set("DESCRIPTION", $result->getVirtualColumn('i18n_DESCRIPTION'))
->set("POSTSCRIPTUM", $result->getVirtualColumn('i18n_POSTSCRIPTUM'))
->set("VISIBLE", $result->getVisible())
->set("POSITION", $result->getPosition())
->set("OBJECT_TYPE", $this->objectType)
->set("OBJECT_ID", $this->objectId)
;
$addRow = true;
$returnErroredImages = $this->getBackendContext() || ! $this->getIgnoreProcessingErrors();
try {
// Dispatch image processing event
$this->dispatcher->dispatch(TheliaEvents::IMAGE_PROCESS, $event);
$loopResultRow
->set("IMAGE_URL", $event->getFileUrl())
->set("ORIGINAL_IMAGE_URL", $event->getOriginalFileUrl())
->set("IMAGE_PATH", $event->getCacheFilepath())
->set("PROCESSING_ERROR", false)
;
} catch (\Exception $ex) {
// Ignore the result and log an error
Tlog::getInstance()->addError(sprintf("Failed to process image in image loop: %s", $ex->getMessage()));
if ($returnErroredImages) {
$loopResultRow
->set("IMAGE_URL", '')
->set("ORIGINAL_IMAGE_URL", '')
->set("IMAGE_PATH", '')
->set("PROCESSING_ERROR", true)
;
} else {
$addRow = false;
}
}
if ($addRow) {
$this->addOutputFields($loopResultRow, $result);
$loopResult->addRow($loopResultRow);
}
}
return $loopResult;
}
}
| 1 | 11,593 | I think you should use `$event->getOriginalFileUrl()` instead of `$sourceFilePath` here. | thelia-thelia | php |
@@ -99,6 +99,7 @@ def run(inventory_index_id,
# pylint: disable=too-many-locals
global_configs = service_config.get_global_config()
notifier_configs = service_config.get_notifier_config()
+ api_quota_configs = notifier_configs.get('api_quota')
with service_config.scoped_session() as session:
if scanner_index_id: | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Notifier runner."""
from builtins import str
import importlib
import inspect
import traceback
# pylint: disable=line-too-long
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.common.util import string_formats
from google.cloud.forseti.notifier.notifiers.base_notification import BaseNotification
from google.cloud.forseti.notifier.notifiers import cscc_notifier
from google.cloud.forseti.notifier.notifiers.inventory_summary import InventorySummary
from google.cloud.forseti.services.inventory.storage import DataAccess
from google.cloud.forseti.services.scanner import dao as scanner_dao
# pylint: enable=line-too-long
LOGGER = logger.get_logger(__name__)
# pylint: disable=inconsistent-return-statements
def find_notifiers(notifier_name):
"""Get the first class in the given sub module
Args:
notifier_name (str): Name of the notifier.
Return:
class: The class in the sub module
"""
try:
module = importlib.import_module(
'google.cloud.forseti.notifier.notifiers.{0}'.format(
notifier_name))
for filename in dir(module):
obj = getattr(module, filename)
if (inspect.isclass(obj) and
issubclass(obj, BaseNotification) and
obj is not BaseNotification):
return obj
except ImportError:
LOGGER.exception('Can\'t import notifier %s', notifier_name)
# pylint: enable=inconsistent-return-statements
def convert_to_timestamp(violations):
"""Convert violation created_at_datetime to timestamp string.
Args:
violations (dict): List of violations as dict with
created_at_datetime.
Returns:
list: List of violations as dict with created_at_datetime
converted to timestamp string.
"""
for violation in violations:
violation['created_at_datetime'] = (
violation['created_at_datetime'].strftime(
string_formats.TIMESTAMP_TIMEZONE))
return violations
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
def run(inventory_index_id,
scanner_index_id,
progress_queue,
service_config=None):
"""Run the notifier.
Entry point when the notifier is run as a library.
Args:
inventory_index_id (int64): Inventory index id.
scanner_index_id (int64): Scanner index id.
progress_queue (Queue): The progress queue.
service_config (ServiceConfig): Forseti 2.0 service configs.
Returns:
int: Status code.
"""
# pylint: disable=too-many-locals
global_configs = service_config.get_global_config()
notifier_configs = service_config.get_notifier_config()
with service_config.scoped_session() as session:
if scanner_index_id:
inventory_index_id = (
DataAccess.get_inventory_index_id_by_scanner_index_id(
session,
scanner_index_id))
else:
if not inventory_index_id:
inventory_index_id = (
DataAccess.get_latest_inventory_index_id(session))
scanner_index_id = scanner_dao.get_latest_scanner_index_id(
session, inventory_index_id)
if not scanner_index_id:
LOGGER.error(
'No success or partial success scanner index found for '
'inventory index: "%s".', str(inventory_index_id))
else:
# get violations
violation_access = scanner_dao.ViolationAccess(session)
violations = violation_access.list(
scanner_index_id=scanner_index_id)
violations_as_dict = []
for violation in violations:
violations_as_dict.append(
scanner_dao.convert_sqlalchemy_object_to_dict(violation))
violations_as_dict = convert_to_timestamp(violations_as_dict)
violation_map = scanner_dao.map_by_resource(violations_as_dict)
for retrieved_v in violation_map:
log_message = (
'Retrieved {} violations for resource \'{}\''.format(
len(violation_map[retrieved_v]), retrieved_v))
LOGGER.info(log_message)
progress_queue.put(log_message)
# build notification notifiers
notifiers = []
for resource in notifier_configs['resources']:
if violation_map.get(resource['resource']) is None:
log_message = 'Resource \'{}\' has no violations'.format(
resource['resource'])
progress_queue.put(log_message)
LOGGER.info(log_message)
continue
if not resource['should_notify']:
LOGGER.debug('Not notifying for: %s', resource['resource'])
continue
for notifier in resource['notifiers']:
log_message = (
'Running \'{}\' notifier for resource \'{}\''.format(
notifier['name'], resource['resource']))
progress_queue.put(log_message)
LOGGER.info(log_message)
try:
chosen_pipeline = find_notifiers(notifier['name'])
notifiers.append(chosen_pipeline(
resource['resource'], inventory_index_id,
violation_map[resource['resource']], global_configs,
notifier_configs, notifier.get('configuration')))
except Exception as e: # pylint: disable=broad-except
error_message = ('Error running \'{}\' notifier for '
'resource \'{}\': \'{}\''.format(
notifier['name'],
resource['resource'],
traceback.format_exc()))
progress_queue.put(error_message)
LOGGER.exception(e)
# Run the notifiers.
for notifier in notifiers:
notifier.run()
# Run the CSCC notifier.
violation_configs = notifier_configs.get('violation')
if violation_configs:
if violation_configs.get('cscc').get('enabled'):
source_id = violation_configs.get('cscc').get('source_id')
# beta mode
LOGGER.debug(
'Running CSCC notifier with beta API. source_id: '
'%s', source_id)
(cscc_notifier.CsccNotifier(inventory_index_id)
.run(violations_as_dict, source_id=source_id))
InventorySummary(service_config, inventory_index_id).run()
log_message = 'Notification completed!'
progress_queue.put(log_message)
progress_queue.put(None)
LOGGER.info(log_message)
return 0
# pylint: enable=too-many-branches,too-many-statements
| 1 | 35,001 | nit: It's a matter of taste, but it would be tighter if this is called `api_quota`. | forseti-security-forseti-security | py |
@@ -16,9 +16,10 @@ package org.hyperledger.besu.ethereum.mainnet.precompiles;
import static org.assertj.core.api.Assertions.assertThat;
-import org.hyperledger.besu.ethereum.core.Gas;
-import org.hyperledger.besu.ethereum.vm.GasCalculator;
-import org.hyperledger.besu.ethereum.vm.MessageFrame;
+import org.hyperledger.besu.evm.Gas;
+import org.hyperledger.besu.evm.frame.MessageFrame;
+import org.hyperledger.besu.evm.gascalculator.GasCalculator;
+import org.hyperledger.besu.evm.precompile.AltBN128PairingPrecompiledContract;
import org.apache.tuweni.bytes.Bytes;
import org.junit.Test; | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.mainnet.precompiles;
import static org.assertj.core.api.Assertions.assertThat;
import org.hyperledger.besu.ethereum.core.Gas;
import org.hyperledger.besu.ethereum.vm.GasCalculator;
import org.hyperledger.besu.ethereum.vm.MessageFrame;
import org.apache.tuweni.bytes.Bytes;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.junit.MockitoJUnitRunner;
@RunWith(MockitoJUnitRunner.class)
public class AltBN128PairingPrecompiledContractTest {
@Mock MessageFrame messageFrame;
@Mock GasCalculator gasCalculator;
private final AltBN128PairingPrecompiledContract byzantiumContract =
AltBN128PairingPrecompiledContract.byzantium(gasCalculator);
private final AltBN128PairingPrecompiledContract istanbulContract =
AltBN128PairingPrecompiledContract.istanbul(gasCalculator);
@Test
public void compute_validPoints() {
final Bytes input = validPointBytes();
final Bytes result = byzantiumContract.compute(input, messageFrame);
assertThat(result).isEqualTo(AltBN128PairingPrecompiledContract.TRUE);
}
public Bytes validPointBytes() {
final Bytes g1Point0 =
Bytes.concatenate(
Bytes.fromHexString(
"0x0000000000000000000000000000000000000000000000000000000000000001"),
Bytes.fromHexString(
"0x0000000000000000000000000000000000000000000000000000000000000002"));
final Bytes g2Point0 =
Bytes.concatenate(
Bytes.fromHexString(
"0x198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c2"),
Bytes.fromHexString(
"0x1800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed"),
Bytes.fromHexString(
"0x090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b"),
Bytes.fromHexString(
"0x12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa"));
final Bytes g1Point1 =
Bytes.concatenate(
Bytes.fromHexString(
"0x0000000000000000000000000000000000000000000000000000000000000001"),
Bytes.fromHexString(
"0x30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd45"));
final Bytes g2Point1 =
Bytes.concatenate(
Bytes.fromHexString(
"0x198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c2"),
Bytes.fromHexString(
"0x1800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed"),
Bytes.fromHexString(
"0x090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b"),
Bytes.fromHexString(
"0x12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa"));
return Bytes.concatenate(g1Point0, g2Point0, g1Point1, g2Point1);
}
@Test
public void compute_invalidPointsOutsideSubgroupG2() {
final Bytes g1Point0 =
Bytes.concatenate(
Bytes.fromHexString(
"0x0000000000000000000000000000000000000000000000000000000000000001"),
Bytes.fromHexString(
"0x0000000000000000000000000000000000000000000000000000000000000002"));
final Bytes g2Point0 =
Bytes.concatenate(
Bytes.fromHexString(
"0x1382cd45e5674247f9c900b5c6f6cabbc189c2fabe2df0bf5acd84c97818f508"),
Bytes.fromHexString(
"0x1246178655ab8f2f26956b189894b7eb93cd4215b9937e7969e44305f80f521e"),
Bytes.fromHexString(
"0x08331c0a261a74e7e75db1232956663cbc88110f726159c5cba1857ecd03fa64"),
Bytes.fromHexString(
"0x1fbf8045ce3e79b5cde4112d38bcd0efbdb1295d2eefdf58151ae309d7ded7db"));
final Bytes g1Point1 =
Bytes.concatenate(
Bytes.fromHexString(
"0x0000000000000000000000000000000000000000000000000000000000000001"),
Bytes.fromHexString(
"0x30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd45"));
final Bytes g2Point1 =
Bytes.concatenate(
Bytes.fromHexString(
"0x1382cd45e5674247f9c900b5c6f6cabbc189c2fabe2df0bf5acd84c97818f508"),
Bytes.fromHexString(
"0x1246178655ab8f2f26956b189894b7eb93cd4215b9937e7969e44305f80f521e"),
Bytes.fromHexString(
"0x08331c0a261a74e7e75db1232956663cbc88110f726159c5cba1857ecd03fa64"),
Bytes.fromHexString(
"0x1fbf8045ce3e79b5cde4112d38bcd0efbdb1295d2eefdf58151ae309d7ded7db"));
final Bytes input = Bytes.concatenate(g1Point0, g2Point0, g1Point1, g2Point1);
final Bytes result = byzantiumContract.compute(input, messageFrame);
assertThat(result).isNull();
}
@Test
public void gasPrice_byzantium() {
assertThat(byzantiumContract.gasRequirement(validPointBytes())).isEqualTo(Gas.of(260_000));
}
@Test
public void gasPrice_istanbul() {
assertThat(istanbulContract.gasRequirement(validPointBytes())).isEqualTo(Gas.of(113_000));
}
}
| 1 | 25,988 | I noticed in all of these we now have to import the PrecompiledContract under testing. Could also rename the package these are in s/precompiles/precompile to keep them in the same package as they are elsewhere. | hyperledger-besu | java |
@@ -288,7 +288,7 @@ class SnakebiteHdfsClient(HdfsClient):
client_kwargs = dict(filter(lambda (k, v): v is not None and v != '', {
'hadoop_version': self.config.getint("hdfs", "client_version", None),
'effective_user': self.config.get("hdfs", "effective_user", None)
- }.items()))
+ }.iteritems()))
if self.config.getboolean("hdfs", "snakebite_autoconfig", False):
"""
This is fully backwards compatible with the vanilla Client and can be used for a non HA cluster as well. | 1 | # Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import subprocess
import os
import random
import urlparse
import luigi.format
import luigi.contrib.target
import datetime
import re
import warnings
from luigi.target import FileSystem, FileSystemTarget, FileAlreadyExists
import configuration
import logging
import getpass
logger = logging.getLogger('luigi-interface')
class HDFSCliError(Exception):
def __init__(self, command, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
msg = ("Command %r failed [exit code %d]\n" +
"---stdout---\n" +
"%s\n" +
"---stderr---\n" +
"%s" +
"------------") % (command, returncode, stdout, stderr)
super(HDFSCliError, self).__init__(msg)
def call_check(command):
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise HDFSCliError(command, p.returncode, stdout, stderr)
return stdout
def load_hadoop_cmd():
return [luigi.configuration.get_config().get('hadoop', 'command', 'hadoop')]
def tmppath(path=None, include_unix_username=True):
"""
@param path: target path for which a temporary location is generated
@type path: str
@type include_unix_username: bool
@rtype: str
Note that include_unix_username might work on windows too.
"""
addon = "luigitemp-%08d" % random.randrange(1e9)
temp_dir = '/tmp' # default tmp dir if none is specified in config
#1. Figure out to which temporary directory to place
configured_hdfs_tmp_dir = configuration.get_config().get('core', 'hdfs-tmp-dir', None)
if configured_hdfs_tmp_dir is not None:
#config is superior
base_dir = configured_hdfs_tmp_dir
elif path is not None:
#need to copy correct schema and network location
parsed = urlparse.urlparse(path)
base_dir = urlparse.urlunparse((parsed.scheme, parsed.netloc, temp_dir, '', '', ''))
else:
#just system temporary directory
base_dir = temp_dir
#2. Figure out what to place
if path is not None:
if path.startswith(temp_dir + '/'):
#Not 100%, but some protection from directories like /tmp/tmp/file
subdir = path[len(temp_dir):]
else:
#Protection from /tmp/hdfs:/dir/file
parsed = urlparse.urlparse(path)
subdir = parsed.path
subdir = subdir.lstrip('/') + '-'
else:
#just return any random temporary location
subdir = ''
if include_unix_username:
subdir = os.path.join(getpass.getuser(), subdir)
return os.path.join(base_dir, subdir + addon)
def list_path(path):
if isinstance(path, list) or isinstance(path, tuple):
return path
if isinstance(path, str) or isinstance(path, unicode):
return [path, ]
return [str(path), ]
class HdfsClient(FileSystem):
"""This client uses Apache 2.x syntax for file system commands, which also matched CDH4"""
recursive_listdir_cmd = ['-ls', '-R']
def exists(self, path):
""" Use ``hadoop fs -stat`` to check file existence
"""
cmd = load_hadoop_cmd() + ['fs', '-stat', path]
logger.debug('Running file existence check: %s' % u' '.join(cmd))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
if p.returncode == 0:
return True
else:
not_found_pattern = "^.*No such file or directory$"
not_found_re = re.compile(not_found_pattern)
for line in stderr.split('\n'):
if not_found_re.match(line):
return False
raise HDFSCliError(cmd, p.returncode, stdout, stderr)
def rename(self, path, dest):
parent_dir = os.path.dirname(dest)
if parent_dir != '' and not self.exists(parent_dir):
self.mkdir(parent_dir)
if type(path) not in (list, tuple):
path = [path]
else:
warnings.warn("Renaming multiple files at once is not atomic.")
call_check(load_hadoop_cmd() + ['fs', '-mv'] + path + [dest])
def remove(self, path, recursive=True, skip_trash=False):
if recursive:
cmd = load_hadoop_cmd() + ['fs', '-rm', '-r']
else:
cmd = load_hadoop_cmd() + ['fs', '-rm']
if skip_trash:
cmd = cmd + ['-skipTrash']
cmd = cmd + [path]
call_check(cmd)
def chmod(self, path, permissions, recursive=False):
if recursive:
cmd = load_hadoop_cmd() + ['fs', '-chmod', '-R', permissions, path]
else:
cmd = load_hadoop_cmd() + ['fs', '-chmod', permissions, path]
call_check(cmd)
def chown(self, path, owner, group, recursive=False):
if owner is None:
owner = ''
if group is None:
group = ''
ownership = "%s:%s" % (owner, group)
if recursive:
cmd = load_hadoop_cmd() + ['fs', '-chown', '-R', ownership, path]
else:
cmd = load_hadoop_cmd() + ['fs', '-chown', ownership, path]
call_check(cmd)
def count(self, path):
cmd = load_hadoop_cmd() + ['fs', '-count', path]
stdout = call_check(cmd)
lines = stdout.split('\n')
for line in stdout.split('\n'):
if line.startswith("OpenJDK 64-Bit Server VM warning") or line.startswith("It's highly recommended") or not line:
lines.pop(lines.index(line))
else:
(dir_count, file_count, content_size, ppath) = stdout.split()
results = {'content_size': content_size, 'dir_count': dir_count, 'file_count': file_count}
return results
def copy(self, path, destination):
call_check(load_hadoop_cmd() + ['fs', '-cp', path, destination])
def put(self, local_path, destination):
call_check(load_hadoop_cmd() + ['fs', '-put', local_path, destination])
def get(self, path, local_destination):
call_check(load_hadoop_cmd() + ['fs', '-get', path, local_destination])
def getmerge(self, path, local_destination, new_line=False):
if new_line:
cmd = load_hadoop_cmd() + ['fs', '-getmerge', '-nl', path, local_destination]
else:
cmd = load_hadoop_cmd() + ['fs', '-getmerge', path, local_destination]
call_check(cmd)
def mkdir(self, path, parents=True, raise_if_exists=False):
if (parents and raise_if_exists):
raise NotImplementedError("HdfsClient.mkdir can't raise with -p")
try:
cmd = (load_hadoop_cmd() + ['fs', '-mkdir'] +
(['-p'] if parents else []) +
[path])
call_check(cmd)
except HDFSCliError, ex:
if "File exists" in ex.stderr:
if raise_if_exists:
raise FileAlreadyExists(ex.stderr)
else:
raise
def listdir(self, path, ignore_directories=False, ignore_files=False,
include_size=False, include_type=False, include_time=False, recursive=False):
if not path:
path = "." # default to current/home catalog
if recursive:
cmd = load_hadoop_cmd() + ['fs'] + self.recursive_listdir_cmd + [path]
else:
cmd = load_hadoop_cmd() + ['fs', '-ls', path]
lines = call_check(cmd).split('\n')
for line in lines:
if not line:
continue
elif line.startswith('OpenJDK 64-Bit Server VM warning') or line.startswith('It\'s highly recommended') or line.startswith('Found'):
continue # "hadoop fs -ls" outputs "Found %d items" as its first line
elif ignore_directories and line[0] == 'd':
continue
elif ignore_files and line[0] == '-':
continue
data = line.split(' ')
file = data[-1]
size = int(data[-4])
line_type = line[0]
extra_data = ()
if include_size:
extra_data += (size,)
if include_type:
extra_data += (line_type,)
if include_time:
time_str = '%sT%s' % (data[-3], data[-2])
modification_time = datetime.datetime.strptime(time_str,
'%Y-%m-%dT%H:%M')
extra_data += (modification_time,)
if len(extra_data) > 0:
yield (file,) + extra_data
else:
yield file
class SnakebiteHdfsClient(HdfsClient):
"""
This client uses Spotify's snakebite client whenever possible.
@author: Alan Brenner <[email protected]> github.com/alanbbr
"""
def __init__(self):
super(SnakebiteHdfsClient, self).__init__()
try:
from snakebite.client import Client
self.config = configuration.get_config()
self._bite = None
self.pid = -1
except Exception as err: # IGNORE:broad-except
raise RuntimeError("You must specify namenode_host and namenode_port "
"in the [hdfs] section of your luigi config in "
"order to use luigi's snakebite support", err)
def __new__(cls):
try:
from snakebite.client import Client
this = super(SnakebiteHdfsClient, cls).__new__(cls)
return this
except ImportError:
logger.warning("Failed to load snakebite.client. Using HdfsClient.")
return HdfsClient()
def get_bite(self):
"""
If Luigi has forked, we have a different PID, and need to reconnect.
"""
if self.pid != os.getpid() or not self._bite:
client_kwargs = dict(filter(lambda (k, v): v is not None and v != '', {
'hadoop_version': self.config.getint("hdfs", "client_version", None),
'effective_user': self.config.get("hdfs", "effective_user", None)
}.items()))
if self.config.getboolean("hdfs", "snakebite_autoconfig", False):
"""
This is fully backwards compatible with the vanilla Client and can be used for a non HA cluster as well.
This client tries to read ``${HADOOP_PATH}/conf/hdfs-site.xml`` to get the address of the namenode.
The behaviour is the same as Client.
"""
from snakebite.client import AutoConfigClient
self._bite = AutoConfigClient(**client_kwargs)
else:
from snakebite.client import Client
self._bite = Client(self.config.get("hdfs", "namenode_host"), self.config.getint("hdfs", "namenode_port"), **client_kwargs)
return self._bite
def exists(self, path):
"""
Use snakebite.test to check file existence.
:param path: path to test
:type path: string
:return: boolean, True if path exists in HDFS
"""
try:
return self.get_bite().test(path, exists=True)
except Exception as err: # IGNORE:broad-except
raise HDFSCliError("snakebite.test", -1, str(err), repr(err))
def rename(self, path, dest):
"""
Use snakebite.rename, if available.
:param path: source file(s)
:type path: either a string or sequence of strings
:param dest: destination file (single input) or directory (multiple)
:type dest: string
:return: list of renamed items
"""
parts = dest.rstrip('/').split('/')
if len(parts) > 1:
dir_path = '/'.join(parts[0:-1])
if not self.exists(dir_path):
self.mkdir(dir_path, parents=True)
return list(self.get_bite().rename(list_path(path), dest))
def remove(self, path, recursive=True, skip_trash=False):
"""
Use snakebite.delete, if available.
:param path: delete-able file(s) or directory(ies)
:type path: either a string or a sequence of strings
:param recursive: delete directory trees like \*nix: rm -r
:type recursive: boolean, default is True
:param skip_trash: do or don't move deleted items into the trash first
:type skip_trash: boolean, default is False (use trash)
:return: list of deleted items
"""
return list(self.get_bite().delete(list_path(path), recurse=recursive))
def chmod(self, path, permissions, recursive=False):
"""
Use snakebite.chmod, if available.
:param path: update-able file(s)
:type path: either a string or sequence of strings
:param permissions: \*nix style permission number
:type permissions: octal
:param recursive: change just listed entry(ies) or all in directories
:type recursive: boolean, default is False
:return: list of all changed items
"""
return list(self.get_bite().chmod(list_path(path),
permissions, recursive))
def chown(self, path, owner, group, recursive=False):
"""
Use snakebite.chown/chgrp, if available.
One of owner or group must be set. Just setting group calls chgrp.
:param path: update-able file(s)
:type path: either a string or sequence of strings
:param owner: new owner, can be blank
:type owner: string
:param group: new group, can be blank
:type group: string
:param recursive: change just listed entry(ies) or all in directories
:type recursive: boolean, default is False
:return: list of all changed items
"""
bite = self.get_bite()
if owner:
if group:
return all(bite.chown(list_path(path), "%s:%s" % (owner, group),
recurse=recursive))
return all(bite.chown(list_path(path), owner, recurse=recursive))
return list(bite.chgrp(list_path(path), group, recurse=recursive))
def count(self, path):
"""
Use snakebite.count, if available.
:param path: directory to count the contents of
:type path: string
:return: dictionary with content_size, dir_count and file_count keys
"""
try:
(dir_count, file_count, content_size, ppath) = \
self.get_bite().count(list_path(path)).next().split()
except StopIteration:
dir_count = file_count = content_size = 0
return {'content_size': content_size, 'dir_count': dir_count,
'file_count': file_count}
def get(self, path, local_destination):
"""
Use snakebite.copyToLocal, if available.
:param path: HDFS file
:type path: string
:param local_destination: path on the system running Luigi
:type local_destination: string
"""
return list(self.get_bite().copyToLocal(list_path(path),
local_destination))
def mkdir(self, path, parents=True, mode=0755, raise_if_exists=False):
"""
Use snakebite.mkdir, if available.
Snakebite's mkdir method allows control over full path creation, so by
default, tell it to build a full path to work like ``hadoop fs -mkdir``.
:param path: HDFS path to create
:type path: string
:param parents: create any missing parent directories
:type parents: boolean, default is True
:param mode: \*nix style owner/group/other permissions
:type mode: octal, default 0755
"""
result = list(self.get_bite().mkdir(list_path(path),
create_parent=parents, mode=mode))
if raise_if_exists and "ile exists" in result[0].get('error', ''):
raise luigi.target.FileAlreadyExists("%s exists" % (path, ))
return result
def listdir(self, path, ignore_directories=False, ignore_files=False,
include_size=False, include_type=False, include_time=False,
recursive=False):
"""
Use snakebite.ls to get the list of items in a directory.
:param path: the directory to list
:type path: string
:param ignore_directories: if True, do not yield directory entries
:type ignore_directories: boolean, default is False
:param ignore_files: if True, do not yield file entries
:type ignore_files: boolean, default is False
:param include_size: include the size in bytes of the current item
:type include_size: boolean, default is False (do not include)
:param include_type: include the type (d or f) of the current item
:type include_type: boolean, default is False (do not include)
:param include_time: include the last modification time of the current item
:type include_time: boolean, default is False (do not include)
:param recursive: list subdirectory contents
:type recursive: boolean, default is False (do not recurse)
:return: yield with a string, or if any of the include_* settings are
true, a tuple starting with the path, and include_* items in order
"""
bite = self.get_bite()
for entry in bite.ls(list_path(path), recurse=recursive):
if ignore_directories and entry['file_type'] == 'd':
continue
if ignore_files and entry['file_type'] == 'f':
continue
rval = [entry['path'], ]
if include_size:
rval.append(entry['length'])
if include_type:
rval.append(entry['file_type'])
if include_time:
rval.append(datetime.datetime.fromtimestamp(entry['modification_time'] / 1000))
if len(rval) > 1:
yield tuple(rval)
else:
yield rval[0]
class HdfsClientCdh3(HdfsClient):
"""This client uses CDH3 syntax for file system commands"""
def mkdir(self, path):
'''
No -p switch, so this will fail creating ancestors
'''
try:
call_check(load_hadoop_cmd() + ['fs', '-mkdir', path])
except HDFSCliError, ex:
if "File exists" in ex.stderr:
raise FileAlreadyExists(ex.stderr)
else:
raise
def remove(self, path, recursive=True, skip_trash=False):
if recursive:
cmd = load_hadoop_cmd() + ['fs', '-rmr']
else:
cmd = load_hadoop_cmd() + ['fs', '-rm']
if skip_trash:
cmd = cmd + ['-skipTrash']
cmd = cmd + [path]
call_check(cmd)
class HdfsClientApache1(HdfsClientCdh3):
"""This client uses Apache 1.x syntax for file system commands,
which are similar to CDH3 except for the file existence check"""
recursive_listdir_cmd = ['-lsr']
def exists(self, path):
cmd = load_hadoop_cmd() + ['fs', '-test', '-e', path]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
if p.returncode == 0:
return True
elif p.returncode == 1:
return False
else:
raise HDFSCliError(cmd, p.returncode, stdout, stderr)
def get_configured_hadoop_version():
"""
CDH4 (hadoop 2+) has a slightly different syntax for interacting with hdfs
via the command line. The default version is CDH4, but one can override
this setting with "cdh3" or "apache1" in the hadoop section of the config
in order to use the old syntax
"""
return configuration.get_config().get("hadoop", "version", "cdh4").lower()
def get_configured_hdfs_client(show_warnings=True):
""" This is a helper that fetches the configuration value for 'client' in
the [hdfs] section. It will return the client that retains backwards
compatibility when 'client' isn't configured. """
config = configuration.get_config()
custom = config.get("hdfs", "client", None)
if custom:
# Eventually this should be the only valid code path
return custom
if config.getboolean("hdfs", "use_snakebite", False):
if show_warnings:
warnings.warn("Deprecated: Just specify 'client: snakebite' in config")
return "snakebite"
if show_warnings:
warnings.warn("Deprecated: Specify 'client: hadoopcli' in config")
return "hadoopcli" # The old default when not specified
def create_hadoopcli_client():
""" Given that we want one of the hadoop cli clients (unlike snakebite),
this one will return the right one """
version = get_configured_hadoop_version()
if version == "cdh4":
return HdfsClient()
elif version == "cdh3":
return HdfsClientCdh3()
elif version == "apache1":
return HdfsClientApache1()
else:
raise Exception("Error: Unknown version specified in Hadoop version"
"configuration parameter")
def get_autoconfig_client(show_warnings=True):
"""Creates the client as specified in the `client.cfg` configuration"""
configured_client = get_configured_hdfs_client(show_warnings=show_warnings)
if configured_client == "snakebite":
return SnakebiteHdfsClient()
if configured_client == "snakebite_with_hadoopcli_fallback":
return luigi.contrib.target.CascadingClient([SnakebiteHdfsClient(),
create_hadoopcli_client()])
if configured_client == "hadoopcli":
return create_hadoopcli_client()
raise Exception("Unknown hdfs client " + get_configured_hdfs_client())
# Suppress warnings so that importing luigi.hdfs doesn't show a deprecated warning.
client = get_autoconfig_client(show_warnings=False)
exists = client.exists
rename = client.rename
remove = client.remove
mkdir = client.mkdir
listdir = client.listdir
class HdfsReadPipe(luigi.format.InputPipeProcessWrapper):
def __init__(self, path):
super(HdfsReadPipe, self).__init__(load_hadoop_cmd() + ['fs', '-cat', path])
class HdfsAtomicWritePipe(luigi.format.OutputPipeProcessWrapper):
""" File like object for writing to HDFS
The referenced file is first written to a temporary location and then
renamed to final location on close(). If close() isn't called
the temporary file will be cleaned up when this object is
garbage collected
TODO: if this is buggy, change it so it first writes to a
local temporary file and then uploads it on completion
"""
def __init__(self, path):
self.path = path
self.tmppath = tmppath(self.path)
parent_dir = os.path.dirname(self.tmppath)
mkdir(parent_dir, parents=True, raise_if_exists=False)
super(HdfsAtomicWritePipe, self).__init__(load_hadoop_cmd() + ['fs', '-put', '-', self.tmppath])
def abort(self):
logger.info("Aborting %s('%s'). Removing temporary file '%s'",
self.__class__.__name__, self.path, self.tmppath)
super(HdfsAtomicWritePipe, self).abort()
remove(self.tmppath)
def close(self):
super(HdfsAtomicWritePipe, self).close()
rename(self.tmppath, self.path)
class HdfsAtomicWriteDirPipe(luigi.format.OutputPipeProcessWrapper):
""" Writes a data<data_extension> file to a directory at <path> """
def __init__(self, path, data_extension=""):
self.path = path
self.tmppath = tmppath(self.path)
self.datapath = self.tmppath + ("/data%s" % data_extension)
super(HdfsAtomicWriteDirPipe, self).__init__(load_hadoop_cmd() + ['fs', '-put', '-', self.datapath])
def abort(self):
logger.info("Aborting %s('%s'). Removing temporary dir '%s'",
self.__class__.__name__, self.path, self.tmppath)
super(HdfsAtomicWriteDirPipe, self).abort()
remove(self.tmppath)
def close(self):
super(HdfsAtomicWriteDirPipe, self).close()
rename(self.tmppath, self.path)
class Plain(luigi.format.Format):
@classmethod
def hdfs_reader(cls, path):
return HdfsReadPipe(path)
@classmethod
def pipe_writer(cls, output_pipe):
return output_pipe
class PlainDir(luigi.format.Format):
@classmethod
def hdfs_reader(cls, path):
# exclude underscore-prefixed files/folders (created by MapReduce)
return HdfsReadPipe("%s/[^_]*" % path)
@classmethod
def hdfs_writer(cls, path):
return HdfsAtomicWriteDirPipe(path)
class HdfsTarget(FileSystemTarget):
def __init__(self, path=None, format=Plain, is_tmp=False, fs=None):
if path is None:
assert is_tmp
path = tmppath()
super(HdfsTarget, self).__init__(path)
self.format = format
self.is_tmp = is_tmp
(scheme, netloc, path, query, fragment) = urlparse.urlsplit(path)
if ":" in path:
raise ValueError('colon is not allowed in hdfs filenames')
self._fs = fs or get_autoconfig_client()
def __del__(self):
#TODO: not sure is_tmp belongs in Targets construction arguments
if self.is_tmp and self.exists():
self.remove()
@property
def fs(self):
return self._fs
def glob_exists(self, expected_files):
ls = list(listdir(self.path))
if len(ls) == expected_files:
return True
return False
def open(self, mode='r'):
if mode not in ('r', 'w'):
raise ValueError("Unsupported open mode '%s'" % mode)
if mode == 'r':
try:
return self.format.hdfs_reader(self.path)
except NotImplementedError:
return self.format.pipe_reader(HdfsReadPipe(self.path))
else:
try:
return self.format.hdfs_writer(self.path)
except NotImplementedError:
return self.format.pipe_writer(HdfsAtomicWritePipe(self.path))
def remove(self, skip_trash=False):
remove(self.path, skip_trash=skip_trash)
@luigi.util.deprecate_kwarg('fail_if_exists', 'raise_if_exists', False)
def rename(self, path, fail_if_exists=False):
""" Rename does not change self.path, so be careful with assumptions
Not recommended for directories. Use move_dir. spotify/luigi#522
"""
if isinstance(path, HdfsTarget):
path = path.path
if fail_if_exists and exists(path):
raise RuntimeError('Destination exists: %s' % path)
rename(self.path, path)
@luigi.util.deprecate_kwarg('fail_if_exists', 'raise_if_exists', False)
def move(self, path, fail_if_exists=False):
""" Move does not change self.path, so be careful with assumptions
Not recommended for directories. Use move_dir. spotify/luigi#522
"""
self.rename(path, raise_if_exists=fail_if_exists)
def move_dir(self, path):
# mkdir will fail if directory already exists, thereby ensuring atomicity
if isinstance(path, HdfsTarget):
path = path.path
mkdir(path, parents=False, raise_if_exists=True)
rename(self.path + '/*', path)
self.remove()
def is_writable(self):
if "/" in self.path:
# example path: /log/ap/2013-01-17/00
parts = self.path.split("/")
# start with the full path and then up the tree until we can check
length = len(parts)
for part in xrange(length):
path = "/".join(parts[0:length - part]) + "/"
if exists(path):
# if the path exists and we can write there, great!
if self._is_writable(path):
return True
# if it exists and we can't =( sad panda
else:
return False
# We went through all parts of the path and we still couldn't find
# one that exists.
return False
def _is_writable(self, path):
test_path = path + '.test_write_access-%09d' % random.randrange(1e10)
return_value = subprocess.call(load_hadoop_cmd() + ['fs', '-touchz', test_path])
if return_value != 0:
return False
else:
remove(test_path, recursive=False)
return True
| 1 | 10,490 | doubt this matters... | spotify-luigi | py |
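On the `.items()` to `.iteritems()` swap the review note shrugs at: in Python 2, `iteritems()` avoids materialising an intermediate list of pairs, but for this two-key dict both spellings filter down to the same `client_kwargs`. A minimal sketch (same tuple-unpacking lambda as the patch, assumed to run under Python 2):

candidates = {'hadoop_version': None, 'effective_user': 'hdfs'}
via_items = dict(filter(lambda (k, v): v is not None and v != '', candidates.items()))
via_iteritems = dict(filter(lambda (k, v): v is not None and v != '', candidates.iteritems()))
assert via_items == via_iteritems == {'effective_user': 'hdfs'}  # identical result either way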
@@ -36,6 +36,7 @@ from scapy.automaton import *
from scapy.autorun import *
from scapy.main import *
+from scapy.consts import *
from scapy.layers.all import *
| 1 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <[email protected]>
## This program is published under a GPLv2 license
"""
Aggregate top level objects from all Scapy modules.
"""
from scapy.base_classes import *
from scapy.config import *
from scapy.dadict import *
from scapy.data import *
from scapy.error import *
from scapy.themes import *
from scapy.arch import *
from scapy.plist import *
from scapy.fields import *
from scapy.packet import *
from scapy.asn1fields import *
from scapy.asn1packet import *
from scapy.utils import *
from scapy.route import *
if conf.ipv6_enabled:
from scapy.utils6 import *
from scapy.route6 import *
from scapy.sendrecv import *
from scapy.supersocket import *
from scapy.volatile import *
from scapy.as_resolvers import *
from scapy.ansmachine import *
from scapy.automaton import *
from scapy.autorun import *
from scapy.main import *
from scapy.layers.all import *
from scapy.asn1.asn1 import *
from scapy.asn1.ber import *
from scapy.asn1.mib import *
from scapy.pipetool import *
from scapy.scapypipes import *
| 1 | 10,283 | Gets updated versions of `LOOPBACK_INTERFACE`, `LOOPBACK_NAME` when importing scapy. | secdev-scapy | py |
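The note above is about star-import semantics: `from m import *` copies bindings at import time, so `scapy/all.py` must itself import `scapy.consts` for users of `scapy.all` to see those constants. A short usage sketch of the effect after the patch (assuming `scapy.consts` exports the two names, as the note says):

from scapy.all import *                    # with the patch, this re-exports scapy.consts
print(LOOPBACK_INTERFACE, LOOPBACK_NAME)   # resolvable only because all.py imports scapy.consts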
@@ -372,7 +372,7 @@ public class SparkTableUtil {
try {
PartitionSpec spec = SparkSchemaUtil.specForTable(spark, sourceTableIdentWithDB.unquotedString());
- if (spec == PartitionSpec.unpartitioned()) {
+ if (Objects.equal(spec, PartitionSpec.unpartitioned())) {
importUnpartitionedSparkTable(spark, sourceTableIdentWithDB, targetTable);
} else {
List<SparkPartition> sourceTablePartitions = getPartitions(spark, sourceTableIdent); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.spark;
import java.io.IOException;
import java.io.Serializable;
import java.net.URI;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.iceberg.AppendFiles;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.FileFormat;
import org.apache.iceberg.ManifestFile;
import org.apache.iceberg.ManifestFiles;
import org.apache.iceberg.ManifestWriter;
import org.apache.iceberg.MetricsConfig;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableProperties;
import org.apache.iceberg.data.TableMigrationUtil;
import org.apache.iceberg.hadoop.HadoopFileIO;
import org.apache.iceberg.hadoop.SerializableConfiguration;
import org.apache.iceberg.hadoop.Util;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.io.OutputFile;
import org.apache.iceberg.mapping.NameMapping;
import org.apache.iceberg.mapping.NameMappingParser;
import org.apache.iceberg.relocated.com.google.common.base.Joiner;
import org.apache.iceberg.relocated.com.google.common.base.MoreObjects;
import org.apache.iceberg.relocated.com.google.common.base.Objects;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.util.PropertyUtil;
import org.apache.iceberg.util.Tasks;
import org.apache.spark.TaskContext;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.api.java.function.MapPartitionsFunction;
import org.apache.spark.sql.AnalysisException;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.catalyst.TableIdentifier;
import org.apache.spark.sql.catalyst.analysis.NoSuchDatabaseException;
import org.apache.spark.sql.catalyst.analysis.NoSuchTableException;
import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute;
import org.apache.spark.sql.catalyst.catalog.CatalogTable;
import org.apache.spark.sql.catalyst.catalog.CatalogTablePartition;
import org.apache.spark.sql.catalyst.catalog.SessionCatalog;
import org.apache.spark.sql.catalyst.expressions.Expression;
import org.apache.spark.sql.catalyst.expressions.NamedExpression;
import org.apache.spark.sql.catalyst.parser.ParseException;
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan;
import scala.Function2;
import scala.Option;
import scala.Some;
import scala.Tuple2;
import scala.collection.JavaConverters;
import scala.collection.Seq;
import scala.runtime.AbstractPartialFunction;
import static org.apache.spark.sql.functions.col;
/**
* Java version of the original SparkTableUtil.scala
* https://github.com/apache/iceberg/blob/apache-iceberg-0.8.0-incubating/spark/src/main/scala/org/apache/iceberg/spark/SparkTableUtil.scala
*/
public class SparkTableUtil {
private static final Joiner.MapJoiner MAP_JOINER = Joiner.on(",").withKeyValueSeparator("=");
private static final PathFilter HIDDEN_PATH_FILTER =
p -> !p.getName().startsWith("_") && !p.getName().startsWith(".");
private SparkTableUtil() {
}
/**
* Returns a DataFrame with a row for each partition in the table.
*
* The DataFrame has 3 columns, partition key (a=1/b=2), partition location, and format
* (avro or parquet).
*
* @param spark a Spark session
* @param table a table name and (optional) database
* @return a DataFrame of the table's partitions
*/
public static Dataset<Row> partitionDF(SparkSession spark, String table) {
List<SparkPartition> partitions = getPartitions(spark, table);
return spark.createDataFrame(partitions, SparkPartition.class).toDF("partition", "uri", "format");
}
/**
* Returns a DataFrame with a row for each partition that matches the specified 'expression'.
*
* @param spark a Spark session.
* @param table name of the table.
* @param expression The expression whose matching partitions are returned.
* @return a DataFrame of the table partitions.
*/
public static Dataset<Row> partitionDFByFilter(SparkSession spark, String table, String expression) {
List<SparkPartition> partitions = getPartitionsByFilter(spark, table, expression);
return spark.createDataFrame(partitions, SparkPartition.class).toDF("partition", "uri", "format");
}
/**
* Returns all partitions in the table.
*
* @param spark a Spark session
* @param table a table name and (optional) database
* @return all table's partitions
*/
public static List<SparkPartition> getPartitions(SparkSession spark, String table) {
try {
TableIdentifier tableIdent = spark.sessionState().sqlParser().parseTableIdentifier(table);
return getPartitions(spark, tableIdent);
} catch (ParseException e) {
throw SparkExceptionUtil.toUncheckedException(e, "Unable to parse table identifier: %s", table);
}
}
/**
* Returns all partitions in the table.
*
* @param spark a Spark session
* @param tableIdent a table identifier
* @return all table's partitions
*/
public static List<SparkPartition> getPartitions(SparkSession spark, TableIdentifier tableIdent) {
try {
SessionCatalog catalog = spark.sessionState().catalog();
CatalogTable catalogTable = catalog.getTableMetadata(tableIdent);
Seq<CatalogTablePartition> partitions = catalog.listPartitions(tableIdent, Option.empty());
return JavaConverters
.seqAsJavaListConverter(partitions)
.asJava()
.stream()
.map(catalogPartition -> toSparkPartition(catalogPartition, catalogTable))
.collect(Collectors.toList());
} catch (NoSuchDatabaseException e) {
throw SparkExceptionUtil.toUncheckedException(e, "Unknown table: %s. Database not found in catalog.", tableIdent);
} catch (NoSuchTableException e) {
throw SparkExceptionUtil.toUncheckedException(e, "Unknown table: %s. Table not found in catalog.", tableIdent);
}
}
/**
* Returns partitions that match the specified 'predicate'.
*
* @param spark a Spark session
* @param table a table name and (optional) database
* @param predicate a predicate on partition columns
* @return matching table's partitions
*/
public static List<SparkPartition> getPartitionsByFilter(SparkSession spark, String table, String predicate) {
TableIdentifier tableIdent;
try {
tableIdent = spark.sessionState().sqlParser().parseTableIdentifier(table);
} catch (ParseException e) {
throw SparkExceptionUtil.toUncheckedException(e, "Unable to parse the table identifier: %s", table);
}
Expression unresolvedPredicateExpr;
try {
unresolvedPredicateExpr = spark.sessionState().sqlParser().parseExpression(predicate);
} catch (ParseException e) {
throw SparkExceptionUtil.toUncheckedException(e, "Unable to parse the predicate expression: %s", predicate);
}
Expression resolvedPredicateExpr = resolveAttrs(spark, table, unresolvedPredicateExpr);
return getPartitionsByFilter(spark, tableIdent, resolvedPredicateExpr);
}
/**
* Returns partitions that match the specified 'predicate'.
*
* @param spark a Spark session
* @param tableIdent a table identifier
* @param predicateExpr a predicate expression on partition columns
* @return matching table's partitions
*/
public static List<SparkPartition> getPartitionsByFilter(SparkSession spark, TableIdentifier tableIdent,
Expression predicateExpr) {
try {
SessionCatalog catalog = spark.sessionState().catalog();
CatalogTable catalogTable = catalog.getTableMetadata(tableIdent);
Expression resolvedPredicateExpr;
if (!predicateExpr.resolved()) {
resolvedPredicateExpr = resolveAttrs(spark, tableIdent.quotedString(), predicateExpr);
} else {
resolvedPredicateExpr = predicateExpr;
}
Seq<Expression> predicates = JavaConverters
.collectionAsScalaIterableConverter(ImmutableList.of(resolvedPredicateExpr))
.asScala().toSeq();
Seq<CatalogTablePartition> partitions = catalog.listPartitionsByFilter(tableIdent, predicates);
return JavaConverters
.seqAsJavaListConverter(partitions)
.asJava()
.stream()
.map(catalogPartition -> toSparkPartition(catalogPartition, catalogTable))
.collect(Collectors.toList());
} catch (NoSuchDatabaseException e) {
throw SparkExceptionUtil.toUncheckedException(e, "Unknown table: %s. Database not found in catalog.", tableIdent);
} catch (NoSuchTableException e) {
throw SparkExceptionUtil.toUncheckedException(e, "Unknown table: %s. Table not found in catalog.", tableIdent);
}
}
/**
* Returns the data files in a partition by listing the partition location.
*
* For Parquet and ORC partitions, this will read metrics from the file footer. For Avro partitions,
* metrics are set to null.
* @deprecated use {@link TableMigrationUtil#listPartition(Map, String, String, PartitionSpec, Configuration,
* MetricsConfig, NameMapping)}
*
* @param partition a partition
* @param conf a serializable Hadoop conf
* @param metricsConfig a metrics conf
* @return a List of DataFile
*/
@Deprecated
public static List<DataFile> listPartition(SparkPartition partition, PartitionSpec spec,
SerializableConfiguration conf, MetricsConfig metricsConfig) {
return listPartition(partition, spec, conf, metricsConfig, null);
}
/**
* Returns the data files in a partition by listing the partition location.
*
* For Parquet and ORC partitions, this will read metrics from the file footer. For Avro partitions,
* metrics are set to null.
* @deprecated use {@link TableMigrationUtil#listPartition(Map, String, String, PartitionSpec, Configuration,
* MetricsConfig, NameMapping)}
*
* @param partition a partition
* @param conf a serializable Hadoop conf
* @param metricsConfig a metrics conf
* @param mapping a name mapping
* @return a List of DataFile
*/
@Deprecated
public static List<DataFile> listPartition(SparkPartition partition, PartitionSpec spec,
SerializableConfiguration conf, MetricsConfig metricsConfig,
NameMapping mapping) {
return TableMigrationUtil.listPartition(partition.values, partition.uri, partition.format, spec, conf.get(),
metricsConfig, mapping);
}
private static SparkPartition toSparkPartition(CatalogTablePartition partition, CatalogTable table) {
Option<URI> locationUri = partition.storage().locationUri();
Option<String> serde = partition.storage().serde();
Preconditions.checkArgument(locationUri.nonEmpty(), "Partition URI should be defined");
Preconditions.checkArgument(serde.nonEmpty() || table.provider().nonEmpty(),
"Partition format should be defined");
String uri = Util.uriToString(locationUri.get());
String format = serde.nonEmpty() ? serde.get() : table.provider().get();
Map<String, String> partitionSpec = JavaConverters.mapAsJavaMapConverter(partition.spec()).asJava();
return new SparkPartition(partitionSpec, uri, format);
}
private static Expression resolveAttrs(SparkSession spark, String table, Expression expr) {
Function2<String, String, Object> resolver = spark.sessionState().analyzer().resolver();
LogicalPlan plan = spark.table(table).queryExecution().analyzed();
return expr.transform(new AbstractPartialFunction<Expression, Expression>() {
@Override
public Expression apply(Expression attr) {
UnresolvedAttribute unresolvedAttribute = (UnresolvedAttribute) attr;
Option<NamedExpression> namedExpressionOption = plan.resolve(unresolvedAttribute.nameParts(), resolver);
if (namedExpressionOption.isDefined()) {
return (Expression) namedExpressionOption.get();
} else {
throw new IllegalArgumentException(
String.format("Could not resolve %s using columns: %s", attr, plan.output()));
}
}
@Override
public boolean isDefinedAt(Expression attr) {
return attr instanceof UnresolvedAttribute;
}
});
}
private static Iterator<ManifestFile> buildManifest(SerializableConfiguration conf, PartitionSpec spec,
String basePath, Iterator<Tuple2<String, DataFile>> fileTuples) {
if (fileTuples.hasNext()) {
FileIO io = new HadoopFileIO(conf.get());
TaskContext ctx = TaskContext.get();
String suffix = String.format("stage-%d-task-%d-manifest", ctx.stageId(), ctx.taskAttemptId());
Path location = new Path(basePath, suffix);
String outputPath = FileFormat.AVRO.addExtension(location.toString());
OutputFile outputFile = io.newOutputFile(outputPath);
ManifestWriter<DataFile> writer = ManifestFiles.write(spec, outputFile);
try (ManifestWriter<DataFile> writerRef = writer) {
fileTuples.forEachRemaining(fileTuple -> writerRef.add(fileTuple._2));
} catch (IOException e) {
throw SparkExceptionUtil.toUncheckedException(e, "Unable to close the manifest writer: %s", outputPath);
}
ManifestFile manifestFile = writer.toManifestFile();
return ImmutableList.of(manifestFile).iterator();
} else {
return Collections.emptyIterator();
}
}
/**
* Import files from an existing Spark table to an Iceberg table.
*
* The import uses the Spark session to get table metadata. It assumes no
* operation is going on the original and target table and thus is not
* thread-safe.
*
* @param spark a Spark session
* @param sourceTableIdent an identifier of the source Spark table
* @param targetTable an Iceberg table where to import the data
* @param stagingDir a staging directory to store temporary manifest files
* @param partitionFilter only import partitions whose values match those in the map, can be partially defined
*/
public static void importSparkTable(SparkSession spark, TableIdentifier sourceTableIdent, Table targetTable,
String stagingDir, Map<String, String> partitionFilter) {
SessionCatalog catalog = spark.sessionState().catalog();
String db = sourceTableIdent.database().nonEmpty() ?
sourceTableIdent.database().get() :
catalog.getCurrentDatabase();
TableIdentifier sourceTableIdentWithDB = new TableIdentifier(sourceTableIdent.table(), Some.apply(db));
if (!catalog.tableExists(sourceTableIdentWithDB)) {
throw new org.apache.iceberg.exceptions.NoSuchTableException("Table %s does not exist", sourceTableIdentWithDB);
}
try {
PartitionSpec spec = SparkSchemaUtil.specForTable(spark, sourceTableIdentWithDB.unquotedString());
if (spec == PartitionSpec.unpartitioned()) {
importUnpartitionedSparkTable(spark, sourceTableIdentWithDB, targetTable);
} else {
List<SparkPartition> sourceTablePartitions = getPartitions(spark, sourceTableIdent);
Preconditions.checkArgument(!sourceTablePartitions.isEmpty(),
"Cannot find any partitions in table %s", sourceTableIdent);
List<SparkPartition> filteredPartitions = filterPartitions(sourceTablePartitions, partitionFilter);
Preconditions.checkArgument(!filteredPartitions.isEmpty(),
"Cannot find any partitions which match the given filter. Partition filter is %s",
MAP_JOINER.join(partitionFilter));
importSparkPartitions(spark, filteredPartitions, targetTable, spec, stagingDir);
}
} catch (AnalysisException e) {
throw SparkExceptionUtil.toUncheckedException(
e, "Unable to get partition spec for table: %s", sourceTableIdentWithDB);
}
}
/**
* Import files from an existing Spark table to an Iceberg table.
*
* The import uses the Spark session to get table metadata. It assumes no
* operation is going on the original and target table and thus is not
* thread-safe.
*
* @param spark a Spark session
* @param sourceTableIdent an identifier of the source Spark table
* @param targetTable an Iceberg table where to import the data
* @param stagingDir a staging directory to store temporary manifest files
*/
public static void importSparkTable(SparkSession spark, TableIdentifier sourceTableIdent, Table targetTable,
String stagingDir) {
importSparkTable(spark, sourceTableIdent, targetTable, stagingDir, Collections.emptyMap());
}
private static void importUnpartitionedSparkTable(SparkSession spark, TableIdentifier sourceTableIdent,
Table targetTable) {
try {
CatalogTable sourceTable = spark.sessionState().catalog().getTableMetadata(sourceTableIdent);
Option<String> format =
sourceTable.storage().serde().nonEmpty() ? sourceTable.storage().serde() : sourceTable.provider();
Preconditions.checkArgument(format.nonEmpty(), "Could not determine table format");
Map<String, String> partition = Collections.emptyMap();
PartitionSpec spec = PartitionSpec.unpartitioned();
Configuration conf = spark.sessionState().newHadoopConf();
MetricsConfig metricsConfig = MetricsConfig.fromProperties(targetTable.properties());
String nameMappingString = targetTable.properties().get(TableProperties.DEFAULT_NAME_MAPPING);
NameMapping nameMapping = nameMappingString != null ? NameMappingParser.fromJson(nameMappingString) : null;
List<DataFile> files = TableMigrationUtil.listPartition(
partition, Util.uriToString(sourceTable.location()), format.get(), spec, conf, metricsConfig, nameMapping);
AppendFiles append = targetTable.newAppend();
files.forEach(append::appendFile);
append.commit();
} catch (NoSuchDatabaseException e) {
throw SparkExceptionUtil.toUncheckedException(
e, "Unknown table: %s. Database not found in catalog.", sourceTableIdent);
} catch (NoSuchTableException e) {
throw SparkExceptionUtil.toUncheckedException(
e, "Unknown table: %s. Table not found in catalog.", sourceTableIdent);
}
}
/**
* Import files from given partitions to an Iceberg table.
*
* @param spark a Spark session
* @param partitions partitions to import
* @param targetTable an Iceberg table where to import the data
* @param spec a partition spec
* @param stagingDir a staging directory to store temporary manifest files
*/
public static void importSparkPartitions(SparkSession spark, List<SparkPartition> partitions, Table targetTable,
PartitionSpec spec, String stagingDir) {
Configuration conf = spark.sessionState().newHadoopConf();
SerializableConfiguration serializableConf = new SerializableConfiguration(conf);
int parallelism = Math.min(partitions.size(), spark.sessionState().conf().parallelPartitionDiscoveryParallelism());
int numShufflePartitions = spark.sessionState().conf().numShufflePartitions();
MetricsConfig metricsConfig = MetricsConfig.fromProperties(targetTable.properties());
String nameMappingString = targetTable.properties().get(TableProperties.DEFAULT_NAME_MAPPING);
NameMapping nameMapping = nameMappingString != null ? NameMappingParser.fromJson(nameMappingString) : null;
JavaSparkContext sparkContext = JavaSparkContext.fromSparkContext(spark.sparkContext());
JavaRDD<SparkPartition> partitionRDD = sparkContext.parallelize(partitions, parallelism);
Dataset<SparkPartition> partitionDS = spark.createDataset(
partitionRDD.rdd(),
Encoders.javaSerialization(SparkPartition.class));
List<ManifestFile> manifests = partitionDS
.flatMap((FlatMapFunction<SparkPartition, DataFile>) sparkPartition ->
listPartition(sparkPartition, spec, serializableConf, metricsConfig, nameMapping).iterator(),
Encoders.javaSerialization(DataFile.class))
.repartition(numShufflePartitions)
.map((MapFunction<DataFile, Tuple2<String, DataFile>>) file ->
Tuple2.apply(file.path().toString(), file),
Encoders.tuple(Encoders.STRING(), Encoders.javaSerialization(DataFile.class)))
.orderBy(col("_1"))
.mapPartitions(
(MapPartitionsFunction<Tuple2<String, DataFile>, ManifestFile>) fileTuple ->
buildManifest(serializableConf, spec, stagingDir, fileTuple),
Encoders.javaSerialization(ManifestFile.class))
.collectAsList();
try {
boolean snapshotIdInheritanceEnabled = PropertyUtil.propertyAsBoolean(
targetTable.properties(),
TableProperties.SNAPSHOT_ID_INHERITANCE_ENABLED,
TableProperties.SNAPSHOT_ID_INHERITANCE_ENABLED_DEFAULT);
AppendFiles append = targetTable.newAppend();
manifests.forEach(append::appendManifest);
append.commit();
if (!snapshotIdInheritanceEnabled) {
// delete original manifests as they were rewritten before the commit
deleteManifests(targetTable.io(), manifests);
}
} catch (Throwable e) {
deleteManifests(targetTable.io(), manifests);
throw e;
}
}
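// Example invocation (sketch; getPartitions and the staging path are
// illustrative assumptions, not taken from this file):
//
//   List<SparkPartition> partitions = getPartitions(spark, sourceTableIdent);
//   importSparkPartitions(spark, partitions, targetTable, targetTable.spec(), "/tmp/staging");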
public static List<SparkPartition> filterPartitions(List<SparkPartition> partitions,
Map<String, String> partitionFilter) {
if (partitionFilter.isEmpty()) {
return partitions;
} else {
return partitions.stream()
.filter(p -> p.getValues().entrySet().containsAll(partitionFilter.entrySet()))
.collect(Collectors.toList());
}
}
private static void deleteManifests(FileIO io, List<ManifestFile> manifests) {
Tasks.foreach(manifests)
.noRetry()
.suppressFailureWhenFinished()
.run(item -> io.deleteFile(item.path()));
}
/**
* Class representing a table partition.
*/
public static class SparkPartition implements Serializable {
private final Map<String, String> values;
private final String uri;
private final String format;
public SparkPartition(Map<String, String> values, String uri, String format) {
this.values = ImmutableMap.copyOf(values);
this.uri = uri;
this.format = format;
}
public Map<String, String> getValues() {
return values;
}
public String getUri() {
return uri;
}
public String getFormat() {
return format;
}
@Override
public String toString() {
return MoreObjects.toStringHelper(this)
.add("values", values)
.add("uri", uri)
.add("format", format)
.toString();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
SparkPartition that = (SparkPartition) o;
return Objects.equal(values, that.values) &&
Objects.equal(uri, that.uri) &&
Objects.equal(format, that.format);
}
@Override
public int hashCode() {
return Objects.hashCode(values, uri, format);
}
}
}
| 1 | 38,577 | checking for ref. equality is probably fine here, but it takes a reader longer to navigate the code and figure out whether ref equality is really wanted here vs just using `equals()` | apache-iceberg | java |
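For context on the reviewer's equality point: `==` on objects answers "same instance?", while `equals()` answers "same logical value?". A minimal Java sketch of the distinction, with illustrative names:

public class EqualityDemo {
    public static void main(String[] args) {
        String a = new String("partition");
        String b = new String("partition");
        System.out.println(a == b);      // false: two distinct objects
        System.out.println(a.equals(b)); // true: identical contents
    }
}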
@@ -60,12 +60,14 @@ type (
matchingClient matchingservice.MatchingServiceClient
config *configs.Config
searchAttributesProvider searchattribute.Provider
+ workflowDeleteManager workflow.DeleteManager
}
)
func newTransferQueueTaskExecutorBase(
shard shard.Context,
historyEngine *historyEngineImpl,
+ workflowDeleteManager workflow.DeleteManager,
logger log.Logger,
metricsClient metrics.Client,
config *configs.Config, | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package history
import (
"context"
"time"
commonpb "go.temporal.io/api/common/v1"
enumspb "go.temporal.io/api/enums/v1"
taskqueuepb "go.temporal.io/api/taskqueue/v1"
"go.temporal.io/server/api/matchingservice/v1"
m "go.temporal.io/server/api/matchingservice/v1"
"go.temporal.io/server/common"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/metrics"
"go.temporal.io/server/common/namespace"
"go.temporal.io/server/common/searchattribute"
"go.temporal.io/server/service/history/configs"
"go.temporal.io/server/service/history/shard"
"go.temporal.io/server/service/history/tasks"
"go.temporal.io/server/service/history/workflow"
"go.temporal.io/server/service/worker/archiver"
)
const (
transferActiveTaskDefaultTimeout = 20 * time.Second
)
type (
transferQueueTaskExecutorBase struct {
shard shard.Context
historyService *historyEngineImpl
cache workflow.Cache
logger log.Logger
metricsClient metrics.Client
matchingClient matchingservice.MatchingServiceClient
config *configs.Config
searchAttributesProvider searchattribute.Provider
}
)
func newTransferQueueTaskExecutorBase(
shard shard.Context,
historyEngine *historyEngineImpl,
logger log.Logger,
metricsClient metrics.Client,
config *configs.Config,
matchingClient matchingservice.MatchingServiceClient,
) *transferQueueTaskExecutorBase {
return &transferQueueTaskExecutorBase{
shard: shard,
historyService: historyEngine,
cache: historyEngine.historyCache,
logger: logger,
metricsClient: metricsClient,
matchingClient: matchingClient,
config: config,
searchAttributesProvider: shard.GetSearchAttributesProvider(),
}
}
func (t *transferQueueTaskExecutorBase) getNamespaceIDAndWorkflowExecution(
task tasks.Task,
) (namespace.ID, commonpb.WorkflowExecution) {
return namespace.ID(task.GetNamespaceID()), commonpb.WorkflowExecution{
WorkflowId: task.GetWorkflowID(),
RunId: task.GetRunID(),
}
}
func (t *transferQueueTaskExecutorBase) pushActivity(
task *tasks.ActivityTask,
activityScheduleToStartTimeout *time.Duration,
) error {
ctx, cancel := context.WithTimeout(context.Background(), transferActiveTaskDefaultTimeout)
defer cancel()
_, err := t.matchingClient.AddActivityTask(ctx, &m.AddActivityTaskRequest{
NamespaceId: task.TargetNamespaceID,
SourceNamespaceId: task.NamespaceID,
Execution: &commonpb.WorkflowExecution{
WorkflowId: task.WorkflowID,
RunId: task.RunID,
},
TaskQueue: &taskqueuepb.TaskQueue{
Name: task.TaskQueue,
Kind: enumspb.TASK_QUEUE_KIND_NORMAL,
},
ScheduleId: task.ScheduleID,
ScheduleToStartTimeout: activityScheduleToStartTimeout,
})
return err
}
func (t *transferQueueTaskExecutorBase) pushWorkflowTask(
task *tasks.WorkflowTask,
taskqueue *taskqueuepb.TaskQueue,
workflowTaskScheduleToStartTimeout *time.Duration,
) error {
ctx, cancel := context.WithTimeout(context.Background(), transferActiveTaskDefaultTimeout)
defer cancel()
_, err := t.matchingClient.AddWorkflowTask(ctx, &m.AddWorkflowTaskRequest{
NamespaceId: task.NamespaceID,
Execution: &commonpb.WorkflowExecution{
WorkflowId: task.WorkflowID,
RunId: task.RunID,
},
TaskQueue: taskqueue,
ScheduleId: task.ScheduleID,
ScheduleToStartTimeout: workflowTaskScheduleToStartTimeout,
})
return err
}
func (t *transferQueueTaskExecutorBase) recordWorkflowClosed(
namespaceID namespace.ID,
workflowID string,
runID string,
workflowTypeName string,
startTime time.Time,
executionTime time.Time,
endTime time.Time,
status enumspb.WorkflowExecutionStatus,
historyLength int64,
visibilityMemo *commonpb.Memo,
searchAttributes *commonpb.SearchAttributes,
) error {
namespaceEntry, err := t.shard.GetNamespaceRegistry().GetNamespaceByID(namespaceID)
if err != nil {
return err
}
clusterConfiguredForVisibilityArchival := t.shard.GetArchivalMetadata().GetVisibilityConfig().ClusterConfiguredForArchival()
namespaceConfiguredForVisibilityArchival := namespaceEntry.VisibilityArchivalState().State == enumspb.ARCHIVAL_STATE_ENABLED
archiveVisibility := clusterConfiguredForVisibilityArchival && namespaceConfiguredForVisibilityArchival
if !archiveVisibility {
return nil
}
ctx, cancel := context.WithTimeout(context.Background(), t.config.TransferProcessorVisibilityArchivalTimeLimit())
defer cancel()
saTypeMap, err := t.searchAttributesProvider.GetSearchAttributes(t.config.DefaultVisibilityIndexName, false)
if err != nil {
return err
}
// Setting search attribute types here because the archival client needs to stringify them
// and it might not have access to type map (i.e. type needs to be embedded).
searchattribute.ApplyTypeMap(searchAttributes, saTypeMap)
_, err = t.historyService.archivalClient.Archive(ctx, &archiver.ClientRequest{
ArchiveRequest: &archiver.ArchiveRequest{
NamespaceID: namespaceID.String(),
Namespace: namespaceEntry.Name().String(),
WorkflowID: workflowID,
RunID: runID,
WorkflowTypeName: workflowTypeName,
StartTime: startTime,
ExecutionTime: executionTime,
CloseTime: endTime,
Status: status,
HistoryLength: historyLength,
Memo: visibilityMemo,
SearchAttributes: searchAttributes,
VisibilityURI: namespaceEntry.VisibilityArchivalState().URI,
HistoryURI: namespaceEntry.HistoryArchivalState().URI,
Targets: []archiver.ArchivalTarget{archiver.ArchiveTargetVisibility},
},
CallerService: common.HistoryServiceName,
AttemptArchiveInline: true, // archive visibility inline by default
})
return err
}
| 1 | 13,825 | looks like it's not used? Do we plan to use it in the future? | temporalio-temporal | go |
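On the unused-field question: an injected dependency only earns its place once the constructor stores and uses it. A self-contained Go sketch of that wiring; the stub types are illustrative, not the real temporal interfaces:

package main

import "fmt"

// deleteManager stands in for workflow.DeleteManager in this sketch.
type deleteManager interface{ DeleteWorkflowExecution() error }

type executorBase struct {
	deleteManager deleteManager
}

// newExecutorBase stores the injected manager; without this assignment the
// new constructor parameter is dead weight.
func newExecutorBase(dm deleteManager) *executorBase {
	return &executorBase{deleteManager: dm}
}

func main() {
	base := newExecutorBase(nil)
	fmt.Println(base.deleteManager == nil) // true: nothing injected yet
}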
@@ -34,8 +34,8 @@ import (
)
type (
- // Config supports common options that apply to statsd exporters.
- Config struct {
+ // Options supports common options that apply to statsd exporters.
+ Options struct {
// URL describes the destination for exporting statsd data.
// e.g., udp://host:port
// tcp://host:port | 1 | // Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package statsd
// See https://github.com/b/statsd_spec for the best-available statsd
// syntax specification. See also
// https://github.com/statsd/statsd/edit/master/docs/metric_types.md
import (
"bytes"
"context"
"fmt"
"io"
"net"
"net/url"
"strconv"
"go.opentelemetry.io/otel/api/core"
"go.opentelemetry.io/otel/api/unit"
export "go.opentelemetry.io/otel/sdk/export/metric"
"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
)
type (
// Config supports common options that apply to statsd exporters.
Config struct {
// URL describes the destination for exporting statsd data.
// e.g., udp://host:port
// tcp://host:port
// unix:///socket/path
URL string
// Writer is an alternative to providing a URL. When Writer is
// non-nil, URL will be ignored and the exporter will write to
// the configured Writer interface.
Writer io.Writer
// MaxPacketSize limits the packet size for packet-oriented transports.
MaxPacketSize int
// TODO support Dial and Write timeouts
}
// Exporter is a common type meant to implement concrete statsd
// exporters.
Exporter struct {
adapter Adapter
config Config
conn net.Conn
writer io.Writer
buffer bytes.Buffer
}
// Adapter supports statsd syntax variations, primarily plain
// statsd vs. dogstatsd.
Adapter interface {
AppendName(export.Record, *bytes.Buffer)
AppendTags(export.Record, *bytes.Buffer)
}
)
const (
formatCounter = "c"
formatHistogram = "h"
formatGauge = "g"
formatTiming = "ms"
MaxPacketSize = 1 << 16
)
var (
_ export.Exporter = &Exporter{}
ErrInvalidScheme = fmt.Errorf("invalid statsd transport")
)
// NewExporter returns a common implementation for exporters that export
// statsd syntax.
func NewExporter(config Config, adapter Adapter) (*Exporter, error) {
if config.MaxPacketSize <= 0 {
config.MaxPacketSize = MaxPacketSize
}
var writer io.Writer
var conn net.Conn
var err error
if config.Writer != nil {
writer = config.Writer
} else {
conn, err = dial(config.URL)
if conn != nil {
writer = conn
}
}
// TODO: If err != nil, we return it _with_ a valid exporter; the
// exporter should attempt to re-dial if it's retryable. Add a
// Start() and Stop() API.
return &Exporter{
adapter: adapter,
config: config,
conn: conn,
writer: writer,
}, err
}
// dial connects to a statsd service using several common network
// types. Presently "udp" and "unix" datagram socket connections are
// supported.
func dial(endpoint string) (net.Conn, error) {
dest, err := url.Parse(endpoint)
if err != nil {
return nil, err
}
// TODO: Support tcp destination, need configurable timeouts first.
scheme := dest.Scheme
switch scheme {
case "udp", "udp4", "udp6":
udpAddr, err := net.ResolveUDPAddr(scheme, dest.Host)
locAddr := &net.UDPAddr{}
if err != nil {
return nil, err
}
conn, err := net.DialUDP(scheme, locAddr, udpAddr)
if err != nil {
return nil, err
}
return conn, err
case "unix", "unixgram":
scheme = "unixgram"
locAddr := &net.UnixAddr{}
sockAddr, err := net.ResolveUnixAddr(scheme, dest.Path)
if err != nil {
return nil, err
}
conn, err := net.DialUnix(scheme, locAddr, sockAddr)
if err != nil {
return nil, err
}
return conn, err
}
return nil, ErrInvalidScheme
}
// Export is common code for any statsd-based metric.Exporter implementation.
func (e *Exporter) Export(_ context.Context, checkpointSet export.CheckpointSet) error {
buf := &e.buffer
buf.Reset()
var aggErr error
var sendErr error
checkpointSet.ForEach(func(rec export.Record) {
before := buf.Len()
if err := e.formatMetric(rec, buf); err != nil && aggErr == nil {
aggErr = err
return
}
if buf.Len() < e.config.MaxPacketSize {
return
}
if before == 0 {
// A single metric >= packet size
if err := e.send(buf.Bytes()); err != nil && sendErr == nil {
sendErr = err
}
buf.Reset()
return
}
// Send and copy the leftover
if err := e.send(buf.Bytes()[:before]); err != nil && sendErr == nil {
sendErr = err
}
leftover := buf.Len() - before
copy(buf.Bytes()[0:leftover], buf.Bytes()[before:])
buf.Truncate(leftover)
})
if err := e.send(buf.Bytes()); err != nil && sendErr == nil {
sendErr = err
}
if sendErr != nil {
return sendErr
}
return aggErr
}
// send writes a complete buffer to the writer as a blocking call.
func (e *Exporter) send(buf []byte) error {
for len(buf) != 0 {
n, err := e.writer.Write(buf)
if err != nil {
return err
}
buf = buf[n:]
}
return nil
}
// formatMetric formats an individual export record. For some records
// this will emit a single statistic, for some it will emit more than
// one.
func (e *Exporter) formatMetric(rec export.Record, buf *bytes.Buffer) error {
desc := rec.Descriptor()
agg := rec.Aggregator()
// TODO handle non-Points Distribution/MaxSumCount by
// formatting individual quantiles, the sum, and the count as
// single statistics. For the dogstatsd variation, assuming
// open-source systems like Veneur add support, figure out the
// proper encoding for "d"-type distribution data.
if pts, ok := agg.(aggregator.Points); ok {
var format string
if desc.Unit() == unit.Milliseconds {
format = formatTiming
} else {
format = formatHistogram
}
points, err := pts.Points()
if err != nil {
return err
}
for _, pt := range points {
e.formatSingleStat(rec, pt, format, buf)
}
} else if sum, ok := agg.(aggregator.Sum); ok {
sum, err := sum.Sum()
if err != nil {
return err
}
e.formatSingleStat(rec, sum, formatCounter, buf)
} else if lv, ok := agg.(aggregator.LastValue); ok {
lv, _, err := lv.LastValue()
if err != nil {
return err
}
e.formatSingleStat(rec, lv, formatGauge, buf)
}
return nil
}
// formatSingleStat encodes a single item of statsd data followed by a
// newline.
func (e *Exporter) formatSingleStat(rec export.Record, val core.Number, fmtStr string, buf *bytes.Buffer) {
e.adapter.AppendName(rec, buf)
_, _ = buf.WriteRune(':')
writeNumber(buf, val, rec.Descriptor().NumberKind())
_, _ = buf.WriteRune('|')
_, _ = buf.WriteString(fmtStr)
e.adapter.AppendTags(rec, buf)
_, _ = buf.WriteRune('\n')
}
func writeNumber(buf *bytes.Buffer, num core.Number, kind core.NumberKind) {
var tmp [128]byte
var conv []byte
switch kind {
case core.Int64NumberKind:
conv = strconv.AppendInt(tmp[:0], num.AsInt64(), 10)
case core.Float64NumberKind:
conv = strconv.AppendFloat(tmp[:0], num.AsFloat64(), 'g', -1, 64)
case core.Uint64NumberKind:
conv = strconv.AppendUint(tmp[:0], num.AsUint64(), 10)
}
_, _ = buf.Write(conv)
}
| 1 | 10,856 | I think this is an unrelated remark. I thought we had moved toward the practice of using "Config" as the structure name, and Option as a functional argument (`func(*Config)`), and Options as a `[]Option`. See api/trace `StartConfig` and `StartOption`, for example. That's why I prefer this struct be called Config. | open-telemetry-opentelemetry-go | go |
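A self-contained Go sketch of the Config/Option convention the reviewer describes: Config stays the struct name, Option is a functional argument, and an Options type would simply be []Option. All names here are illustrative:

package main

import "fmt"

type Config struct {
	URL           string
	MaxPacketSize int
}

// Option mutates a Config; an Options type would simply be []Option.
type Option func(*Config)

func WithURL(url string) Option {
	return func(c *Config) { c.URL = url }
}

func WithMaxPacketSize(size int) Option {
	return func(c *Config) { c.MaxPacketSize = size }
}

// newConfig applies the options over the defaults.
func newConfig(opts ...Option) Config {
	c := Config{MaxPacketSize: 1 << 16}
	for _, opt := range opts {
		opt(&c)
	}
	return c
}

func main() {
	cfg := newConfig(WithURL("udp://localhost:8125"))
	fmt.Printf("%+v\n", cfg)
}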
@@ -2,7 +2,8 @@ import random
import string
from kinto.core.storage import generators, exceptions
-from pyramid import httpexceptions
+from pyramid.httpexceptions import (HTTPNotFound)
+from kinto.core.errors import http_error, ERRORS
class NameGenerator(generators.Generator): | 1 | import random
import string
from kinto.core.storage import generators, exceptions
from pyramid import httpexceptions
class NameGenerator(generators.Generator):
def __call__(self):
ascii_letters = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
alphabet = ascii_letters + string.digits + '-_'
letters = [random.choice(ascii_letters + string.digits)]
letters += [random.choice(alphabet) for x in range(7)]
return ''.join(letters)
class RelaxedUUID(generators.UUID4):
"""A generator that generates UUIDs but accepts any string.
"""
regexp = generators.Generator.regexp
def object_exists_or_404(request, collection_id, object_id, parent_id=''):
storage = request.registry.storage
try:
return storage.get(collection_id=collection_id,
parent_id=parent_id,
object_id=object_id)
except exceptions.RecordNotFoundError:
# XXX: We gave up putting details about parent id here (See #53).
raise httpexceptions.HTTPNotFound()
| 1 | 9,979 | nitpick: superfluous parenthesis | Kinto-kinto | py |
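The style the nitpick asks for: parentheses around imports only earn their keep when the list wraps across lines.

# Single name: no parentheses needed.
from pyramid.httpexceptions import HTTPNotFound

# Parentheses are for multi-line import lists.
from kinto.core.errors import (
    ERRORS,
    http_error,
)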
@@ -219,6 +219,7 @@ namespace Datadog.Trace.ClrProfiler.Integrations
private static void DecorateSpan(Span span, GraphQLTags tags)
{
span.Type = SpanTypes.GraphQL;
+ span.SetMetric(Tags.Measured, 1);
}
private static Scope CreateScopeFromValidate(object document) | 1 | using System;
using System.Collections.Generic;
using System.Text;
using System.Threading.Tasks;
using Datadog.Trace.ClrProfiler.Emit;
using Datadog.Trace.ClrProfiler.Helpers;
using Datadog.Trace.Logging;
namespace Datadog.Trace.ClrProfiler.Integrations
{
/// <summary>
/// Tracing integration for GraphQL.Server.Transports.AspNetCore
/// </summary>
public static class GraphQLIntegration
{
internal const string IntegrationName = "GraphQL";
private const string ServiceName = "graphql";
private const string Major2 = "2";
private const string Major2Minor3 = "2.3";
private const string ParseOperationName = "graphql.parse"; // Instrumentation not yet implemented
private const string ValidateOperationName = "graphql.validate";
private const string ExecuteOperationName = "graphql.execute";
private const string ResolveOperationName = "graphql.resolve"; // Instrumentation not yet implemented
private const string GraphQLAssemblyName = "GraphQL";
private const string GraphQLDocumentValidatorInterfaceName = "GraphQL.Validation.IDocumentValidator";
private const string GraphQLExecutionResultName = "GraphQL.ExecutionResult";
private const string GraphQLExecutionStrategyInterfaceName = "GraphQL.Execution.IExecutionStrategy";
private const string GraphQLValidationResultInterfaceName = "GraphQL.Validation.IValidationResult";
private const string TaskOfGraphQLExecutionResult = "System.Threading.Tasks.Task`1<" + GraphQLExecutionResultName + ">";
private static readonly Vendors.Serilog.ILogger Log = DatadogLogging.GetLogger(typeof(GraphQLIntegration));
/// <summary>
/// Wrap the original method by adding instrumentation code around it.
/// </summary>
/// <param name="documentValidator">The instance of GraphQL.Validation.IDocumentValidator.</param>
/// <param name="originalQuery">The source of the original GraphQL query.</param>
/// <param name="schema">The GraphQL schema.</param>
/// <param name="document">The GraphQL document.</param>
/// <param name="rules">The list of validation rules.</param>
/// <param name="userContext">The user context.</param>
/// <param name="inputs">The input variables.</param>
/// <param name="opCode">The OpCode used in the original method call.</param>
/// <param name="mdToken">The mdToken of the original method call.</param>
/// <param name="moduleVersionPtr">A pointer to the module version GUID.</param>
/// <returns>The original method's return value.</returns>
[InterceptMethod(
TargetAssembly = GraphQLAssemblyName,
TargetType = GraphQLDocumentValidatorInterfaceName,
TargetSignatureTypes = new[] { GraphQLValidationResultInterfaceName, ClrNames.String, "GraphQL.Types.ISchema", "GraphQL.Language.AST.Document", "System.Collections.Generic.IEnumerable`1<GraphQL.Validation.IValidationRule>", ClrNames.Ignore, "GraphQL.Inputs" },
TargetMinimumVersion = Major2Minor3,
TargetMaximumVersion = Major2)]
public static object Validate(
object documentValidator,
object originalQuery,
object schema,
object document,
object rules,
object userContext,
object inputs,
int opCode,
int mdToken,
long moduleVersionPtr)
{
if (documentValidator == null) { throw new ArgumentNullException(nameof(documentValidator)); }
const string methodName = nameof(Validate);
// At runtime, get a Type object for GraphQL.ExecutionResult
var documentValidatorInstanceType = documentValidator.GetType();
Func<object, object, object, object, object, object, object, object> instrumentedMethod;
try
{
instrumentedMethod =
MethodBuilder<Func<object, object, object, object, object, object, object, object>>
.Start(moduleVersionPtr, mdToken, opCode, methodName)
.WithConcreteType(documentValidatorInstanceType)
.WithParameters(originalQuery, schema, document, rules, userContext, inputs)
.WithNamespaceAndNameFilters(
GraphQLValidationResultInterfaceName,
ClrNames.String,
"GraphQL.Types.ISchema",
"GraphQL.Language.AST.Document",
"System.Collections.Generic.IEnumerable`1",
ClrNames.Ignore,
"GraphQL.Inputs")
.Build();
}
catch (Exception ex)
{
Log.ErrorRetrievingMethod(
exception: ex,
moduleVersionPointer: moduleVersionPtr,
mdToken: mdToken,
opCode: opCode,
instrumentedType: GraphQLDocumentValidatorInterfaceName,
methodName: methodName,
instanceType: documentValidator.GetType().AssemblyQualifiedName);
throw;
}
using (var scope = CreateScopeFromValidate(document))
{
try
{
var validationResult = instrumentedMethod(documentValidator, originalQuery, schema, document, rules, userContext, inputs);
RecordExecutionErrorsIfPresent(scope.Span, "GraphQL.Validation.ValidationError", validationResult.GetProperty("Errors").GetValueOrDefault());
return validationResult;
}
catch (Exception ex)
{
scope?.Span.SetException(ex);
throw;
}
}
}
/// <summary>
/// Wrap the original method by adding instrumentation code around it.
/// </summary>
/// <param name="executionStrategy">The instance of GraphQL.Execution.IExecutionStrategy.</param>
/// <param name="context">The execution context of the GraphQL operation.</param>
/// <param name="opCode">The OpCode used in the original method call.</param>
/// <param name="mdToken">The mdToken of the original method call.</param>
/// <param name="moduleVersionPtr">A pointer to the module version GUID.</param>
/// <returns>The original method's return value.</returns>
[InterceptMethod(
TargetAssembly = GraphQLAssemblyName,
TargetType = GraphQLExecutionStrategyInterfaceName,
TargetSignatureTypes = new[] { TaskOfGraphQLExecutionResult, "GraphQL.Execution.ExecutionContext" },
TargetMinimumVersion = Major2Minor3,
TargetMaximumVersion = Major2)]
public static object ExecuteAsync(object executionStrategy, object context, int opCode, int mdToken, long moduleVersionPtr)
{
if (executionStrategy == null) { throw new ArgumentNullException(nameof(executionStrategy)); }
const string methodName = nameof(ExecuteAsync);
// At runtime, get a Type object for GraphQL.ExecutionResult
var executionStrategyInstanceType = executionStrategy.GetType();
Type graphQLExecutionResultType;
Type executionStrategyInterfaceType;
try
{
executionStrategyInterfaceType = executionStrategy.GetInstrumentedInterface(GraphQLExecutionStrategyInterfaceName);
graphQLExecutionResultType = executionStrategyInterfaceType.Assembly.GetType(GraphQLExecutionResultName, throwOnError: true);
}
catch (Exception ex)
{
// This shouldn't happen because the GraphQL assembly should have been loaded to construct various other types;
// the profiled app will not continue working as expected without this method
Log.Error(ex, "Error finding types in the GraphQL assembly.");
throw;
}
Func<object, object, object> instrumentedMethod;
try
{
instrumentedMethod =
MethodBuilder<Func<object, object, object>>
.Start(moduleVersionPtr, mdToken, opCode, methodName)
.WithConcreteType(executionStrategyInstanceType)
.WithParameters(context)
.WithNamespaceAndNameFilters(ClrNames.GenericTask, "GraphQL.Execution.ExecutionContext")
.Build();
}
catch (Exception ex)
{
Log.ErrorRetrievingMethod(
exception: ex,
moduleVersionPointer: moduleVersionPtr,
mdToken: mdToken,
opCode: opCode,
instrumentedType: GraphQLExecutionStrategyInterfaceName,
methodName: methodName,
instanceType: executionStrategy.GetType().AssemblyQualifiedName);
throw;
}
return AsyncHelper.InvokeGenericTaskDelegate(
owningType: executionStrategyInterfaceType,
taskResultType: graphQLExecutionResultType,
nameOfIntegrationMethod: nameof(CallGraphQLExecuteAsyncInternal),
integrationType: typeof(GraphQLIntegration),
executionStrategy,
context,
instrumentedMethod);
}
private static async Task<T> CallGraphQLExecuteAsyncInternal<T>(
object executionStrategy,
object executionContext,
Func<object, object, object> originalMethod)
{
using (var scope = CreateScopeFromExecuteAsync(executionContext))
{
try
{
var task = (Task<T>)originalMethod(executionStrategy, executionContext);
var executionResult = await task.ConfigureAwait(false);
RecordExecutionErrorsIfPresent(scope.Span, "GraphQL.ExecutionError", executionContext.GetProperty("Errors").GetValueOrDefault());
return executionResult;
}
catch (Exception ex)
{
scope?.Span.SetException(ex);
throw;
}
}
}
private static void DecorateSpan(Span span, GraphQLTags tags)
{
span.Type = SpanTypes.GraphQL;
}
private static Scope CreateScopeFromValidate(object document)
{
if (!Tracer.Instance.Settings.IsIntegrationEnabled(IntegrationName))
{
// integration disabled, don't create a scope, skip this trace
return null;
}
Tracer tracer = Tracer.Instance;
string source = document.GetProperty<string>("OriginalQuery")
.GetValueOrDefault();
string serviceName = $"{tracer.DefaultServiceName}-{ServiceName}";
Scope scope = null;
try
{
var tags = new GraphQLTags();
scope = tracer.StartActiveWithTags(ValidateOperationName, serviceName: serviceName, tags: tags);
var span = scope.Span;
DecorateSpan(span, tags);
tags.Source = source;
tags.SetAnalyticsSampleRate(IntegrationName, tracer.Settings, enabledWithGlobalSetting: false);
}
catch (Exception ex)
{
Log.Error(ex, "Error creating or populating scope.");
}
return scope;
}
private static Scope CreateScopeFromExecuteAsync(object executionContext)
{
if (!Tracer.Instance.Settings.IsIntegrationEnabled(IntegrationName))
{
// integration disabled, don't create a scope, skip this trace
return null;
}
Tracer tracer = Tracer.Instance;
string source = executionContext.GetProperty("Document")
.GetProperty<string>("OriginalQuery")
.GetValueOrDefault();
string operationName = executionContext.GetProperty("Operation")
.GetProperty<string>("Name")
.GetValueOrDefault();
string operationType = executionContext.GetProperty("Operation")
.GetProperty<Enum>("OperationType")
.GetValueOrDefault()
.ToString();
string serviceName = $"{tracer.DefaultServiceName}-{ServiceName}";
Scope scope = null;
try
{
var tags = new GraphQLTags();
scope = tracer.StartActiveWithTags(ExecuteOperationName, serviceName: serviceName, tags: tags);
var span = scope.Span;
DecorateSpan(span, tags);
span.ResourceName = $"{operationType} {operationName ?? "operation"}";
tags.Source = source;
tags.OperationName = operationName;
tags.OperationType = operationType;
tags.SetAnalyticsSampleRate(IntegrationName, tracer.Settings, enabledWithGlobalSetting: false);
}
catch (Exception ex)
{
Log.Error(ex, "Error creating or populating scope.");
}
return scope;
}
private static void RecordExecutionErrorsIfPresent(Span span, string errorType, object executionErrors)
{
var errorCount = executionErrors.GetProperty<int>("Count").GetValueOrDefault();
if (errorCount > 0)
{
span.Error = true;
span.SetTag(Trace.Tags.ErrorMsg, $"{errorCount} error(s)");
span.SetTag(Trace.Tags.ErrorType, errorType);
span.SetTag(Trace.Tags.ErrorStack, ConstructErrorMessage(executionErrors));
}
}
private static string ConstructErrorMessage(object executionErrors)
{
if (executionErrors == null)
{
return string.Empty;
}
var builder = new StringBuilder();
var tab = " ";
builder.AppendLine("errors: [");
var enumerator = executionErrors.CallMethod<IEnumerator<object>>("GetEnumerator").GetValueOrDefault();
if (enumerator != null)
{
try
{
while (enumerator.MoveNext())
{
var executionError = enumerator.GetProperty("Current").GetValueOrDefault();
builder.AppendLine($"{tab}{{");
var message = executionError.GetProperty<string>("Message").GetValueOrDefault();
if (message != null)
{
builder.AppendLine($"{tab + tab}\"message\": \"{message.Replace("\r", "\\r").Replace("\n", "\\n")}\",");
}
var path = executionError.GetProperty<IEnumerable<string>>("Path").GetValueOrDefault();
if (path != null)
{
builder.AppendLine($"{tab + tab}\"path\": \"{string.Join(".", path)}\",");
}
var code = executionError.GetProperty<string>("Code").GetValueOrDefault();
if (code != null)
{
builder.AppendLine($"{tab + tab}\"code\": \"{code}\",");
}
builder.AppendLine($"{tab + tab}\"locations\": [");
var locations = executionError.GetProperty<IEnumerable<object>>("Locations").GetValueOrDefault();
if (locations != null)
{
foreach (var location in locations)
{
var line = location.GetProperty<int>("Line").GetValueOrDefault();
var column = location.GetProperty<int>("Column").GetValueOrDefault();
builder.AppendLine($"{tab + tab + tab}{{");
builder.AppendLine($"{tab + tab + tab + tab}\"line\": {line},");
builder.AppendLine($"{tab + tab + tab + tab}\"column\": {column}");
builder.AppendLine($"{tab + tab + tab}}},");
}
}
builder.AppendLine($"{tab + tab}]");
builder.AppendLine($"{tab}}},");
}
enumerator.Dispose();
}
catch (Exception ex)
{
Log.Error(ex, "Error creating GraphQL error message.");
return "errors: []";
}
}
builder.AppendLine("]");
return builder.ToString();
}
}
}
| 1 | 18,463 | If, for a given integration, the tag/measure is always set, it should be added to the strongly-typed tags (here for instance, in GraphQLTags). This way, the underlying dictionary is allocated only in the rare case where users add custom tags | DataDog-dd-trace-dotnet | .cs |
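A rough C# sketch of the strongly-typed-tags suggestion. The stub base class and property shape are assumptions for illustration, not the actual dd-trace-dotnet tagging infrastructure:

// Stub standing in for the library's tag base class in this sketch.
internal abstract class InstrumentationTagsStub
{
}

internal class GraphQLTags : InstrumentationTagsStub
{
    // An always-set measure exposed as a strongly-typed property, so the
    // span's fallback tag dictionary is never allocated just for it.
    public double? Measured => 1.0;

    public string Source { get; set; }

    public string OperationName { get; set; }

    public string OperationType { get; set; }
}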
@@ -243,12 +243,12 @@ func ExecuteContract(
if err := stateDB.CommitContracts(); err != nil {
return nil, nil, errors.Wrap(err, "failed to commit contracts to underlying db")
}
- stateDB.clear()
receipt.AddLogs(stateDB.Logs()...).AddTransactionLogs(depositLog, burnLog)
if receipt.Status == uint64(iotextypes.ReceiptStatus_Success) ||
featureCtx.AddOutOfGasToTransactionLog && receipt.Status == uint64(iotextypes.ReceiptStatus_ErrCodeStoreOutOfGas) {
receipt.AddTransactionLogs(stateDB.TransactionLogs()...)
}
+ stateDB.clear()
if featureCtx.SetRevertMessageToReceipt && receipt.Status == uint64(iotextypes.ReceiptStatus_ErrExecutionReverted) && retval != nil && bytes.Equal(retval[:4], revertSelector) {
// in case of the execution revert error, parse the retVal and add to receipt | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package evm
import (
"bytes"
"context"
"math"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/protobuf/proto"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-proto/golang/iotextypes"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/blockchain/genesis"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/tracer"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
)
var (
// TODO: whenever ActionGasLimit is removed from genesis, we need to hard code it to 5M to make it compatible with
// the mainnet.
preAleutianActionGasLimit = genesis.Default.ActionGasLimit
inContractTransfer = hash.BytesToHash256([]byte{byte(iotextypes.TransactionLogType_IN_CONTRACT_TRANSFER)})
// revertSelector is a special function selector for revert reason unpacking.
revertSelector = crypto.Keccak256([]byte("Error(string)"))[:4]
// ErrInconsistentNonce is returned when the nonce differs from the executor's nonce
ErrInconsistentNonce = errors.New("Nonce is not identical to executor nonce")
)
// CanTransfer checks whether the from account has enough balance
func CanTransfer(db vm.StateDB, fromHash common.Address, balance *big.Int) bool {
return db.GetBalance(fromHash).Cmp(balance) >= 0
}
// MakeTransfer transfers account
func MakeTransfer(db vm.StateDB, fromHash, toHash common.Address, amount *big.Int) {
db.SubBalance(fromHash, amount)
db.AddBalance(toHash, amount)
db.AddLog(&types.Log{
Topics: []common.Hash{
common.BytesToHash(inContractTransfer[:]),
common.BytesToHash(fromHash[:]),
common.BytesToHash(toHash[:]),
},
Data: amount.Bytes(),
})
}
type (
// Params is the context and parameters
Params struct {
context vm.BlockContext
txCtx vm.TxContext
nonce uint64
executorRawAddress string
amount *big.Int
contract *common.Address
gas uint64
data []byte
}
)
// newParams creates a new context for use in the EVM.
func newParams(
ctx context.Context,
execution *action.Execution,
stateDB *StateDBAdapter,
getBlockHash GetBlockHash,
) (*Params, error) {
actionCtx := protocol.MustGetActionCtx(ctx)
blkCtx := protocol.MustGetBlockCtx(ctx)
featureCtx := protocol.MustGetFeatureCtx(ctx)
executorAddr := common.BytesToAddress(actionCtx.Caller.Bytes())
var contractAddrPointer *common.Address
if execution.Contract() != action.EmptyAddress {
contract, err := address.FromString(execution.Contract())
if err != nil {
return nil, errors.Wrap(err, "failed to convert encoded contract address to address")
}
contractAddr := common.BytesToAddress(contract.Bytes())
contractAddrPointer = &contractAddr
}
gasLimit := execution.GasLimit()
// Reset gas limit to the system wide action gas limit cap if it's greater than it
if blkCtx.BlockHeight > 0 && featureCtx.SystemWideActionGasLimit && gasLimit > preAleutianActionGasLimit {
gasLimit = preAleutianActionGasLimit
}
var getHashFn vm.GetHashFunc
switch {
case featureCtx.CorrectGetHashFn:
getHashFn = func(n uint64) common.Hash {
hash, err := getBlockHash(n)
if err == nil {
return common.BytesToHash(hash[:])
}
return common.Hash{}
}
case featureCtx.FixGetHashFnHeight:
getHashFn = func(n uint64) common.Hash {
hash, err := getBlockHash(stateDB.blockHeight - (n + 1))
if err == nil {
return common.BytesToHash(hash[:])
}
return common.Hash{}
}
default:
getHashFn = func(n uint64) common.Hash {
hash, err := getBlockHash(stateDB.blockHeight - n)
if err != nil {
// initial implementation did wrong, should return common.Hash{} in case of error
return common.BytesToHash(hash[:])
}
return common.Hash{}
}
}
context := vm.BlockContext{
CanTransfer: CanTransfer,
Transfer: MakeTransfer,
GetHash: getHashFn,
Coinbase: common.BytesToAddress(blkCtx.Producer.Bytes()),
BlockNumber: new(big.Int).SetUint64(blkCtx.BlockHeight),
Time: new(big.Int).SetInt64(blkCtx.BlockTimeStamp.Unix()),
Difficulty: new(big.Int).SetUint64(uint64(50)),
GasLimit: gasLimit,
}
return &Params{
context,
vm.TxContext{
Origin: executorAddr,
GasPrice: execution.GasPrice(),
},
execution.Nonce(),
actionCtx.Caller.String(),
execution.Amount(),
contractAddrPointer,
gasLimit,
execution.Data(),
}, nil
}
func securityDeposit(ps *Params, stateDB vm.StateDB, gasLimit uint64) error {
executorNonce := stateDB.GetNonce(ps.txCtx.Origin)
if executorNonce > ps.nonce {
log.S().Errorf("Nonce on %v: %d vs %d", ps.txCtx.Origin, executorNonce, ps.nonce)
// TODO ignore inconsistent nonce problem until the actions are executed sequentially
// return ErrInconsistentNonce
}
if gasLimit < ps.gas {
return action.ErrGasLimit
}
gasConsumed := new(big.Int).Mul(new(big.Int).SetUint64(ps.gas), ps.txCtx.GasPrice)
if stateDB.GetBalance(ps.txCtx.Origin).Cmp(gasConsumed) < 0 {
return action.ErrInsufficientFunds
}
stateDB.SubBalance(ps.txCtx.Origin, gasConsumed)
return nil
}
// ExecuteContract processes a transfer which contains a contract
func ExecuteContract(
ctx context.Context,
sm protocol.StateManager,
execution *action.Execution,
getBlockHash GetBlockHash,
depositGasFunc DepositGas,
) ([]byte, *action.Receipt, error) {
ctx, span := tracer.NewSpan(ctx, "evm.ExecuteContract")
defer span.End()
actionCtx := protocol.MustGetActionCtx(ctx)
blkCtx := protocol.MustGetBlockCtx(ctx)
g := genesis.MustExtractGenesisContext(ctx)
featureCtx := protocol.MustGetFeatureCtx(ctx)
stateDB := prepareStateDB(ctx, sm)
ps, err := newParams(ctx, execution, stateDB, getBlockHash)
if err != nil {
return nil, nil, err
}
retval, depositGas, remainingGas, contractAddress, statusCode, err := executeInEVM(ctx, ps, stateDB, g.Blockchain, blkCtx.GasLimit, blkCtx.BlockHeight)
if err != nil {
return nil, nil, err
}
receipt := &action.Receipt{
GasConsumed: ps.gas - remainingGas,
BlockHeight: blkCtx.BlockHeight,
ActionHash: actionCtx.ActionHash,
ContractAddress: contractAddress,
}
receipt.Status = statusCode
var burnLog *action.TransactionLog
if featureCtx.FixDoubleChargeGas {
// Refund all deposit and, actual gas fee will be subtracted when depositing gas fee to the rewarding protocol
stateDB.AddBalance(ps.txCtx.Origin, big.NewInt(0).Mul(big.NewInt(0).SetUint64(depositGas), ps.txCtx.GasPrice))
} else {
if remainingGas > 0 {
remainingValue := new(big.Int).Mul(new(big.Int).SetUint64(remainingGas), ps.txCtx.GasPrice)
stateDB.AddBalance(ps.txCtx.Origin, remainingValue)
}
if depositGas-remainingGas > 0 {
burnLog = &action.TransactionLog{
Type: iotextypes.TransactionLogType_GAS_FEE,
Sender: actionCtx.Caller.String(),
Recipient: "", // burned
Amount: new(big.Int).Mul(new(big.Int).SetUint64(depositGas-remainingGas), ps.txCtx.GasPrice),
}
}
}
var depositLog *action.TransactionLog
if depositGas-remainingGas > 0 {
gasValue := new(big.Int).Mul(new(big.Int).SetUint64(depositGas-remainingGas), ps.txCtx.GasPrice)
depositLog, err = depositGasFunc(ctx, sm, gasValue)
if err != nil {
return nil, nil, err
}
}
if err := stateDB.CommitContracts(); err != nil {
return nil, nil, errors.Wrap(err, "failed to commit contracts to underlying db")
}
stateDB.clear()
receipt.AddLogs(stateDB.Logs()...).AddTransactionLogs(depositLog, burnLog)
if receipt.Status == uint64(iotextypes.ReceiptStatus_Success) ||
featureCtx.AddOutOfGasToTransactionLog && receipt.Status == uint64(iotextypes.ReceiptStatus_ErrCodeStoreOutOfGas) {
receipt.AddTransactionLogs(stateDB.TransactionLogs()...)
}
if featureCtx.SetRevertMessageToReceipt && receipt.Status == uint64(iotextypes.ReceiptStatus_ErrExecutionReverted) && retval != nil && bytes.Equal(retval[:4], revertSelector) {
// in case of the execution revert error, parse the retVal and add to receipt
data := retval[4:]
msgLength := byteutil.BytesToUint64BigEndian(data[56:64])
revertMsg := string(data[64 : 64+msgLength])
receipt.SetExecutionRevertMsg(revertMsg)
}
log.S().Debugf("Receipt: %+v, %v", receipt, err)
return retval, receipt, nil
}
// ReadContractStorage reads contract's storage
func ReadContractStorage(
ctx context.Context,
sm protocol.StateManager,
contract address.Address,
key []byte,
) ([]byte, error) {
bcCtx := protocol.MustGetBlockchainCtx(ctx)
ctx = protocol.WithFeatureCtx(protocol.WithBlockCtx(protocol.WithActionCtx(ctx,
protocol.ActionCtx{
ActionHash: hash.ZeroHash256,
}),
protocol.BlockCtx{
BlockHeight: bcCtx.Tip.Height + 1,
},
))
stateDB := prepareStateDB(ctx, sm)
res := stateDB.GetState(common.BytesToAddress(contract.Bytes()), common.BytesToHash(key))
return res[:], nil
}
func prepareStateDB(ctx context.Context, sm protocol.StateManager) *StateDBAdapter {
actionCtx := protocol.MustGetActionCtx(ctx)
blkCtx := protocol.MustGetBlockCtx(ctx)
featureCtx := protocol.MustGetFeatureCtx(ctx)
opts := []StateDBAdapterOption{}
if featureCtx.UsePendingNonceOption {
opts = append(opts, SortCachedContractsOption(), UsePendingNonceOption())
}
if featureCtx.NotFixTopicCopyBug {
opts = append(opts, NotFixTopicCopyBugOption())
}
if featureCtx.AsyncContractTrie {
opts = append(opts, AsyncContractTrieOption())
}
if featureCtx.FixSnapshotOrder {
opts = append(opts, FixSnapshotOrderOption())
}
return NewStateDBAdapter(
sm,
blkCtx.BlockHeight,
actionCtx.ActionHash,
opts...,
)
}
func getChainConfig(g genesis.Blockchain, height uint64) *params.ChainConfig {
var chainConfig params.ChainConfig
chainConfig.ConstantinopleBlock = new(big.Int).SetUint64(0) // Constantinople switch block (nil = no fork, 0 = already activated)
chainConfig.BeringBlock = new(big.Int).SetUint64(g.BeringBlockHeight)
// enable earlier Ethereum forks at Greenland
chainConfig.GreenlandBlock = new(big.Int).SetUint64(g.GreenlandBlockHeight)
// support chainid and enable Istanbul + MuirGlacier at Iceland
chainConfig.IstanbulBlock = new(big.Int).SetUint64(g.IcelandBlockHeight)
chainConfig.MuirGlacierBlock = new(big.Int).SetUint64(g.IcelandBlockHeight)
if g.IsIceland(height) {
chainConfig.ChainID = new(big.Int).SetUint64(uint64(config.EVMNetworkID()))
}
return &chainConfig
}
// An error returned by executeInEVM is a consensus issue
func executeInEVM(ctx context.Context, evmParams *Params, stateDB *StateDBAdapter, g genesis.Blockchain, gasLimit uint64, blockHeight uint64) ([]byte, uint64, uint64, string, uint64, error) {
remainingGas := evmParams.gas
if err := securityDeposit(evmParams, stateDB, gasLimit); err != nil {
log.L().Warn("unexpected error: not enough security deposit", zap.Error(err))
return nil, 0, 0, action.EmptyAddress, uint64(iotextypes.ReceiptStatus_Failure), err
}
var config vm.Config
if vmCfg, ok := protocol.GetVMConfigCtx(ctx); ok {
config = vmCfg
}
chainConfig := getChainConfig(g, blockHeight)
evm := vm.NewEVM(evmParams.context, evmParams.txCtx, stateDB, chainConfig, config)
intriGas, err := intrinsicGas(evmParams.data)
if err != nil {
return nil, evmParams.gas, remainingGas, action.EmptyAddress, uint64(iotextypes.ReceiptStatus_Failure), err
}
if remainingGas < intriGas {
return nil, evmParams.gas, remainingGas, action.EmptyAddress, uint64(iotextypes.ReceiptStatus_Failure), action.ErrInsufficientFunds
}
remainingGas -= intriGas
contractRawAddress := action.EmptyAddress
executor := vm.AccountRef(evmParams.txCtx.Origin)
var ret []byte
var evmErr error
if evmParams.contract == nil {
// create contract
var evmContractAddress common.Address
_, evmContractAddress, remainingGas, evmErr = evm.Create(executor, evmParams.data, remainingGas, evmParams.amount)
log.L().Debug("evm Create.", log.Hex("addrHash", evmContractAddress[:]))
if evmErr == nil {
if contractAddress, err := address.FromBytes(evmContractAddress.Bytes()); err == nil {
contractRawAddress = contractAddress.String()
}
}
} else {
stateDB.SetNonce(evmParams.txCtx.Origin, stateDB.GetNonce(evmParams.txCtx.Origin)+1)
// process contract
ret, remainingGas, evmErr = evm.Call(executor, *evmParams.contract, evmParams.data, remainingGas, evmParams.amount)
}
if evmErr != nil {
log.L().Debug("evm error", zap.Error(evmErr))
// The only possible consensus-error would be if there wasn't
// sufficient balance to make the transfer happen.
// Should be a hard fork (Bering)
if evmErr == vm.ErrInsufficientBalance && g.IsBering(blockHeight) {
return nil, evmParams.gas, remainingGas, action.EmptyAddress, uint64(iotextypes.ReceiptStatus_Failure), evmErr
}
}
if stateDB.Error() != nil {
log.L().Debug("statedb error", zap.Error(stateDB.Error()))
}
refund := (evmParams.gas - remainingGas) / 2
if refund > stateDB.GetRefund() {
refund = stateDB.GetRefund()
}
remainingGas += refund
errCode := uint64(iotextypes.ReceiptStatus_Success)
if evmErr != nil {
errCode = evmErrToErrStatusCode(evmErr, g, blockHeight)
if errCode == uint64(iotextypes.ReceiptStatus_ErrUnknown) {
var addr string
if evmParams.contract != nil {
ioAddr, _ := address.FromBytes((*evmParams.contract)[:])
addr = ioAddr.String()
} else {
addr = "contract creation"
}
log.L().Warn("evm internal error", zap.Error(evmErr),
zap.String("address", addr),
log.Hex("calldata", evmParams.data))
}
}
return ret, evmParams.gas, remainingGas, contractRawAddress, errCode, nil
}
// evmErrToErrStatusCode returns ReceiptStatuscode which describes error type
func evmErrToErrStatusCode(evmErr error, g genesis.Blockchain, height uint64) (errStatusCode uint64) {
if g.IsJutland(height) {
switch evmErr {
case vm.ErrOutOfGas:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrOutOfGas)
case vm.ErrCodeStoreOutOfGas:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrCodeStoreOutOfGas)
case vm.ErrDepth:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrDepth)
case vm.ErrContractAddressCollision:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrContractAddressCollision)
case vm.ErrExecutionReverted:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrExecutionReverted)
case vm.ErrMaxCodeSizeExceeded:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrMaxCodeSizeExceeded)
case vm.ErrWriteProtection:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrWriteProtection)
case vm.ErrInsufficientBalance:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrInsufficientBalance)
case vm.ErrInvalidJump:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrInvalidJump)
case vm.ErrReturnDataOutOfBounds:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrReturnDataOutOfBounds)
case vm.ErrGasUintOverflow:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrGasUintOverflow)
default:
//These errors from go-ethereum are not accessible (unexported) variables.
switch evmErr.Error() {
case "no compatible interpreter":
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrNoCompatibleInterpreter)
default:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrUnknown)
}
}
return
}
if g.IsBering(height) {
switch evmErr {
case vm.ErrOutOfGas:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrOutOfGas)
case vm.ErrCodeStoreOutOfGas:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrCodeStoreOutOfGas)
case vm.ErrDepth:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrDepth)
case vm.ErrContractAddressCollision:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrContractAddressCollision)
case vm.ErrExecutionReverted:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrExecutionReverted)
case vm.ErrMaxCodeSizeExceeded:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrMaxCodeSizeExceeded)
case vm.ErrWriteProtection:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrWriteProtection)
default:
//These errors from go-ethereum are not accessible (unexported) variables.
switch evmErr.Error() {
case "no compatible interpreter":
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrNoCompatibleInterpreter)
default:
errStatusCode = uint64(iotextypes.ReceiptStatus_ErrUnknown)
}
}
return
}
// before Bering height, return one common failure
errStatusCode = uint64(iotextypes.ReceiptStatus_Failure)
return
}
// intrinsicGas returns the intrinsic gas of an execution
func intrinsicGas(data []byte) (uint64, error) {
if action.ExecutionDataGas == 0 {
panic("payload gas price cannot be zero")
}
dataSize := uint64(len(data))
if (math.MaxInt64-action.ExecutionBaseIntrinsicGas)/action.ExecutionDataGas < dataSize {
return 0, action.ErrInsufficientFunds
}
return dataSize*action.ExecutionDataGas + action.ExecutionBaseIntrinsicGas, nil
}
// SimulateExecution simulates the execution in evm
func SimulateExecution(
ctx context.Context,
sm protocol.StateManager,
caller address.Address,
ex *action.Execution,
getBlockHash GetBlockHash,
) ([]byte, *action.Receipt, error) {
ctx, span := tracer.NewSpan(ctx, "evm.SimulateExecution")
defer span.End()
bcCtx := protocol.MustGetBlockchainCtx(ctx)
g := genesis.MustExtractGenesisContext(ctx)
ctx = protocol.WithActionCtx(
ctx,
protocol.ActionCtx{
Caller: caller,
ActionHash: hash.Hash256b(byteutil.Must(proto.Marshal(ex.Proto()))),
},
)
zeroAddr, err := address.FromString(address.ZeroAddress)
if err != nil {
return nil, nil, err
}
ctx = protocol.WithBlockCtx(
ctx,
protocol.BlockCtx{
BlockHeight: bcCtx.Tip.Height + 1,
BlockTimeStamp: bcCtx.Tip.Timestamp.Add(g.BlockInterval),
GasLimit: g.BlockGasLimit,
Producer: zeroAddr,
},
)
ctx = protocol.WithFeatureCtx(ctx)
return ExecuteContract(
ctx,
sm,
ex,
getBlockHash,
func(context.Context, protocol.StateManager, *big.Int) (*action.TransactionLog, error) {
return nil, nil
},
)
}
| 1 | 24,666 | The position change of this line may be a hard fork. | iotexproject-iotex-core | go |
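Why moving stateDB.clear() is consensus-sensitive: if clear() wipes the adapter's buffered logs, calling it before TransactionLogs() is read changes what lands in the receipt, and receipts feed consensus. A self-contained sketch of the ordering hazard; the types are illustrative, not the real StateDBAdapter:

package main

import "fmt"

type logBuffer struct{ txLogs []string }

func (b *logBuffer) TransactionLogs() []string { return b.txLogs }
func (b *logBuffer) clear()                    { b.txLogs = nil }

func main() {
	readFirst := &logBuffer{txLogs: []string{"in_contract_transfer"}}
	logs := readFirst.TransactionLogs() // read first, then clear: logs survive
	readFirst.clear()
	fmt.Println(len(logs)) // 1

	clearFirst := &logBuffer{txLogs: []string{"in_contract_transfer"}}
	clearFirst.clear() // clear before reading: logs are lost
	fmt.Println(len(clearFirst.TransactionLogs())) // 0
}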
@@ -541,12 +541,7 @@ void Items::buildInventoryList()
void Items::parseItemNode(const pugi::xml_node& itemNode, uint16_t id)
{
- if (id > 30000 && id < 30100) {
- id -= 30000;
-
- if (id >= items.size()) {
- items.resize(id + 1);
- }
+ if (id > 0 && id < 100) {
ItemType& iType = items[id];
iType.id = id;
} | 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2019 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include "items.h"
#include "spells.h"
#include "movement.h"
#include "weapons.h"
#include "pugicast.h"
extern MoveEvents* g_moveEvents;
extern Weapons* g_weapons;
const std::unordered_map<std::string, ItemParseAttributes_t> ItemParseAttributesMap = {
{"type", ITEM_PARSE_TYPE},
{"description", ITEM_PARSE_DESCRIPTION},
{"runespellname", ITEM_PARSE_RUNESPELLNAME},
{"weight", ITEM_PARSE_WEIGHT},
{"showcount", ITEM_PARSE_SHOWCOUNT},
{"armor", ITEM_PARSE_ARMOR},
{"defense", ITEM_PARSE_DEFENSE},
{"extradef", ITEM_PARSE_EXTRADEF},
{"attack", ITEM_PARSE_ATTACK},
{"rotateto", ITEM_PARSE_ROTATETO},
{"moveable", ITEM_PARSE_MOVEABLE},
{"movable", ITEM_PARSE_MOVEABLE},
{"blockprojectile", ITEM_PARSE_BLOCKPROJECTILE},
{"allowpickupable", ITEM_PARSE_PICKUPABLE},
{"pickupable", ITEM_PARSE_PICKUPABLE},
{"forceserialize", ITEM_PARSE_FORCESERIALIZE},
{"forcesave", ITEM_PARSE_FORCESERIALIZE},
{"floorchange", ITEM_PARSE_FLOORCHANGE},
{"corpsetype", ITEM_PARSE_CORPSETYPE},
{"containersize", ITEM_PARSE_CONTAINERSIZE},
{"fluidsource", ITEM_PARSE_FLUIDSOURCE},
{"readable", ITEM_PARSE_READABLE},
{"writeable", ITEM_PARSE_WRITEABLE},
{"maxtextlen", ITEM_PARSE_MAXTEXTLEN},
{"writeonceitemid", ITEM_PARSE_WRITEONCEITEMID},
{"weapontype", ITEM_PARSE_WEAPONTYPE},
{"slottype", ITEM_PARSE_SLOTTYPE},
{"ammotype", ITEM_PARSE_AMMOTYPE},
{"shoottype", ITEM_PARSE_SHOOTTYPE},
{"effect", ITEM_PARSE_EFFECT},
{"range", ITEM_PARSE_RANGE},
{"stopduration", ITEM_PARSE_STOPDURATION},
{"decayto", ITEM_PARSE_DECAYTO},
{"transformequipto", ITEM_PARSE_TRANSFORMEQUIPTO},
{"transformdeequipto", ITEM_PARSE_TRANSFORMDEEQUIPTO},
{"duration", ITEM_PARSE_DURATION},
{"showduration", ITEM_PARSE_SHOWDURATION},
{"charges", ITEM_PARSE_CHARGES},
{"showcharges", ITEM_PARSE_SHOWCHARGES},
{"showattributes", ITEM_PARSE_SHOWATTRIBUTES},
{"hitchance", ITEM_PARSE_HITCHANCE},
{"maxhitchance", ITEM_PARSE_MAXHITCHANCE},
{"invisible", ITEM_PARSE_INVISIBLE},
{"speed", ITEM_PARSE_SPEED},
{"healthgain", ITEM_PARSE_HEALTHGAIN},
{"healthticks", ITEM_PARSE_HEALTHTICKS},
{"managain", ITEM_PARSE_MANAGAIN},
{"manaticks", ITEM_PARSE_MANATICKS},
{"manashield", ITEM_PARSE_MANASHIELD},
{"skillsword", ITEM_PARSE_SKILLSWORD},
{"skillaxe", ITEM_PARSE_SKILLAXE},
{"skillclub", ITEM_PARSE_SKILLCLUB},
{"skilldist", ITEM_PARSE_SKILLDIST},
{"skillfish", ITEM_PARSE_SKILLFISH},
{"skillshield", ITEM_PARSE_SKILLSHIELD},
{"skillfist", ITEM_PARSE_SKILLFIST},
{"maxhitpoints", ITEM_PARSE_MAXHITPOINTS},
{"maxhitpointspercent", ITEM_PARSE_MAXHITPOINTSPERCENT},
{"maxmanapoints", ITEM_PARSE_MAXMANAPOINTS},
{"maxmanapointspercent", ITEM_PARSE_MAXMANAPOINTSPERCENT},
{"magicpoints", ITEM_PARSE_MAGICPOINTS},
{"magiclevelpoints", ITEM_PARSE_MAGICPOINTS},
{"magicpointspercent", ITEM_PARSE_MAGICPOINTSPERCENT},
{"criticalhitchance", ITEM_PARSE_CRITICALHITCHANCE},
{"criticalhitamount", ITEM_PARSE_CRITICALHITAMOUNT},
{"lifeleechchance", ITEM_PARSE_LIFELEECHCHANCE},
{"lifeleechamount", ITEM_PARSE_LIFELEECHAMOUNT},
{"manaleechchance", ITEM_PARSE_MANALEECHCHANCE},
{"manaleechamount", ITEM_PARSE_MANALEECHAMOUNT},
{"fieldabsorbpercentenergy", ITEM_PARSE_FIELDABSORBPERCENTENERGY},
{"fieldabsorbpercentfire", ITEM_PARSE_FIELDABSORBPERCENTFIRE},
{"fieldabsorbpercentpoison", ITEM_PARSE_FIELDABSORBPERCENTPOISON},
{"fieldabsorbpercentearth", ITEM_PARSE_FIELDABSORBPERCENTPOISON},
{"absorbpercentall", ITEM_PARSE_ABSORBPERCENTALL},
{"absorbpercentallelements", ITEM_PARSE_ABSORBPERCENTALL},
{"absorbpercentelements", ITEM_PARSE_ABSORBPERCENTELEMENTS},
{"absorbpercentmagic", ITEM_PARSE_ABSORBPERCENTMAGIC},
{"absorbpercentenergy", ITEM_PARSE_ABSORBPERCENTENERGY},
{"absorbpercentfire", ITEM_PARSE_ABSORBPERCENTFIRE},
{"absorbpercentpoison", ITEM_PARSE_ABSORBPERCENTPOISON},
{"absorbpercentearth", ITEM_PARSE_ABSORBPERCENTPOISON},
{"absorbpercentice", ITEM_PARSE_ABSORBPERCENTICE},
{"absorbpercentholy", ITEM_PARSE_ABSORBPERCENTHOLY},
{"absorbpercentdeath", ITEM_PARSE_ABSORBPERCENTDEATH},
{"absorbpercentlifedrain", ITEM_PARSE_ABSORBPERCENTLIFEDRAIN},
{"absorbpercentmanadrain", ITEM_PARSE_ABSORBPERCENTMANADRAIN},
{"absorbpercentdrown", ITEM_PARSE_ABSORBPERCENTDROWN},
{"absorbpercentphysical", ITEM_PARSE_ABSORBPERCENTPHYSICAL},
{"absorbpercenthealing", ITEM_PARSE_ABSORBPERCENTHEALING},
{"absorbpercentundefined", ITEM_PARSE_ABSORBPERCENTUNDEFINED},
{"suppressdrunk", ITEM_PARSE_SUPPRESSDRUNK},
{"suppressenergy", ITEM_PARSE_SUPPRESSENERGY},
{"suppressfire", ITEM_PARSE_SUPPRESSFIRE},
{"suppresspoison", ITEM_PARSE_SUPPRESSPOISON},
{"suppressdrown", ITEM_PARSE_SUPPRESSDROWN},
{"suppressphysical", ITEM_PARSE_SUPPRESSPHYSICAL},
{"suppressfreeze", ITEM_PARSE_SUPPRESSFREEZE},
{"suppressdazzle", ITEM_PARSE_SUPPRESSDAZZLE},
{"suppresscurse", ITEM_PARSE_SUPPRESSCURSE},
{"field", ITEM_PARSE_FIELD},
{"replaceable", ITEM_PARSE_REPLACEABLE},
{"partnerdirection", ITEM_PARSE_PARTNERDIRECTION},
{"leveldoor", ITEM_PARSE_LEVELDOOR},
{"maletransformto", ITEM_PARSE_MALETRANSFORMTO},
{"malesleeper", ITEM_PARSE_MALETRANSFORMTO},
{"femaletransformto", ITEM_PARSE_FEMALETRANSFORMTO},
{"femalesleeper", ITEM_PARSE_FEMALETRANSFORMTO},
{"transformto", ITEM_PARSE_TRANSFORMTO},
{"destroyto", ITEM_PARSE_DESTROYTO},
{"elementice", ITEM_PARSE_ELEMENTICE},
{"elementearth", ITEM_PARSE_ELEMENTEARTH},
{"elementfire", ITEM_PARSE_ELEMENTFIRE},
{"elementenergy", ITEM_PARSE_ELEMENTENERGY},
{"elementdeath", ITEM_PARSE_ELEMENTDEATH},
{"elementholy", ITEM_PARSE_ELEMENTHOLY},
{"walkstack", ITEM_PARSE_WALKSTACK},
{"blocking", ITEM_PARSE_BLOCKING},
{"allowdistread", ITEM_PARSE_ALLOWDISTREAD},
{"storeitem", ITEM_PARSE_STOREITEM},
};
const std::unordered_map<std::string, ItemTypes_t> ItemTypesMap = {
{"key", ITEM_TYPE_KEY},
{"magicfield", ITEM_TYPE_MAGICFIELD},
{"container", ITEM_TYPE_CONTAINER},
{"depot", ITEM_TYPE_DEPOT},
{"mailbox", ITEM_TYPE_MAILBOX},
{"trashholder", ITEM_TYPE_TRASHHOLDER},
{"teleport", ITEM_TYPE_TELEPORT},
{"door", ITEM_TYPE_DOOR},
{"bed", ITEM_TYPE_BED},
{"rune", ITEM_TYPE_RUNE},
};
const std::unordered_map<std::string, tileflags_t> TileStatesMap = {
{"down", TILESTATE_FLOORCHANGE_DOWN},
{"north", TILESTATE_FLOORCHANGE_NORTH},
{"south", TILESTATE_FLOORCHANGE_SOUTH},
{"southalt", TILESTATE_FLOORCHANGE_SOUTH_ALT},
{"west", TILESTATE_FLOORCHANGE_WEST},
{"east", TILESTATE_FLOORCHANGE_EAST},
{"eastalt", TILESTATE_FLOORCHANGE_EAST_ALT},
};

const std::unordered_map<std::string, RaceType_t> RaceTypesMap = {
{"venom", RACE_VENOM},
{"blood", RACE_BLOOD},
{"undead", RACE_UNDEAD},
{"fire", RACE_FIRE},
{"energy", RACE_ENERGY},
};

const std::unordered_map<std::string, WeaponType_t> WeaponTypesMap = {
{"sword", WEAPON_SWORD},
{"club", WEAPON_CLUB},
{"axe", WEAPON_AXE},
{"shield", WEAPON_SHIELD},
{"distance", WEAPON_DISTANCE},
{"wand", WEAPON_WAND},
{"ammunition", WEAPON_AMMO},
};

const std::unordered_map<std::string, FluidTypes_t> FluidTypesMap = {
{"water", FLUID_WATER},
{"blood", FLUID_BLOOD},
{"beer", FLUID_BEER},
{"slime", FLUID_SLIME},
{"lemonade", FLUID_LEMONADE},
{"milk", FLUID_MILK},
{"mana", FLUID_MANA},
{"life", FLUID_LIFE},
{"oil", FLUID_OIL},
{"urine", FLUID_URINE},
{"coconut", FLUID_COCONUTMILK},
{"wine", FLUID_WINE},
{"mud", FLUID_MUD},
{"fruitjuice", FLUID_FRUITJUICE},
{"lava", FLUID_LAVA},
{"rum", FLUID_RUM},
{"swamp", FLUID_SWAMP},
{"tea", FLUID_TEA},
{"mead", FLUID_MEAD},
};

Items::Items()
{
items.reserve(30000);
nameToItems.reserve(30000);
}

void Items::clear()
{
items.clear();
clientIdToServerIdMap.clear();
nameToItems.clear();
}

bool Items::reload()
{
clear();
loadFromOtb("data/items/items.otb");
if (!loadFromXml()) {
return false;
}
g_moveEvents->reload();
g_weapons->reload();
g_weapons->loadDefaults();
return true;
}

constexpr auto OTBI = OTB::Identifier{{'O','T','B','I'}};

bool Items::loadFromOtb(const std::string& file)
{
OTB::Loader loader{file, OTBI};
auto& root = loader.parseTree();
PropStream props;
if (loader.getProps(root, props)) {
//4 byte flags
//attributes
//0x01 = version data
uint32_t flags;
if (!props.read<uint32_t>(flags)) {
return false;
}
uint8_t attr;
if (!props.read<uint8_t>(attr)) {
return false;
}
if (attr == ROOT_ATTR_VERSION) {
uint16_t datalen;
if (!props.read<uint16_t>(datalen)) {
return false;
}
if (datalen != sizeof(VERSIONINFO)) {
return false;
}
VERSIONINFO vi;
if (!props.read(vi)) {
return false;
}
majorVersion = vi.dwMajorVersion; //items otb format file version
minorVersion = vi.dwMinorVersion; //client version
buildNumber = vi.dwBuildNumber; //revision
}
}
if (majorVersion == 0xFFFFFFFF) {
std::cout << "[Warning - Items::loadFromOtb] items.otb using generic client version." << std::endl;
} else if (majorVersion != 3) {
std::cout << "Old version detected, a newer version of items.otb is required." << std::endl;
return false;
} else if (minorVersion < CLIENT_VERSION_1098) {
std::cout << "A newer version of items.otb is required." << std::endl;
return false;
}
for (auto& itemNode : root.children) {
PropStream stream;
if (!loader.getProps(itemNode, stream)) {
return false;
}
uint32_t flags;
if (!stream.read<uint32_t>(flags)) {
return false;
}
uint16_t serverId = 0;
uint16_t clientId = 0;
uint16_t speed = 0;
uint16_t wareId = 0;
uint8_t lightLevel = 0;
uint8_t lightColor = 0;
uint8_t alwaysOnTopOrder = 0;
uint8_t attrib;
while (stream.read<uint8_t>(attrib)) {
uint16_t datalen;
if (!stream.read<uint16_t>(datalen)) {
return false;
}
switch (attrib) {
case ITEM_ATTR_SERVERID: {
if (datalen != sizeof(uint16_t)) {
return false;
}
if (!stream.read<uint16_t>(serverId)) {
return false;
}
if (serverId > 30000 && serverId < 30100) {
serverId -= 30000;
}
break;
}
case ITEM_ATTR_CLIENTID: {
if (datalen != sizeof(uint16_t)) {
return false;
}
if (!stream.read<uint16_t>(clientId)) {
return false;
}
break;
}
case ITEM_ATTR_SPEED: {
if (datalen != sizeof(uint16_t)) {
return false;
}
if (!stream.read<uint16_t>(speed)) {
return false;
}
break;
}
case ITEM_ATTR_LIGHT2: {
if (datalen != sizeof(lightBlock2)) {
return false;
}
lightBlock2 lb2;
if (!stream.read(lb2)) {
return false;
}
lightLevel = static_cast<uint8_t>(lb2.lightLevel);
lightColor = static_cast<uint8_t>(lb2.lightColor);
break;
}
case ITEM_ATTR_TOPORDER: {
if (datalen != sizeof(uint8_t)) {
return false;
}
if (!stream.read<uint8_t>(alwaysOnTopOrder)) {
return false;
}
break;
}
case ITEM_ATTR_WAREID: {
if (datalen != sizeof(uint16_t)) {
return false;
}
if (!stream.read<uint16_t>(wareId)) {
return false;
}
break;
}
default: {
//skip unknown attributes
if (!stream.skip(datalen)) {
return false;
}
break;
}
}
}
clientIdToServerIdMap.emplace(clientId, serverId);
// store the found item
if (serverId >= items.size()) {
items.resize(serverId + 1);
}
ItemType& iType = items[serverId];
iType.group = static_cast<itemgroup_t>(itemNode.type);
switch (itemNode.type) {
case ITEM_GROUP_CONTAINER:
iType.type = ITEM_TYPE_CONTAINER;
break;
case ITEM_GROUP_DOOR:
//not used
iType.type = ITEM_TYPE_DOOR;
break;
case ITEM_GROUP_MAGICFIELD:
//not used
iType.type = ITEM_TYPE_MAGICFIELD;
break;
case ITEM_GROUP_TELEPORT:
//not used
iType.type = ITEM_TYPE_TELEPORT;
break;
case ITEM_GROUP_NONE:
case ITEM_GROUP_GROUND:
case ITEM_GROUP_SPLASH:
case ITEM_GROUP_FLUID:
case ITEM_GROUP_CHARGES:
case ITEM_GROUP_DEPRECATED:
break;
default:
return false;
}
iType.blockSolid = hasBitSet(FLAG_BLOCK_SOLID, flags);
iType.blockProjectile = hasBitSet(FLAG_BLOCK_PROJECTILE, flags);
iType.blockPathFind = hasBitSet(FLAG_BLOCK_PATHFIND, flags);
iType.hasHeight = hasBitSet(FLAG_HAS_HEIGHT, flags);
iType.useable = hasBitSet(FLAG_USEABLE, flags);
iType.pickupable = hasBitSet(FLAG_PICKUPABLE, flags);
iType.moveable = hasBitSet(FLAG_MOVEABLE, flags);
iType.stackable = hasBitSet(FLAG_STACKABLE, flags);
iType.alwaysOnTop = hasBitSet(FLAG_ALWAYSONTOP, flags);
iType.isVertical = hasBitSet(FLAG_VERTICAL, flags);
iType.isHorizontal = hasBitSet(FLAG_HORIZONTAL, flags);
iType.isHangable = hasBitSet(FLAG_HANGABLE, flags);
iType.allowDistRead = hasBitSet(FLAG_ALLOWDISTREAD, flags);
iType.rotatable = hasBitSet(FLAG_ROTATABLE, flags);
iType.canReadText = hasBitSet(FLAG_READABLE, flags);
iType.lookThrough = hasBitSet(FLAG_LOOKTHROUGH, flags);
iType.isAnimation = hasBitSet(FLAG_ANIMATION, flags);
// iType.walkStack = !hasBitSet(FLAG_FULLTILE, flags);
iType.forceUse = hasBitSet(FLAG_FORCEUSE, flags);
iType.id = serverId;
iType.clientId = clientId;
iType.speed = speed;
iType.lightLevel = lightLevel;
iType.lightColor = lightColor;
iType.wareId = wareId;
iType.alwaysOnTopOrder = alwaysOnTopOrder;
}
items.shrink_to_fit();
return true;
}

bool Items::loadFromXml()
{
pugi::xml_document doc;
pugi::xml_parse_result result = doc.load_file("data/items/items.xml");
if (!result) {
printXMLError("Error - Items::loadFromXml", "data/items/items.xml", result);
return false;
}
for (auto itemNode : doc.child("items").children()) {
pugi::xml_attribute idAttribute = itemNode.attribute("id");
if (idAttribute) {
parseItemNode(itemNode, pugi::cast<uint16_t>(idAttribute.value()));
continue;
}
pugi::xml_attribute fromIdAttribute = itemNode.attribute("fromid");
if (!fromIdAttribute) {
std::cout << "[Warning - Items::loadFromXml] No item id found" << std::endl;
continue;
}
pugi::xml_attribute toIdAttribute = itemNode.attribute("toid");
if (!toIdAttribute) {
std::cout << "[Warning - Items::loadFromXml] fromid (" << fromIdAttribute.value() << ") without toid" << std::endl;
continue;
}
uint16_t id = pugi::cast<uint16_t>(fromIdAttribute.value());
uint16_t toId = pugi::cast<uint16_t>(toIdAttribute.value());
while (id <= toId) {
parseItemNode(itemNode, id++);
}
}
buildInventoryList();
return true;
}

void Items::buildInventoryList()
{
inventory.reserve(items.size());
for (const auto& type: items) {
if (type.weaponType != WEAPON_NONE || type.ammoType != AMMO_NONE ||
type.attack != 0 || type.defense != 0 ||
type.extraDefense != 0 || type.armor != 0 ||
type.slotPosition & SLOTP_NECKLACE ||
type.slotPosition & SLOTP_RING ||
type.slotPosition & SLOTP_AMMO ||
type.slotPosition & SLOTP_FEET ||
type.slotPosition & SLOTP_HEAD ||
type.slotPosition & SLOTP_ARMOR ||
type.slotPosition & SLOTP_LEGS)
{
inventory.push_back(type.clientId);
}
}
inventory.shrink_to_fit();
std::sort(inventory.begin(), inventory.end());
}

void Items::parseItemNode(const pugi::xml_node& itemNode, uint16_t id)
{
if (id > 30000 && id < 30100) {
id -= 30000;
if (id >= items.size()) {
items.resize(id + 1);
}
ItemType& iType = items[id];
iType.id = id;
}
ItemType& it = getItemType(id);
if (it.id == 0) {
return;
}
if (!it.name.empty()) {
std::cout << "[Warning - Items::parseItemNode] Duplicate item with id: " << id << std::endl;
return;
}
it.name = itemNode.attribute("name").as_string();
nameToItems.insert({ asLowerCaseString(it.name), id });
pugi::xml_attribute articleAttribute = itemNode.attribute("article");
if (articleAttribute) {
it.article = articleAttribute.as_string();
}
pugi::xml_attribute pluralAttribute = itemNode.attribute("plural");
if (pluralAttribute) {
it.pluralName = pluralAttribute.as_string();
}
Abilities& abilities = it.getAbilities();
for (auto attributeNode : itemNode.children()) {
pugi::xml_attribute keyAttribute = attributeNode.attribute("key");
if (!keyAttribute) {
continue;
}
pugi::xml_attribute valueAttribute = attributeNode.attribute("value");
if (!valueAttribute) {
continue;
}
std::string tmpStrValue = asLowerCaseString(keyAttribute.as_string());
auto parseAttribute = ItemParseAttributesMap.find(tmpStrValue);
if (parseAttribute != ItemParseAttributesMap.end()) {
ItemParseAttributes_t parseType = parseAttribute->second;
switch (parseType) {
case ITEM_PARSE_TYPE: {
tmpStrValue = asLowerCaseString(valueAttribute.as_string());
auto it2 = ItemTypesMap.find(tmpStrValue);
if (it2 != ItemTypesMap.end()) {
it.type = it2->second;
if (it.type == ITEM_TYPE_CONTAINER) {
it.group = ITEM_GROUP_CONTAINER;
}
} else {
std::cout << "[Warning - Items::parseItemNode] Unknown type: " << valueAttribute.as_string() << std::endl;
}
break;
}
case ITEM_PARSE_DESCRIPTION: {
it.description = valueAttribute.as_string();
break;
}
case ITEM_PARSE_RUNESPELLNAME: {
it.runeSpellName = valueAttribute.as_string();
break;
}
case ITEM_PARSE_WEIGHT: {
it.weight = pugi::cast<uint32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_SHOWCOUNT: {
it.showCount = valueAttribute.as_bool();
break;
}
case ITEM_PARSE_ARMOR: {
it.armor = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_DEFENSE: {
it.defense = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_EXTRADEF: {
it.extraDefense = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_ATTACK: {
it.attack = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_ROTATETO: {
it.rotateTo = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_MOVEABLE: {
it.moveable = valueAttribute.as_bool();
break;
}
case ITEM_PARSE_BLOCKPROJECTILE: {
it.blockProjectile = valueAttribute.as_bool();
break;
}
case ITEM_PARSE_PICKUPABLE: {
it.allowPickupable = valueAttribute.as_bool();
break;
}
case ITEM_PARSE_FORCESERIALIZE: {
it.forceSerialize = valueAttribute.as_bool();
break;
}
case ITEM_PARSE_FLOORCHANGE: {
tmpStrValue = asLowerCaseString(valueAttribute.as_string());
auto it2 = TileStatesMap.find(tmpStrValue);
if (it2 != TileStatesMap.end()) {
it.floorChange |= it2->second;
} else {
std::cout << "[Warning - Items::parseItemNode] Unknown floorChange: " << valueAttribute.as_string() << std::endl;
}
break;
}
case ITEM_PARSE_CORPSETYPE: {
tmpStrValue = asLowerCaseString(valueAttribute.as_string());
auto it2 = RaceTypesMap.find(tmpStrValue);
if (it2 != RaceTypesMap.end()) {
it.corpseType = it2->second;
} else {
std::cout << "[Warning - Items::parseItemNode] Unknown corpseType: " << valueAttribute.as_string() << std::endl;
}
break;
}
case ITEM_PARSE_CONTAINERSIZE: {
it.maxItems = pugi::cast<uint16_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_FLUIDSOURCE: {
tmpStrValue = asLowerCaseString(valueAttribute.as_string());
auto it2 = FluidTypesMap.find(tmpStrValue);
if (it2 != FluidTypesMap.end()) {
it.fluidSource = it2->second;
} else {
std::cout << "[Warning - Items::parseItemNode] Unknown fluidSource: " << valueAttribute.as_string() << std::endl;
}
break;
}
case ITEM_PARSE_READABLE: {
it.canReadText = valueAttribute.as_bool();
break;
}
case ITEM_PARSE_WRITEABLE: {
it.canWriteText = valueAttribute.as_bool();
it.canReadText = it.canWriteText;
break;
}
case ITEM_PARSE_MAXTEXTLEN: {
it.maxTextLen = pugi::cast<uint16_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_WRITEONCEITEMID: {
it.writeOnceItemId = pugi::cast<uint16_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_WEAPONTYPE: {
tmpStrValue = asLowerCaseString(valueAttribute.as_string());
auto it2 = WeaponTypesMap.find(tmpStrValue);
if (it2 != WeaponTypesMap.end()) {
it.weaponType = it2->second;
} else {
std::cout << "[Warning - Items::parseItemNode] Unknown weaponType: " << valueAttribute.as_string() << std::endl;
}
break;
}
case ITEM_PARSE_SLOTTYPE: {
tmpStrValue = asLowerCaseString(valueAttribute.as_string());
if (tmpStrValue == "head") {
it.slotPosition |= SLOTP_HEAD;
} else if (tmpStrValue == "body") {
it.slotPosition |= SLOTP_ARMOR;
} else if (tmpStrValue == "legs") {
it.slotPosition |= SLOTP_LEGS;
} else if (tmpStrValue == "feet") {
it.slotPosition |= SLOTP_FEET;
} else if (tmpStrValue == "backpack") {
it.slotPosition |= SLOTP_BACKPACK;
} else if (tmpStrValue == "two-handed") {
it.slotPosition |= SLOTP_TWO_HAND;
} else if (tmpStrValue == "right-hand") {
it.slotPosition &= ~SLOTP_LEFT;
} else if (tmpStrValue == "left-hand") {
it.slotPosition &= ~SLOTP_RIGHT;
} else if (tmpStrValue == "necklace") {
it.slotPosition |= SLOTP_NECKLACE;
} else if (tmpStrValue == "ring") {
it.slotPosition |= SLOTP_RING;
} else if (tmpStrValue == "ammo") {
it.slotPosition |= SLOTP_AMMO;
} else if (tmpStrValue == "hand") {
it.slotPosition |= SLOTP_HAND;
} else {
std::cout << "[Warning - Items::parseItemNode] Unknown slotType: " << valueAttribute.as_string() << std::endl;
}
break;
}
case ITEM_PARSE_AMMOTYPE: {
it.ammoType = getAmmoType(asLowerCaseString(valueAttribute.as_string()));
if (it.ammoType == AMMO_NONE) {
std::cout << "[Warning - Items::parseItemNode] Unknown ammoType: " << valueAttribute.as_string() << std::endl;
}
break;
}
case ITEM_PARSE_SHOOTTYPE: {
ShootType_t shoot = getShootType(asLowerCaseString(valueAttribute.as_string()));
if (shoot != CONST_ANI_NONE) {
it.shootType = shoot;
} else {
std::cout << "[Warning - Items::parseItemNode] Unknown shootType: " << valueAttribute.as_string() << std::endl;
}
break;
}
case ITEM_PARSE_EFFECT: {
MagicEffectClasses effect = getMagicEffect(asLowerCaseString(valueAttribute.as_string()));
if (effect != CONST_ME_NONE) {
it.magicEffect = effect;
} else {
std::cout << "[Warning - Items::parseItemNode] Unknown effect: " << valueAttribute.as_string() << std::endl;
}
break;
}
case ITEM_PARSE_RANGE: {
it.shootRange = pugi::cast<uint16_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_STOPDURATION: {
it.stopTime = valueAttribute.as_bool();
break;
}
case ITEM_PARSE_DECAYTO: {
it.decayTo = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_TRANSFORMEQUIPTO: {
it.transformEquipTo = pugi::cast<uint16_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_TRANSFORMDEEQUIPTO: {
it.transformDeEquipTo = pugi::cast<uint16_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_DURATION: {
it.decayTime = pugi::cast<uint32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_SHOWDURATION: {
it.showDuration = valueAttribute.as_bool();
break;
}
case ITEM_PARSE_CHARGES: {
it.charges = pugi::cast<uint32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_SHOWCHARGES: {
it.showCharges = valueAttribute.as_bool();
break;
}
case ITEM_PARSE_SHOWATTRIBUTES: {
it.showAttributes = valueAttribute.as_bool();
break;
}
case ITEM_PARSE_HITCHANCE: {
it.hitChance = std::min<int8_t>(100, std::max<int8_t>(-100, pugi::cast<int16_t>(valueAttribute.value())));
break;
}
case ITEM_PARSE_MAXHITCHANCE: {
it.maxHitChance = std::min<uint32_t>(100, pugi::cast<uint32_t>(valueAttribute.value()));
break;
}
case ITEM_PARSE_INVISIBLE: {
abilities.invisible = valueAttribute.as_bool();
break;
}
case ITEM_PARSE_SPEED: {
abilities.speed = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_HEALTHGAIN: {
abilities.regeneration = true;
abilities.healthGain = pugi::cast<uint32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_HEALTHTICKS: {
abilities.regeneration = true;
abilities.healthTicks = pugi::cast<uint32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_MANAGAIN: {
abilities.regeneration = true;
abilities.manaGain = pugi::cast<uint32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_MANATICKS: {
abilities.regeneration = true;
abilities.manaTicks = pugi::cast<uint32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_MANASHIELD: {
abilities.manaShield = valueAttribute.as_bool();
break;
}
case ITEM_PARSE_SKILLSWORD: {
abilities.skills[SKILL_SWORD] = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_SKILLAXE: {
abilities.skills[SKILL_AXE] = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_SKILLCLUB: {
abilities.skills[SKILL_CLUB] = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_SKILLDIST: {
abilities.skills[SKILL_DISTANCE] = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_SKILLFISH: {
abilities.skills[SKILL_FISHING] = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_SKILLSHIELD: {
abilities.skills[SKILL_SHIELD] = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_SKILLFIST: {
abilities.skills[SKILL_FIST] = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_CRITICALHITAMOUNT: {
abilities.specialSkills[SPECIALSKILL_CRITICALHITAMOUNT] = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_CRITICALHITCHANCE: {
abilities.specialSkills[SPECIALSKILL_CRITICALHITCHANCE] = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_MANALEECHAMOUNT: {
abilities.specialSkills[SPECIALSKILL_MANALEECHAMOUNT] = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_MANALEECHCHANCE: {
abilities.specialSkills[SPECIALSKILL_MANALEECHCHANCE] = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_LIFELEECHAMOUNT: {
abilities.specialSkills[SPECIALSKILL_LIFELEECHAMOUNT] = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_LIFELEECHCHANCE: {
abilities.specialSkills[SPECIALSKILL_LIFELEECHCHANCE] = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_MAXHITPOINTS: {
abilities.stats[STAT_MAXHITPOINTS] = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_MAXHITPOINTSPERCENT: {
abilities.statsPercent[STAT_MAXHITPOINTS] = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_MAXMANAPOINTS: {
abilities.stats[STAT_MAXMANAPOINTS] = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_MAXMANAPOINTSPERCENT: {
abilities.statsPercent[STAT_MAXMANAPOINTS] = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_MAGICPOINTS: {
abilities.stats[STAT_MAGICPOINTS] = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_MAGICPOINTSPERCENT: {
abilities.statsPercent[STAT_MAGICPOINTS] = pugi::cast<int32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_FIELDABSORBPERCENTENERGY: {
abilities.fieldAbsorbPercent[combatTypeToIndex(COMBAT_ENERGYDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_FIELDABSORBPERCENTFIRE: {
abilities.fieldAbsorbPercent[combatTypeToIndex(COMBAT_FIREDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_FIELDABSORBPERCENTPOISON: {
abilities.fieldAbsorbPercent[combatTypeToIndex(COMBAT_EARTHDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_ABSORBPERCENTALL: {
int16_t value = pugi::cast<int16_t>(valueAttribute.value());
for (auto& i : abilities.absorbPercent) {
i += value;
}
break;
}
case ITEM_PARSE_ABSORBPERCENTELEMENTS: {
int16_t value = pugi::cast<int16_t>(valueAttribute.value());
abilities.absorbPercent[combatTypeToIndex(COMBAT_ENERGYDAMAGE)] += value;
abilities.absorbPercent[combatTypeToIndex(COMBAT_FIREDAMAGE)] += value;
abilities.absorbPercent[combatTypeToIndex(COMBAT_EARTHDAMAGE)] += value;
abilities.absorbPercent[combatTypeToIndex(COMBAT_ICEDAMAGE)] += value;
break;
}
case ITEM_PARSE_ABSORBPERCENTMAGIC: {
int16_t value = pugi::cast<int16_t>(valueAttribute.value());
abilities.absorbPercent[combatTypeToIndex(COMBAT_ENERGYDAMAGE)] += value;
abilities.absorbPercent[combatTypeToIndex(COMBAT_FIREDAMAGE)] += value;
abilities.absorbPercent[combatTypeToIndex(COMBAT_EARTHDAMAGE)] += value;
abilities.absorbPercent[combatTypeToIndex(COMBAT_ICEDAMAGE)] += value;
abilities.absorbPercent[combatTypeToIndex(COMBAT_HOLYDAMAGE)] += value;
abilities.absorbPercent[combatTypeToIndex(COMBAT_DEATHDAMAGE)] += value;
break;
}
case ITEM_PARSE_ABSORBPERCENTENERGY: {
abilities.absorbPercent[combatTypeToIndex(COMBAT_ENERGYDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_ABSORBPERCENTFIRE: {
abilities.absorbPercent[combatTypeToIndex(COMBAT_FIREDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_ABSORBPERCENTPOISON: {
abilities.absorbPercent[combatTypeToIndex(COMBAT_EARTHDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_ABSORBPERCENTICE: {
abilities.absorbPercent[combatTypeToIndex(COMBAT_ICEDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_ABSORBPERCENTHOLY: {
abilities.absorbPercent[combatTypeToIndex(COMBAT_HOLYDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_ABSORBPERCENTDEATH: {
abilities.absorbPercent[combatTypeToIndex(COMBAT_DEATHDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_ABSORBPERCENTLIFEDRAIN: {
abilities.absorbPercent[combatTypeToIndex(COMBAT_LIFEDRAIN)] += pugi::cast<int16_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_ABSORBPERCENTMANADRAIN: {
abilities.absorbPercent[combatTypeToIndex(COMBAT_MANADRAIN)] += pugi::cast<int16_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_ABSORBPERCENTDROWN: {
abilities.absorbPercent[combatTypeToIndex(COMBAT_DROWNDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_ABSORBPERCENTPHYSICAL: {
abilities.absorbPercent[combatTypeToIndex(COMBAT_PHYSICALDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_ABSORBPERCENTHEALING: {
abilities.absorbPercent[combatTypeToIndex(COMBAT_HEALING)] += pugi::cast<int16_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_ABSORBPERCENTUNDEFINED: {
abilities.absorbPercent[combatTypeToIndex(COMBAT_UNDEFINEDDAMAGE)] += pugi::cast<int16_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_SUPPRESSDRUNK: {
if (valueAttribute.as_bool()) {
abilities.conditionSuppressions |= CONDITION_DRUNK;
}
break;
}
case ITEM_PARSE_SUPPRESSENERGY: {
if (valueAttribute.as_bool()) {
abilities.conditionSuppressions |= CONDITION_ENERGY;
}
break;
}
case ITEM_PARSE_SUPPRESSFIRE: {
if (valueAttribute.as_bool()) {
abilities.conditionSuppressions |= CONDITION_FIRE;
}
break;
}
case ITEM_PARSE_SUPPRESSPOISON: {
if (valueAttribute.as_bool()) {
abilities.conditionSuppressions |= CONDITION_POISON;
}
break;
}
case ITEM_PARSE_SUPPRESSDROWN: {
if (valueAttribute.as_bool()) {
abilities.conditionSuppressions |= CONDITION_DROWN;
}
break;
}
case ITEM_PARSE_SUPPRESSPHYSICAL: {
if (valueAttribute.as_bool()) {
abilities.conditionSuppressions |= CONDITION_BLEEDING;
}
break;
}
case ITEM_PARSE_SUPPRESSFREEZE: {
if (valueAttribute.as_bool()) {
abilities.conditionSuppressions |= CONDITION_FREEZING;
}
break;
}
case ITEM_PARSE_SUPPRESSDAZZLE: {
if (valueAttribute.as_bool()) {
abilities.conditionSuppressions |= CONDITION_DAZZLED;
}
break;
}
case ITEM_PARSE_SUPPRESSCURSE: {
if (valueAttribute.as_bool()) {
abilities.conditionSuppressions |= CONDITION_CURSED;
}
break;
}
case ITEM_PARSE_FIELD: {
it.group = ITEM_GROUP_MAGICFIELD;
it.type = ITEM_TYPE_MAGICFIELD;
CombatType_t combatType = COMBAT_NONE;
ConditionDamage* conditionDamage = nullptr;
tmpStrValue = asLowerCaseString(valueAttribute.as_string());
if (tmpStrValue == "fire") {
conditionDamage = new ConditionDamage(CONDITIONID_COMBAT, CONDITION_FIRE);
combatType = COMBAT_FIREDAMAGE;
} else if (tmpStrValue == "energy") {
conditionDamage = new ConditionDamage(CONDITIONID_COMBAT, CONDITION_ENERGY);
combatType = COMBAT_ENERGYDAMAGE;
} else if (tmpStrValue == "poison") {
conditionDamage = new ConditionDamage(CONDITIONID_COMBAT, CONDITION_POISON);
combatType = COMBAT_EARTHDAMAGE;
} else if (tmpStrValue == "drown") {
conditionDamage = new ConditionDamage(CONDITIONID_COMBAT, CONDITION_DROWN);
combatType = COMBAT_DROWNDAMAGE;
} else if (tmpStrValue == "physical") {
conditionDamage = new ConditionDamage(CONDITIONID_COMBAT, CONDITION_BLEEDING);
combatType = COMBAT_PHYSICALDAMAGE;
} else {
std::cout << "[Warning - Items::parseItemNode] Unknown field value: " << valueAttribute.as_string() << std::endl;
}
if (combatType != COMBAT_NONE) {
it.combatType = combatType;
it.conditionDamage.reset(conditionDamage);
uint32_t ticks = 0;
int32_t start = 0;
int32_t count = 1;
int32_t initDamage = -1;
int32_t damage = 0;
for (auto subAttributeNode : attributeNode.children()) {
pugi::xml_attribute subKeyAttribute = subAttributeNode.attribute("key");
if (!subKeyAttribute) {
continue;
}
pugi::xml_attribute subValueAttribute = subAttributeNode.attribute("value");
if (!subValueAttribute) {
continue;
}
tmpStrValue = asLowerCaseString(subKeyAttribute.as_string());
if (tmpStrValue == "initdamage") {
initDamage = pugi::cast<int32_t>(subValueAttribute.value());
} else if (tmpStrValue == "ticks") {
ticks = pugi::cast<uint32_t>(subValueAttribute.value());
} else if (tmpStrValue == "count") {
count = std::max<int32_t>(1, pugi::cast<int32_t>(subValueAttribute.value()));
} else if (tmpStrValue == "start") {
start = std::max<int32_t>(0, pugi::cast<int32_t>(subValueAttribute.value()));
} else if (tmpStrValue == "damage") {
damage = -pugi::cast<int32_t>(subValueAttribute.value());
if (start > 0) {
std::list<int32_t> damageList;
ConditionDamage::generateDamageList(damage, start, damageList);
for (int32_t damageValue : damageList) {
conditionDamage->addDamage(1, ticks, -damageValue);
}
start = 0;
} else {
conditionDamage->addDamage(count, ticks, damage);
}
}
}
						// datapack compatibility: presume damage to be initDamage if initDamage is not declared.
						// initDamage = 0 (don't override initDamage with damage, don't set any initDamage)
						// initDamage = -1 (undefined, override initDamage with damage)
if (initDamage > 0 || initDamage < -1) {
conditionDamage->setInitDamage(-initDamage);
} else if (initDamage == -1 && damage != 0) {
conditionDamage->setInitDamage(damage);
}
conditionDamage->setParam(CONDITION_PARAM_FIELD, 1);
if (conditionDamage->getTotalDamage() > 0) {
conditionDamage->setParam(CONDITION_PARAM_FORCEUPDATE, 1);
}
}
break;
}
case ITEM_PARSE_REPLACEABLE: {
it.replaceable = valueAttribute.as_bool();
break;
}
case ITEM_PARSE_PARTNERDIRECTION: {
it.bedPartnerDir = getDirection(valueAttribute.as_string());
break;
}
case ITEM_PARSE_LEVELDOOR: {
it.levelDoor = pugi::cast<uint32_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_MALETRANSFORMTO: {
uint16_t value = pugi::cast<uint16_t>(valueAttribute.value());
it.transformToOnUse[PLAYERSEX_MALE] = value;
ItemType& other = getItemType(value);
if (other.transformToFree == 0) {
other.transformToFree = it.id;
}
if (it.transformToOnUse[PLAYERSEX_FEMALE] == 0) {
it.transformToOnUse[PLAYERSEX_FEMALE] = value;
}
break;
}
case ITEM_PARSE_FEMALETRANSFORMTO: {
uint16_t value = pugi::cast<uint16_t>(valueAttribute.value());
it.transformToOnUse[PLAYERSEX_FEMALE] = value;
ItemType& other = getItemType(value);
if (other.transformToFree == 0) {
other.transformToFree = it.id;
}
if (it.transformToOnUse[PLAYERSEX_MALE] == 0) {
it.transformToOnUse[PLAYERSEX_MALE] = value;
}
break;
}
case ITEM_PARSE_TRANSFORMTO: {
it.transformToFree = pugi::cast<uint16_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_DESTROYTO: {
it.destroyTo = pugi::cast<uint16_t>(valueAttribute.value());
break;
}
case ITEM_PARSE_ELEMENTICE: {
abilities.elementDamage = pugi::cast<uint16_t>(valueAttribute.value());
abilities.elementType = COMBAT_ICEDAMAGE;
break;
}
case ITEM_PARSE_ELEMENTEARTH: {
abilities.elementDamage = pugi::cast<uint16_t>(valueAttribute.value());
abilities.elementType = COMBAT_EARTHDAMAGE;
break;
}
case ITEM_PARSE_ELEMENTFIRE: {
abilities.elementDamage = pugi::cast<uint16_t>(valueAttribute.value());
abilities.elementType = COMBAT_FIREDAMAGE;
break;
}
case ITEM_PARSE_ELEMENTENERGY: {
abilities.elementDamage = pugi::cast<uint16_t>(valueAttribute.value());
abilities.elementType = COMBAT_ENERGYDAMAGE;
break;
}
case ITEM_PARSE_ELEMENTDEATH: {
abilities.elementDamage = pugi::cast<uint16_t>(valueAttribute.value());
abilities.elementType = COMBAT_DEATHDAMAGE;
break;
}
case ITEM_PARSE_ELEMENTHOLY: {
abilities.elementDamage = pugi::cast<uint16_t>(valueAttribute.value());
abilities.elementType = COMBAT_HOLYDAMAGE;
break;
}
case ITEM_PARSE_WALKSTACK: {
it.walkStack = valueAttribute.as_bool();
break;
}
case ITEM_PARSE_BLOCKING: {
it.blockSolid = valueAttribute.as_bool();
break;
}
case ITEM_PARSE_ALLOWDISTREAD: {
it.allowDistRead = booleanString(valueAttribute.as_string());
break;
}
case ITEM_PARSE_STOREITEM: {
it.storeItem = booleanString(valueAttribute.as_string());
break;
}
default: {
					// This should never be reached; it only happens if a key is added to the map without a matching case.
std::cout << "[Warning - Items::parseItemNode] Not configured key value: " << keyAttribute.as_string() << std::endl;
break;
}
}
} else {
std::cout << "[Warning - Items::parseItemNode] Unknown key value: " << keyAttribute.as_string() << std::endl;
}
}
//check bed items
if ((it.transformToFree != 0 || it.transformToOnUse[PLAYERSEX_FEMALE] != 0 || it.transformToOnUse[PLAYERSEX_MALE] != 0) && it.type != ITEM_TYPE_BED) {
std::cout << "[Warning - Items::parseItemNode] Item " << it.id << " is not set as a bed-type" << std::endl;
}
}

ItemType& Items::getItemType(size_t id)
{
if (id < items.size()) {
return items[id];
}
return items.front();
}

const ItemType& Items::getItemType(size_t id) const
{
if (id < items.size()) {
return items[id];
}
return items.front();
}

const ItemType& Items::getItemIdByClientId(uint16_t spriteId) const
{
if (spriteId >= 100) {
if (uint16_t serverId = clientIdToServerIdMap.getServerId(spriteId)) {
return getItemType(serverId);
}
}
return items.front();
}

uint16_t Items::getItemIdByName(const std::string& name)
{
	auto result = nameToItems.find(asLowerCaseString(name));
	if (result == nameToItems.end()) {
		return 0;
	}
	return result->second;
}
| 1 | 17,967 | id is unsigned | otland-forgottenserver | cpp |
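The review message above, "id is unsigned", is terse, and the patch it refers to is not reproduced in this row. Assuming it concerns the unsigned arithmetic around the 30000-range remapping in parseItemNode and loadFromOtb (id -= 30000, guarded by id > 30000 && id < 30100), the sketch below shows why the guard has to come before the subtraction: uint16_t arithmetic wraps modulo 65536 rather than going negative. The demoId variable is illustrative and not part of items.cpp.

#include <cstdint>
#include <iostream>

int main() {
	// demoId is a hypothetical item id below the remap window.
	uint16_t demoId = 100;

	// parseItemNode subtracts only inside the (id > 30000 && id < 30100) guard.
	// Without that guard, unsigned subtraction wraps around:
	demoId -= 30000;              // 100 - 30000 == 35636 (mod 65536)
	std::cout << demoId << '\n';  // prints 35636, never -29900

	// For the same reason, a post-subtraction check such as id < 0
	// can never fire on an unsigned type.
	return 0;
}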
@@ -13,6 +13,7 @@ Puppet::Functions.create_function(:run_command) do
# @param options A hash of additional options.
# @option options [Boolean] _catch_errors Whether to catch raised errors.
# @option options [String] _run_as User to run as using privilege escalation.
+  # @option options [Hash] _env_vars Map of environment variables to set.
# @return A list of results, one entry per target.
# @example Run a command on targets
  #   run_command('hostname', $targets, '_catch_errors' => true)
| 1 |
# frozen_string_literal: true

require 'bolt/error'

# Runs a command on the given set of targets and returns the result from each command execution.
# This function does nothing if the list of targets is empty.
#
# > **Note:** Not available in apply block
Puppet::Functions.create_function(:run_command) do
# Run a command.
# @param command A command to run on target.
# @param targets A pattern identifying zero or more targets. See {get_targets} for accepted patterns.
# @param options A hash of additional options.
# @option options [Boolean] _catch_errors Whether to catch raised errors.
# @option options [String] _run_as User to run as using privilege escalation.
# @return A list of results, one entry per target.
# @example Run a command on targets
# run_command('hostname', $targets, '_catch_errors' => true)
dispatch :run_command do
param 'String[1]', :command
param 'Boltlib::TargetSpec', :targets
optional_param 'Hash[String[1], Any]', :options
return_type 'ResultSet'
end

  # Run a command, logging the provided description.
# @param command A command to run on target.
# @param targets A pattern identifying zero or more targets. See {get_targets} for accepted patterns.
# @param description A description to be output when calling this function.
# @param options A hash of additional options.
# @option options [Boolean] _catch_errors Whether to catch raised errors.
# @option options [String] _run_as User to run as using privilege escalation.
# @return A list of results, one entry per target.
# @example Run a command on targets
# run_command('hostname', $targets, 'Get hostname')
dispatch :run_command_with_description do
param 'String[1]', :command
param 'Boltlib::TargetSpec', :targets
param 'String', :description
optional_param 'Hash[String[1], Any]', :options
return_type 'ResultSet'
end

  def run_command(command, targets, options = {})
run_command_with_description(command, targets, nil, options)
end

  def run_command_with_description(command, targets, description = nil, options = {})
unless Puppet[:tasks]
raise Puppet::ParseErrorWithIssue
.from_issue_and_stack(Bolt::PAL::Issues::PLAN_OPERATION_NOT_SUPPORTED_WHEN_COMPILING, action: 'run_command')
end
options = options.transform_keys { |k| k.sub(/^_/, '').to_sym }
options[:description] = description if description
executor = Puppet.lookup(:bolt_executor)
inventory = Puppet.lookup(:bolt_inventory)
# Send Analytics Report
executor.report_function_call(self.class.name)
# Ensure that given targets are all Target instances
targets = inventory.get_targets(targets)
if targets.empty?
call_function('debug', "Simulating run_command('#{command}') - no targets given - no action taken")
r = Bolt::ResultSet.new([])
else
r = executor.run_command(targets, command, options)
end
if !r.ok && !options[:catch_errors]
raise Bolt::RunFailure.new(r, 'run_command', command)
end
r
end
end
| 1 | 15,232 | It looks like Puppet doesn't provide type validation for options and we don't provide it ourselves as part of the function. Is that something we'd want to do at some point? | puppetlabs-bolt | rb |
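The review message above asks whether the function could validate option types itself. One possible sketch (not Bolt's shipped code, which keeps the permissive Hash[String[1], Any] signature) is to tighten the dispatch with a Puppet Struct type, so unknown keys and mistyped values fail at dispatch time. The function name run_command_strict is hypothetical.

# frozen_string_literal: true

# Hypothetical variant, not part of Bolt: a dispatch whose options
# parameter is a closed Struct instead of Hash[String[1], Any].
Puppet::Functions.create_function(:run_command_strict) do
  dispatch :run_command_strict do
    param 'String[1]', :command
    param 'Boltlib::TargetSpec', :targets
    # Struct rejects unknown keys and enforces each known option's type.
    optional_param "Struct[{Optional['_catch_errors'] => Boolean, Optional['_run_as'] => String[1], Optional['_env_vars'] => Hash[String[1], String]}]", :options
    return_type 'ResultSet'
  end

  def run_command_strict(command, targets, options = {})
    # Delegate to the real function once options have passed type checking.
    call_function('run_command', command, targets, options)
  end
end

A trade-off, and perhaps why the shipped function stays permissive, is that a closed Struct must be extended for every new metaparameter: the sketch already has to list the _env_vars option that this row's patch introduces.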