// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
bitfield::bitfield,
failure::{bail, format_err, Error, ResultExt},
fidl::endpoints::ClientEnd,
fidl_fuchsia_media::{
AudioSampleFormat, AudioStreamType, MediumSpecificStreamType, SimpleStreamSinkProxy,
StreamPacket, StreamType, AUDIO_ENCODING_SBC, NO_TIMESTAMP,
STREAM_PACKET_FLAG_DISCONTINUITY,
},
fidl_fuchsia_media_playback::{
PlayerEvent, PlayerEventStream, PlayerMarker, PlayerProxy, SourceMarker,
},
fuchsia_zircon as zx,
futures::{stream, StreamExt},
};
const DEFAULT_BUFFER_LEN: usize = 65536;
/// Players are configured and accept media frames, which are sent to the
/// media subsystem.
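/// Typical usage (a sketch): create a `Player` with `Player::new`, feed each
/// incoming A2DP media packet to `push_payload`, and control playback with
/// `play` / `pause`.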
pub struct Player {
buffer: zx::Vmo,
buffer_len: usize,
codec: String,
current_offset: usize,
stream_source: SimpleStreamSinkProxy,
player: PlayerProxy,
events: PlayerEventStream,
playing: bool,
next_packet_flags: u32,
}
#[derive(Debug, PartialEq)]
enum ChannelMode {
Mono,
DualChannel,
Stereo,
JointStereo,
}
impl From<u8> for ChannelMode {
fn from(bits: u8) -> Self {
match bits {
0 => ChannelMode::Mono,
1 => ChannelMode::DualChannel,
2 => ChannelMode::Stereo,
3 => ChannelMode::JointStereo,
_ => panic!("invalid channel mode"),
}
}
}
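// The SBC frame header, viewed as a little-endian u32: interpreting the four
// header octets with `Player::as_u32_le` puts the syncword (the first octet on
// the wire) in bits 7..0.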
bitfield! {
pub struct SbcHeader(u32);
impl Debug;
u8;
syncword, _: 7, 0;
subbands, _: 8;
allocation_method, _: 9;
into ChannelMode, channel_mode, _: 11, 10;
blocks_bits, _: 13, 12;
frequency_bits, _: 15, 14;
bitpool_bits, _: 23, 16;
crccheck, _: 31, 24;
}
impl SbcHeader {
/// The number of channels, based on the channel mode in the header.
/// From Table 12.18 in the A2DP Spec.
fn channels(&self) -> usize {
match self.channel_mode() {
ChannelMode::Mono => 1,
_ => 2,
}
}
fn has_syncword(&self) -> bool {
const SBC_SYNCWORD: u8 = 0x9c;
self.syncword() == SBC_SYNCWORD
}
    /// The number of blocks, based on the bits in the header.
/// From Table 12.17 in the A2DP Spec.
fn blocks(&self) -> usize {
4 * (self.blocks_bits() + 1) as usize
}
fn bitpool(&self) -> usize {
self.bitpool_bits() as usize
}
/// Number of subbands based on the header bit.
/// From Table 12.20 in the A2DP Spec.
fn num_subbands(&self) -> usize {
if self.subbands() {
8
} else {
4
}
}
/// Calculates the frame length.
/// Formula from Section 12.9 of the A2DP Spec.
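    /// e.g. for joint stereo with 8 subbands, 16 blocks, and bitpool 53 (as in
    /// the tests below): 4 + (4 * 8 * 2) / 8 + ceil((8 + 16 * 53) / 8) =
    /// 12 + 107 = 119 bytes.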
fn frame_length(&self) -> Result<usize, Error> {
if !self.has_syncword() {
return Err(format_err!("syncword does not match"));
}
let len = 4 + (4 * self.num_subbands() * self.channels()) / 8;
let rest = (match self.channel_mode() {
ChannelMode::Mono | ChannelMode::DualChannel => {
self.blocks() * self.channels() * self.bitpool()
}
ChannelMode::Stereo => self.blocks() * self.bitpool(),
ChannelMode::JointStereo => self.num_subbands() + (self.blocks() * self.bitpool()),
} as f64
/ 8.0)
.ceil() as usize;
Ok(len + rest)
}
}
impl Player {
    /// Attempt to make a new player that decodes and plays frames encoded in the
    /// `codec`.
// TODO(jamuraa): add encoding parameters for this (SetConfiguration)
pub async fn new(codec: String) -> Result<Player, Error> {
let player = fuchsia_component::client::connect_to_service::<PlayerMarker>()
.context("Failed to connect to media player")?;
let (source_client, source) = fidl::endpoints::create_endpoints()?;
let source_proxy = source_client.into_proxy()?;
player.create_elementary_source(0, false, false, None, source)?;
let audio_stream_type = AudioStreamType {
sample_format: AudioSampleFormat::Signed16,
channels: 2, // Stereo
frames_per_second: 44100, // 44.1kHz
};
let mut stream_type = StreamType {
medium_specific: MediumSpecificStreamType::Audio(audio_stream_type),
encoding: codec.clone(),
encoding_parameters: None,
};
let (stream_source, stream_source_sink) = fidl::endpoints::create_endpoints()?;
let stream_source = stream_source.into_proxy()?;
source_proxy.add_stream(&mut stream_type, 44100, 1, stream_source_sink)?;
// TODO: vmar map this for faster access.
let buffer = zx::Vmo::create(DEFAULT_BUFFER_LEN as u64)?;
stream_source.add_payload_buffer(
0,
buffer.create_child(
zx::VmoChildOptions::COPY_ON_WRITE,
0,
DEFAULT_BUFFER_LEN as u64,
)?,
)?;
let mut player_event_stream = player.take_event_stream();
let source_client_channel = source_proxy.into_channel().unwrap().into_zx_channel();
let upcasted_source = ClientEnd::<SourceMarker>::new(source_client_channel);
player.set_source(Some(upcasted_source))?;
// We should be able to wait until either
// (1) audio is connected or
// (2) there is a Problem.
loop {
let x = player_event_stream.next().await;
if x.is_none() {
// The player closed the event stream, something is wrong.
return Err(format_err!("MediaPlayer closed"));
}
let evt = x.unwrap();
if evt.is_err() {
return Err(evt.unwrap_err().into());
}
let PlayerEvent::OnStatusChanged { player_status } = evt.unwrap();
if let Some(problem) = player_status.problem {
return Err(format_err!(
"Problem setting up: {} - {:?}",
problem.type_,
problem.details
));
}
if player_status.audio_connected {
break;
}
}
Ok(Player {
buffer,
buffer_len: DEFAULT_BUFFER_LEN,
codec,
stream_source,
player,
events: player_event_stream,
current_offset: 0,
playing: false,
next_packet_flags: 0,
})
}
    /// Interpret the first four octets of the slice in `bytes` as a little-endian u32.
    /// Panics if the slice is not at least four octets long.
fn as_u32_le(bytes: &[u8]) -> u32 {
((bytes[3] as u32) << 24)
+ ((bytes[2] as u32) << 16)
+ ((bytes[1] as u32) << 8)
+ ((bytes[0] as u32) << 0)
}
/// Given a buffer with an SBC frame at the start, find the length of the
/// SBC frame.
fn find_sbc_frame_len(buf: &[u8]) -> Result<usize, Error> {
if buf.len() < 4 {
return Err(format_err!("Buffer too short for header"));
}
SbcHeader(Player::as_u32_le(&buf[0..4])).frame_length()
}
/// Accepts a payload which may contain multiple frames and breaks it into
/// frames and sends it to media.
pub fn push_payload(&mut self, payload: &[u8]) -> Result<(), Error> {
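        // Start after the 12-byte RTP header and the one-byte SBC media payload
        // header that precede the frames in an A2DP media packet.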
let mut offset = 13;
while offset < payload.len() {
if self.codec == AUDIO_ENCODING_SBC {
let len = Player::find_sbc_frame_len(&payload[offset..]).or_else(|e| {
self.next_packet_flags |= STREAM_PACKET_FLAG_DISCONTINUITY;
Err(e)
})?;
if offset + len > payload.len() {
self.next_packet_flags |= STREAM_PACKET_FLAG_DISCONTINUITY;
return Err(format_err!("Ran out of buffer for SBC frame"));
}
self.send_frame(&payload[offset..offset + len])?;
offset += len;
} else {
return Err(format_err!("Unrecognized codec!"));
}
}
Ok(())
}
/// Push an encoded media frame into the buffer and signal that it's there to media.
pub fn send_frame(&mut self, frame: &[u8]) -> Result<(), Error> {
if frame.len() > self.buffer_len {
self.stream_source.end_of_stream()?;
bail!("frame is too large for buffer");
}
if self.current_offset + frame.len() > self.buffer_len {
self.current_offset = 0;
}
self.buffer.write(frame, self.current_offset as u64)?;
let mut packet = StreamPacket {
pts: NO_TIMESTAMP,
payload_buffer_id: 0,
payload_offset: self.current_offset as u64,
payload_size: frame.len() as u64,
buffer_config: 0,
flags: self.next_packet_flags,
stream_segment_id: 0,
};
self.stream_source.send_packet_no_reply(&mut packet)?;
self.current_offset += frame.len();
self.next_packet_flags = 0;
Ok(())
}
pub fn playing(&self) -> bool {
self.playing
}
pub fn play(&mut self) -> Result<(), Error> {
self.player.play()?;
self.playing = true;
Ok(())
}
pub fn pause(&mut self) -> Result<(), Error> {
self.player.pause()?;
self.playing = false;
Ok(())
}
pub fn next_event<'a>(&'a mut self) -> stream::Next<PlayerEventStream> {
self.events.next()
}
}
impl Drop for Player {
fn drop(&mut self) {
        self.pause().unwrap_or_else(|e| println!("Error in drop: {}", e));
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_frame_length() {
// 44.1, 16 blocks, Joint Stereo, Loudness, 8 subbands, 53 bitpool (Android P)
let header1 = [0x9c, 0xBD, 0x35, 0xA2];
const HEADER1_FRAMELEN: usize = 119;
let head = SbcHeader(Player::as_u32_le(&header1));
assert!(head.has_syncword());
assert_eq!(16, head.blocks());
assert_eq!(ChannelMode::JointStereo, head.channel_mode());
assert_eq!(2, head.channels());
assert_eq!(53, head.bitpool());
assert_eq!(HEADER1_FRAMELEN, head.frame_length().unwrap());
assert_eq!(
HEADER1_FRAMELEN,
Player::find_sbc_frame_len(&[0x9c, 0xBD, 0x35, 0xA2]).unwrap()
);
// 44.1, 16 blocks, Stereo, Loudness, 8 subbands, 53 bitpool (OS X)
let header2 = [0x9c, 0xB9, 0x35, 0xA2];
const HEADER2_FRAMELEN: usize = 118;
let head = SbcHeader(Player::as_u32_le(&header2));
assert!(head.has_syncword());
assert_eq!(16, head.blocks());
assert_eq!(ChannelMode::Stereo, head.channel_mode());
assert_eq!(2, head.channels());
assert_eq!(53, head.bitpool());
assert_eq!(HEADER2_FRAMELEN, head.frame_length().unwrap());
assert_eq!(HEADER2_FRAMELEN, Player::find_sbc_frame_len(&header2).unwrap());
}
#[test]
#[should_panic(expected = "out of bounds")]
fn test_as_u32_le_len() {
let _ = Player::as_u32_le(&[0, 1, 2]);
}
#[test]
fn test_as_u32_le() {
assert_eq!(1, Player::as_u32_le(&[1, 0, 0, 0]));
assert_eq!(0xff00ff00, Player::as_u32_le(&[0, 0xff, 0, 0xff]));
assert_eq!(0xffffffff, Player::as_u32_le(&[0xff, 0xff, 0xff, 0xff]));
}
}
//! Call Management functional descriptor
/// Call Management functional descriptor
#[allow(non_snake_case)]
#[derive(Clone, Copy)]
pub struct Desc {
// bFunctionLength: u8,
// bDescriptorType: u8,
// bDescriptorSubtype: u8,
/// Capabilities
pub bmCapabilities: Capabilities,
/// Interface of the Data Class interface
pub bDataInterface: u8,
}
/// Capabilities
#[derive(Clone, Copy)]
pub struct Capabilities {
/// Device handles call management itself
pub call_management: bool,
/// Device can send/receive call management information over a Data Class interface
pub data_class: bool,
}
impl Capabilities {
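    /// Packs the capabilities into the `bmCapabilities` bitmap: bit 0 is call
    /// management, bit 1 is data class. For example, both capabilities set
    /// yields `0b11`.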
fn byte(&self) -> u8 {
let mut byte = 0;
if self.call_management {
byte |= 1 << 0;
}
if self.data_class {
byte |= 1 << 1;
}
byte
}
}
impl Desc {
/// Size of this descriptor on the wire
pub const SIZE: u8 = 5;
/// Returns the wire representation of this device endpoint
pub fn bytes(&self) -> [u8; Self::SIZE as usize] {
[
Self::SIZE,
super::CS_INTERFACE,
super::SUBTYPE_CALL,
self.bmCapabilities.byte(),
self.bDataInterface,
]
}
}
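// A minimal sanity check of the wire layout (a sketch, not part of the
// original module; it relies only on the fields and constants defined above).
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn desc_bytes_layout() {
        let desc = Desc {
            bmCapabilities: Capabilities { call_management: true, data_class: true },
            bDataInterface: 1,
        };
        let bytes = desc.bytes();
        // Length-prefixed: 5 bytes on the wire.
        assert_eq!(bytes[0], Desc::SIZE);
        // Both capability bits set.
        assert_eq!(bytes[3], 0b11);
        // The data interface number comes last.
        assert_eq!(bytes[4], 1);
    }
}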
//! HIR (previously known as descriptors) provides a high-level object oriented
//! access to Rust code.
//!
//! The principal difference between HIR and syntax trees is that HIR is bound
//! to a particular crate instance. That is, it has cfg flags and features
//! applied. So, the relation between syntax and HIR is many-to-one.
//!
//! HIR is the public API of all of the compiler logic above syntax trees.
//! It is written in "OO" style. Each type is self-contained (as in, it knows its
//! parents and full context). It should be "clean code".
//!
//! `hir_*` crates are the implementation of the compiler logic.
//! They are written in "ECS" style, with relatively little abstractions.
//! Many types are not self-contained, and explicitly use local indexes, arenas, etc.
//!
//! `hir` is what insulates the "we don't know how to actually write an incremental compiler"
//! from the ide with completions, hovers, etc. It is a (soft, internal) boundary:
//! <https://www.tedinski.com/2018/02/06/system-boundaries.html>.
#![recursion_limit = "512"]
mod semantics;
mod source_analyzer;
mod from_id;
mod attrs;
mod has_source;
pub mod diagnostics;
pub mod db;
mod display;
use std::{iter, ops::ControlFlow, sync::Arc};
use arrayvec::ArrayVec;
use base_db::{CrateDisplayName, CrateId, CrateOrigin, Edition, FileId};
use either::Either;
use hir_def::{
adt::{ReprKind, VariantData},
body::{BodyDiagnostic, SyntheticSyntax},
expr::{BindingAnnotation, LabelId, Pat, PatId},
lang_item::LangItemTarget,
nameres,
per_ns::PerNs,
resolver::{HasResolver, Resolver},
AttrDefId, ConstId, ConstParamId, EnumId, FunctionId, GenericDefId, HasModule, LifetimeParamId,
LocalEnumVariantId, LocalFieldId, StaticId, StructId, TypeAliasId, TypeParamId, UnionId,
};
use hir_expand::{name::name, MacroCallKind, MacroDefKind};
use hir_ty::{
autoderef,
consteval::ConstExt,
could_unify,
diagnostics::BodyValidationDiagnostic,
method_resolution::{self, TyFingerprint},
primitive::UintTy,
subst_prefix,
traits::FnTrait,
AliasEq, AliasTy, BoundVar, CallableDefId, CallableSig, Canonical, CanonicalVarKinds, Cast,
DebruijnIndex, InEnvironment, Interner, QuantifiedWhereClause, Scalar, Solution, Substitution,
TraitEnvironment, TraitRefExt, Ty, TyBuilder, TyDefId, TyExt, TyKind, TyVariableKind,
WhereClause,
};
use itertools::Itertools;
use nameres::diagnostics::DefDiagnosticKind;
use once_cell::unsync::Lazy;
use rustc_hash::FxHashSet;
use stdx::{format_to, impl_from};
use syntax::{
ast::{self, HasAttrs as _, HasName},
AstNode, AstPtr, SmolStr, SyntaxKind, SyntaxNodePtr,
};
use tt::{Ident, Leaf, Literal, TokenTree};
use crate::db::{DefDatabase, HirDatabase};
pub use crate::{
attrs::{HasAttrs, Namespace},
diagnostics::{
AddReferenceHere, AnyDiagnostic, BreakOutsideOfLoop, InactiveCode, IncorrectCase,
InvalidDeriveTarget, MacroError, MalformedDerive, MismatchedArgCount, MissingFields,
MissingMatchArms, MissingOkOrSomeInTailExpr, MissingUnsafe, NoSuchField,
RemoveThisSemicolon, ReplaceFilterMapNextWithFindMap, UnimplementedBuiltinMacro,
UnresolvedExternCrate, UnresolvedImport, UnresolvedMacroCall, UnresolvedModule,
UnresolvedProcMacro,
},
has_source::HasSource,
semantics::{PathResolution, Semantics, SemanticsScope, TypeInfo},
};
// Be careful with these re-exports.
//
// `hir` is the boundary between the compiler and the IDE. It should try hard to
// isolate the compiler from the ide, to allow the two to be refactored
// independently. Re-exporting something from the compiler is the sure way to
// breach the boundary.
//
// Generally, a refactoring which *removes* a name from this list is a good
// idea!
pub use {
cfg::{CfgAtom, CfgExpr, CfgOptions},
hir_def::{
adt::StructKind,
attr::{Attr, Attrs, AttrsWithOwner, Documentation},
builtin_attr::AttributeTemplate,
find_path::PrefixKind,
import_map,
item_scope::ItemScope,
item_tree::ItemTreeNode,
nameres::{DefMap, ModuleData, ModuleOrigin, ModuleSource},
path::{ModPath, PathKind},
src::HasSource as DefHasSource, // xx: I don't like this shadowing of HasSource... :(
type_ref::{Mutability, TypeRef},
visibility::Visibility,
AdtId,
AssocItemId,
AssocItemLoc,
DefWithBodyId,
ImplId,
ItemContainerId,
ItemLoc,
Lookup,
ModuleDefId,
ModuleId,
TraitId,
},
hir_expand::{
name::{known, Name},
ExpandResult, HirFileId, InFile, MacroDefId, MacroFile, Origin,
},
hir_ty::display::HirDisplay,
};
// These are negative re-exports: pub using these names is forbidden, they
// should remain private to hir internals.
#[allow(unused)]
use {
hir_def::path::Path,
hir_expand::{hygiene::Hygiene, name::AsName},
};
/// hir::Crate describes a single crate. It's the main interface with which
/// a crate's dependencies interact. Mostly, it should be just a proxy for the
/// root module.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Crate {
pub(crate) id: CrateId,
}
#[derive(Debug)]
pub struct CrateDependency {
pub krate: Crate,
pub name: Name,
}
impl Crate {
pub fn origin(self, db: &dyn HirDatabase) -> CrateOrigin {
db.crate_graph()[self.id].origin.clone()
}
pub fn dependencies(self, db: &dyn HirDatabase) -> Vec<CrateDependency> {
db.crate_graph()[self.id]
.dependencies
.iter()
.map(|dep| {
let krate = Crate { id: dep.crate_id };
let name = dep.as_name();
CrateDependency { krate, name }
})
.collect()
}
pub fn reverse_dependencies(self, db: &dyn HirDatabase) -> Vec<Crate> {
let crate_graph = db.crate_graph();
crate_graph
.iter()
.filter(|&krate| {
crate_graph[krate].dependencies.iter().any(|it| it.crate_id == self.id)
})
.map(|id| Crate { id })
.collect()
}
pub fn transitive_reverse_dependencies(self, db: &dyn HirDatabase) -> Vec<Crate> {
db.crate_graph().transitive_rev_deps(self.id).into_iter().map(|id| Crate { id }).collect()
}
pub fn root_module(self, db: &dyn HirDatabase) -> Module {
let def_map = db.crate_def_map(self.id);
Module { id: def_map.module_id(def_map.root()) }
}
pub fn root_file(self, db: &dyn HirDatabase) -> FileId {
db.crate_graph()[self.id].root_file_id
}
pub fn edition(self, db: &dyn HirDatabase) -> Edition {
db.crate_graph()[self.id].edition
}
pub fn version(self, db: &dyn HirDatabase) -> Option<String> {
db.crate_graph()[self.id].version.clone()
}
pub fn display_name(self, db: &dyn HirDatabase) -> Option<CrateDisplayName> {
db.crate_graph()[self.id].display_name.clone()
}
pub fn query_external_importables(
self,
db: &dyn DefDatabase,
query: import_map::Query,
) -> impl Iterator<Item = Either<ModuleDef, MacroDef>> {
let _p = profile::span("query_external_importables");
import_map::search_dependencies(db, self.into(), query).into_iter().map(|item| {
match ItemInNs::from(item) {
ItemInNs::Types(mod_id) | ItemInNs::Values(mod_id) => Either::Left(mod_id),
ItemInNs::Macros(mac_id) => Either::Right(mac_id),
}
})
}
pub fn all(db: &dyn HirDatabase) -> Vec<Crate> {
db.crate_graph().iter().map(|id| Crate { id }).collect()
}
/// Try to get the root URL of the documentation of a crate.
pub fn get_html_root_url(self: &Crate, db: &dyn HirDatabase) -> Option<String> {
// Look for #![doc(html_root_url = "...")]
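        // e.g. #![doc(html_root_url = "https://docs.rs/foo/1.2.3")] yields
        // Some("https://docs.rs/foo/1.2.3/").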
let attrs = db.attrs(AttrDefId::ModuleId(self.root_module(db).into()));
let doc_attr_q = attrs.by_key("doc");
if !doc_attr_q.exists() {
return None;
}
let doc_url = doc_attr_q.tt_values().map(|tt| {
let name = tt.token_trees.iter()
.skip_while(|tt| !matches!(tt, TokenTree::Leaf(Leaf::Ident(Ident { text, ..} )) if text == "html_root_url"))
.nth(2);
match name {
Some(TokenTree::Leaf(Leaf::Literal(Literal{ref text, ..}))) => Some(text),
_ => None
}
}).flatten().next();
doc_url.map(|s| s.trim_matches('"').trim_end_matches('/').to_owned() + "/")
}
pub fn cfg(&self, db: &dyn HirDatabase) -> CfgOptions {
db.crate_graph()[self.id].cfg_options.clone()
}
pub fn potential_cfg(&self, db: &dyn HirDatabase) -> CfgOptions {
db.crate_graph()[self.id].potential_cfg_options.clone()
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Module {
pub(crate) id: ModuleId,
}
/// The defs which can be visible in the module.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ModuleDef {
Module(Module),
Function(Function),
Adt(Adt),
// Can't be directly declared, but can be imported.
Variant(Variant),
Const(Const),
Static(Static),
Trait(Trait),
TypeAlias(TypeAlias),
BuiltinType(BuiltinType),
}
impl_from!(
Module,
Function,
Adt(Struct, Enum, Union),
Variant,
Const,
Static,
Trait,
TypeAlias,
BuiltinType
for ModuleDef
);
impl From<VariantDef> for ModuleDef {
fn from(var: VariantDef) -> Self {
match var {
VariantDef::Struct(t) => Adt::from(t).into(),
VariantDef::Union(t) => Adt::from(t).into(),
VariantDef::Variant(t) => t.into(),
}
}
}
impl ModuleDef {
pub fn module(self, db: &dyn HirDatabase) -> Option<Module> {
match self {
ModuleDef::Module(it) => it.parent(db),
ModuleDef::Function(it) => Some(it.module(db)),
ModuleDef::Adt(it) => Some(it.module(db)),
ModuleDef::Variant(it) => Some(it.module(db)),
ModuleDef::Const(it) => Some(it.module(db)),
ModuleDef::Static(it) => Some(it.module(db)),
ModuleDef::Trait(it) => Some(it.module(db)),
ModuleDef::TypeAlias(it) => Some(it.module(db)),
ModuleDef::BuiltinType(_) => None,
}
}
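    /// Returns the fully qualified path of this item from the crate root, e.g.
    /// `bar::Foo` for an item `Foo` declared in module `bar` (the unnamed crate
    /// root is omitted).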
pub fn canonical_path(&self, db: &dyn HirDatabase) -> Option<String> {
let mut segments = vec![self.name(db)?];
for m in self.module(db)?.path_to_root(db) {
segments.extend(m.name(db))
}
segments.reverse();
Some(segments.into_iter().join("::"))
}
pub fn canonical_module_path(
&self,
db: &dyn HirDatabase,
) -> Option<impl Iterator<Item = Module>> {
self.module(db).map(|it| it.path_to_root(db).into_iter().rev())
}
pub fn name(self, db: &dyn HirDatabase) -> Option<Name> {
let name = match self {
ModuleDef::Module(it) => it.name(db)?,
ModuleDef::Const(it) => it.name(db)?,
ModuleDef::Adt(it) => it.name(db),
ModuleDef::Trait(it) => it.name(db),
ModuleDef::Function(it) => it.name(db),
ModuleDef::Variant(it) => it.name(db),
ModuleDef::TypeAlias(it) => it.name(db),
ModuleDef::Static(it) => it.name(db),
ModuleDef::BuiltinType(it) => it.name(),
};
Some(name)
}
pub fn diagnostics(self, db: &dyn HirDatabase) -> Vec<AnyDiagnostic> {
let id = match self {
ModuleDef::Adt(it) => match it {
Adt::Struct(it) => it.id.into(),
Adt::Enum(it) => it.id.into(),
Adt::Union(it) => it.id.into(),
},
ModuleDef::Trait(it) => it.id.into(),
ModuleDef::Function(it) => it.id.into(),
ModuleDef::TypeAlias(it) => it.id.into(),
ModuleDef::Module(it) => it.id.into(),
ModuleDef::Const(it) => it.id.into(),
ModuleDef::Static(it) => it.id.into(),
_ => return Vec::new(),
};
let module = match self.module(db) {
Some(it) => it,
None => return Vec::new(),
};
let mut acc = Vec::new();
match self.as_def_with_body() {
Some(def) => {
def.diagnostics(db, &mut acc);
}
None => {
for diag in hir_ty::diagnostics::incorrect_case(db, module.id.krate(), id) {
acc.push(diag.into())
}
}
}
acc
}
pub fn as_def_with_body(self) -> Option<DefWithBody> {
match self {
ModuleDef::Function(it) => Some(it.into()),
ModuleDef::Const(it) => Some(it.into()),
ModuleDef::Static(it) => Some(it.into()),
ModuleDef::Module(_)
| ModuleDef::Adt(_)
| ModuleDef::Variant(_)
| ModuleDef::Trait(_)
| ModuleDef::TypeAlias(_)
| ModuleDef::BuiltinType(_) => None,
}
}
pub fn attrs(&self, db: &dyn HirDatabase) -> Option<AttrsWithOwner> {
Some(match self {
ModuleDef::Module(it) => it.attrs(db),
ModuleDef::Function(it) => it.attrs(db),
ModuleDef::Adt(it) => it.attrs(db),
ModuleDef::Variant(it) => it.attrs(db),
ModuleDef::Const(it) => it.attrs(db),
ModuleDef::Static(it) => it.attrs(db),
ModuleDef::Trait(it) => it.attrs(db),
ModuleDef::TypeAlias(it) => it.attrs(db),
ModuleDef::BuiltinType(_) => return None,
})
}
}
impl HasVisibility for ModuleDef {
fn visibility(&self, db: &dyn HirDatabase) -> Visibility {
match *self {
ModuleDef::Module(it) => it.visibility(db),
ModuleDef::Function(it) => it.visibility(db),
ModuleDef::Adt(it) => it.visibility(db),
ModuleDef::Const(it) => it.visibility(db),
ModuleDef::Static(it) => it.visibility(db),
ModuleDef::Trait(it) => it.visibility(db),
ModuleDef::TypeAlias(it) => it.visibility(db),
ModuleDef::Variant(it) => it.visibility(db),
ModuleDef::BuiltinType(_) => Visibility::Public,
}
}
}
impl Module {
/// Name of this module.
pub fn name(self, db: &dyn HirDatabase) -> Option<Name> {
let def_map = self.id.def_map(db.upcast());
let parent = def_map[self.id.local_id].parent?;
def_map[parent].children.iter().find_map(|(name, module_id)| {
if *module_id == self.id.local_id {
Some(name.clone())
} else {
None
}
})
}
/// Returns the crate this module is part of.
pub fn krate(self) -> Crate {
Crate { id: self.id.krate() }
}
/// Topmost parent of this module. Every module has a `crate_root`, but some
/// might be missing `krate`. This can happen if a module's file is not included
/// in the module tree of any target in `Cargo.toml`.
pub fn crate_root(self, db: &dyn HirDatabase) -> Module {
let def_map = db.crate_def_map(self.id.krate());
Module { id: def_map.module_id(def_map.root()) }
}
/// Iterates over all child modules.
pub fn children(self, db: &dyn HirDatabase) -> impl Iterator<Item = Module> {
let def_map = self.id.def_map(db.upcast());
let children = def_map[self.id.local_id]
.children
.iter()
.map(|(_, module_id)| Module { id: def_map.module_id(*module_id) })
.collect::<Vec<_>>();
children.into_iter()
}
/// Finds a parent module.
pub fn parent(self, db: &dyn HirDatabase) -> Option<Module> {
// FIXME: handle block expressions as modules (their parent is in a different DefMap)
let def_map = self.id.def_map(db.upcast());
let parent_id = def_map[self.id.local_id].parent?;
Some(Module { id: def_map.module_id(parent_id) })
}
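    /// Returns this module followed by its ancestors, ending at the crate root,
    /// e.g. `[b, a, crate_root]` for `crate::a::b`.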
pub fn path_to_root(self, db: &dyn HirDatabase) -> Vec<Module> {
let mut res = vec![self];
let mut curr = self;
while let Some(next) = curr.parent(db) {
res.push(next);
curr = next
}
res
}
    /// Returns a `ModuleScope`: a set of items visible in this module.
pub fn scope(
self,
db: &dyn HirDatabase,
visible_from: Option<Module>,
) -> Vec<(Name, ScopeDef)> {
self.id.def_map(db.upcast())[self.id.local_id]
.scope
.entries()
.filter_map(|(name, def)| {
if let Some(m) = visible_from {
let filtered =
def.filter_visibility(|vis| vis.is_visible_from(db.upcast(), m.id));
if filtered.is_none() && !def.is_none() {
None
} else {
Some((name, filtered))
}
} else {
Some((name, def))
}
})
.flat_map(|(name, def)| {
ScopeDef::all_items(def).into_iter().map(move |item| (name.clone(), item))
})
.collect()
}
pub fn diagnostics(self, db: &dyn HirDatabase, acc: &mut Vec<AnyDiagnostic>) {
let _p = profile::span("Module::diagnostics").detail(|| {
format!("{:?}", self.name(db).map_or("<unknown>".into(), |name| name.to_string()))
});
let def_map = self.id.def_map(db.upcast());
for diag in def_map.diagnostics() {
if diag.in_module != self.id.local_id {
// FIXME: This is accidentally quadratic.
continue;
}
match &diag.kind {
DefDiagnosticKind::UnresolvedModule { ast: declaration, candidate } => {
let decl = declaration.to_node(db.upcast());
acc.push(
UnresolvedModule {
decl: InFile::new(declaration.file_id, AstPtr::new(&decl)),
candidate: candidate.clone(),
}
.into(),
)
}
DefDiagnosticKind::UnresolvedExternCrate { ast } => {
let item = ast.to_node(db.upcast());
acc.push(
UnresolvedExternCrate {
decl: InFile::new(ast.file_id, AstPtr::new(&item)),
}
.into(),
);
}
DefDiagnosticKind::UnresolvedImport { id, index } => {
let file_id = id.file_id();
let item_tree = id.item_tree(db.upcast());
let import = &item_tree[id.value];
let use_tree = import.use_tree_to_ast(db.upcast(), file_id, *index);
acc.push(
UnresolvedImport { decl: InFile::new(file_id, AstPtr::new(&use_tree)) }
.into(),
);
}
DefDiagnosticKind::UnconfiguredCode { ast, cfg, opts } => {
let item = ast.to_node(db.upcast());
acc.push(
InactiveCode {
node: ast.with_value(AstPtr::new(&item).into()),
cfg: cfg.clone(),
opts: opts.clone(),
}
.into(),
);
}
DefDiagnosticKind::UnresolvedProcMacro { ast } => {
let mut precise_location = None;
let (node, name) = match ast {
MacroCallKind::FnLike { ast_id, .. } => {
let node = ast_id.to_node(db.upcast());
(ast_id.with_value(SyntaxNodePtr::from(AstPtr::new(&node))), None)
}
MacroCallKind::Derive { ast_id, derive_name, .. } => {
let node = ast_id.to_node(db.upcast());
// Compute the precise location of the macro name's token in the derive
// list.
// FIXME: This does not handle paths to the macro, but neither does the
// rest of r-a.
let derive_attrs =
node.attrs().filter_map(|attr| match attr.as_simple_call() {
Some((name, args)) if name == "derive" => Some(args),
_ => None,
});
'outer: for attr in derive_attrs {
let tokens =
attr.syntax().children_with_tokens().filter_map(|elem| {
match elem {
syntax::NodeOrToken::Node(_) => None,
syntax::NodeOrToken::Token(tok) => Some(tok),
}
});
for token in tokens {
if token.kind() == SyntaxKind::IDENT
&& token.text() == &**derive_name
{
precise_location = Some(token.text_range());
break 'outer;
}
}
}
(
ast_id.with_value(SyntaxNodePtr::from(AstPtr::new(&node))),
Some(derive_name.clone()),
)
}
MacroCallKind::Attr { ast_id, invoc_attr_index, attr_name, .. } => {
let node = ast_id.to_node(db.upcast());
let attr =
node.attrs().nth((*invoc_attr_index) as usize).unwrap_or_else(
|| panic!("cannot find attribute #{}", invoc_attr_index),
);
(
ast_id.with_value(SyntaxNodePtr::from(AstPtr::new(&attr))),
Some(attr_name.clone()),
)
}
};
acc.push(
UnresolvedProcMacro {
node,
precise_location,
macro_name: name.map(Into::into),
}
.into(),
);
}
DefDiagnosticKind::UnresolvedMacroCall { ast, path } => {
let node = ast.to_node(db.upcast());
acc.push(
UnresolvedMacroCall {
macro_call: InFile::new(ast.file_id, AstPtr::new(&node)),
path: path.clone(),
}
.into(),
);
}
DefDiagnosticKind::MacroError { ast, message } => {
let node = match ast {
MacroCallKind::FnLike { ast_id, .. } => {
let node = ast_id.to_node(db.upcast());
ast_id.with_value(SyntaxNodePtr::from(AstPtr::new(&node)))
}
MacroCallKind::Derive { ast_id, .. }
| MacroCallKind::Attr { ast_id, .. } => {
// FIXME: point to the attribute instead, this creates very large diagnostics
let node = ast_id.to_node(db.upcast());
ast_id.with_value(SyntaxNodePtr::from(AstPtr::new(&node)))
}
};
acc.push(MacroError { node, message: message.clone() }.into());
}
DefDiagnosticKind::UnimplementedBuiltinMacro { ast } => {
let node = ast.to_node(db.upcast());
// Must have a name, otherwise we wouldn't emit it.
let name = node.name().expect("unimplemented builtin macro with no name");
acc.push(
UnimplementedBuiltinMacro {
node: ast.with_value(SyntaxNodePtr::from(AstPtr::new(&name))),
}
.into(),
);
}
DefDiagnosticKind::InvalidDeriveTarget { ast, id } => {
let node = ast.to_node(db.upcast());
let derive = node.attrs().nth(*id as usize);
match derive {
Some(derive) => {
acc.push(
InvalidDeriveTarget {
node: ast.with_value(SyntaxNodePtr::from(AstPtr::new(&derive))),
}
.into(),
);
}
None => stdx::never!("derive diagnostic on item without derive attribute"),
}
}
DefDiagnosticKind::MalformedDerive { ast, id } => {
let node = ast.to_node(db.upcast());
let derive = node.attrs().nth(*id as usize);
match derive {
Some(derive) => {
acc.push(
MalformedDerive {
node: ast.with_value(SyntaxNodePtr::from(AstPtr::new(&derive))),
}
.into(),
);
}
None => stdx::never!("derive diagnostic on item without derive attribute"),
}
}
}
}
for decl in self.declarations(db) {
match decl {
ModuleDef::Module(m) => {
// Only add diagnostics from inline modules
if def_map[m.id.local_id].origin.is_inline() {
m.diagnostics(db, acc)
}
}
_ => acc.extend(decl.diagnostics(db)),
}
}
for impl_def in self.impl_defs(db) {
for item in impl_def.items(db) {
let def: DefWithBody = match item {
AssocItem::Function(it) => it.into(),
AssocItem::Const(it) => it.into(),
AssocItem::TypeAlias(_) => continue,
};
def.diagnostics(db, acc);
}
}
}
pub fn declarations(self, db: &dyn HirDatabase) -> Vec<ModuleDef> {
let def_map = self.id.def_map(db.upcast());
let scope = &def_map[self.id.local_id].scope;
scope
.declarations()
.map(ModuleDef::from)
.chain(scope.unnamed_consts().map(|id| ModuleDef::Const(Const::from(id))))
.collect()
}
pub fn impl_defs(self, db: &dyn HirDatabase) -> Vec<Impl> {
let def_map = self.id.def_map(db.upcast());
def_map[self.id.local_id].scope.impls().map(Impl::from).collect()
}
/// Finds a path that can be used to refer to the given item from within
/// this module, if possible.
pub fn find_use_path(self, db: &dyn DefDatabase, item: impl Into<ItemInNs>) -> Option<ModPath> {
hir_def::find_path::find_path(db, item.into().into(), self.into())
}
/// Finds a path that can be used to refer to the given item from within
/// this module, if possible. This is used for returning import paths for use-statements.
pub fn find_use_path_prefixed(
self,
db: &dyn DefDatabase,
item: impl Into<ItemInNs>,
prefix_kind: PrefixKind,
) -> Option<ModPath> {
hir_def::find_path::find_path_prefixed(db, item.into().into(), self.into(), prefix_kind)
}
}
impl HasVisibility for Module {
fn visibility(&self, db: &dyn HirDatabase) -> Visibility {
let def_map = self.id.def_map(db.upcast());
let module_data = &def_map[self.id.local_id];
module_data.visibility
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Field {
pub(crate) parent: VariantDef,
pub(crate) id: LocalFieldId,
}
#[derive(Debug, PartialEq, Eq)]
pub enum FieldSource {
Named(ast::RecordField),
Pos(ast::TupleField),
}
impl Field {
pub fn name(&self, db: &dyn HirDatabase) -> Name {
self.parent.variant_data(db).fields()[self.id].name.clone()
}
/// Returns the type as in the signature of the struct (i.e., with
/// placeholder types for type parameters). Only use this in the context of
/// the field definition.
pub fn ty(&self, db: &dyn HirDatabase) -> Type {
let var_id = self.parent.into();
let generic_def_id: GenericDefId = match self.parent {
VariantDef::Struct(it) => it.id.into(),
VariantDef::Union(it) => it.id.into(),
VariantDef::Variant(it) => it.parent.id.into(),
};
let substs = TyBuilder::type_params_subst(db, generic_def_id);
let ty = db.field_types(var_id)[self.id].clone().substitute(&Interner, &substs);
Type::new(db, self.parent.module(db).id.krate(), var_id, ty)
}
pub fn parent_def(&self, _db: &dyn HirDatabase) -> VariantDef {
self.parent
}
}
impl HasVisibility for Field {
fn visibility(&self, db: &dyn HirDatabase) -> Visibility {
let variant_data = self.parent.variant_data(db);
let visibility = &variant_data.fields()[self.id].visibility;
let parent_id: hir_def::VariantId = self.parent.into();
visibility.resolve(db.upcast(), &parent_id.resolver(db.upcast()))
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Struct {
pub(crate) id: StructId,
}
impl Struct {
pub fn module(self, db: &dyn HirDatabase) -> Module {
Module { id: self.id.lookup(db.upcast()).container }
}
pub fn name(self, db: &dyn HirDatabase) -> Name {
db.struct_data(self.id).name.clone()
}
pub fn fields(self, db: &dyn HirDatabase) -> Vec<Field> {
db.struct_data(self.id)
.variant_data
.fields()
.iter()
.map(|(id, _)| Field { parent: self.into(), id })
.collect()
}
pub fn ty(self, db: &dyn HirDatabase) -> Type {
Type::from_def(db, self.id.lookup(db.upcast()).container.krate(), self.id)
}
pub fn repr(self, db: &dyn HirDatabase) -> Option<ReprKind> {
db.struct_data(self.id).repr.clone()
}
pub fn kind(self, db: &dyn HirDatabase) -> StructKind {
self.variant_data(db).kind()
}
fn variant_data(self, db: &dyn HirDatabase) -> Arc<VariantData> {
db.struct_data(self.id).variant_data.clone()
}
}
impl HasVisibility for Struct {
fn visibility(&self, db: &dyn HirDatabase) -> Visibility {
db.struct_data(self.id).visibility.resolve(db.upcast(), &self.id.resolver(db.upcast()))
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Union {
pub(crate) id: UnionId,
}
impl Union {
pub fn name(self, db: &dyn HirDatabase) -> Name {
db.union_data(self.id).name.clone()
}
pub fn module(self, db: &dyn HirDatabase) -> Module {
Module { id: self.id.lookup(db.upcast()).container }
}
pub fn ty(self, db: &dyn HirDatabase) -> Type {
Type::from_def(db, self.id.lookup(db.upcast()).container.krate(), self.id)
}
pub fn fields(self, db: &dyn HirDatabase) -> Vec<Field> {
db.union_data(self.id)
.variant_data
.fields()
.iter()
.map(|(id, _)| Field { parent: self.into(), id })
.collect()
}
fn variant_data(self, db: &dyn HirDatabase) -> Arc<VariantData> {
db.union_data(self.id).variant_data.clone()
}
}
impl HasVisibility for Union {
fn visibility(&self, db: &dyn HirDatabase) -> Visibility {
db.union_data(self.id).visibility.resolve(db.upcast(), &self.id.resolver(db.upcast()))
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Enum {
pub(crate) id: EnumId,
}
impl Enum {
pub fn module(self, db: &dyn HirDatabase) -> Module {
Module { id: self.id.lookup(db.upcast()).container }
}
pub fn name(self, db: &dyn HirDatabase) -> Name {
db.enum_data(self.id).name.clone()
}
pub fn variants(self, db: &dyn HirDatabase) -> Vec<Variant> {
db.enum_data(self.id).variants.iter().map(|(id, _)| Variant { parent: self, id }).collect()
}
pub fn ty(self, db: &dyn HirDatabase) -> Type {
Type::from_def(db, self.id.lookup(db.upcast()).container.krate(), self.id)
}
}
impl HasVisibility for Enum {
fn visibility(&self, db: &dyn HirDatabase) -> Visibility {
db.enum_data(self.id).visibility.resolve(db.upcast(), &self.id.resolver(db.upcast()))
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Variant {
pub(crate) parent: Enum,
pub(crate) id: LocalEnumVariantId,
}
impl Variant {
pub fn module(self, db: &dyn HirDatabase) -> Module {
self.parent.module(db)
}
pub fn parent_enum(self, _db: &dyn HirDatabase) -> Enum {
self.parent
}
pub fn name(self, db: &dyn HirDatabase) -> Name {
db.enum_data(self.parent.id).variants[self.id].name.clone()
}
pub fn fields(self, db: &dyn HirDatabase) -> Vec<Field> {
self.variant_data(db)
.fields()
.iter()
.map(|(id, _)| Field { parent: self.into(), id })
.collect()
}
pub fn kind(self, db: &dyn HirDatabase) -> StructKind {
self.variant_data(db).kind()
}
pub(crate) fn variant_data(self, db: &dyn HirDatabase) -> Arc<VariantData> {
db.enum_data(self.parent.id).variants[self.id].variant_data.clone()
}
}
/// Variants inherit visibility from the parent enum.
impl HasVisibility for Variant {
fn visibility(&self, db: &dyn HirDatabase) -> Visibility {
self.parent_enum(db).visibility(db)
}
}
/// A Data Type
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum Adt {
Struct(Struct),
Union(Union),
Enum(Enum),
}
impl_from!(Struct, Union, Enum for Adt);
impl Adt {
pub fn has_non_default_type_params(self, db: &dyn HirDatabase) -> bool {
let subst = db.generic_defaults(self.into());
subst.iter().any(|ty| ty.skip_binders().is_unknown())
}
/// Turns this ADT into a type. Any type parameters of the ADT will be
/// turned into unknown types, which is good for e.g. finding the most
/// general set of completions, but will not look very nice when printed.
pub fn ty(self, db: &dyn HirDatabase) -> Type {
let id = AdtId::from(self);
Type::from_def(db, id.module(db.upcast()).krate(), id)
}
pub fn module(self, db: &dyn HirDatabase) -> Module {
match self {
Adt::Struct(s) => s.module(db),
Adt::Union(s) => s.module(db),
Adt::Enum(e) => e.module(db),
}
}
pub fn name(self, db: &dyn HirDatabase) -> Name {
match self {
Adt::Struct(s) => s.name(db),
Adt::Union(u) => u.name(db),
Adt::Enum(e) => e.name(db),
}
}
}
impl HasVisibility for Adt {
fn visibility(&self, db: &dyn HirDatabase) -> Visibility {
match self {
Adt::Struct(it) => it.visibility(db),
Adt::Union(it) => it.visibility(db),
Adt::Enum(it) => it.visibility(db),
}
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum VariantDef {
Struct(Struct),
Union(Union),
Variant(Variant),
}
impl_from!(Struct, Union, Variant for VariantDef);
impl VariantDef {
pub fn fields(self, db: &dyn HirDatabase) -> Vec<Field> {
match self {
VariantDef::Struct(it) => it.fields(db),
VariantDef::Union(it) => it.fields(db),
VariantDef::Variant(it) => it.fields(db),
}
}
pub fn module(self, db: &dyn HirDatabase) -> Module {
match self {
VariantDef::Struct(it) => it.module(db),
VariantDef::Union(it) => it.module(db),
VariantDef::Variant(it) => it.module(db),
}
}
pub fn name(&self, db: &dyn HirDatabase) -> Name {
match self {
VariantDef::Struct(s) => s.name(db),
VariantDef::Union(u) => u.name(db),
VariantDef::Variant(e) => e.name(db),
}
}
pub(crate) fn variant_data(self, db: &dyn HirDatabase) -> Arc<VariantData> {
match self {
VariantDef::Struct(it) => it.variant_data(db),
VariantDef::Union(it) => it.variant_data(db),
VariantDef::Variant(it) => it.variant_data(db),
}
}
}
/// The defs which have a body.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum DefWithBody {
Function(Function),
Static(Static),
Const(Const),
}
impl_from!(Function, Const, Static for DefWithBody);
impl DefWithBody {
pub fn module(self, db: &dyn HirDatabase) -> Module {
match self {
DefWithBody::Const(c) => c.module(db),
DefWithBody::Function(f) => f.module(db),
DefWithBody::Static(s) => s.module(db),
}
}
pub fn name(self, db: &dyn HirDatabase) -> Option<Name> {
match self {
DefWithBody::Function(f) => Some(f.name(db)),
DefWithBody::Static(s) => Some(s.name(db)),
DefWithBody::Const(c) => c.name(db),
}
}
/// Returns the type this def's body has to evaluate to.
pub fn body_type(self, db: &dyn HirDatabase) -> Type {
match self {
DefWithBody::Function(it) => it.ret_type(db),
DefWithBody::Static(it) => it.ty(db),
DefWithBody::Const(it) => it.ty(db),
}
}
pub fn diagnostics(self, db: &dyn HirDatabase, acc: &mut Vec<AnyDiagnostic>) {
let krate = self.module(db).id.krate();
let source_map = db.body_with_source_map(self.into()).1;
for diag in source_map.diagnostics() {
match diag {
BodyDiagnostic::InactiveCode { node, cfg, opts } => acc.push(
InactiveCode { node: node.clone(), cfg: cfg.clone(), opts: opts.clone() }
.into(),
),
BodyDiagnostic::MacroError { node, message } => acc.push(
MacroError {
node: node.clone().map(|it| it.into()),
message: message.to_string(),
}
.into(),
),
BodyDiagnostic::UnresolvedProcMacro { node } => acc.push(
UnresolvedProcMacro {
node: node.clone().map(|it| it.into()),
precise_location: None,
macro_name: None,
}
.into(),
),
BodyDiagnostic::UnresolvedMacroCall { node, path } => acc.push(
UnresolvedMacroCall { macro_call: node.clone(), path: path.clone() }.into(),
),
}
}
let infer = db.infer(self.into());
let source_map = Lazy::new(|| db.body_with_source_map(self.into()).1);
for d in &infer.diagnostics {
match d {
hir_ty::InferenceDiagnostic::NoSuchField { expr } => {
let field = source_map.field_syntax(*expr);
acc.push(NoSuchField { field }.into())
}
hir_ty::InferenceDiagnostic::BreakOutsideOfLoop { expr } => {
let expr = source_map
.expr_syntax(*expr)
.expect("break outside of loop in synthetic syntax");
acc.push(BreakOutsideOfLoop { expr }.into())
}
}
}
for expr in hir_ty::diagnostics::missing_unsafe(db, self.into()) {
match source_map.expr_syntax(expr) {
Ok(expr) => acc.push(MissingUnsafe { expr }.into()),
Err(SyntheticSyntax) => {
                    // FIXME: Here and elsewhere in this file, the `expr` was
                    // desugared; report or assert that this doesn't happen.
}
}
}
for diagnostic in BodyValidationDiagnostic::collect(db, self.into()) {
match diagnostic {
BodyValidationDiagnostic::RecordMissingFields {
record,
variant,
missed_fields,
} => {
let variant_data = variant.variant_data(db.upcast());
let missed_fields = missed_fields
.into_iter()
.map(|idx| variant_data.fields()[idx].name.clone())
.collect();
match record {
Either::Left(record_expr) => match source_map.expr_syntax(record_expr) {
Ok(source_ptr) => {
let root = source_ptr.file_syntax(db.upcast());
if let ast::Expr::RecordExpr(record_expr) =
&source_ptr.value.to_node(&root)
{
if record_expr.record_expr_field_list().is_some() {
acc.push(
MissingFields {
file: source_ptr.file_id,
field_list_parent: Either::Left(AstPtr::new(
record_expr,
)),
field_list_parent_path: record_expr
.path()
.map(|path| AstPtr::new(&path)),
missed_fields,
}
.into(),
)
}
}
}
Err(SyntheticSyntax) => (),
},
Either::Right(record_pat) => match source_map.pat_syntax(record_pat) {
Ok(source_ptr) => {
if let Some(expr) = source_ptr.value.as_ref().left() {
let root = source_ptr.file_syntax(db.upcast());
if let ast::Pat::RecordPat(record_pat) = expr.to_node(&root) {
if record_pat.record_pat_field_list().is_some() {
acc.push(
MissingFields {
file: source_ptr.file_id,
field_list_parent: Either::Right(AstPtr::new(
&record_pat,
)),
field_list_parent_path: record_pat
.path()
.map(|path| AstPtr::new(&path)),
missed_fields,
}
.into(),
)
}
}
}
}
Err(SyntheticSyntax) => (),
},
}
}
BodyValidationDiagnostic::ReplaceFilterMapNextWithFindMap { method_call_expr } => {
if let Ok(next_source_ptr) = source_map.expr_syntax(method_call_expr) {
acc.push(
ReplaceFilterMapNextWithFindMap {
file: next_source_ptr.file_id,
next_expr: next_source_ptr.value,
}
.into(),
);
}
}
BodyValidationDiagnostic::MismatchedArgCount { call_expr, expected, found } => {
match source_map.expr_syntax(call_expr) {
Ok(source_ptr) => acc.push(
MismatchedArgCount { call_expr: source_ptr, expected, found }.into(),
),
Err(SyntheticSyntax) => (),
}
}
BodyValidationDiagnostic::RemoveThisSemicolon { expr } => {
match source_map.expr_syntax(expr) {
Ok(expr) => acc.push(RemoveThisSemicolon { expr }.into()),
Err(SyntheticSyntax) => (),
}
}
BodyValidationDiagnostic::MissingOkOrSomeInTailExpr { expr, required } => {
match source_map.expr_syntax(expr) {
Ok(expr) => acc.push(
MissingOkOrSomeInTailExpr {
expr,
required,
expected: self.body_type(db),
}
.into(),
),
Err(SyntheticSyntax) => (),
}
}
BodyValidationDiagnostic::MissingMatchArms { match_expr } => {
match source_map.expr_syntax(match_expr) {
Ok(source_ptr) => {
let root = source_ptr.file_syntax(db.upcast());
if let ast::Expr::MatchExpr(match_expr) =
&source_ptr.value.to_node(&root)
{
if let (Some(match_expr), Some(arms)) =
(match_expr.expr(), match_expr.match_arm_list())
{
acc.push(
MissingMatchArms {
file: source_ptr.file_id,
match_expr: AstPtr::new(&match_expr),
arms: AstPtr::new(&arms),
}
.into(),
)
}
}
}
Err(SyntheticSyntax) => (),
}
}
BodyValidationDiagnostic::AddReferenceHere { arg_expr, mutability } => {
match source_map.expr_syntax(arg_expr) {
Ok(expr) => acc.push(AddReferenceHere { expr, mutability }.into()),
Err(SyntheticSyntax) => (),
}
}
}
}
let def: ModuleDef = match self {
DefWithBody::Function(it) => it.into(),
DefWithBody::Static(it) => it.into(),
DefWithBody::Const(it) => it.into(),
};
for diag in hir_ty::diagnostics::incorrect_case(db, krate, def.into()) {
acc.push(diag.into())
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Function {
pub(crate) id: FunctionId,
}
impl Function {
pub fn module(self, db: &dyn HirDatabase) -> Module {
self.id.lookup(db.upcast()).module(db.upcast()).into()
}
pub fn name(self, db: &dyn HirDatabase) -> Name {
db.function_data(self.id).name.clone()
}
/// Get this function's return type
pub fn ret_type(self, db: &dyn HirDatabase) -> Type {
let resolver = self.id.resolver(db.upcast());
let krate = self.id.lookup(db.upcast()).container.module(db.upcast()).krate();
let ret_type = &db.function_data(self.id).ret_type;
let ctx = hir_ty::TyLoweringContext::new(db, &resolver);
let ty = ctx.lower_ty(ret_type);
Type::new_with_resolver_inner(db, krate, &resolver, ty)
}
pub fn self_param(self, db: &dyn HirDatabase) -> Option<SelfParam> {
if !db.function_data(self.id).has_self_param() {
return None;
}
Some(SelfParam { func: self.id })
}
pub fn assoc_fn_params(self, db: &dyn HirDatabase) -> Vec<Param> {
let resolver = self.id.resolver(db.upcast());
let krate = self.id.lookup(db.upcast()).container.module(db.upcast()).krate();
let ctx = hir_ty::TyLoweringContext::new(db, &resolver);
let environment = db.trait_environment(self.id.into());
db.function_data(self.id)
.params
.iter()
.enumerate()
.map(|(idx, type_ref)| {
let ty = Type { krate, env: environment.clone(), ty: ctx.lower_ty(type_ref) };
Param { func: self, ty, idx }
})
.collect()
}
pub fn method_params(self, db: &dyn HirDatabase) -> Option<Vec<Param>> {
if self.self_param(db).is_none() {
return None;
}
let mut res = self.assoc_fn_params(db);
res.remove(0);
Some(res)
}
pub fn is_unsafe(self, db: &dyn HirDatabase) -> bool {
db.function_data(self.id).is_unsafe()
}
pub fn is_const(self, db: &dyn HirDatabase) -> bool {
db.function_data(self.id).is_const()
}
pub fn is_async(self, db: &dyn HirDatabase) -> bool {
db.function_data(self.id).is_async()
}
/// Whether this function declaration has a definition.
///
/// This is false in the case of required (not provided) trait methods.
pub fn has_body(self, db: &dyn HirDatabase) -> bool {
db.function_data(self.id).has_body()
}
/// A textual representation of the HIR of this function for debugging purposes.
pub fn debug_hir(self, db: &dyn HirDatabase) -> String {
let body = db.body(self.id.into());
let mut result = String::new();
format_to!(result, "HIR expressions in the body of `{}`:\n", self.name(db));
for (id, expr) in body.exprs.iter() {
format_to!(result, "{:?}: {:?}\n", id, expr);
}
result
}
}
// Note: logically, this belongs to `hir_ty`, but we are not using it there yet.
pub enum Access {
Shared,
Exclusive,
Owned,
}
impl From<hir_ty::Mutability> for Access {
fn from(mutability: hir_ty::Mutability) -> Access {
match mutability {
hir_ty::Mutability::Not => Access::Shared,
hir_ty::Mutability::Mut => Access::Exclusive,
}
}
}
#[derive(Clone, Debug)]
pub struct Param {
func: Function,
/// The index in parameter list, including self parameter.
idx: usize,
ty: Type,
}
impl Param {
pub fn ty(&self) -> &Type {
&self.ty
}
pub fn as_local(&self, db: &dyn HirDatabase) -> Local {
let parent = DefWithBodyId::FunctionId(self.func.into());
let body = db.body(parent);
Local { parent, pat_id: body.params[self.idx] }
}
pub fn pattern_source(&self, db: &dyn HirDatabase) -> Option<ast::Pat> {
self.source(db).and_then(|p| p.value.pat())
}
pub fn source(&self, db: &dyn HirDatabase) -> Option<InFile<ast::Param>> {
let InFile { file_id, value } = self.func.source(db)?;
let params = value.param_list()?;
if params.self_param().is_some() {
params.params().nth(self.idx.checked_sub(1)?)
} else {
params.params().nth(self.idx)
}
.map(|value| InFile { file_id, value })
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct SelfParam {
func: FunctionId,
}
impl SelfParam {
pub fn access(self, db: &dyn HirDatabase) -> Access {
let func_data = db.function_data(self.func);
func_data
.params
.first()
.map(|param| match &**param {
TypeRef::Reference(.., mutability) => match mutability {
hir_def::type_ref::Mutability::Shared => Access::Shared,
hir_def::type_ref::Mutability::Mut => Access::Exclusive,
},
_ => Access::Owned,
})
.unwrap_or(Access::Owned)
}
pub fn display(self, db: &dyn HirDatabase) -> &'static str {
match self.access(db) {
Access::Shared => "&self",
Access::Exclusive => "&mut self",
Access::Owned => "self",
}
}
pub fn source(&self, db: &dyn HirDatabase) -> Option<InFile<ast::SelfParam>> {
let InFile { file_id, value } = Function::from(self.func).source(db)?;
value
.param_list()
.and_then(|params| params.self_param())
.map(|value| InFile { file_id, value })
}
}
impl HasVisibility for Function {
fn visibility(&self, db: &dyn HirDatabase) -> Visibility {
let function_data = db.function_data(self.id);
let visibility = &function_data.visibility;
visibility.resolve(db.upcast(), &self.id.resolver(db.upcast()))
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Const {
pub(crate) id: ConstId,
}
impl Const {
pub fn module(self, db: &dyn HirDatabase) -> Module {
Module { id: self.id.lookup(db.upcast()).module(db.upcast()) }
}
pub fn name(self, db: &dyn HirDatabase) -> Option<Name> {
db.const_data(self.id).name.clone()
}
pub fn value(self, db: &dyn HirDatabase) -> Option<ast::Expr> {
self.source(db)?.value.body()
}
pub fn ty(self, db: &dyn HirDatabase) -> Type {
let data = db.const_data(self.id);
let resolver = self.id.resolver(db.upcast());
let krate = self.id.lookup(db.upcast()).container.krate(db);
let ctx = hir_ty::TyLoweringContext::new(db, &resolver);
let ty = ctx.lower_ty(&data.type_ref);
Type::new_with_resolver_inner(db, krate.id, &resolver, ty)
}
}
impl HasVisibility for Const {
fn visibility(&self, db: &dyn HirDatabase) -> Visibility {
let function_data = db.const_data(self.id);
let visibility = &function_data.visibility;
visibility.resolve(db.upcast(), &self.id.resolver(db.upcast()))
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Static {
pub(crate) id: StaticId,
}
impl Static {
pub fn module(self, db: &dyn HirDatabase) -> Module {
Module { id: self.id.lookup(db.upcast()).module(db.upcast()) }
}
pub fn name(self, db: &dyn HirDatabase) -> Name {
db.static_data(self.id).name.clone()
}
pub fn is_mut(self, db: &dyn HirDatabase) -> bool {
db.static_data(self.id).mutable
}
pub fn value(self, db: &dyn HirDatabase) -> Option<ast::Expr> {
self.source(db)?.value.body()
}
pub fn ty(self, db: &dyn HirDatabase) -> Type {
let data = db.static_data(self.id);
let resolver = self.id.resolver(db.upcast());
let krate = self.id.lookup(db.upcast()).container.module(db.upcast()).krate();
let ctx = hir_ty::TyLoweringContext::new(db, &resolver);
let ty = ctx.lower_ty(&data.type_ref);
Type::new_with_resolver_inner(db, krate, &resolver, ty)
}
}
impl HasVisibility for Static {
fn visibility(&self, db: &dyn HirDatabase) -> Visibility {
db.static_data(self.id).visibility.resolve(db.upcast(), &self.id.resolver(db.upcast()))
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Trait {
pub(crate) id: TraitId,
}
impl Trait {
pub fn module(self, db: &dyn HirDatabase) -> Module {
Module { id: self.id.lookup(db.upcast()).container }
}
pub fn name(self, db: &dyn HirDatabase) -> Name {
db.trait_data(self.id).name.clone()
}
pub fn items(self, db: &dyn HirDatabase) -> Vec<AssocItem> {
db.trait_data(self.id).items.iter().map(|(_name, it)| (*it).into()).collect()
}
pub fn is_auto(self, db: &dyn HirDatabase) -> bool {
db.trait_data(self.id).is_auto
}
pub fn is_unsafe(&self, db: &dyn HirDatabase) -> bool {
db.trait_data(self.id).is_unsafe
}
}
impl HasVisibility for Trait {
fn visibility(&self, db: &dyn HirDatabase) -> Visibility {
db.trait_data(self.id).visibility.resolve(db.upcast(), &self.id.resolver(db.upcast()))
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct TypeAlias {
pub(crate) id: TypeAliasId,
}
impl TypeAlias {
pub fn has_non_default_type_params(self, db: &dyn HirDatabase) -> bool {
let subst = db.generic_defaults(self.id.into());
subst.iter().any(|ty| ty.skip_binders().is_unknown())
}
pub fn module(self, db: &dyn HirDatabase) -> Module {
Module { id: self.id.lookup(db.upcast()).module(db.upcast()) }
}
pub fn type_ref(self, db: &dyn HirDatabase) -> Option<TypeRef> {
db.type_alias_data(self.id).type_ref.as_deref().cloned()
}
pub fn ty(self, db: &dyn HirDatabase) -> Type {
Type::from_def(db, self.id.lookup(db.upcast()).module(db.upcast()).krate(), self.id)
}
pub fn name(self, db: &dyn HirDatabase) -> Name {
db.type_alias_data(self.id).name.clone()
}
}
impl HasVisibility for TypeAlias {
fn visibility(&self, db: &dyn HirDatabase) -> Visibility {
let function_data = db.type_alias_data(self.id);
let visibility = &function_data.visibility;
visibility.resolve(db.upcast(), &self.id.resolver(db.upcast()))
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct BuiltinType {
pub(crate) inner: hir_def::builtin_type::BuiltinType,
}
impl BuiltinType {
pub fn str() -> BuiltinType {
BuiltinType { inner: hir_def::builtin_type::BuiltinType::Str }
}
pub fn ty(self, db: &dyn HirDatabase, module: Module) -> Type {
let resolver = module.id.resolver(db.upcast());
Type::new_with_resolver(db, &resolver, TyBuilder::builtin(self.inner))
.expect("crate not present in resolver")
}
pub fn name(self) -> Name {
self.inner.as_name()
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum MacroKind {
/// `macro_rules!` or Macros 2.0 macro.
Declarative,
/// A built-in or custom derive.
Derive,
/// A built-in function-like macro.
BuiltIn,
/// A procedural attribute macro.
Attr,
/// A function-like procedural macro.
ProcMacro,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct MacroDef {
pub(crate) id: MacroDefId,
}
impl MacroDef {
    /// FIXME: right now, this just returns the root module of the crate that
    /// defines this macro. The reason for this is that macros are expanded
    /// early, in `hir_expand`, where modules simply do not exist yet.
pub fn module(self, db: &dyn HirDatabase) -> Option<Module> {
let krate = self.id.krate;
let def_map = db.crate_def_map(krate);
let module_id = def_map.root();
Some(Module { id: def_map.module_id(module_id) })
}
/// XXX: this parses the file
pub fn name(self, db: &dyn HirDatabase) -> Option<Name> {
match self.source(db)?.value {
Either::Left(it) => it.name().map(|it| it.as_name()),
Either::Right(_) => {
let krate = self.id.krate;
let def_map = db.crate_def_map(krate);
let (_, name) = def_map.exported_proc_macros().find(|&(id, _)| id == self.id)?;
Some(name)
}
}
}
pub fn kind(&self) -> MacroKind {
match self.id.kind {
MacroDefKind::Declarative(_) => MacroKind::Declarative,
MacroDefKind::BuiltIn(_, _) | MacroDefKind::BuiltInEager(_, _) => MacroKind::BuiltIn,
MacroDefKind::BuiltInDerive(_, _) => MacroKind::Derive,
MacroDefKind::BuiltInAttr(_, _) => MacroKind::Attr,
MacroDefKind::ProcMacro(_, base_db::ProcMacroKind::CustomDerive, _) => {
MacroKind::Derive
}
MacroDefKind::ProcMacro(_, base_db::ProcMacroKind::Attr, _) => MacroKind::Attr,
MacroDefKind::ProcMacro(_, base_db::ProcMacroKind::FuncLike, _) => MacroKind::ProcMacro,
}
}
pub fn is_fn_like(&self) -> bool {
match self.kind() {
MacroKind::Declarative | MacroKind::BuiltIn | MacroKind::ProcMacro => true,
MacroKind::Attr | MacroKind::Derive => false,
}
}
pub fn is_attr(&self) -> bool {
matches!(self.kind(), MacroKind::Attr)
}
}
#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
pub enum ItemInNs {
Types(ModuleDef),
Values(ModuleDef),
Macros(MacroDef),
}
impl From<MacroDef> for ItemInNs {
fn from(it: MacroDef) -> Self {
Self::Macros(it)
}
}
impl From<ModuleDef> for ItemInNs {
fn from(module_def: ModuleDef) -> Self {
match module_def {
ModuleDef::Static(_) | ModuleDef::Const(_) | ModuleDef::Function(_) => {
ItemInNs::Values(module_def)
}
_ => ItemInNs::Types(module_def),
}
}
}
impl ItemInNs {
pub fn as_module_def(self) -> Option<ModuleDef> {
match self {
ItemInNs::Types(id) | ItemInNs::Values(id) => Some(id),
ItemInNs::Macros(_) => None,
}
}
/// Returns the crate defining this item (or `None` if `self` is built-in).
pub fn krate(&self, db: &dyn HirDatabase) -> Option<Crate> {
match self {
ItemInNs::Types(did) | ItemInNs::Values(did) => did.module(db).map(|m| m.krate()),
ItemInNs::Macros(id) => id.module(db).map(|m| m.krate()),
}
}
pub fn attrs(&self, db: &dyn HirDatabase) -> Option<AttrsWithOwner> {
match self {
ItemInNs::Types(it) | ItemInNs::Values(it) => it.attrs(db),
ItemInNs::Macros(it) => Some(it.attrs(db)),
}
}
}
/// Invariant: `inner.as_assoc_item(db).is_some()`
/// We do not actively enforce this invariant.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum AssocItem {
Function(Function),
Const(Const),
TypeAlias(TypeAlias),
}
#[derive(Debug)]
pub enum AssocItemContainer {
Trait(Trait),
Impl(Impl),
}
pub trait AsAssocItem {
fn as_assoc_item(self, db: &dyn HirDatabase) -> Option<AssocItem>;
}
impl AsAssocItem for Function {
fn as_assoc_item(self, db: &dyn HirDatabase) -> Option<AssocItem> {
as_assoc_item(db, AssocItem::Function, self.id)
}
}
impl AsAssocItem for Const {
fn as_assoc_item(self, db: &dyn HirDatabase) -> Option<AssocItem> {
as_assoc_item(db, AssocItem::Const, self.id)
}
}
impl AsAssocItem for TypeAlias {
fn as_assoc_item(self, db: &dyn HirDatabase) -> Option<AssocItem> {
as_assoc_item(db, AssocItem::TypeAlias, self.id)
}
}
impl AsAssocItem for ModuleDef {
fn as_assoc_item(self, db: &dyn HirDatabase) -> Option<AssocItem> {
match self {
ModuleDef::Function(it) => it.as_assoc_item(db),
ModuleDef::Const(it) => it.as_assoc_item(db),
ModuleDef::TypeAlias(it) => it.as_assoc_item(db),
_ => None,
}
}
}
fn as_assoc_item<ID, DEF, CTOR, AST>(db: &dyn HirDatabase, ctor: CTOR, id: ID) -> Option<AssocItem>
where
ID: Lookup<Data = AssocItemLoc<AST>>,
DEF: From<ID>,
CTOR: FnOnce(DEF) -> AssocItem,
AST: ItemTreeNode,
{
match id.lookup(db.upcast()).container {
ItemContainerId::TraitId(_) | ItemContainerId::ImplId(_) => Some(ctor(DEF::from(id))),
ItemContainerId::ModuleId(_) | ItemContainerId::ExternBlockId(_) => None,
}
}
impl AssocItem {
pub fn name(self, db: &dyn HirDatabase) -> Option<Name> {
match self {
AssocItem::Function(it) => Some(it.name(db)),
AssocItem::Const(it) => it.name(db),
AssocItem::TypeAlias(it) => Some(it.name(db)),
}
}
pub fn module(self, db: &dyn HirDatabase) -> Module {
match self {
AssocItem::Function(f) => f.module(db),
AssocItem::Const(c) => c.module(db),
AssocItem::TypeAlias(t) => t.module(db),
}
}
pub fn container(self, db: &dyn HirDatabase) -> AssocItemContainer {
let container = match self {
AssocItem::Function(it) => it.id.lookup(db.upcast()).container,
AssocItem::Const(it) => it.id.lookup(db.upcast()).container,
AssocItem::TypeAlias(it) => it.id.lookup(db.upcast()).container,
};
match container {
ItemContainerId::TraitId(id) => AssocItemContainer::Trait(id.into()),
ItemContainerId::ImplId(id) => AssocItemContainer::Impl(id.into()),
ItemContainerId::ModuleId(_) | ItemContainerId::ExternBlockId(_) => {
panic!("invalid AssocItem")
}
}
}
pub fn containing_trait(self, db: &dyn HirDatabase) -> Option<Trait> {
match self.container(db) {
AssocItemContainer::Trait(t) => Some(t),
_ => None,
}
}
pub fn containing_trait_impl(self, db: &dyn HirDatabase) -> Option<Trait> {
match self.container(db) {
AssocItemContainer::Impl(i) => i.trait_(db),
_ => None,
}
}
pub fn containing_trait_or_trait_impl(self, db: &dyn HirDatabase) -> Option<Trait> {
match self.container(db) {
AssocItemContainer::Trait(t) => Some(t),
AssocItemContainer::Impl(i) => i.trait_(db),
}
}
}
impl HasVisibility for AssocItem {
fn visibility(&self, db: &dyn HirDatabase) -> Visibility {
match self {
AssocItem::Function(f) => f.visibility(db),
AssocItem::Const(c) => c.visibility(db),
AssocItem::TypeAlias(t) => t.visibility(db),
}
}
}
impl From<AssocItem> for ModuleDef {
fn from(assoc: AssocItem) -> Self {
match assoc {
AssocItem::Function(it) => ModuleDef::Function(it),
AssocItem::Const(it) => ModuleDef::Const(it),
AssocItem::TypeAlias(it) => ModuleDef::TypeAlias(it),
}
}
}
#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
pub enum GenericDef {
Function(Function),
Adt(Adt),
Trait(Trait),
TypeAlias(TypeAlias),
Impl(Impl),
// enum variants cannot have generics themselves, but their parent enums
// can, and this makes some code easier to write
Variant(Variant),
    // consts can have type parameters from their parents (e.g. associated consts of traits)
Const(Const),
}
impl_from!(
Function,
Adt(Struct, Enum, Union),
Trait,
TypeAlias,
Impl,
Variant,
Const
for GenericDef
);
impl GenericDef {
pub fn params(self, db: &dyn HirDatabase) -> Vec<GenericParam> {
let generics = db.generic_params(self.into());
let ty_params = generics
.types
.iter()
.map(|(local_id, _)| TypeParam { id: TypeParamId { parent: self.into(), local_id } })
.map(GenericParam::TypeParam);
let lt_params = generics
.lifetimes
.iter()
.map(|(local_id, _)| LifetimeParam {
id: LifetimeParamId { parent: self.into(), local_id },
})
.map(GenericParam::LifetimeParam);
let const_params = generics
.consts
.iter()
.map(|(local_id, _)| ConstParam { id: ConstParamId { parent: self.into(), local_id } })
.map(GenericParam::ConstParam);
ty_params.chain(lt_params).chain(const_params).collect()
}
pub fn type_params(self, db: &dyn HirDatabase) -> Vec<TypeParam> {
let generics = db.generic_params(self.into());
generics
.types
.iter()
.map(|(local_id, _)| TypeParam { id: TypeParamId { parent: self.into(), local_id } })
.collect()
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct Local {
pub(crate) parent: DefWithBodyId,
pub(crate) pat_id: PatId,
}
impl Local {
pub fn is_param(self, db: &dyn HirDatabase) -> bool {
let src = self.source(db);
match src.value {
Either::Left(bind_pat) => {
bind_pat.syntax().ancestors().any(|it| ast::Param::can_cast(it.kind()))
}
Either::Right(_self_param) => true,
}
}
pub fn as_self_param(self, db: &dyn HirDatabase) -> Option<SelfParam> {
match self.parent {
DefWithBodyId::FunctionId(func) if self.is_self(db) => Some(SelfParam { func }),
_ => None,
}
}
    // FIXME: why is this an `Option`? It shouldn't be.
pub fn name(self, db: &dyn HirDatabase) -> Option<Name> {
let body = db.body(self.parent);
match &body[self.pat_id] {
Pat::Bind { name, .. } => Some(name.clone()),
_ => None,
}
}
pub fn is_self(self, db: &dyn HirDatabase) -> bool {
self.name(db) == Some(name![self])
}
pub fn is_mut(self, db: &dyn HirDatabase) -> bool {
let body = db.body(self.parent);
matches!(&body[self.pat_id], Pat::Bind { mode: BindingAnnotation::Mutable, .. })
}
pub fn is_ref(self, db: &dyn HirDatabase) -> bool {
let body = db.body(self.parent);
matches!(
&body[self.pat_id],
Pat::Bind { mode: BindingAnnotation::Ref | BindingAnnotation::RefMut, .. }
)
}
pub fn parent(self, _db: &dyn HirDatabase) -> DefWithBody {
self.parent.into()
}
pub fn module(self, db: &dyn HirDatabase) -> Module {
self.parent(db).module(db)
}
pub fn ty(self, db: &dyn HirDatabase) -> Type {
let def = self.parent;
let infer = db.infer(def);
let ty = infer[self.pat_id].clone();
let krate = def.module(db.upcast()).krate();
Type::new(db, krate, def, ty)
}
pub fn source(self, db: &dyn HirDatabase) -> InFile<Either<ast::IdentPat, ast::SelfParam>> {
let (_body, source_map) = db.body_with_source_map(self.parent);
        let src = source_map.pat_syntax(self.pat_id).unwrap(); // FIXME: can panic for patterns without source (synthetic syntax)
let root = src.file_syntax(db.upcast());
src.map(|ast| {
ast.map_left(|it| it.cast().unwrap().to_node(&root)).map_right(|it| it.to_node(&root))
})
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct BuiltinAttr(usize);
impl BuiltinAttr {
pub(crate) fn by_name(name: &str) -> Option<Self> {
// FIXME: def maps registered attrs?
hir_def::builtin_attr::find_builtin_attr_idx(name).map(Self)
}
pub fn name(&self, _: &dyn HirDatabase) -> &str {
// FIXME: Return a `Name` here
hir_def::builtin_attr::INERT_ATTRIBUTES[self.0].name
}
pub fn template(&self, _: &dyn HirDatabase) -> AttributeTemplate {
hir_def::builtin_attr::INERT_ATTRIBUTES[self.0].template
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct ToolModule(usize);
impl ToolModule {
pub(crate) fn by_name(name: &str) -> Option<Self> {
// FIXME: def maps registered tools
hir_def::builtin_attr::TOOL_MODULES.iter().position(|&tool| tool == name).map(Self)
}
pub fn name(&self, _: &dyn HirDatabase) -> &str {
// FIXME: Return a `Name` here
hir_def::builtin_attr::TOOL_MODULES[self.0]
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct Label {
pub(crate) parent: DefWithBodyId,
pub(crate) label_id: LabelId,
}
impl Label {
pub fn module(self, db: &dyn HirDatabase) -> Module {
self.parent(db).module(db)
}
pub fn parent(self, _db: &dyn HirDatabase) -> DefWithBody {
self.parent.into()
}
pub fn name(self, db: &dyn HirDatabase) -> Name {
let body = db.body(self.parent);
body[self.label_id].name.clone()
}
pub fn source(self, db: &dyn HirDatabase) -> InFile<ast::Label> {
let (_body, source_map) = db.body_with_source_map(self.parent);
let src = source_map.label_syntax(self.label_id);
let root = src.file_syntax(db.upcast());
src.map(|ast| ast.to_node(&root))
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum GenericParam {
TypeParam(TypeParam),
LifetimeParam(LifetimeParam),
ConstParam(ConstParam),
}
impl_from!(TypeParam, LifetimeParam, ConstParam for GenericParam);
impl GenericParam {
pub fn module(self, db: &dyn HirDatabase) -> Module {
match self {
GenericParam::TypeParam(it) => it.module(db),
GenericParam::LifetimeParam(it) => it.module(db),
GenericParam::ConstParam(it) => it.module(db),
}
}
pub fn name(self, db: &dyn HirDatabase) -> Name {
match self {
GenericParam::TypeParam(it) => it.name(db),
GenericParam::LifetimeParam(it) => it.name(db),
GenericParam::ConstParam(it) => it.name(db),
}
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct TypeParam {
pub(crate) id: TypeParamId,
}
impl TypeParam {
pub fn name(self, db: &dyn HirDatabase) -> Name {
let params = db.generic_params(self.id.parent);
params.types[self.id.local_id].name.clone().unwrap_or_else(Name::missing)
}
pub fn module(self, db: &dyn HirDatabase) -> Module {
self.id.parent.module(db.upcast()).into()
}
pub fn ty(self, db: &dyn HirDatabase) -> Type {
let resolver = self.id.parent.resolver(db.upcast());
let krate = self.id.parent.module(db.upcast()).krate();
let ty = TyKind::Placeholder(hir_ty::to_placeholder_idx(db, self.id)).intern(&Interner);
Type::new_with_resolver_inner(db, krate, &resolver, ty)
}
pub fn trait_bounds(self, db: &dyn HirDatabase) -> Vec<Trait> {
db.generic_predicates_for_param(self.id, None)
.iter()
.filter_map(|pred| match &pred.skip_binders().skip_binders() {
hir_ty::WhereClause::Implemented(trait_ref) => {
Some(Trait::from(trait_ref.hir_trait_id()))
}
_ => None,
})
.collect()
}
pub fn default(self, db: &dyn HirDatabase) -> Option<Type> {
let params = db.generic_defaults(self.id.parent);
let local_idx = hir_ty::param_idx(db, self.id)?;
let resolver = self.id.parent.resolver(db.upcast());
let krate = self.id.parent.module(db.upcast()).krate();
let ty = params.get(local_idx)?.clone();
let subst = TyBuilder::type_params_subst(db, self.id.parent);
let ty = ty.substitute(&Interner, &subst_prefix(&subst, local_idx));
Some(Type::new_with_resolver_inner(db, krate, &resolver, ty))
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct LifetimeParam {
pub(crate) id: LifetimeParamId,
}
impl LifetimeParam {
pub fn name(self, db: &dyn HirDatabase) -> Name {
let params = db.generic_params(self.id.parent);
params.lifetimes[self.id.local_id].name.clone()
}
pub fn module(self, db: &dyn HirDatabase) -> Module {
self.id.parent.module(db.upcast()).into()
}
pub fn parent(self, _db: &dyn HirDatabase) -> GenericDef {
self.id.parent.into()
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct ConstParam {
pub(crate) id: ConstParamId,
}
impl ConstParam {
pub fn name(self, db: &dyn HirDatabase) -> Name {
let params = db.generic_params(self.id.parent);
params.consts[self.id.local_id].name.clone()
}
pub fn module(self, db: &dyn HirDatabase) -> Module {
self.id.parent.module(db.upcast()).into()
}
pub fn parent(self, _db: &dyn HirDatabase) -> GenericDef {
self.id.parent.into()
}
pub fn ty(self, db: &dyn HirDatabase) -> Type {
let def = self.id.parent;
let krate = def.module(db.upcast()).krate();
Type::new(db, krate, def, db.const_param_ty(self.id))
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Impl {
pub(crate) id: ImplId,
}
impl Impl {
pub fn all_in_crate(db: &dyn HirDatabase, krate: Crate) -> Vec<Impl> {
let inherent = db.inherent_impls_in_crate(krate.id);
let trait_ = db.trait_impls_in_crate(krate.id);
inherent.all_impls().chain(trait_.all_impls()).map(Self::from).collect()
}
pub fn all_for_type(db: &dyn HirDatabase, Type { krate, ty, .. }: Type) -> Vec<Impl> {
let def_crates = match method_resolution::def_crates(db, &ty, krate) {
Some(def_crates) => def_crates,
None => return Vec::new(),
};
let filter = |impl_def: &Impl| {
let self_ty = impl_def.self_ty(db);
let rref = self_ty.remove_ref();
ty.equals_ctor(rref.as_ref().map_or(&self_ty.ty, |it| &it.ty))
};
let fp = TyFingerprint::for_inherent_impl(&ty);
let fp = match fp {
Some(fp) => fp,
None => return Vec::new(),
};
let mut all = Vec::new();
def_crates.iter().for_each(|&id| {
all.extend(
db.inherent_impls_in_crate(id)
.for_self_ty(&ty)
.iter()
.cloned()
.map(Self::from)
.filter(filter),
)
});
for id in def_crates
.iter()
.flat_map(|&id| Crate { id }.transitive_reverse_dependencies(db))
.map(|Crate { id }| id)
.chain(def_crates.iter().copied())
.unique()
{
all.extend(
db.trait_impls_in_crate(id)
.for_self_ty_without_blanket_impls(fp)
.map(Self::from)
.filter(filter),
);
}
all
}
pub fn all_for_trait(db: &dyn HirDatabase, trait_: Trait) -> Vec<Impl> {
let krate = trait_.module(db).krate();
let mut all = Vec::new();
for Crate { id } in krate.transitive_reverse_dependencies(db).into_iter() {
let impls = db.trait_impls_in_crate(id);
all.extend(impls.for_trait(trait_.id).map(Self::from))
}
all
}
// FIXME: the return type is wrong. This should be a hir version of
// `TraitRef` (to account for parameters and qualifiers)
pub fn trait_(self, db: &dyn HirDatabase) -> Option<Trait> {
let trait_ref = db.impl_trait(self.id)?.skip_binders().clone();
let id = hir_ty::from_chalk_trait_id(trait_ref.trait_id);
Some(Trait { id })
}
pub fn self_ty(self, db: &dyn HirDatabase) -> Type {
let impl_data = db.impl_data(self.id);
let resolver = self.id.resolver(db.upcast());
let krate = self.id.lookup(db.upcast()).container.krate();
let ctx = hir_ty::TyLoweringContext::new(db, &resolver);
let ty = ctx.lower_ty(&impl_data.self_ty);
Type::new_with_resolver_inner(db, krate, &resolver, ty)
}
pub fn items(self, db: &dyn HirDatabase) -> Vec<AssocItem> {
db.impl_data(self.id).items.iter().map(|it| (*it).into()).collect()
}
pub fn is_negative(self, db: &dyn HirDatabase) -> bool {
db.impl_data(self.id).is_negative
}
pub fn module(self, db: &dyn HirDatabase) -> Module {
self.id.lookup(db.upcast()).container.into()
}
pub fn is_builtin_derive(self, db: &dyn HirDatabase) -> Option<InFile<ast::Attr>> {
let src = self.source(db)?;
let item = src.file_id.is_builtin_derive(db.upcast())?;
        let hygiene = hir_expand::hygiene::Hygiene::new(db.upcast(), item.file_id);
        // FIXME: handle `cfg_attr`
        let attr = item
            .value
            .attrs()
            .filter_map(|it| {
                let path = ModPath::from_src(db.upcast(), it.path()?, &hygiene)?;
if path.as_ident()?.to_smol_str() == "derive" {
Some(it)
} else {
None
}
})
.last()?;
Some(item.with_value(attr))
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Type {
krate: CrateId,
env: Arc<TraitEnvironment>,
ty: Ty,
}
impl Type {
pub(crate) fn new_with_resolver(
db: &dyn HirDatabase,
resolver: &Resolver,
ty: Ty,
) -> Option<Type> {
let krate = resolver.krate()?;
Some(Type::new_with_resolver_inner(db, krate, resolver, ty))
}
pub(crate) fn new_with_resolver_inner(
db: &dyn HirDatabase,
krate: CrateId,
resolver: &Resolver,
ty: Ty,
) -> Type {
let environment = resolver
.generic_def()
.map_or_else(|| Arc::new(TraitEnvironment::empty(krate)), |d| db.trait_environment(d));
Type { krate, env: environment, ty }
}
fn new(db: &dyn HirDatabase, krate: CrateId, lexical_env: impl HasResolver, ty: Ty) -> Type {
let resolver = lexical_env.resolver(db.upcast());
let environment = resolver
.generic_def()
.map_or_else(|| Arc::new(TraitEnvironment::empty(krate)), |d| db.trait_environment(d));
Type { krate, env: environment, ty }
}
fn from_def(
db: &dyn HirDatabase,
krate: CrateId,
def: impl HasResolver + Into<TyDefId>,
) -> Type {
let ty = TyBuilder::def_ty(db, def.into()).fill_with_unknown().build();
Type::new(db, krate, def, ty)
}
pub fn new_slice(ty: Type) -> Type {
Type { krate: ty.krate, env: ty.env, ty: TyBuilder::slice(ty.ty) }
}
pub fn is_unit(&self) -> bool {
matches!(self.ty.kind(&Interner), TyKind::Tuple(0, ..))
}
pub fn is_bool(&self) -> bool {
matches!(self.ty.kind(&Interner), TyKind::Scalar(Scalar::Bool))
}
pub fn is_never(&self) -> bool {
matches!(self.ty.kind(&Interner), TyKind::Never)
}
pub fn is_mutable_reference(&self) -> bool {
matches!(self.ty.kind(&Interner), TyKind::Ref(hir_ty::Mutability::Mut, ..))
}
pub fn is_reference(&self) -> bool {
matches!(self.ty.kind(&Interner), TyKind::Ref(..))
}
pub fn is_usize(&self) -> bool {
matches!(self.ty.kind(&Interner), TyKind::Scalar(Scalar::Uint(UintTy::Usize)))
}
pub fn remove_ref(&self) -> Option<Type> {
match &self.ty.kind(&Interner) {
TyKind::Ref(.., ty) => Some(self.derived(ty.clone())),
_ => None,
}
}
pub fn strip_references(&self) -> Type {
self.derived(self.ty.strip_references().clone())
}
pub fn is_unknown(&self) -> bool {
self.ty.is_unknown()
}
    /// Checks whether this type implements `std::future::Future`.
    /// This function is used in `.await` syntax completion.
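    ///
    /// Illustrative sketch (hedged; `db` and `receiver_ty` are placeholder names assumed to
    /// come from an IDE/`Semantics` session, not definitions in this file):
    /// ```ignore
    /// if receiver_ty.impls_future(db) {
    ///     // offer `.await` in the completion list
    /// }
    /// ```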
pub fn impls_future(&self, db: &dyn HirDatabase) -> bool {
// No special case for the type of async block, since Chalk can figure it out.
let krate = self.krate;
let std_future_trait =
db.lang_item(krate, SmolStr::new_inline("future_trait")).and_then(|it| it.as_trait());
let std_future_trait = match std_future_trait {
Some(it) => it,
None => return false,
};
let canonical_ty =
Canonical { value: self.ty.clone(), binders: CanonicalVarKinds::empty(&Interner) };
method_resolution::implements_trait(
&canonical_ty,
db,
self.env.clone(),
krate,
std_future_trait,
)
}
    /// Checks whether this type implements `std::ops::FnOnce`.
    ///
    /// This function can be used to check whether a particular type is callable, since `FnOnce`
    /// is a supertrait of `Fn` and `FnMut`, so all callable types implement at least `FnOnce`.
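    ///
    /// Hedged usage sketch (placeholder names, not defined here):
    /// ```ignore
    /// if ty.impls_fnonce(db) {
    ///     // `ty` is callable: e.g. a closure, fn item, or fn pointer
    /// }
    /// ```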
pub fn impls_fnonce(&self, db: &dyn HirDatabase) -> bool {
let krate = self.krate;
let fnonce_trait = match FnTrait::FnOnce.get_id(db, krate) {
Some(it) => it,
None => return false,
};
let canonical_ty =
Canonical { value: self.ty.clone(), binders: CanonicalVarKinds::empty(&Interner) };
method_resolution::implements_trait_unique(
&canonical_ty,
db,
self.env.clone(),
krate,
fnonce_trait,
)
}
pub fn impls_trait(&self, db: &dyn HirDatabase, trait_: Trait, args: &[Type]) -> bool {
let trait_ref = TyBuilder::trait_ref(db, trait_.id)
.push(self.ty.clone())
.fill(args.iter().map(|t| t.ty.clone()))
.build();
let goal = Canonical {
value: hir_ty::InEnvironment::new(&self.env.env, trait_ref.cast(&Interner)),
binders: CanonicalVarKinds::empty(&Interner),
};
db.trait_solve(self.krate, goal).is_some()
}
pub fn normalize_trait_assoc_type(
&self,
db: &dyn HirDatabase,
args: &[Type],
alias: TypeAlias,
) -> Option<Type> {
let projection = TyBuilder::assoc_type_projection(db, alias.id)
.push(self.ty.clone())
.fill(args.iter().map(|t| t.ty.clone()))
.build();
let goal = hir_ty::make_canonical(
InEnvironment::new(
&self.env.env,
AliasEq {
alias: AliasTy::Projection(projection),
ty: TyKind::BoundVar(BoundVar::new(DebruijnIndex::INNERMOST, 0))
.intern(&Interner),
}
.cast(&Interner),
),
[TyVariableKind::General].into_iter(),
);
match db.trait_solve(self.krate, goal)? {
Solution::Unique(s) => s
.value
.subst
.as_slice(&Interner)
.first()
.map(|ty| self.derived(ty.assert_ty_ref(&Interner).clone())),
Solution::Ambig(_) => None,
}
}
pub fn is_copy(&self, db: &dyn HirDatabase) -> bool {
let lang_item = db.lang_item(self.krate, SmolStr::new_inline("copy"));
let copy_trait = match lang_item {
Some(LangItemTarget::TraitId(it)) => it,
_ => return false,
};
self.impls_trait(db, copy_trait.into(), &[])
}
pub fn as_callable(&self, db: &dyn HirDatabase) -> Option<Callable> {
let def = self.ty.callable_def(db);
let sig = self.ty.callable_sig(db)?;
Some(Callable { ty: self.clone(), sig, def, is_bound_method: false })
}
pub fn is_closure(&self) -> bool {
matches!(&self.ty.kind(&Interner), TyKind::Closure { .. })
}
pub fn is_fn(&self) -> bool {
matches!(&self.ty.kind(&Interner), TyKind::FnDef(..) | TyKind::Function { .. })
}
pub fn is_packed(&self, db: &dyn HirDatabase) -> bool {
let adt_id = match *self.ty.kind(&Interner) {
TyKind::Adt(hir_ty::AdtId(adt_id), ..) => adt_id,
_ => return false,
};
let adt = adt_id.into();
match adt {
Adt::Struct(s) => matches!(s.repr(db), Some(ReprKind::Packed)),
_ => false,
}
}
pub fn is_raw_ptr(&self) -> bool {
matches!(&self.ty.kind(&Interner), TyKind::Raw(..))
}
pub fn contains_unknown(&self) -> bool {
return go(&self.ty);
fn go(ty: &Ty) -> bool {
match ty.kind(&Interner) {
TyKind::Error => true,
TyKind::Adt(_, substs)
| TyKind::AssociatedType(_, substs)
| TyKind::Tuple(_, substs)
| TyKind::OpaqueType(_, substs)
| TyKind::FnDef(_, substs)
| TyKind::Closure(_, substs) => {
substs.iter(&Interner).filter_map(|a| a.ty(&Interner)).any(go)
}
TyKind::Array(_ty, len) if len.is_unknown() => true,
TyKind::Array(ty, _)
| TyKind::Slice(ty)
| TyKind::Raw(_, ty)
| TyKind::Ref(_, _, ty) => go(ty),
TyKind::Scalar(_)
| TyKind::Str
| TyKind::Never
| TyKind::Placeholder(_)
| TyKind::BoundVar(_)
| TyKind::InferenceVar(_, _)
| TyKind::Dyn(_)
| TyKind::Function(_)
| TyKind::Alias(_)
| TyKind::Foreign(_)
| TyKind::Generator(..)
| TyKind::GeneratorWitness(..) => false,
}
}
}
pub fn fields(&self, db: &dyn HirDatabase) -> Vec<(Field, Type)> {
let (variant_id, substs) = match self.ty.kind(&Interner) {
TyKind::Adt(hir_ty::AdtId(AdtId::StructId(s)), substs) => ((*s).into(), substs),
TyKind::Adt(hir_ty::AdtId(AdtId::UnionId(u)), substs) => ((*u).into(), substs),
_ => return Vec::new(),
};
db.field_types(variant_id)
.iter()
.map(|(local_id, ty)| {
let def = Field { parent: variant_id.into(), id: local_id };
let ty = ty.clone().substitute(&Interner, substs);
(def, self.derived(ty))
})
.collect()
}
pub fn tuple_fields(&self, _db: &dyn HirDatabase) -> Vec<Type> {
if let TyKind::Tuple(_, substs) = &self.ty.kind(&Interner) {
substs
.iter(&Interner)
.map(|ty| self.derived(ty.assert_ty_ref(&Interner).clone()))
.collect()
} else {
Vec::new()
}
}
pub fn autoderef<'a>(&'a self, db: &'a dyn HirDatabase) -> impl Iterator<Item = Type> + 'a {
self.autoderef_(db).map(move |ty| self.derived(ty))
}
pub fn autoderef_<'a>(&'a self, db: &'a dyn HirDatabase) -> impl Iterator<Item = Ty> + 'a {
// There should be no inference vars in types passed here
let canonical = hir_ty::replace_errors_with_variables(&self.ty);
let environment = self.env.env.clone();
let ty = InEnvironment { goal: canonical, environment };
autoderef(db, Some(self.krate), ty).map(|canonical| canonical.value)
}
    // This would be nicer if it just returned an iterator, but that runs into
    // lifetime problems, because we need to borrow the temporary `CrateImplDefs`.
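    //
    // Hedged usage sketch (placeholder names; returning `Some` from the callback stops
    // the iteration and yields that value):
    //
    //     let first_const = ty.iterate_assoc_items(db, krate, |item| match item {
    //         AssocItem::Const(c) => Some(c),
    //         _ => None,
    //     });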
pub fn iterate_assoc_items<T>(
self,
db: &dyn HirDatabase,
krate: Crate,
mut callback: impl FnMut(AssocItem) -> Option<T>,
) -> Option<T> {
let mut slot = None;
self.iterate_assoc_items_dyn(db, krate, &mut |assoc_item_id| {
slot = callback(assoc_item_id.into());
slot.is_some()
});
slot
}
fn iterate_assoc_items_dyn(
self,
db: &dyn HirDatabase,
krate: Crate,
callback: &mut dyn FnMut(AssocItemId) -> bool,
) {
let def_crates = match method_resolution::def_crates(db, &self.ty, krate.id) {
Some(it) => it,
None => return,
};
for krate in def_crates {
let impls = db.inherent_impls_in_crate(krate);
for impl_def in impls.for_self_ty(&self.ty) {
for &item in db.impl_data(*impl_def).items.iter() {
if callback(item) {
return;
}
}
}
}
}
pub fn type_arguments(&self) -> impl Iterator<Item = Type> + '_ {
self.ty
.strip_references()
.as_adt()
.into_iter()
.flat_map(|(_, substs)| substs.iter(&Interner))
.filter_map(|arg| arg.ty(&Interner).cloned())
.map(move |ty| self.derived(ty))
}
pub fn iterate_method_candidates<T>(
&self,
db: &dyn HirDatabase,
krate: Crate,
traits_in_scope: &FxHashSet<TraitId>,
name: Option<&Name>,
mut callback: impl FnMut(Type, Function) -> Option<T>,
) -> Option<T> {
let _p = profile::span("iterate_method_candidates");
let mut slot = None;
self.iterate_method_candidates_dyn(
db,
krate,
traits_in_scope,
name,
&mut |ty, assoc_item_id| {
if let AssocItemId::FunctionId(func) = assoc_item_id {
if let Some(res) = callback(self.derived(ty.clone()), func.into()) {
slot = Some(res);
return ControlFlow::Break(());
}
}
ControlFlow::Continue(())
},
);
slot
}
fn iterate_method_candidates_dyn(
&self,
db: &dyn HirDatabase,
krate: Crate,
traits_in_scope: &FxHashSet<TraitId>,
name: Option<&Name>,
callback: &mut dyn FnMut(&Ty, AssocItemId) -> ControlFlow<()>,
) {
// There should be no inference vars in types passed here
let canonical = hir_ty::replace_errors_with_variables(&self.ty);
let env = self.env.clone();
let krate = krate.id;
method_resolution::iterate_method_candidates_dyn(
&canonical,
db,
env,
krate,
traits_in_scope,
None,
name,
method_resolution::LookupMode::MethodCall,
&mut |ty, id| callback(&ty.value, id),
);
}
pub fn iterate_path_candidates<T>(
&self,
db: &dyn HirDatabase,
krate: Crate,
traits_in_scope: &FxHashSet<TraitId>,
name: Option<&Name>,
mut callback: impl FnMut(Type, AssocItem) -> Option<T>,
) -> Option<T> {
let _p = profile::span("iterate_path_candidates");
let mut slot = None;
self.iterate_path_candidates_dyn(
db,
krate,
traits_in_scope,
name,
&mut |ty, assoc_item_id| {
if let Some(res) = callback(self.derived(ty.clone()), assoc_item_id.into()) {
slot = Some(res);
return ControlFlow::Break(());
}
ControlFlow::Continue(())
},
);
slot
}
fn iterate_path_candidates_dyn(
&self,
db: &dyn HirDatabase,
krate: Crate,
traits_in_scope: &FxHashSet<TraitId>,
name: Option<&Name>,
callback: &mut dyn FnMut(&Ty, AssocItemId) -> ControlFlow<()>,
) {
let canonical = hir_ty::replace_errors_with_variables(&self.ty);
let env = self.env.clone();
let krate = krate.id;
method_resolution::iterate_method_candidates_dyn(
&canonical,
db,
env,
krate,
traits_in_scope,
None,
name,
method_resolution::LookupMode::Path,
&mut |ty, id| callback(&ty.value, id),
);
}
pub fn as_adt(&self) -> Option<Adt> {
let (adt, _subst) = self.ty.as_adt()?;
Some(adt.into())
}
pub fn as_builtin(&self) -> Option<BuiltinType> {
self.ty.as_builtin().map(|inner| BuiltinType { inner })
}
pub fn as_dyn_trait(&self) -> Option<Trait> {
self.ty.dyn_trait().map(Into::into)
}
/// If a type can be represented as `dyn Trait`, returns all traits accessible via this type,
/// or an empty iterator otherwise.
pub fn applicable_inherent_traits<'a>(
&'a self,
db: &'a dyn HirDatabase,
) -> impl Iterator<Item = Trait> + 'a {
let _p = profile::span("applicable_inherent_traits");
self.autoderef_(db)
.filter_map(|ty| ty.dyn_trait())
.flat_map(move |dyn_trait_id| hir_ty::all_super_traits(db.upcast(), dyn_trait_id))
.map(Trait::from)
}
pub fn env_traits<'a>(&'a self, db: &'a dyn HirDatabase) -> impl Iterator<Item = Trait> + 'a {
let _p = profile::span("env_traits");
self.autoderef_(db)
.filter(|ty| matches!(ty.kind(&Interner), TyKind::Placeholder(_)))
.flat_map(|ty| {
self.env
.traits_in_scope_from_clauses(ty)
.flat_map(|t| hir_ty::all_super_traits(db.upcast(), t))
})
.map(Trait::from)
}
pub fn as_impl_traits(&self, db: &dyn HirDatabase) -> Option<impl Iterator<Item = Trait>> {
self.ty.impl_trait_bounds(db).map(|it| {
it.into_iter().filter_map(|pred| match pred.skip_binders() {
hir_ty::WhereClause::Implemented(trait_ref) => {
Some(Trait::from(trait_ref.hir_trait_id()))
}
_ => None,
})
})
}
pub fn as_associated_type_parent_trait(&self, db: &dyn HirDatabase) -> Option<Trait> {
self.ty.associated_type_parent_trait(db).map(Into::into)
}
fn derived(&self, ty: Ty) -> Type {
Type { krate: self.krate, env: self.env.clone(), ty }
}
pub fn walk(&self, db: &dyn HirDatabase, mut cb: impl FnMut(Type)) {
        // TypeWalk::walk for a Ty first visits its parameters and only then the Ty itself.
        // We need the opposite order here, so walk manually.
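        // E.g. for `Vec<i32>` the callback below sees `Vec<i32>` before `i32`, whereas
        // `TypeWalk` would yield `i32` first.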
fn walk_substs(
db: &dyn HirDatabase,
type_: &Type,
substs: &Substitution,
cb: &mut impl FnMut(Type),
) {
for ty in substs.iter(&Interner).filter_map(|a| a.ty(&Interner)) {
walk_type(db, &type_.derived(ty.clone()), cb);
}
}
fn walk_bounds(
db: &dyn HirDatabase,
type_: &Type,
bounds: &[QuantifiedWhereClause],
cb: &mut impl FnMut(Type),
) {
for pred in bounds {
if let WhereClause::Implemented(trait_ref) = pred.skip_binders() {
cb(type_.clone());
// skip the self type. it's likely the type we just got the bounds from
for ty in trait_ref
.substitution
.iter(&Interner)
.skip(1)
.filter_map(|a| a.ty(&Interner))
{
walk_type(db, &type_.derived(ty.clone()), cb);
}
}
}
}
fn walk_type(db: &dyn HirDatabase, type_: &Type, cb: &mut impl FnMut(Type)) {
let ty = type_.ty.strip_references();
match ty.kind(&Interner) {
TyKind::Adt(_, substs) => {
cb(type_.derived(ty.clone()));
walk_substs(db, type_, substs, cb);
}
TyKind::AssociatedType(_, substs) => {
if ty.associated_type_parent_trait(db).is_some() {
cb(type_.derived(ty.clone()));
}
walk_substs(db, type_, substs, cb);
}
TyKind::OpaqueType(_, subst) => {
if let Some(bounds) = ty.impl_trait_bounds(db) {
walk_bounds(db, &type_.derived(ty.clone()), &bounds, cb);
}
walk_substs(db, type_, subst, cb);
}
TyKind::Alias(AliasTy::Opaque(opaque_ty)) => {
if let Some(bounds) = ty.impl_trait_bounds(db) {
walk_bounds(db, &type_.derived(ty.clone()), &bounds, cb);
}
walk_substs(db, type_, &opaque_ty.substitution, cb);
}
TyKind::Placeholder(_) => {
if let Some(bounds) = ty.impl_trait_bounds(db) {
walk_bounds(db, &type_.derived(ty.clone()), &bounds, cb);
}
}
TyKind::Dyn(bounds) => {
walk_bounds(
db,
&type_.derived(ty.clone()),
bounds.bounds.skip_binders().interned(),
cb,
);
}
TyKind::Ref(_, _, ty)
| TyKind::Raw(_, ty)
| TyKind::Array(ty, _)
| TyKind::Slice(ty) => {
walk_type(db, &type_.derived(ty.clone()), cb);
}
TyKind::FnDef(_, substs)
| TyKind::Tuple(_, substs)
| TyKind::Closure(.., substs) => {
walk_substs(db, type_, substs, cb);
}
TyKind::Function(hir_ty::FnPointer { substitution, .. }) => {
walk_substs(db, type_, &substitution.0, cb);
}
_ => {}
}
}
walk_type(db, self, &mut cb);
}
pub fn could_unify_with(&self, db: &dyn HirDatabase, other: &Type) -> bool {
let tys = hir_ty::replace_errors_with_variables(&(self.ty.clone(), other.ty.clone()));
could_unify(db, self.env.clone(), &tys)
}
}
// FIXME: closures
#[derive(Debug)]
pub struct Callable {
ty: Type,
sig: CallableSig,
def: Option<CallableDefId>,
pub(crate) is_bound_method: bool,
}
pub enum CallableKind {
Function(Function),
TupleStruct(Struct),
TupleEnumVariant(Variant),
Closure,
}
impl Callable {
pub fn kind(&self) -> CallableKind {
match self.def {
Some(CallableDefId::FunctionId(it)) => CallableKind::Function(it.into()),
Some(CallableDefId::StructId(it)) => CallableKind::TupleStruct(it.into()),
Some(CallableDefId::EnumVariantId(it)) => CallableKind::TupleEnumVariant(it.into()),
None => CallableKind::Closure,
}
}
pub fn receiver_param(&self, db: &dyn HirDatabase) -> Option<ast::SelfParam> {
let func = match self.def {
Some(CallableDefId::FunctionId(it)) if self.is_bound_method => it,
_ => return None,
};
let src = func.lookup(db.upcast()).source(db.upcast());
let param_list = src.value.param_list()?;
param_list.self_param()
}
pub fn n_params(&self) -> usize {
self.sig.params().len() - if self.is_bound_method { 1 } else { 0 }
}
pub fn params(
&self,
db: &dyn HirDatabase,
) -> Vec<(Option<Either<ast::SelfParam, ast::Pat>>, Type)> {
let types = self
.sig
.params()
.iter()
.skip(if self.is_bound_method { 1 } else { 0 })
.map(|ty| self.ty.derived(ty.clone()));
let patterns = match self.def {
Some(CallableDefId::FunctionId(func)) => {
let src = func.lookup(db.upcast()).source(db.upcast());
src.value.param_list().map(|param_list| {
param_list
.self_param()
.map(|it| Some(Either::Left(it)))
.filter(|_| !self.is_bound_method)
.into_iter()
.chain(param_list.params().map(|it| it.pat().map(Either::Right)))
})
}
_ => None,
};
patterns.into_iter().flatten().chain(iter::repeat(None)).zip(types).collect()
}
pub fn return_type(&self) -> Type {
self.ty.derived(self.sig.ret().clone())
}
}
/// For IDE only
#[derive(Debug, PartialEq, Eq, Hash)]
pub enum ScopeDef {
ModuleDef(ModuleDef),
MacroDef(MacroDef),
GenericParam(GenericParam),
ImplSelfType(Impl),
AdtSelfType(Adt),
Local(Local),
Label(Label),
Unknown,
}
impl ScopeDef {
pub fn all_items(def: PerNs) -> ArrayVec<Self, 3> {
let mut items = ArrayVec::new();
match (def.take_types(), def.take_values()) {
(Some(m1), None) => items.push(ScopeDef::ModuleDef(m1.into())),
(None, Some(m2)) => items.push(ScopeDef::ModuleDef(m2.into())),
(Some(m1), Some(m2)) => {
// Some items, like unit structs and enum variants, are
// returned as both a type and a value. Here we want
// to de-duplicate them.
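                // E.g. a unit struct `struct S;` is exported in both the type and the
                // value namespace, so `m1 == m2` and it is pushed only once.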
if m1 != m2 {
items.push(ScopeDef::ModuleDef(m1.into()));
items.push(ScopeDef::ModuleDef(m2.into()));
} else {
items.push(ScopeDef::ModuleDef(m1.into()));
}
}
(None, None) => {}
};
if let Some(macro_def_id) = def.take_macros() {
items.push(ScopeDef::MacroDef(macro_def_id.into()));
}
if items.is_empty() {
items.push(ScopeDef::Unknown);
}
items
}
pub fn attrs(&self, db: &dyn HirDatabase) -> Option<AttrsWithOwner> {
match self {
ScopeDef::ModuleDef(it) => it.attrs(db),
ScopeDef::MacroDef(it) => Some(it.attrs(db)),
ScopeDef::GenericParam(it) => Some(it.attrs(db)),
ScopeDef::ImplSelfType(_)
| ScopeDef::AdtSelfType(_)
| ScopeDef::Local(_)
| ScopeDef::Label(_)
| ScopeDef::Unknown => None,
}
}
pub fn krate(&self, db: &dyn HirDatabase) -> Option<Crate> {
match self {
ScopeDef::ModuleDef(it) => it.module(db).map(|m| m.krate()),
ScopeDef::MacroDef(it) => it.module(db).map(|m| m.krate()),
ScopeDef::GenericParam(it) => Some(it.module(db).krate()),
ScopeDef::ImplSelfType(_) => None,
ScopeDef::AdtSelfType(it) => Some(it.module(db).krate()),
ScopeDef::Local(it) => Some(it.module(db).krate()),
ScopeDef::Label(it) => Some(it.module(db).krate()),
ScopeDef::Unknown => None,
}
}
}
impl From<ItemInNs> for ScopeDef {
fn from(item: ItemInNs) -> Self {
match item {
ItemInNs::Types(id) => ScopeDef::ModuleDef(id),
ItemInNs::Values(id) => ScopeDef::ModuleDef(id),
ItemInNs::Macros(id) => ScopeDef::MacroDef(id),
}
}
}
pub trait HasVisibility {
fn visibility(&self, db: &dyn HirDatabase) -> Visibility;
fn is_visible_from(&self, db: &dyn HirDatabase, module: Module) -> bool {
let vis = self.visibility(db);
vis.is_visible_from(db.upcast(), module.id)
}
}
/// Trait for obtaining the defining crate of an item.
pub trait HasCrate {
fn krate(&self, db: &dyn HirDatabase) -> Crate;
}
impl<T: hir_def::HasModule> HasCrate for T {
fn krate(&self, db: &dyn HirDatabase) -> Crate {
self.module(db.upcast()).krate().into()
}
}
impl HasCrate for AssocItem {
fn krate(&self, db: &dyn HirDatabase) -> Crate {
self.module(db).krate()
}
}
impl HasCrate for Field {
fn krate(&self, db: &dyn HirDatabase) -> Crate {
self.parent_def(db).module(db).krate()
}
}
impl HasCrate for Function {
fn krate(&self, db: &dyn HirDatabase) -> Crate {
self.module(db).krate()
}
}
impl HasCrate for Const {
fn krate(&self, db: &dyn HirDatabase) -> Crate {
self.module(db).krate()
}
}
impl HasCrate for TypeAlias {
fn krate(&self, db: &dyn HirDatabase) -> Crate {
self.module(db).krate()
}
}
impl HasCrate for Type {
fn krate(&self, _db: &dyn HirDatabase) -> Crate {
self.krate.into()
}
}
| 34.619424 | 124 | 0.527707 |
082aecb6a483d6c770c8146720d62a5fe785c0ca | 2,909 | use pgx::*;
use serde::{Deserialize, Serialize};
use std::ffi::CStr;
use std::str::FromStr;
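/// A fixed-size varlena type whose Postgres text representation is `a,b,c`
/// (e.g. `'1.0,2.0,3'::VarlenaType`), as implemented by the `PgVarlenaInOutFuncs`
/// impl below and exercised by the tests at the bottom of this file.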
#[derive(Copy, Clone, PostgresType)]
#[pgvarlena_inoutfuncs]
pub struct VarlenaType {
a: f32,
b: f32,
c: i64,
}
impl PgVarlenaInOutFuncs for VarlenaType {
    fn input(input: &CStr) -> PgVarlena<Self> {
let mut iter = input.to_str().unwrap().split(',');
let (a, b, c) = (iter.next(), iter.next(), iter.next());
let mut result = PgVarlena::<VarlenaType>::new();
result.a = f32::from_str(a.unwrap()).expect("a is not a valid f32");
result.b = f32::from_str(b.unwrap()).expect("b is not a valid f32");
result.c = i64::from_str(c.unwrap()).expect("c is not a valid i64");
result
}
fn output(&self, buffer: &mut StringInfo) {
buffer.push_str(&format!("{},{},{}", self.a, self.b, self.c))
}
}
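/// A serde-serialized type whose text representation is customized to `a,b,c` by the
/// `InOutFuncs` impl below (e.g. `'1.0,2.0,3'::CustomTextFormatSerializedType`).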
#[derive(Serialize, Deserialize, PostgresType)]
#[inoutfuncs]
pub struct CustomTextFormatSerializedType {
a: f32,
b: f32,
c: i64,
}
impl InOutFuncs for CustomTextFormatSerializedType {
fn input(input: &CStr) -> Self {
let mut iter = input.to_str().unwrap().split(',');
let (a, b, c) = (iter.next(), iter.next(), iter.next());
CustomTextFormatSerializedType {
a: f32::from_str(a.unwrap()).expect("a is not a valid f32"),
b: f32::from_str(b.unwrap()).expect("b is not a valid f32"),
c: i64::from_str(c.unwrap()).expect("c is not a valid i64"),
}
}
fn output(&self, buffer: &mut StringInfo) {
buffer.push_str(&format!("{},{},{}", self.a, self.b, self.c))
}
}
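/// With no custom in/out functions, the text representation falls back to JSON
/// (e.g. `'{"a": 1.0, "b": 2.0, "c": 3}'::JsonType`, as exercised by the test below).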
#[derive(Serialize, Deserialize, PostgresType)]
pub struct JsonType {
a: f32,
b: f32,
c: i64,
}
#[cfg(any(test, feature = "pg_test"))]
mod tests {
#[allow(unused_imports)]
use crate as pgx_tests;
use crate::tests::postgres_type_tests::{
CustomTextFormatSerializedType, JsonType, VarlenaType,
};
use pgx::*;
#[pg_test]
fn test_mytype() {
let result = Spi::get_one::<PgVarlena<VarlenaType>>("SELECT '1.0,2.0,3'::VarlenaType")
.expect("SPI returned NULL");
assert_eq!(result.a, 1.0);
assert_eq!(result.b, 2.0);
assert_eq!(result.c, 3);
}
#[pg_test]
fn test_serializedtype() {
let result = Spi::get_one::<CustomTextFormatSerializedType>(
"SELECT '1.0,2.0,3'::CustomTextFormatSerializedType",
)
.expect("SPI returned NULL");
assert_eq!(result.a, 1.0);
assert_eq!(result.b, 2.0);
assert_eq!(result.c, 3);
}
#[pg_test]
fn test_jsontype() {
let result = Spi::get_one::<JsonType>(r#"SELECT '{"a": 1.0, "b": 2.0, "c": 3}'::JsonType"#)
.expect("SPI returned NULL");
assert_eq!(result.a, 1.0);
assert_eq!(result.b, 2.0);
assert_eq!(result.c, 3);
}
}
| 28.519608 | 99 | 0.577862 |
ef7672ccd7f0df1c9766dcb702767ee8811494a6 | 30,073 | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
fuchsia_async as fasync,
fuchsia_zircon::{self as zx, AsHandleRef, Duration},
futures::{
channel::mpsc::Receiver,
future::{Fuse, FutureExt},
select,
sink::SinkExt,
stream::StreamExt,
},
parking_lot::Mutex,
};
use crate::{
control_plane::{ControlPlane, Message, Responder},
log::*,
snoop::Snoop,
transport::{HwTransport, IncomingPacket, OutgoingPacket},
};
/// Wrap the open fuchsia_zircon::Channel `in_chan` as a fuchsia_async::Channel in `channel`.
/// Send a response using `responder`.
/// If `channel` is already `Option::Some`, then an error is sent, and the channel is not replaced.
///
/// Returns `true` if `channel` is set by this future and `false` if it is not.
async fn try_open_channel(
in_chan: zx::Channel,
channel: &mut Option<fasync::Channel>,
mut responder: Responder,
) -> bool {
let status = if channel.is_some() {
zx::Status::ALREADY_BOUND
} else {
match fasync::Channel::from_channel(in_chan) {
Ok(in_chan) => {
*channel = Some(in_chan);
zx::Status::OK
}
Err(e) => e.into(),
}
};
responder.send(status).await.unwrap_or_else(log_responder_error);
status == zx::Status::OK
}
/// A handle that can be used to communicate with the worker task running on a
/// separate thread. Messages should be sent using the `control_plane`.
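///
/// Hedged usage sketch (error handling elided; `async_send` is the same entry point the
/// tests in this file use, and must be awaited from an async context):
/// ```ignore
/// let worker = WorkerHandle::new("bt-transport-worker")?;
/// let (their_end, _our_end) = zx::Channel::create()?;
/// let status = worker.control_plane.lock().async_send(Message::OpenCmd(their_end)).await;
/// assert_eq!(status, zx::Status::OK);
/// ```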
pub struct WorkerHandle {
pub control_plane: Mutex<ControlPlane>,
    /// `thread` always starts out as `Some(zx::Thread)` when the thread is spawned; the thread
    /// object is taken out when the thread is shut down by a `WorkerHandle::shutdown` call.
thread: Option<zx::Thread>,
}
impl WorkerHandle {
/// Spawn a thread that runs the worker task. Returns a handle that can be
/// used to communicate with that running task.
pub fn new(name: impl Into<String>) -> Result<WorkerHandle, zx::Status> {
let (control_plane, receiver) = ControlPlane::new();
let (th_tx, th_rx) = std::sync::mpsc::channel();
let thread_builder = std::thread::Builder::new().name(name.into());
thread_builder.spawn(move || {
bt_log_spew!("spawned worker thread");
// Create and immediately run a closure returning a result for more convenient error
// propogation.
let result = move || -> Result<(), anyhow::Error> {
let handle = fuchsia_runtime::thread_self().duplicate(zx::Rights::SAME_RIGHTS)?;
th_tx.send(handle)?;
let mut executor = fasync::Executor::new()?;
executor.run_singlethreaded(run(receiver));
Ok(())
}();
if let Err(e) = result {
bt_log_err!("error running worker thread: {}", e);
};
unsafe { zx::sys::zx_thread_exit() }
})?;
// Error receiving the worker thread handle indicates that the worker thread died before it
// could send the handle or that it was never spawned.
let handle = th_rx.recv().map_err(|_| zx::Status::INTERNAL)?;
Ok(WorkerHandle { control_plane: Mutex::new(control_plane), thread: Some(handle) })
}
pub fn shutdown(self, timeout: Duration) {
self.control_plane.lock().close();
if let Some(t) = &self.thread {
match t.wait_handle(zx::Signals::THREAD_TERMINATED, zx::Time::after(timeout)) {
Ok(_) => bt_log_info!("child thread has exited"),
Err(e) => bt_log_info!("wait on child thread termination failed: {}", e),
}
} else {
bt_log_warn!("No handle for child thread available to wait on.");
}
}
}
/// Holds the state kept by the main worker task.
pub(crate) struct Worker {
transport: Box<dyn HwTransport>,
cmd: Option<fasync::Channel>,
acl: Option<fasync::Channel>,
snoop: Snoop,
}
impl Worker {
fn new(
transport: Box<dyn HwTransport>,
cmd: Option<fasync::Channel>,
acl: Option<fasync::Channel>,
snoop: Snoop,
) -> Self {
Self { transport, cmd, acl, snoop }
}
    /// Wait until the worker's transport object and cmd channel have been set. This prevents the
    /// driver from reading data from a source without having anywhere for the data to go.
pub async fn build(control_plane: &mut Receiver<(Message, Responder)>) -> Option<Worker> {
bt_log_trace!("building read task");
let mut transport: Option<Box<dyn HwTransport>> = None;
let mut host_cmd = None;
let mut host_acl = None;
let mut snoop = Snoop::default();
while let Some(msg) = control_plane.next().await {
match msg {
(Message::OpenTransport(builder), mut responder) => {
if transport.is_some() {
bt_log_err!("transport already bound");
responder
.send(zx::Status::ALREADY_BOUND)
.await
.unwrap_or_else(log_responder_error);
// The OpenTransport message is controlled by the driver rather than as part
// of the bt-transport protocol. Therefore, setting it multiple times
// indicates an error in the driver's program logic rather than a client-
// side issue. Because of this, `build` gives up on trying to build a Worker
// object.
return None;
}
match builder.build() {
Ok(h) => {
responder
.send(zx::Status::OK)
.await
.unwrap_or_else(log_responder_error);
if host_cmd.is_some() {
return Some(Worker::new(h, host_cmd, host_acl, snoop));
} else {
transport = Some(h);
}
}
Err(e) => {
responder.send(e.into()).await.unwrap_or_else(log_responder_error);
}
}
}
(Message::OpenCmd(c), responder) => {
if !try_open_channel(c, &mut host_cmd, responder).await {
// Continue to the next loop iteration, if there is no newly opened
// command channel.
continue;
}
if let Some(transport) = transport {
return Some(Worker::new(transport, host_cmd, host_acl, snoop));
}
}
(Message::OpenAcl(c), responder) => {
try_open_channel(c, &mut host_acl, responder).await;
}
(Message::OpenSnoop(c), responder) => {
try_open_channel(c, &mut snoop.channel, responder).await;
}
(Message::Unbind, mut responder) => {
// Close all open resources before responding to Unbind message
drop(host_cmd);
drop(host_acl);
drop(snoop);
if let Some(mut t) = transport {
unsafe {
t.unbind();
}
}
responder.send(zx::Status::OK).await.unwrap_or_else(log_responder_error);
return None;
}
}
}
None
}
}
/// Main async task that proxies data between the higher layers of the Bluetooth system and the
/// underlying hardware over the Bluetooth Host Controller Interface.
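///
/// Schematically (an illustrative summary of the select loop below):
/// * control-plane messages (re)bind the cmd/acl/snoop channels or unbind the worker;
/// * packets read from the host cmd/acl channels are forwarded to the transport and
///   mirrored to the snoop channel with an "outgoing" flag;
/// * packets from the transport are written to the matching host channel and mirrored
///   to the snoop channel with an "incoming" flag.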
async fn run(mut control_plane: Receiver<(Message, Responder)>) {
// Declare all local variables needed by this task
let mut worker: Worker;
let mut cmd_buf = zx::MessageBuf::new();
let mut acl_buf = zx::MessageBuf::new();
let transport_borrow;
let mut cmd_read;
let mut acl_read;
let mut incoming_buffer = Vec::with_capacity(0);
// Get all handles before reading any data.
// Set up read futures from sockets
if let Some(w) = Worker::build(&mut control_plane).await {
worker = w;
transport_borrow = worker.transport.as_mut();
cmd_read = if let Some(cmd) = worker.cmd.as_ref() {
cmd.recv_msg(&mut cmd_buf).fuse()
} else {
Fuse::terminated()
};
acl_read = if let Some(acl) = worker.acl.as_ref() {
acl.recv_msg(&mut acl_buf).fuse()
} else {
Fuse::terminated()
};
} else {
return;
}
loop {
select! {
msg = control_plane.next() => {
if let Some(m) = msg {
match m {
(Message::OpenTransport(_), mut responder) => {
bt_log_warn!("transport already bound");
responder.send(zx::Status::ALREADY_BOUND).await
.unwrap_or_else(log_responder_error);
}
(Message::OpenCmd(c), responder) => {
cmd_read = Fuse::terminated();
if try_open_channel(c, &mut worker.cmd, responder).await {
let cmd = worker.cmd.as_ref()
.expect("try_open_channel returning true indicates cmd is Some");
cmd_read = cmd.recv_msg(&mut cmd_buf).fuse();
}
}
(Message::OpenAcl(c), responder) => {
acl_read = Fuse::terminated();
if try_open_channel(c, &mut worker.acl, responder).await {
let acl = worker.acl.as_ref()
.expect("try_open_channel returning true indicates acl is Some");
acl_read = acl.recv_msg(&mut acl_buf).fuse();
}
}
(Message::OpenSnoop(c), mut responder) => {
// Because the snoop channel is not read from, it needs to be polled
// here to determine whether it is bound or not. If a read was
// performed on the channel, a closed notification would be surfaced at
// the point where the read is performed.
if worker.snoop.is_bound() {
responder.send(zx::Status::ALREADY_BOUND).await
.unwrap_or_else(log_responder_error);
} else {
try_open_channel(c, &mut worker.snoop.channel, responder).await;
}
}
(Message::Unbind, mut responder) => {
// Signal unbind to transport and close all worker resources before
// responding.
unsafe { worker.transport.unbind(); }
drop(worker);
responder
.send(zx::Status::OK)
.await
.unwrap_or_else(log_responder_error);
return;
}
}
} else {
// driver has dropped the sender so we should close
bt_log_warn!("driver channel closed. read task ending");
return;
}
}
res = cmd_read => {
trace_duration!("Worker::CommandReadOutgoing");
// End current borrow of cmd by cmd_read
cmd_read = Fuse::terminated();
if let Err(status) = res {
log_read_error(status, "Command");
worker.cmd = None;
continue;
}
// forward data to the transport
transport_borrow.send(OutgoingPacket::Cmd(cmd_buf.bytes())).await
.expect("Underlying transport driver error");
// write data to snoop channel
worker.snoop.write(Snoop::OUTGOING_CMD, cmd_buf.bytes());
// rearm read future
cmd_read = worker.cmd.as_ref().expect("cmd must be some in this select branch")
.recv_msg(&mut cmd_buf).fuse();
}
res = acl_read => {
trace_duration!("Worker::AclReadOutgoing");
// End current borrow of acl by acl_read
acl_read = Fuse::terminated();
if let Err(status) = res {
log_read_error(status, "Acl");
worker.acl = None;
continue;
}
// forward data to the transport
transport_borrow.send(OutgoingPacket::Acl(acl_buf.bytes())).await
.expect("Underlying transport driver error");
// write data to snoop channel
worker.snoop.write(Snoop::OUTGOING_ACL, acl_buf.bytes());
// rearm future
acl_read = worker.acl.as_ref().expect("acl must be some in this select branch")
.recv_msg(&mut acl_buf).fuse();
}
res = transport_borrow.next() => {
trace_duration!("Worker::IncomingPacket");
match res {
Some(token) => {
match transport_borrow.take_incoming(token, incoming_buffer) {
                            IncomingPacket::Event(data) => {
trace_duration!("Worker::EventSendIncoming");
incoming_buffer = data;
let mut success = true;
if let Some(cmd) = worker.cmd.as_ref() {
success = cmd.write(&incoming_buffer, &mut vec![]).is_ok();
}
if !success {
bt_log_warn!("Failed write to command channel");
cmd_read = Fuse::terminated();
worker.cmd = None;
}
worker.snoop.write(Snoop::INCOMING_EVT, &incoming_buffer);
}
                            IncomingPacket::Acl(data) => {
trace_duration!("Worker::AclSendIncoming");
incoming_buffer = data;
let mut success = true;
if let Some(acl) = worker.acl.as_ref() {
success = acl.write(&incoming_buffer, &mut vec![]).is_ok();
}
if !success {
bt_log_warn!("Failed write to acl channel");
acl_read = Fuse::terminated();
worker.acl = None;
}
worker.snoop.write(Snoop::INCOMING_ACL, &incoming_buffer);
}
}
}
None => {
// TODO (49096): unbind the driver or attempt to reopen the underlying
// driver.
bt_log_err!("Error fetching data from underlying transport driver");
return;
}
}
}
}
}
}
fn log_read_error(status: zx::Status, channel_name: &'static str) {
if status == zx::Status::PEER_CLOSED {
bt_log_info!("{} channel closed", channel_name);
} else {
bt_log_info!("Error reading from {} channel {:?} -- closing", channel_name, status);
}
}
fn log_responder_error<E: std::fmt::Debug>(e: E) {
bt_log_err!("could not notify main thread of message response: {:?}", e);
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{test_utils::*, transport::IncomingPacket};
use async_utils::PollExt;
use futures::future;
#[fasync::run_until_stalled(test)]
async fn build_worker_not_yet_complete() {
let (mut control_plane, mut receiver) = ControlPlane::new();
let worker = async move {
Worker::build(&mut receiver).await;
panic!("worker stopped unexpectedly");
}
.fuse();
// Handle worker requests in the background, panicking if the `Worker::build` future
// completes.
fasync::Task::local(worker).detach();
let (cmd, cmd_) = zx::Channel::create().unwrap();
assert_eq!(control_plane.async_send(Message::OpenCmd(cmd)).await, zx::Status::OK);
assert_eq!(
control_plane.async_send(Message::OpenCmd(cmd_)).await,
zx::Status::ALREADY_BOUND
);
let (snoop, snoop_) = zx::Channel::create().unwrap();
assert_eq!(control_plane.async_send(Message::OpenSnoop(snoop)).await, zx::Status::OK);
assert_eq!(
control_plane.async_send(Message::OpenSnoop(snoop_)).await,
zx::Status::ALREADY_BOUND
);
let (acl, acl_) = zx::Channel::create().unwrap();
assert_eq!(control_plane.async_send(Message::OpenAcl(acl)).await, zx::Status::OK);
assert_eq!(
control_plane.async_send(Message::OpenAcl(acl_)).await,
zx::Status::ALREADY_BOUND
);
}
#[fasync::run_until_stalled(test)]
async fn build_worker_not_yet_complete_then_unbind_and_check_host_resources() {
let (mut control_plane, mut receiver) = ControlPlane::new();
let worker = async move {
let res = Worker::build(&mut receiver).await;
// Worker should never be built in this test since unbind is called before worker
// finishes building
assert!(res.is_none());
}
.fuse();
// Handle worker requests in the background.
fasync::Task::local(worker).detach();
// Create channels from host to check that all host side resources are cleaned up on unbind
// Do not create Transport yet so that the worker does not complete the "build" function.
let (cmd, cmd_) = zx::Channel::create().unwrap();
assert_eq!(control_plane.async_send(Message::OpenCmd(cmd)).await, zx::Status::OK);
let (snoop, snoop_) = zx::Channel::create().unwrap();
assert_eq!(control_plane.async_send(Message::OpenSnoop(snoop)).await, zx::Status::OK);
let (acl, acl_) = zx::Channel::create().unwrap();
assert_eq!(control_plane.async_send(Message::OpenAcl(acl)).await, zx::Status::OK);
// Send an unbind message
assert_eq!(control_plane.async_send(Message::Unbind).await, zx::Status::OK);
// All peer channels must be closed here
assert_eq!(cmd_.write(b"", &mut vec![]), Err(zx::Status::PEER_CLOSED));
assert_eq!(snoop_.write(b"", &mut vec![]), Err(zx::Status::PEER_CLOSED));
assert_eq!(acl_.write(b"", &mut vec![]), Err(zx::Status::PEER_CLOSED));
}
#[fasync::run_until_stalled(test)]
async fn build_worker_not_yet_complete_then_unbind_and_check_transport_resources() {
let (mut control_plane, mut receiver) = ControlPlane::new();
let worker = async move {
let res = Worker::build(&mut receiver).await;
// Worker should never be built in this test since unbind is called before worker
// finishes building
assert!(res.is_none());
}
.fuse();
// Handle worker requests in the background.
fasync::Task::local(worker).detach();
// Create transport resource to check that it is cleaned up on unbind message
let (transport, _, _) = TestTransport::new();
// Future that will complete once `HwTransport::unbind` is called.
let unbound = transport.unbound.wait_or_dropped();
assert_eq!(
control_plane.async_send(Message::OpenTransport(transport)).await,
zx::Status::OK
);
// Send an unbind message
assert_eq!(control_plane.async_send(Message::Unbind).await, zx::Status::OK);
// Check that the `HwTransport` received the unbind method call.
let _ = futures::poll!(unbound)
.expect("unbound event to be signaled after unbind message is handled");
}
#[fasync::run_until_stalled(test)]
async fn build_worker_drop_control_plane_returns_none() {
let (mut control_plane, mut receiver) = ControlPlane::new();
let worker = Worker::build(&mut receiver);
let tests = async {
let (transport, _, _) = TestTransport::new();
assert_eq!(
control_plane.async_send(Message::OpenTransport(transport)).await,
zx::Status::OK
);
drop(control_plane);
};
let (w, _) = future::join(worker, tests).await;
assert!(w.is_none());
}
#[fasync::run_until_stalled(test)]
async fn build_worker_returns_none() {
let (mut control_plane, mut receiver) = ControlPlane::new();
let worker = Worker::build(&mut receiver);
let tests = async {
let (transport, _, _) = TestTransport::new();
assert_eq!(
control_plane.async_send(Message::OpenTransport(transport)).await,
zx::Status::OK
);
let (transport, _, _) = TestTransport::new();
assert_eq!(
control_plane.async_send(Message::OpenTransport(transport)).await,
zx::Status::ALREADY_BOUND
);
};
let worker = future::join(worker, tests).await;
assert!(worker.0.is_none());
}
#[fasync::run_until_stalled(test)]
async fn build_worker_success() {
let (mut control_plane, mut receiver) = ControlPlane::new();
let worker = Worker::build(&mut receiver);
let tests = async {
let (cmd, _cmd) = zx::Channel::create().unwrap();
assert_eq!(control_plane.async_send(Message::OpenCmd(cmd)).await, zx::Status::OK);
let (transport, _, _) = TestTransport::new();
assert_eq!(
control_plane.async_send(Message::OpenTransport(transport)).await,
zx::Status::OK
);
};
let (w, _) = future::join(worker, tests).await;
assert!(w.is_some());
}
#[fasync::run_until_stalled(test)]
async fn run_worker_receive_messages() {
let (mut control_plane, receiver) = ControlPlane::new();
fasync::Task::local(run(receiver)).detach();
let (transport, _in, _out) = TestTransport::new();
assert_eq!(
control_plane.async_send(Message::OpenTransport(transport)).await,
zx::Status::OK
);
let (cmd, _cmd) = zx::Channel::create().unwrap();
assert_eq!(control_plane.async_send(Message::OpenCmd(cmd)).await, zx::Status::OK);
// At this point we have a Worker object built in the background thread
let (cmd, _cmd) = zx::Channel::create().unwrap();
assert_eq!(
control_plane.async_send(Message::OpenCmd(cmd)).await,
zx::Status::ALREADY_BOUND
);
let (acl, _acl) = zx::Channel::create().unwrap();
assert_eq!(control_plane.async_send(Message::OpenAcl(acl)).await, zx::Status::OK);
let (acl, _acl) = zx::Channel::create().unwrap();
assert_eq!(
control_plane.async_send(Message::OpenAcl(acl)).await,
zx::Status::ALREADY_BOUND
);
let (snoop, _snoop) = zx::Channel::create().unwrap();
assert_eq!(control_plane.async_send(Message::OpenSnoop(snoop)).await, zx::Status::OK);
let (snoop, _snoop) = zx::Channel::create().unwrap();
assert_eq!(
control_plane.async_send(Message::OpenSnoop(snoop)).await,
zx::Status::ALREADY_BOUND
);
}
#[fasync::run_until_stalled(test)]
async fn run_worker_send_recv_cmd_channel_data() {
let (mut control_plane, receiver) = ControlPlane::new();
fasync::Task::local(run(receiver)).detach();
let (transport, transport_in, mut transport_out) = TestTransport::new();
assert_eq!(
control_plane.async_send(Message::OpenTransport(transport)).await,
zx::Status::OK
);
let (cmd, c) = zx::Channel::create().unwrap();
assert_eq!(control_plane.async_send(Message::OpenCmd(cmd)).await, zx::Status::OK);
let cmd = fasync::Channel::from_channel(c).unwrap();
let (snoop, s) = zx::Channel::create().unwrap();
let snoop_sink = SnoopSink::spawn_from_channel(s);
assert_eq!(control_plane.async_send(Message::OpenSnoop(snoop)).await, zx::Status::OK);
let data = vec![1, 2, 3];
cmd.write(&data, &mut vec![]).unwrap();
assert_eq!(transport_out.next().await.unwrap(), OwnedOutgoingPacket::Cmd(data));
let data = vec![4, 5, 6];
let expected = data.clone();
let mut buf = zx::MessageBuf::new();
let cmd_read = cmd.recv_msg(&mut buf);
transport_in.unbounded_send(IncomingPacket::Event(data)).unwrap();
cmd_read.await.unwrap();
assert_eq!(buf.bytes(), &expected[..]);
// assert snoop reads
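        // Note (inferred from the expected values below, not stated in the original
        // source): each snoop frame appears to carry a one-byte tag prefix whose low
        // bits encode the packet type (0 = command, 1 = event, 2 = ACL) and whose
        // bit 2 marks packets received from the controller, hence 5 = received
        // event and 6 = received ACL in the assertions.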
let snoop_output = snoop_sink.data().await;
assert_eq!(snoop_output.len(), 2);
assert_eq!(snoop_output[0], vec![0, 1, 2, 3]);
assert_eq!(snoop_output[1], vec![5, 4, 5, 6]);
}
#[fasync::run_until_stalled(test)]
async fn run_worker_send_recv_acl_channel_data() {
let (mut control_plane, receiver) = ControlPlane::new();
fasync::Task::local(run(receiver)).detach();
let (transport, transport_in, mut transport_out) = TestTransport::new();
assert_eq!(
control_plane.async_send(Message::OpenTransport(transport)).await,
zx::Status::OK
);
let (cmd, _c) = zx::Channel::create().unwrap();
assert_eq!(control_plane.async_send(Message::OpenCmd(cmd)).await, zx::Status::OK);
let (acl, a) = zx::Channel::create().unwrap();
assert_eq!(control_plane.async_send(Message::OpenAcl(acl)).await, zx::Status::OK);
let acl = fasync::Channel::from_channel(a).unwrap();
let (snoop, s) = zx::Channel::create().unwrap();
let snoop_sink = SnoopSink::spawn_from_channel(s);
assert_eq!(control_plane.async_send(Message::OpenSnoop(snoop)).await, zx::Status::OK);
let data = vec![1, 2, 3];
acl.write(&data, &mut vec![]).unwrap();
assert_eq!(transport_out.next().await.unwrap(), OwnedOutgoingPacket::Acl(data));
let data = vec![4, 5, 6];
let expected = data.clone();
let mut buf = zx::MessageBuf::new();
let acl_read = acl.recv_msg(&mut buf);
transport_in.unbounded_send(IncomingPacket::Acl(data)).unwrap();
acl_read.await.unwrap();
assert_eq!(buf.bytes(), &expected[..]);
// assert snoop reads
let snoop_output = snoop_sink.data().await;
assert_eq!(snoop_output.len(), 2);
assert_eq!(snoop_output[0], vec![2, 1, 2, 3]);
assert_eq!(snoop_output[1], vec![6, 4, 5, 6]);
}
#[fasync::run_until_stalled(test)]
async fn worker_unbind_then_resources_are_closed() {
let (mut control_plane, receiver) = ControlPlane::new();
fasync::Task::local(run(receiver)).detach();
// Create transport resource to check that it is cleaned up on unbind message
let (transport, _transport_in, _transport_out) = TestTransport::new();
// Future that will complete once `HwTransport::unbind` is called.
let unbound = transport.unbound.wait();
assert_eq!(
control_plane.async_send(Message::OpenTransport(transport)).await,
zx::Status::OK
);
// Create channels from host to check that all host side resources are cleaned up on unbind
let (cmd, cmd_) = zx::Channel::create().unwrap();
assert_eq!(control_plane.async_send(Message::OpenCmd(cmd)).await, zx::Status::OK);
let (snoop, snoop_) = zx::Channel::create().unwrap();
assert_eq!(control_plane.async_send(Message::OpenSnoop(snoop)).await, zx::Status::OK);
let (acl, acl_) = zx::Channel::create().unwrap();
assert_eq!(control_plane.async_send(Message::OpenAcl(acl)).await, zx::Status::OK);
// Send an unbind message
assert_eq!(control_plane.async_send(Message::Unbind).await, zx::Status::OK);
// All peer channels must be closed here
assert_eq!(cmd_.write(b"", &mut vec![]), Err(zx::Status::PEER_CLOSED));
assert_eq!(snoop_.write(b"", &mut vec![]), Err(zx::Status::PEER_CLOSED));
assert_eq!(acl_.write(b"", &mut vec![]), Err(zx::Status::PEER_CLOSED));
// Check that the `HwTransport` received the unbind method call.
let _ = futures::poll!(unbound)
.expect("unbound event to be signaled after unbind message is handled");
}
}
| 41.537293 | 101 | 0.538722 |
03d198a34080f4fb201966406d39e35b7fcf55ed | 3,450 | use crate::{config::ResourceType, InMemoryCache, UpdateCache};
use twilight_model::{
channel::Channel,
gateway::payload::incoming::{ChannelCreate, ChannelDelete, ChannelPinsUpdate, ChannelUpdate},
id::{marker::ChannelMarker, Id},
};
impl InMemoryCache {
pub(crate) fn cache_channels(&self, channels: impl IntoIterator<Item = Channel>) {
for channel in channels {
self.cache_channel(channel);
}
}
pub(crate) fn cache_channel(&self, channel: Channel) {
if let Some(guild_id) = channel.guild_id {
self.guild_channels
.entry(guild_id)
.or_default()
.insert(channel.id);
}
self.channels.insert(channel.id, channel);
}
/// Delete a guild channel from the cache.
///
/// The guild channel data itself and the channel entry in its guild's list
/// of channels will be deleted.
pub(crate) fn delete_channel(&self, channel_id: Id<ChannelMarker>) {
if let Some((_, channel)) = self.channels.remove(&channel_id) {
if let Some(guild_id) = channel.guild_id {
let maybe_channels = self.guild_channels.get_mut(&guild_id);
if let Some(mut channels) = maybe_channels {
channels.remove(&channel_id);
}
}
}
}
}
impl UpdateCache for ChannelCreate {
fn update(&self, cache: &InMemoryCache) {
if !cache.wants(ResourceType::CHANNEL) {
return;
}
cache.cache_channel(self.0.clone());
}
}
impl UpdateCache for ChannelDelete {
fn update(&self, cache: &InMemoryCache) {
if !cache.wants(ResourceType::CHANNEL) {
return;
}
cache.delete_channel(self.0.id);
}
}
impl UpdateCache for ChannelPinsUpdate {
fn update(&self, cache: &InMemoryCache) {
if !cache.wants(ResourceType::CHANNEL) {
return;
}
if let Some(mut channel) = cache.channels.get_mut(&self.channel_id) {
channel.last_pin_timestamp = self.last_pin_timestamp;
}
}
}
impl UpdateCache for ChannelUpdate {
fn update(&self, cache: &InMemoryCache) {
if !cache.wants(ResourceType::CHANNEL) {
return;
}
cache.cache_channel(self.0.clone());
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test;
use twilight_model::gateway::event::Event;
#[test]
fn channel_delete_guild() {
let cache = InMemoryCache::new();
let (guild_id, channel_id, channel) = test::guild_channel_text();
cache.cache_channel(channel.clone());
assert_eq!(1, cache.channels.len());
assert!(cache
.guild_channels
.get(&guild_id)
.unwrap()
.contains(&channel_id));
cache.update(&Event::ChannelDelete(Box::new(ChannelDelete(channel))));
assert!(cache.channels.is_empty());
assert!(cache.guild_channels.get(&guild_id).unwrap().is_empty());
}
#[test]
fn channel_update_guild() {
let cache = InMemoryCache::new();
let (guild_id, channel_id, channel) = test::guild_channel_text();
cache.update(&ChannelUpdate(channel));
assert_eq!(1, cache.channels.len());
assert!(cache
.guild_channels
.get(&guild_id)
.unwrap()
.contains(&channel_id));
}
}
| 28.04878 | 97 | 0.588696 |
5b003630da57c6812ea576437619f5be322f97dd | 3,723 | use pir_8_emu::isa::instruction::{ParseInstructionError, Instruction};
use rand::distributions::{Alphanumeric, Distribution};
use self::super::super::super::alt_gp_registers;
use self::super::unrecognised_register_letter;
use pir_8_emu::isa::GeneralPurposeRegister;
use rand::thread_rng;
#[test]
fn load_immedate_byte() {
for pad_left in 1..5 {
for pad_right in 1..5 {
unrecognised_register_letter(&format!("LOAD{e:wl$}IMM{e:wr$}BYTE", e = "", wl = pad_left, wr = pad_right));
}
}
}
#[test]
fn load_indirect() {
for pad in 1..5 {
unrecognised_register_letter(&format!("LOAD{e:w$}IND", e = "", w = pad));
}
}
#[test]
fn save() {
unrecognised_register_letter("SAVE");
}
#[test]
fn move_qqq() {
unrecognised_register_letter("MOVE");
}
#[test]
fn move_rrr() {
for regs in &[GeneralPurposeRegister::defaults(), alt_gp_registers()] {
for pad_left in 1..5 {
for pad_center in 1..5 {
for pad_right in 1..5 {
for pad_rright in 1..5 {
for qqq in regs {
for _ in 0..10 {
let qqq = qqq.letter();
let rrr = Alphanumeric.sample_iter(thread_rng())
.find(|rrr| regs.iter().find(|v| v.letter().eq_ignore_ascii_case(rrr)).is_none())
.unwrap();
let instr = format!("{e:wl$}MOVE{e:wc$}{}{e:wr$}{}{e:wrr$}",
qqq,
rrr,
e = "",
wl = pad_left,
wc = pad_center,
wr = pad_right,
wrr = pad_rright);
assert_eq!(Instruction::from_str(&instr, regs),
Err(ParseInstructionError::UnrecognisedRegisterLetter(pad_left + 4 + pad_center + 1 + pad_right + 1,
rrr,
[regs[0].letter(),
regs[1].letter(),
regs[2].letter(),
regs[3].letter(),
regs[4].letter(),
regs[5].letter(),
regs[6].letter(),
regs[7].letter()])),
"{:?}",
instr);
}
}
}
}
}
}
}
}
#[test]
fn comp() {
unrecognised_register_letter("COMP");
}
#[test]
fn port() {
for dir in &["IN", "OUT"] {
for pad in 1..5 {
unrecognised_register_letter(&format!("PORT{e:w$}{}", dir, e = "", w = pad));
}
}
}
| 40.032258 | 143 | 0.327424 |
f7ce7658e000c3ab06581c0c72597a2f86520e9a | 5,561 | use async_ftp::FtpStream;
use lazy_static::*;
use libunftp::Server;
use unftp_sbe_gcs::CloudStorage;
use more_asserts::assert_ge;
use path_abs::PathInfo;
use pretty_assertions::assert_eq;
use slog::Drain;
use slog::*;
use std::{
io::{Cursor, Read},
path::PathBuf,
process::{Child, Command},
str,
time::Duration,
};
use tokio::{macros::support::Future, sync::Mutex};
use unftp_sbe_gcs::options::AuthMethod;
/*
FIXME: these are just MVP tests. Still to add:
- deleting_directory_deletes_files_in_it() and/or deleting_directory_fails_if_contains_file()
- ...
*/
lazy_static! {
static ref DOCKER: Mutex<Child> = initialize_docker();
}
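// `DOCKER` is initialized on first access, so the container is started at most once
// per test binary; `run_test` below holds this mutex for the whole test body, which
// also serializes the tests against the single fake-gcs instance.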
// FIXME: auto-allocate port
const ADDR: &str = "127.0.0.1:1234";
const GCS_BASE_URL: &str = "http://localhost:9081";
const GCS_BUCKET: &str = "test-bucket";
pub fn initialize_docker() -> Mutex<Child> {
let buf = std::env::current_dir().unwrap();
let current_dir = buf.display();
Command::new("docker").arg("stop").arg("fake-gcs").status().unwrap();
Command::new("docker").arg("rm").arg("fake-gcs").status().unwrap();
let mut command = Command::new("docker");
command
.arg("run")
.arg("-d")
.arg("--name")
.arg("fake-gcs")
.arg("-v")
.arg(format!("{}/tests/resources/data:/data", current_dir))
.arg("-p")
.arg("9081:9081")
.arg("fsouza/fake-gcs-server")
.arg("-scheme")
.arg("http")
.arg("-port")
.arg("9081");
println!("docker command: {:?}", command);
let result = Mutex::new(command.spawn().expect("docker failed"));
    // FIXME: on Linux, `docker run -d` returns almost immediately while container startup continues in
    // the background. Replace this fixed sleep with a container-status check (a sort of startup probe)
std::thread::sleep(Duration::new(10, 0));
result
}
#[tokio::test(flavor = "current_thread")]
async fn newly_created_dir_is_empty() {
run_test(async {
let mut ftp_stream = FtpStream::connect(ADDR).await.unwrap();
ftp_stream.login("anonymous", "").await.unwrap();
ftp_stream.mkdir("newly_created_dir_is_empty").await.unwrap();
ftp_stream.cwd("newly_created_dir_is_empty").await.unwrap();
let list = ftp_stream.list(None).await.unwrap();
assert_eq!(list.len(), 0)
})
.await;
}
#[tokio::test(flavor = "current_thread")]
async fn creating_directory_with_file_in_it() {
run_test(async {
let mut ftp_stream = FtpStream::connect(ADDR).await.unwrap();
ftp_stream.login("anonymous", "").await.unwrap();
ftp_stream.mkdir("creating_directory_with_file_in_it").await.unwrap();
ftp_stream.cwd("creating_directory_with_file_in_it").await.unwrap();
let content = b"Hello from this test!\n";
let mut reader = Cursor::new(content);
ftp_stream.put("greeting.txt", &mut reader).await.unwrap();
let list_in = ftp_stream.list(None).await.unwrap();
assert_eq!(list_in.len(), 1);
assert!(list_in[0].ends_with(" greeting.txt"));
        // FIXME: `CWD ..` does nothing in GCS at the moment (TODO)
// ftp_stream.cwd("..").await.unwrap();
ftp_stream.cdup().await.unwrap();
let list_out = ftp_stream.list(None).await.unwrap();
assert_ge!(list_out.len(), 1);
assert!(list_out.iter().any(|t| t.ends_with("creating_directory_with_file_in_it")))
})
.await;
}
#[tokio::test(flavor = "current_thread")]
async fn file_sizes() {
run_test(async {
let mut ftp_stream = FtpStream::connect(ADDR).await.unwrap();
ftp_stream.login("anonymous", "").await.unwrap();
ftp_stream.mkdir("file_sizes").await.unwrap();
ftp_stream.cwd("file_sizes").await.unwrap();
ftp_stream.put("10 bytes", &mut Cursor::new(b"1234567890")).await.unwrap();
ftp_stream.put("12 bytes", &mut Cursor::new(b"123456789012")).await.unwrap();
ftp_stream.put("17 bytes", &mut Cursor::new(b"12345678901234567")).await.unwrap();
let list = ftp_stream.list(None).await.unwrap();
assert_eq!(list.len(), 3);
list.iter().for_each(|f| {
println!("{}", f);
let vec: Vec<&str> = f.split_whitespace().collect();
// "coincidentally", file name matches file size
assert_eq!(vec[3], vec[7]);
});
})
.await;
}
// FIXME: `move async` is beta in rust 1.48, hence the `impl Future`
async fn run_test(test: impl Future<Output = ()>) {
let mut child = DOCKER.lock().await;
let decorator = slog_term::TermDecorator::new().stderr().build();
let drain = slog_term::FullFormat::new(decorator).build().fuse();
let drain = slog_async::Async::new(drain).build().fuse();
tokio::spawn(
Server::new(Box::new(move || {
CloudStorage::with_api_base(
GCS_BASE_URL,
GCS_BUCKET,
PathBuf::from("/unftp"),
AuthMethod::ServiceAccountKey(b"unftp_test".to_vec()),
)
}))
.logger(Some(Logger::root(drain, o!())))
.listen(ADDR),
);
tokio::time::sleep(Duration::new(1, 0)).await;
test.await;
let mut stdout = String::new();
let mut stderr = String::new();
child.stdout.as_mut().map(|s| s.read_to_string(&mut stdout));
child.stderr.as_mut().map(|s| s.read_to_string(&mut stderr));
println!("stdout: {}", stdout);
println!("stderr: {}", stderr);
// FIXME: stop docker container (atm there is no mechanism in cargo test for cleanup hooks)
}
| 33.908537 | 191 | 0.619493 |
d538a8068ac4a16a392e79e43adaa1011022e6d9 | 740 | #![allow(non_upper_case_globals)]
use abi::call::{FnType, ArgType};
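// Calling convention implemented below, applied to both return values and
// arguments: aggregates wider than 64 bits are passed indirectly (through a
// pointer); everything else is widened to at least 32 bits.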
fn classify_ret_ty<Ty>(ret: &mut ArgType<Ty>) {
if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 {
ret.make_indirect();
} else {
ret.extend_integer_width_to(32);
}
}
fn classify_arg_ty<Ty>(arg: &mut ArgType<Ty>) {
if arg.layout.is_aggregate() && arg.layout.size.bits() > 64 {
arg.make_indirect();
} else {
arg.extend_integer_width_to(32);
}
}
pub fn compute_abi_info<Ty>(fty: &mut FnType<Ty>) {
if !fty.ret.is_ignore() {
classify_ret_ty(&mut fty.ret);
}
for arg in &mut fty.args {
if arg.is_ignore() {
continue;
}
classify_arg_ty(arg);
}
}
| 22.424242 | 65 | 0.586486 |
096a56c11f623a5822b2eca3d344daafa3bd004f | 249 | use wasm_compiler::process;
#[no_mangle]
pub fn compile(code_ptr: usize) -> usize {
process(code_ptr, |code| {
Ok(match wat::parse_str(code) {
Ok(a) => a,
Err(e) => return Err(e.to_string()),
})
})
}
| 20.75 | 48 | 0.526104 |
f4cca9df6a5772e2b851a49af19bfc0521ea1310 | 39,954 | /* This is dvipdfmx, an eXtended version of dvipdfm by Mark A. Wicks.
Copyright (C) 2002-2016 by Jin-Hwan Cho and Shunsaku Hirata,
the dvipdfmx project team.
Copyright (C) 1998, 1999 by Mark A. Wicks <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*/
#![allow(
mutable_transmutes,
non_camel_case_types,
non_snake_case,
non_upper_case_globals,
)]
use super::dpx_sfnt::{
dfont_open, sfnt_close, sfnt_create_FontFile_stream, sfnt_open, sfnt_read_table_directory,
sfnt_require_table, sfnt_set_table,
};
use crate::streq_ptr;
use crate::DisplayExt;
use crate::{info, warn};
use std::ffi::CStr;
use std::ptr;
use super::dpx_agl::{
agl_chop_suffix, agl_lookup_list, agl_name_convert_unicode, agl_name_is_unicode,
agl_suffix_to_otltag,
};
use super::dpx_dpxfile::{dpx_open_dfont_file, dpx_open_truetype_file};
use super::dpx_mem::new;
use super::dpx_pdfencoding::{pdf_encoding_get_encoding, pdf_encoding_is_predefined};
use super::dpx_pdffont::{
pdf_font, pdf_font_get_descriptor, pdf_font_get_encoding, pdf_font_get_ident,
pdf_font_get_index, pdf_font_get_mapname, pdf_font_get_resource, pdf_font_get_usedchars,
pdf_font_get_verbose, pdf_font_is_in_use, pdf_font_set_fontname,
};
use super::dpx_tfm::{tfm_get_width, tfm_open};
use super::dpx_tt_aux::tt_get_fontdesc;
use super::dpx_tt_aux::ttc_read_offset;
use super::dpx_tt_cmap::{tt_cmap_lookup, tt_cmap_read, tt_cmap_release};
use super::dpx_tt_glyf::{
tt_add_glyph, tt_build_finish, tt_build_init, tt_build_tables, tt_find_glyph, tt_get_index,
};
use super::dpx_tt_gsub::{
otl_gsub, otl_gsub_add_feat, otl_gsub_apply, otl_gsub_apply_alt, otl_gsub_apply_lig,
otl_gsub_new, otl_gsub_release, otl_gsub_select,
};
use super::dpx_tt_post::{tt_lookup_post_table, tt_read_post_table, tt_release_post_table};
use super::dpx_tt_table::tt_get_ps_fontname;
use crate::dpx_pdfobj::{pdf_obj, pdf_ref_obj, pdf_release_obj, IntoObj, PushObj};
use crate::shims::sprintf;
use libc::{atoi, free, memcpy, memmove, memset, strchr, strcpy, strlen, strncpy};
use super::dpx_sfnt::{put_big_endian, sfnt};
use super::dpx_tt_post::tt_post_table;
use super::dpx_tt_cmap::tt_cmap;
pub use sfnt_table_info::SfntTableInfo;
/// Tag consts for SfntTableInfo.
pub mod sfnt_table_info {
pub type Tag = &'static [u8; 4];
/*
* The 'name' table should be preserved since it contains copyright
* information, but it might cause problem when there are invalid
* table entries (wrongly encoded text which is often the case in
* CJK fonts). Acrobat does not use 'name' table. Unicode TrueType
* fonts may have 10K bytes 'name' table...
*
* We preserve the 'OS/2' table too, since it contains the license
* information. PDF applications should use this table to decide
* whether the font is embedded only for the purpose of preview &
* printing. Otherwise, we must encrypt the document. Acrobat does
* not use 'OS/2' table, though...
*/
#[derive(Copy, Clone)]
pub struct SfntTableInfo {
name: Tag,
must_exist: bool,
}
impl SfntTableInfo {
pub const fn new(name: Tag, must_exist: bool) -> Self {
SfntTableInfo { name, must_exist }
}
pub const fn name(&self) -> Tag {
self.name
}
/// # Safety
/// This function assumes the name is valid utf8.
pub unsafe fn name_str(&self) -> &str {
&std::str::from_utf8_unchecked(self.name)
}
pub const fn must_exist(&self) -> bool {
self.must_exist
}
}
pub const OS_2: Tag = b"OS/2";
pub const HEAD: Tag = b"head";
pub const HHEA: Tag = b"hhea";
pub const LOCA: Tag = b"loca";
pub const MAXP: Tag = b"maxp";
pub const NAME: Tag = b"name";
pub const GLYF: Tag = b"glyf";
pub const HMTX: Tag = b"hmtx";
pub const FPGM: Tag = b"fpgm";
pub const CVT: Tag = b"cvt ";
pub const PREP: Tag = b"prep";
pub const CMAP: Tag = b"cmap";
}
/* Avoid conflict with CHAR ... from <winnt.h>. */
/* Data Types as described in Apple's TTRefMan */
/* Order of lookup should be
* post, unicode+otl
*/
#[derive(Copy, Clone)]
#[repr(C)]
pub struct glyph_mapper {
pub codetogid: *mut tt_cmap,
pub gsub: *mut otl_gsub,
pub sfont: *mut sfnt,
pub nametogid: *mut tt_post_table,
}
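/* A glyph_mapper bundles every name-to-GID source available for one font: the
 * 'post' table (glyph name to GID), a Unicode cmap (code point to GID), and OTL
 * GSUB data for suffix/ligature substitutions. resolve_glyph() below consults
 * them in that order.
 */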
/* tectonic/core-strutils.h: miscellaneous C string utilities
Copyright 2016-2018 the Tectonic Project
Licensed under the MIT License.
*/
/* Note that we explicitly do *not* change this on Windows. For maximum
* portability, we should probably accept *either* forward or backward slashes
* as directory separators. */
/* TrueType */
/* Modifying this has no effect :P */
pub unsafe fn pdf_font_open_truetype(font: *mut pdf_font) -> i32 {
let mut embedding: i32 = 1i32;
assert!(!font.is_null());
let ident = pdf_font_get_ident(font); /* Must be embedded. */
let index = pdf_font_get_index(font);
assert!(!ident.is_null());
let sfont = if let Some(handle) = dpx_open_truetype_file(ident) {
sfnt_open(handle)
} else if let Some(handle) = dpx_open_dfont_file(ident) {
dfont_open(handle, index)
} else {
return -1i32;
};
if sfont.is_null() {
warn!(
"Could not open TrueType font: {}",
CStr::from_ptr(ident).display(),
);
return -1i32;
}
let error = if (*sfont).type_0 == 1i32 << 4i32 {
let offset = ttc_read_offset(sfont, index);
if offset == 0_u32 {
panic!("Invalid TTC index in {}.", CStr::from_ptr(ident).display());
}
sfnt_read_table_directory(sfont, offset)
} else {
sfnt_read_table_directory(sfont, (*sfont).offset)
};
if error != 0 {
sfnt_close(sfont);
return -1i32;
/* Silently */
}
/* Reading fontdict before checking fonttype conflicts with PKFONT
* because pdf_font_get_resource() always makes a dictionary.
*/
let encoding_id = pdf_font_get_encoding(font);
let fontdict = pdf_font_get_resource(&mut *font);
let descriptor = pdf_font_get_descriptor(font);
/* ENABLE_NOEMBED */
assert!(!descriptor.is_null());
let mut fontname: [i8; 256] = [0; 256];
memset(fontname.as_mut_ptr() as *mut libc::c_void, 0i32, 256);
let mut length = tt_get_ps_fontname(sfont, fontname.as_mut_ptr(), 255_u16) as i32;
if length < 1i32 {
length = (if strlen(ident) < 255 {
strlen(ident) as _
} else {
255
}) as i32;
strncpy(fontname.as_mut_ptr(), ident, length as _);
}
fontname[length as usize] = '\u{0}' as i32 as i8;
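    /* The extracted PS font name may contain embedded NUL bytes; the loop below
     * squeezes them out in place. */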
for n in 0..length {
if fontname[n as usize] as i32 == 0i32 {
memmove(
fontname.as_mut_ptr().offset(n as isize) as *mut libc::c_void,
fontname.as_mut_ptr().offset(n as isize).offset(1) as *const libc::c_void,
(length - n - 1) as _,
);
}
}
if strlen(fontname.as_mut_ptr()) == 0 {
panic!(
"Can\'t find valid fontname for \"{}\".",
CStr::from_ptr(ident).display()
);
}
pdf_font_set_fontname(font, fontname.as_mut_ptr());
let tmp = tt_get_fontdesc(sfont, &mut embedding, -1i32, 1i32, fontname.as_mut_ptr());
if tmp.is_none() {
sfnt_close(sfont);
panic!("Could not obtain necessary font info.");
}
let tmp = tmp.unwrap();
(*descriptor).as_dict_mut().merge(&tmp);
if embedding == 0 {
if encoding_id >= 0i32 && pdf_encoding_is_predefined(encoding_id) == 0 {
sfnt_close(sfont);
panic!("Custom encoding not allowed for non-embedded TrueType font.");
} else {
/* There are basically no guarantee for font substitution
* can work with "symblic" fonts. At least all glyphs
* contained in the font must be identified; glyphs covers
* by this instance of font should contain glyphs only from
* Adobe Standard Latin Set. We allow non-embedded font
* only to predefined encodings for this reason. Note that
* "builtin" encoding means "MacRoman" here.
*/
            panic!(
                "Font file=\"{}\" can\'t be embedded due to license restrictions.",
                CStr::from_ptr(ident).display()
            );
/* ENABLE_NOEMBED */
}
}
sfnt_close(sfont);
fontdict.as_dict_mut().set("Type", "Font");
fontdict.as_dict_mut().set("Subtype", "TrueType");
0i32
}
const required_table: [SfntTableInfo; 12] = {
use sfnt_table_info::*;
[
SfntTableInfo::new(OS_2, false),
SfntTableInfo::new(HEAD, false),
SfntTableInfo::new(HHEA, true),
SfntTableInfo::new(LOCA, true),
SfntTableInfo::new(MAXP, true),
SfntTableInfo::new(NAME, true),
SfntTableInfo::new(GLYF, true),
SfntTableInfo::new(HMTX, true),
SfntTableInfo::new(FPGM, false),
SfntTableInfo::new(CVT, false),
SfntTableInfo::new(PREP, false),
SfntTableInfo::new(CMAP, true),
]
};
unsafe fn do_widths(font: *mut pdf_font, widths: *mut f64) {
let fontdict = pdf_font_get_resource(&mut *font);
let usedchars = pdf_font_get_usedchars(font);
let mut tmparray = vec![];
let mut firstchar = 255i32;
let mut lastchar = 0i32;
for code in 0..256 {
if *usedchars.offset(code as isize) != 0 {
if code < firstchar {
firstchar = code
}
if code > lastchar {
lastchar = code
}
}
}
if firstchar > lastchar {
warn!("No glyphs actually used???");
return;
}
let tfm_id = tfm_open(pdf_font_get_mapname(font), 0i32);
for code in firstchar..=lastchar {
if *usedchars.offset(code as isize) != 0 {
let width = if tfm_id < 0i32 {
/* tfm is not found */
*widths.offset(code as isize)
} else {
1000. * tfm_get_width(tfm_id, code)
};
tmparray.push_obj((width / 0.1 + 0.5).floor() * 0.1);
} else {
tmparray.push_obj(0f64);
}
}
let empty = tmparray.is_empty();
let tmparray = tmparray.into_obj();
if !empty {
fontdict.as_dict_mut().set("Widths", pdf_ref_obj(tmparray));
}
pdf_release_obj(tmparray);
fontdict.as_dict_mut().set("FirstChar", firstchar as f64);
fontdict.as_dict_mut().set("LastChar", lastchar as f64);
}
static mut verbose: i32 = 0i32;
/*
* There are several issues in TrueType font support in PDF.
* How PDF viewers select TrueType cmap table is not so clear.
* Most reliable way seem to reencode font and sort glyphs as
* charcode == gid and to use Mac-Roman format 0 subtable.
* It does not work with encodings that uses full 256 range since
* GID = 0 is reserved for .notdef, so GID = 256 is not accessible.
*/
unsafe fn do_builtin_encoding(font: *mut pdf_font, usedchars: *const i8, sfont: *mut sfnt) -> i32 {
let mut widths: [f64; 256] = [0.; 256];
let ttcm = tt_cmap_read(sfont, 1_u16, 0_u16);
if ttcm.is_null() {
warn!("Could not read Mac-Roman TrueType cmap table...");
return -1i32;
}
let cmap_table =
new((274_u64).wrapping_mul(::std::mem::size_of::<i8>() as u64) as u32) as *mut i8;
memset(cmap_table as *mut libc::c_void, 0i32, 274);
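    /* Layout of the 274-byte table written below: a 4-byte cmap header (version,
     * numTables), one 8-byte encoding record (platform ID, encoding ID, 4-byte
     * subtable offset), then a 262-byte format 0 subtable (format, length,
     * language, plus one glyph-index byte for each of the 256 character codes,
     * starting at offset 18). */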
put_big_endian(cmap_table as *mut libc::c_void, 0i32, 2i32);
/* Version */
put_big_endian(cmap_table.offset(2) as *mut libc::c_void, 1i32, 2i32);
/* Number of subtables */
put_big_endian(cmap_table.offset(4) as *mut libc::c_void, 1u32 as i32, 2i32);
/* Platform ID */
put_big_endian(cmap_table.offset(6) as *mut libc::c_void, 0u32 as i32, 2i32);
/* Encoding ID */
put_big_endian(cmap_table.offset(8) as *mut libc::c_void, 12i32, 4i32);
/* Offset */
put_big_endian(cmap_table.offset(12) as *mut libc::c_void, 0i32, 2i32);
/* Format */
put_big_endian(cmap_table.offset(14) as *mut libc::c_void, 262i32, 2i32);
/* Length */
put_big_endian(cmap_table.offset(16) as *mut libc::c_void, 0i32, 2i32);
/* Language */
let glyphs = tt_build_init(); /* .notdef */
if verbose > 2i32 {
info!("[glyphs:/.notdef");
}
let mut count = 1;
for code in 0..256 {
if !(*usedchars.offset(code as isize) == 0) {
if verbose > 2i32 {
info!("/.c0x{:02x}", code);
}
let mut idx;
let gid = tt_cmap_lookup(ttcm, code as u32);
if gid as i32 == 0i32 {
warn!(
"Glyph for character code=0x{:02x} missing in font font-file=\"{}\".",
code,
CStr::from_ptr(pdf_font_get_ident(font)).display(),
);
idx = 0_u16
} else {
idx = tt_find_glyph(glyphs, gid);
if idx as i32 == 0i32 {
idx = tt_add_glyph(glyphs, gid, count as u16)
}
/* count returned. */
} /* bug here */
*cmap_table.offset((18i32 + code) as isize) = (idx as i32 & 0xffi32) as i8;
count += 1
}
}
tt_cmap_release(ttcm);
if verbose > 2i32 {
info!("]");
}
if tt_build_tables(sfont, glyphs) < 0i32 {
warn!("Packing TrueType font into SFNT failed!");
tt_build_finish(glyphs);
free(cmap_table as *mut libc::c_void);
return -1i32;
}
for code in 0..256 {
if *usedchars.offset(code as isize) != 0 {
let idx = tt_get_index(glyphs, *cmap_table.offset((18i32 + code) as isize) as u16);
widths[code as usize] = (1000.0f64
* (*(*glyphs).gd.offset(idx as isize)).advw as i32 as f64
/ (*glyphs).emsize as i32 as f64
/ 1i32 as f64
+ 0.5f64)
.floor()
* 1i32 as f64
} else {
widths[code as usize] = 0.0f64
}
}
do_widths(font, widths.as_mut_ptr());
if verbose > 1i32 {
info!("[{} glyphs]", (*glyphs).num_glyphs as i32);
}
tt_build_finish(glyphs);
sfnt_set_table(
sfont,
sfnt_table_info::CMAP,
cmap_table as *mut libc::c_void,
274_u32,
);
0i32
}
/* WARNING: This modifies glyphname itself */
unsafe fn agl_decompose_glyphname(
glyphname: *mut i8,
nptrs: *mut *mut i8,
size: i32,
suffix: *mut *mut i8,
) -> i32 {
let mut p: *mut i8 = glyphname; /* _FIXME_ */
let mut q = strchr(p, '.' as i32); /* chop every thing after *first* dot */
if q.is_null() {
*suffix = ptr::null_mut()
} else {
*q = '\u{0}' as i32 as i8;
q = q.offset(1);
*suffix = q
}
let ref mut fresh0 = *nptrs.offset(0);
*fresh0 = p;
let mut n = 1;
while !p.is_null() && *p as i32 != 0 {
p = strchr(p, '_' as i32);
if p.is_null() || *p.offset(1) as i32 == '\u{0}' as i32 {
break;
}
if n >= size {
panic!("Uh ah...");
}
*p = '\u{0}' as i32 as i8;
p = p.offset(1);
let ref mut fresh1 = *nptrs.offset(n as isize);
*fresh1 = p;
n += 1
}
n
}
unsafe fn select_gsub(feat: &[u8], gm: *mut glyph_mapper) -> i32 {
if feat.is_empty() || gm.is_null() || (*gm).gsub.is_null() {
return -1i32;
}
/* First treat as is */
let idx = otl_gsub_select((*gm).gsub, b"*", b"*", feat);
if idx >= 0i32 {
return 0i32;
}
if verbose > 1i32 {
        info!(
            "\ntruetype>> Try loading OTL GSUB for \"*.*.{}\"...",
            feat.display()
        );
}
let error = otl_gsub_add_feat((*gm).gsub, b"*", b"*", feat, (*gm).sfont);
if error == 0 {
let idx = otl_gsub_select((*gm).gsub, b"*", b"*", feat);
return if idx >= 0i32 { 0i32 } else { -1i32 };
}
-1i32
}
/* Apply GSUB. This is a bit tricky... */
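/* Illustrative examples of what this handles (the exact behavior depends on the
 * AGL data and on the font's GSUB tables): a suffix like "a.sc" is first mapped
 * to an OpenType feature tag (small caps); a four-letter suffix such as "a.swsh"
 * is tried verbatim as a tag; shorter ones are padded with spaces; and a
 * trailing number as in "a.aalt2" selects the 2nd alternate from an
 * alternate-substitution lookup. */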
unsafe fn selectglyph(
mut in_0: u16,
suffix: *const i8,
gm: *mut glyph_mapper,
out: *mut u16,
) -> i32 {
let mut t: [i8; 5] = [0; 5];
let mut error;
assert!(!suffix.is_null() && !gm.is_null() && !out.is_null());
assert!(!suffix.is_null() && *suffix as i32 != 0i32);
let s = new((strlen(suffix).wrapping_add(1) as u32 as u64)
.wrapping_mul(::std::mem::size_of::<i8>() as u64) as u32) as *mut i8;
strcpy(s, suffix);
/* First try converting suffix to feature tag.
* agl.c currently only knows less ambiguos cases;
* e.g., 'sc', 'superior', etc.
*/
if let Some(r) = agl_suffix_to_otltag(CStr::from_ptr(s).to_bytes())
/* 'suffix' may represent feature tag. */
{
/* We found feature tag for 'suffix'. */
error = select_gsub(r, gm); /* no fallback for this */
if error == 0 {
error = otl_gsub_apply((*gm).gsub, &mut in_0)
}
} else {
/* Try loading GSUB only when length of 'suffix' is less
* than or equal to 4. tt_gsub give a warning otherwise.
*/
if strlen(s) > 4 {
error = -1i32
} else if strlen(s) == 4 {
error = select_gsub(CStr::from_ptr(s).to_bytes(), gm)
} else {
/* Uh */
/* less than 4. pad ' '. */
memset(t.as_mut_ptr() as *mut libc::c_void, ' ' as i32, 4);
t[4] = '\u{0}' as i32 as i8;
memcpy(
t.as_mut_ptr() as *mut libc::c_void,
s as *const libc::c_void,
strlen(s),
);
error = select_gsub(CStr::from_ptr(t.as_mut_ptr()).to_bytes(), gm)
}
if error == 0 {
/* 'suffix' represents feature tag. */
error = otl_gsub_apply((*gm).gsub, &mut in_0)
} else {
/* other case: alt1, nalt10... (alternates) */
let mut q = s.offset(strlen(s) as isize).offset(-1);
while q > s && *q as i32 >= '0' as i32 && *q as i32 <= '9' as i32 {
q = q.offset(-1)
}
if q == s {
error = -1i32
} else {
/* starting at 1 */
let n = atoi(q.offset(1)) - 1i32;
*q.offset(1) = '\u{0}' as i32 as i8;
if strlen(s) > 4 {
error = -1i32
} else {
/* This may be alternate substitution. */
memset(t.as_mut_ptr() as *mut libc::c_void, ' ' as i32, 4);
t[4] = '\u{0}' as i32 as i8;
memcpy(
t.as_mut_ptr() as *mut libc::c_void,
s as *const libc::c_void,
strlen(s),
);
error = select_gsub(CStr::from_ptr(s).to_bytes(), gm);
if error == 0 {
error = otl_gsub_apply_alt((*gm).gsub, n as u16, &mut in_0 as *mut u16)
}
}
}
}
}
free(s as *mut libc::c_void);
*out = in_0;
error
}
/* Compose glyphs via ligature substitution. */
unsafe fn composeglyph(
glyphs: *mut u16,
n_glyphs: i32,
feat: *const i8,
gm: *mut glyph_mapper,
gid: *mut u16,
) -> i32 {
let mut t: [i8; 5] = [
' ' as i32 as i8,
' ' as i32 as i8,
' ' as i32 as i8,
' ' as i32 as i8,
0_i8,
];
assert!(!glyphs.is_null() && n_glyphs > 0i32 && !gm.is_null() && !gid.is_null());
let mut error = if feat.is_null() || *feat.offset(0) as i32 == '\u{0}' as i32 {
/* meaning "Unknown" */
select_gsub(b"(?lig|lig?|?cmp|cmp?|frac|afrc)", gm)
} else if strlen(feat) > 4 {
-1
} else {
memcpy(
t.as_mut_ptr() as *mut libc::c_void,
feat as *const libc::c_void,
strlen(feat),
);
select_gsub(CStr::from_ptr(t.as_mut_ptr()).to_bytes(), gm)
};
if error == 0 {
error = otl_gsub_apply_lig((*gm).gsub, glyphs, n_glyphs as u16, gid)
}
error
}
/* This may be called by findparanoiac(). */
unsafe fn composeuchar(
unicodes: *mut i32,
n_unicodes: i32,
feat: *const i8,
gm: *mut glyph_mapper,
gid: *mut u16,
) -> i32 {
let mut error: i32 = 0i32;
if (*gm).codetogid.is_null() {
return -1i32;
}
let gids =
new((n_unicodes as u32 as u64).wrapping_mul(::std::mem::size_of::<u16>() as u64) as u32)
as *mut u16;
let mut i = 0;
while error == 0 && i < n_unicodes {
*gids.offset(i as isize) =
tt_cmap_lookup((*gm).codetogid, *unicodes.offset(i as isize) as u32);
error = if *gids.offset(i as isize) as i32 == 0i32 {
-1i32
} else {
0i32
};
i += 1
}
if error == 0 {
error = composeglyph(gids, n_unicodes, feat, gm, gid)
}
free(gids as *mut libc::c_void);
error
}
/* Search 'post' table. */
unsafe fn findposttable(glyph_name: *const i8, gid: *mut u16, gm: *mut glyph_mapper) -> i32 {
if (*gm).nametogid.is_null() {
return -1i32;
}
*gid = tt_lookup_post_table((*gm).nametogid, glyph_name);
if *gid as i32 == 0i32 {
-1i32
} else {
0i32
}
}
/* This is wrong. We must care about '.'. */
/* Glyph names are concatenated with '_'. */
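/* Example (illustrative): "f_i" is resolved by looking up "f" and "i"
 * individually and then asking GSUB for a ligature of the two. With an explicit
 * suffix such as "f_i.liga" the named feature is used; otherwise a set of
 * common ligature/composition features is tried. */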
unsafe fn findcomposite(glyphname: *const i8, gid: *mut u16, gm: *mut glyph_mapper) -> i32 {
let mut suffix: *mut i8 = ptr::null_mut();
let mut gids: [u16; 32] = [0; 32];
let mut nptrs: [*mut i8; 32] = [ptr::null_mut(); 32];
let error = findposttable(glyphname, gid, gm);
if error == 0 {
return 0i32;
}
let gname =
new((strlen(glyphname).wrapping_add(1)).wrapping_mul(::std::mem::size_of::<i8>()) as _)
as *mut i8; /* first try composing glyph */
strcpy(gname, glyphname);
memset(
gids.as_mut_ptr() as *mut libc::c_void,
0i32,
(32usize).wrapping_mul(::std::mem::size_of::<u16>()),
);
let n_comp = agl_decompose_glyphname(gname, nptrs.as_mut_ptr(), 32i32, &mut suffix);
let mut error = 0i32;
let mut i = 0;
while error == 0 && i < n_comp as usize {
error = resolve_glyph(nptrs[i], &mut *gids.as_mut_ptr().offset(i as isize), gm);
if error != 0 {
warn!(
"Could not resolve glyph \"{}\" ({}th component of glyph \"{}\").",
CStr::from_ptr(nptrs[i]).display(),
i,
CStr::from_ptr(glyphname).display(),
);
}
i += 1
}
if error == 0 {
if !suffix.is_null()
&& (streq_ptr(suffix, b"liga\x00" as *const u8 as *const i8) as i32 != 0
|| streq_ptr(suffix, b"dlig\x00" as *const u8 as *const i8) as i32 != 0
|| streq_ptr(suffix, b"hlig\x00" as *const u8 as *const i8) as i32 != 0
|| streq_ptr(suffix, b"frac\x00" as *const u8 as *const i8) as i32 != 0
|| streq_ptr(suffix, b"ccmp\x00" as *const u8 as *const i8) as i32 != 0
|| streq_ptr(suffix, b"afrc\x00" as *const u8 as *const i8) as i32 != 0)
{
error = composeglyph(gids.as_mut_ptr(), n_comp, suffix, gm, gid)
} else {
error = composeglyph(gids.as_mut_ptr(), n_comp, ptr::null(), gm, gid);
if error == 0 && !suffix.is_null() {
/* a_b_c.vert */
error = selectglyph(*gid, suffix, gm, gid)
}
}
}
free(gname as *mut libc::c_void);
error
}
/* glyphname should not have suffix here */
unsafe fn findparanoiac(glyphname: *const i8, gid: *mut u16, gm: *mut glyph_mapper) -> i32 {
let mut idx: u16 = 0_u16;
let mut error;
let mut agln = agl_lookup_list(glyphname);
while !agln.is_null() && idx as i32 == 0i32 {
if !(*agln).suffix.is_null() {
error = findparanoiac((*agln).name, &mut idx, gm);
if error != 0 {
return error;
}
error = selectglyph(idx, (*agln).suffix, gm, &mut idx);
if error != 0 {
warn!(
"Variant \"{}\" for glyph \"{}\" might not be found.",
CStr::from_ptr((*agln).suffix).display(),
CStr::from_ptr((*agln).name).display(),
);
warn!("Using glyph name without suffix instead...");
//error = 0i32
/* ignore */
}
} else if (*agln).n_components == 1i32 {
idx = tt_cmap_lookup((*gm).codetogid, (*agln).unicodes[0] as u32)
} else if (*agln).n_components > 1i32 {
if verbose >= 0i32 {
/* give warning */
warn!(
"Glyph \"{}\" looks like a composite glyph...",
CStr::from_ptr((*agln).name).display(),
);
}
error = composeuchar(
(*agln).unicodes.as_mut_ptr(),
(*agln).n_components,
ptr::null(),
gm,
&mut idx,
);
if verbose >= 0i32 {
if error != 0 {
warn!("Not found...");
} else {
let mut _i: i32 = 0;
let mut _n: i32 = 0i32;
let mut _p: *mut i8 = ptr::null_mut();
let mut _buf: [i8; 256] = [0; 256];
warn!(
">> Composite glyph glyph-name=\"{}\" found at glyph-id=\"{}\".",
CStr::from_ptr((*agln).name).display(),
idx,
);
_p = _buf.as_mut_ptr();
_i = 0i32;
while _i < (*agln).n_components && _n < 245i32 {
let fresh2 = _n;
_n = _n + 1;
*_p.offset(fresh2 as isize) =
(if _i == 0i32 { '<' as i32 } else { ' ' as i32 }) as i8;
if (*agln).unicodes[_i as usize] >= 0x10000i32 {
_n += sprintf(
_p.offset(_n as isize),
b"U+%06X\x00" as *const u8 as *const i8,
(*agln).unicodes[_i as usize],
)
} else {
_n += sprintf(
_p.offset(_n as isize),
b"U+%04X\x00" as *const u8 as *const i8,
(*agln).unicodes[_i as usize],
)
}
let fresh3 = _n;
_n = _n + 1;
*_p.offset(fresh3 as isize) = (if _i == (*agln).n_components - 1i32 {
'>' as i32
} else {
',' as i32
}) as i8;
_i += 1;
}
let fresh4 = _n;
_n = _n + 1;
*_p.offset(fresh4 as isize) = '\u{0}' as i32 as i8;
warn!(">> Input Unicode seq.=\"{}\" ==> glyph-id=\"{}\" in font-file=\"_please_try_-v_\".",
CStr::from_ptr(_buf.as_mut_ptr()).display(), idx);
}
}
} else {
unreachable!();
}
agln = (*agln).alternate
}
*gid = idx;
if idx as i32 == 0i32 {
-1i32
} else {
0i32
}
}
unsafe fn resolve_glyph(glyphname: *const i8, gid: *mut u16, gm: *mut glyph_mapper) -> i32 {
assert!(!glyphname.is_null());
/* Boooo */
/*
* First we try glyph name to GID mapping using post table if post table
* is available. If post table is not available or glyph is not listed
* in the post table, then we try Unicode if Windows-Unicode TrueType
* cmap is available.
*/
let error = findposttable(glyphname, gid, gm);
if error == 0 {
return 0i32;
}
if (*gm).codetogid.is_null() {
return -1i32;
}
let (name, suffix) = agl_chop_suffix(CStr::from_ptr(glyphname).to_bytes());
if let Some(name) = name {
let mut error = if agl_name_is_unicode(name.to_bytes()) {
let ucv = agl_name_convert_unicode(name.as_ptr());
*gid = tt_cmap_lookup((*gm).codetogid, ucv as u32);
if *gid as i32 == 0i32 {
-1
} else {
0
}
} else {
findparanoiac(name.as_ptr(), gid, gm)
};
if error == 0 {
if let Some(suffix) = suffix {
error = selectglyph(*gid, suffix.as_ptr(), gm, gid);
if error != 0 {
warn!(
"Variant \"{}\" for glyph \"{}\" might not be found.",
suffix.display(),
name.display(),
);
warn!("Using glyph name without suffix instead...");
error = 0i32
/* ignore */
}
}
}
error
} else {
/* .notdef, .foo */
-1
}
}
/* Things are complicated. We still need to use PostScript
* glyph names. But OpenType fonts may not have PS name to
* glyph mapping. We use Unicode plus OTL GSUB for finding
* glyphs in this case.
*/
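/* The mapping sources are set up in priority order: the 'post' table for direct
 * glyph-name lookups, then a Unicode cmap (the UCS-4 (3,10) subtable when
 * present, falling back to the BMP (3,1) subtable), with GSUB features loaded
 * lazily for suffix/ligature handling. */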
unsafe fn setup_glyph_mapper(mut gm: *mut glyph_mapper, sfont: *mut sfnt) -> i32 {
(*gm).sfont = sfont;
(*gm).nametogid = tt_read_post_table(sfont);
(*gm).codetogid = tt_cmap_read(sfont, 3_u16, 10_u16);
if (*gm).codetogid.is_null() {
(*gm).codetogid = tt_cmap_read(sfont, 3_u16, 1_u16)
}
if (*gm).nametogid.is_null() && (*gm).codetogid.is_null() {
return -1i32;
}
(*gm).gsub = otl_gsub_new();
0i32
}
unsafe fn clean_glyph_mapper(mut gm: *mut glyph_mapper) {
if !(*gm).gsub.is_null() {
otl_gsub_release((*gm).gsub);
}
if !(*gm).codetogid.is_null() {
tt_cmap_release((*gm).codetogid);
}
if !(*gm).nametogid.is_null() {
tt_release_post_table((*gm).nametogid);
}
(*gm).gsub = ptr::null_mut();
(*gm).codetogid = ptr::null_mut();
(*gm).nametogid = ptr::null_mut();
(*gm).sfont = ptr::null_mut();
}
unsafe fn do_custom_encoding(
font: *mut pdf_font,
encoding: *mut *mut i8,
usedchars: *const i8,
sfont: *mut sfnt,
) -> i32 {
let mut widths: [f64; 256] = [0.; 256];
let mut gm: glyph_mapper = glyph_mapper {
codetogid: ptr::null_mut(),
gsub: ptr::null_mut(),
sfont: ptr::null_mut(),
nametogid: ptr::null_mut(),
};
assert!(!font.is_null() && !encoding.is_null() && !usedchars.is_null() && !sfont.is_null());
let error = setup_glyph_mapper(&mut gm, sfont);
if error != 0 {
        warn!(
            "Neither a post table nor a Unicode cmap was found in font: {}",
            CStr::from_ptr(pdf_font_get_ident(font)).display(),
        );
warn!(">> I can\'t find glyphs without this!");
return -1i32;
}
let cmap_table =
new((274_u64).wrapping_mul(::std::mem::size_of::<i8>() as u64) as u32) as *mut i8;
memset(cmap_table as *mut libc::c_void, 0i32, 274);
put_big_endian(cmap_table as *mut libc::c_void, 0i32, 2i32);
/* Version */
put_big_endian(cmap_table.offset(2) as *mut libc::c_void, 1i32, 2i32);
/* Number of subtables */
put_big_endian(cmap_table.offset(4) as *mut libc::c_void, 1u32 as i32, 2i32);
/* Platform ID */
put_big_endian(cmap_table.offset(6) as *mut libc::c_void, 0u32 as i32, 2i32);
/* Encoding ID */
put_big_endian(cmap_table.offset(8) as *mut libc::c_void, 12i32, 4i32);
/* Offset */
put_big_endian(cmap_table.offset(12) as *mut libc::c_void, 0i32, 2i32);
/* Format */
put_big_endian(cmap_table.offset(14) as *mut libc::c_void, 262i32, 2i32);
/* Length */
put_big_endian(cmap_table.offset(16) as *mut libc::c_void, 0i32, 2i32);
/* Language */
let glyphs = tt_build_init(); /* +1 for .notdef */
let mut count = 1;
for code in 0..256 {
if !(*usedchars.offset(code as isize) == 0) {
let mut gid: u16 = 0;
let mut idx;
if (*encoding.offset(code as isize)).is_null()
|| streq_ptr(
*encoding.offset(code as isize),
b".notdef\x00" as *const u8 as *const i8,
) as i32
!= 0
{
warn!("Character code=\"0x{:02X}\" mapped to \".notdef\" glyph used in font font-file=\"{}\"", code,
CStr::from_ptr(pdf_font_get_ident(font)).display());
warn!(">> Maybe incorrect encoding specified?");
idx = 0_u16
} else {
let error = if !strchr(*encoding.offset(code as isize), '_' as i32).is_null() {
findcomposite(*encoding.offset(code as isize), &mut gid, &mut gm)
} else {
resolve_glyph(*encoding.offset(code as isize), &mut gid, &mut gm)
};
/*
                 * Older versions of gs had problems with glyphs (other than .notdef)
* mapped to gid = 0.
*/
if error != 0 {
warn!(
"Glyph \"{}\" not available in font \"{}\".",
CStr::from_ptr(*encoding.offset(code as isize)).display(),
CStr::from_ptr(pdf_font_get_ident(font)).display(),
); /* count returned. */
} else if verbose > 1i32 {
info!(
"truetype>> Glyph glyph-name=\"{}\" found at glyph-id=\"{}\".\n",
CStr::from_ptr(*encoding.offset(code as isize)).display(),
gid,
);
}
idx = tt_find_glyph(glyphs, gid);
if idx as i32 == 0i32 {
idx = tt_add_glyph(glyphs, gid, count as u16);
count += 1
}
}
*cmap_table.offset((18i32 + code) as isize) = (idx as i32 & 0xffi32) as i8
}
/* bug here */
} /* _FIXME_: wrong message */
clean_glyph_mapper(&mut gm);
if tt_build_tables(sfont, glyphs) < 0i32 {
        warn!("Packing TrueType font into SFNT file failed...");
tt_build_finish(glyphs);
free(cmap_table as *mut libc::c_void);
return -1i32;
}
for code in 0..256 {
if *usedchars.offset(code as isize) != 0 {
let idx = tt_get_index(glyphs, *cmap_table.offset((18i32 + code) as isize) as u16);
widths[code as usize] = (1000.0f64
* (*(*glyphs).gd.offset(idx as isize)).advw as i32 as f64
/ (*glyphs).emsize as i32 as f64
/ 1i32 as f64
+ 0.5f64)
.floor()
* 1i32 as f64
} else {
widths[code as usize] = 0.0f64
}
}
do_widths(font, widths.as_mut_ptr());
if verbose > 1i32 {
info!("[{} glyphs]", (*glyphs).num_glyphs as i32);
}
tt_build_finish(glyphs);
sfnt_set_table(
sfont,
sfnt_table_info::CMAP,
cmap_table as *mut libc::c_void,
274_u32,
);
0i32
}
pub unsafe fn pdf_font_load_truetype(font: *mut pdf_font) -> i32 {
let descriptor: *mut pdf_obj = pdf_font_get_descriptor(font);
let ident: *mut i8 = pdf_font_get_ident(font);
let encoding_id: i32 = pdf_font_get_encoding(font);
let usedchars: *mut i8 = pdf_font_get_usedchars(font);
/* ENABLE_NOEMBED */
let index: i32 = pdf_font_get_index(font); /* Should find *truetype* here */
if !pdf_font_is_in_use(font) {
return 0i32;
}
verbose = pdf_font_get_verbose();
let sfont = if let Some(handle) = dpx_open_truetype_file(ident) {
sfnt_open(handle)
} else if let Some(handle) = dpx_open_dfont_file(ident) {
dfont_open(handle, index)
} else {
panic!(
"Unable to open TrueType/dfont font file: {}",
CStr::from_ptr(ident).display(),
);
};
if sfont.is_null() {
panic!(
"Unable to open TrueType/dfont file: {}",
CStr::from_ptr(ident).display(),
);
} else {
if (*sfont).type_0 != 1i32 << 0i32
&& (*sfont).type_0 != 1i32 << 4i32
&& (*sfont).type_0 != 1i32 << 8i32
{
sfnt_close(sfont);
panic!(
"Font \"{}\" not a TrueType/dfont font?",
CStr::from_ptr(ident).display()
);
}
}
let error = if (*sfont).type_0 == 1i32 << 4i32 {
let offset = ttc_read_offset(sfont, index);
if offset == 0_u32 {
panic!("Invalid TTC index in {}.", CStr::from_ptr(ident).display());
}
sfnt_read_table_directory(sfont, offset)
} else {
sfnt_read_table_directory(sfont, (*sfont).offset)
};
if error != 0 {
sfnt_close(sfont);
        panic!(
            "Reading SFNT table directory failed for font-file=\"{}\"... Not a TrueType font?",
            CStr::from_ptr(ident).display()
        );
}
/*
* Create new TrueType cmap table with MacRoman encoding.
*/
let error = if encoding_id < 0i32 {
do_builtin_encoding(font, usedchars, sfont)
} else {
let enc_vec = pdf_encoding_get_encoding(encoding_id);
do_custom_encoding(font, enc_vec, usedchars, sfont)
};
if error != 0 {
sfnt_close(sfont);
        panic!(
            "Error occurred while creating font subfont for \"{}\"",
            CStr::from_ptr(ident).display()
        );
}
/* ENABLE_NOEMBED */
/*
* TODO: post table?
*/
for table in &required_table {
if sfnt_require_table(sfont.as_mut().unwrap(), table).is_err() {
sfnt_close(sfont);
panic!(
"Required TrueType table \"{}\" does not exist in font: {}",
table.name_str(),
CStr::from_ptr(ident).display(),
);
}
}
/*
* FontFile2
*/
let fontfile = sfnt_create_FontFile_stream(sfont); /* XXX */
sfnt_close(sfont);
if verbose > 1i32 {
info!("[{} bytes]", fontfile.len());
}
let fontfile = fontfile.into_obj();
(*descriptor)
.as_dict_mut()
.set("FontFile2", pdf_ref_obj(fontfile));
pdf_release_obj(fontfile);
0i32
}
| 36.027051 | 116 | 0.534815 |
de52cab4c64c903a4468153ae123601261ea9f1d | 21,256 | use crate::did_resolve::DIDResolver;
use crate::jsonld::REVOCATION_LIST_2020_V1_CONTEXT;
use crate::one_or_many::OneOrMany;
use crate::vc::{Credential, CredentialStatus, Issuer, VerificationResult, URI};
use async_trait::async_trait;
use bitvec::prelude::Lsb0;
use bitvec::slice::BitSlice;
use bitvec::vec::BitVec;
use core::convert::TryFrom;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use thiserror::Error;
#[allow(clippy::upper_case_acronyms)]
type URL = String;
/// Minimum length of a revocation list bitstring
/// <https://w3c-ccg.github.io/vc-status-rl-2020/#revocation-bitstring-length>
pub const MIN_BITSTRING_LENGTH: usize = 131072;
/// Maximum size of a revocation list credential loaded using [`load_credential`].
pub const MAX_RESPONSE_LENGTH: usize = 2097152; // 2MB
const EMPTY_RLIST: &str = "H4sIAAAAAAAA_-3AMQEAAADCoPVPbQwfKAAAAAAAAAAAAAAAAAAAAOBthtJUqwBAAAA";
/// Credential Status object for use in a Verifiable Credential.
/// <https://w3c-ccg.github.io/vc-status-rl-2020/#revocationlist2020status>
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct RevocationList2020Status {
/// URL for status information of the verifiable credential - but not the URL of the revocation
/// list.
pub id: URI,
/// Index of this credential's status in the revocation list credential
pub revocation_list_index: RevocationListIndex,
/// URL to a [RevocationList2020Credential]
pub revocation_list_credential: URL,
}
/// Integer identifying a bit position of the revocation status of a verifiable credential in a
/// revocation list, e.g. in a [RevocationList2020].
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(try_from = "String")]
#[serde(into = "String")]
pub struct RevocationListIndex(usize);
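// Note on the wire format (follows from the serde attributes above): the index
// travels as a JSON *string*, e.g. `"revocationListIndex": "94567"`, and is
// parsed into a usize on deserialization.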
/// Verifiable Credential of type RevocationList2020Credential.
/// <https://w3c-ccg.github.io/vc-status-rl-2020/#revocationlist2020credential>
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct RevocationList2020Credential {
pub id: URI,
pub issuer: Issuer,
pub credential_subject: RevocationList2020Subject,
#[serde(flatten)]
pub more_properties: Value,
}
/// [Credential subject](https://www.w3.org/TR/vc-data-model/#credential-subject) of a [RevocationList2020Credential]
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(tag = "type")]
pub enum RevocationList2020Subject {
RevocationList2020(RevocationList2020),
}
/// Credential subject of type RevocationList2020, expected to be used in a Verifiable Credential of type [RevocationList2020Credential]
/// <https://w3c-ccg.github.io/vc-status-rl-2020/#revocationlist2020credential>
#[derive(Debug, Serialize, Deserialize, Clone, Default)]
#[serde(rename_all = "camelCase")]
pub struct RevocationList2020 {
pub encoded_list: EncodedList,
#[serde(flatten)]
pub more_properties: Value,
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct EncodedList(pub String);
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(tag = "type")]
pub enum RevocationSubject {
RevocationList2020(RevocationList2020),
}
/// A decoded [revocation list][EncodedList].
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct List(pub Vec<u8>);
impl TryFrom<String> for RevocationListIndex {
type Error = std::num::ParseIntError;
fn try_from(string: String) -> Result<Self, Self::Error> {
Ok(Self(string.parse()?))
}
}
impl From<RevocationListIndex> for String {
fn from(idx: RevocationListIndex) -> String {
idx.0.to_string()
}
}
#[derive(Error, Debug)]
pub enum SetStatusError {
#[error("Encode list: {0}")]
Encode(#[from] EncodeListError),
#[error("Decode list: {0}")]
Decode(#[from] DecodeListError),
#[error("Out of bounds: bitstring index {0} but length is {1}")]
OutOfBounds(usize, usize),
#[error("Revocation list bitstring is too large for BitVec: {0}")]
ListTooLarge(usize),
#[error("Revocation list bitstring is too small: {0}. Minimum: {1}")]
ListTooSmall(usize, usize),
}
impl RevocationList2020 {
/// Set the revocation status for a given index in the list.
pub fn set_status(&mut self, index: usize, revoked: bool) -> Result<(), SetStatusError> {
let mut list = List::try_from(&self.encoded_list)?;
let bitstring_len = list.0.len() * 8;
let mut bitstring = BitVec::<Lsb0, u8>::try_from_vec(list.0)
.map_err(|_| SetStatusError::ListTooLarge(bitstring_len))?;
if bitstring_len < MIN_BITSTRING_LENGTH {
return Err(SetStatusError::ListTooSmall(
bitstring_len,
MIN_BITSTRING_LENGTH,
));
}
if let Some(mut bitref) = bitstring.get_mut(index) {
*bitref = revoked;
} else {
return Err(SetStatusError::OutOfBounds(index, bitstring_len));
}
list.0 = bitstring.into_vec();
self.encoded_list = EncodedList::try_from(&list)?;
Ok(())
}
}
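// Illustrative sketch (not part of the upstream test suite): revoke the
// credential at index 42 of a freshly created all-zero list, decode the
// re-encoded list, and observe the flipped bit through `iter_revoked_indexes`.
#[cfg(test)]
mod set_status_sketch {
    use super::*;
    #[test]
    fn set_status_round_trips() {
        let mut rl = RevocationList2020::default();
        rl.set_status(42, true).unwrap();
        let list = List::try_from(&rl.encoded_list).unwrap();
        assert_eq!(list.iter_revoked_indexes().unwrap().next(), Some(42));
    }
}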
#[derive(Error, Debug)]
pub enum ListIterDecodeError {
#[error("Unable to reference indexes: {0}")]
BitSpan(#[from] bitvec::ptr::BitSpanError<u8>),
#[error("Revocation list bitstring is too small: {0}. Minimum: {1}")]
ListTooSmall(usize, usize),
}
impl List {
    /// Get an iterator over the indices in the revocation list of credentials that are revoked.
pub fn iter_revoked_indexes(
&self,
) -> Result<bitvec::slice::IterOnes<Lsb0, u8>, ListIterDecodeError> {
let bitstring = BitSlice::<Lsb0, u8>::from_slice(&self.0[..])?;
if bitstring.len() < MIN_BITSTRING_LENGTH {
return Err(ListIterDecodeError::ListTooSmall(
bitstring.len(),
MIN_BITSTRING_LENGTH,
));
}
Ok(bitstring.iter_ones())
}
}
#[derive(Error, Debug)]
pub enum DecodeListError {
#[error("Base64url: {0}")]
Build(#[from] base64::DecodeError),
#[error("Decompression: {0}")]
Decompress(#[from] std::io::Error),
}
#[derive(Error, Debug)]
pub enum EncodeListError {
#[error("Compression: {0}")]
Compress(#[from] std::io::Error),
}
impl Default for EncodedList {
    /// The encoded form of a 16KB (131072-bit) all-zero list.
fn default() -> Self {
Self(EMPTY_RLIST.to_string())
}
}
impl TryFrom<&EncodedList> for List {
type Error = DecodeListError;
fn try_from(encoded_list: &EncodedList) -> Result<Self, Self::Error> {
let string = &encoded_list.0;
let bytes = base64::decode_config(string, base64::URL_SAFE)?;
let mut data = Vec::new();
use flate2::bufread::GzDecoder;
use std::io::Read;
GzDecoder::new(bytes.as_slice()).read_to_end(&mut data)?;
Ok(Self(data))
// TODO: streaming decode the revocation list, for less memory use for large bitvecs.
}
}
impl TryFrom<&List> for EncodedList {
type Error = EncodeListError;
fn try_from(list: &List) -> Result<Self, Self::Error> {
use flate2::{write::GzEncoder, Compression};
use std::io::Write;
let mut e = GzEncoder::new(Vec::new(), Compression::default());
e.write_all(&list.0)?;
let bytes = e.finish()?;
let string = base64::encode_config(bytes, base64::URL_SAFE_NO_PAD);
Ok(EncodedList(string))
}
}
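// Round-trip sketch (illustrative): `EncodedList` is the gzip-compressed,
// base64url-encoded form of the raw list bytes, so the two conversions above
// should be inverses of each other:
//
//     let list = List(vec![0u8; MIN_BITSTRING_LENGTH / 8]);
//     let encoded = EncodedList::try_from(&list).unwrap();
//     assert_eq!(List::try_from(&encoded).unwrap(), list);
//
// The `EMPTY_RLIST` constant above was presumably generated the same way.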
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
impl CredentialStatus for RevocationList2020Status {
/// Validate a credential's revocation status according to [Revocation List 2020](https://w3c-ccg.github.io/vc-status-rl-2020/#validate-algorithm).
async fn check(
&self,
credential: &Credential,
resolver: &dyn DIDResolver,
) -> VerificationResult {
let mut result = VerificationResult::new();
// TODO: prefix errors or change return type
let issuer_id = match &credential.issuer {
Some(issuer) => issuer.get_id().clone(),
None => {
return result.with_error("Credential is missing issuer".to_string());
}
};
if !credential
.context
.contains_uri(REVOCATION_LIST_2020_V1_CONTEXT)
{
// TODO: support JSON-LD credentials defining the terms elsewhere.
return result.with_error(format!(
"Missing expected context URI {} for RevocationList2020",
REVOCATION_LIST_2020_V1_CONTEXT
));
}
if self.id == URI::String(self.revocation_list_credential.clone()) {
return result.with_error(format!(
"Expected revocationListCredential to be different from status id: {}",
self.id
));
}
// Check the revocation list URL before attempting to load it.
// Revocation List 2020 does not specify an expected URL scheme (URI scheme), but
// examples and test vectors use https.
match self.revocation_list_credential.split_once(':') {
Some(("https", _)) => (),
// TODO: an option to allow HTTP?
// TODO: load from DID URLs?
            Some((_scheme, _)) => return result.with_error(format!("Invalid URL scheme: {}", self.revocation_list_credential)),
            _ => return result.with_error(format!("Invalid resource URL: {}", self.revocation_list_credential)),
}
let revocation_list_credential =
match load_credential(&self.revocation_list_credential).await {
Ok(credential) => credential,
Err(e) => {
return result.with_error(format!(
"Unable to fetch revocation list credential: {}",
e.to_string()
));
}
};
let list_issuer_id = match &revocation_list_credential.issuer {
Some(issuer) => issuer.get_id().clone(),
None => {
return result
.with_error("Revocation list credential is missing issuer".to_string());
}
};
if issuer_id != list_issuer_id {
return result.with_error(format!(
"Revocation list issuer mismatch. Credential: {}, Revocation list: {}",
issuer_id, list_issuer_id
));
}
match revocation_list_credential.validate() {
Err(e) => {
return result.with_error(format!("Invalid list credential: {}", e.to_string()));
}
Ok(()) => {}
}
let vc_result = revocation_list_credential.verify(None, resolver).await;
for warning in vc_result.warnings {
result
.warnings
.push(format!("Revocation list: {}", warning));
}
for error in vc_result.errors {
result.errors.push(format!("Revocation list: {}", error));
}
if !result.errors.is_empty() {
return result;
}
// Note: vc_result.checks is not checked here. It is assumed that default checks passed.
let revocation_list_credential =
match RevocationList2020Credential::try_from(revocation_list_credential) {
Ok(credential) => credential,
Err(e) => {
return result.with_error(format!(
"Unable to parse revocation list credential: {}",
e.to_string()
));
}
};
if revocation_list_credential.id != URI::String(self.revocation_list_credential.to_string())
{
return result.with_error(format!(
"Revocation list credential id mismatch. revocationListCredential: {}, id: {}",
self.revocation_list_credential, revocation_list_credential.id
));
}
let RevocationList2020Subject::RevocationList2020(revocation_list) =
revocation_list_credential.credential_subject;
let list = match List::try_from(&revocation_list.encoded_list) {
Ok(list) => list,
Err(e) => {
return result.with_error(format!(
"Unable to decode revocation list: {}",
e.to_string()
))
}
};
let credential_index = self.revocation_list_index.0;
use bitvec::prelude::*;
let bitstring = match BitVec::<Lsb0, u8>::try_from_vec(list.0) {
Ok(bitstring) => bitstring,
Err(list) => {
return result.with_error(format!(
"Revocation list is too large for bitvec: {}",
list.len()
))
}
};
let revoked = match bitstring.get(credential_index) {
Some(bitref) => *bitref,
None => {
return result
.with_error("Credential index in revocation list is invalid.".to_string());
}
};
if revoked {
return result.with_error("Credential is revoked.".to_string());
}
result
}
}
#[derive(Error, Debug)]
pub enum LoadResourceError {
#[error("Error building HTTP client: {0}")]
Build(reqwest::Error),
#[error("Error sending HTTP request: {0}")]
Request(reqwest::Error),
#[error("Parse error: {0}")]
Response(String),
#[error("Not found")]
NotFound,
#[error("HTTP error: {0}")]
HTTP(String),
/// The resource is larger than an expected/allowed maximum size.
#[error("Resource is too large: {size}, expected maximum: {max}")]
TooLarge {
/// The size of the resource so far, in bytes.
size: usize,
/// Maximum expected size of the resource, in bytes.
///
/// e.g. [`MAX_RESPONSE_LENGTH`]
max: usize,
},
/// Unable to convert content-length header value.
#[error("Unable to convert content-length header value")]
ContentLengthConversion(#[source] std::num::TryFromIntError),
}
async fn load_resource(url: &str) -> Result<Vec<u8>, LoadResourceError> {
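    // In tests, short-circuit to a bundled fixture rather than hitting the network.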
#[cfg(test)]
match url {
crate::vc::tests::EXAMPLE_REVOCATION_2020_LIST_URL => {
return Ok(crate::vc::tests::EXAMPLE_REVOCATION_2020_LIST.to_vec());
}
_ => {}
}
let mut headers = reqwest::header::HeaderMap::new();
headers.insert(
"User-Agent",
reqwest::header::HeaderValue::from_static(crate::USER_AGENT),
);
let client = reqwest::Client::builder()
.default_headers(headers)
.build()
.map_err(LoadResourceError::Build)?;
let accept = "application/json".to_string();
let resp = client
.get(url)
.header("Accept", accept)
.send()
.await
.map_err(LoadResourceError::Request)?;
if let Err(err) = resp.error_for_status_ref() {
if err.status() == Some(reqwest::StatusCode::NOT_FOUND) {
return Err(LoadResourceError::NotFound);
}
return Err(LoadResourceError::HTTP(err.to_string()));
}
#[allow(unused_variables)]
let content_length_opt = if let Some(content_length) = resp.content_length() {
let len =
usize::try_from(content_length).map_err(LoadResourceError::ContentLengthConversion)?;
if len > MAX_RESPONSE_LENGTH {
// Fail early if content-length header indicates body is too large.
return Err(LoadResourceError::TooLarge {
size: len,
max: MAX_RESPONSE_LENGTH,
});
}
Some(len)
} else {
None
};
#[cfg(target_arch = "wasm32")]
{
// Reqwest's WASM backend doesn't offer streamed/chunked response reading.
// So we cannot check the response size while reading the response here.
// Relevant issue: https://github.com/seanmonstar/reqwest/issues/1234
// Instead, we hope that the content-length is correct, read the body all at once,
// and apply the length check afterwards, for consistency.
let bytes = resp
.bytes()
.await
.map_err(|e| LoadResourceError::Response(e.to_string()))?
.to_vec();
if bytes.len() > MAX_RESPONSE_LENGTH {
return Err(LoadResourceError::TooLarge {
size: bytes.len(),
max: MAX_RESPONSE_LENGTH,
});
}
Ok(bytes)
}
#[cfg(not(target_arch = "wasm32"))]
{
        // For non-WebAssembly, read the response up to the allowed maximum size.
let mut bytes = if let Some(len) = content_length_opt {
Vec::with_capacity(len)
} else {
Vec::new()
};
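        // Preallocating here is bounded: `len` was already checked against
        // MAX_RESPONSE_LENGTH above.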
let mut resp = resp;
while let Some(chunk) = resp
.chunk()
.await
.map_err(|e| LoadResourceError::Response(e.to_string()))?
{
let len = bytes.len() + chunk.len();
if len > MAX_RESPONSE_LENGTH {
return Err(LoadResourceError::TooLarge {
size: len,
max: MAX_RESPONSE_LENGTH,
});
}
bytes.append(&mut chunk.to_vec());
}
Ok(bytes)
}
}
#[derive(Error, Debug)]
pub enum LoadCredentialError {
#[error("Unable to load resource: {0}")]
Load(#[from] LoadResourceError),
#[error("Error reading HTTP response: {0}")]
Parse(#[from] serde_json::Error),
}
/// Fetch a credential from an HTTP(S) URL.
/// The resulting verifiable credential is not yet validated or verified.
///
/// The size of the loaded credential must not be greater than [`MAX_RESPONSE_LENGTH`].
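///
/// A minimal usage sketch (the URL here is hypothetical):
/// ```ignore
/// let credential = load_credential("https://example.com/credentials/status/3").await?;
/// ```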
pub async fn load_credential(url: &str) -> Result<Credential, LoadCredentialError> {
let data = load_resource(url).await?;
// TODO: support JWT-VC
let credential: Credential = serde_json::from_slice(&data)?;
Ok(credential)
}
#[derive(Error, Debug)]
pub enum CredentialConversionError {
#[error("Conversion to JSON: {0}")]
ToValue(serde_json::Error),
#[error("Conversion from JSON: {0}")]
FromValue(serde_json::Error),
#[error("Missing expected URI in @context: {0}")]
MissingContext(&'static str),
#[error("Missing expected type: {0}. Found: {0:?}")]
MissingType(&'static str, OneOrMany<String>),
#[error("Missing issuer")]
MissingIssuer,
}
/// Convert Credential to a [RevocationList2020Credential], while validating it.
// https://w3c-ccg.github.io/vc-status-rl-2020/#validate-algorithm
impl TryFrom<Credential> for RevocationList2020Credential {
type Error = CredentialConversionError;
fn try_from(credential: Credential) -> Result<Self, Self::Error> {
if !credential
.context
.contains_uri(REVOCATION_LIST_2020_V1_CONTEXT)
{
return Err(CredentialConversionError::MissingContext(
REVOCATION_LIST_2020_V1_CONTEXT,
));
}
if !credential
.type_
.contains(&"RevocationList2020Credential".to_string())
{
return Err(CredentialConversionError::MissingType(
"RevocationList2020Credential",
credential.type_,
));
}
let credential =
serde_json::to_value(credential).map_err(CredentialConversionError::ToValue)?;
let credential =
serde_json::from_value(credential).map_err(CredentialConversionError::FromValue)?;
Ok(credential)
}
}
impl TryFrom<RevocationList2020Credential> for Credential {
type Error = CredentialConversionError;
fn try_from(credential: RevocationList2020Credential) -> Result<Self, Self::Error> {
let mut credential =
serde_json::to_value(credential).map_err(CredentialConversionError::ToValue)?;
use crate::vc::DEFAULT_CONTEXT;
use serde_json::json;
credential["@context"] = json!([DEFAULT_CONTEXT, REVOCATION_LIST_2020_V1_CONTEXT]);
credential["type"] = json!(["VerifiableCredential", "RevocationList2020Credential"]);
let credential =
serde_json::from_value(credential).map_err(CredentialConversionError::FromValue)?;
Ok(credential)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn default_list() {
let list = List(vec![0; MIN_BITSTRING_LENGTH / 8]);
let revoked_indexes = list.iter_revoked_indexes().unwrap().collect::<Vec<usize>>();
let empty: Vec<usize> = Vec::new();
assert_eq!(revoked_indexes, empty);
let el = EncodedList::try_from(&list).unwrap();
assert_eq!(EncodedList::default(), el);
let decoded_list = List::try_from(&el).unwrap();
assert_eq!(decoded_list, list);
}
#[test]
fn set_status() {
let mut rl = RevocationList2020::default();
rl.set_status(1, true).unwrap();
rl.set_status(5, true).unwrap();
let decoded_list = List::try_from(&rl.encoded_list).unwrap();
let revoked_indexes = decoded_list
.iter_revoked_indexes()
.unwrap()
.collect::<Vec<usize>>();
assert_eq!(revoked_indexes, vec![1, 5]);
}
}
| 36.775087 | 151 | 0.61305 |
d73662f49362ebd7c547d5e5c6485e50fec94a06 | 15,074 | #![allow(clippy::wildcard_imports, clippy::enum_glob_use)]
use crate::utils::ast_utils::{eq_field_pat, eq_id, eq_pat, eq_path};
use crate::utils::{over, span_lint_and_then};
use rustc_ast::mut_visit::*;
use rustc_ast::ptr::P;
use rustc_ast::{self as ast, Pat, PatKind, PatKind::*, DUMMY_NODE_ID};
use rustc_ast_pretty::pprust;
use rustc_errors::Applicability;
use rustc_lint::{EarlyContext, EarlyLintPass};
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::DUMMY_SP;
use std::cell::Cell;
use std::mem;
declare_clippy_lint! {
/// **What it does:**
///
/// Checks for unnested or-patterns, e.g., `Some(0) | Some(2)` and
/// suggests replacing the pattern with a nested one, `Some(0 | 2)`.
///
/// Another way to think of this is that it rewrites patterns in
/// *disjunctive normal form (DNF)* into *conjunctive normal form (CNF)*.
///
/// **Why is this bad?**
///
    /// In the example above, `Some` is repeated, which unnecessarily complicates the pattern.
///
/// **Known problems:** None.
///
/// **Example:**
///
/// ```rust
/// fn main() {
/// if let Some(0) | Some(2) = Some(0) {}
/// }
/// ```
/// Use instead:
/// ```rust
/// #![feature(or_patterns)]
///
/// fn main() {
/// if let Some(0 | 2) = Some(0) {}
/// }
/// ```
pub UNNESTED_OR_PATTERNS,
pedantic,
"unnested or-patterns, e.g., `Foo(Bar) | Foo(Baz) instead of `Foo(Bar | Baz)`"
}
declare_lint_pass!(UnnestedOrPatterns => [UNNESTED_OR_PATTERNS]);
impl EarlyLintPass for UnnestedOrPatterns {
fn check_arm(&mut self, cx: &EarlyContext<'_>, a: &ast::Arm) {
lint_unnested_or_patterns(cx, &a.pat);
}
fn check_expr(&mut self, cx: &EarlyContext<'_>, e: &ast::Expr) {
if let ast::ExprKind::Let(pat, _) = &e.kind {
lint_unnested_or_patterns(cx, pat);
}
}
fn check_param(&mut self, cx: &EarlyContext<'_>, p: &ast::Param) {
lint_unnested_or_patterns(cx, &p.pat);
}
fn check_local(&mut self, cx: &EarlyContext<'_>, l: &ast::Local) {
lint_unnested_or_patterns(cx, &l.pat);
}
}
fn lint_unnested_or_patterns(cx: &EarlyContext<'_>, pat: &Pat) {
if !cx.sess.features_untracked().or_patterns {
// Do not suggest nesting the patterns if the feature `or_patterns` is not enabled.
return;
}
if let Ident(.., None) | Lit(_) | Wild | Path(..) | Range(..) | Rest | MacCall(_) = pat.kind {
// This is a leaf pattern, so cloning is unprofitable.
return;
}
let mut pat = P(pat.clone());
// Nix all the paren patterns everywhere so that they aren't in our way.
remove_all_parens(&mut pat);
// Transform all unnested or-patterns into nested ones, and if there were none, quit.
if !unnest_or_patterns(&mut pat) {
return;
}
span_lint_and_then(cx, UNNESTED_OR_PATTERNS, pat.span, "unnested or-patterns", |db| {
insert_necessary_parens(&mut pat);
db.span_suggestion_verbose(
pat.span,
"nest the patterns",
pprust::pat_to_string(&pat),
Applicability::MachineApplicable,
);
});
}
/// Remove all `(p)` patterns in `pat`.
fn remove_all_parens(pat: &mut P<Pat>) {
struct Visitor;
impl MutVisitor for Visitor {
fn visit_pat(&mut self, pat: &mut P<Pat>) {
noop_visit_pat(pat, self);
let inner = match &mut pat.kind {
Paren(i) => mem::replace(&mut i.kind, Wild),
_ => return,
};
pat.kind = inner;
}
}
Visitor.visit_pat(pat);
}
/// Insert parens where necessary according to Rust's precedence rules for patterns.
fn insert_necessary_parens(pat: &mut P<Pat>) {
struct Visitor;
impl MutVisitor for Visitor {
fn visit_pat(&mut self, pat: &mut P<Pat>) {
use ast::{BindingMode::*, Mutability::*};
noop_visit_pat(pat, self);
let target = match &mut pat.kind {
// `i @ a | b`, `box a | b`, and `& mut? a | b`.
Ident(.., Some(p)) | Box(p) | Ref(p, _) if matches!(&p.kind, Or(ps) if ps.len() > 1) => p,
Ref(p, Not) if matches!(p.kind, Ident(ByValue(Mut), ..)) => p, // `&(mut x)`
_ => return,
};
target.kind = Paren(P(take_pat(target)));
}
}
Visitor.visit_pat(pat);
}
/// Unnest or-patterns `p0 | ... | p1` in the pattern `pat`.
/// For example, this would transform `Some(0) | FOO | Some(2)` into `Some(0 | 2) | FOO`.
fn unnest_or_patterns(pat: &mut P<Pat>) -> bool {
struct Visitor {
changed: bool,
}
impl MutVisitor for Visitor {
fn visit_pat(&mut self, p: &mut P<Pat>) {
// This is a bottom up transformation, so recurse first.
noop_visit_pat(p, self);
// Don't have an or-pattern? Just quit early on.
let alternatives = match &mut p.kind {
Or(ps) => ps,
_ => return,
};
// Collapse or-patterns directly nested in or-patterns.
let mut idx = 0;
let mut this_level_changed = false;
while idx < alternatives.len() {
let inner = if let Or(ps) = &mut alternatives[idx].kind {
mem::take(ps)
} else {
idx += 1;
continue;
};
this_level_changed = true;
alternatives.splice(idx..=idx, inner);
}
// Focus on `p_n` and then try to transform all `p_i` where `i > n`.
let mut focus_idx = 0;
while focus_idx < alternatives.len() {
this_level_changed |= transform_with_focus_on_idx(alternatives, focus_idx);
focus_idx += 1;
}
self.changed |= this_level_changed;
// Deal with `Some(Some(0)) | Some(Some(1))`.
if this_level_changed {
noop_visit_pat(p, self);
}
}
}
let mut visitor = Visitor { changed: false };
visitor.visit_pat(pat);
visitor.changed
}
/// Match `$scrutinee` against `$pat` and extract `$then` from it.
/// Panics if there is no match.
macro_rules! always_pat {
($scrutinee:expr, $pat:pat => $then:expr) => {
match $scrutinee {
$pat => $then,
_ => unreachable!(),
}
};
}
/// Focus on `focus_idx` in `alternatives`,
/// attempting to extend it with elements of the same constructor `C`
/// in `alternatives[focus_idx + 1..]`.
fn transform_with_focus_on_idx(alternatives: &mut Vec<P<Pat>>, focus_idx: usize) -> bool {
// Extract the kind; we'll need to make some changes in it.
let mut focus_kind = mem::replace(&mut alternatives[focus_idx].kind, PatKind::Wild);
// We'll focus on `alternatives[focus_idx]`,
// so we're draining from `alternatives[focus_idx + 1..]`.
let start = focus_idx + 1;
    // We're looking in `alternatives[start..]` for patterns with the same kind (~"constructor") as `focus_kind`.
let changed = match &mut focus_kind {
// These pattern forms are "leafs" and do not have sub-patterns.
// Therefore they are not some form of constructor `C`,
// with which a pattern `C(p_0)` may be formed,
// which we would want to join with other `C(p_j)`s.
Ident(.., None) | Lit(_) | Wild | Path(..) | Range(..) | Rest | MacCall(_)
// Dealt with elsewhere.
| Or(_) | Paren(_) => false,
// Transform `box x | ... | box y` into `box (x | y)`.
//
// The cases below until `Slice(...)` deal with *singleton* products.
// These patterns have the shape `C(p)`, and not e.g., `C(p0, ..., pn)`.
Box(target) => extend_with_matching(
target, start, alternatives,
|k| matches!(k, Box(_)),
|k| always_pat!(k, Box(p) => p),
),
// Transform `&m x | ... | &m y` into `&m (x | y)`.
Ref(target, m1) => extend_with_matching(
target, start, alternatives,
|k| matches!(k, Ref(_, m2) if m1 == m2), // Mutabilities must match.
|k| always_pat!(k, Ref(p, _) => p),
),
        // Transform `b @ p0 | ... | b @ p1` into `b @ (p0 | p1)`.
Ident(b1, i1, Some(target)) => extend_with_matching(
target, start, alternatives,
// Binding names must match.
|k| matches!(k, Ident(b2, i2, Some(_)) if b1 == b2 && eq_id(*i1, *i2)),
|k| always_pat!(k, Ident(_, _, Some(p)) => p),
),
// Transform `[pre, x, post] | ... | [pre, y, post]` into `[pre, x | y, post]`.
Slice(ps1) => extend_with_matching_product(
ps1, start, alternatives,
|k, ps1, idx| matches!(k, Slice(ps2) if eq_pre_post(ps1, ps2, idx)),
|k| always_pat!(k, Slice(ps) => ps),
),
// Transform `(pre, x, post) | ... | (pre, y, post)` into `(pre, x | y, post)`.
Tuple(ps1) => extend_with_matching_product(
ps1, start, alternatives,
|k, ps1, idx| matches!(k, Tuple(ps2) if eq_pre_post(ps1, ps2, idx)),
|k| always_pat!(k, Tuple(ps) => ps),
),
// Transform `S(pre, x, post) | ... | S(pre, y, post)` into `S(pre, x | y, post)`.
TupleStruct(path1, ps1) => extend_with_matching_product(
ps1, start, alternatives,
|k, ps1, idx| matches!(
k,
TupleStruct(path2, ps2) if eq_path(path1, path2) && eq_pre_post(ps1, ps2, idx)
),
|k| always_pat!(k, TupleStruct(_, ps) => ps),
),
// Transform a record pattern `S { fp_0, ..., fp_n }`.
Struct(path1, fps1, rest1) => extend_with_struct_pat(path1, fps1, *rest1, start, alternatives),
};
alternatives[focus_idx].kind = focus_kind;
changed
}
/// Here we focus on a record pattern `S { fp_0, ..., fp_n }`.
/// In particular, for a record pattern, the order in which the field patterns appear is irrelevant.
/// So when we fixate on some `ident_k: pat_k`, we try to find `ident_k` in the other pattern
/// and check that all `fp_i` where `i ∈ ((0...n) \ k)` are equal between the two patterns.
fn extend_with_struct_pat(
path1: &ast::Path,
fps1: &mut Vec<ast::FieldPat>,
rest1: bool,
start: usize,
alternatives: &mut Vec<P<Pat>>,
) -> bool {
(0..fps1.len()).any(|idx| {
let pos_in_2 = Cell::new(None); // The element `k`.
let tail_or = drain_matching(
start,
alternatives,
|k| {
matches!(k, Struct(path2, fps2, rest2)
if rest1 == *rest2 // If one struct pattern has `..` so must the other.
&& eq_path(path1, path2)
&& fps1.len() == fps2.len()
&& fps1.iter().enumerate().all(|(idx_1, fp1)| {
if idx_1 == idx {
// In the case of `k`, we merely require identical field names
// so that we will transform into `ident_k: p1_k | p2_k`.
let pos = fps2.iter().position(|fp2| eq_id(fp1.ident, fp2.ident));
pos_in_2.set(pos);
pos.is_some()
} else {
fps2.iter().any(|fp2| eq_field_pat(fp1, fp2))
}
}))
},
// Extract `p2_k`.
|k| always_pat!(k, Struct(_, mut fps, _) => fps.swap_remove(pos_in_2.take().unwrap()).pat),
);
extend_with_tail_or(&mut fps1[idx].pat, tail_or)
})
}
/// Like `extend_with_matching` but for products with > 1 factor, e.g., `C(p_0, ..., p_n)`.
/// Here, the idea is that we fixate on some `p_k` in `C`,
/// allowing it to vary between two `targets` and `ps2` (returned by `extract`),
/// while also requiring `ps1[..n] ~ ps2[..n]` (pre) and `ps1[n + 1..] ~ ps2[n + 1..]` (post),
/// where `~` denotes semantic equality.
fn extend_with_matching_product(
targets: &mut Vec<P<Pat>>,
start: usize,
alternatives: &mut Vec<P<Pat>>,
predicate: impl Fn(&PatKind, &[P<Pat>], usize) -> bool,
extract: impl Fn(PatKind) -> Vec<P<Pat>>,
) -> bool {
(0..targets.len()).any(|idx| {
let tail_or = drain_matching(
start,
alternatives,
|k| predicate(k, targets, idx),
|k| extract(k).swap_remove(idx),
);
extend_with_tail_or(&mut targets[idx], tail_or)
})
}
/// Extract the pattern from the given one and replace it with `Wild`.
/// This is meant for temporarily swapping out the pattern for manipulation.
fn take_pat(from: &mut Pat) -> Pat {
let dummy = Pat {
id: DUMMY_NODE_ID,
kind: Wild,
span: DUMMY_SP,
};
mem::replace(from, dummy)
}
/// Extend `target` as an or-pattern with the alternatives
/// in `tail_or` if there are any and return if there were.
fn extend_with_tail_or(target: &mut Pat, tail_or: Vec<P<Pat>>) -> bool {
fn extend(target: &mut Pat, mut tail_or: Vec<P<Pat>>) {
match target {
// On an existing or-pattern in the target, append to it.
Pat { kind: Or(ps), .. } => ps.append(&mut tail_or),
// Otherwise convert the target to an or-pattern.
target => {
let mut init_or = vec![P(take_pat(target))];
init_or.append(&mut tail_or);
target.kind = Or(init_or);
},
}
}
let changed = !tail_or.is_empty();
if changed {
// Extend the target.
extend(target, tail_or);
}
changed
}
// Extract all inner patterns in `alternatives` matching our `predicate`.
// Only elements beginning with `start` are considered for extraction.
fn drain_matching(
start: usize,
alternatives: &mut Vec<P<Pat>>,
predicate: impl Fn(&PatKind) -> bool,
extract: impl Fn(PatKind) -> P<Pat>,
) -> Vec<P<Pat>> {
let mut tail_or = vec![];
let mut idx = 0;
for pat in alternatives.drain_filter(|p| {
// Check if we should extract, but only if `idx >= start`.
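        // (`idx` is incremented before the test, so it equals the element's
        // position + 1; `idx > start` therefore selects positions >= `start`.)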
idx += 1;
idx > start && predicate(&p.kind)
}) {
tail_or.push(extract(pat.into_inner().kind));
}
tail_or
}
fn extend_with_matching(
target: &mut Pat,
start: usize,
alternatives: &mut Vec<P<Pat>>,
predicate: impl Fn(&PatKind) -> bool,
extract: impl Fn(PatKind) -> P<Pat>,
) -> bool {
extend_with_tail_or(target, drain_matching(start, alternatives, predicate, extract))
}
/// Are the patterns in `ps1` and `ps2` equal save for `ps1[idx]` compared to `ps2[idx]`?
fn eq_pre_post(ps1: &[P<Pat>], ps2: &[P<Pat>], idx: usize) -> bool {
ps1.len() == ps2.len()
&& ps1[idx].is_rest() == ps2[idx].is_rest() // Avoid `[x, ..] | [x, 0]` => `[x, .. | 0]`.
&& over(&ps1[..idx], &ps2[..idx], |l, r| eq_pat(l, r))
&& over(&ps1[idx + 1..], &ps2[idx + 1..], |l, r| eq_pat(l, r))
}
| 36.946078 | 106 | 0.549224 |
f4d5a42b663defc767e319f0b30d1a9b304c200a | 1,875 | //! Core libraries for libsecp256k1.
#![allow(
clippy::cast_ptr_alignment,
clippy::identity_op,
clippy::many_single_char_names,
clippy::needless_range_loop,
clippy::suspicious_op_assign_impl,
clippy::too_many_arguments,
clippy::type_complexity
)]
#![deny(
unused_import_braces,
unused_imports,
unused_comparisons,
unused_must_use,
unused_variables,
non_shorthand_field_patterns,
unreachable_code,
unused_parens
)]
#![cfg_attr(not(feature = "std"), no_std)]
extern crate alloc;
#[macro_use]
mod field;
#[macro_use]
mod group;
mod der;
mod ecdh;
mod ecdsa;
mod ecmult;
mod error;
mod scalar;
pub use crate::error::Error;
/// Curve related structs.
pub mod curve {
pub use crate::{
field::{Field, FieldStorage},
group::{Affine, AffineStorage, Jacobian, AFFINE_G, CURVE_B},
scalar::Scalar,
};
pub use crate::ecmult::{ECMultContext, ECMultGenContext};
}
/// Utilities to manipulate the secp256k1 curve parameters.
pub mod util {
pub const TAG_PUBKEY_EVEN: u8 = 0x02;
pub const TAG_PUBKEY_ODD: u8 = 0x03;
pub const TAG_PUBKEY_FULL: u8 = 0x04;
pub const TAG_PUBKEY_HYBRID_EVEN: u8 = 0x06;
pub const TAG_PUBKEY_HYBRID_ODD: u8 = 0x07;
pub const MESSAGE_SIZE: usize = 32;
pub const SECRET_KEY_SIZE: usize = 32;
pub const RAW_PUBLIC_KEY_SIZE: usize = 64;
pub const FULL_PUBLIC_KEY_SIZE: usize = 65;
pub const COMPRESSED_PUBLIC_KEY_SIZE: usize = 33;
pub const SIGNATURE_SIZE: usize = 64;
pub const DER_MAX_SIGNATURE_SIZE: usize = 72;
pub use crate::{
ecmult::{
odd_multiples_table, ECMULT_TABLE_SIZE_A, ECMULT_TABLE_SIZE_G, WINDOW_A, WINDOW_G,
},
group::{globalz_set_table_gej, set_table_gej_var, AFFINE_INFINITY, JACOBIAN_INFINITY},
};
pub use crate::der::{Decoder, SignatureArray};
}
| 25.337838 | 94 | 0.6976 |
4bc67bfdee3e7901f82a025aa60ec8a01736f264 | 2,691 | // Copyright 2020 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
use crate::{make_empty_map, make_map_with_root_and_bitwidth, FIRST_NON_SINGLETON_ADDR};
use address::{Address, Protocol};
use cid::Cid;
use encoding::tuple::*;
use encoding::Cbor;
use fil_types::{ActorID, HAMT_BIT_WIDTH};
use ipld_blockstore::BlockStore;
use ipld_hamt::Error as HamtError;
use std::error::Error as StdError;
/// State of the init actor, responsible for allocating new actor IDs and mapping addresses to them.
#[derive(Serialize_tuple, Deserialize_tuple)]
pub struct State {
pub address_map: Cid,
pub next_id: ActorID,
pub network_name: String,
}
impl State {
pub fn new<BS: BlockStore>(
store: &BS,
network_name: String,
) -> Result<Self, Box<dyn StdError>> {
let empty_map = make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH)
.flush()
.map_err(|e| format!("failed to create empty map: {}", e))?;
Ok(Self {
address_map: empty_map,
next_id: FIRST_NON_SINGLETON_ADDR,
network_name,
})
}
/// Allocates a new ID address and stores a mapping of the argument address to it.
/// Returns the newly-allocated address.
pub fn map_address_to_new_id<BS: BlockStore>(
&mut self,
store: &BS,
addr: &Address,
) -> Result<Address, HamtError> {
let id = self.next_id;
self.next_id += 1;
let mut map = make_map_with_root_and_bitwidth(&self.address_map, store, HAMT_BIT_WIDTH)?;
map.set(addr.to_bytes().into(), id)?;
self.address_map = map.flush()?;
Ok(Address::new_id(id))
}
    /// Resolves an address to an ID-address, if possible.
/// If the provided address is an ID address, it is returned as-is.
/// This means that mapped ID-addresses (which should only appear as values, not keys) and
/// singleton actor addresses (which are not in the map) pass through unchanged.
///
/// Returns an ID-address and `true` if the address was already an ID-address or was resolved
/// in the mapping.
/// Returns an undefined address and `false` if the address was not an ID-address and not found
/// in the mapping.
/// Returns an error only if state was inconsistent.
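    ///
    /// A minimal usage sketch (assumes a blockstore `store` and a previously mapped `addr`):
    /// ```ignore
    /// let maybe_id = state.resolve_address(&store, &addr)?;
    /// ```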
pub fn resolve_address<BS: BlockStore>(
&self,
store: &BS,
addr: &Address,
) -> Result<Option<Address>, Box<dyn StdError>> {
if addr.protocol() == Protocol::ID {
return Ok(Some(*addr));
}
let map = make_map_with_root_and_bitwidth(&self.address_map, store, HAMT_BIT_WIDTH)?;
Ok(map.get(&addr.to_bytes())?.copied().map(Address::new_id))
}
}
impl Cbor for State {}
| 33.6375 | 99 | 0.643255 |
11412e430b01a832154ef614dec2f959d5450253 | 630 | mod coin_change;
mod edit_distance;
mod egg_dropping;
mod fibonacci;
mod knapsack;
mod longest_common_subsequence;
mod longest_increasing_subsequence;
mod maximum_subarray;
pub use self::coin_change::coin_change;
pub use self::edit_distance::{edit_distance, edit_distance_se};
pub use self::egg_dropping::egg_drop;
pub use self::fibonacci::fibonacci;
pub use self::fibonacci::recursive_fibonacci;
pub use self::knapsack::knapsack;
pub use self::longest_common_subsequence::longest_common_subsequence;
pub use self::longest_increasing_subsequence::longest_increasing_subsequence;
pub use self::maximum_subarray::maximum_subarray;
| 33.157895 | 77 | 0.839683 |
72e76f0298b5c3369647ec0c961ec20285143364 | 20,919 | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use crate::alloc::alloc::{alloc, dealloc, Layout};
use crate::alloc::boxed::Box;
use crate::alloc::{vec, vec::Vec};
use core::any::{type_name, TypeId};
use core::hash::{BuildHasher, BuildHasherDefault, Hasher};
use core::ops::Deref;
use core::ptr::{self, NonNull};
use core::{fmt, slice};
use hashbrown::{hash_map::DefaultHashBuilder, HashMap};
use crate::borrow::AtomicBorrow;
use crate::query::Fetch;
use crate::{Access, Component, Query};
/// A collection of entities having the same component types
///
/// Accessing `Archetype`s is only required in niche cases. Typical use should go through the
/// [`World`](crate::World).
pub struct Archetype {
types: Vec<TypeInfo>,
index: OrderedTypeIdMap<usize>,
len: u32,
entities: Box<[u32]>,
/// One allocation per type, in the same order as `types`
data: Box<[Data]>,
}
impl Archetype {
fn assert_type_info(types: &[TypeInfo]) {
types.windows(2).for_each(|x| match x[0].cmp(&x[1]) {
core::cmp::Ordering::Less => (),
#[cfg(debug_assertions)]
core::cmp::Ordering::Equal => panic!(
"attempted to allocate entity with duplicate {} components; \
each type must occur at most once!",
x[0].type_name
),
#[cfg(not(debug_assertions))]
core::cmp::Ordering::Equal => panic!(
"attempted to allocate entity with duplicate components; \
each type must occur at most once!"
),
core::cmp::Ordering::Greater => panic!("type info is unsorted"),
});
}
pub(crate) fn new(types: Vec<TypeInfo>) -> Self {
let max_align = types.first().map_or(1, |ty| ty.layout.align());
Self::assert_type_info(&types);
let component_count = types.len();
Self {
index: OrderedTypeIdMap::new(types.iter().enumerate().map(|(i, ty)| (ty.id, i))),
types,
entities: Box::new([]),
len: 0,
data: (0..component_count)
.map(|_| Data {
state: AtomicBorrow::new(),
storage: NonNull::new(max_align as *mut u8).unwrap(),
})
.collect(),
}
}
pub(crate) fn clear(&mut self) {
for (ty, data) in self.types.iter().zip(&*self.data) {
for index in 0..self.len {
unsafe {
let removed = data.storage.as_ptr().add(index as usize * ty.layout.size());
(ty.drop)(removed);
}
}
}
self.len = 0;
}
/// Whether this archetype contains `T` components
pub fn has<T: Component>(&self) -> bool {
self.has_dynamic(TypeId::of::<T>())
}
/// Whether this archetype contains components with the type identified by `id`
pub fn has_dynamic(&self, id: TypeId) -> bool {
self.index.contains_key(&id)
}
/// Find the state index associated with `T`, if present
pub(crate) fn get_state<T: Component>(&self) -> Option<usize> {
self.index.get(&TypeId::of::<T>()).copied()
}
/// Get the address of the first `T` component using an index from `get_state::<T>`
pub(crate) fn get_base<T: Component>(&self, state: usize) -> NonNull<T> {
assert_eq!(self.types[state].id, TypeId::of::<T>());
unsafe {
NonNull::new_unchecked(
self.data.get_unchecked(state).storage.as_ptr().cast::<T>() as *mut T
)
}
}
/// Get the `T` components of these entities, if present
///
/// Useful for efficient serialization.
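    ///
    /// A minimal usage sketch (assumes a `Position` component type stored in this archetype):
    /// ```ignore
    /// if let Some(column) = archetype.get::<Position>() {
    ///     for position in column.iter() {
    ///         // serialize `position` here
    ///     }
    /// }
    /// ```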
pub fn get<T: Component>(&self) -> Option<ArchetypeColumn<'_, T>> {
let state = self.get_state::<T>()?;
let ptr = self.get_base::<T>(state);
let column = unsafe { slice::from_raw_parts_mut(ptr.as_ptr(), self.len as usize) };
self.borrow::<T>(state);
Some(ArchetypeColumn {
archetype: self,
column,
})
}
pub(crate) fn borrow<T: Component>(&self, state: usize) {
assert_eq!(self.types[state].id, TypeId::of::<T>());
if !self.data[state].state.borrow() {
panic!("{} already borrowed uniquely", type_name::<T>());
}
}
pub(crate) fn borrow_mut<T: Component>(&self, state: usize) {
assert_eq!(self.types[state].id, TypeId::of::<T>());
if !self.data[state].state.borrow_mut() {
panic!("{} already borrowed", type_name::<T>());
}
}
pub(crate) fn release<T: Component>(&self, state: usize) {
assert_eq!(self.types[state].id, TypeId::of::<T>());
self.data[state].state.release();
}
pub(crate) fn release_mut<T: Component>(&self, state: usize) {
assert_eq!(self.types[state].id, TypeId::of::<T>());
self.data[state].state.release_mut();
}
/// Number of entities in this archetype
#[inline]
pub fn len(&self) -> u32 {
self.len
}
/// Whether this archetype contains no entities
#[inline]
pub fn is_empty(&self) -> bool {
self.len == 0
}
#[inline]
pub(crate) fn entities(&self) -> NonNull<u32> {
unsafe { NonNull::new_unchecked(self.entities.as_ptr() as *mut _) }
}
pub(crate) fn entity_id(&self, index: u32) -> u32 {
self.entities[index as usize]
}
#[inline]
pub(crate) fn set_entity_id(&mut self, index: usize, id: u32) {
self.entities[index] = id;
}
pub(crate) fn types(&self) -> &[TypeInfo] {
&self.types
}
/// Enumerate the types of the components of entities stored in this archetype.
///
/// Convenient for dispatching logic which needs to be performed on sets of type ids. For
/// example, suppose you're building a scripting system, and you want to integrate the scripting
/// language with your ECS. This functionality allows you to iterate through all of the
/// archetypes of the world with [`World::archetypes()`](crate::World::archetypes()) and extract
/// all possible combinations of component types which are currently stored in the `World`.
/// From there, you can then create a mapping of archetypes to wrapper objects for your
/// scripting language that provide functionality based off of the components of any given
/// [`Entity`], and bind them onto an [`Entity`] when passed into your scripting language by
/// looking up the [`Entity`]'s archetype using
/// [`EntityRef::component_types`](crate::EntityRef::component_types).
///
/// [`Entity`]: crate::Entity
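    ///
    /// A minimal usage sketch (assumes a populated `world: World`):
    /// ```ignore
    /// for archetype in world.archetypes() {
    ///     let types: Vec<_> = archetype.component_types().collect();
    ///     // dispatch on `types` here
    /// }
    /// ```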
pub fn component_types(&self) -> impl ExactSizeIterator<Item = TypeId> + '_ {
self.types.iter().map(|typeinfo| typeinfo.id)
}
/// `index` must be in-bounds or just past the end
pub(crate) unsafe fn get_dynamic(
&self,
ty: TypeId,
size: usize,
index: u32,
) -> Option<NonNull<u8>> {
debug_assert!(index <= self.len);
Some(NonNull::new_unchecked(
self.data
.get_unchecked(*self.index.get(&ty)?)
.storage
.as_ptr()
.add(size * index as usize)
.cast::<u8>(),
))
}
/// Every type must be written immediately after this call
pub(crate) unsafe fn allocate(&mut self, id: u32) -> u32 {
if self.len as usize == self.entities.len() {
self.grow(64);
}
self.entities[self.len as usize] = id;
self.len += 1;
self.len - 1
}
pub(crate) unsafe fn set_len(&mut self, len: u32) {
debug_assert!(len <= self.capacity());
self.len = len;
}
pub(crate) fn reserve(&mut self, additional: u32) {
if additional > (self.capacity() - self.len()) {
let increment = additional - (self.capacity() - self.len());
self.grow(increment.max(64));
}
}
pub(crate) fn capacity(&self) -> u32 {
self.entities.len() as u32
}
/// Increase capacity by at least `min_increment`
fn grow(&mut self, min_increment: u32) {
// Double capacity or increase it by `min_increment`, whichever is larger.
self.grow_exact(self.capacity().max(min_increment))
}
/// Increase capacity by exactly `increment`
fn grow_exact(&mut self, increment: u32) {
unsafe {
let old_count = self.len as usize;
let old_cap = self.entities.len();
let new_cap = self.entities.len() + increment as usize;
let mut new_entities = vec![!0; new_cap].into_boxed_slice();
new_entities[0..old_count].copy_from_slice(&self.entities[0..old_count]);
self.entities = new_entities;
let new_data = self
.types
.iter()
.zip(&*self.data)
.map(|(info, old)| {
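                    // Zero-sized types need no backing allocation; a dangling,
                    // properly aligned pointer is sufficient.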
let storage = if info.layout.size() == 0 {
NonNull::new(info.layout.align() as *mut u8).unwrap()
} else {
let mem = alloc(
Layout::from_size_align(
info.layout.size() * new_cap,
info.layout.align(),
)
.unwrap(),
);
ptr::copy_nonoverlapping(
old.storage.as_ptr(),
mem,
info.layout.size() * old_count,
);
if old_cap > 0 {
dealloc(
old.storage.as_ptr(),
Layout::from_size_align(
info.layout.size() * old_cap,
info.layout.align(),
)
.unwrap(),
);
}
NonNull::new(mem).unwrap()
};
Data {
state: AtomicBorrow::new(), // &mut self guarantees no outstanding borrows
storage,
}
})
.collect::<Box<[_]>>();
self.data = new_data;
}
}
/// Returns the ID of the entity moved into `index`, if any
pub(crate) unsafe fn remove(&mut self, index: u32, drop: bool) -> Option<u32> {
let last = self.len - 1;
for (ty, data) in self.types.iter().zip(&*self.data) {
let removed = data.storage.as_ptr().add(index as usize * ty.layout.size());
if drop {
(ty.drop)(removed);
}
if index != last {
let moved = data.storage.as_ptr().add(last as usize * ty.layout.size());
ptr::copy_nonoverlapping(moved, removed, ty.layout.size());
}
}
self.len = last;
if index != last {
self.entities[index as usize] = self.entities[last as usize];
Some(self.entities[last as usize])
} else {
None
}
}
/// Returns the ID of the entity moved into `index`, if any
pub(crate) unsafe fn move_to(
&mut self,
index: u32,
mut f: impl FnMut(*mut u8, TypeId, usize),
) -> Option<u32> {
let last = self.len - 1;
for (ty, data) in self.types.iter().zip(&*self.data) {
let moved_out = data.storage.as_ptr().add(index as usize * ty.layout.size());
f(moved_out, ty.id(), ty.layout().size());
if index != last {
let moved = data.storage.as_ptr().add(last as usize * ty.layout.size());
ptr::copy_nonoverlapping(moved, moved_out, ty.layout.size());
}
}
self.len -= 1;
if index != last {
self.entities[index as usize] = self.entities[last as usize];
Some(self.entities[last as usize])
} else {
None
}
}
pub(crate) unsafe fn put_dynamic(
&mut self,
component: *mut u8,
ty: TypeId,
size: usize,
index: u32,
) {
let ptr = self
.get_dynamic(ty, size, index)
.unwrap()
.as_ptr()
.cast::<u8>();
ptr::copy_nonoverlapping(component, ptr, size);
}
/// How, if at all, `Q` will access entities in this archetype
pub fn access<Q: Query>(&self) -> Option<Access> {
Q::Fetch::access(self)
}
/// Add components from another archetype with identical components
///
/// # Safety
///
/// Component types must match exactly.
pub(crate) unsafe fn merge(&mut self, mut other: Archetype) {
self.reserve(other.len);
for ((info, dst), src) in self.types.iter().zip(&*self.data).zip(&*other.data) {
dst.storage
.as_ptr()
.add(self.len as usize * info.layout.size())
.copy_from_nonoverlapping(
src.storage.as_ptr(),
other.len as usize * info.layout.size(),
)
}
self.len += other.len;
other.len = 0;
}
/// Raw IDs of the entities in this archetype
///
/// Convertible into [`Entity`](crate::Entity)s with
/// [`World::find_entity_from_id()`](crate::World::find_entity_from_id). Useful for efficient
/// serialization.
#[inline]
pub fn ids(&self) -> &[u32] {
&self.entities[0..self.len as usize]
}
}
impl Drop for Archetype {
fn drop(&mut self) {
self.clear();
if self.entities.len() == 0 {
return;
}
for (info, data) in self.types.iter().zip(&*self.data) {
if info.layout.size() != 0 {
unsafe {
dealloc(
data.storage.as_ptr(),
Layout::from_size_align_unchecked(
info.layout.size() * self.entities.len(),
info.layout.align(),
),
);
}
}
}
}
}
struct Data {
state: AtomicBorrow,
storage: NonNull<u8>,
}
/// A hasher optimized for hashing a single TypeId.
///
/// TypeId is already thoroughly hashed, so there's no reason to hash it again.
/// Just leave the bits unchanged.
#[derive(Default)]
pub(crate) struct TypeIdHasher {
hash: u64,
}
impl Hasher for TypeIdHasher {
fn write_u64(&mut self, n: u64) {
// Only a single value can be hashed, so the old hash should be zero.
debug_assert_eq!(self.hash, 0);
self.hash = n;
}
// Tolerate TypeId being either u64 or u128.
fn write_u128(&mut self, n: u128) {
debug_assert_eq!(self.hash, 0);
self.hash = n as u64;
}
fn write(&mut self, bytes: &[u8]) {
debug_assert_eq!(self.hash, 0);
// This will only be called if TypeId is neither u64 nor u128, which is not anticipated.
// In that case we'll just fall back to using a different hash implementation.
let mut hasher = <DefaultHashBuilder as BuildHasher>::Hasher::default();
hasher.write(bytes);
self.hash = hasher.finish();
}
fn finish(&self) -> u64 {
self.hash
}
}
/// A HashMap with TypeId keys
///
/// Because TypeId is already a fully-hashed u64 (including data in the high seven bits,
/// which hashbrown needs), there is no need to hash it again. Instead, this uses the much
/// faster no-op hash.
pub(crate) type TypeIdMap<V> = HashMap<TypeId, V, BuildHasherDefault<TypeIdHasher>>;
pub(crate) struct OrderedTypeIdMap<V>(Box<[(TypeId, V)]>);
impl<V> OrderedTypeIdMap<V> {
fn new(iter: impl Iterator<Item = (TypeId, V)>) -> Self {
let mut vals = iter.collect::<Box<[_]>>();
vals.sort_unstable_by_key(|(id, _)| *id);
Self(vals)
}
fn search(&self, id: &TypeId) -> Option<usize> {
self.0.binary_search_by_key(id, |(id, _)| *id).ok()
}
fn contains_key(&self, id: &TypeId) -> bool {
self.search(id).is_some()
}
fn get(&self, id: &TypeId) -> Option<&V> {
self.search(id).map(move |idx| &self.0[idx].1)
}
}
/// Metadata required to store a component.
///
/// All told, this means a [`TypeId`], to be able to dynamically name/check the component type; a
/// [`Layout`], so that we know how to allocate memory for this component type; and a drop function
/// which internally calls [`core::ptr::drop_in_place`] with the correct type parameter.
#[derive(Debug, Copy, Clone)]
pub struct TypeInfo {
id: TypeId,
layout: Layout,
drop: unsafe fn(*mut u8),
#[cfg(debug_assertions)]
type_name: &'static str,
}
impl TypeInfo {
/// Construct a `TypeInfo` directly from the static type.
pub fn of<T: 'static>() -> Self {
unsafe fn drop_ptr<T>(x: *mut u8) {
x.cast::<T>().drop_in_place()
}
Self {
id: TypeId::of::<T>(),
layout: Layout::new::<T>(),
drop: drop_ptr::<T>,
#[cfg(debug_assertions)]
type_name: core::any::type_name::<T>(),
}
}
/// Construct a `TypeInfo` from its components. This is useful in the rare case that you have
/// some kind of pointer to raw bytes/erased memory holding a component type, coming from a
/// source unrelated to hecs, and you want to treat it as an insertable component by
/// implementing the `DynamicBundle` API.
pub fn from_parts(id: TypeId, layout: Layout, drop: unsafe fn(*mut u8)) -> Self {
Self {
id,
layout,
drop,
#[cfg(debug_assertions)]
type_name: "<unknown> (TypeInfo constructed from parts)",
}
}
/// Access the `TypeId` for this component type.
pub fn id(&self) -> TypeId {
self.id
}
/// Access the `Layout` of this component type.
pub fn layout(&self) -> Layout {
self.layout
}
/// Directly call the destructor on a pointer to data of this component type.
///
/// # Safety
///
/// All of the caveats of [`core::ptr::drop_in_place`] apply, with the additional requirement
/// that this method is being called on a pointer to an object of the correct component type.
pub unsafe fn drop(&self, data: *mut u8) {
(self.drop)(data)
}
/// Get the function pointer encoding the destructor for the component type this `TypeInfo`
/// represents.
pub fn drop_shim(&self) -> unsafe fn(*mut u8) {
self.drop
}
#[cfg(debug_assertions)]
pub(crate) fn name(&self) -> Option<&str> {
Some(self.type_name)
}
#[cfg(not(debug_assertions))]
pub(crate) fn name(&self) -> Option<&str> {
None
}
}
impl PartialOrd for TypeInfo {
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl Ord for TypeInfo {
/// Order by alignment, descending. Ties broken with TypeId.
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
self.layout
.align()
.cmp(&other.layout.align())
.reverse()
.then_with(|| self.id.cmp(&other.id))
}
}
impl PartialEq for TypeInfo {
fn eq(&self, other: &Self) -> bool {
self.id == other.id
}
}
impl Eq for TypeInfo {}
/// Shared reference to a single column of component data in an [`Archetype`]
pub struct ArchetypeColumn<'a, T: Component> {
archetype: &'a Archetype,
column: &'a [T],
}
impl<T: Component> Deref for ArchetypeColumn<'_, T> {
type Target = [T];
fn deref(&self) -> &[T] {
self.column
}
}
impl<T: Component> Drop for ArchetypeColumn<'_, T> {
fn drop(&mut self) {
let state = self.archetype.get_state::<T>().unwrap();
self.archetype.release::<T>(state);
}
}
impl<T: Component> Clone for ArchetypeColumn<'_, T> {
fn clone(&self) -> Self {
let state = self.archetype.get_state::<T>().unwrap();
self.archetype.borrow::<T>(state);
Self {
archetype: self.archetype,
column: self.column,
}
}
}
impl<T: Component + fmt::Debug> fmt::Debug for ArchetypeColumn<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.column.fmt(f)
}
}
| 32.943307 | 100 | 0.544768 |
039caef3d998bc3fb667ce29963ae31e4a7697b3 | 555 | // -*- rust -*-
use std;
import comm;
import comm::port;
import comm::send;
import comm::chan;
import comm::recv;
import task;
fn a(c: chan<int>) { #debug("task a0"); #debug("task a1"); send(c, 10); }
fn main() {
let p = port();
let ch = chan(p);
task::spawn(|| a(ch) );
task::spawn(|| b(ch) );
let mut n: int = 0;
n = recv(p);
n = recv(p);
#debug("Finished.");
}
fn b(c: chan<int>) {
#debug("task b0");
#debug("task b1");
#debug("task b2");
#debug("task b2");
#debug("task b3");
send(c, 10);
}
| 17.34375 | 73 | 0.50991 |
e95c8b810b945ba12009c247b866d3ff2a512f66 | 50 | //! 数据表模型
mod account;
pub use account::Account;
| 10 | 25 | 0.7 |
62d8ba826f452e7c0473115702f34a3a054170b8 | 19,972 | // Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Buffer whose content is accessible to the CPU.
//!
//! The `CpuAccessibleBuffer` is a basic general-purpose buffer. It can be used in any situation
//! but may not perform as well as other buffer types.
//!
//! Each access from the CPU or from the GPU locks the whole buffer for either reading or writing.
//! You can read the buffer multiple times simultaneously. Trying to read and write simultaneously,
//! or write and write simultaneously will block.
use smallvec::SmallVec;
use std::error;
use std::fmt;
use std::iter;
use std::marker::PhantomData;
use std::mem;
use std::ops::Deref;
use std::ops::DerefMut;
use std::ptr;
use std::sync::Arc;
use std::sync::RwLock;
use std::sync::RwLockReadGuard;
use std::sync::RwLockWriteGuard;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use buffer::BufferUsage;
use buffer::sys::BufferCreationError;
use buffer::sys::SparseLevel;
use buffer::sys::UnsafeBuffer;
use buffer::traits::BufferAccess;
use buffer::traits::BufferInner;
use buffer::traits::TypedBufferAccess;
use device::Device;
use device::DeviceOwned;
use device::Queue;
use image::ImageAccess;
use instance::QueueFamily;
use memory::Content;
use memory::CpuAccess as MemCpuAccess;
use memory::DedicatedAlloc;
use memory::DeviceMemoryAllocError;
use memory::pool::AllocFromRequirementsFilter;
use memory::pool::AllocLayout;
use memory::pool::MappingRequirement;
use memory::pool::MemoryPool;
use memory::pool::MemoryPoolAlloc;
use memory::pool::PotentialDedicatedAllocation;
use memory::pool::StdMemoryPoolAlloc;
use sync::AccessError;
use sync::Sharing;
/// Buffer whose content is accessible by the CPU.
#[derive(Debug)]
pub struct CpuAccessibleBuffer<T: ?Sized, A = PotentialDedicatedAllocation<StdMemoryPoolAlloc>> {
// Inner content.
inner: UnsafeBuffer,
// The memory held by the buffer.
memory: A,
// Access pattern of the buffer.
// Every time the user tries to read or write the buffer from the CPU, this `RwLock` is kept
// locked and its content is checked to verify that we are allowed access. Every time the user
// tries to submit this buffer for the GPU, this `RwLock` is briefly locked and modified.
access: RwLock<CurrentGpuAccess>,
// Queue families allowed to access this buffer.
queue_families: SmallVec<[u32; 4]>,
// Necessary to make it compile.
marker: PhantomData<Box<T>>,
}
#[derive(Debug)]
enum CurrentGpuAccess {
NonExclusive {
// Number of non-exclusive GPU accesses. Can be 0.
num: AtomicUsize,
},
Exclusive {
// Number of exclusive locks. Cannot be 0. If 0 is reached, we must jump to `NonExclusive`.
num: usize,
},
}
impl<T> CpuAccessibleBuffer<T> {
/// Builds a new buffer with some data in it. Only allowed for sized data.
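    ///
    /// A minimal usage sketch (assumes a `device: Arc<Device>` from instance setup):
    /// ```ignore
    /// let buffer = CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), 42u32)?;
    /// ```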
pub fn from_data(device: Arc<Device>, usage: BufferUsage, data: T)
-> Result<Arc<CpuAccessibleBuffer<T>>, DeviceMemoryAllocError>
where T: Content + 'static
{
unsafe {
let uninitialized =
CpuAccessibleBuffer::raw(device, mem::size_of::<T>(), usage, iter::empty())?;
// Note that we are in panic-unsafety land here. However a panic should never ever
// happen here, so in theory we are safe.
// TODO: check whether that's true ^
{
let mut mapping = uninitialized.write().unwrap();
ptr::write::<T>(&mut *mapping, data)
}
Ok(uninitialized)
}
}
/// Builds a new uninitialized buffer. Only allowed for sized data.
#[inline]
pub unsafe fn uninitialized(device: Arc<Device>, usage: BufferUsage)
-> Result<Arc<CpuAccessibleBuffer<T>>, DeviceMemoryAllocError> {
CpuAccessibleBuffer::raw(device, mem::size_of::<T>(), usage, iter::empty())
}
}
impl<T> CpuAccessibleBuffer<[T]> {
/// Builds a new buffer that contains an array `T`. The initial data comes from an iterator
/// that produces that list of Ts.
pub fn from_iter<I>(device: Arc<Device>, usage: BufferUsage, data: I)
-> Result<Arc<CpuAccessibleBuffer<[T]>>, DeviceMemoryAllocError>
where I: ExactSizeIterator<Item = T>,
T: Content + 'static
{
unsafe {
let uninitialized =
CpuAccessibleBuffer::uninitialized_array(device, data.len(), usage)?;
// Note that we are in panic-unsafety land here. However a panic should never ever
// happen here, so in theory we are safe.
// TODO: check whether that's true ^
{
let mut mapping = uninitialized.write().unwrap();
for (i, o) in data.zip(mapping.iter_mut()) {
ptr::write(o, i);
}
}
Ok(uninitialized)
}
}
/// Builds a new buffer. Can be used for arrays.
#[inline]
pub unsafe fn uninitialized_array(
device: Arc<Device>, len: usize, usage: BufferUsage)
-> Result<Arc<CpuAccessibleBuffer<[T]>>, DeviceMemoryAllocError> {
CpuAccessibleBuffer::raw(device, len * mem::size_of::<T>(), usage, iter::empty())
}
}
impl<T: ?Sized> CpuAccessibleBuffer<T> {
/// Builds a new buffer without checking the size.
///
/// # Safety
///
/// You must ensure that the size that you pass is correct for `T`.
///
pub unsafe fn raw<'a, I>(device: Arc<Device>, size: usize, usage: BufferUsage,
queue_families: I)
-> Result<Arc<CpuAccessibleBuffer<T>>, DeviceMemoryAllocError>
where I: IntoIterator<Item = QueueFamily<'a>>
{
let queue_families = queue_families
.into_iter()
.map(|f| f.id())
.collect::<SmallVec<[u32; 4]>>();
let (buffer, mem_reqs) = {
let sharing = if queue_families.len() >= 2 {
Sharing::Concurrent(queue_families.iter().cloned())
} else {
Sharing::Exclusive
};
match UnsafeBuffer::new(device.clone(), size, usage, sharing, SparseLevel::none()) {
Ok(b) => b,
Err(BufferCreationError::AllocError(err)) => return Err(err),
Err(_) => unreachable!(), // We don't use sparse binding, therefore the other
// errors can't happen
}
};
let mem = MemoryPool::alloc_from_requirements(&Device::standard_pool(&device),
&mem_reqs,
AllocLayout::Linear,
MappingRequirement::Map,
DedicatedAlloc::Buffer(&buffer),
|_| AllocFromRequirementsFilter::Allowed)?;
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
debug_assert!(mem.mapped_memory().is_some());
buffer.bind_memory(mem.memory(), mem.offset())?;
Ok(Arc::new(CpuAccessibleBuffer {
inner: buffer,
memory: mem,
access: RwLock::new(CurrentGpuAccess::NonExclusive {
num: AtomicUsize::new(0),
}),
queue_families: queue_families,
marker: PhantomData,
}))
}
}
impl<T: ?Sized, A> CpuAccessibleBuffer<T, A> {
/// Returns the queue families this buffer can be used on.
// TODO: use a custom iterator
#[inline]
pub fn queue_families(&self) -> Vec<QueueFamily> {
self.queue_families
.iter()
.map(|&num| {
self.device()
.physical_device()
.queue_family_by_id(num)
.unwrap()
})
.collect()
}
}
impl<T: ?Sized, A> CpuAccessibleBuffer<T, A>
where T: Content + 'static,
A: MemoryPoolAlloc
{
/// Locks the buffer in order to read its content from the CPU.
///
/// If the buffer is currently used in exclusive mode by the GPU, this function will return
/// an error. Similarly if you called `write()` on the buffer and haven't dropped the lock,
/// this function will return an error as well.
///
/// After this function successfully locks the buffer, any attempt to submit a command buffer
/// that uses it in exclusive mode will fail. You can still submit this buffer for non-exclusive
    /// accesses (i.e. reads).
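    ///
    /// A minimal usage sketch (assumes `buffer: Arc<CpuAccessibleBuffer<u32>>`):
    /// ```ignore
    /// let content = buffer.read()?;
    /// println!("{}", *content);
    /// ```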
#[inline]
pub fn read(&self) -> Result<ReadLock<T>, ReadLockError> {
let lock = match self.access.try_read() {
Ok(l) => l,
// TODO: if a user simultaneously calls .write(), and write() is currently finding out
// that the buffer is in fact GPU locked, then we will return a CpuWriteLocked
// error instead of a GpuWriteLocked ; is this a problem? how do we fix this?
Err(_) => return Err(ReadLockError::CpuWriteLocked),
};
if let CurrentGpuAccess::Exclusive { .. } = *lock {
return Err(ReadLockError::GpuWriteLocked);
}
let offset = self.memory.offset();
let range = offset .. offset + self.inner.size();
Ok(ReadLock {
inner: unsafe { self.memory.mapped_memory().unwrap().read_write(range) },
lock: lock,
})
}
/// Locks the buffer in order to write its content from the CPU.
///
/// If the buffer is currently in use by the GPU, this function will return an error. Similarly
/// if you called `read()` on the buffer and haven't dropped the lock, this function will
/// return an error as well.
///
/// After this function successfully locks the buffer, any attempt to submit a command buffer
/// that uses it and any attempt to call `read()` will return an error.
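    ///
    /// A minimal usage sketch (assumes `buffer: Arc<CpuAccessibleBuffer<u32>>`):
    /// ```ignore
    /// let mut content = buffer.write()?;
    /// *content = 5;
    /// ```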
#[inline]
pub fn write(&self) -> Result<WriteLock<T>, WriteLockError> {
let lock = match self.access.try_write() {
Ok(l) => l,
// TODO: if a user simultaneously calls .read() or .write(), and the function is
// currently finding out that the buffer is in fact GPU locked, then we will
// return a CpuLocked error instead of a GpuLocked ; is this a problem?
// how do we fix this?
Err(_) => return Err(WriteLockError::CpuLocked),
};
match *lock {
CurrentGpuAccess::NonExclusive { ref num } if num.load(Ordering::SeqCst) == 0 => (),
_ => return Err(WriteLockError::GpuLocked),
}
let offset = self.memory.offset();
let range = offset .. offset + self.inner.size();
Ok(WriteLock {
inner: unsafe { self.memory.mapped_memory().unwrap().read_write(range) },
lock: lock,
})
}
}
unsafe impl<T: ?Sized, A> BufferAccess for CpuAccessibleBuffer<T, A>
where T: 'static + Send + Sync
{
#[inline]
fn inner(&self) -> BufferInner {
BufferInner {
buffer: &self.inner,
offset: 0,
}
}
#[inline]
fn size(&self) -> usize {
self.inner.size()
}
#[inline]
fn conflicts_buffer(&self, other: &BufferAccess) -> bool {
self.conflict_key() == other.conflict_key() // TODO:
}
#[inline]
fn conflicts_image(&self, other: &ImageAccess) -> bool {
false
}
#[inline]
fn conflict_key(&self) -> (u64, usize) {
(self.inner.key(), 0)
}
#[inline]
fn try_gpu_lock(&self, exclusive_access: bool, _: &Queue) -> Result<(), AccessError> {
if exclusive_access {
let mut lock = match self.access.try_write() {
Ok(lock) => lock,
Err(_) => return Err(AccessError::AlreadyInUse),
};
match *lock {
CurrentGpuAccess::NonExclusive { ref num } if num.load(Ordering::SeqCst) == 0 => (),
_ => return Err(AccessError::AlreadyInUse),
};
*lock = CurrentGpuAccess::Exclusive { num: 1 };
Ok(())
} else {
let lock = match self.access.try_read() {
Ok(lock) => lock,
Err(_) => return Err(AccessError::AlreadyInUse),
};
match *lock {
CurrentGpuAccess::Exclusive { .. } => return Err(AccessError::AlreadyInUse),
CurrentGpuAccess::NonExclusive { ref num } => {
num.fetch_add(1, Ordering::SeqCst)
},
};
Ok(())
}
}
#[inline]
unsafe fn increase_gpu_lock(&self) {
// First, handle if we have a non-exclusive access.
{
// Since the buffer is in use by the GPU, it is invalid to hold a write-lock to
// the buffer. The buffer can still be briefly in a write-locked state for the duration
// of the check though.
let read_lock = self.access.read().unwrap();
if let CurrentGpuAccess::NonExclusive { ref num } = *read_lock {
let prev = num.fetch_add(1, Ordering::SeqCst);
debug_assert!(prev >= 1);
return;
}
}
// If we reach here, this means that `access` contains `CurrentGpuAccess::Exclusive`.
{
// Same remark as above, but for writing.
let mut write_lock = self.access.write().unwrap();
if let CurrentGpuAccess::Exclusive { ref mut num } = *write_lock {
*num += 1;
} else {
unreachable!()
}
}
}
#[inline]
unsafe fn unlock(&self) {
// First, handle if we had a non-exclusive access.
{
// Since the buffer is in use by the GPU, it is invalid to hold a write-lock to
// the buffer. The buffer can still be briefly in a write-locked state for the duration
// of the check though.
let read_lock = self.access.read().unwrap();
if let CurrentGpuAccess::NonExclusive { ref num } = *read_lock {
let prev = num.fetch_sub(1, Ordering::SeqCst);
debug_assert!(prev >= 1);
return;
}
}
// If we reach here, this means that `access` contains `CurrentGpuAccess::Exclusive`.
{
// Same remark as above, but for writing.
let mut write_lock = self.access.write().unwrap();
if let CurrentGpuAccess::Exclusive { ref mut num } = *write_lock {
if *num != 1 {
*num -= 1;
return;
}
} else {
                // This can happen if we lock in exclusive mode N times and unlock N+1 times,
                // with the last two unlocks happening simultaneously.
panic!()
}
*write_lock = CurrentGpuAccess::NonExclusive { num: AtomicUsize::new(0) };
}
}
}
unsafe impl<T: ?Sized, A> TypedBufferAccess for CpuAccessibleBuffer<T, A>
where T: 'static + Send + Sync
{
type Content = T;
}
unsafe impl<T: ?Sized, A> DeviceOwned for CpuAccessibleBuffer<T, A> {
#[inline]
fn device(&self) -> &Arc<Device> {
self.inner.device()
}
}
/// Object that can be used to read or write the content of a `CpuAccessibleBuffer`.
///
/// Note that this object holds a rwlock read guard on the chunk. If another thread tries to access
/// this buffer's content or tries to submit a GPU command that uses this buffer, it will block.
pub struct ReadLock<'a, T: ?Sized + 'a> {
inner: MemCpuAccess<'a, T>,
lock: RwLockReadGuard<'a, CurrentGpuAccess>,
}
impl<'a, T: ?Sized + 'a> ReadLock<'a, T> {
/// Makes a new `ReadLock` to access a sub-part of the current `ReadLock`.
#[inline]
pub fn map<U: ?Sized + 'a, F>(self, f: F) -> ReadLock<'a, U>
where F: FnOnce(&mut T) -> &mut U
{
ReadLock {
inner: self.inner.map(|ptr| unsafe { f(&mut *ptr) as *mut _ }),
lock: self.lock,
}
}
}
impl<'a, T: ?Sized + 'a> Deref for ReadLock<'a, T> {
type Target = T;
#[inline]
fn deref(&self) -> &T {
self.inner.deref()
}
}
/// Error when attempting to CPU-read a buffer.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ReadLockError {
/// The buffer is already locked for write mode by the CPU.
CpuWriteLocked,
/// The buffer is already locked for write mode by the GPU.
GpuWriteLocked,
}
impl error::Error for ReadLockError {
#[inline]
fn description(&self) -> &str {
match *self {
ReadLockError::CpuWriteLocked => {
"the buffer is already locked for write mode by the CPU"
},
ReadLockError::GpuWriteLocked => {
"the buffer is already locked for write mode by the GPU"
},
}
}
}
impl fmt::Display for ReadLockError {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(fmt, "{}", error::Error::description(self))
}
}
/// Object that can be used to read or write the content of a `CpuAccessibleBuffer`.
///
/// Note that this object holds a rwlock write guard on the chunk. If another thread tries to access
/// this buffer's content or tries to submit a GPU command that uses this buffer, it will block.
pub struct WriteLock<'a, T: ?Sized + 'a> {
inner: MemCpuAccess<'a, T>,
lock: RwLockWriteGuard<'a, CurrentGpuAccess>,
}
impl<'a, T: ?Sized + 'a> WriteLock<'a, T> {
/// Makes a new `WriteLock` to access a sub-part of the current `WriteLock`.
#[inline]
pub fn map<U: ?Sized + 'a, F>(self, f: F) -> WriteLock<'a, U>
where F: FnOnce(&mut T) -> &mut U
{
WriteLock {
inner: self.inner.map(|ptr| unsafe { f(&mut *ptr) as *mut _ }),
lock: self.lock,
}
}
}
impl<'a, T: ?Sized + 'a> Deref for WriteLock<'a, T> {
type Target = T;
#[inline]
fn deref(&self) -> &T {
self.inner.deref()
}
}
impl<'a, T: ?Sized + 'a> DerefMut for WriteLock<'a, T> {
#[inline]
fn deref_mut(&mut self) -> &mut T {
self.inner.deref_mut()
}
}
/// Error when attempting to CPU-write a buffer.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum WriteLockError {
/// The buffer is already locked by the CPU.
CpuLocked,
/// The buffer is already locked by the GPU.
GpuLocked,
}
impl error::Error for WriteLockError {
#[inline]
fn description(&self) -> &str {
match *self {
WriteLockError::CpuLocked => {
"the buffer is already locked by the CPU"
},
WriteLockError::GpuLocked => {
"the buffer is already locked by the GPU"
},
}
}
}
impl fmt::Display for WriteLockError {
#[inline]
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(fmt, "{}", error::Error::description(self))
}
}
#[cfg(test)]
mod tests {
use buffer::{BufferUsage, CpuAccessibleBuffer};
#[test]
fn create_empty_buffer() {
let (device, queue) = gfx_dev_and_queue!();
const EMPTY: [i32; 0] = [];
let _ = CpuAccessibleBuffer::from_data(device, BufferUsage::all(), EMPTY.iter());
}
}
| 34.140171 | 100 | 0.574404 |
16499ee13958a39f09bd7c116d3b763070de8c36 | 15,971 | #[doc = "Writer for register PMC_SLPWK_ER1"]
pub type W = crate::W<u32, super::PMC_SLPWK_ER1>;
#[doc = "Register PMC_SLPWK_ER1 `reset()`'s with value 0"]
impl crate::ResetValue for super::PMC_SLPWK_ER1 {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Write proxy for field `PID32`"]
pub struct PID32_W<'a> {
w: &'a mut W,
}
impl<'a> PID32_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
self.w
}
}
#[doc = "Write proxy for field `PID33`"]
pub struct PID33_W<'a> {
w: &'a mut W,
}
impl<'a> PID33_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
self.w
}
}
#[doc = "Write proxy for field `PID34`"]
pub struct PID34_W<'a> {
w: &'a mut W,
}
impl<'a> PID34_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
self.w
}
}
#[doc = "Write proxy for field `PID40`"]
pub struct PID40_W<'a> {
w: &'a mut W,
}
impl<'a> PID40_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u32) & 0x01) << 8);
self.w
}
}
#[doc = "Write proxy for field `PID41`"]
pub struct PID41_W<'a> {
w: &'a mut W,
}
impl<'a> PID41_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9);
self.w
}
}
#[doc = "Write proxy for field `PID42`"]
pub struct PID42_W<'a> {
w: &'a mut W,
}
impl<'a> PID42_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 10)) | (((value as u32) & 0x01) << 10);
self.w
}
}
#[doc = "Write proxy for field `PID43`"]
pub struct PID43_W<'a> {
w: &'a mut W,
}
impl<'a> PID43_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 11)) | (((value as u32) & 0x01) << 11);
self.w
}
}
#[doc = "Write proxy for field `PID44`"]
pub struct PID44_W<'a> {
w: &'a mut W,
}
impl<'a> PID44_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 12)) | (((value as u32) & 0x01) << 12);
self.w
}
}
#[doc = "Write proxy for field `PID45`"]
pub struct PID45_W<'a> {
w: &'a mut W,
}
impl<'a> PID45_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 13)) | (((value as u32) & 0x01) << 13);
self.w
}
}
#[doc = "Write proxy for field `PID46`"]
pub struct PID46_W<'a> {
w: &'a mut W,
}
impl<'a> PID46_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 14)) | (((value as u32) & 0x01) << 14);
self.w
}
}
#[doc = "Write proxy for field `PID47`"]
pub struct PID47_W<'a> {
w: &'a mut W,
}
impl<'a> PID47_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 15)) | (((value as u32) & 0x01) << 15);
self.w
}
}
#[doc = "Write proxy for field `PID48`"]
pub struct PID48_W<'a> {
w: &'a mut W,
}
impl<'a> PID48_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 16)) | (((value as u32) & 0x01) << 16);
self.w
}
}
#[doc = "Write proxy for field `PID49`"]
pub struct PID49_W<'a> {
w: &'a mut W,
}
impl<'a> PID49_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 17)) | (((value as u32) & 0x01) << 17);
self.w
}
}
#[doc = "Write proxy for field `PID50`"]
pub struct PID50_W<'a> {
w: &'a mut W,
}
impl<'a> PID50_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 18)) | (((value as u32) & 0x01) << 18);
self.w
}
}
#[doc = "Write proxy for field `PID51`"]
pub struct PID51_W<'a> {
w: &'a mut W,
}
impl<'a> PID51_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 19)) | (((value as u32) & 0x01) << 19);
self.w
}
}
#[doc = "Write proxy for field `PID52`"]
pub struct PID52_W<'a> {
w: &'a mut W,
}
impl<'a> PID52_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 20)) | (((value as u32) & 0x01) << 20);
self.w
}
}
#[doc = "Write proxy for field `PID56`"]
pub struct PID56_W<'a> {
w: &'a mut W,
}
impl<'a> PID56_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 24)) | (((value as u32) & 0x01) << 24);
self.w
}
}
#[doc = "Write proxy for field `PID57`"]
pub struct PID57_W<'a> {
w: &'a mut W,
}
impl<'a> PID57_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 25)) | (((value as u32) & 0x01) << 25);
self.w
}
}
#[doc = "Write proxy for field `PID58`"]
pub struct PID58_W<'a> {
w: &'a mut W,
}
impl<'a> PID58_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 26)) | (((value as u32) & 0x01) << 26);
self.w
}
}
#[doc = "Write proxy for field `PID59`"]
pub struct PID59_W<'a> {
w: &'a mut W,
}
impl<'a> PID59_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 27)) | (((value as u32) & 0x01) << 27);
self.w
}
}
#[doc = "Write proxy for field `PID60`"]
pub struct PID60_W<'a> {
w: &'a mut W,
}
impl<'a> PID60_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 28)) | (((value as u32) & 0x01) << 28);
self.w
}
}
impl W {
#[doc = "Bit 0 - Peripheral 32 SleepWalking Enable"]
#[inline(always)]
pub fn pid32(&mut self) -> PID32_W {
PID32_W { w: self }
}
#[doc = "Bit 1 - Peripheral 33 SleepWalking Enable"]
#[inline(always)]
pub fn pid33(&mut self) -> PID33_W {
PID33_W { w: self }
}
#[doc = "Bit 2 - Peripheral 34 SleepWalking Enable"]
#[inline(always)]
pub fn pid34(&mut self) -> PID34_W {
PID34_W { w: self }
}
#[doc = "Bit 8 - Peripheral 40 SleepWalking Enable"]
#[inline(always)]
pub fn pid40(&mut self) -> PID40_W {
PID40_W { w: self }
}
#[doc = "Bit 9 - Peripheral 41 SleepWalking Enable"]
#[inline(always)]
pub fn pid41(&mut self) -> PID41_W {
PID41_W { w: self }
}
#[doc = "Bit 10 - Peripheral 42 SleepWalking Enable"]
#[inline(always)]
pub fn pid42(&mut self) -> PID42_W {
PID42_W { w: self }
}
#[doc = "Bit 11 - Peripheral 43 SleepWalking Enable"]
#[inline(always)]
pub fn pid43(&mut self) -> PID43_W {
PID43_W { w: self }
}
#[doc = "Bit 12 - Peripheral 44 SleepWalking Enable"]
#[inline(always)]
pub fn pid44(&mut self) -> PID44_W {
PID44_W { w: self }
}
#[doc = "Bit 13 - Peripheral 45 SleepWalking Enable"]
#[inline(always)]
pub fn pid45(&mut self) -> PID45_W {
PID45_W { w: self }
}
#[doc = "Bit 14 - Peripheral 46 SleepWalking Enable"]
#[inline(always)]
pub fn pid46(&mut self) -> PID46_W {
PID46_W { w: self }
}
#[doc = "Bit 15 - Peripheral 47 SleepWalking Enable"]
#[inline(always)]
pub fn pid47(&mut self) -> PID47_W {
PID47_W { w: self }
}
#[doc = "Bit 16 - Peripheral 48 SleepWalking Enable"]
#[inline(always)]
pub fn pid48(&mut self) -> PID48_W {
PID48_W { w: self }
}
#[doc = "Bit 17 - Peripheral 49 SleepWalking Enable"]
#[inline(always)]
pub fn pid49(&mut self) -> PID49_W {
PID49_W { w: self }
}
#[doc = "Bit 18 - Peripheral 50 SleepWalking Enable"]
#[inline(always)]
pub fn pid50(&mut self) -> PID50_W {
PID50_W { w: self }
}
#[doc = "Bit 19 - Peripheral 51 SleepWalking Enable"]
#[inline(always)]
pub fn pid51(&mut self) -> PID51_W {
PID51_W { w: self }
}
#[doc = "Bit 20 - Peripheral 52 SleepWalking Enable"]
#[inline(always)]
pub fn pid52(&mut self) -> PID52_W {
PID52_W { w: self }
}
#[doc = "Bit 24 - Peripheral 56 SleepWalking Enable"]
#[inline(always)]
pub fn pid56(&mut self) -> PID56_W {
PID56_W { w: self }
}
#[doc = "Bit 25 - Peripheral 57 SleepWalking Enable"]
#[inline(always)]
pub fn pid57(&mut self) -> PID57_W {
PID57_W { w: self }
}
#[doc = "Bit 26 - Peripheral 58 SleepWalking Enable"]
#[inline(always)]
pub fn pid58(&mut self) -> PID58_W {
PID58_W { w: self }
}
#[doc = "Bit 27 - Peripheral 59 SleepWalking Enable"]
#[inline(always)]
pub fn pid59(&mut self) -> PID59_W {
PID59_W { w: self }
}
#[doc = "Bit 28 - Peripheral 60 SleepWalking Enable"]
#[inline(always)]
pub fn pid60(&mut self) -> PID60_W {
PID60_W { w: self }
}
}
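// Usage sketch (assumes the svd2rust-generated `Peripherals` struct that owns
// this register); each write proxy returns `&mut W`, so calls chain inside the
// closure:
//
//     peripherals.PMC.pmc_slpwk_er1.write(|w| {
//         w.pid32().set_bit()  // enable SleepWalking for peripheral 32
//          .pid40().set_bit()  // ... and for peripheral 40
//     });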
| 27.536207 | 86 | 0.522823 |
01a549e7f9f85237a0c0b6122f547f050fc9cc7f | 527 | use graph::*;
use errors::*;
use ops::interface::default::*;
use super::super::ids;
use std::convert::AsRef;
use std::ops::DerefMut;
//use std::borrow::Borrow;
pub fn mat_mul<T1: AsRef<Expr>, T2: AsRef<Expr>>(arg0: T1, arg1: T2) -> Result<Expr> {
let arg0 = arg0.as_ref();
let arg1 = arg1.as_ref();
same_graph_2(arg0, arg1)?;
let ref wrapper = arg0.wrapper;
let result = {
let mut g = wrapper.get_mut();
ids::mat_mul(g.deref_mut(), arg0.id, arg1.id)?
};
wrapper.as_expr(result)
}
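// Usage sketch (assumes `a` and `b` are `Expr` values built on the same graph;
// mixing expressions from different graphs makes `same_graph_2` return an error):
//
//     let c = mat_mul(&a, &b)?;   // any `AsRef<Expr>` works for either argument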
| 23.954545 | 86 | 0.614801 |
eda938a8ce8d7f1fc29e6ec2a312ea47447a7adf | 3,118 | // This file is part of dpdk. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/dpdk/master/COPYRIGHT. No part of dpdk, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file.
// Copyright © 2017 The developers of dpdk. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/dpdk/master/COPYRIGHT.
/// A block pool of persistent memory.
#[derive(Debug, Clone)]
pub struct BlockPool(*mut PMEMblkpool, Arc<BlockPoolDropWrapper>);
unsafe impl Send for BlockPool
{
}
unsafe impl Sync for BlockPool
{
}
impl BlockPool
{
/// Validate an existing pool.
#[inline(always)]
pub fn validate(pool_set_file_path: &Path, block_size: usize) -> Result<bool, PmdkError>
{
pool_set_file_path.validate_block_pool_is_consistent(block_size)
}
/// Open an existing pool.
/// Prefer the use of `BlockPoolConfiguration.open_or_create()`.
#[inline(always)]
pub fn open(pool_set_file_path: &Path, validate_block_size_is: Option<usize>) -> Result<Self, PmdkError>
{
let block_size = if let Some(block_size) = validate_block_size_is
{
assert_ne!(block_size, 0, "block_size can not be zero");
block_size
}
else
{
0
};
pool_set_file_path.open_block_pool(block_size).map(Self::from_handle)
}
/// Create a new pool.
/// Prefer the use of `BlockPoolConfiguration.open_or_create()`.
#[inline(always)]
pub fn create(pool_set_file_path: &Path, block_size: usize, pool_size: usize, mode: mode_t) -> Result<Self, PmdkError>
{
pool_set_file_path.create_block_pool(block_size, pool_size, mode).map(Self::from_handle)
}
/// Size of blocks in the block pool.
#[inline(always)]
pub fn block_size(self) -> usize
{
self.0.block_size()
}
/// How many blocks are available (free) in the block pool?
#[inline(always)]
pub fn number_of_blocks_available_in_block_pool(self) -> usize
{
self.0.number_of_blocks_available_in_block_pool()
}
/// Read from a block.
/// Returns false if the block has previously had its error condition set (see `set_error()`).
#[inline(always)]
pub fn read(self, to: *mut c_void, zero_based_block_index: usize) -> bool
{
self.0.read_from(to, zero_based_block_index)
}
/// Write to a block.
#[inline(always)]
pub fn write(self, from: *const c_void, zero_based_block_index: usize)
{
self.0.write_to(from, zero_based_block_index)
}
/// Set a block to all zeros.
#[inline(always)]
pub fn set_zero(self, zero_based_block_index: usize)
{
self.0.set_zero(zero_based_block_index)
}
/// Set a block to being in an error state (ie set its error condition).
#[inline(always)]
pub fn set_error(self, zero_based_block_index: usize)
{
self.0.set_error(zero_based_block_index)
}
#[inline(always)]
fn from_handle(handle: *mut PMEMblkpool) -> Self
{
debug_assert!(handle.is_not_null(), "PMEMblkpool handle is null");
BlockPool(handle, BlockPoolDropWrapper::new(handle))
}
}
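// Usage sketch (the pool-set path, block size, pool size and mode below are
// illustrative assumptions; methods take `self` by value, and `BlockPool` is `Clone`):
//
//     let pool = BlockPool::create(Path::new("/mnt/pmem0/blk.set"), 512, 1 << 30, 0o600)?;
//     pool.clone().write(src.as_ptr() as *const c_void, 0);    // write block 0
//     assert!(pool.read(dst.as_mut_ptr() as *mut c_void, 0));  // false => error flag set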
| 30.271845 | 367 | 0.735407 |
fc89489925d799c170550e11d353f0e9caa8057f | 553 | use crate::BigMapKey;
use naia_serde::{BitReader, BitWrite, Serde, SerdeErr};
// EntityHandle
#[derive(PartialEq, Eq, Hash, Clone, Copy)]
pub struct EntityHandle(u64);
impl BigMapKey for EntityHandle {
fn to_u64(&self) -> u64 {
self.0
}
fn from_u64(value: u64) -> Self {
EntityHandle(value)
}
}
impl Serde for EntityHandle {
fn ser(&self, _: &mut dyn BitWrite) {
panic!("shouldn't call this");
}
fn de(_: &mut BitReader) -> Result<Self, SerdeErr> {
panic!("shouldn't call this");
}
}
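// Round-trip sketch for the `BigMapKey` impl (the `Serde` impl above panics on
// purpose, so handles are never serialized directly):
//
//     let handle = EntityHandle::from_u64(7);
//     assert_eq!(handle.to_u64(), 7);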
| 20.481481 | 56 | 0.611212 |
030afb3a10fec6e6803aac0308fb96b19cce4774 | 7,116 | //! Sub-expression variants.
prelude! {}
// use expr::frame::Frame;
#[derive(Debug, Clone)]
pub struct Variant {
e_idx: idx::Expr,
v_idx: idx::Variant,
data: idx::DataMap<expr::Data>,
src: rust::Variant,
zip_handler_id: rust::Id,
zipper_go_up_id: rust::Id,
}
implement! {
impl Variant {
Index<idx::Data, expr::Data> {
|self, d_idx| &self.data[d_idx]
}
}
}
impl Variant {
pub fn e_idx(&self) -> idx::Expr {
self.e_idx
}
pub fn v_idx(&self) -> idx::Variant {
self.v_idx
}
pub fn v_id(&self) -> &rust::Id {
&self.src.ident
}
pub fn data(&self) -> &idx::DataMap<expr::Data> {
&self.data
}
pub fn is_self_rec(&self) -> bool {
self.data.iter().any(|data| data.is_self_rec())
}
pub fn contains_leaf_data(&self) -> bool {
self.data.iter().any(expr::data::Data::is_leaf)
}
pub fn is_leaf(&self) -> bool {
self.data.iter().all(expr::data::Data::is_leaf)
}
}
impl Variant {
pub fn from_front(
cxt: &mut cxt::PreCxt,
e_idx: idx::Expr,
v_idx: idx::Variant,
variant: &rust::Variant,
) -> Res<Self> {
check::expr_variant(variant)?;
let src = variant.clone();
let mut data = idx::DataMap::with_capacity(src.fields.len());
let fields = match &variant.fields {
syn::Fields::Unit => None,
syn::Fields::Named(fields) => Some(&fields.named),
syn::Fields::Unnamed(fields) => Some(&fields.unnamed),
};
if let Some(fields) = fields {
for field in fields {
let d_idx = data.next_index();
let field_data = expr::data::Data::from_front(cxt, e_idx, v_idx, d_idx, field)?;
let _d_idx = data.push(field_data);
debug_assert_eq!(d_idx, _d_idx)
}
}
let zip_handler_id = gen::fun::variant_handler(cxt[e_idx].e_id(), &src.ident);
let zipper_go_up_id = gen::fun::go_up(cxt[e_idx].e_id(), &variant.ident);
Ok(Self {
e_idx,
v_idx,
data,
src,
zip_handler_id,
zipper_go_up_id,
})
}
pub fn is_struct_like(&self) -> Option<bool> {
match &self.src.fields {
syn::Fields::Named(_) => Some(true),
syn::Fields::Unnamed(_) => Some(false),
syn::Fields::Unit => None,
}
}
// pub fn frame_variants(&self) -> &idx::DataMap<Option<expr::Frame>> {
// &self.frames
// }
// pub fn has_frame_variants(&self) -> bool {
// self.frames.iter().any(Option::is_some)
// }
pub fn zip_handler_id(&self) -> &rust::Id {
&self.zip_handler_id
}
pub fn zipper_go_up_id(&self) -> &rust::Id {
&self.zipper_go_up_id
}
pub fn log(&self, pref: &str, trailing_comma: bool) {
let (open, close) = match self.is_struct_like() {
None => (None, None),
Some(true) => (Some(" {"), Some("}")),
Some(false) => (Some(" ("), Some(")")),
};
logln!("{}{}{}", pref, self.v_id(), open.unwrap_or(""));
for data in &self.data {
logln!("{} {},", pref, data)
}
if let Some(close) = close {
logln!("{}{}{}", pref, close, if trailing_comma { "," } else { "" })
}
}
}
/// # Expr-enum codegen functions.
impl Variant {
fn to_fields_tokens(
&self,
stream: &mut TokenStream,
fields: &syn::punctuated::Punctuated<rust::Field, rust::token::Comma>,
) {
debug_assert_eq!(fields.len(), self.data.len());
let mut d_idx = idx::Data::zero();
gen::punct::do_with(fields, |punct_opt| {
self.data[d_idx].to_expr_data_tokens(stream);
d_idx.inc();
if let Some(punct) = punct_opt {
punct.to_tokens(stream)
}
})
}
pub fn to_expr_variant_tokens(&self, stream: &mut TokenStream) {
stream.append_all(&self.src.attrs);
self.src.ident.to_tokens(stream);
use syn::Fields::*;
match &self.src.fields {
Named(fields) => fields.brace_token.surround(stream, |stream| {
self.to_fields_tokens(stream, &fields.named)
}),
Unnamed(fields) => fields.paren_token.surround(stream, |stream| {
self.to_fields_tokens(stream, &fields.unnamed)
}),
Unit => (),
}
if let Some((eq_token, disc)) = &self.src.discriminant {
eq_token.to_tokens(stream);
disc.to_tokens(stream);
}
}
pub fn to_constructor_tokens(&self) -> TokenStream {
let id = self.v_id();
let data = self.data.iter().map(|data| data.param_id());
if self.is_struct_like().unwrap_or(false) {
quote! {
#id { #(#data ,)* }
}
} else {
quote! {
#id ( #(#data ,)* )
}
}
}
}
/// # Expr zipper struct codegen functions
impl Variant {
pub fn zip_produce_final_res(&self, cxt: &cxt::ZipCxt) -> TokenStream {
let zip_field = &cxt.zip_ids().self_step_field();
let go_up = &self.zipper_go_up_id;
let data_params = self.data.iter().map(expr::data::Data::param_id);
let empty_convert = cxt.lib_gen().zip_do_empty_convert();
quote! {
#zip_field . #go_up (
#( #data_params , )*
) . #empty_convert ()
}
}
/// Builds the next frame for some data index.
pub fn zip_handle_variant_from(
&self,
cxt: &cxt::ZipCxt,
is_own: bool,
d_idx: idx::Data,
) -> TokenStream {
self.data[d_idx].zip_handle_variant_data(
cxt,
is_own,
|input| cxt.lib_gen().zip_do_new_go_down(input),
|| {
let mut d_idx = d_idx;
d_idx.inc();
if d_idx < self.data.len() {
self.zip_handle_variant_from(cxt, is_own, d_idx)
} else {
self.zip_produce_final_res(cxt)
}
},
)
}
pub fn to_zip_handler_fn_tokens(&self, cxt: &cxt::ZipCxt, is_own: bool) -> TokenStream {
let e_cxt = &cxt[self.e_idx];
let fun_id = &self.zip_handler_id;
let out_typ = e_cxt.zip_variant_handler_out_typ(cxt, is_own);
let data_params = self.data.iter().map(|data| {
let param_id = data.param_id();
let typ = data.frame_typ(cxt, is_own);
quote! {
#param_id: #typ
}
});
let handle_variant = self.zip_handle_variant_from(cxt, is_own, idx::Data::zero());
let vis = cxt.conf().secret_item_vis();
quote! {
#vis fn #fun_id (
&mut self,
#( #data_params , )*
) -> #out_typ {
#handle_variant
}
}
}
}
| 28.238095 | 96 | 0.509696 |
ac0fa52c9707096133188f10f75feb10c3888612 | 9,389 | // This file is part of Substrate.
// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Benchmarks for Proxy Pallet
#![cfg(feature = "runtime-benchmarks")]
use super::*;
use frame_system::{RawOrigin, EventRecord};
use frame_benchmarking::{benchmarks, account, whitelisted_caller};
use sp_runtime::traits::Bounded;
use crate::Module as Proxy;
const SEED: u32 = 0;
fn assert_last_event<T: Config>(generic_event: <T as Config>::Event) {
let events = frame_system::Module::<T>::events();
let system_event: <T as frame_system::Config>::Event = generic_event.into();
// compare to the last event record
let EventRecord { event, .. } = &events[events.len() - 1];
assert_eq!(event, &system_event);
}
fn add_proxies<T: Config>(n: u32, maybe_who: Option<T::AccountId>) -> Result<(), &'static str> {
let caller = maybe_who.unwrap_or_else(|| whitelisted_caller());
T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
for i in 0..n {
Proxy::<T>::add_proxy(
RawOrigin::Signed(caller.clone()).into(),
account("target", i, SEED),
T::ProxyType::default(),
T::BlockNumber::zero(),
)?;
}
Ok(())
}
fn add_announcements<T: Config>(
n: u32,
maybe_who: Option<T::AccountId>,
maybe_real: Option<T::AccountId>
) -> Result<(), &'static str> {
let caller = maybe_who.unwrap_or_else(|| account("caller", 0, SEED));
T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
let real = if let Some(real) = maybe_real {
real
} else {
let real = account("real", 0, SEED);
T::Currency::make_free_balance_be(&real, BalanceOf::<T>::max_value());
Proxy::<T>::add_proxy(
RawOrigin::Signed(real.clone()).into(),
caller.clone(),
T::ProxyType::default(),
T::BlockNumber::zero(),
)?;
real
};
for _ in 0..n {
Proxy::<T>::announce(
RawOrigin::Signed(caller.clone()).into(),
real.clone(),
T::CallHasher::hash_of(&("add_announcement", n)),
)?;
}
Ok(())
}
benchmarks! {
_ {
let p in 1 .. (T::MaxProxies::get() - 1).into() => add_proxies::<T>(p, None)?;
}
proxy {
let p in ...;
// In this case the caller is the "target" proxy
let caller: T::AccountId = account("target", p - 1, SEED);
T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
// ... and "real" is the traditional caller. This is not a typo.
let real: T::AccountId = whitelisted_caller();
let call: <T as Config>::Call = frame_system::Call::<T>::remark(vec![]).into();
}: _(RawOrigin::Signed(caller), real, Some(T::ProxyType::default()), Box::new(call))
verify {
assert_last_event::<T>(RawEvent::ProxyExecuted(Ok(())).into())
}
proxy_announced {
let a in 0 .. T::MaxPending::get() - 1;
let p in ...;
// In this case the caller is the "target" proxy
let caller: T::AccountId = account("anonymous", 0, SEED);
let delegate: T::AccountId = account("target", p - 1, SEED);
T::Currency::make_free_balance_be(&delegate, BalanceOf::<T>::max_value());
// ... and "real" is the traditional caller. This is not a typo.
let real: T::AccountId = whitelisted_caller();
let call: <T as Config>::Call = frame_system::Call::<T>::remark(vec![]).into();
Proxy::<T>::announce(
RawOrigin::Signed(delegate.clone()).into(),
real.clone(),
T::CallHasher::hash_of(&call),
)?;
add_announcements::<T>(a, Some(delegate.clone()), None)?;
}: _(RawOrigin::Signed(caller), delegate, real, Some(T::ProxyType::default()), Box::new(call))
verify {
assert_last_event::<T>(RawEvent::ProxyExecuted(Ok(())).into())
}
remove_announcement {
let a in 0 .. T::MaxPending::get() - 1;
let p in ...;
// In this case the caller is the "target" proxy
let caller: T::AccountId = account("target", p - 1, SEED);
T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
// ... and "real" is the traditional caller. This is not a typo.
let real: T::AccountId = whitelisted_caller();
let call: <T as Config>::Call = frame_system::Call::<T>::remark(vec![]).into();
Proxy::<T>::announce(
RawOrigin::Signed(caller.clone()).into(),
real.clone(),
T::CallHasher::hash_of(&call),
)?;
add_announcements::<T>(a, Some(caller.clone()), None)?;
}: _(RawOrigin::Signed(caller.clone()), real, T::CallHasher::hash_of(&call))
verify {
let (announcements, _) = Announcements::<T>::get(&caller);
assert_eq!(announcements.len() as u32, a);
}
reject_announcement {
let a in 0 .. T::MaxPending::get() - 1;
let p in ...;
// In this case the caller is the "target" proxy
let caller: T::AccountId = account("target", p - 1, SEED);
T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
// ... and "real" is the traditional caller. This is not a typo.
let real: T::AccountId = whitelisted_caller();
let call: <T as Config>::Call = frame_system::Call::<T>::remark(vec![]).into();
Proxy::<T>::announce(
RawOrigin::Signed(caller.clone()).into(),
real.clone(),
T::CallHasher::hash_of(&call),
)?;
add_announcements::<T>(a, Some(caller.clone()), None)?;
}: _(RawOrigin::Signed(real), caller.clone(), T::CallHasher::hash_of(&call))
verify {
let (announcements, _) = Announcements::<T>::get(&caller);
assert_eq!(announcements.len() as u32, a);
}
announce {
let a in 0 .. T::MaxPending::get() - 1;
let p in ...;
// In this case the caller is the "target" proxy
let caller: T::AccountId = account("target", p - 1, SEED);
T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
// ... and "real" is the traditional caller. This is not a typo.
let real: T::AccountId = whitelisted_caller();
add_announcements::<T>(a, Some(caller.clone()), None)?;
let call: <T as Config>::Call = frame_system::Call::<T>::remark(vec![]).into();
let call_hash = T::CallHasher::hash_of(&call);
}: _(RawOrigin::Signed(caller.clone()), real.clone(), call_hash)
verify {
assert_last_event::<T>(RawEvent::Announced(real, caller, call_hash).into());
}
add_proxy {
let p in ...;
let caller: T::AccountId = whitelisted_caller();
}: _(
RawOrigin::Signed(caller.clone()),
account("target", T::MaxProxies::get().into(), SEED),
T::ProxyType::default(),
T::BlockNumber::zero()
)
verify {
let (proxies, _) = Proxies::<T>::get(caller);
assert_eq!(proxies.len() as u32, p + 1);
}
remove_proxy {
let p in ...;
let caller: T::AccountId = whitelisted_caller();
}: _(
RawOrigin::Signed(caller.clone()),
account("target", 0, SEED),
T::ProxyType::default(),
T::BlockNumber::zero()
)
verify {
let (proxies, _) = Proxies::<T>::get(caller);
assert_eq!(proxies.len() as u32, p - 1);
}
remove_proxies {
let p in ...;
let caller: T::AccountId = whitelisted_caller();
}: _(RawOrigin::Signed(caller.clone()))
verify {
let (proxies, _) = Proxies::<T>::get(caller);
assert_eq!(proxies.len() as u32, 0);
}
anonymous {
let p in ...;
let caller: T::AccountId = whitelisted_caller();
}: _(
RawOrigin::Signed(caller.clone()),
T::ProxyType::default(),
T::BlockNumber::zero(),
0
)
verify {
let anon_account = Module::<T>::anonymous_account(&caller, &T::ProxyType::default(), 0, None);
assert_last_event::<T>(RawEvent::AnonymousCreated(
anon_account,
caller,
T::ProxyType::default(),
0,
).into());
}
kill_anonymous {
let p in 0 .. (T::MaxProxies::get() - 2).into();
let caller: T::AccountId = whitelisted_caller();
T::Currency::make_free_balance_be(&caller, BalanceOf::<T>::max_value());
Module::<T>::anonymous(
RawOrigin::Signed(whitelisted_caller()).into(),
T::ProxyType::default(),
T::BlockNumber::zero(),
0
)?;
let height = system::Module::<T>::block_number();
let ext_index = system::Module::<T>::extrinsic_index().unwrap_or(0);
let anon = Module::<T>::anonymous_account(&caller, &T::ProxyType::default(), 0, None);
add_proxies::<T>(p, Some(anon.clone()))?;
ensure!(Proxies::<T>::contains_key(&anon), "anon proxy not created");
}: _(RawOrigin::Signed(anon.clone()), caller.clone(), T::ProxyType::default(), 0, height, ext_index)
verify {
assert!(!Proxies::<T>::contains_key(&anon));
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::tests::{new_test_ext, Test};
use frame_support::assert_ok;
#[test]
fn test_benchmarks() {
new_test_ext().execute_with(|| {
assert_ok!(test_benchmark_proxy::<Test>());
assert_ok!(test_benchmark_proxy_announced::<Test>());
assert_ok!(test_benchmark_remove_announcement::<Test>());
assert_ok!(test_benchmark_reject_announcement::<Test>());
assert_ok!(test_benchmark_announce::<Test>());
assert_ok!(test_benchmark_add_proxy::<Test>());
assert_ok!(test_benchmark_remove_proxy::<Test>());
assert_ok!(test_benchmark_remove_proxies::<Test>());
assert_ok!(test_benchmark_anonymous::<Test>());
assert_ok!(test_benchmark_kill_anonymous::<Test>());
});
}
}
| 33.532143 | 101 | 0.657472 |
f92919a2b3a1172c7b158932d329601a0b0b1699 | 344 | mod entity_client;
mod partition_key_client;
mod table_client;
mod table_service_client;
pub use entity_client::{AsEntityClient, EntityClient};
pub use partition_key_client::{AsPartitionKeyClient, PartitionKeyClient};
pub use table_client::{AsTableClient, TableClient};
pub use table_service_client::{AsTableServiceClient, TableServiceClient};
| 38.222222 | 73 | 0.848837 |
0ee2221bbc14b9658c9f5010c94be25821b54f7b | 780 | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Regression test for issue #25436: check that things which can be
// followed by any token also permit X* to come afterwards.
macro_rules! foo {
( $a:expr $($b:tt)* ) => { }; //~ ERROR not allowed for `expr` fragments
( $a:ty $($b:tt)* ) => { }; //~ ERROR not allowed for `ty` fragments
}
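// For contrast (sketch, not part of the original test): `tt` fragments may be
// followed by any token, so the analogous rule is accepted:
//
// macro_rules! foo_ok {
//     ( $a:tt $($b:tt)* ) => { };
// }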
fn main() { }
| 39 | 74 | 0.691026 |
11c8d57b9314592daab61d154b8d3c641da33aa4 | 9,263 | use super::{ItemAccess, KeyPartialOrd, Offset, Storage, StorageMut};
use crate::util::binary_search_min;
use std::marker::PhantomData;
/// Internal node reference.
pub trait InternalRef<S: Storage>: ItemAccess<S> {
    /// Returns the identifier of the parent node, if any.
fn parent(&self) -> Option<usize>;
/// Find the offset of the item matching the given key.
///
/// If the key matches no item in this node,
    /// this function returns the index and id of the child that may contain the key.
#[inline]
fn offset_of<Q: ?Sized>(&self, key: &Q) -> Result<Offset, (usize, usize)>
where
S: KeyPartialOrd<Q>,
{
match binary_search_min(self, key) {
Some((i, eq)) => {
if eq {
Ok(i)
} else {
let child_index = 1usize + i.unwrap();
let id = self.child_id(child_index).unwrap();
Err((child_index, id))
}
}
None => Err((0, self.child_id(0).unwrap())),
}
}
/// Returns the id of the child with the given index, if any.
fn child_id(&self, index: usize) -> Option<usize>;
#[inline]
fn child_count(&self) -> usize {
self.item_count() + 1usize
}
/// Returns the index of the child with the given id, if any.
#[inline]
fn child_index(&self, id: usize) -> Option<usize> {
let child_count = self.item_count() + 1usize;
for i in 0..child_count {
if self.child_id(i).unwrap() == id {
return Some(i);
}
}
None
}
fn children(&self) -> Children<S, Self> {
Children {
node: self,
index: 0,
storage: PhantomData,
}
}
fn items(&self) -> Items<S, Self> {
Items {
node: self,
offset: 0.into(),
storage: PhantomData,
}
}
/// Returns the maximum capacity of this node.
///
/// Must be at least 6 for internal nodes, and 7 for leaf nodes.
///
/// The node is considered overflowing if it contains `max_capacity` items.
fn max_capacity(&self) -> usize;
/// Returns the minimum capacity of this node.
///
/// The node is considered underflowing if it contains less items than this value.
#[inline]
fn min_capacity(&self) -> usize {
self.max_capacity() / 2 - 1
}
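    // Worked example: with `max_capacity() == 8`, the node overflows once it
    // holds 8 items (`item_count() >= 8`) and underflows below `8 / 2 - 1 == 3`.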
/// Checks if the node is overflowing.
///
/// For an internal node, this is when it contains `max_capacity` items.
/// For a leaf node, this is when it contains `max_capacity + 1` items.
#[inline]
fn is_overflowing(&self) -> bool {
self.item_count() >= self.max_capacity()
}
/// Checks if the node is underflowing.
#[inline]
fn is_underflowing(&self) -> bool {
self.item_count() < self.min_capacity()
}
}
/// Immutable internal node reference.
///
/// Since an immutable reference is also a reference,
/// implementing this trait requires implementing the
/// [`InternalRef`] trait.
pub trait InternalConst<'a, S: 'a + Storage>: InternalRef<S> {
fn item(&self, offset: Offset) -> Option<S::ItemRef<'a>>;
#[inline]
fn get<Q: ?Sized>(&self, key: &Q) -> Result<S::ItemRef<'a>, usize>
where
for<'r> S: KeyPartialOrd<Q>,
{
match binary_search_min(self, key) {
Some((i, eq)) => {
let item = self.item(i).unwrap();
if eq {
Ok(item)
} else {
Err(self.child_id(1usize + i.unwrap()).unwrap())
}
}
_ => Err(self.child_id(0).unwrap()),
}
}
#[inline]
fn separators(&self, index: usize) -> (Option<S::ItemRef<'a>>, Option<S::ItemRef<'a>>) {
let min = if index > 0 {
self.item((index - 1).into())
} else {
None
};
let max = if index < self.child_count() {
self.item(index.into())
} else {
None
};
(min, max)
}
}
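// Descent sketch built on `get` (the `storage.internal(id)` accessor is a
// hypothetical stand-in for however nodes are fetched):
//
//     let mut id = root_id;
//     let found = loop {
//         match storage.internal(id).get(key) {
//             Ok(item) => break Some(item),   // key found in this node
//             Err(child) => id = child,       // otherwise follow the child id
//         }
//     };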
/// Mutable internal node reference.
///
/// Since a mutable reference is also a reference,
/// implementing this trait requires implementing the
/// [`InternalRef`] trait.
pub trait InternalMut<'a, S: 'a + StorageMut>: Sized + InternalRef<S> {
/// Sets the identifier of the parent node.
fn set_parent(&mut self, parent: Option<usize>);
/// Sets the identifier of the first child node.
fn set_first_child_id(&mut self, id: usize);
/// Returns a mutable reference to the item with the given offset in the node.
fn into_item_mut(self, offset: Offset) -> Option<S::ItemMut<'a>>;
/// Inserts an item at the given offset in the node,
/// separated with the next item by the given child node.
fn insert(&mut self, offset: Offset, item: S::Item, right_child_id: usize);
/// Removes the item at the given offset and the reference to its right child.
///
/// Returns the item and the identifier of the right child.
fn remove(&mut self, offset: Offset) -> (S::Item, usize);
/// Replaces the item at the given offset.
///
/// Returns the old item.
fn replace(&mut self, offset: Offset, item: S::Item) -> S::Item;
/// Appends the separator and all the branches of `other` into this node.
///
/// Returns the offset of the separator.
fn append(&mut self, separator: S::Item, other: S::InternalNode) -> Offset;
/// Returns a mutable reference to the item matching the given key in this node.
///
/// If no item in the node matches the given key,
/// returns the id of the child node that may contain such item.
#[inline]
fn get_mut<Q: ?Sized>(self, key: &Q) -> Result<S::ItemMut<'a>, usize>
where
S: KeyPartialOrd<Q>,
{
match binary_search_min(&self, key) {
Some((i, eq)) => {
let child_id = self.child_id(1usize + i.unwrap());
let item = self.into_item_mut(i).unwrap();
if eq {
Ok(item)
} else {
Err(child_id.unwrap())
}
}
_ => Err(self.child_id(0).unwrap()),
}
}
/// Split the node.
///
/// Returns a tuple (len, item, node) where
    /// `len` is the number of items left in the node,
    /// `item` is the pivot item around which the node has been split and
    /// `node` is a new internal node containing all the items
/// removed from the right of the pivot item.
#[inline]
fn split(&mut self) -> (usize, S::Item, S::InternalNode) {
use crate::btree::node::buffer::Internal;
        assert!(self.is_overflowing()); // implies `self.item_count() >= self.max_capacity() >= 6`
        // Index of the median-key item among this node's items.
        let median_i = (self.item_count() - 1) / 2; // Since the knuth-order is at least 3, `median_i` is at least 1.
// Put all the branches on the right of the median pivot in `right_branches`.
let right_len = self.item_count() - median_i - 1;
let mut right_branches = Vec::new(); // Note: branches are stored in reverse order.
for i in 0..right_len {
let offset: Offset = (median_i + right_len - i).into();
let (item, right_child_id) = self.remove(offset);
right_branches.push((item, right_child_id));
}
let mut right_node = S::InternalNode::default();
right_node.set_parent(self.parent());
// Remove the median pivot.
let (median_item, median_right_child) = self.remove(median_i.into());
right_node.set_first_child_id(median_right_child);
// Move the right branches to the other node.
for (item, child_id) in right_branches.into_iter().rev() {
right_node.push_right(item, child_id);
}
assert!(!self.is_underflowing());
// assert!(!right_node.is_underflowing());
(self.item_count(), median_item, right_node)
}
}
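// Caller-side sketch for `split` (the `store_internal` call is a hypothetical
// stand-in for allocating the new node in storage):
//
//     let (_left_len, pivot, right) = node.split();
//     let right_id = storage.store_internal(right);
//     parent.insert(parent_offset, pivot, right_id); // pivot separates the halves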
pub struct Children<'b, S, R: ?Sized> {
node: &'b R,
index: usize,
storage: PhantomData<S>,
}
impl<'b, S: Storage, R: InternalRef<S>> Iterator for Children<'b, S, R> {
type Item = usize;
fn next(&mut self) -> Option<usize> {
if self.index < self.node.child_count() {
let i = self.index;
self.index += 1;
self.node.child_id(i)
} else {
None
}
}
}
pub struct Items<'b, S, R: ?Sized> {
node: &'b R,
offset: Offset,
storage: PhantomData<S>,
}
impl<'b, S: 'b + Storage, R: InternalRef<S>> Iterator for Items<'b, S, R> {
type Item = (usize, S::ItemRef<'b>, usize);
fn next(&mut self) -> Option<Self::Item> {
if self.offset < self.node.item_count() {
let offset = self.offset;
self.offset = offset + 1;
let left_child_id = self.node.child_id(offset.unwrap()).unwrap();
let right_child_id = self.node.child_id(offset.unwrap() + 1).unwrap();
let item = self.node.borrow_item(offset).unwrap();
Some((left_child_id, item, right_child_id))
} else {
None
}
}
}
| 32.275261 | 117 | 0.562453 |
2800ce4b0031dc150705eadf81e93526ffdacc24 | 32,683 | #![no_std]
extern crate alloc;
use alloc::{boxed::Box, string::String, vec, vec::Vec};
use core::iter::{self, FromIterator};
use rand::{distributions::Alphanumeric, rngs::SmallRng, Rng, SeedableRng};
use casper_contract::{
contract_api::{account, runtime, storage, system},
unwrap_or_revert::UnwrapOrRevert,
};
use casper_types::{
account::{AccountHash, ActionType, Weight},
bytesrepr::Bytes,
contracts::NamedKeys,
runtime_args, ApiError, BlockTime, CLType, CLValue, ContractHash, ContractVersion, EntryPoint,
EntryPointAccess, EntryPointType, EntryPoints, Key, Parameter, Phase, RuntimeArgs, U512,
};
const MIN_FUNCTION_NAME_LENGTH: usize = 1;
const MAX_FUNCTION_NAME_LENGTH: usize = 100;
const NAMED_KEY_COUNT: usize = 100;
const MIN_NAMED_KEY_NAME_LENGTH: usize = 10;
// TODO - consider increasing to e.g. 1_000 once https://casperlabs.atlassian.net/browse/EE-966 is
// resolved.
const MAX_NAMED_KEY_NAME_LENGTH: usize = 100;
const VALUE_FOR_ADDITION_1: u64 = 1;
const VALUE_FOR_ADDITION_2: u64 = 2;
const TRANSFER_AMOUNT: u64 = 1_000_000;
const ARG_SEED: &str = "seed";
const ARG_OTHERS: &str = "others";
const ARG_BYTES: &str = "bytes";
#[repr(u16)]
enum Error {
GetCaller = 0,
GetBlockTime = 1,
GetPhase = 2,
HasKey = 3,
GetKey = 4,
NamedKeys = 5,
ReadOrRevert = 6,
IsValidURef = 7,
Transfer = 8,
Revert = 9,
}
impl From<Error> for ApiError {
fn from(error: Error) -> ApiError {
ApiError::User(error as u16)
}
}
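// Sketch: user errors surface to callers as `ApiError::User(code)`; for
// instance a failed transfer check reverts with code 8:
//
//     runtime::revert(Error::Transfer); // observed by callers as ApiError::User(8)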
fn create_random_names(rng: &mut SmallRng) -> impl Iterator<Item = String> + '_ {
iter::repeat_with(move || {
let key_length: usize = rng.gen_range(MIN_NAMED_KEY_NAME_LENGTH, MAX_NAMED_KEY_NAME_LENGTH);
rng.sample_iter(&Alphanumeric)
.take(key_length)
.collect::<String>()
})
.take(NAMED_KEY_COUNT)
}
fn truncate_named_keys(named_keys: NamedKeys, rng: &mut SmallRng) -> NamedKeys {
let truncated_len = rng.gen_range(1, named_keys.len() + 1);
let mut vec = named_keys.into_iter().collect::<Vec<_>>();
vec.truncate(truncated_len);
vec.into_iter().collect()
}
// Executes the named key functions from the `runtime` module and most of the functions from the
// `storage` module.
fn large_function() {
let seed: u64 = runtime::get_named_arg(ARG_SEED);
let random_bytes: Bytes = runtime::get_named_arg(ARG_BYTES);
let uref = storage::new_uref(random_bytes.clone());
let mut rng = SmallRng::seed_from_u64(seed);
let mut key_name = String::new();
for random_name in create_random_names(&mut rng) {
key_name = random_name;
runtime::put_key(&key_name, Key::from(uref));
}
if !runtime::has_key(&key_name) {
runtime::revert(Error::HasKey);
}
if runtime::get_key(&key_name) != Some(Key::from(uref)) {
runtime::revert(Error::GetKey);
}
runtime::remove_key(&key_name);
let named_keys = runtime::list_named_keys();
if named_keys.len() != NAMED_KEY_COUNT - 1 {
runtime::revert(Error::NamedKeys)
}
storage::write(uref, random_bytes.clone());
let retrieved_value: Bytes = storage::read_or_revert(uref);
if retrieved_value != random_bytes {
runtime::revert(Error::ReadOrRevert);
}
storage::write(uref, VALUE_FOR_ADDITION_1);
storage::add(uref, VALUE_FOR_ADDITION_2);
let keys_to_return = truncate_named_keys(named_keys, &mut rng);
runtime::ret(CLValue::from_t(keys_to_return).unwrap_or_revert());
}
fn small_function() {
if runtime::get_phase() != Phase::Session {
runtime::revert(Error::GetPhase);
}
}
#[no_mangle]
pub extern "C" fn call() {
let seed: u64 = runtime::get_named_arg(ARG_SEED);
let (random_bytes, source_account, destination_account): (Vec<u8>, AccountHash, AccountHash) =
runtime::get_named_arg(ARG_OTHERS);
// ========== storage, execution and upgrading of contracts ====================================
// Store large function with no named keys, then execute it to get named keys returned.
let mut rng = SmallRng::seed_from_u64(seed);
let large_function_name = String::from_iter(
iter::repeat('l')
.take(rng.gen_range(MIN_FUNCTION_NAME_LENGTH, MAX_FUNCTION_NAME_LENGTH + 1)),
);
let entry_point_name = &large_function_name;
let runtime_args = runtime_args! {
ARG_SEED => seed,
ARG_BYTES => random_bytes.clone()
};
let (contract_hash, _contract_version) = store_function(entry_point_name, None);
let named_keys: NamedKeys =
runtime::call_contract(contract_hash, entry_point_name, runtime_args.clone());
let (contract_hash, _contract_version) =
store_function(entry_point_name, Some(named_keys.clone()));
// Store large function with 10 named keys, then execute it.
runtime::call_contract::<NamedKeys>(contract_hash, entry_point_name, runtime_args);
// Small function
let small_function_name = String::from_iter(
iter::repeat('s')
.take(rng.gen_range(MIN_FUNCTION_NAME_LENGTH, MAX_FUNCTION_NAME_LENGTH + 1)),
);
let entry_point_name = &small_function_name;
let runtime_args = runtime_args! {};
// Store small function with no named keys, then execute it.
let (contract_hash, _contract_version) =
store_function(entry_point_name, Some(NamedKeys::new()));
runtime::call_contract::<()>(contract_hash, entry_point_name, runtime_args.clone());
let (contract_hash, _contract_version) = store_function(entry_point_name, Some(named_keys));
// Store small function with 10 named keys, then execute it.
runtime::call_contract::<()>(contract_hash, entry_point_name, runtime_args);
// ========== functions from `account` module ==================================================
let main_purse = account::get_main_purse();
account::set_action_threshold(ActionType::Deployment, Weight::new(1)).unwrap_or_revert();
account::add_associated_key(destination_account, Weight::new(1)).unwrap_or_revert();
account::update_associated_key(destination_account, Weight::new(1)).unwrap_or_revert();
account::remove_associated_key(destination_account).unwrap_or_revert();
// ========== functions from `system` module ===================================================
let _ = system::get_mint();
let new_purse = system::create_purse();
let transfer_amount = U512::from(TRANSFER_AMOUNT);
system::transfer_from_purse_to_purse(main_purse, new_purse, transfer_amount, None)
.unwrap_or_revert();
let balance = system::get_purse_balance(new_purse).unwrap_or_revert();
if balance != transfer_amount {
runtime::revert(Error::Transfer);
}
system::transfer_from_purse_to_account(new_purse, destination_account, transfer_amount, None)
.unwrap_or_revert();
system::transfer_to_account(destination_account, transfer_amount, None).unwrap_or_revert();
// ========== remaining functions from `runtime` module ========================================
if !runtime::is_valid_uref(main_purse) {
runtime::revert(Error::IsValidURef);
}
if runtime::get_blocktime() != BlockTime::new(0) {
runtime::revert(Error::GetBlockTime);
}
if runtime::get_caller() != source_account {
runtime::revert(Error::GetCaller);
}
runtime::print(&String::from_utf8_lossy(&random_bytes));
runtime::revert(Error::Revert);
}
fn store_function(
entry_point_name: &str,
named_keys: Option<NamedKeys>,
) -> (ContractHash, ContractVersion) {
let entry_points = {
let mut entry_points = EntryPoints::new();
let entry_point = EntryPoint::new(
entry_point_name,
vec![
Parameter::new(ARG_SEED, CLType::U64),
Parameter::new(ARG_BYTES, CLType::List(Box::new(CLType::U8))),
],
CLType::Unit,
EntryPointAccess::Public,
EntryPointType::Contract,
);
entry_points.add_entry_point(entry_point);
entry_points
};
storage::new_contract(entry_points, named_keys, None, None)
}
#[rustfmt::skip] #[no_mangle] pub extern "C" fn s() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn ss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn sss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn sssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn sssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn sssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn sssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn sssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn sssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn sssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn sssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn sssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn sssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn sssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn sssssssssssssssssssssssssss() { small_function()
}
#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
sssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C"
fn ssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern
"C" fn sssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub
extern "C" fn ssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip]
#[no_mangle] pub extern "C" fn sssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
sssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub
extern "C" fn ssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip]
#[no_mangle] pub extern "C" fn sssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
sssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub
extern "C" fn ssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip]
#[no_mangle] pub extern "C" fn sssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
sssssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle]
pub extern "C" fn ssssssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::
skip] #[no_mangle] pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip]
#[no_mangle] pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip]
#[no_mangle] pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip]
#[no_mangle] pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip]
#[no_mangle] pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip]
#[no_mangle] pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip]
#[no_mangle] pub extern "C" fn sssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::
skip] #[no_mangle] pub extern "C" fn
sssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::
skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::
skip] #[no_mangle] pub extern "C" fn
sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::
skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::
skip] #[no_mangle] pub extern "C" fn
sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function()
}
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() { small_function()
}
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss() {
small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss()
{ small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss()
{ small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss()
{ small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss()
{ small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss()
{ small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss()
{ small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss()
{ small_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn l() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn ll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn lll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn llll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn lllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn lllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn lllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn lllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn lllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn lllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn lllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn lllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn lllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn lllllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn lllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn lllllllllllllllllllllllllll() { large_function()
}
#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
lllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C"
fn llllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern
"C" fn lllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub
extern "C" fn llllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip]
#[no_mangle] pub extern "C" fn lllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
lllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub
extern "C" fn llllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip]
#[no_mangle] pub extern "C" fn lllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
lllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub
extern "C" fn llllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip]
#[no_mangle] pub extern "C" fn lllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn llllllllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
lllllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle]
pub extern "C" fn llllllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::
skip] #[no_mangle] pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip]
#[no_mangle] pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip]
#[no_mangle] pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip]
#[no_mangle] pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip]
#[no_mangle] pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip]
#[no_mangle] pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip]
#[no_mangle] pub extern "C" fn lllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::
skip] #[no_mangle] pub extern "C" fn
lllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::
skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::
skip] #[no_mangle] pub extern "C" fn
lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::
skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::
skip] #[no_mangle] pub extern "C" fn
lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function()
}
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() { large_function()
}
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll() {
large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll()
{ large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll()
{ large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll()
{ large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll()
{ large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll()
{ large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
lllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll()
{ large_function() }
#[rustfmt::skip] #[no_mangle] pub extern "C" fn
llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll()
{ large_function() }
| 48.853513 | 102 | 0.75706 |
33b2f355e086e679350e27c944681ae2fd2c5a54 | 1,920 | #[doc(hidden)]
#[allow(missing_debug_implementations)]
pub struct AppMeta<'b> {
pub name: String,
pub bin_name: Option<String>,
pub author: Option<&'b str>,
pub version: Option<&'b str>,
pub about: Option<&'b str>,
pub more_help: Option<&'b str>,
pub pre_help: Option<&'b str>,
pub aliases: Option<Vec<(&'b str, bool)>>, // (name, visible)
pub usage_str: Option<&'b str>,
pub usage: Option<String>,
pub help_str: Option<&'b str>,
pub disp_ord: usize,
pub term_w: Option<usize>,
pub max_w: Option<usize>,
pub template: Option<&'b str>,
}
impl<'b> Default for AppMeta<'b> {
fn default() -> Self {
AppMeta {
name: String::new(),
author: None,
about: None,
more_help: None,
pre_help: None,
version: None,
usage_str: None,
usage: None,
bin_name: None,
help_str: None,
disp_ord: 999,
template: None,
aliases: None,
term_w: None,
max_w: None,
}
}
}
impl<'b> AppMeta<'b> {
pub fn new() -> Self { Default::default() }
pub fn with_name(s: String) -> Self { AppMeta { name: s, ..Default::default() } }
}
impl<'b> Clone for AppMeta<'b> {
fn clone(&self) -> Self {
AppMeta {
name: self.name.clone(),
author: self.author,
about: self.about,
more_help: self.more_help,
pre_help: self.pre_help,
version: self.version,
usage_str: self.usage_str,
usage: self.usage.clone(),
bin_name: self.bin_name.clone(),
help_str: self.help_str,
disp_ord: self.disp_ord,
template: self.template,
aliases: self.aliases.clone(),
term_w: self.term_w,
max_w: self.max_w,
}
}
}
| 27.826087 | 85 | 0.521875 |
618ddd2384a83bc6304fb1bd4f5ff40fb40b9616 | 1,986 | use graph::blockchain;
use graph::blockchain::Block;
use graph::blockchain::TriggerData;
use graph::cheap_clone::CheapClone;
use graph::prelude::hex;
use graph::prelude::web3::types::H256;
use graph::prelude::BlockNumber;
use graph::runtime::asc_new;
use graph::runtime::AscHeap;
use graph::runtime::AscPtr;
use graph::runtime::DeterministicHostError;
use std::{cmp::Ordering, sync::Arc};
use crate::codec::CryptoHash;
use crate::codec::{NeoBlock, RpcNotifyEventArgs as Event, RpcApplicationLog};
#[derive(Clone)]
pub enum NeoTrigger {
Block(Arc<NeoBlock>),
Receipt(Arc<RpcApplicationLog>),
}
impl CheapClone for NeoTrigger {
fn cheap_clone(&self) -> NeoTrigger {
match self {
NeoTrigger::Block(block) => NeoTrigger::Block(block.cheap_clone()),
NeoTrigger::Receipt(receipt) => NeoTrigger::Receipt(receipt.cheap_clone()),
}
}
}
impl PartialEq for NeoTrigger {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(Self::Block(a), Self::Block(b)) => a == b,
(Self::Receipt(a), Self::Receipt(b)) => a.txid == b.txid,
(Self::Block(_), Self::Receipt(_)) | (Self::Receipt(_), Self::Block(_)) => false,
}
}
}
impl NeoTrigger {
pub fn block_hash(&self) -> CryptoHash {
match self {
            // `self` is only borrowed, so borrow into the Arc and clone the hash out.
            NeoTrigger::Block(block) => block.header.as_ref().unwrap().hash.clone().unwrap(),
            NeoTrigger::Receipt(receipt) => receipt.blockhash.clone().unwrap(),
}
}
}
impl TriggerData for NeoTrigger {
fn error_context(&self) -> std::string::String {
match self {
NeoTrigger::Block(block) => {
format!("Block ({:?})", block.header.unwrap().hash.unwrap() )
}
NeoTrigger::Receipt(receipt) => {
format!(
"receipt id {:?}, block ({:?})",
receipt.txid,
receipt.blockhash
)
}
}
}
} | 27.971831 | 93 | 0.573011 |
ddd42adf845875b0bccc35e778db6d7c487301d5 | 1,315 | // This file was generated by gir (d50d839) from gir-files (469db10)
// DO NOT EDIT
use MimeInfo;
use ffi;
use glib::object::IsA;
use glib::translate::*;
use glib_ffi;
use gobject_ffi;
use std::mem;
use std::ptr;
glib_wrapper! {
pub struct Plugin(Object<ffi::WebKitPlugin, ffi::WebKitPluginClass>);
match fn {
get_type => || ffi::webkit_plugin_get_type(),
}
}
pub trait PluginExt {
fn get_description(&self) -> Option<String>;
fn get_mime_info_list(&self) -> Vec<MimeInfo>;
fn get_name(&self) -> Option<String>;
fn get_path(&self) -> Option<String>;
}
impl<O: IsA<Plugin>> PluginExt for O {
fn get_description(&self) -> Option<String> {
unsafe {
from_glib_none(ffi::webkit_plugin_get_description(self.to_glib_none().0))
}
}
fn get_mime_info_list(&self) -> Vec<MimeInfo> {
unsafe {
FromGlibPtrContainer::from_glib_none(ffi::webkit_plugin_get_mime_info_list(self.to_glib_none().0))
}
}
fn get_name(&self) -> Option<String> {
unsafe {
from_glib_none(ffi::webkit_plugin_get_name(self.to_glib_none().0))
}
}
fn get_path(&self) -> Option<String> {
unsafe {
from_glib_none(ffi::webkit_plugin_get_path(self.to_glib_none().0))
}
}
}
| 23.482143 | 110 | 0.630418 |
1aa43bfac0f68e11d6abe0e157ca4e2762348bbd | 707 | use anyhow::Result;
use structopt::StructOpt;
use tokio::net::TcpListener;
use toy_storage::{api::Server, storage::inmemory};
use tracing::info;
#[derive(StructOpt)]
struct Opts {
#[structopt(short, long, default_value = "127.0.0.1:8080")]
address: String,
}
#[tokio::main]
async fn main() -> Result<()> {
init_logger();
let opts = Opts::from_args();
run_with(opts).await
}
async fn run_with(opts: Opts) -> Result<()> {
info!("listening at {}", opts.address);
let listener = TcpListener::bind(opts.address).await?;
let store = inmemory::start();
Server::new(listener, store).start().await;
Ok(())
}
fn init_logger() {
tracing_subscriber::fmt().init()
}
| 19.108108 | 63 | 0.640736 |
d66fdf2614047878bca5c676fa223c2d720156b0 | 892 | use std::ops::Range;
fn greatest_subsequential_sum(nums: &[i32]) -> (i32, Range<usize>) {
let mut max = 0;
let mut boundaries = 0..0;
    // Consider every window length up to and including the full slice. The
    // inclusive `..=` bounds matter: half-open bounds here would never examine
    // the longest windows, nor any window containing the final element.
    for length in 1..=nums.len() {
        for start in 0..=nums.len() - length {
            let sum: i32 = nums[start..start + length].iter().sum();
if sum > max {
max = sum;
boundaries = start..start + length;
}
}
}
(max, boundaries)
}
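// A linear-time alternative for comparison: Kadane's algorithm keeps the best
// sum of a window ending at the current index and restarts after any negative
// prefix. This is a sketch following the same conventions as above (zero sum
// and an empty range for all-negative input); the function name is
// illustrative and nothing else in this file calls it.
#[allow(dead_code)]
fn greatest_subsequential_sum_kadane(nums: &[i32]) -> (i32, Range<usize>) {
    let (mut max, mut boundaries) = (0, 0..0);
    let (mut current, mut start) = (0, 0);
    for (i, &n) in nums.iter().enumerate() {
        current += n;
        if current < 0 {
            // A negative running sum can never help a later window; restart
            // just past the current element.
            current = 0;
            start = i + 1;
        } else if current > max {
            max = current;
            boundaries = start..i + 1;
        }
    }
    (max, boundaries)
}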
fn main() {
let nums = [1, 2, 39, 34, 20, -20, -16, 35, 0];
let (max, boundaries) = greatest_subsequential_sum(&nums);
println!("Max subsequence sum: {} for {:?}", max, &nums[boundaries]);
}
#[test]
fn subsequential_sum() {
let nums = [1, 2, 39, 34, 20, -20, -16, 35, 0];
let (max, boundaries) = greatest_subsequential_sum(&nums);
assert_eq!(max, 96);
assert_eq!(&nums[boundaries], &[1, 2, 39, 34, 20]);
}
| 24.108108 | 73 | 0.530269 |
38ac498ea8c7a24f7a3a56d3bff58b89e7a4c0ec | 133 | pub mod request;
pub use self::request::{TransactionRequest,FilledTransactionRequest,CallRequest};
// pub use self::sign::sign_call;
| 33.25 | 81 | 0.796992 |
1c64f3e9ab23ebe524e7b44ed9132a173aaf5bf3 | 906 | #[test]
fn min_height_overrides_height_on_root() {
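    // The node declares a fixed height of 50 but a min-height of 100; as in
    // CSS sizing, the minimum constraint wins, which the assertions below check.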
let mut sprawl = sprawl::Sprawl::new();
let node = sprawl
.new_node(
sprawl::style::Style {
size: sprawl::geometry::Size { height: sprawl::style::Dimension::Points(50f32), ..Default::default() },
min_size: sprawl::geometry::Size {
height: sprawl::style::Dimension::Points(100f32),
..Default::default()
},
..Default::default()
},
&[],
)
.unwrap();
sprawl.compute_layout(node, sprawl::geometry::Size::undefined()).unwrap();
assert_eq!(sprawl.layout(node).unwrap().size.width, 0f32);
assert_eq!(sprawl.layout(node).unwrap().size.height, 100f32);
assert_eq!(sprawl.layout(node).unwrap().location.x, 0f32);
assert_eq!(sprawl.layout(node).unwrap().location.y, 0f32);
}
| 39.391304 | 119 | 0.565121 |
d578ff424b56c37bbc24fdf4e8ade85a354bfee4 | 672 | use opentelemetry::{
api,
api::Tracer as _,
api::{Provider, Span},
sdk,
};
use std::thread;
use std::time::Duration;
fn main() {
let tracer = sdk::Provider::default().get_tracer("report_example");
{
let span0 = tracer.start("main", None);
thread::sleep(Duration::from_millis(10));
{
let span1 = tracer.start("sub", Some(span0.get_context()));
span1.set_attribute(api::Key::new("foo").string("bar"));
span1.add_event("something wrong".to_string());
thread::sleep(Duration::from_millis(10));
}
}
    // Give the background exporter time to flush spans before the process exits.
thread::sleep(Duration::from_millis(250));
}
| 25.846154 | 71 | 0.577381 |
18f256d33ec1bb598e3f946bf138f801d0a5a8ed | 5,460 | // Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::Debug;
use openraft::AppDataResponse;
use serde::Deserialize;
use serde::Serialize;
use crate::AddResult;
use crate::AppError;
use crate::Change;
use crate::DatabaseMeta;
use crate::MetaError;
use crate::Node;
use crate::ShareInfo;
use crate::TableMeta;
use crate::TxnReply;
/// The state of an applied raft log.
/// Normally it includes two fields: the state before applying and the state after applying the log.
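/// For example, a write to the generic KV space is reported as
/// `AppliedState::KV(Change { prev, result })`, carrying the stored value both
/// before and after the log entry was applied.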
#[allow(clippy::large_enum_variant)]
#[derive(
Serialize, Deserialize, Debug, Clone, PartialEq, derive_more::From, derive_more::TryInto,
)]
pub enum AppliedState {
Seq {
seq: u64,
},
Node {
prev: Option<Node>,
result: Option<Node>,
},
DatabaseId(Change<u64>),
DatabaseMeta(Change<DatabaseMeta>),
TableMeta(Change<TableMeta>),
ShareInfo(Change<ShareInfo>),
KV(Change<Vec<u8>>),
AppError(AppError),
TxnReply(TxnReply),
#[try_into(ignore)]
None,
}
impl AppDataResponse for AppliedState {}
impl<T, ID> TryInto<AddResult<T, ID>> for AppliedState
where
ID: Clone + PartialEq + Debug,
T: Clone + PartialEq + Debug,
Change<T, ID>: TryFrom<AppliedState>,
<Change<T, ID> as TryFrom<AppliedState>>::Error: Debug,
{
type Error = MetaError;
fn try_into(self) -> Result<AddResult<T, ID>, Self::Error> {
// TODO(xp): maybe better to replace with specific error?
if let AppliedState::AppError(app_err) = self {
return Err(MetaError::AppError(app_err));
}
let typ = std::any::type_name::<T>();
let ch = TryInto::<Change<T, ID>>::try_into(self).expect(typ);
let add_res = ch.into_add_result()?;
Ok(add_res)
}
}
pub enum PrevOrResult<'a> {
Prev(&'a AppliedState),
Result(&'a AppliedState),
}
impl<'a> PrevOrResult<'a> {
pub fn is_some(&self) -> bool {
match self {
PrevOrResult::Prev(state) => state.prev_is_some(),
PrevOrResult::Result(state) => state.result_is_some(),
}
}
pub fn is_none(&self) -> bool {
!self.is_some()
}
}
impl AppliedState {
pub fn prev(&self) -> PrevOrResult {
PrevOrResult::Prev(self)
}
pub fn result(&self) -> PrevOrResult {
PrevOrResult::Result(self)
}
/// Whether the state changed
pub fn changed(&self) -> bool {
match self {
AppliedState::Seq { .. } => true,
AppliedState::Node {
ref prev,
ref result,
} => prev != result,
AppliedState::DatabaseId(ref ch) => ch.changed(),
AppliedState::DatabaseMeta(ref ch) => ch.changed(),
AppliedState::TableMeta(ref ch) => ch.changed(),
AppliedState::ShareInfo(ref ch) => ch.changed(),
AppliedState::KV(ref ch) => ch.changed(),
AppliedState::None => false,
AppliedState::AppError(_e) => false,
AppliedState::TxnReply(txn) => txn.success,
}
}
pub fn prev_is_some(&self) -> bool {
!self.prev_is_none()
}
pub fn result_is_some(&self) -> bool {
!self.result_is_none()
}
pub fn is_some(&self) -> (bool, bool) {
(self.prev_is_some(), self.result_is_some())
}
pub fn is_none(&self) -> (bool, bool) {
(self.prev_is_none(), self.result_is_none())
}
pub fn prev_is_none(&self) -> bool {
match self {
AppliedState::Seq { .. } => false,
AppliedState::Node { ref prev, .. } => prev.is_none(),
AppliedState::DatabaseId(Change { ref prev, .. }) => prev.is_none(),
AppliedState::DatabaseMeta(Change { ref prev, .. }) => prev.is_none(),
AppliedState::TableMeta(Change { ref prev, .. }) => prev.is_none(),
AppliedState::ShareInfo(Change { ref prev, .. }) => prev.is_none(),
AppliedState::KV(Change { ref prev, .. }) => prev.is_none(),
AppliedState::None => true,
AppliedState::AppError(_e) => true,
AppliedState::TxnReply(_txn) => true,
}
}
pub fn result_is_none(&self) -> bool {
match self {
AppliedState::Seq { .. } => false,
AppliedState::Node { ref result, .. } => result.is_none(),
AppliedState::DatabaseId(Change { ref result, .. }) => result.is_none(),
AppliedState::DatabaseMeta(Change { ref result, .. }) => result.is_none(),
AppliedState::TableMeta(Change { ref result, .. }) => result.is_none(),
AppliedState::ShareInfo(Change { ref result, .. }) => result.is_none(),
AppliedState::KV(Change { ref result, .. }) => result.is_none(),
AppliedState::None => true,
AppliedState::AppError(_e) => true,
AppliedState::TxnReply(txn) => !txn.success,
}
}
}
| 30.165746 | 100 | 0.594689 |
4b09681548d37ea1a19c45f1903118b9df710281 | 200,164 | //! Lowering rules for X64.
use crate::data_value::DataValue;
use crate::ir::{
condcodes::FloatCC, condcodes::IntCC, types, AbiParam, ArgumentPurpose, ExternalName,
Inst as IRInst, InstructionData, LibCall, Opcode, Signature, Type,
};
use crate::isa::x64::abi::*;
use crate::isa::x64::inst::args::*;
use crate::isa::x64::inst::*;
use crate::isa::{x64::X64Backend, CallConv};
use crate::machinst::lower::*;
use crate::machinst::*;
use crate::result::CodegenResult;
use crate::settings::Flags;
use alloc::boxed::Box;
use alloc::vec::Vec;
use cranelift_codegen_shared::condcodes::CondCode;
use log::trace;
use regalloc::{Reg, RegClass, Writable};
use smallvec::SmallVec;
use std::convert::TryFrom;
use target_lexicon::Triple;
//=============================================================================
// Helpers for instruction lowering.
fn is_int_or_ref_ty(ty: Type) -> bool {
match ty {
types::I8 | types::I16 | types::I32 | types::I64 | types::R64 => true,
        types::R32 => panic!("shouldn't have 32-bit refs on x64"),
_ => false,
}
}
fn is_bool_ty(ty: Type) -> bool {
match ty {
types::B1 | types::B8 | types::B16 | types::B32 | types::B64 => true,
        types::R32 => panic!("shouldn't have 32-bit refs on x64"),
_ => false,
}
}
/// This is target-word-size dependent, and it excludes booleans and reftypes.
fn is_valid_atomic_transaction_ty(ty: Type) -> bool {
match ty {
types::I8 | types::I16 | types::I32 | types::I64 => true,
_ => false,
}
}
/// Returns whether the given specified `input` is a result produced by an instruction with Opcode
/// `op`.
// TODO investigate failures with checking against the result index.
fn matches_input<C: LowerCtx<I = Inst>>(
ctx: &mut C,
input: InsnInput,
op: Opcode,
) -> Option<IRInst> {
let inputs = ctx.get_input_as_source_or_const(input.insn, input.input);
inputs.inst.and_then(|(src_inst, _)| {
let data = ctx.data(src_inst);
if data.opcode() == op {
return Some(src_inst);
}
None
})
}
/// Returns whether the given specified `input` is a result produced by an instruction with any of
/// the opcodes specified in `ops`.
fn matches_input_any<C: LowerCtx<I = Inst>>(
ctx: &mut C,
input: InsnInput,
ops: &[Opcode],
) -> Option<IRInst> {
let inputs = ctx.get_input_as_source_or_const(input.insn, input.input);
inputs.inst.and_then(|(src_inst, _)| {
let data = ctx.data(src_inst);
for &op in ops {
if data.opcode() == op {
return Some(src_inst);
}
}
None
})
}
/// Emits instruction(s) to generate the given 64-bit constant value into a newly-allocated
/// temporary register, returning that register.
fn generate_constant<C: LowerCtx<I = Inst>>(ctx: &mut C, ty: Type, c: u64) -> Reg {
let from_bits = ty_bits(ty);
let masked = if from_bits < 64 {
c & ((1u64 << from_bits) - 1)
} else {
c
};
let cst_copy = ctx.alloc_tmp(Inst::rc_for_type(ty).unwrap(), ty);
for inst in Inst::gen_constant(cst_copy, masked, ty, |reg_class, ty| {
ctx.alloc_tmp(reg_class, ty)
})
.into_iter()
{
ctx.emit(inst);
}
cst_copy.to_reg()
}
/// Put the given input into a register, and mark it as used (side-effect).
fn put_input_in_reg<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput) -> Reg {
let ty = ctx.input_ty(spec.insn, spec.input);
let input = ctx.get_input_as_source_or_const(spec.insn, spec.input);
if let Some(c) = input.constant {
// Generate constants fresh at each use to minimize long-range register pressure.
generate_constant(ctx, ty, c)
} else {
ctx.put_input_in_reg(spec.insn, spec.input)
}
}
/// Determines whether a load operation (indicated by `src_insn`) can be merged
/// into the current lowering point. If so, returns the address-base source (as
/// an `InsnInput`) and an offset from that address from which to perform the
/// load.
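/// For example, a 64-bit `load` feeding an `iadd` can be folded into the ALU
/// instruction itself, yielding an `add reg, [base + offset]`-style form (an
/// illustrative pairing; the folding happens at the use site via
/// `input_to_reg_mem` below).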
fn is_mergeable_load<C: LowerCtx<I = Inst>>(
ctx: &mut C,
src_insn: IRInst,
) -> Option<(InsnInput, i32)> {
let insn_data = ctx.data(src_insn);
let inputs = ctx.num_inputs(src_insn);
if inputs != 1 {
return None;
}
let load_ty = ctx.output_ty(src_insn, 0);
if ty_bits(load_ty) < 32 {
// Narrower values are handled by ALU insts that are at least 32 bits
        // wide, which is normally OK as we ignore upper bits; but, if we
// generate, e.g., a direct-from-memory 32-bit add for a byte value and
// the byte is the last byte in a page, the extra data that we load is
// incorrectly accessed. So we only allow loads to merge for
// 32-bit-and-above widths.
return None;
}
// Just testing the opcode is enough, because the width will always match if
// the type does (and the type should match if the CLIF is properly
// constructed).
if insn_data.opcode() == Opcode::Load {
let offset = insn_data
.load_store_offset()
.expect("load should have offset");
Some((
InsnInput {
insn: src_insn,
input: 0,
},
offset,
))
} else {
None
}
}
/// Put the given input into a register or a memory operand.
/// Effectful: may mark the given input as used, when returning the register form.
fn input_to_reg_mem<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput) -> RegMem {
let inputs = ctx.get_input_as_source_or_const(spec.insn, spec.input);
if let Some(c) = inputs.constant {
// Generate constants fresh at each use to minimize long-range register pressure.
let ty = ctx.input_ty(spec.insn, spec.input);
return RegMem::reg(generate_constant(ctx, ty, c));
}
if let Some((src_insn, 0)) = inputs.inst {
if let Some((addr_input, offset)) = is_mergeable_load(ctx, src_insn) {
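            // Sink the load so it is emitted exactly once, as part of this
            // consumer's memory operand, rather than as a separate instruction.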
ctx.sink_inst(src_insn);
let amode = lower_to_amode(ctx, addr_input, offset);
return RegMem::mem(amode);
}
}
RegMem::reg(ctx.put_input_in_reg(spec.insn, spec.input))
}
/// An extension specification for `extend_input_to_reg`.
#[derive(Clone, Copy)]
enum ExtSpec {
ZeroExtendTo32,
ZeroExtendTo64,
SignExtendTo32,
SignExtendTo64,
}
/// Put the given input into a register, marking it as used, and do a zero- or signed- extension if
/// required. (This obviously causes side-effects.)
fn extend_input_to_reg<C: LowerCtx<I = Inst>>(
ctx: &mut C,
spec: InsnInput,
ext_spec: ExtSpec,
) -> Reg {
let requested_size = match ext_spec {
ExtSpec::ZeroExtendTo32 | ExtSpec::SignExtendTo32 => 32,
ExtSpec::ZeroExtendTo64 | ExtSpec::SignExtendTo64 => 64,
};
let input_size = ctx.input_ty(spec.insn, spec.input).bits();
let requested_ty = if requested_size == 32 {
types::I32
} else {
types::I64
};
let ext_mode = match (input_size, requested_size) {
(a, b) if a == b => return put_input_in_reg(ctx, spec),
(1, 8) => return put_input_in_reg(ctx, spec),
(a, b) => ExtMode::new(a, b).expect(&format!("invalid extension: {} -> {}", a, b)),
};
let src = input_to_reg_mem(ctx, spec);
let dst = ctx.alloc_tmp(RegClass::I64, requested_ty);
match ext_spec {
ExtSpec::ZeroExtendTo32 | ExtSpec::ZeroExtendTo64 => {
ctx.emit(Inst::movzx_rm_r(ext_mode, src, dst))
}
ExtSpec::SignExtendTo32 | ExtSpec::SignExtendTo64 => {
ctx.emit(Inst::movsx_rm_r(ext_mode, src, dst))
}
}
dst.to_reg()
}
/// Returns whether the given input is an immediate that can be properly sign-extended, without any
/// possible side-effect.
fn non_reg_input_to_sext_imm(input: NonRegInput, input_ty: Type) -> Option<u32> {
input.constant.and_then(|x| {
// For i64 instructions (prefixed with REX.W), require that the immediate will sign-extend
// to 64 bits. For other sizes, it doesn't matter and we can just use the plain
// constant.
if input_ty.bytes() != 8 || low32_will_sign_extend_to_64(x) {
Some(x as u32)
} else {
None
}
})
}
fn input_to_sext_imm<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput) -> Option<u32> {
let input = ctx.get_input_as_source_or_const(spec.insn, spec.input);
let input_ty = ctx.input_ty(spec.insn, spec.input);
non_reg_input_to_sext_imm(input, input_ty)
}
fn input_to_imm<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput) -> Option<u64> {
ctx.get_input_as_source_or_const(spec.insn, spec.input)
.constant
}
/// Put the given input into an immediate, a register or a memory operand.
/// Effectful: may mark the given input as used, when returning the register form.
fn input_to_reg_mem_imm<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput) -> RegMemImm {
let input = ctx.get_input_as_source_or_const(spec.insn, spec.input);
let input_ty = ctx.input_ty(spec.insn, spec.input);
match non_reg_input_to_sext_imm(input, input_ty) {
Some(x) => RegMemImm::imm(x),
None => match input_to_reg_mem(ctx, spec) {
RegMem::Reg { reg } => RegMemImm::reg(reg),
RegMem::Mem { addr } => RegMemImm::mem(addr),
},
}
}
/// Emit an instruction to insert a value `src` into a lane of `dst`.
fn emit_insert_lane<C: LowerCtx<I = Inst>>(
ctx: &mut C,
src: RegMem,
dst: Writable<Reg>,
lane: u8,
ty: Type,
) {
if !ty.is_float() {
let (sse_op, is64) = match ty.lane_bits() {
8 => (SseOpcode::Pinsrb, false),
16 => (SseOpcode::Pinsrw, false),
32 => (SseOpcode::Pinsrd, false),
64 => (SseOpcode::Pinsrd, true),
_ => panic!("Unable to insertlane for lane size: {}", ty.lane_bits()),
};
ctx.emit(Inst::xmm_rm_r_imm(sse_op, src, dst, lane, is64));
} else if ty == types::F32 {
let sse_op = SseOpcode::Insertps;
        // Insert the 32 bits of the replacement value (source lane 0, imm bits
        // 7:6) into the destination lane selected by imm bits 5:4.
let lane = 0b00_00_00_00 | lane << 4;
ctx.emit(Inst::xmm_rm_r_imm(sse_op, src, dst, lane, false));
} else if ty == types::F64 {
let sse_op = match lane {
// Move the lowest quadword in replacement to vector without changing
// the upper bits.
0 => SseOpcode::Movsd,
// Move the low 64 bits of replacement vector to the high 64 bits of the
// vector.
1 => SseOpcode::Movlhps,
_ => unreachable!(),
};
// Here we use the `xmm_rm_r` encoding because it correctly tells the register
// allocator how we are using `dst`: we are using `dst` as a `mod` whereas other
// encoding formats like `xmm_unary_rm_r` treat it as a `def`.
ctx.emit(Inst::xmm_rm_r(sse_op, src, dst));
} else {
panic!("unable to emit insertlane for type: {}", ty)
}
}
/// Emit an instruction to extract a lane of `src` into `dst`.
fn emit_extract_lane<C: LowerCtx<I = Inst>>(
ctx: &mut C,
src: Reg,
dst: Writable<Reg>,
lane: u8,
ty: Type,
) {
if !ty.is_float() {
let (sse_op, is64) = match ty.lane_bits() {
8 => (SseOpcode::Pextrb, false),
16 => (SseOpcode::Pextrw, false),
32 => (SseOpcode::Pextrd, false),
64 => (SseOpcode::Pextrd, true),
_ => panic!("Unable to extractlane for lane size: {}", ty.lane_bits()),
};
let src = RegMem::reg(src);
ctx.emit(Inst::xmm_rm_r_imm(sse_op, src, dst, lane, is64));
} else if ty == types::F32 || ty == types::F64 {
if lane == 0 {
// Remove the extractlane instruction, leaving the float where it is. The upper
// bits will remain unchanged; for correctness, this relies on Cranelift type
// checking to avoid using those bits.
ctx.emit(Inst::gen_move(dst, src, ty));
} else {
// Otherwise, shuffle the bits in `lane` to the lowest lane.
let sse_op = SseOpcode::Pshufd;
let mask = match ty {
// Move the value at `lane` to lane 0, copying existing value at lane 0 to
// other lanes. Again, this relies on Cranelift type checking to avoid
// using those bits.
types::F32 => {
assert!(lane > 0 && lane < 4);
0b00_00_00_00 | lane
}
// Move the value at `lane` 1 (we know it must be 1 because of the `if`
// statement above) to lane 0 and leave lane 1 unchanged. The Cranelift type
// checking assumption also applies here.
types::F64 => {
assert!(lane == 1);
0b11_10_11_10
}
_ => unreachable!(),
};
let src = RegMem::reg(src);
ctx.emit(Inst::xmm_rm_r_imm(sse_op, src, dst, mask, false));
}
} else {
panic!("unable to emit extractlane for type: {}", ty)
}
}
/// Emits an int comparison instruction.
///
/// Note: make sure that there are no instructions modifying the flags between a call to this
/// function and the use of the flags!
fn emit_cmp<C: LowerCtx<I = Inst>>(ctx: &mut C, insn: IRInst) {
let ty = ctx.input_ty(insn, 0);
let inputs = [InsnInput { insn, input: 0 }, InsnInput { insn, input: 1 }];
// TODO Try to commute the operands (and invert the condition) if one is an immediate.
let lhs = put_input_in_reg(ctx, inputs[0]);
let rhs = input_to_reg_mem_imm(ctx, inputs[1]);
// Cranelift's icmp semantics want to compare lhs - rhs, while Intel gives
// us dst - src at the machine instruction level, so invert operands.
ctx.emit(Inst::cmp_rmi_r(ty.bytes() as u8, rhs, lhs));
}
/// A specification for a fcmp emission.
enum FcmpSpec {
/// Normal flow.
Normal,
/// Avoid emitting Equal at all costs by inverting it to NotEqual, and indicate when that
/// happens with `InvertedEqualOrConditions`.
///
/// This is useful in contexts where it is hard/inefficient to produce a single instruction (or
    /// sequence of instructions) that checks for an "AND" combination of condition codes; see for
/// instance lowering of Select.
InvertEqual,
}
/// This explains how to interpret the results of an fcmp instruction.
enum FcmpCondResult {
/// The given condition code must be set.
Condition(CC),
/// Both condition codes must be set.
AndConditions(CC, CC),
/// Either of the conditions codes must be set.
OrConditions(CC, CC),
/// The associated spec was set to `FcmpSpec::InvertEqual` and Equal has been inverted. Either
/// of the condition codes must be set, and the user must invert meaning of analyzing the
/// condition code results. When the spec is set to `FcmpSpec::Normal`, then this case can't be
/// reached.
InvertedEqualOrConditions(CC, CC),
}
/// Emits a float comparison instruction.
///
/// Note: make sure that there are no instructions modifying the flags between a call to this
/// function and the use of the flags!
fn emit_fcmp<C: LowerCtx<I = Inst>>(
ctx: &mut C,
insn: IRInst,
mut cond_code: FloatCC,
spec: FcmpSpec,
) -> FcmpCondResult {
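    // `ucomiss`/`ucomisd` set CF for both "less than" and "unordered" results,
    // so no single condition code separates the Less* outcomes from a NaN
    // input. Reversing the condition (and flipping the operands below) turns
    // them into Greater* forms, which map onto "above"-style condition codes
    // that are false for unordered inputs.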
let (flip_operands, inverted_equal) = match cond_code {
FloatCC::LessThan
| FloatCC::LessThanOrEqual
| FloatCC::UnorderedOrGreaterThan
| FloatCC::UnorderedOrGreaterThanOrEqual => {
cond_code = cond_code.reverse();
(true, false)
}
FloatCC::Equal => {
let inverted_equal = match spec {
FcmpSpec::Normal => false,
FcmpSpec::InvertEqual => {
cond_code = FloatCC::NotEqual; // same as .inverse()
true
}
};
(false, inverted_equal)
}
_ => (false, false),
};
// The only valid CC constructed with `from_floatcc` can be put in the flag
// register with a direct float comparison; do this here.
let op = match ctx.input_ty(insn, 0) {
types::F32 => SseOpcode::Ucomiss,
types::F64 => SseOpcode::Ucomisd,
_ => panic!("Bad input type to Fcmp"),
};
let inputs = &[InsnInput { insn, input: 0 }, InsnInput { insn, input: 1 }];
let (lhs_input, rhs_input) = if flip_operands {
(inputs[1], inputs[0])
} else {
(inputs[0], inputs[1])
};
let lhs = put_input_in_reg(ctx, lhs_input);
let rhs = input_to_reg_mem(ctx, rhs_input);
ctx.emit(Inst::xmm_cmp_rm_r(op, rhs, lhs));
let cond_result = match cond_code {
FloatCC::Equal => FcmpCondResult::AndConditions(CC::NP, CC::Z),
FloatCC::NotEqual if inverted_equal => {
FcmpCondResult::InvertedEqualOrConditions(CC::P, CC::NZ)
}
FloatCC::NotEqual if !inverted_equal => FcmpCondResult::OrConditions(CC::P, CC::NZ),
_ => FcmpCondResult::Condition(CC::from_floatcc(cond_code)),
};
cond_result
}
fn make_libcall_sig<C: LowerCtx<I = Inst>>(
ctx: &mut C,
insn: IRInst,
call_conv: CallConv,
ptr_ty: Type,
) -> Signature {
let mut sig = Signature::new(call_conv);
for i in 0..ctx.num_inputs(insn) {
sig.params.push(AbiParam::new(ctx.input_ty(insn, i)));
}
for i in 0..ctx.num_outputs(insn) {
sig.returns.push(AbiParam::new(ctx.output_ty(insn, i)));
}
if call_conv.extends_baldrdash() {
// Adds the special VMContext parameter to the signature.
sig.params
.push(AbiParam::special(ptr_ty, ArgumentPurpose::VMContext));
}
sig
}
fn emit_vm_call<C: LowerCtx<I = Inst>>(
ctx: &mut C,
flags: &Flags,
triple: &Triple,
libcall: LibCall,
insn: IRInst,
inputs: SmallVec<[InsnInput; 4]>,
outputs: SmallVec<[InsnOutput; 2]>,
) -> CodegenResult<()> {
let extname = ExternalName::LibCall(libcall);
let dist = if flags.use_colocated_libcalls() {
RelocDistance::Near
} else {
RelocDistance::Far
};
// TODO avoid recreating signatures for every single Libcall function.
let call_conv = CallConv::for_libcall(flags, CallConv::triple_default(triple));
let sig = make_libcall_sig(ctx, insn, call_conv, types::I64);
let caller_conv = ctx.abi().call_conv();
let mut abi = X64ABICaller::from_func(&sig, &extname, dist, caller_conv)?;
abi.emit_stack_pre_adjust(ctx);
let vm_context = if call_conv.extends_baldrdash() { 1 } else { 0 };
assert_eq!(inputs.len() + vm_context, abi.num_args());
for (i, input) in inputs.iter().enumerate() {
let arg_reg = put_input_in_reg(ctx, *input);
abi.emit_copy_reg_to_arg(ctx, i, arg_reg);
}
if call_conv.extends_baldrdash() {
let vm_context_vreg = ctx
.get_vm_context()
.expect("should have a VMContext to pass to libcall funcs");
abi.emit_copy_reg_to_arg(ctx, inputs.len(), vm_context_vreg);
}
abi.emit_call(ctx);
for (i, output) in outputs.iter().enumerate() {
let retval_reg = get_output_reg(ctx, *output);
abi.emit_copy_retval_to_reg(ctx, i, retval_reg);
}
abi.emit_stack_post_adjust(ctx);
Ok(())
}
/// Returns whether the given input is a shift by a constant value less than or equal to 3.
/// The goal is to embed it within an address mode.
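/// For example, `base + (index << 3)` maps directly onto the `[base + index*8]`
/// addressing form, since x64 scale factors of 1, 2, 4 and 8 correspond to
/// shift amounts 0 through 3.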
fn matches_small_constant_shift<C: LowerCtx<I = Inst>>(
ctx: &mut C,
spec: InsnInput,
) -> Option<(InsnInput, u8)> {
matches_input(ctx, spec, Opcode::Ishl).and_then(|shift| {
match input_to_imm(
ctx,
InsnInput {
insn: shift,
input: 1,
},
) {
Some(shift_amt) if shift_amt <= 3 => Some((
InsnInput {
insn: shift,
input: 0,
},
shift_amt as u8,
)),
_ => None,
}
})
}
/// Lowers an instruction to one of the x86 addressing modes.
///
/// Note: the 32-bit offset in Cranelift has to be sign-extended, which matches x86's behavior.
fn lower_to_amode<C: LowerCtx<I = Inst>>(ctx: &mut C, spec: InsnInput, offset: i32) -> Amode {
let flags = ctx
.memflags(spec.insn)
.expect("Instruction with amode should have memflags");
    // We now have either an add that we must materialize or some other input,
    // plus the final offset.
if let Some(add) = matches_input(ctx, spec, Opcode::Iadd) {
debug_assert_eq!(ctx.output_ty(add, 0), types::I64);
let add_inputs = &[
InsnInput {
insn: add,
input: 0,
},
InsnInput {
insn: add,
input: 1,
},
];
// TODO heap_addr legalization generates a uext64 *after* the shift, so these optimizations
// aren't happening in the wasm case. We could do better, given some range analysis.
let (base, index, shift) = if let Some((shift_input, shift_amt)) =
matches_small_constant_shift(ctx, add_inputs[0])
{
(
put_input_in_reg(ctx, add_inputs[1]),
put_input_in_reg(ctx, shift_input),
shift_amt,
)
} else if let Some((shift_input, shift_amt)) =
matches_small_constant_shift(ctx, add_inputs[1])
{
(
put_input_in_reg(ctx, add_inputs[0]),
put_input_in_reg(ctx, shift_input),
shift_amt,
)
} else {
for i in 0..=1 {
// Try to pierce through uextend.
if let Some(uextend) = matches_input(
ctx,
InsnInput {
insn: add,
input: i,
},
Opcode::Uextend,
) {
if let Some(cst) = ctx.get_input_as_source_or_const(uextend, 0).constant {
// Zero the upper bits.
let input_size = ctx.input_ty(uextend, 0).bits() as u64;
let shift: u64 = 64 - input_size;
let uext_cst: u64 = (cst << shift) >> shift;
let final_offset = (offset as i64).wrapping_add(uext_cst as i64);
if low32_will_sign_extend_to_64(final_offset as u64) {
let base = put_input_in_reg(ctx, add_inputs[1 - i]);
return Amode::imm_reg(final_offset as u32, base).with_flags(flags);
}
}
}
// If it's a constant, add it directly!
if let Some(cst) = ctx.get_input_as_source_or_const(add, i).constant {
let final_offset = (offset as i64).wrapping_add(cst as i64);
if low32_will_sign_extend_to_64(final_offset as u64) {
let base = put_input_in_reg(ctx, add_inputs[1 - i]);
return Amode::imm_reg(final_offset as u32, base).with_flags(flags);
}
}
}
(
put_input_in_reg(ctx, add_inputs[0]),
put_input_in_reg(ctx, add_inputs[1]),
0,
)
};
return Amode::imm_reg_reg_shift(offset as u32, base, index, shift).with_flags(flags);
}
let input = put_input_in_reg(ctx, spec);
Amode::imm_reg(offset as u32, input).with_flags(flags)
}
//=============================================================================
// Top-level instruction lowering entry point, for one instruction.
/// Actually codegen an instruction's results into registers.
fn lower_insn_to_regs<C: LowerCtx<I = Inst>>(
ctx: &mut C,
insn: IRInst,
flags: &Flags,
triple: &Triple,
) -> CodegenResult<()> {
let op = ctx.data(insn).opcode();
let inputs: SmallVec<[InsnInput; 4]> = (0..ctx.num_inputs(insn))
.map(|i| InsnInput { insn, input: i })
.collect();
let outputs: SmallVec<[InsnOutput; 2]> = (0..ctx.num_outputs(insn))
.map(|i| InsnOutput { insn, output: i })
.collect();
let ty = if outputs.len() > 0 {
Some(ctx.output_ty(insn, 0))
} else {
None
};
match op {
Opcode::Iconst | Opcode::Bconst | Opcode::Null => {
let value = ctx
.get_constant(insn)
.expect("constant value for iconst et al");
let dst = get_output_reg(ctx, outputs[0]);
for inst in Inst::gen_constant(dst, value, ty.unwrap(), |reg_class, ty| {
ctx.alloc_tmp(reg_class, ty)
}) {
ctx.emit(inst);
}
}
Opcode::Iadd
| Opcode::IaddIfcout
| Opcode::SaddSat
| Opcode::UaddSat
| Opcode::Isub
| Opcode::SsubSat
| Opcode::UsubSat
| Opcode::Imul
| Opcode::AvgRound
| Opcode::Band
| Opcode::Bor
| Opcode::Bxor => {
let ty = ty.unwrap();
if ty.lane_count() > 1 {
let sse_op = match op {
Opcode::Iadd => match ty {
types::I8X16 => SseOpcode::Paddb,
types::I16X8 => SseOpcode::Paddw,
types::I32X4 => SseOpcode::Paddd,
types::I64X2 => SseOpcode::Paddq,
_ => panic!("Unsupported type for packed iadd instruction: {}", ty),
},
Opcode::SaddSat => match ty {
types::I8X16 => SseOpcode::Paddsb,
types::I16X8 => SseOpcode::Paddsw,
_ => panic!("Unsupported type for packed sadd_sat instruction: {}", ty),
},
Opcode::UaddSat => match ty {
types::I8X16 => SseOpcode::Paddusb,
types::I16X8 => SseOpcode::Paddusw,
_ => panic!("Unsupported type for packed uadd_sat instruction: {}", ty),
},
Opcode::Isub => match ty {
types::I8X16 => SseOpcode::Psubb,
types::I16X8 => SseOpcode::Psubw,
types::I32X4 => SseOpcode::Psubd,
types::I64X2 => SseOpcode::Psubq,
_ => panic!("Unsupported type for packed isub instruction: {}", ty),
},
Opcode::SsubSat => match ty {
types::I8X16 => SseOpcode::Psubsb,
types::I16X8 => SseOpcode::Psubsw,
_ => panic!("Unsupported type for packed ssub_sat instruction: {}", ty),
},
Opcode::UsubSat => match ty {
types::I8X16 => SseOpcode::Psubusb,
types::I16X8 => SseOpcode::Psubusw,
_ => panic!("Unsupported type for packed usub_sat instruction: {}", ty),
},
Opcode::Imul => match ty {
types::I16X8 => SseOpcode::Pmullw,
types::I32X4 => SseOpcode::Pmulld,
types::I64X2 => {
// Note for I64X2 we describe a lane A as being composed of a
// 32-bit upper half "Ah" and a 32-bit lower half "Al".
                            // The 32-bit longhand multiplication can then be written as:
// Ah Al
// * Bh Bl
// -----
// Al * Bl
// + (Ah * Bl) << 32
// + (Al * Bh) << 32
//
// So for each lane we will compute:
// A * B = (Al * Bl) + ((Ah * Bl) + (Al * Bh)) << 32
//
// Note, the algorithm will use pmuldq which operates directly on
// the lower 32-bit (Al or Bl) of a lane and writes the result
// to the full 64-bits of the lane of the destination. For this
// reason we don't need shifts to isolate the lower 32-bits, however
// we will need to use shifts to isolate the high 32-bits when doing
// calculations, i.e. Ah == A >> 32
//
// The full sequence then is as follows:
// A' = A
// A' = A' >> 32
// A' = Ah' * Bl
// B' = B
// B' = B' >> 32
// B' = Bh' * Al
// B' = B' + A'
// B' = B' << 32
// A' = A
// A' = Al' * Bl
// A' = A' + B'
// dst = A'
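                            // As a quick sanity check of the identity (mod 2^64), take
                            // A = 3*2^32 + 2 and B = 5*2^32 + 7: then Al*Bl = 14 and
                            // Ah*Bl + Al*Bh = 21 + 10 = 31, so A*B = 31*2^32 + 14,
                            // exactly what the sequence below computes lane-wise.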
// Get inputs rhs=A and lhs=B and the dst register
let lhs = put_input_in_reg(ctx, inputs[0]);
let rhs = put_input_in_reg(ctx, inputs[1]);
let dst = get_output_reg(ctx, outputs[0]);
// A' = A
let rhs_1 = ctx.alloc_tmp(RegClass::V128, types::I64X2);
ctx.emit(Inst::gen_move(rhs_1, rhs, ty));
// A' = A' >> 32
// A' = Ah' * Bl
ctx.emit(Inst::xmm_rmi_reg(
SseOpcode::Psrlq,
RegMemImm::imm(32),
rhs_1,
));
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Pmuludq,
RegMem::reg(lhs.clone()),
rhs_1,
));
// B' = B
let lhs_1 = ctx.alloc_tmp(RegClass::V128, types::I64X2);
ctx.emit(Inst::gen_move(lhs_1, lhs, ty));
// B' = B' >> 32
// B' = Bh' * Al
ctx.emit(Inst::xmm_rmi_reg(
SseOpcode::Psrlq,
RegMemImm::imm(32),
lhs_1,
));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pmuludq, RegMem::reg(rhs), lhs_1));
// B' = B' + A'
// B' = B' << 32
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Paddq,
RegMem::reg(rhs_1.to_reg()),
lhs_1,
));
ctx.emit(Inst::xmm_rmi_reg(
SseOpcode::Psllq,
RegMemImm::imm(32),
lhs_1,
));
// A' = A
// A' = Al' * Bl
// A' = A' + B'
// dst = A'
ctx.emit(Inst::gen_move(rhs_1, rhs, ty));
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Pmuludq,
RegMem::reg(lhs.clone()),
rhs_1,
));
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Paddq,
RegMem::reg(lhs_1.to_reg()),
rhs_1,
));
ctx.emit(Inst::gen_move(dst, rhs_1.to_reg(), ty));
return Ok(());
}
_ => panic!("Unsupported type for packed imul instruction: {}", ty),
},
Opcode::AvgRound => match ty {
types::I8X16 => SseOpcode::Pavgb,
types::I16X8 => SseOpcode::Pavgw,
_ => panic!("Unsupported type for packed avg_round instruction: {}", ty),
},
Opcode::Band => match ty {
types::F32X4 => SseOpcode::Andps,
types::F64X2 => SseOpcode::Andpd,
_ => SseOpcode::Pand,
},
Opcode::Bor => match ty {
types::F32X4 => SseOpcode::Orps,
types::F64X2 => SseOpcode::Orpd,
_ => SseOpcode::Por,
},
Opcode::Bxor => match ty {
types::F32X4 => SseOpcode::Xorps,
types::F64X2 => SseOpcode::Xorpd,
_ => SseOpcode::Pxor,
},
_ => panic!("Unsupported packed instruction: {}", op),
};
let lhs = put_input_in_reg(ctx, inputs[0]);
let rhs = input_to_reg_mem(ctx, inputs[1]);
let dst = get_output_reg(ctx, outputs[0]);
// Move the `lhs` to the same register as `dst`.
ctx.emit(Inst::gen_move(dst, lhs, ty));
ctx.emit(Inst::xmm_rm_r(sse_op, rhs, dst));
} else {
let is_64 = ty == types::I64;
let alu_op = match op {
Opcode::Iadd | Opcode::IaddIfcout => AluRmiROpcode::Add,
Opcode::Isub => AluRmiROpcode::Sub,
Opcode::Imul => AluRmiROpcode::Mul,
Opcode::Band => AluRmiROpcode::And,
Opcode::Bor => AluRmiROpcode::Or,
Opcode::Bxor => AluRmiROpcode::Xor,
_ => unreachable!(),
};
let (lhs, rhs) = match op {
Opcode::Iadd
| Opcode::IaddIfcout
| Opcode::Imul
| Opcode::Band
| Opcode::Bor
| Opcode::Bxor => {
// For commutative operations, try to commute operands if one is an
// immediate or direct memory reference. Do so by converting LHS to RMI; if
// reg, then always convert RHS to RMI; else, use LHS as RMI and convert
// RHS to reg.
let lhs = input_to_reg_mem_imm(ctx, inputs[0]);
if let RegMemImm::Reg { reg: lhs_reg } = lhs {
let rhs = input_to_reg_mem_imm(ctx, inputs[1]);
(lhs_reg, rhs)
} else {
let rhs_reg = put_input_in_reg(ctx, inputs[1]);
(rhs_reg, lhs)
}
}
Opcode::Isub => (
put_input_in_reg(ctx, inputs[0]),
input_to_reg_mem_imm(ctx, inputs[1]),
),
_ => unreachable!(),
};
let dst = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::mov_r_r(true, lhs, dst));
ctx.emit(Inst::alu_rmi_r(is_64, alu_op, rhs, dst));
}
}
Opcode::BandNot => {
let ty = ty.unwrap();
debug_assert!(ty.is_vector() && ty.bytes() == 16);
let lhs = input_to_reg_mem(ctx, inputs[0]);
let rhs = put_input_in_reg(ctx, inputs[1]);
let dst = get_output_reg(ctx, outputs[0]);
let sse_op = match ty {
types::F32X4 => SseOpcode::Andnps,
types::F64X2 => SseOpcode::Andnpd,
_ => SseOpcode::Pandn,
};
// Note the flipping of operands: the `rhs` operand is used as the destination instead
// of the `lhs` as in the other bit operations above (e.g. `band`).
ctx.emit(Inst::gen_move(dst, rhs, ty));
ctx.emit(Inst::xmm_rm_r(sse_op, lhs, dst));
}
Opcode::Iabs => {
let src = input_to_reg_mem(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
let ty = ty.unwrap();
if ty.is_vector() {
let opcode = match ty {
types::I8X16 => SseOpcode::Pabsb,
types::I16X8 => SseOpcode::Pabsw,
types::I32X4 => SseOpcode::Pabsd,
_ => panic!("Unsupported type for packed iabs instruction: {}", ty),
};
ctx.emit(Inst::xmm_unary_rm_r(opcode, src, dst));
} else {
unimplemented!("iabs is unimplemented for non-vector type: {}", ty);
}
}
Opcode::Imax | Opcode::Umax | Opcode::Imin | Opcode::Umin => {
let lhs = put_input_in_reg(ctx, inputs[0]);
let rhs = input_to_reg_mem(ctx, inputs[1]);
let dst = get_output_reg(ctx, outputs[0]);
let ty = ty.unwrap();
if ty.is_vector() {
let sse_op = match op {
Opcode::Imax => match ty {
types::I8X16 => SseOpcode::Pmaxsb,
types::I16X8 => SseOpcode::Pmaxsw,
types::I32X4 => SseOpcode::Pmaxsd,
_ => panic!("Unsupported type for packed {} instruction: {}", op, ty),
},
Opcode::Umax => match ty {
types::I8X16 => SseOpcode::Pmaxub,
types::I16X8 => SseOpcode::Pmaxuw,
types::I32X4 => SseOpcode::Pmaxud,
_ => panic!("Unsupported type for packed {} instruction: {}", op, ty),
},
Opcode::Imin => match ty {
types::I8X16 => SseOpcode::Pminsb,
types::I16X8 => SseOpcode::Pminsw,
types::I32X4 => SseOpcode::Pminsd,
_ => panic!("Unsupported type for packed {} instruction: {}", op, ty),
},
Opcode::Umin => match ty {
types::I8X16 => SseOpcode::Pminub,
types::I16X8 => SseOpcode::Pminuw,
types::I32X4 => SseOpcode::Pminud,
_ => panic!("Unsupported type for packed {} instruction: {}", op, ty),
},
_ => unreachable!("This is a bug: the external and internal `match op` should be over the same opcodes."),
};
// Move the `lhs` to the same register as `dst`.
ctx.emit(Inst::gen_move(dst, lhs, ty));
ctx.emit(Inst::xmm_rm_r(sse_op, rhs, dst));
} else {
panic!("Unsupported type for {} instruction: {}", op, ty);
}
}
Opcode::Bnot => {
let ty = ty.unwrap();
let size = ty.bytes() as u8;
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::gen_move(dst, src, ty));
if ty.is_vector() {
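// Vector NOT has no single SSE instruction: materialize all 1s in a temporary
// (comparing a register with itself for equality sets every bit), then XOR the
// temporary into `dst`.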
let tmp = ctx.alloc_tmp(RegClass::V128, ty);
ctx.emit(Inst::equals(ty, RegMem::from(tmp), tmp));
ctx.emit(Inst::xor(ty, RegMem::from(tmp), dst));
} else if ty.is_bool() {
unimplemented!("bool bnot")
} else {
ctx.emit(Inst::not(size, dst));
}
}
Opcode::Bitselect => {
let ty = ty.unwrap();
let condition = put_input_in_reg(ctx, inputs[0]);
let if_true = put_input_in_reg(ctx, inputs[1]);
let if_false = input_to_reg_mem(ctx, inputs[2]);
let dst = get_output_reg(ctx, outputs[0]);
if ty.is_vector() {
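// Lower as `dst = (if_true & condition) | (if_false & !condition)`: tmp1 holds the
// AND half, tmp2 the AND-NOT half, and a final OR merges them.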
let tmp1 = ctx.alloc_tmp(RegClass::V128, ty);
ctx.emit(Inst::gen_move(tmp1, if_true, ty));
ctx.emit(Inst::and(ty, RegMem::reg(condition.clone()), tmp1));
let tmp2 = ctx.alloc_tmp(RegClass::V128, ty);
ctx.emit(Inst::gen_move(tmp2, condition, ty));
ctx.emit(Inst::and_not(ty, if_false, tmp2));
ctx.emit(Inst::gen_move(dst, tmp2.to_reg(), ty));
ctx.emit(Inst::or(ty, RegMem::from(tmp1), dst));
} else {
unimplemented!("scalar bitselect")
}
}
Opcode::Ishl | Opcode::Ushr | Opcode::Sshr | Opcode::Rotl | Opcode::Rotr => {
let dst_ty = ctx.output_ty(insn, 0);
debug_assert_eq!(ctx.input_ty(insn, 0), dst_ty);
if !dst_ty.is_vector() {
// Scalar shifts on x86 have various encodings:
// - shift by one bit, e.g. `SAL r/m8, 1` (not used here)
// - shift by an immediate amount, e.g. `SAL r/m8, imm8`
// - shift by a dynamic amount but only from the CL register, e.g. `SAL r/m8, CL`.
// This implementation uses the last two encoding methods.
let (size, lhs) = match dst_ty {
types::I8 | types::I16 => match op {
Opcode::Ishl => (4, put_input_in_reg(ctx, inputs[0])),
Opcode::Ushr => (
4,
extend_input_to_reg(ctx, inputs[0], ExtSpec::ZeroExtendTo32),
),
Opcode::Sshr => (
4,
extend_input_to_reg(ctx, inputs[0], ExtSpec::SignExtendTo32),
),
Opcode::Rotl | Opcode::Rotr => {
(dst_ty.bytes() as u8, put_input_in_reg(ctx, inputs[0]))
}
_ => unreachable!(),
},
types::I32 | types::I64 => {
(dst_ty.bytes() as u8, put_input_in_reg(ctx, inputs[0]))
}
_ => unreachable!("unhandled output type for shift/rotates: {}", dst_ty),
};
let (count, rhs) =
if let Some(cst) = ctx.get_input_as_source_or_const(insn, 1).constant {
// Mask count, according to Cranelift's semantics.
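// E.g. an i16 shifted by 20 is actually shifted by 20 & 15 == 4.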
let cst = (cst as u8) & (dst_ty.bits() as u8 - 1);
(Some(cst), None)
} else {
(None, Some(put_input_in_reg(ctx, inputs[1])))
};
let dst = get_output_reg(ctx, outputs[0]);
let shift_kind = match op {
Opcode::Ishl => ShiftKind::ShiftLeft,
Opcode::Ushr => ShiftKind::ShiftRightLogical,
Opcode::Sshr => ShiftKind::ShiftRightArithmetic,
Opcode::Rotl => ShiftKind::RotateLeft,
Opcode::Rotr => ShiftKind::RotateRight,
_ => unreachable!(),
};
let w_rcx = Writable::from_reg(regs::rcx());
ctx.emit(Inst::mov_r_r(true, lhs, dst));
if count.is_none() {
ctx.emit(Inst::mov_r_r(true, rhs.unwrap(), w_rcx));
}
ctx.emit(Inst::shift_r(size, shift_kind, count, dst));
} else if dst_ty == types::I8X16 && (op == Opcode::Ishl || op == Opcode::Ushr) {
// Since the x86 instruction set does not have any 8x16 shift instructions (even in higher feature sets
// like AVX), we lower the `ishl.i8x16` and `ushr.i8x16` to a sequence of instructions. The basic idea,
// whether the `shift_by` amount is an immediate or not, is to use a 16x8 shift and then mask off the
// incorrect bits to 0s (see below for handling signs in `sshr.i8x16`).
let src = put_input_in_reg(ctx, inputs[0]);
let shift_by = input_to_reg_mem_imm(ctx, inputs[1]);
let dst = get_output_reg(ctx, outputs[0]);
// If necessary, move the shift index into the lowest bits of a vector register.
let shift_by_moved = match &shift_by {
RegMemImm::Imm { .. } => shift_by.clone(),
RegMemImm::Reg { reg } => {
let tmp_shift_by = ctx.alloc_tmp(RegClass::V128, dst_ty);
ctx.emit(Inst::gpr_to_xmm(
SseOpcode::Movd,
RegMem::reg(*reg),
OperandSize::Size32,
tmp_shift_by,
));
RegMemImm::reg(tmp_shift_by.to_reg())
}
RegMemImm::Mem { .. } => unimplemented!("load shift amount to XMM register"),
};
// Shift `src` using 16x8. Unfortunately, a 16x8 shift will only be correct for half of the lanes;
// the others must be fixed up with the mask below.
let shift_opcode = match op {
Opcode::Ishl => SseOpcode::Psllw,
Opcode::Ushr => SseOpcode::Psrlw,
_ => unimplemented!("{} is not implemented for type {}", op, dst_ty),
};
ctx.emit(Inst::gen_move(dst, src, dst_ty));
ctx.emit(Inst::xmm_rmi_reg(shift_opcode, shift_by_moved, dst));
// Choose which mask to use to fixup the shifted lanes. Since we must use a 16x8 shift, we need to fix
// up the bits that migrate from one half of the lane to the other. Each 16-byte mask (which rustfmt
// forces to multiple lines) is indexed by the shift amount: e.g. if we shift right by 0 (no movement),
// we want to retain all the bits so we mask with `0xff`; if we shift right by 1, we want to retain all
// bits except the MSB so we mask with `0x7f`; etc.
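// As a concrete illustration: shifting the adjacent i8 lanes [0x81, 0x01] right by 1
// with a 16x8 PSRLW treats them as the 16-bit value 0x0181 and produces 0x00C0; the
// low byte's MSB (0x80) leaked in from the high byte. Masking with the shift-by-1 row
// (0x7f per byte) clears it, yielding the correct [0x40, 0x00].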
const USHR_MASKS: [u8; 128] = [
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f,
0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x1f, 0x1f, 0x1f, 0x1f,
0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x0f,
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
0x0f, 0x0f, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
0x07, 0x07, 0x07, 0x07, 0x07, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
];
const SHL_MASKS: [u8; 128] = [
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe,
0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc,
0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xf8, 0xf8, 0xf8, 0xf8,
0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf0,
0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
0xf0, 0xf0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xe0,
0xe0, 0xe0, 0xe0, 0xe0, 0xe0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0,
0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
];
let mask = match op {
Opcode::Ishl => &SHL_MASKS,
Opcode::Ushr => &USHR_MASKS,
_ => unimplemented!("{} is not implemented for type {}", op, dst_ty),
};
// Figure out the address of the shift mask.
let mask_address = match shift_by {
RegMemImm::Imm { simm32 } => {
// When the shift amount is known, we can statically (i.e. at compile time) determine the mask to
// use and only emit that.
debug_assert!(simm32 < 8);
let mask_offset = simm32 as usize * 16;
let mask_constant = ctx.use_constant(VCodeConstantData::WellKnown(
&mask[mask_offset..mask_offset + 16],
));
SyntheticAmode::ConstantOffset(mask_constant)
}
RegMemImm::Reg { reg } => {
// Otherwise, we must emit the entire mask table and dynamically (i.e. at run time) find the correct
// mask offset in the table. We do this using LEA to find the base address of the mask table and then
// complex addressing to offset to the right mask: `base_address + shift_by * 16` (each mask row is 16 bytes).
let base_mask_address = ctx.alloc_tmp(RegClass::I64, types::I64);
let mask_offset = ctx.alloc_tmp(RegClass::I64, types::I64);
let mask_constant = ctx.use_constant(VCodeConstantData::WellKnown(mask));
ctx.emit(Inst::lea(
SyntheticAmode::ConstantOffset(mask_constant),
base_mask_address,
));
ctx.emit(Inst::gen_move(mask_offset, reg, types::I64));
ctx.emit(Inst::shift_r(8, ShiftKind::ShiftLeft, Some(4), mask_offset));
Amode::imm_reg_reg_shift(
0,
base_mask_address.to_reg(),
mask_offset.to_reg(),
0,
)
.into()
}
RegMemImm::Mem { addr: _ } => unimplemented!("load mask address"),
};
// Load the mask into a temporary register, `mask_value`.
let mask_value = ctx.alloc_tmp(RegClass::V128, dst_ty);
ctx.emit(Inst::load(dst_ty, mask_address, mask_value, ExtKind::None));
// Remove the bits that would have disappeared in a true 8x16 shift. TODO in the future,
// this AND instruction could be coalesced with the load above.
let sse_op = match dst_ty {
types::F32X4 => SseOpcode::Andps,
types::F64X2 => SseOpcode::Andpd,
_ => SseOpcode::Pand,
};
ctx.emit(Inst::xmm_rm_r(sse_op, RegMem::from(mask_value), dst));
} else if dst_ty == types::I8X16 && op == Opcode::Sshr {
// Since the x86 instruction set does not have an 8x16 shift instruction and the approach used for
// `ishl` and `ushr` cannot be easily used (the masks do not preserve the sign), we use a different
// approach here: separate the low and high lanes, shift them separately, and merge them into the final
// result. Visually, this looks like the following, where `src.i8x16 = [s0, s1, ..., s15]`:
// low.i16x8 = [(s0, s0), (s1, s1), ..., (s7, s7)]
// shifted_low.i16x8 = shift each lane of `low`
// high.i16x8 = [(s8, s8), (s9, s9), ..., (s15, s15)]
// shifted_high.i16x8 = shift each lane of `high`
// dst.i8x16 = [s0'', s1'', ..., s15'']
let src = put_input_in_reg(ctx, inputs[0]);
let shift_by = input_to_reg_mem_imm(ctx, inputs[1]);
let shift_by_ty = ctx.input_ty(insn, 1);
let dst = get_output_reg(ctx, outputs[0]);
// In order for PACKSSWB later to only use the high byte of each 16x8 lane, we shift right an extra 8
// bits, relying on PSRAW to fill in the upper bits appropriately.
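// For example, with lane s0 = 0x83 (-125) and shift amount 2: PUNPCKLBW duplicates s0
// into a 16-bit lane as 0x8383; PSRAW by 2 + 8 = 10 arithmetic-shifts that to 0xFFE0,
// i.e. the sign-extended s0 >> 2 (-32) sitting in the low byte; PACKSSWB then
// saturates each word back to a byte, recovering 0xE0 (-32).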
let bigger_shift_by = match shift_by {
// When we know the shift amount at compile time, we add the extra shift amount statically.
RegMemImm::Imm { simm32 } => RegMemImm::imm(simm32 + 8),
// Otherwise we add instructions to add the extra shift amount and move the value into an XMM
// register.
RegMemImm::Reg { reg } => {
let bigger_shift_by_gpr = ctx.alloc_tmp(RegClass::I64, shift_by_ty);
ctx.emit(Inst::mov_r_r(true, reg, bigger_shift_by_gpr));
let is_64 = shift_by_ty == types::I64;
let imm = RegMemImm::imm(8);
ctx.emit(Inst::alu_rmi_r(
is_64,
AluRmiROpcode::Add,
imm,
bigger_shift_by_gpr,
));
let bigger_shift_by_xmm = ctx.alloc_tmp(RegClass::V128, dst_ty);
ctx.emit(Inst::gpr_to_xmm(
SseOpcode::Movd,
RegMem::from(bigger_shift_by_gpr),
OperandSize::Size32,
bigger_shift_by_xmm,
));
RegMemImm::reg(bigger_shift_by_xmm.to_reg())
}
RegMemImm::Mem { .. } => unimplemented!("load shift amount to XMM register"),
};
// Unpack and shift the lower lanes of `src` into the `dst` register.
ctx.emit(Inst::gen_move(dst, src, dst_ty));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Punpcklbw, RegMem::from(dst), dst));
ctx.emit(Inst::xmm_rmi_reg(
SseOpcode::Psraw,
bigger_shift_by.clone(),
dst,
));
// Unpack and shift the upper lanes of `src` into a temporary register, `upper_lanes`.
let upper_lanes = ctx.alloc_tmp(RegClass::V128, dst_ty);
ctx.emit(Inst::gen_move(upper_lanes, src, dst_ty));
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Punpckhbw,
RegMem::from(upper_lanes),
upper_lanes,
));
ctx.emit(Inst::xmm_rmi_reg(
SseOpcode::Psraw,
bigger_shift_by,
upper_lanes,
));
// Merge the upper and lower shifted lanes into `dst`.
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Packsswb,
RegMem::from(upper_lanes),
dst,
));
} else if dst_ty == types::I64X2 && op == Opcode::Sshr {
// The `sshr.i64x2` CLIF instruction has no single x86 instruction in the older feature sets; newer ones
// like AVX512VL and AVX512F include VPSRAQ, a 128-bit instruction that would fit here, but this backend
// does not currently have support for EVEX encodings (TODO when EVEX support is available, add an
// alternate lowering here). To remedy this, we extract each 64-bit lane to a GPR, shift each using a
// scalar instruction, and insert the shifted values back in the `dst` XMM register.
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::gen_move(dst, src, dst_ty));
// Extract the upper and lower lanes into temporary GPRs.
let lower_lane = ctx.alloc_tmp(RegClass::I64, types::I64);
emit_extract_lane(ctx, src, lower_lane, 0, types::I64);
let upper_lane = ctx.alloc_tmp(RegClass::I64, types::I64);
emit_extract_lane(ctx, src, upper_lane, 1, types::I64);
// Shift each value.
let mut shift = |reg: Writable<Reg>| {
let kind = ShiftKind::ShiftRightArithmetic;
if let Some(shift_by) = ctx.get_input_as_source_or_const(insn, 1).constant {
// Mask the shift amount according to Cranelift's semantics.
let shift_by = (shift_by as u8) & (types::I64.bits() as u8 - 1);
ctx.emit(Inst::shift_r(8, kind, Some(shift_by), reg));
} else {
let dynamic_shift_by = put_input_in_reg(ctx, inputs[1]);
let w_rcx = Writable::from_reg(regs::rcx());
ctx.emit(Inst::mov_r_r(true, dynamic_shift_by, w_rcx));
ctx.emit(Inst::shift_r(8, kind, None, reg));
};
};
shift(lower_lane);
shift(upper_lane);
// Insert the scalar values back into the `dst` vector.
emit_insert_lane(ctx, RegMem::from(lower_lane), dst, 0, types::I64);
emit_insert_lane(ctx, RegMem::from(upper_lane), dst, 1, types::I64);
} else {
// For the remaining packed shifts not covered above, x86 has implementations that can either:
// - shift using an immediate
// - shift using a dynamic value given in the lower bits of another XMM register.
let src = put_input_in_reg(ctx, inputs[0]);
let shift_by = input_to_reg_mem_imm(ctx, inputs[1]);
let dst = get_output_reg(ctx, outputs[0]);
let sse_op = match dst_ty {
types::I16X8 => match op {
Opcode::Ishl => SseOpcode::Psllw,
Opcode::Ushr => SseOpcode::Psrlw,
Opcode::Sshr => SseOpcode::Psraw,
_ => unimplemented!("{} is not implemented for type {}", op, dst_ty),
},
types::I32X4 => match op {
Opcode::Ishl => SseOpcode::Pslld,
Opcode::Ushr => SseOpcode::Psrld,
Opcode::Sshr => SseOpcode::Psrad,
_ => unimplemented!("{} is not implemented for type {}", op, dst_ty),
},
types::I64X2 => match op {
Opcode::Ishl => SseOpcode::Psllq,
Opcode::Ushr => SseOpcode::Psrlq,
_ => unimplemented!("{} is not implemented for type {}", op, dst_ty),
},
_ => unreachable!(),
};
// If necessary, move the shift index into the lowest bits of a vector register.
let shift_by = match shift_by {
RegMemImm::Imm { .. } => shift_by,
RegMemImm::Reg { reg } => {
let tmp_shift_by = ctx.alloc_tmp(RegClass::V128, dst_ty);
ctx.emit(Inst::gpr_to_xmm(
SseOpcode::Movd,
RegMem::reg(reg),
OperandSize::Size32,
tmp_shift_by,
));
RegMemImm::reg(tmp_shift_by.to_reg())
}
RegMemImm::Mem { .. } => unimplemented!("load shift amount to XMM register"),
};
// Move the `src` to the same register as `dst`.
ctx.emit(Inst::gen_move(dst, src, dst_ty));
ctx.emit(Inst::xmm_rmi_reg(sse_op, shift_by, dst));
}
}
Opcode::Ineg => {
let dst = get_output_reg(ctx, outputs[0]);
let ty = ty.unwrap();
if ty.is_vector() {
// Zeroes out a register and then does a packed subtraction
// of the input from the register.
let src = input_to_reg_mem(ctx, inputs[0]);
let tmp = ctx.alloc_tmp(RegClass::V128, types::I32X4);
let subtract_opcode = match ty {
types::I8X16 => SseOpcode::Psubb,
types::I16X8 => SseOpcode::Psubw,
types::I32X4 => SseOpcode::Psubd,
types::I64X2 => SseOpcode::Psubq,
_ => panic!("Unsupported type for Ineg instruction, found {}", ty),
};
// Note that we must zero out a tmp instead of using the destination register, since
// the destination could be an alias for the source input register.
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Pxor,
RegMem::reg(tmp.to_reg()),
tmp,
));
ctx.emit(Inst::xmm_rm_r(subtract_opcode, src, tmp));
ctx.emit(Inst::xmm_unary_rm_r(
SseOpcode::Movapd,
RegMem::reg(tmp.to_reg()),
dst,
));
} else {
let size = ty.bytes() as u8;
let src = put_input_in_reg(ctx, inputs[0]);
ctx.emit(Inst::gen_move(dst, src, ty));
ctx.emit(Inst::neg(size, dst));
}
}
Opcode::Clz => {
// TODO when the x86 flags have use_lzcnt, we can use LZCNT.
// General formula using bit-scan reverse (BSR):
// mov -1, %dst
// bsr %src, %tmp
// cmovz %dst, %tmp
// mov $(size_bits - 1), %dst
// sub %tmp, %dst
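// For example, clz of 0x0000_00F0 (32 bits): BSR yields bit index 7, so dst =
// 31 - 7 = 24. For a zero input, BSR leaves its destination undefined but sets ZF,
// so the CMOVZ replaces tmp with the preloaded -1, giving dst = 31 - (-1) = 32.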
let (ext_spec, ty) = match ctx.input_ty(insn, 0) {
types::I8 | types::I16 => (Some(ExtSpec::ZeroExtendTo32), types::I32),
a if a == types::I32 || a == types::I64 => (None, a),
_ => unreachable!(),
};
let src = if let Some(ext_spec) = ext_spec {
RegMem::reg(extend_input_to_reg(ctx, inputs[0], ext_spec))
} else {
input_to_reg_mem(ctx, inputs[0])
};
let dst = get_output_reg(ctx, outputs[0]);
let tmp = ctx.alloc_tmp(RegClass::I64, ty);
ctx.emit(Inst::imm(
OperandSize::from_bytes(ty.bytes()),
u64::max_value(),
dst,
));
ctx.emit(Inst::unary_rm_r(
ty.bytes() as u8,
UnaryRmROpcode::Bsr,
src,
tmp,
));
ctx.emit(Inst::cmove(
ty.bytes() as u8,
CC::Z,
RegMem::reg(dst.to_reg()),
tmp,
));
ctx.emit(Inst::imm(
OperandSize::from_bytes(ty.bytes()),
ty.bits() as u64 - 1,
dst,
));
ctx.emit(Inst::alu_rmi_r(
ty == types::I64,
AluRmiROpcode::Sub,
RegMemImm::reg(tmp.to_reg()),
dst,
));
}
Opcode::Ctz => {
// TODO when the x86 flags have use_bmi1, we can use TZCNT.
// General formula using bit-scan forward (BSF):
// bsf %src, %dst
// mov $(size_bits), %tmp
// cmovz %tmp, %dst
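// For example, ctz of 0x0000_0008 (32 bits): BSF yields bit index 3, so dst = 3.
// For a zero input, BSF sets ZF and leaves its destination undefined, so the CMOVZ
// loads the preloaded size_bits (32) instead.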
let ty = ctx.input_ty(insn, 0);
let ty = if ty.bits() < 32 { types::I32 } else { ty };
debug_assert!(ty == types::I32 || ty == types::I64);
let src = input_to_reg_mem(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
let tmp = ctx.alloc_tmp(RegClass::I64, ty);
ctx.emit(Inst::imm(OperandSize::Size32, ty.bits() as u64, tmp));
ctx.emit(Inst::unary_rm_r(
ty.bytes() as u8,
UnaryRmROpcode::Bsf,
src,
dst,
));
ctx.emit(Inst::cmove(
ty.bytes() as u8,
CC::Z,
RegMem::reg(tmp.to_reg()),
dst,
));
}
Opcode::Popcnt => {
// TODO when the x86 flags have use_popcnt, we can use the popcnt instruction.
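// The sequences below implement the classic mask-and-subtract popcount (as in
// Hacker's Delight). As an illustrative scalar sketch only (this is not what is
// emitted), the 64-bit variant computes:
//
//   fn popcnt64(x: u64) -> u64 {
//       const M: u64 = 0x7777_7777_7777_7777;
//       let mut t = (x >> 1) & M;                    // tmp1
//       let mut n = x - t;                           // tmp2
//       t = (t >> 1) & M;
//       n -= t;
//       t = (t >> 1) & M;
//       n -= t;                                      // n now holds per-nibble counts
//       n = (n + (n >> 4)) & 0x0F0F_0F0F_0F0F_0F0F;  // per-byte sums
//       n.wrapping_mul(0x0101_0101_0101_0101) >> 56  // horizontal add into top byte
//   }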
let (ext_spec, ty) = match ctx.input_ty(insn, 0) {
types::I8 | types::I16 => (Some(ExtSpec::ZeroExtendTo32), types::I32),
a if a == types::I32 || a == types::I64 => (None, a),
_ => unreachable!(),
};
let src = if let Some(ext_spec) = ext_spec {
RegMem::reg(extend_input_to_reg(ctx, inputs[0], ext_spec))
} else {
input_to_reg_mem(ctx, inputs[0])
};
let dst = get_output_reg(ctx, outputs[0]);
if ty == types::I64 {
let is_64 = true;
let tmp1 = ctx.alloc_tmp(RegClass::I64, types::I64);
let tmp2 = ctx.alloc_tmp(RegClass::I64, types::I64);
let cst = ctx.alloc_tmp(RegClass::I64, types::I64);
// mov src, tmp1
ctx.emit(Inst::mov64_rm_r(src.clone(), tmp1));
// shr $1, tmp1
ctx.emit(Inst::shift_r(
8,
ShiftKind::ShiftRightLogical,
Some(1),
tmp1,
));
// mov 0x7777_7777_7777_7777, cst
ctx.emit(Inst::imm(OperandSize::Size64, 0x7777777777777777, cst));
// andq cst, tmp1
ctx.emit(Inst::alu_rmi_r(
is_64,
AluRmiROpcode::And,
RegMemImm::reg(cst.to_reg()),
tmp1,
));
// mov src, tmp2
ctx.emit(Inst::mov64_rm_r(src, tmp2));
// sub tmp1, tmp2
ctx.emit(Inst::alu_rmi_r(
is_64,
AluRmiROpcode::Sub,
RegMemImm::reg(tmp1.to_reg()),
tmp2,
));
// shr $1, tmp1
ctx.emit(Inst::shift_r(
8,
ShiftKind::ShiftRightLogical,
Some(1),
tmp1,
));
// and cst, tmp1
ctx.emit(Inst::alu_rmi_r(
is_64,
AluRmiROpcode::And,
RegMemImm::reg(cst.to_reg()),
tmp1,
));
// sub tmp1, tmp2
ctx.emit(Inst::alu_rmi_r(
is_64,
AluRmiROpcode::Sub,
RegMemImm::reg(tmp1.to_reg()),
tmp2,
));
// shr $1, tmp1
ctx.emit(Inst::shift_r(
8,
ShiftKind::ShiftRightLogical,
Some(1),
tmp1,
));
// and cst, tmp1
ctx.emit(Inst::alu_rmi_r(
is_64,
AluRmiROpcode::And,
RegMemImm::reg(cst.to_reg()),
tmp1,
));
// sub tmp1, tmp2
ctx.emit(Inst::alu_rmi_r(
is_64,
AluRmiROpcode::Sub,
RegMemImm::reg(tmp1.to_reg()),
tmp2,
));
// mov tmp2, dst
ctx.emit(Inst::mov64_rm_r(RegMem::reg(tmp2.to_reg()), dst));
// shr $4, dst
ctx.emit(Inst::shift_r(8, ShiftKind::ShiftRightLogical, Some(4), dst));
// add tmp2, dst
ctx.emit(Inst::alu_rmi_r(
is_64,
AluRmiROpcode::Add,
RegMemImm::reg(tmp2.to_reg()),
dst,
));
// mov $0x0F0F_0F0F_0F0F_0F0F, cst
ctx.emit(Inst::imm(OperandSize::Size64, 0x0F0F0F0F0F0F0F0F, cst));
// and cst, dst
ctx.emit(Inst::alu_rmi_r(
is_64,
AluRmiROpcode::And,
RegMemImm::reg(cst.to_reg()),
dst,
));
// mov $0x0101_0101_0101_0101, cst
ctx.emit(Inst::imm(OperandSize::Size64, 0x0101010101010101, cst));
// mul cst, dst
ctx.emit(Inst::alu_rmi_r(
is_64,
AluRmiROpcode::Mul,
RegMemImm::reg(cst.to_reg()),
dst,
));
// shr $56, dst
ctx.emit(Inst::shift_r(
8,
ShiftKind::ShiftRightLogical,
Some(56),
dst,
));
} else {
assert_eq!(ty, types::I32);
let is_64 = false;
let tmp1 = ctx.alloc_tmp(RegClass::I64, types::I64);
let tmp2 = ctx.alloc_tmp(RegClass::I64, types::I64);
// mov src, tmp1
ctx.emit(Inst::mov64_rm_r(src.clone(), tmp1));
// shr $1, tmp1
ctx.emit(Inst::shift_r(
4,
ShiftKind::ShiftRightLogical,
Some(1),
tmp1,
));
// andq $0x7777_7777, tmp1
ctx.emit(Inst::alu_rmi_r(
is_64,
AluRmiROpcode::And,
RegMemImm::imm(0x77777777),
tmp1,
));
// mov src, tmp2
ctx.emit(Inst::mov64_rm_r(src, tmp2));
// sub tmp1, tmp2
ctx.emit(Inst::alu_rmi_r(
is_64,
AluRmiROpcode::Sub,
RegMemImm::reg(tmp1.to_reg()),
tmp2,
));
// shr $1, tmp1
ctx.emit(Inst::shift_r(
4,
ShiftKind::ShiftRightLogical,
Some(1),
tmp1,
));
// and 0x7777_7777, tmp1
ctx.emit(Inst::alu_rmi_r(
is_64,
AluRmiROpcode::And,
RegMemImm::imm(0x77777777),
tmp1,
));
// sub tmp1, tmp2
ctx.emit(Inst::alu_rmi_r(
is_64,
AluRmiROpcode::Sub,
RegMemImm::reg(tmp1.to_reg()),
tmp2,
));
// shr $1, tmp1
ctx.emit(Inst::shift_r(
4,
ShiftKind::ShiftRightLogical,
Some(1),
tmp1,
));
// and $0x7777_7777, tmp1
ctx.emit(Inst::alu_rmi_r(
is_64,
AluRmiROpcode::And,
RegMemImm::imm(0x77777777),
tmp1,
));
// sub tmp1, tmp2
ctx.emit(Inst::alu_rmi_r(
is_64,
AluRmiROpcode::Sub,
RegMemImm::reg(tmp1.to_reg()),
tmp2,
));
// mov tmp2, dst
ctx.emit(Inst::mov64_rm_r(RegMem::reg(tmp2.to_reg()), dst));
// shr $4, dst
ctx.emit(Inst::shift_r(4, ShiftKind::ShiftRightLogical, Some(4), dst));
// add tmp2, dst
ctx.emit(Inst::alu_rmi_r(
is_64,
AluRmiROpcode::Add,
RegMemImm::reg(tmp2.to_reg()),
dst,
));
// and $0x0F0F_0F0F, dst
ctx.emit(Inst::alu_rmi_r(
is_64,
AluRmiROpcode::And,
RegMemImm::imm(0x0F0F0F0F),
dst,
));
// mul $0x0101_0101, dst
ctx.emit(Inst::alu_rmi_r(
is_64,
AluRmiROpcode::Mul,
RegMemImm::imm(0x01010101),
dst,
));
// shr $24, dst
ctx.emit(Inst::shift_r(
4,
ShiftKind::ShiftRightLogical,
Some(24),
dst,
));
}
}
Opcode::IsNull | Opcode::IsInvalid => {
// Null references are represented by the constant value 0; invalid references are
// represented by the constant value -1. See `define_reftypes()` in
// `meta/src/isa/x86/encodings.rs` to confirm.
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
let ty = ctx.input_ty(insn, 0);
let imm = match op {
Opcode::IsNull => {
// TODO could use tst src, src for IsNull
0
}
Opcode::IsInvalid => {
// We can do a 32-bit comparison even in 64-bit mode, as the constant is then
// sign-extended.
0xffffffff
}
_ => unreachable!(),
};
ctx.emit(Inst::cmp_rmi_r(ty.bytes() as u8, RegMemImm::imm(imm), src));
ctx.emit(Inst::setcc(CC::Z, dst));
}
Opcode::Uextend
| Opcode::Sextend
| Opcode::Bint
| Opcode::Breduce
| Opcode::Bextend
| Opcode::Ireduce => {
let src_ty = ctx.input_ty(insn, 0);
let dst_ty = ctx.output_ty(insn, 0);
// Sextend requires a sign-extended move, but all the other opcodes are simply a move
// from a zero-extended source. Here is why this works, in each case:
//
// - Bint: Bool-to-int. We always represent a bool as a 0 or 1, so we merely need to
// zero-extend here.
//
// - Breduce, Bextend: changing width of a boolean. We represent a bool as a 0 or 1, so
// again, this is a zero-extend / no-op.
//
// - Ireduce: changing width of an integer. Smaller ints are stored with undefined
// high-order bits, so we can simply do a copy.
if src_ty == types::I32 && dst_ty == types::I64 && op != Opcode::Sextend {
// As a particular x64 extra-pattern-matching opportunity, all the ALU opcodes on
// 32 bits will zero-extend the upper 32 bits, so we do not even need to generate a
// zero-extended move in this case.
// TODO add loads and shifts here.
if let Some(_) = matches_input_any(
ctx,
inputs[0],
&[
Opcode::Iadd,
Opcode::IaddIfcout,
Opcode::Isub,
Opcode::Imul,
Opcode::Band,
Opcode::Bor,
Opcode::Bxor,
],
) {
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::gen_move(dst, src, types::I64));
return Ok(());
}
}
let src = input_to_reg_mem(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
let ext_mode = ExtMode::new(src_ty.bits(), dst_ty.bits());
assert_eq!(
src_ty.bits() < dst_ty.bits(),
ext_mode.is_some(),
"unexpected extension: {} -> {}",
src_ty,
dst_ty
);
if let Some(ext_mode) = ext_mode {
if op == Opcode::Sextend {
ctx.emit(Inst::movsx_rm_r(ext_mode, src, dst));
} else {
ctx.emit(Inst::movzx_rm_r(ext_mode, src, dst));
}
} else {
ctx.emit(Inst::mov64_rm_r(src, dst));
}
}
Opcode::Icmp => {
let condcode = ctx.data(insn).cond_code().unwrap();
let dst = get_output_reg(ctx, outputs[0]);
let ty = ctx.input_ty(insn, 0);
if !ty.is_vector() {
emit_cmp(ctx, insn);
let cc = CC::from_intcc(condcode);
ctx.emit(Inst::setcc(cc, dst));
} else {
assert_eq!(ty.bits(), 128);
let eq = |ty| match ty {
types::I8X16 => SseOpcode::Pcmpeqb,
types::I16X8 => SseOpcode::Pcmpeqw,
types::I32X4 => SseOpcode::Pcmpeqd,
types::I64X2 => SseOpcode::Pcmpeqq,
_ => panic!(
"Unable to find an instruction for {} for type: {}",
condcode, ty
),
};
let gt = |ty| match ty {
types::I8X16 => SseOpcode::Pcmpgtb,
types::I16X8 => SseOpcode::Pcmpgtw,
types::I32X4 => SseOpcode::Pcmpgtd,
types::I64X2 => SseOpcode::Pcmpgtq,
_ => panic!(
"Unable to find an instruction for {} for type: {}",
condcode, ty
),
};
let maxu = |ty| match ty {
types::I8X16 => SseOpcode::Pmaxub,
types::I16X8 => SseOpcode::Pmaxuw,
types::I32X4 => SseOpcode::Pmaxud,
_ => panic!(
"Unable to find an instruction for {} for type: {}",
condcode, ty
),
};
let mins = |ty| match ty {
types::I8X16 => SseOpcode::Pminsb,
types::I16X8 => SseOpcode::Pminsw,
types::I32X4 => SseOpcode::Pminsd,
_ => panic!(
"Unable to find an instruction for {} for type: {}",
condcode, ty
),
};
let minu = |ty| match ty {
types::I8X16 => SseOpcode::Pminub,
types::I16X8 => SseOpcode::Pminuw,
types::I32X4 => SseOpcode::Pminud,
_ => panic!(
"Unable to find an instruction for {} for type: {}",
condcode, ty
),
};
// Here we decide which operand to use as the read/write `dst` (ModRM reg field)
// and which to use as the read `input` (ModRM r/m field). In the normal case we
// use Cranelift's first operand, the `lhs`, as `dst` but we flip the operands for
// the less-than cases so that we can reuse the greater-than implementation.
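// E.g. `icmp slt x, y` becomes a PCMPGT* with swapped operands, since y > x is
// exactly x < y.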
let input = match condcode {
IntCC::SignedLessThan
| IntCC::SignedLessThanOrEqual
| IntCC::UnsignedLessThan
| IntCC::UnsignedLessThanOrEqual => {
let lhs = input_to_reg_mem(ctx, inputs[0]);
let rhs = put_input_in_reg(ctx, inputs[1]);
ctx.emit(Inst::gen_move(dst, rhs, ty));
lhs
}
_ => {
let lhs = put_input_in_reg(ctx, inputs[0]);
let rhs = input_to_reg_mem(ctx, inputs[1]);
ctx.emit(Inst::gen_move(dst, lhs, ty));
rhs
}
};
match condcode {
IntCC::Equal => ctx.emit(Inst::xmm_rm_r(eq(ty), input, dst)),
IntCC::NotEqual => {
ctx.emit(Inst::xmm_rm_r(eq(ty), input, dst));
// Emit all 1s into the `tmp` register.
let tmp = ctx.alloc_tmp(RegClass::V128, ty);
ctx.emit(Inst::xmm_rm_r(eq(ty), RegMem::from(tmp), tmp));
// Invert the result of the `PCMPEQ*`.
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pxor, RegMem::from(tmp), dst));
}
IntCC::SignedGreaterThan | IntCC::SignedLessThan => {
ctx.emit(Inst::xmm_rm_r(gt(ty), input, dst))
}
IntCC::SignedGreaterThanOrEqual | IntCC::SignedLessThanOrEqual => {
ctx.emit(Inst::xmm_rm_r(mins(ty), input.clone(), dst));
ctx.emit(Inst::xmm_rm_r(eq(ty), input, dst))
}
IntCC::UnsignedGreaterThan | IntCC::UnsignedLessThan => {
ctx.emit(Inst::xmm_rm_r(maxu(ty), input.clone(), dst));
ctx.emit(Inst::xmm_rm_r(eq(ty), input, dst));
// Emit all 1s into the `tmp` register.
let tmp = ctx.alloc_tmp(RegClass::V128, ty);
ctx.emit(Inst::xmm_rm_r(eq(ty), RegMem::from(tmp), tmp));
// Invert the result of the `PCMPEQ*`.
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pxor, RegMem::from(tmp), dst));
}
IntCC::UnsignedGreaterThanOrEqual | IntCC::UnsignedLessThanOrEqual => {
ctx.emit(Inst::xmm_rm_r(minu(ty), input.clone(), dst));
ctx.emit(Inst::xmm_rm_r(eq(ty), input, dst))
}
_ => unimplemented!("Unimplemented comparison code for icmp: {}", condcode),
}
}
}
Opcode::Fcmp => {
let cond_code = ctx.data(insn).fp_cond_code().unwrap();
let input_ty = ctx.input_ty(insn, 0);
if !input_ty.is_vector() {
// Unordered is returned by setting ZF, PF, CF <- 111
// Greater than by ZF, PF, CF <- 000
// Less than by ZF, PF, CF <- 001
// Equal by ZF, PF, CF <- 100
//
// Checking the result of comiss is somewhat annoying because you don't have setcc
// instructions that explicitly check simultaneously for the condition (i.e. eq, le,
// gt, etc) *and* orderedness.
//
// So that might mean we need more than one setcc check and then a logical "and" or
// "or" to determine both, in some cases. However, knowing that if the parity bit is
// set then the result was considered unordered, and that in that case both the ZF and
// CF flag bits must also be set, we can get away with using one setcc for most
// condition codes.
let dst = get_output_reg(ctx, outputs[0]);
match emit_fcmp(ctx, insn, cond_code, FcmpSpec::Normal) {
FcmpCondResult::Condition(cc) => {
ctx.emit(Inst::setcc(cc, dst));
}
FcmpCondResult::AndConditions(cc1, cc2) => {
let tmp = ctx.alloc_tmp(RegClass::I64, types::I32);
ctx.emit(Inst::setcc(cc1, tmp));
ctx.emit(Inst::setcc(cc2, dst));
ctx.emit(Inst::alu_rmi_r(
false,
AluRmiROpcode::And,
RegMemImm::reg(tmp.to_reg()),
dst,
));
}
FcmpCondResult::OrConditions(cc1, cc2) => {
let tmp = ctx.alloc_tmp(RegClass::I64, types::I32);
ctx.emit(Inst::setcc(cc1, tmp));
ctx.emit(Inst::setcc(cc2, dst));
ctx.emit(Inst::alu_rmi_r(
false,
AluRmiROpcode::Or,
RegMemImm::reg(tmp.to_reg()),
dst,
));
}
FcmpCondResult::InvertedEqualOrConditions(_, _) => unreachable!(),
}
} else {
let op = match input_ty {
types::F32X4 => SseOpcode::Cmpps,
types::F64X2 => SseOpcode::Cmppd,
_ => panic!("Bad input type to fcmp: {}", input_ty),
};
// Since some packed comparisons are not available, some of the condition codes
// must be inverted, with a corresponding `flip` of the operands.
let (imm, flip) = match cond_code {
FloatCC::GreaterThan => (FcmpImm::LessThan, true),
FloatCC::GreaterThanOrEqual => (FcmpImm::LessThanOrEqual, true),
FloatCC::UnorderedOrLessThan => (FcmpImm::UnorderedOrGreaterThan, true),
FloatCC::UnorderedOrLessThanOrEqual => {
(FcmpImm::UnorderedOrGreaterThanOrEqual, true)
}
FloatCC::OrderedNotEqual | FloatCC::UnorderedOrEqual => {
panic!("unsupported float condition code: {}", cond_code)
}
_ => (FcmpImm::from(cond_code), false),
};
// Determine the operands of the comparison, possibly by flipping them.
let (lhs, rhs) = if flip {
(
put_input_in_reg(ctx, inputs[1]),
input_to_reg_mem(ctx, inputs[0]),
)
} else {
(
put_input_in_reg(ctx, inputs[0]),
input_to_reg_mem(ctx, inputs[1]),
)
};
// Move the `lhs` to the same register as `dst`; this may not emit an actual move
// but ensures that the registers are the same to match x86's read-write operand
// encoding.
let dst = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::gen_move(dst, lhs, input_ty));
// Emit the comparison.
ctx.emit(Inst::xmm_rm_r_imm(op, rhs, dst, imm.encode(), false));
}
}
Opcode::FallthroughReturn | Opcode::Return => {
for i in 0..ctx.num_inputs(insn) {
let src_reg = put_input_in_reg(ctx, inputs[i]);
let retval_reg = ctx.retval(i);
let ty = ctx.input_ty(insn, i);
ctx.emit(Inst::gen_move(retval_reg, src_reg, ty));
}
// N.B.: the Ret itself is generated by the ABI.
}
Opcode::Call | Opcode::CallIndirect => {
let caller_conv = ctx.abi().call_conv();
let (mut abi, inputs) = match op {
Opcode::Call => {
let (extname, dist) = ctx.call_target(insn).unwrap();
let sig = ctx.call_sig(insn).unwrap();
assert_eq!(inputs.len(), sig.params.len());
assert_eq!(outputs.len(), sig.returns.len());
(
X64ABICaller::from_func(sig, &extname, dist, caller_conv)?,
&inputs[..],
)
}
Opcode::CallIndirect => {
let ptr = put_input_in_reg(ctx, inputs[0]);
let sig = ctx.call_sig(insn).unwrap();
assert_eq!(inputs.len() - 1, sig.params.len());
assert_eq!(outputs.len(), sig.returns.len());
(
X64ABICaller::from_ptr(sig, ptr, op, caller_conv)?,
&inputs[1..],
)
}
_ => unreachable!(),
};
abi.emit_stack_pre_adjust(ctx);
assert_eq!(inputs.len(), abi.num_args());
for (i, input) in inputs.iter().enumerate() {
let arg_reg = put_input_in_reg(ctx, *input);
abi.emit_copy_reg_to_arg(ctx, i, arg_reg);
}
abi.emit_call(ctx);
for (i, output) in outputs.iter().enumerate() {
let retval_reg = get_output_reg(ctx, *output);
abi.emit_copy_retval_to_reg(ctx, i, retval_reg);
}
abi.emit_stack_post_adjust(ctx);
}
Opcode::Debugtrap => {
ctx.emit(Inst::Hlt);
}
Opcode::Trap | Opcode::ResumableTrap => {
let trap_code = ctx.data(insn).trap_code().unwrap();
ctx.emit_safepoint(Inst::Ud2 { trap_code });
}
Opcode::Trapif | Opcode::Trapff => {
let trap_code = ctx.data(insn).trap_code().unwrap();
if matches_input(ctx, inputs[0], Opcode::IaddIfcout).is_some() {
let cond_code = ctx.data(insn).cond_code().unwrap();
// The flags must not have been clobbered by any other instruction between the
// iadd_ifcout and this instruction, as verified by the CLIF validator; so we can
// simply use the flags here.
let cc = CC::from_intcc(cond_code);
ctx.emit_safepoint(Inst::TrapIf { trap_code, cc });
} else if op == Opcode::Trapif {
let cond_code = ctx.data(insn).cond_code().unwrap();
let cc = CC::from_intcc(cond_code);
// Verification ensures that the input is always a single-def ifcmp.
let ifcmp = matches_input(ctx, inputs[0], Opcode::Ifcmp).unwrap();
emit_cmp(ctx, ifcmp);
ctx.emit_safepoint(Inst::TrapIf { trap_code, cc });
} else {
let cond_code = ctx.data(insn).fp_cond_code().unwrap();
// Verification ensures that the input is always a single-def ffcmp.
let ffcmp = matches_input(ctx, inputs[0], Opcode::Ffcmp).unwrap();
match emit_fcmp(ctx, ffcmp, cond_code, FcmpSpec::Normal) {
FcmpCondResult::Condition(cc) => {
ctx.emit_safepoint(Inst::TrapIf { trap_code, cc })
}
FcmpCondResult::AndConditions(cc1, cc2) => {
// A bit unfortunate, but materialize the flags in their own register, and
// check against this.
let tmp = ctx.alloc_tmp(RegClass::I64, types::I32);
let tmp2 = ctx.alloc_tmp(RegClass::I64, types::I32);
ctx.emit(Inst::setcc(cc1, tmp));
ctx.emit(Inst::setcc(cc2, tmp2));
ctx.emit(Inst::alu_rmi_r(
false, /* is_64 */
AluRmiROpcode::And,
RegMemImm::reg(tmp.to_reg()),
tmp2,
));
ctx.emit_safepoint(Inst::TrapIf {
trap_code,
cc: CC::NZ,
});
}
FcmpCondResult::OrConditions(cc1, cc2) => {
ctx.emit_safepoint(Inst::TrapIf { trap_code, cc: cc1 });
ctx.emit_safepoint(Inst::TrapIf { trap_code, cc: cc2 });
}
FcmpCondResult::InvertedEqualOrConditions(_, _) => unreachable!(),
};
};
}
Opcode::F64const => {
// TODO use cmpeqpd for all 1s.
let value = ctx.get_constant(insn).unwrap();
let dst = get_output_reg(ctx, outputs[0]);
for inst in Inst::gen_constant(dst, value, types::F64, |reg_class, ty| {
ctx.alloc_tmp(reg_class, ty)
}) {
ctx.emit(inst);
}
}
Opcode::F32const => {
// TODO use cmpeqps for all 1s.
let value = ctx.get_constant(insn).unwrap();
let dst = get_output_reg(ctx, outputs[0]);
for inst in Inst::gen_constant(dst, value, types::F32, |reg_class, ty| {
ctx.alloc_tmp(reg_class, ty)
}) {
ctx.emit(inst);
}
}
Opcode::Fadd | Opcode::Fsub | Opcode::Fmul | Opcode::Fdiv => {
let lhs = put_input_in_reg(ctx, inputs[0]);
let rhs = input_to_reg_mem(ctx, inputs[1]);
let dst = get_output_reg(ctx, outputs[0]);
let ty = ty.unwrap();
// Move the `lhs` to the same register as `dst`; this may not emit an actual move
// but ensures that the registers are the same to match x86's read-write operand
// encoding.
ctx.emit(Inst::gen_move(dst, lhs, ty));
// Note: min and max can't be handled here, because of the way Cranelift defines them:
// if any operand is a NaN, they must return the NaN operand, while the x86 machine
// instruction will return the second operand if either operand is a NaN.
let sse_op = match ty {
types::F32 => match op {
Opcode::Fadd => SseOpcode::Addss,
Opcode::Fsub => SseOpcode::Subss,
Opcode::Fmul => SseOpcode::Mulss,
Opcode::Fdiv => SseOpcode::Divss,
_ => unreachable!(),
},
types::F64 => match op {
Opcode::Fadd => SseOpcode::Addsd,
Opcode::Fsub => SseOpcode::Subsd,
Opcode::Fmul => SseOpcode::Mulsd,
Opcode::Fdiv => SseOpcode::Divsd,
_ => unreachable!(),
},
types::F32X4 => match op {
Opcode::Fadd => SseOpcode::Addps,
Opcode::Fsub => SseOpcode::Subps,
Opcode::Fmul => SseOpcode::Mulps,
Opcode::Fdiv => SseOpcode::Divps,
_ => unreachable!(),
},
types::F64X2 => match op {
Opcode::Fadd => SseOpcode::Addpd,
Opcode::Fsub => SseOpcode::Subpd,
Opcode::Fmul => SseOpcode::Mulpd,
Opcode::Fdiv => SseOpcode::Divpd,
_ => unreachable!(),
},
_ => panic!(
"invalid type: expected one of [F32, F64, F32X4, F64X2], found {}",
ty
),
};
ctx.emit(Inst::xmm_rm_r(sse_op, rhs, dst));
}
Opcode::Fmin | Opcode::Fmax => {
let lhs = put_input_in_reg(ctx, inputs[0]);
let rhs = put_input_in_reg(ctx, inputs[1]);
let dst = get_output_reg(ctx, outputs[0]);
let is_min = op == Opcode::Fmin;
let output_ty = ty.unwrap();
ctx.emit(Inst::gen_move(dst, rhs, output_ty));
if !output_ty.is_vector() {
let op_size = match output_ty {
types::F32 => OperandSize::Size32,
types::F64 => OperandSize::Size64,
_ => panic!("unexpected type {:?} for fmin/fmax", output_ty),
};
ctx.emit(Inst::xmm_min_max_seq(op_size, is_min, lhs, dst));
} else {
// X64's implementation of floating point min and floating point max does not
// propagate NaNs and +0s in a way that is friendly to the SIMD spec. For the
// scalar approach we use jumps to handle cases where NaN and +0 propagation is
// not consistent with what is needed. However, for packed floating point min and
// floating point max we implement a different approach to avoid the sequence
// of jumps that would be required on a per-lane basis. Because we do not need to
// lower labels and jumps, but do need ctx for creating temporaries, we implement
// the lowering here in lower.rs instead of emit.rs, as is done in the scalar case.
// The outline of the approach is as follows:
//
// First we perform the Min/Max in both directions. This is because when a lane of
// one operand contains a NaN, or when the lanes of the two operands contain 0s with
// mismatched signs, x64 returns the second operand regardless of its contents. So,
// in order to capture NaNs and normalize NaNs and 0 values, we perform the operation
// in both directions and merge the results. Then we normalize the results: we create
// a mask for the lanes containing NaNs, and use that mask to adjust NaNs to quiet
// NaNs and to normalize 0s.
//
// The following sequence is generated for min:
//
// movap{s,d} %lhs, %tmp
// minp{s,d} %dst, %tmp
// minp{s,d} %lhs, %dst
// orp{s,d} %dst, %tmp
// cmpp{s,d} %tmp, %dst, $3
// orp{s,d} %dst, %tmp
// psrl{d,q} {$10, $13}, %dst
// andnp{s,d} %tmp, %dst
//
// and for max the sequence is:
//
// movap{s,d} %lhs, %tmp
// maxp{s,d} %dst, %tmp
// maxp{s,d} %lhs, %dst
// xorp{s,d} %tmp, %dst
// orp{s,d} %dst, %tmp
// subp{s,d} %dst, %tmp
// cmpp{s,d} %tmp, %dst, $3
// psrl{d,q} {$10, $13}, %dst
// andnp{s,d} %tmp, %dst
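// As a concrete illustration for min over the lane pair (+0.0, -0.0): MINP{S,D}
// returns its second operand on a signed-zero tie, so the two directed mins yield
// +0.0 and -0.0 respectively; ORing them sets the sign bit, producing the -0.0 that
// min requires. NaN lanes survive the OR (the all-ones exponent is preserved) and
// are quieted by the unordered-compare/shift/ANDN steps below.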
if is_min {
let (mov_op, min_op, or_op, cmp_op, shift_op, shift_by, andn_op) =
match output_ty {
types::F32X4 => (
SseOpcode::Movaps,
SseOpcode::Minps,
SseOpcode::Orps,
SseOpcode::Cmpps,
SseOpcode::Psrld,
10,
SseOpcode::Andnps,
),
types::F64X2 => (
SseOpcode::Movapd,
SseOpcode::Minpd,
SseOpcode::Orpd,
SseOpcode::Cmppd,
SseOpcode::Psrlq,
13,
SseOpcode::Andnpd,
),
_ => unimplemented!("unsupported op type {:?}", output_ty),
};
// Copy lhs into tmp
let tmp_xmm1 = ctx.alloc_tmp(RegClass::V128, output_ty);
ctx.emit(Inst::xmm_mov(mov_op, RegMem::reg(lhs), tmp_xmm1));
// Perform min in reverse direction
ctx.emit(Inst::xmm_rm_r(min_op, RegMem::from(dst), tmp_xmm1));
// Perform min in original direction
ctx.emit(Inst::xmm_rm_r(min_op, RegMem::reg(lhs), dst));
// X64 handles propagation of -0s and NaNs differently between left and right
// operands. After doing the min in both directions, this OR will
// guarantee capture of -0s and NaNs in our tmp register.
ctx.emit(Inst::xmm_rm_r(or_op, RegMem::from(dst), tmp_xmm1));
// Compare unordered to create mask for lanes containing NaNs and then use
// that mask to saturate the NaN containing lanes in the tmp register with 1s.
// TODO: Would a check for NaN and then a jump be better here in the
// common case than continuing on to normalize NaNs that might not exist?
let cond = FcmpImm::from(FloatCC::Unordered);
ctx.emit(Inst::xmm_rm_r_imm(
cmp_op,
RegMem::reg(tmp_xmm1.to_reg()),
dst,
cond.encode(),
false,
));
ctx.emit(Inst::xmm_rm_r(or_op, RegMem::reg(dst.to_reg()), tmp_xmm1));
// The dst register holds a mask for lanes containing NaNs.
// We take that mask and shift in preparation for creating a different mask
// to normalize NaNs (create a quiet NaN) by zeroing out the appropriate
// number of least significant bits. We shift right each lane by 10 bits
// (1 sign + 8 exp. + 1 MSB sig.) for F32X4 and by 13 bits (1 sign +
// 11 exp. + 1 MSB sig.) for F64X2.
ctx.emit(Inst::xmm_rmi_reg(shift_op, RegMemImm::imm(shift_by), dst));
// Finally we do a nand with the tmp register to produce the final results
// in the dst.
ctx.emit(Inst::xmm_rm_r(andn_op, RegMem::reg(tmp_xmm1.to_reg()), dst));
} else {
let (
mov_op,
max_op,
xor_op,
or_op,
sub_op,
cmp_op,
shift_op,
shift_by,
andn_op,
) = match output_ty {
types::F32X4 => (
SseOpcode::Movaps,
SseOpcode::Maxps,
SseOpcode::Xorps,
SseOpcode::Orps,
SseOpcode::Subps,
SseOpcode::Cmpps,
SseOpcode::Psrld,
10,
SseOpcode::Andnps,
),
types::F64X2 => (
SseOpcode::Movapd,
SseOpcode::Maxpd,
SseOpcode::Xorpd,
SseOpcode::Orpd,
SseOpcode::Subpd,
SseOpcode::Cmppd,
SseOpcode::Psrlq,
13,
SseOpcode::Andnpd,
),
_ => unimplemented!("unsupported op type {:?}", output_ty),
};
// Copy lhs into tmp.
let tmp_xmm1 = ctx.alloc_tmp(RegClass::V128, types::F32);
ctx.emit(Inst::xmm_mov(mov_op, RegMem::reg(lhs), tmp_xmm1));
// Perform max in reverse direction.
ctx.emit(Inst::xmm_rm_r(max_op, RegMem::reg(dst.to_reg()), tmp_xmm1));
// Perform max in original direction.
ctx.emit(Inst::xmm_rm_r(max_op, RegMem::reg(lhs), dst));
// Get the difference between the two results and store in tmp.
// Max uses a different approach than min to account for potential
// discrepancies with plus/minus 0.
ctx.emit(Inst::xmm_rm_r(xor_op, RegMem::reg(tmp_xmm1.to_reg()), dst));
// X64 handles propagation of -0s and NaNs differently between left and right
// operands. After doing the max in both directions, this OR will
// guarantee capture of 0s and NaNs in our tmp register.
ctx.emit(Inst::xmm_rm_r(or_op, RegMem::reg(dst.to_reg()), tmp_xmm1));
// Capture NaNs and sign discrepancies.
ctx.emit(Inst::xmm_rm_r(sub_op, RegMem::reg(dst.to_reg()), tmp_xmm1));
// Compare unordered to create mask for lanes containing NaNs and then use
// that mask to saturate the NaN containing lanes in the tmp register with 1s.
let cond = FcmpImm::from(FloatCC::Unordered);
ctx.emit(Inst::xmm_rm_r_imm(
cmp_op,
RegMem::reg(tmp_xmm1.to_reg()),
dst,
cond.encode(),
false,
));
// The dst register holds a mask for lanes containing NaNs.
// We take that mask and shift in preparation for creating a different mask
// to normalize NaNs (create a quiet NaN) by zeroing out the appropriate
// number of least significant bits. We shift right each lane by 10 bits
// (1 sign + 8 exp. + 1 MSB sig.) for F32X4 and by 13 bits (1 sign +
// 11 exp. + 1 MSB sig.) for F64X2.
ctx.emit(Inst::xmm_rmi_reg(shift_op, RegMemImm::imm(shift_by), dst));
// Finally we do a nand with the tmp register to produce the final results
// in the dst.
ctx.emit(Inst::xmm_rm_r(andn_op, RegMem::reg(tmp_xmm1.to_reg()), dst));
}
}
}
Opcode::FminPseudo | Opcode::FmaxPseudo => {
let lhs = input_to_reg_mem(ctx, inputs[0]);
let rhs = put_input_in_reg(ctx, inputs[1]);
let dst = get_output_reg(ctx, outputs[0]);
let ty = ty.unwrap();
ctx.emit(Inst::gen_move(dst, rhs, ty));
let sse_opcode = match (ty, op) {
(types::F32X4, Opcode::FminPseudo) => SseOpcode::Minps,
(types::F32X4, Opcode::FmaxPseudo) => SseOpcode::Maxps,
(types::F64X2, Opcode::FminPseudo) => SseOpcode::Minpd,
(types::F64X2, Opcode::FmaxPseudo) => SseOpcode::Maxpd,
_ => unimplemented!("unsupported type {} for {}", ty, op),
};
ctx.emit(Inst::xmm_rm_r(sse_opcode, lhs, dst));
}
Opcode::Sqrt => {
let src = input_to_reg_mem(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
let ty = ty.unwrap();
let sse_op = match ty {
types::F32 => SseOpcode::Sqrtss,
types::F64 => SseOpcode::Sqrtsd,
types::F32X4 => SseOpcode::Sqrtps,
types::F64X2 => SseOpcode::Sqrtpd,
_ => panic!(
"invalid type: expected one of [F32, F64, F32X4, F64X2], found {}",
ty
),
};
ctx.emit(Inst::xmm_unary_rm_r(sse_op, src, dst));
}
Opcode::Fpromote => {
let src = input_to_reg_mem(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::xmm_unary_rm_r(SseOpcode::Cvtss2sd, src, dst));
}
Opcode::Fdemote => {
let src = input_to_reg_mem(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::xmm_unary_rm_r(SseOpcode::Cvtsd2ss, src, dst));
}
Opcode::FcvtFromSint => {
let output_ty = ty.unwrap();
if !output_ty.is_vector() {
let (ext_spec, src_size) = match ctx.input_ty(insn, 0) {
types::I8 | types::I16 => (Some(ExtSpec::SignExtendTo32), OperandSize::Size32),
types::I32 => (None, OperandSize::Size32),
types::I64 => (None, OperandSize::Size64),
_ => unreachable!(),
};
let src = match ext_spec {
Some(ext_spec) => RegMem::reg(extend_input_to_reg(ctx, inputs[0], ext_spec)),
None => input_to_reg_mem(ctx, inputs[0]),
};
let opcode = if output_ty == types::F32 {
SseOpcode::Cvtsi2ss
} else {
assert_eq!(output_ty, types::F64);
SseOpcode::Cvtsi2sd
};
let dst = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::gpr_to_xmm(opcode, src, src_size, dst));
} else {
let ty = ty.unwrap();
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
let opcode = match ctx.input_ty(insn, 0) {
types::I32X4 => SseOpcode::Cvtdq2ps,
_ => {
unimplemented!("unable to use type {} for op {}", ctx.input_ty(insn, 0), op)
}
};
ctx.emit(Inst::gen_move(dst, src, ty));
ctx.emit(Inst::xmm_rm_r(opcode, RegMem::from(dst), dst));
}
}
Opcode::FcvtFromUint => {
let dst = get_output_reg(ctx, outputs[0]);
let ty = ty.unwrap();
let input_ty = ctx.input_ty(insn, 0);
if !ty.is_vector() {
match input_ty {
types::I8 | types::I16 | types::I32 => {
// Conversion from an unsigned int smaller than 64-bit is easy: zero-extend +
// do a signed conversion (which won't overflow).
let opcode = if ty == types::F32 {
SseOpcode::Cvtsi2ss
} else {
assert_eq!(ty, types::F64);
SseOpcode::Cvtsi2sd
};
let src = RegMem::reg(extend_input_to_reg(
ctx,
inputs[0],
ExtSpec::ZeroExtendTo64,
));
ctx.emit(Inst::gpr_to_xmm(opcode, src, OperandSize::Size64, dst));
}
types::I64 => {
let src = put_input_in_reg(ctx, inputs[0]);
let src_copy = ctx.alloc_tmp(RegClass::I64, types::I64);
ctx.emit(Inst::gen_move(src_copy, src, types::I64));
let tmp_gpr1 = ctx.alloc_tmp(RegClass::I64, types::I64);
let tmp_gpr2 = ctx.alloc_tmp(RegClass::I64, types::I64);
ctx.emit(Inst::cvt_u64_to_float_seq(
ty == types::F64,
src_copy,
tmp_gpr1,
tmp_gpr2,
dst,
));
}
_ => panic!("unexpected input type for FcvtFromUint: {:?}", input_ty),
};
} else {
// Converting packed unsigned integers to packed floats requires a few steps.
// There is no single-instruction lowering for converting unsigned integers, but there
// is one for converting packed signed integers to float (cvtdq2ps). In the steps below
// we isolate the upper half (16 bits) and lower half (16 bits) of each lane and
// then convert each half separately using cvtdq2ps, which is meant for signed integers.
// In order for this to work for the upper half bits, we must first shift these bits
// right by 1 (divide by 2) to ensure the most significant bit is 0, so the value is
// not treated as signed; after the conversion we double the value. Finally we add the
// converted values, where the addition will correctly round.
//
// Sequence:
// -> A = 0xffffffff
// -> Ah = 0xffff0000
// -> Al = 0x0000ffff
// -> Convert(Al) // Convert int to float
// -> Ah = Ah >> 1 // Shift right 1 to assure Ah conversion isn't treated as signed
// -> Convert(Ah) // Convert .. with no loss of significant digits from previous shift
// -> Ah = Ah + Ah // Double Ah to account for shift right before the conversion.
// -> dst = Ah + Al // Add the two floats together
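// As a worked example, for the lane 0xFFFF_FFFF (u32::MAX): Al = 0xFFFF converts to
// 65535.0; Ah = 0xFFFF_0000 is shifted to 0x7FFF_8000, converts exactly to
// 2147450880.0, and doubles to 4294901760.0; the final addition of 65535.0 rounds to
// the nearest representable f32, 4294967296.0, which is also the correctly rounded
// f32 value of u32::MAX itself.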
assert_eq!(ctx.input_ty(insn, 0), types::I32X4);
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
// Create a temporary register
let tmp = ctx.alloc_tmp(RegClass::V128, types::I32X4);
ctx.emit(Inst::xmm_unary_rm_r(
SseOpcode::Movapd,
RegMem::reg(src),
tmp,
));
ctx.emit(Inst::gen_move(dst, src, ty));
// Get the low 16 bits
ctx.emit(Inst::xmm_rmi_reg(SseOpcode::Pslld, RegMemImm::imm(16), tmp));
ctx.emit(Inst::xmm_rmi_reg(SseOpcode::Psrld, RegMemImm::imm(16), tmp));
// Get the high 16 bits
ctx.emit(Inst::xmm_rm_r(SseOpcode::Psubd, RegMem::from(tmp), dst));
// Convert the low 16 bits
ctx.emit(Inst::xmm_rm_r(SseOpcode::Cvtdq2ps, RegMem::from(tmp), tmp));
// Shift the high bits by 1, convert, and double to get the correct value.
ctx.emit(Inst::xmm_rmi_reg(SseOpcode::Psrld, RegMemImm::imm(1), dst));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Cvtdq2ps, RegMem::from(dst), dst));
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Addps,
RegMem::reg(dst.to_reg()),
dst,
));
// Add together the two converted values.
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Addps,
RegMem::reg(tmp.to_reg()),
dst,
));
}
}
Opcode::FcvtToUint | Opcode::FcvtToUintSat | Opcode::FcvtToSint | Opcode::FcvtToSintSat => {
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
let input_ty = ctx.input_ty(insn, 0);
if !input_ty.is_vector() {
let src_size = if input_ty == types::F32 {
OperandSize::Size32
} else {
assert_eq!(input_ty, types::F64);
OperandSize::Size64
};
let output_ty = ty.unwrap();
let dst_size = if output_ty == types::I32 {
OperandSize::Size32
} else {
assert_eq!(output_ty, types::I64);
OperandSize::Size64
};
let to_signed = op == Opcode::FcvtToSint || op == Opcode::FcvtToSintSat;
let is_sat = op == Opcode::FcvtToUintSat || op == Opcode::FcvtToSintSat;
let src_copy = ctx.alloc_tmp(RegClass::V128, input_ty);
ctx.emit(Inst::gen_move(src_copy, src, input_ty));
let tmp_xmm = ctx.alloc_tmp(RegClass::V128, input_ty);
let tmp_gpr = ctx.alloc_tmp(RegClass::I64, output_ty);
if to_signed {
ctx.emit(Inst::cvt_float_to_sint_seq(
src_size, dst_size, is_sat, src_copy, dst, tmp_gpr, tmp_xmm,
));
} else {
ctx.emit(Inst::cvt_float_to_uint_seq(
src_size, dst_size, is_sat, src_copy, dst, tmp_gpr, tmp_xmm,
));
}
} else {
if op == Opcode::FcvtToSintSat {
// Sets destination to zero if float is NaN
assert_eq!(types::F32X4, ctx.input_ty(insn, 0));
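// Net effect per lane: NaN becomes 0; an in-range value converts normally (e.g.
// 42.7 -> 42); a value above INT_MAX saturates to 0x7FFFFFFF; a value below INT_MIN
// keeps the 0x80000000 produced by CVTTPS2DQ.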
let tmp = ctx.alloc_tmp(RegClass::V128, types::I32X4);
ctx.emit(Inst::xmm_unary_rm_r(
SseOpcode::Movapd,
RegMem::reg(src),
tmp,
));
ctx.emit(Inst::gen_move(dst, src, input_ty));
let cond = FcmpImm::from(FloatCC::Equal);
ctx.emit(Inst::xmm_rm_r_imm(
SseOpcode::Cmpps,
RegMem::reg(tmp.to_reg()),
tmp,
cond.encode(),
false,
));
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Andps,
RegMem::reg(tmp.to_reg()),
dst,
));
// Set the top bit of tmp for positive (non-NaN) lanes; this will later be used
// to identify positive lanes whose conversion overflowed.
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Pxor,
RegMem::reg(dst.to_reg()),
tmp,
));
// Convert the packed float to packed doubleword.
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Cvttps2dq,
RegMem::reg(dst.to_reg()),
dst,
));
// Keep the top bit of tmp only for positive lanes whose conversion overflowed
// to 0x80000000; the PSRAD below turns that bit into a full-lane mask.
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Pand,
RegMem::reg(dst.to_reg()),
tmp,
));
ctx.emit(Inst::xmm_rmi_reg(SseOpcode::Psrad, RegMemImm::imm(31), tmp));
// On overflow, CVTTPS2DQ returns 0x80000000 in a lane. The XOR below sets
// positive overflow lanes to 0x7FFFFFFF and keeps negative overflow lanes as-is.
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Pxor,
RegMem::reg(tmp.to_reg()),
dst,
));
} else if op == Opcode::FcvtToUintSat {
// The algorithm for converting floats to unsigned ints is a little tricky. The
// complication arises because we are converting to a signed 32-bit int with a positive
// integer range from 1..INT_MAX (0x1..0x7FFFFFFF) while the unsigned integer has an extended
// range from (INT_MAX+1)..UINT_MAX. It's this range from (INT_MAX+1)..UINT_MAX
// (0x80000000..0xFFFFFFFF) that needs to be accounted for as a special case, since our
// conversion instruction (cvttps2dq) only converts as high as INT_MAX (0x7FFFFFFF), though it
// conveniently sets underflows and overflows (smaller than MIN_INT or larger than
// MAX_INT) to INT_MAX+1 (0x80000000). Noting that the range (INT_MAX+1)..UINT_MAX contains
// precisely INT_MAX+1 values, we can correctly account for and convert every value in this range
// if we simply subtract INT_MAX+1 before doing the cvttps2dq conversion. After the subtraction,
// every value originally in (INT_MAX+1)..UINT_MAX is in the range (0..INT_MAX).
// After the conversion we add INT_MAX+1 back to this converted value, noting again that
// values we are trying to account for were already set to INT_MAX+1 during the original conversion.
// We simply have to create a mask and make sure we are adding together only the lanes that need
// to be accounted for. Digesting it all, the steps are:
//
// Step 1 - Account for NaN and negative floats by setting these src values to zero.
// Step 2 - Make a copy (tmp1) of the src value since we need to convert twice for
// reasons described above.
// Step 3 - Convert the original src values. This will convert properly all floats up to INT_MAX
// Step 4 - Subtract INT_MAX+1 from the copy set (tmp1). Note that all zero and negative values
//          are those that were originally in the range (0..INT_MAX). This will come in handy during
//          step 7 when we zero negative lanes.
// Step 5 - Create a bit mask for tmp1 marking lanes that are still at least INT_MAX+1 after the
//          subtraction, i.e. lanes whose original value exceeds UINT_MAX and must saturate.
// Step 6 - Convert the second set of values (tmp1)
// Step 7 - Prep the converted second set by zeroing out negative lanes (these have already been
// converted correctly with the first set) and by setting overflow lanes to 0x7FFFFFFF
// as this will allow us to properly saturate overflow lanes when adding to 0x80000000
// Step 8 - Add the original converted src and the converted tmp1, where float values originally less
//          than or equal to INT_MAX will be unchanged, float values originally between INT_MAX+1 and
//          UINT_MAX will add together as (INT_MAX+1) + (SRC - (INT_MAX+1)), and float values originally
//          greater than UINT_MAX will be saturated to UINT_MAX (0xFFFFFFFF) after adding (0x80000000 + 0x7FFFFFFF).
//
//
// The table below illustrates the result after each step where it matters for the converted set.
// Note the original value range (original src set) is the final dst in Step 8:
//
// Original src set:
// | Original Value Range | Step 1 | Step 3 | Step 8 |
// | -FLT_MIN..FLT_MAX | 0.0..FLT_MAX | 0..INT_MAX(w/overflow) | 0..UINT_MAX(w/saturation) |
//
// Copied src set (tmp1):
// | Step 2 | Step 4 |
// | 0.0..FLT_MAX | (0.0-(INT_MAX+1))..(FLT_MAX-(INT_MAX+1)) |
//
// | Step 6 | Step 7 |
// | (0-(INT_MAX+1))..(UINT_MAX-(INT_MAX+1))(w/overflow) | ((INT_MAX+1)-(INT_MAX+1))..(INT_MAX+1) |
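// As a worked example, for the lane 3e9 (between INT_MAX+1 and UINT_MAX): the first
// CVTTPS2DQ overflows to 0x80000000; tmp1 = 3e9 - 2147483648.0 = 852516352.0 converts
// to 852516352; the saturation mask is zero for this lane, so it passes through the
// PXOR/PMAXSD fixups unchanged, and the final PADDD yields 0x80000000 + 852516352 =
// 3000000000. A lane above UINT_MAX overflows the second conversion too, is flipped
// to 0x7FFFFFFF by the mask, and the addition yields 0xFFFFFFFF (0x80000000 +
// 0x7FFFFFFF), i.e. saturation to UINT_MAX.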
// Create temporaries
assert_eq!(types::F32X4, ctx.input_ty(insn, 0));
let tmp1 = ctx.alloc_tmp(RegClass::V128, types::I32X4);
let tmp2 = ctx.alloc_tmp(RegClass::V128, types::I32X4);
// Since we are converting to an unsigned int, negative and NaN float lanes are
// first set to zero.
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pxor, RegMem::from(tmp2), tmp2));
ctx.emit(Inst::gen_move(dst, src, input_ty));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Maxps, RegMem::from(tmp2), dst));
// Set tmp2 to INT_MAX+1. It is important to note here that although it looks
// like we are only converting INT_MAX (0x7FFFFFFF), single-precision IEEE-754
// floats can only accurately represent contiguous integers up to 2^23; outside
// of this range the conversion rounds to the closest representable value. In the
// case of INT_MAX, the value gets represented as 0x4f000000, which is the integer
// value (INT_MAX+1).
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pcmpeqd, RegMem::from(tmp2), tmp2));
ctx.emit(Inst::xmm_rmi_reg(SseOpcode::Psrld, RegMemImm::imm(1), tmp2));
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Cvtdq2ps,
RegMem::from(tmp2),
tmp2,
));
// Make a copy of these lanes and then do the first conversion.
// Overflow lanes greater than the maximum allowed signed value will be
// set to 0x80000000. Negative and NaN lanes will be 0x0.
ctx.emit(Inst::xmm_mov(SseOpcode::Movaps, RegMem::from(dst), tmp1));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Cvttps2dq, RegMem::from(dst), dst));
// Set lanes to src - max_signed_int
ctx.emit(Inst::xmm_rm_r(SseOpcode::Subps, RegMem::from(tmp2), tmp1));
// Create mask for all positive lanes to saturate (i.e. greater than
// or equal to the maximum allowable unsigned int).
let cond = FcmpImm::from(FloatCC::LessThanOrEqual);
ctx.emit(Inst::xmm_rm_r_imm(
SseOpcode::Cmpps,
RegMem::from(tmp1),
tmp2,
cond.encode(),
false,
));
// Convert those set of lanes that have the max_signed_int factored out.
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Cvttps2dq,
RegMem::from(tmp1),
tmp1,
));
// Prepare converted lanes by zeroing negative lanes and prepping lanes
// that have positive overflow (based on the mask) by setting these lanes
// to 0x7FFFFFFF
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pxor, RegMem::from(tmp2), tmp1));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pxor, RegMem::from(tmp2), tmp2));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pmaxsd, RegMem::from(tmp2), tmp1));
// Add this second set of converted lanes to the original to properly handle
// values greater than max signed int.
ctx.emit(Inst::xmm_rm_r(SseOpcode::Paddd, RegMem::from(tmp1), dst));
} else {
// Since this branch is also guarded by a check for vector types,
// neither Opcode::FcvtToUint nor Opcode::FcvtToSint can reach here,
// as vector variants of those opcodes do not exist. The first two
// branches cover all reachable cases.
unreachable!();
}
}
}
Opcode::UwidenHigh | Opcode::UwidenLow | Opcode::SwidenHigh | Opcode::SwidenLow => {
let input_ty = ctx.input_ty(insn, 0);
let output_ty = ctx.output_ty(insn, 0);
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
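// Implementation note (added): the "low" widenings can extend the bottom lanes directly
// with PMOVSX*/PMOVZX*, while the "high" widenings first use PALIGNR to shift the upper
// eight bytes of the source down into the low half and then apply the same extension.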
if output_ty.is_vector() {
match op {
Opcode::SwidenLow => match (input_ty, output_ty) {
(types::I8X16, types::I16X8) => {
ctx.emit(Inst::gen_move(dst, src, output_ty));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pmovsxbw, RegMem::from(dst), dst));
}
(types::I16X8, types::I32X4) => {
ctx.emit(Inst::gen_move(dst, src, output_ty));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pmovsxwd, RegMem::from(dst), dst));
}
_ => unreachable!(),
},
Opcode::SwidenHigh => match (input_ty, output_ty) {
(types::I8X16, types::I16X8) => {
ctx.emit(Inst::gen_move(dst, src, output_ty));
ctx.emit(Inst::xmm_rm_r_imm(
SseOpcode::Palignr,
RegMem::reg(src),
dst,
8,
false,
));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pmovsxbw, RegMem::from(dst), dst));
}
(types::I16X8, types::I32X4) => {
ctx.emit(Inst::gen_move(dst, src, output_ty));
ctx.emit(Inst::xmm_rm_r_imm(
SseOpcode::Palignr,
RegMem::reg(src),
dst,
8,
false,
));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pmovsxwd, RegMem::from(dst), dst));
}
_ => unreachable!(),
},
Opcode::UwidenLow => match (input_ty, output_ty) {
(types::I8X16, types::I16X8) => {
ctx.emit(Inst::gen_move(dst, src, output_ty));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pmovzxbw, RegMem::from(dst), dst));
}
(types::I16X8, types::I32X4) => {
ctx.emit(Inst::gen_move(dst, src, output_ty));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pmovzxwd, RegMem::from(dst), dst));
}
_ => unreachable!(),
},
Opcode::UwidenHigh => match (input_ty, output_ty) {
(types::I8X16, types::I16X8) => {
ctx.emit(Inst::gen_move(dst, src, output_ty));
ctx.emit(Inst::xmm_rm_r_imm(
SseOpcode::Palignr,
RegMem::reg(src),
dst,
8,
false,
));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pmovzxbw, RegMem::from(dst), dst));
}
(types::I16X8, types::I32X4) => {
ctx.emit(Inst::gen_move(dst, src, output_ty));
ctx.emit(Inst::xmm_rm_r_imm(
SseOpcode::Palignr,
RegMem::reg(src),
dst,
8,
false,
));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pmovzxwd, RegMem::from(dst), dst));
}
_ => unreachable!(),
},
_ => unreachable!(),
}
} else {
panic!("Unsupported non-vector type for widen instruction {:?}", ty);
}
}
Opcode::Snarrow | Opcode::Unarrow => {
let input_ty = ctx.input_ty(insn, 0);
let output_ty = ctx.output_ty(insn, 0);
let src1 = put_input_in_reg(ctx, inputs[0]);
let src2 = put_input_in_reg(ctx, inputs[1]);
let dst = get_output_reg(ctx, outputs[0]);
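// Implementation note (added): PACKSS* (signed) and PACKUS* (unsigned) concatenate the
// two source vectors and narrow every lane with saturation, which is exactly the
// snarrow/unarrow semantics, so each case lowers to a move plus a single pack.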
if output_ty.is_vector() {
match op {
Opcode::Snarrow => match (input_ty, output_ty) {
(types::I16X8, types::I8X16) => {
ctx.emit(Inst::gen_move(dst, src1, input_ty));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Packsswb, RegMem::reg(src2), dst));
}
(types::I32X4, types::I16X8) => {
ctx.emit(Inst::gen_move(dst, src1, input_ty));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Packssdw, RegMem::reg(src2), dst));
}
_ => unreachable!(),
},
Opcode::Unarrow => match (input_ty, output_ty) {
(types::I16X8, types::I8X16) => {
ctx.emit(Inst::gen_move(dst, src1, input_ty));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Packuswb, RegMem::reg(src2), dst));
}
(types::I32X4, types::I16X8) => {
ctx.emit(Inst::gen_move(dst, src1, input_ty));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Packusdw, RegMem::reg(src2), dst));
}
_ => unreachable!(),
},
_ => unreachable!(),
}
} else {
panic!("Unsupported non-vector type for widen instruction {:?}", ty);
}
}
Opcode::Bitcast => {
let input_ty = ctx.input_ty(insn, 0);
let output_ty = ctx.output_ty(insn, 0);
match (input_ty, output_ty) {
(types::F32, types::I32) => {
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::xmm_to_gpr(
SseOpcode::Movd,
src,
dst,
OperandSize::Size32,
));
}
(types::I32, types::F32) => {
let src = input_to_reg_mem(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::gpr_to_xmm(
SseOpcode::Movd,
src,
OperandSize::Size32,
dst,
));
}
(types::F64, types::I64) => {
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::xmm_to_gpr(
SseOpcode::Movq,
src,
dst,
OperandSize::Size64,
));
}
(types::I64, types::F64) => {
let src = input_to_reg_mem(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::gpr_to_xmm(
SseOpcode::Movq,
src,
OperandSize::Size64,
dst,
));
}
_ => unreachable!("invalid bitcast from {:?} to {:?}", input_ty, output_ty),
}
}
Opcode::Fabs | Opcode::Fneg => {
let src = input_to_reg_mem(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
// In both cases, generate a constant and apply a single binary instruction:
// - to compute the absolute value, set all bits to 1 but the MSB to 0, and bit-AND the
// src with it.
// - to compute the negated value, set all bits to 0 but the MSB to 1, and bit-XOR the
// src with it.
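// Worked example (illustrative): for the f32 value -1.5 (bits 0xBFC00000),
// Fabs computes 0xBFC00000 & 0x7FFFFFFF = 0x3FC00000, i.e. 1.5, and
// Fneg computes 0xBFC00000 ^ 0x80000000 = 0x3FC00000, i.e. 1.5 as well.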
let output_ty = ty.unwrap();
if !output_ty.is_vector() {
let (val, opcode) = match output_ty {
types::F32 => match op {
Opcode::Fabs => (0x7fffffff, SseOpcode::Andps),
Opcode::Fneg => (0x80000000, SseOpcode::Xorps),
_ => unreachable!(),
},
types::F64 => match op {
Opcode::Fabs => (0x7fffffffffffffff, SseOpcode::Andpd),
Opcode::Fneg => (0x8000000000000000, SseOpcode::Xorpd),
_ => unreachable!(),
},
_ => panic!("unexpected type {:?} for Fabs", output_ty),
};
for inst in Inst::gen_constant(dst, val, output_ty, |reg_class, ty| {
ctx.alloc_tmp(reg_class, ty)
}) {
ctx.emit(inst);
}
ctx.emit(Inst::xmm_rm_r(opcode, src, dst));
} else {
// Eventually vector constants should be available in `gen_constant` and this block
// can be merged with the one above (TODO).
if output_ty.bits() == 128 {
// Move the `lhs` to the same register as `dst`; this may not emit an actual move
// but ensures that the registers are the same to match x86's read-write operand
// encoding.
let src = put_input_in_reg(ctx, inputs[0]);
ctx.emit(Inst::gen_move(dst, src, output_ty));
// Generate an all 1s constant in an XMM register. This uses CMPPS but could
// have used CMPPD with the same effect.
let tmp = ctx.alloc_tmp(RegClass::V128, output_ty);
let cond = FcmpImm::from(FloatCC::Equal);
let cmpps = Inst::xmm_rm_r_imm(
SseOpcode::Cmpps,
RegMem::reg(tmp.to_reg()),
tmp,
cond.encode(),
false,
);
ctx.emit(cmpps);
// Shift the all 1s constant to generate the mask.
let lane_bits = output_ty.lane_bits();
let (shift_opcode, opcode, shift_by) = match (op, lane_bits) {
(Opcode::Fabs, 32) => (SseOpcode::Psrld, SseOpcode::Andps, 1),
(Opcode::Fabs, 64) => (SseOpcode::Psrlq, SseOpcode::Andpd, 1),
(Opcode::Fneg, 32) => (SseOpcode::Pslld, SseOpcode::Xorps, 31),
(Opcode::Fneg, 64) => (SseOpcode::Psllq, SseOpcode::Xorpd, 63),
_ => unreachable!(
"unexpected opcode and lane size: {:?}, {} bits",
op, lane_bits
),
};
let shift = Inst::xmm_rmi_reg(shift_opcode, RegMemImm::imm(shift_by), tmp);
ctx.emit(shift);
// Apply shifted mask (XOR or AND).
let mask = Inst::xmm_rm_r(opcode, RegMem::reg(tmp.to_reg()), dst);
ctx.emit(mask);
} else {
panic!("unexpected type {:?} for Fabs", output_ty);
}
}
}
Opcode::Fcopysign => {
let dst = get_output_reg(ctx, outputs[0]);
let lhs = put_input_in_reg(ctx, inputs[0]);
let rhs = put_input_in_reg(ctx, inputs[1]);
let ty = ty.unwrap();
// We're going to generate the following sequence:
//
// movabs $INT_MIN, tmp_gpr1
// mov{d,q} tmp_gpr1, tmp_xmm1
// movap{s,d} tmp_xmm1, dst
// andnp{s,d} src_1, dst
// movap{s,d} src_2, tmp_xmm2
// andp{s,d} tmp_xmm1, tmp_xmm2
// orp{s,d} tmp_xmm2, dst
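// Why this works (added note): tmp_xmm1 holds only the sign bit, so the ANDN clears the
// sign bit of src_1 (keeping its magnitude), the AND isolates the sign bit of src_2, and
// the OR merges them; the result is the magnitude of src_1 carrying the sign of src_2.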
let tmp_xmm1 = ctx.alloc_tmp(RegClass::V128, types::F32);
let tmp_xmm2 = ctx.alloc_tmp(RegClass::V128, types::F32);
let (sign_bit_cst, mov_op, and_not_op, and_op, or_op) = match ty {
types::F32 => (
0x8000_0000,
SseOpcode::Movaps,
SseOpcode::Andnps,
SseOpcode::Andps,
SseOpcode::Orps,
),
types::F64 => (
0x8000_0000_0000_0000,
SseOpcode::Movapd,
SseOpcode::Andnpd,
SseOpcode::Andpd,
SseOpcode::Orpd,
),
_ => {
panic!("unexpected type {:?} for copysign", ty);
}
};
for inst in Inst::gen_constant(tmp_xmm1, sign_bit_cst, ty, |reg_class, ty| {
ctx.alloc_tmp(reg_class, ty)
}) {
ctx.emit(inst);
}
ctx.emit(Inst::xmm_mov(mov_op, RegMem::reg(tmp_xmm1.to_reg()), dst));
ctx.emit(Inst::xmm_rm_r(and_not_op, RegMem::reg(lhs), dst));
ctx.emit(Inst::xmm_mov(mov_op, RegMem::reg(rhs), tmp_xmm2));
ctx.emit(Inst::xmm_rm_r(
and_op,
RegMem::reg(tmp_xmm1.to_reg()),
tmp_xmm2,
));
ctx.emit(Inst::xmm_rm_r(or_op, RegMem::reg(tmp_xmm2.to_reg()), dst));
}
Opcode::Ceil | Opcode::Floor | Opcode::Nearest | Opcode::Trunc => {
// TODO use ROUNDSS/ROUNDSD after sse4.1.
// Lower to VM calls when there's no access to SSE4.1.
let ty = ty.unwrap();
if !ty.is_vector() {
let libcall = match (op, ty) {
(Opcode::Ceil, types::F32) => LibCall::CeilF32,
(Opcode::Ceil, types::F64) => LibCall::CeilF64,
(Opcode::Floor, types::F32) => LibCall::FloorF32,
(Opcode::Floor, types::F64) => LibCall::FloorF64,
(Opcode::Nearest, types::F32) => LibCall::NearestF32,
(Opcode::Nearest, types::F64) => LibCall::NearestF64,
(Opcode::Trunc, types::F32) => LibCall::TruncF32,
(Opcode::Trunc, types::F64) => LibCall::TruncF64,
_ => panic!(
"unexpected type/opcode {:?}/{:?} in Ceil/Floor/Nearest/Trunc",
ty, op
),
};
emit_vm_call(ctx, flags, triple, libcall, insn, inputs, outputs)?;
} else {
let (op, mode) = match (op, ty) {
(Opcode::Ceil, types::F32X4) => (SseOpcode::Roundps, RoundImm::RoundUp),
(Opcode::Ceil, types::F64X2) => (SseOpcode::Roundpd, RoundImm::RoundUp),
(Opcode::Floor, types::F32X4) => (SseOpcode::Roundps, RoundImm::RoundDown),
(Opcode::Floor, types::F64X2) => (SseOpcode::Roundpd, RoundImm::RoundDown),
(Opcode::Trunc, types::F32X4) => (SseOpcode::Roundps, RoundImm::RoundZero),
(Opcode::Trunc, types::F64X2) => (SseOpcode::Roundpd, RoundImm::RoundZero),
(Opcode::Nearest, types::F32X4) => (SseOpcode::Roundps, RoundImm::RoundNearest),
(Opcode::Nearest, types::F64X2) => (SseOpcode::Roundpd, RoundImm::RoundNearest),
_ => panic!("Unknown op/ty combination (vector){:?}", ty),
};
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::gen_move(dst, src, ty));
ctx.emit(Inst::xmm_rm_r_imm(
op,
RegMem::from(dst),
dst,
mode.encode(),
false,
));
}
}
Opcode::Load
| Opcode::Uload8
| Opcode::Sload8
| Opcode::Uload16
| Opcode::Sload16
| Opcode::Uload32
| Opcode::Sload32
| Opcode::LoadComplex
| Opcode::Uload8Complex
| Opcode::Sload8Complex
| Opcode::Uload16Complex
| Opcode::Sload16Complex
| Opcode::Uload32Complex
| Opcode::Sload32Complex => {
let offset = ctx.data(insn).load_store_offset().unwrap();
let elem_ty = match op {
Opcode::Sload8 | Opcode::Uload8 | Opcode::Sload8Complex | Opcode::Uload8Complex => {
types::I8
}
Opcode::Sload16
| Opcode::Uload16
| Opcode::Sload16Complex
| Opcode::Uload16Complex => types::I16,
Opcode::Sload32
| Opcode::Uload32
| Opcode::Sload32Complex
| Opcode::Uload32Complex => types::I32,
Opcode::Load | Opcode::LoadComplex => ctx.output_ty(insn, 0),
_ => unimplemented!(),
};
let ext_mode = ExtMode::new(elem_ty.bits(), 64);
let sign_extend = match op {
Opcode::Sload8
| Opcode::Sload8Complex
| Opcode::Sload16
| Opcode::Sload16Complex
| Opcode::Sload32
| Opcode::Sload32Complex => true,
_ => false,
};
let amode = match op {
Opcode::Load
| Opcode::Uload8
| Opcode::Sload8
| Opcode::Uload16
| Opcode::Sload16
| Opcode::Uload32
| Opcode::Sload32 => {
assert_eq!(inputs.len(), 1, "only one input for load operands");
lower_to_amode(ctx, inputs[0], offset)
}
Opcode::LoadComplex
| Opcode::Uload8Complex
| Opcode::Sload8Complex
| Opcode::Uload16Complex
| Opcode::Sload16Complex
| Opcode::Uload32Complex
| Opcode::Sload32Complex => {
assert_eq!(
inputs.len(),
2,
"can't handle more than two inputs in complex load"
);
let base = put_input_in_reg(ctx, inputs[0]);
let index = put_input_in_reg(ctx, inputs[1]);
let shift = 0;
let flags = ctx.memflags(insn).expect("load should have memflags");
Amode::imm_reg_reg_shift(offset as u32, base, index, shift).with_flags(flags)
}
_ => unreachable!(),
};
let dst = get_output_reg(ctx, outputs[0]);
let is_xmm = elem_ty.is_float() || elem_ty.is_vector();
match (sign_extend, is_xmm) {
(true, false) => {
// The load is sign-extended only when the output size is lower than 64 bits,
// so ext-mode is defined in this case.
ctx.emit(Inst::movsx_rm_r(ext_mode.unwrap(), RegMem::mem(amode), dst));
}
(false, false) => {
if elem_ty.bytes() == 8 {
// Use a plain load.
ctx.emit(Inst::mov64_m_r(amode, dst))
} else {
// Use a zero-extended load.
ctx.emit(Inst::movzx_rm_r(ext_mode.unwrap(), RegMem::mem(amode), dst))
}
}
(_, true) => {
ctx.emit(match elem_ty {
types::F32 => Inst::xmm_mov(SseOpcode::Movss, RegMem::mem(amode), dst),
types::F64 => Inst::xmm_mov(SseOpcode::Movsd, RegMem::mem(amode), dst),
_ if elem_ty.is_vector() && elem_ty.bits() == 128 => {
Inst::xmm_mov(SseOpcode::Movups, RegMem::mem(amode), dst)
} // TODO Specialize for different types: MOVUPD, MOVDQU
_ => unreachable!("unexpected type for load: {:?}", elem_ty),
});
}
}
}
Opcode::Store
| Opcode::Istore8
| Opcode::Istore16
| Opcode::Istore32
| Opcode::StoreComplex
| Opcode::Istore8Complex
| Opcode::Istore16Complex
| Opcode::Istore32Complex => {
let offset = ctx.data(insn).load_store_offset().unwrap();
let elem_ty = match op {
Opcode::Istore8 | Opcode::Istore8Complex => types::I8,
Opcode::Istore16 | Opcode::Istore16Complex => types::I16,
Opcode::Istore32 | Opcode::Istore32Complex => types::I32,
Opcode::Store | Opcode::StoreComplex => ctx.input_ty(insn, 0),
_ => unreachable!(),
};
let addr = match op {
Opcode::Store | Opcode::Istore8 | Opcode::Istore16 | Opcode::Istore32 => {
assert_eq!(inputs.len(), 2, "expected exactly two inputs (value and address) for store");
lower_to_amode(ctx, inputs[1], offset)
}
Opcode::StoreComplex
| Opcode::Istore8Complex
| Opcode::Istore16Complex
| Opcode::Istore32Complex => {
assert_eq!(
inputs.len(),
3,
"can't handle more than two inputs in complex store"
);
let base = put_input_in_reg(ctx, inputs[1]);
let index = put_input_in_reg(ctx, inputs[2]);
let shift = 0;
let flags = ctx.memflags(insn).expect("store should have memflags");
Amode::imm_reg_reg_shift(offset as u32, base, index, shift).with_flags(flags)
}
_ => unreachable!(),
};
let src = put_input_in_reg(ctx, inputs[0]);
ctx.emit(match elem_ty {
types::F32 => Inst::xmm_mov_r_m(SseOpcode::Movss, src, addr),
types::F64 => Inst::xmm_mov_r_m(SseOpcode::Movsd, src, addr),
_ if elem_ty.is_vector() && elem_ty.bits() == 128 => {
// TODO Specialize for different types: MOVUPD, MOVDQU, etc.
Inst::xmm_mov_r_m(SseOpcode::Movups, src, addr)
}
_ => Inst::mov_r_m(elem_ty.bytes() as u8, src, addr),
});
}
Opcode::AtomicRmw => {
// This is a simple, general-case atomic update, based on a loop involving
// `cmpxchg`. Note that we could do much better than this in the case where the old
// value at the location (that is to say, the SSA `Value` computed by this CLIF
// instruction) is not required. In that case, we could instead implement this
// using a single `lock`-prefixed x64 read-modify-write instruction. Also, even in
// the case where the old value is required, for the `add` and `sub` cases, we can
// use the single instruction `lock xadd`. However, those improvements have been
// left for another day.
// TODO: filed as https://github.com/bytecodealliance/wasmtime/issues/2153
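// For intuition, the pseudo-instruction later expands to a loop shaped roughly like the
// following sketch (register choices are illustrative, not the exact expansion):
//   mov (%r9), %rax              ; load the current value
// retry:
//   mov %rax, %scratch
//   <op> %r10, %scratch          ; compute the updated value
//   lock cmpxchg %scratch, (%r9) ; publish it if memory still holds %rax
//   jnz retry                    ; on failure %rax was refreshed; try again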
let dst = get_output_reg(ctx, outputs[0]);
let mut addr = put_input_in_reg(ctx, inputs[0]);
let mut arg2 = put_input_in_reg(ctx, inputs[1]);
let ty_access = ty.unwrap();
assert!(is_valid_atomic_transaction_ty(ty_access));
// Make sure that both args are in virtual regs, since in effect we have to do a
// parallel copy to get them safely to the AtomicRmwSeq input regs, and that's not
// guaranteed safe if either is in a real reg.
addr = ctx.ensure_in_vreg(addr, types::I64);
arg2 = ctx.ensure_in_vreg(arg2, types::I64);
// Move the args to the preordained AtomicRMW input regs. Note that `AtomicRmwSeq`
// operates at whatever width is specified by `ty`, so there's no need to
// zero-extend `arg2` in the case of `ty` being I8/I16/I32.
ctx.emit(Inst::gen_move(
Writable::from_reg(regs::r9()),
addr,
types::I64,
));
ctx.emit(Inst::gen_move(
Writable::from_reg(regs::r10()),
arg2,
types::I64,
));
// Now the AtomicRmwSeq (pseudo-) instruction itself
let op = inst_common::AtomicRmwOp::from(ctx.data(insn).atomic_rmw_op().unwrap());
ctx.emit(Inst::AtomicRmwSeq { ty: ty_access, op });
// And finally, copy the preordained AtomicRmwSeq output reg to its destination.
ctx.emit(Inst::gen_move(dst, regs::rax(), types::I64));
}
Opcode::AtomicCas => {
// This is very similar to, but not identical to, the `AtomicRmw` case. As with
// `AtomicRmw`, there's no need to zero-extend narrow values here.
let dst = get_output_reg(ctx, outputs[0]);
let addr = lower_to_amode(ctx, inputs[0], 0);
let expected = put_input_in_reg(ctx, inputs[1]);
let replacement = put_input_in_reg(ctx, inputs[2]);
let ty_access = ty.unwrap();
assert!(is_valid_atomic_transaction_ty(ty_access));
// Move the expected value into %rax. Because there's only one fixed register on
// the input side, we don't have to use `ensure_in_vreg`, as is necessary in the
// `AtomicRmw` case.
ctx.emit(Inst::gen_move(
Writable::from_reg(regs::rax()),
expected,
types::I64,
));
ctx.emit(Inst::LockCmpxchg {
ty: ty_access,
src: replacement,
dst: addr.into(),
});
// And finally, copy the old value at the location to its destination reg.
ctx.emit(Inst::gen_move(dst, regs::rax(), types::I64));
}
Opcode::AtomicLoad => {
// This is a normal load. The x86-TSO memory model provides sufficient sequencing
// to satisfy the CLIF synchronisation requirements for `AtomicLoad` without the
// need for any fence instructions.
let data = get_output_reg(ctx, outputs[0]);
let addr = lower_to_amode(ctx, inputs[0], 0);
let ty_access = ty.unwrap();
assert!(is_valid_atomic_transaction_ty(ty_access));
let rm = RegMem::mem(addr);
if ty_access == types::I64 {
ctx.emit(Inst::mov64_rm_r(rm, data));
} else {
let ext_mode = ExtMode::new(ty_access.bits(), 64).expect(&format!(
"invalid extension during AtomicLoad: {} -> {}",
ty_access.bits(),
64
));
ctx.emit(Inst::movzx_rm_r(ext_mode, rm, data));
}
}
Opcode::AtomicStore => {
// This is a normal store, followed by an `mfence` instruction.
let data = put_input_in_reg(ctx, inputs[0]);
let addr = lower_to_amode(ctx, inputs[1], 0);
let ty_access = ctx.input_ty(insn, 0);
assert!(is_valid_atomic_transaction_ty(ty_access));
ctx.emit(Inst::mov_r_m(ty_access.bytes() as u8, data, addr));
ctx.emit(Inst::Fence {
kind: FenceKind::MFence,
});
}
Opcode::Fence => {
ctx.emit(Inst::Fence {
kind: FenceKind::MFence,
});
}
Opcode::FuncAddr => {
let dst = get_output_reg(ctx, outputs[0]);
let (extname, _) = ctx.call_target(insn).unwrap();
let extname = extname.clone();
ctx.emit(Inst::LoadExtName {
dst,
name: Box::new(extname),
offset: 0,
});
}
Opcode::SymbolValue => {
let dst = get_output_reg(ctx, outputs[0]);
let (extname, _, offset) = ctx.symbol_value(insn).unwrap();
let extname = extname.clone();
ctx.emit(Inst::LoadExtName {
dst,
name: Box::new(extname),
offset,
});
}
Opcode::StackAddr => {
let (stack_slot, offset) = match *ctx.data(insn) {
InstructionData::StackLoad {
opcode: Opcode::StackAddr,
stack_slot,
offset,
} => (stack_slot, offset),
_ => unreachable!(),
};
let dst = get_output_reg(ctx, outputs[0]);
let offset: i32 = offset.into();
let inst = ctx
.abi()
.stackslot_addr(stack_slot, u32::try_from(offset).unwrap(), dst);
ctx.emit(inst);
}
Opcode::Select => {
let flag_input = inputs[0];
if let Some(fcmp) = matches_input(ctx, flag_input, Opcode::Fcmp) {
let cond_code = ctx.data(fcmp).fp_cond_code().unwrap();
// For equal, we flip the operands, because we can't test a conjunction of
// CPU flags with a single cmove; see InvertedEqualOrConditions doc comment.
let (lhs_input, rhs_input) = match cond_code {
FloatCC::Equal => (inputs[2], inputs[1]),
_ => (inputs[1], inputs[2]),
};
let ty = ctx.output_ty(insn, 0);
let rhs = put_input_in_reg(ctx, rhs_input);
let dst = get_output_reg(ctx, outputs[0]);
let lhs = if is_int_or_ref_ty(ty) && ty.bytes() < 4 {
// Special case: since the higher bits are undefined per CLIF semantics, we
// can just apply a 32-bit cmove here. Force inputs into registers, to
// avoid partial spilling out-of-bounds with memory accesses, though.
// The cmove below is then emitted with size 4 (see the size computation further down).
RegMem::reg(put_input_in_reg(ctx, lhs_input))
} else {
input_to_reg_mem(ctx, lhs_input)
};
// We request inversion of Equal to NotEqual here: taking LHS if equal would mean
// take it if both CC::NP and CC::Z are set, the conjunction of which can't be
// modeled with a single cmov instruction. Instead, we'll swap LHS and RHS in the
// select operation, and invert the equal to a not-equal here.
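// Added background: ucomiss/ucomisd report "equal" as ZF=1 with PF=0 (an unordered
// result sets PF=1), so equality is the conjunction ZF && !PF, while inequality is the
// disjunction !ZF || PF. Only a disjunction can be built from two chained cmovs, which
// is why the equal case is inverted and the operands swapped.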
let fcmp_results = emit_fcmp(ctx, fcmp, cond_code, FcmpSpec::InvertEqual);
if let FcmpCondResult::InvertedEqualOrConditions(_, _) = &fcmp_results {
// Keep this sync'd with the lowering of the select inputs above.
assert_eq!(cond_code, FloatCC::Equal);
}
ctx.emit(Inst::gen_move(dst, rhs, ty));
match fcmp_results {
FcmpCondResult::Condition(cc) => {
if is_int_or_ref_ty(ty) {
let size = u8::max(ty.bytes() as u8, 4);
ctx.emit(Inst::cmove(size, cc, lhs, dst));
} else {
ctx.emit(Inst::xmm_cmove(ty == types::F64, cc, lhs, dst));
}
}
FcmpCondResult::AndConditions(_, _) => {
unreachable!(
"can't AND with select; see above comment about inverting equal"
);
}
FcmpCondResult::InvertedEqualOrConditions(cc1, cc2)
| FcmpCondResult::OrConditions(cc1, cc2) => {
if is_int_or_ref_ty(ty) {
let size = u8::max(ty.bytes() as u8, 4);
ctx.emit(Inst::cmove(size, cc1, lhs.clone(), dst));
ctx.emit(Inst::cmove(size, cc2, lhs, dst));
} else {
ctx.emit(Inst::xmm_cmove(ty == types::F64, cc1, lhs.clone(), dst));
ctx.emit(Inst::xmm_cmove(ty == types::F64, cc2, lhs, dst));
}
}
}
} else {
let ty = ty.unwrap();
let mut size = ty.bytes() as u8;
let lhs = if is_int_or_ref_ty(ty) {
if size < 4 {
// Special case: since the higher bits are undefined per CLIF semantics, we
// can just apply a 32-bit cmove here. Force inputs into registers, to
// avoid partial spilling out-of-bounds with memory accesses, though.
size = 4;
RegMem::reg(put_input_in_reg(ctx, inputs[1]))
} else {
input_to_reg_mem(ctx, inputs[1])
}
} else {
input_to_reg_mem(ctx, inputs[1])
};
let rhs = put_input_in_reg(ctx, inputs[2]);
let dst = get_output_reg(ctx, outputs[0]);
let cc = if let Some(icmp) = matches_input(ctx, flag_input, Opcode::Icmp) {
emit_cmp(ctx, icmp);
let cond_code = ctx.data(icmp).cond_code().unwrap();
CC::from_intcc(cond_code)
} else {
// The input is a boolean value, compare it against zero.
let size = ctx.input_ty(insn, 0).bytes() as u8;
let test = put_input_in_reg(ctx, flag_input);
ctx.emit(Inst::cmp_rmi_r(size, RegMemImm::imm(0), test));
CC::NZ
};
// This doesn't affect the flags.
ctx.emit(Inst::gen_move(dst, rhs, ty));
if is_int_or_ref_ty(ty) {
ctx.emit(Inst::cmove(size, cc, lhs, dst));
} else {
debug_assert!(ty == types::F32 || ty == types::F64);
ctx.emit(Inst::xmm_cmove(ty == types::F64, cc, lhs, dst));
}
}
}
Opcode::Selectif | Opcode::SelectifSpectreGuard => {
let lhs = input_to_reg_mem(ctx, inputs[1]);
let rhs = put_input_in_reg(ctx, inputs[2]);
let dst = get_output_reg(ctx, outputs[0]);
let ty = ctx.output_ty(insn, 0);
// Verification ensures that the input is always a single-def ifcmp.
let cmp_insn = ctx
.get_input_as_source_or_const(inputs[0].insn, inputs[0].input)
.inst
.unwrap()
.0;
debug_assert_eq!(ctx.data(cmp_insn).opcode(), Opcode::Ifcmp);
emit_cmp(ctx, cmp_insn);
let cc = CC::from_intcc(ctx.data(insn).cond_code().unwrap());
if is_int_or_ref_ty(ty) {
let size = ty.bytes() as u8;
if size == 1 {
// Sign-extend operands to 32, then do a cmove of size 4.
let lhs_se = ctx.alloc_tmp(RegClass::I64, types::I32);
ctx.emit(Inst::movsx_rm_r(ExtMode::BL, lhs, lhs_se));
ctx.emit(Inst::movsx_rm_r(ExtMode::BL, RegMem::reg(rhs), dst));
ctx.emit(Inst::cmove(4, cc, RegMem::reg(lhs_se.to_reg()), dst));
} else {
ctx.emit(Inst::gen_move(dst, rhs, ty));
ctx.emit(Inst::cmove(size, cc, lhs, dst));
}
} else {
debug_assert!(ty == types::F32 || ty == types::F64);
ctx.emit(Inst::gen_move(dst, rhs, ty));
ctx.emit(Inst::xmm_cmove(ty == types::F64, cc, lhs, dst));
}
}
Opcode::Udiv | Opcode::Urem | Opcode::Sdiv | Opcode::Srem => {
let kind = match op {
Opcode::Udiv => DivOrRemKind::UnsignedDiv,
Opcode::Sdiv => DivOrRemKind::SignedDiv,
Opcode::Urem => DivOrRemKind::UnsignedRem,
Opcode::Srem => DivOrRemKind::SignedRem,
_ => unreachable!(),
};
let is_div = kind.is_div();
let input_ty = ctx.input_ty(insn, 0);
let size = input_ty.bytes() as u8;
let dividend = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::gen_move(
Writable::from_reg(regs::rax()),
dividend,
input_ty,
));
if flags.avoid_div_traps() {
// A vcode meta-instruction is used to lower the inline checks, since they embed
// pc-relative offsets that must not change, thus requiring regalloc to not
// interfere by introducing spills and reloads.
//
// Note it keeps the result in $rax (for divide) or $rdx (for rem), so that
// regalloc is aware of the coalescing opportunity between rax/rdx and the
// destination register.
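// Added background: x86 div/idiv divide the double-width value in rdx:rax (ah:al for
// 8-bit operands) by the operand, leaving the quotient in rax and the remainder in rdx,
// which is why the high half must be zeroed or sign-extended before the divide.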
let divisor = put_input_in_reg(ctx, inputs[1]);
let divisor_copy = ctx.alloc_tmp(RegClass::I64, types::I64);
ctx.emit(Inst::gen_move(divisor_copy, divisor, types::I64));
let tmp = if op == Opcode::Sdiv && size == 8 {
Some(ctx.alloc_tmp(RegClass::I64, types::I64))
} else {
None
};
// TODO use xor
ctx.emit(Inst::imm(
OperandSize::Size32,
0,
Writable::from_reg(regs::rdx()),
));
ctx.emit(Inst::checked_div_or_rem_seq(kind, size, divisor_copy, tmp));
} else {
let divisor = input_to_reg_mem(ctx, inputs[1]);
// Fill in the high parts:
if kind.is_signed() {
// sign-extend the sign-bit of al into ah for size 1, or rax into rdx, for
// signed opcodes.
ctx.emit(Inst::sign_extend_data(size));
} else if input_ty == types::I8 {
ctx.emit(Inst::movzx_rm_r(
ExtMode::BL,
RegMem::reg(regs::rax()),
Writable::from_reg(regs::rax()),
));
} else {
// zero for unsigned opcodes.
ctx.emit(Inst::imm(
OperandSize::Size64,
0,
Writable::from_reg(regs::rdx()),
));
}
// Emit the actual idiv.
ctx.emit(Inst::div(size, kind.is_signed(), divisor));
}
// Move the result back into the destination reg.
if is_div {
// The quotient is in rax.
ctx.emit(Inst::gen_move(dst, regs::rax(), input_ty));
} else {
// The remainder is in rdx.
ctx.emit(Inst::gen_move(dst, regs::rdx(), input_ty));
}
}
Opcode::Umulhi | Opcode::Smulhi => {
let input_ty = ctx.input_ty(insn, 0);
let size = input_ty.bytes() as u8;
let lhs = put_input_in_reg(ctx, inputs[0]);
let rhs = input_to_reg_mem(ctx, inputs[1]);
let dst = get_output_reg(ctx, outputs[0]);
// Move lhs in %rax.
ctx.emit(Inst::gen_move(
Writable::from_reg(regs::rax()),
lhs,
input_ty,
));
// Emit the actual mul or imul.
let signed = op == Opcode::Smulhi;
ctx.emit(Inst::mul_hi(size, signed, rhs));
// Read the result from the high part (stored in %rdx).
ctx.emit(Inst::gen_move(dst, regs::rdx(), input_ty));
}
Opcode::GetPinnedReg => {
let dst = get_output_reg(ctx, outputs[0]);
ctx.emit(Inst::gen_move(dst, regs::pinned_reg(), types::I64));
}
Opcode::SetPinnedReg => {
let src = put_input_in_reg(ctx, inputs[0]);
ctx.emit(Inst::gen_move(
Writable::from_reg(regs::pinned_reg()),
src,
types::I64,
));
}
Opcode::Vconst => {
let used_constant = if let &InstructionData::UnaryConst {
constant_handle, ..
} = ctx.data(insn)
{
ctx.use_constant(VCodeConstantData::Pool(
constant_handle,
ctx.get_constant_data(constant_handle).clone(),
))
} else {
unreachable!("vconst should always have unary_const format")
};
// TODO use Inst::gen_constant() instead.
let dst = get_output_reg(ctx, outputs[0]);
let ty = ty.unwrap();
ctx.emit(Inst::xmm_load_const(used_constant, dst, ty));
}
Opcode::RawBitcast => {
// A raw_bitcast is just a mechanism for correcting the type of V128 values (see
// https://github.com/bytecodealliance/wasmtime/issues/1147). As such, this IR
// instruction should emit no machine code but a move is necessary to give the register
// allocator a definition for the output virtual register.
let src = put_input_in_reg(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
let ty = ty.unwrap();
ctx.emit(Inst::gen_move(dst, src, ty));
}
Opcode::Shuffle => {
let ty = ty.unwrap();
let dst = get_output_reg(ctx, outputs[0]);
let lhs_ty = ctx.input_ty(insn, 0);
let lhs = put_input_in_reg(ctx, inputs[0]);
let rhs = put_input_in_reg(ctx, inputs[1]);
let mask = match ctx.get_immediate(insn) {
Some(DataValue::V128(bytes)) => bytes.to_vec(),
_ => unreachable!("shuffle should always have a 16-byte immediate"),
};
// A mask-building helper: in 128-bit SIMD, 0-15 indicate which lane to read from and a
// 1 in the most significant position zeroes the lane.
let zero_unknown_lane_index = |b: u8| if b > 15 { 0b10000000 } else { b };
ctx.emit(Inst::gen_move(dst, rhs, ty));
if rhs == lhs {
// If `lhs` and `rhs` are the same we can use a single PSHUFB to shuffle the XMM
// register. We statically build `constructed_mask` to zero out any unknown lane
// indices (may not be completely necessary: verification could fail incorrect mask
// values) and fix the indexes to all point to the `dst` vector.
let constructed_mask = mask
.iter()
// A mask value greater than 15 refers to a lane in the second operand, which here equals the first.
.map(|&b| if b > 15 { b.wrapping_sub(16) } else { b })
.map(zero_unknown_lane_index)
.collect();
let constant = ctx.use_constant(VCodeConstantData::Generated(constructed_mask));
let tmp = ctx.alloc_tmp(RegClass::V128, types::I8X16);
ctx.emit(Inst::xmm_load_const(constant, tmp, ty));
// After loading the constructed mask in a temporary register, we use this to
// shuffle the `dst` register (remember that, in this case, it is the same as
// `src` so we disregard this register).
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pshufb, RegMem::from(tmp), dst));
} else {
// If `lhs` and `rhs` are different, we must shuffle each separately and then OR
// them together. This is necessary due to PSHUFB semantics. As in the case above,
// we build the `constructed_mask` for each case statically.
// PSHUFB the `lhs` argument into `tmp0`, placing zeroes for unused lanes.
let tmp0 = ctx.alloc_tmp(RegClass::V128, lhs_ty);
ctx.emit(Inst::gen_move(tmp0, lhs, lhs_ty));
let constructed_mask = mask.iter().cloned().map(zero_unknown_lane_index).collect();
let constant = ctx.use_constant(VCodeConstantData::Generated(constructed_mask));
let tmp1 = ctx.alloc_tmp(RegClass::V128, types::I8X16);
ctx.emit(Inst::xmm_load_const(constant, tmp1, ty));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pshufb, RegMem::from(tmp1), tmp0));
// PSHUFB the second argument, placing zeroes for unused lanes.
let constructed_mask = mask
.iter()
.map(|b| b.wrapping_sub(16))
.map(zero_unknown_lane_index)
.collect();
let constant = ctx.use_constant(VCodeConstantData::Generated(constructed_mask));
let tmp2 = ctx.alloc_tmp(RegClass::V128, types::I8X16);
ctx.emit(Inst::xmm_load_const(constant, tmp2, ty));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pshufb, RegMem::from(tmp2), dst));
// OR the shuffled registers (the mechanism and lane-size for OR-ing the registers
// is not important).
ctx.emit(Inst::xmm_rm_r(SseOpcode::Orps, RegMem::from(tmp0), dst));
// TODO when AVX512 is enabled we should replace this sequence with a single VPERMB
}
}
Opcode::Swizzle => {
// SIMD swizzle; the following inefficient implementation is due to the Wasm SIMD spec
// requiring mask indexes greater than 15 to have the same semantics as a 0 index. For
// the spec discussion, see https://github.com/WebAssembly/simd/issues/93. The CLIF
// semantics match the Wasm SIMD semantics for this instruction.
// The instruction format maps to variables like: %dst = swizzle %src, %mask
let ty = ty.unwrap();
let dst = get_output_reg(ctx, outputs[0]);
let src = put_input_in_reg(ctx, inputs[0]);
let swizzle_mask = put_input_in_reg(ctx, inputs[1]);
// Inform the register allocator that `src` and `dst` should be in the same register.
ctx.emit(Inst::gen_move(dst, src, ty));
// Create a mask for zeroing out-of-bounds lanes of the swizzle mask.
let zero_mask = ctx.alloc_tmp(RegClass::V128, types::I8X16);
static ZERO_MASK_VALUE: [u8; 16] = [
0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70, 0x70,
0x70, 0x70,
];
let constant = ctx.use_constant(VCodeConstantData::WellKnown(&ZERO_MASK_VALUE));
ctx.emit(Inst::xmm_load_const(constant, zero_mask, ty));
// Use the `zero_mask` on a writable `swizzle_mask`.
let swizzle_mask = Writable::from_reg(swizzle_mask);
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Paddusb,
RegMem::from(zero_mask),
swizzle_mask,
));
// Shuffle `dst` using the fixed-up `swizzle_mask`.
ctx.emit(Inst::xmm_rm_r(
SseOpcode::Pshufb,
RegMem::from(swizzle_mask),
dst,
));
}
Opcode::Insertlane => {
// The instruction format maps to variables like: %dst = insertlane %in_vec, %src, %lane
let ty = ty.unwrap();
let dst = get_output_reg(ctx, outputs[0]);
let in_vec = put_input_in_reg(ctx, inputs[0]);
let src_ty = ctx.input_ty(insn, 1);
debug_assert!(!src_ty.is_vector());
let src = input_to_reg_mem(ctx, inputs[1]);
let lane = if let InstructionData::TernaryImm8 { imm, .. } = ctx.data(insn) {
*imm
} else {
unreachable!();
};
debug_assert!(lane < ty.lane_count() as u8);
ctx.emit(Inst::gen_move(dst, in_vec, ty));
emit_insert_lane(ctx, src, dst, lane, ty.lane_type());
}
Opcode::Extractlane => {
// The instruction format maps to variables like: %dst = extractlane %src, %lane
let ty = ty.unwrap();
let dst = get_output_reg(ctx, outputs[0]);
let src_ty = ctx.input_ty(insn, 0);
assert_eq!(src_ty.bits(), 128);
let src = put_input_in_reg(ctx, inputs[0]);
let lane = if let InstructionData::BinaryImm8 { imm, .. } = ctx.data(insn) {
*imm
} else {
unreachable!();
};
debug_assert!(lane < src_ty.lane_count() as u8);
emit_extract_lane(ctx, src, dst, lane, ty);
}
Opcode::Splat => {
let ty = ty.unwrap();
assert_eq!(ty.bits(), 128);
let src_ty = ctx.input_ty(insn, 0);
assert!(src_ty.bits() < 128);
let src = input_to_reg_mem(ctx, inputs[0]);
let dst = get_output_reg(ctx, outputs[0]);
// We know that splat will overwrite all of the lanes of `dst` but it takes several
// instructions to do so. Because of the multiple instructions, there is no good way to
// declare `dst` a `def` except with the following pseudo-instruction.
ctx.emit(Inst::xmm_uninit_value(dst));
// TODO: eventually many of these sequences could be optimized with AVX's VBROADCAST*
// and VPBROADCAST*.
match ty.lane_bits() {
8 => {
emit_insert_lane(ctx, src, dst, 0, ty.lane_type());
// Initialize a register with all 0s.
let tmp = ctx.alloc_tmp(RegClass::V128, ty);
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pxor, RegMem::from(tmp), tmp));
// Shuffle the lowest byte lane to all other lanes.
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pshufb, RegMem::from(tmp), dst))
}
16 => {
emit_insert_lane(ctx, src.clone(), dst, 0, ty.lane_type());
emit_insert_lane(ctx, src, dst, 1, ty.lane_type());
// Shuffle the lowest two lanes to all other lanes.
ctx.emit(Inst::xmm_rm_r_imm(
SseOpcode::Pshufd,
RegMem::from(dst),
dst,
0,
false,
))
}
32 => {
emit_insert_lane(ctx, src, dst, 0, ty.lane_type());
// Shuffle the lowest lane to all other lanes.
ctx.emit(Inst::xmm_rm_r_imm(
SseOpcode::Pshufd,
RegMem::from(dst),
dst,
0,
false,
))
}
64 => {
emit_insert_lane(ctx, src.clone(), dst, 0, ty.lane_type());
emit_insert_lane(ctx, src, dst, 1, ty.lane_type());
}
_ => panic!("Invalid type to splat: {}", ty),
}
}
Opcode::VanyTrue => {
let dst = get_output_reg(ctx, outputs[0]);
let src_ty = ctx.input_ty(insn, 0);
assert_eq!(src_ty.bits(), 128);
let src = put_input_in_reg(ctx, inputs[0]);
// Set the ZF if the result is all zeroes.
ctx.emit(Inst::xmm_cmp_rm_r(SseOpcode::Ptest, RegMem::reg(src), src));
// If the ZF is not set, place a 1 in `dst`.
ctx.emit(Inst::setcc(CC::NZ, dst));
}
Opcode::VallTrue => {
let ty = ty.unwrap();
let dst = get_output_reg(ctx, outputs[0]);
let src_ty = ctx.input_ty(insn, 0);
assert_eq!(src_ty.bits(), 128);
let src = input_to_reg_mem(ctx, inputs[0]);
let eq = |ty: Type| match ty.lane_bits() {
8 => SseOpcode::Pcmpeqb,
16 => SseOpcode::Pcmpeqw,
32 => SseOpcode::Pcmpeqd,
64 => SseOpcode::Pcmpeqq,
_ => panic!("Unable to find an instruction for {} for type: {}", op, ty),
};
// Initialize a register with all 0s.
let tmp = ctx.alloc_tmp(RegClass::V128, ty);
ctx.emit(Inst::xmm_rm_r(SseOpcode::Pxor, RegMem::from(tmp), tmp));
// Compare to see what lanes are filled with all 1s.
ctx.emit(Inst::xmm_rm_r(eq(src_ty), src, tmp));
// Set the ZF if the result is all zeroes.
ctx.emit(Inst::xmm_cmp_rm_r(
SseOpcode::Ptest,
RegMem::from(tmp),
tmp.to_reg(),
));
// If the ZF is set, place a 1 in `dst`.
ctx.emit(Inst::setcc(CC::Z, dst));
}
Opcode::VhighBits => {
let src = put_input_in_reg(ctx, inputs[0]);
let src_ty = ctx.input_ty(insn, 0);
debug_assert!(src_ty.is_vector() && src_ty.bits() == 128);
let dst = get_output_reg(ctx, outputs[0]);
debug_assert!(dst.to_reg().get_class() == RegClass::I64);
// The Intel specification allows using both 32-bit and 64-bit GPRs as destination for
// the "move mask" instructions. This is controlled by the REX.R bit: "In 64-bit mode,
// the instruction can access additional registers when used with a REX.R prefix. The
// default operand size is 64-bit in 64-bit mode" (PMOVMSKB in IA Software Development
// Manual, vol. 2). This being the case, we will always clear REX.W since its use is
// unnecessary (`OperandSize` is used for setting/clearing REX.W).
let size = OperandSize::Size32;
match src_ty {
types::I8X16 | types::B8X16 => {
ctx.emit(Inst::xmm_to_gpr(SseOpcode::Pmovmskb, src, dst, size))
}
types::I32X4 | types::B32X4 | types::F32X4 => {
ctx.emit(Inst::xmm_to_gpr(SseOpcode::Movmskps, src, dst, size))
}
types::I64X2 | types::B64X2 | types::F64X2 => {
ctx.emit(Inst::xmm_to_gpr(SseOpcode::Movmskpd, src, dst, size))
}
types::I16X8 | types::B16X8 => {
// There is no x86 instruction for extracting the high bit of 16-bit lanes so
// here we:
// - duplicate the 16-bit lanes of `src` into 8-bit lanes:
// PACKSSWB([x1, x2, ...], [x1, x2, ...]) = [x1', x2', ..., x1', x2', ...]
// - use PMOVMSKB to gather the high bits; now we have duplicates, though
// - shift away the bottom 8 high bits to remove the duplicates.
let tmp = ctx.alloc_tmp(RegClass::V128, src_ty);
ctx.emit(Inst::gen_move(tmp, src, src_ty));
ctx.emit(Inst::xmm_rm_r(SseOpcode::Packsswb, RegMem::reg(src), tmp));
ctx.emit(Inst::xmm_to_gpr(
SseOpcode::Pmovmskb,
tmp.to_reg(),
dst,
size,
));
ctx.emit(Inst::shift_r(8, ShiftKind::ShiftRightLogical, Some(8), dst));
}
_ => unimplemented!("unknown input type {} for {}", src_ty, op),
}
}
Opcode::IaddImm
| Opcode::ImulImm
| Opcode::UdivImm
| Opcode::SdivImm
| Opcode::UremImm
| Opcode::SremImm
| Opcode::IrsubImm
| Opcode::IaddCin
| Opcode::IaddIfcin
| Opcode::IaddCout
| Opcode::IaddCarry
| Opcode::IaddIfcarry
| Opcode::IsubBin
| Opcode::IsubIfbin
| Opcode::IsubBout
| Opcode::IsubIfbout
| Opcode::IsubBorrow
| Opcode::IsubIfborrow
| Opcode::BandImm
| Opcode::BorImm
| Opcode::BxorImm
| Opcode::RotlImm
| Opcode::RotrImm
| Opcode::IshlImm
| Opcode::UshrImm
| Opcode::SshrImm => {
panic!("ALU+imm and ALU+carry ops should not appear here!");
}
_ => unimplemented!("unimplemented lowering for opcode {:?}", op),
}
Ok(())
}
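// Illustrative scalar model (added; not used by the lowering) of the lane-wise saturating
// f32 -> u32 conversion that the SIMD sequence in the FcvtToUintSat case above implements:
// NaN and negative inputs map to 0, and inputs at or above 2^32 saturate to u32::MAX.
// Note that Rust's own `as` cast has had exactly these saturating semantics since 1.45,
// so the branches below merely spell the behavior out.
#[cfg(test)]
#[allow(dead_code)]
fn scalar_fcvt_to_uint_sat32(x: f32) -> u32 {
    if x.is_nan() || x <= 0.0 {
        0 // Step 1 of the vector sequence: NaN and negative lanes become zero.
    } else if x >= 4_294_967_296.0 {
        u32::MAX // Overflow lanes saturate to 0xFFFFFFFF.
    } else {
        x as u32
    }
}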
//=============================================================================
// Lowering-backend trait implementation.
impl LowerBackend for X64Backend {
type MInst = Inst;
fn lower<C: LowerCtx<I = Inst>>(&self, ctx: &mut C, ir_inst: IRInst) -> CodegenResult<()> {
lower_insn_to_regs(ctx, ir_inst, &self.flags, &self.triple)
}
fn lower_branch_group<C: LowerCtx<I = Inst>>(
&self,
ctx: &mut C,
branches: &[IRInst],
targets: &[MachLabel],
) -> CodegenResult<()> {
// A block should end with at most two branches. The first may be a
// conditional branch; a conditional branch can be followed only by an
// unconditional branch or fallthrough. Otherwise, if only one branch,
// it may be an unconditional branch, a fallthrough, a return, or a
// trap. These conditions are verified by `is_ebb_basic()` during the
// verifier pass.
assert!(branches.len() <= 2);
if branches.len() == 2 {
// Must be a conditional branch followed by an unconditional branch.
let op0 = ctx.data(branches[0]).opcode();
let op1 = ctx.data(branches[1]).opcode();
trace!(
"lowering two-branch group: opcodes are {:?} and {:?}",
op0,
op1
);
assert!(op1 == Opcode::Jump || op1 == Opcode::Fallthrough);
let taken = targets[0];
// not_taken target is the target of the second branch, even if it is a Fallthrough
// instruction: because we reorder blocks while we lower, the fallthrough in the new
// order is not (necessarily) the same as the fallthrough in CLIF. So we use the
// explicitly-provided target.
let not_taken = targets[1];
match op0 {
Opcode::Brz | Opcode::Brnz => {
let flag_input = InsnInput {
insn: branches[0],
input: 0,
};
let src_ty = ctx.input_ty(branches[0], 0);
if let Some(icmp) = matches_input(ctx, flag_input, Opcode::Icmp) {
emit_cmp(ctx, icmp);
let cond_code = ctx.data(icmp).cond_code().unwrap();
let cond_code = if op0 == Opcode::Brz {
cond_code.inverse()
} else {
cond_code
};
let cc = CC::from_intcc(cond_code);
ctx.emit(Inst::jmp_cond(cc, taken, not_taken));
} else if let Some(fcmp) = matches_input(ctx, flag_input, Opcode::Fcmp) {
let cond_code = ctx.data(fcmp).fp_cond_code().unwrap();
let cond_code = if op0 == Opcode::Brz {
cond_code.inverse()
} else {
cond_code
};
match emit_fcmp(ctx, fcmp, cond_code, FcmpSpec::Normal) {
FcmpCondResult::Condition(cc) => {
ctx.emit(Inst::jmp_cond(cc, taken, not_taken));
}
FcmpCondResult::AndConditions(cc1, cc2) => {
ctx.emit(Inst::jmp_if(cc1.invert(), not_taken));
ctx.emit(Inst::jmp_cond(cc2.invert(), not_taken, taken));
}
FcmpCondResult::OrConditions(cc1, cc2) => {
ctx.emit(Inst::jmp_if(cc1, taken));
ctx.emit(Inst::jmp_cond(cc2, taken, not_taken));
}
FcmpCondResult::InvertedEqualOrConditions(_, _) => unreachable!(),
}
} else if is_int_or_ref_ty(src_ty) || is_bool_ty(src_ty) {
let src = put_input_in_reg(
ctx,
InsnInput {
insn: branches[0],
input: 0,
},
);
let cc = match op0 {
Opcode::Brz => CC::Z,
Opcode::Brnz => CC::NZ,
_ => unreachable!(),
};
let size_bytes = src_ty.bytes() as u8;
ctx.emit(Inst::cmp_rmi_r(size_bytes, RegMemImm::imm(0), src));
ctx.emit(Inst::jmp_cond(cc, taken, not_taken));
} else {
unimplemented!("brz/brnz with non-int type {:?}", src_ty);
}
}
Opcode::BrIcmp => {
let src_ty = ctx.input_ty(branches[0], 0);
if is_int_or_ref_ty(src_ty) || is_bool_ty(src_ty) {
let lhs = put_input_in_reg(
ctx,
InsnInput {
insn: branches[0],
input: 0,
},
);
let rhs = input_to_reg_mem_imm(
ctx,
InsnInput {
insn: branches[0],
input: 1,
},
);
let cc = CC::from_intcc(ctx.data(branches[0]).cond_code().unwrap());
let byte_size = src_ty.bytes() as u8;
// Cranelift's icmp semantics want to compare lhs - rhs, while Intel gives
// us dst - src at the machine instruction level, so invert operands.
ctx.emit(Inst::cmp_rmi_r(byte_size, rhs, lhs));
ctx.emit(Inst::jmp_cond(cc, taken, not_taken));
} else {
unimplemented!("bricmp with non-int type {:?}", src_ty);
}
}
Opcode::Brif => {
let flag_input = InsnInput {
insn: branches[0],
input: 0,
};
if let Some(ifcmp) = matches_input(ctx, flag_input, Opcode::Ifcmp) {
emit_cmp(ctx, ifcmp);
let cond_code = ctx.data(branches[0]).cond_code().unwrap();
let cc = CC::from_intcc(cond_code);
ctx.emit(Inst::jmp_cond(cc, taken, not_taken));
} else if let Some(ifcmp_sp) = matches_input(ctx, flag_input, Opcode::IfcmpSp) {
let operand = put_input_in_reg(
ctx,
InsnInput {
insn: ifcmp_sp,
input: 0,
},
);
let ty = ctx.input_ty(ifcmp_sp, 0);
ctx.emit(Inst::cmp_rmi_r(
ty.bytes() as u8,
RegMemImm::reg(operand),
regs::rsp(),
));
let cond_code = ctx.data(branches[0]).cond_code().unwrap();
let cc = CC::from_intcc(cond_code);
ctx.emit(Inst::jmp_cond(cc, taken, not_taken));
} else {
// Should be disallowed by flags checks in verifier.
unimplemented!("Brif with non-ifcmp input");
}
}
Opcode::Brff => {
let flag_input = InsnInput {
insn: branches[0],
input: 0,
};
if let Some(ffcmp) = matches_input(ctx, flag_input, Opcode::Ffcmp) {
let cond_code = ctx.data(branches[0]).fp_cond_code().unwrap();
match emit_fcmp(ctx, ffcmp, cond_code, FcmpSpec::Normal) {
FcmpCondResult::Condition(cc) => {
ctx.emit(Inst::jmp_cond(cc, taken, not_taken));
}
FcmpCondResult::AndConditions(cc1, cc2) => {
ctx.emit(Inst::jmp_if(cc1.invert(), not_taken));
ctx.emit(Inst::jmp_cond(cc2.invert(), not_taken, taken));
}
FcmpCondResult::OrConditions(cc1, cc2) => {
ctx.emit(Inst::jmp_if(cc1, taken));
ctx.emit(Inst::jmp_cond(cc2, taken, not_taken));
}
FcmpCondResult::InvertedEqualOrConditions(_, _) => unreachable!(),
}
} else {
// Should be disallowed by flags checks in verifier.
unimplemented!("Brff with input not from ffcmp");
}
}
_ => panic!("unexpected branch opcode: {:?}", op0),
}
} else {
assert_eq!(branches.len(), 1);
// Must be an unconditional branch or trap.
let op = ctx.data(branches[0]).opcode();
match op {
Opcode::Jump | Opcode::Fallthrough => {
ctx.emit(Inst::jmp_known(targets[0]));
}
Opcode::BrTable => {
let jt_size = targets.len() - 1;
assert!(jt_size <= u32::max_value() as usize);
let jt_size = jt_size as u32;
let idx = extend_input_to_reg(
ctx,
InsnInput {
insn: branches[0],
input: 0,
},
ExtSpec::ZeroExtendTo32,
);
// Bounds-check (compute flags from idx - jt_size) and branch to default.
ctx.emit(Inst::cmp_rmi_r(4, RegMemImm::imm(jt_size), idx));
// Emit the compound instruction that does:
//
// lea $jt, %rA
// movsbl [%rA, %rIndex, 2], %rB
// add %rB, %rA
// j *%rA
// [jt entries]
//
// This must be *one* instruction in the vcode because we cannot allow regalloc
// to insert any spills/fills in the middle of the sequence; otherwise, the
// lea PC-rel offset to the jumptable would be incorrect. (The alternative
// is to introduce a relocation pass for inlined jumptables, which is much
// worse.)
// This temporary is used as a signed integer of 64-bits (to hold addresses).
let tmp1 = ctx.alloc_tmp(RegClass::I64, types::I64);
// This temporary is used as a signed integer of 32-bits (for the wasm-table
// index) and then 64-bits (address addend). The small lie about the I64 type
// is benign, since the temporary is dead after this instruction (and its
// Cranelift type is thus unused).
let tmp2 = ctx.alloc_tmp(RegClass::I64, types::I64);
let targets_for_term: Vec<MachLabel> = targets.to_vec();
let default_target = targets[0];
let jt_targets: Vec<MachLabel> = targets.iter().skip(1).cloned().collect();
ctx.emit(Inst::JmpTableSeq {
idx,
tmp1,
tmp2,
default_target,
targets: jt_targets,
targets_for_term,
});
}
_ => panic!("Unknown branch type {:?}", op),
}
}
Ok(())
}
fn maybe_pinned_reg(&self) -> Option<Reg> {
Some(regs::pinned_reg())
}
}
| 44.950371 | 135 | 0.470869 |
b9823d915612ea3342b87f1ac7dd9221a0929cdd | 120 | // run: to_json()
#[derive(Deserialize, Document)]
pub enum Ty {
Foo,
#[serde(skip_deserializing)]
Bar,
}
| 12 | 32 | 0.608333 |
0ee0014642ba8e227bd82389a4ae1791b56d6ac8 | 4,608 | // This file is part of the SORA network and Polkaswap app.
// Copyright (c) 2020, 2021, Polka Biome Ltd. All rights reserved.
// SPDX-License-Identifier: BSD-4-Clause
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
// Redistributions of source code must retain the above copyright notice, this list
// of conditions and the following disclaimer.
// Redistributions in binary form must reproduce the above copyright notice, this
// list of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
//
// All advertising materials mentioning features or use of this software must display
// the following acknowledgement: This product includes software developed by Polka Biome
// Ltd., SORA, and Polkaswap.
//
// Neither the name of the Polka Biome Ltd. nor the names of its contributors may be used
// to endorse or promote products derived from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY Polka Biome Ltd. AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Polka Biome Ltd. BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use core::convert::TryFrom;
use sp_std::vec::Vec;
use common::prelude::fixnum::ops::CheckedAdd;
use common::prelude::FixedWrapper;
use common::{balance, fixed, Fixed};
/// Given a set of monotonic sequences A_i(n), i = 0..M-1, n = 0..N-1, returns a pair of:
/// - a vector of "weights" [W_i / N], i = 0..M-1, where W_i are lengths of respective
/// subsequences A_i(k), k = 0..W_i-1 such that the sum S = Sum(A_i(W_i - 1)) over i = 0..M-1
/// is the largest (smallest) across all possible combinations while the sum of weights
/// Sum(W_i) = N,
/// - the optimal sum value S.
///
/// - `sample_data`: a 2D matrix of N vectors each composed of M elements,
/// - `inversed`: boolean flag: if true, the overall sum is minimized (otherwise maximized).
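// Worked example (added for illustration): with two sources and two parts,
// sample_data = [[10, 18], [12, 20]] (source i returns sample_data[i][k] when given
// (k+1)/2 of the input). Routing everything to source 0 yields 18 and to source 1
// yields 20, but a half/half split yields 10 + 12 = 22, so the function returns
// (distribution = [0.5, 0.5], best_amount = 22).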
pub fn find_distribution(sample_data: Vec<Vec<Fixed>>, inversed: bool) -> (Vec<Fixed>, Fixed) {
fn default() -> (Vec<Fixed>, Fixed) {
(Default::default(), fixed!(0))
}
if sample_data.is_empty() {
return default();
}
let n = sample_data.len();
let s = sample_data[0].len();
let total_parts = match Fixed::try_from(s) {
Err(_) => return default(),
Ok(value) if value == fixed!(0) => return default(),
Ok(value) => value,
};
let mut accumulator: Vec<Vec<Fixed>> = vec![vec![fixed!(0); s + 1]; n];
accumulator[0][1..].copy_from_slice(&sample_data[0][..]);
let mut foreign: Vec<Vec<usize>> = vec![vec![0; s + 1]; n];
for i in 1..n {
for j in 1..=s {
accumulator[i][j] = accumulator[i - 1][j];
foreign[i][j] = j;
for k in 0..j {
let tmp: Fixed = match accumulator[i - 1][j - k - 1].cadd(sample_data[i][k]) {
Err(_) => continue,
Ok(value) => value,
};
let is_better = match inversed {
true => tmp < accumulator[i][j],
_ => tmp > accumulator[i][j],
};
if is_better {
accumulator[i][j] = tmp;
foreign[i][j] = j - k - 1;
}
}
}
}
let mut parts_left = s;
let mut cur_exchange = n;
let mut distribution = vec![fixed!(0); n];
while parts_left > 0 && cur_exchange != 0 {
cur_exchange -= 1;
let distribution_part = (FixedWrapper::from(parts_left as u128 * balance!(1))
- foreign[cur_exchange][parts_left] as u128 * balance!(1))
/ total_parts;
distribution[cur_exchange] = match distribution_part.get() {
Err(_) => return default(),
Ok(value) => value,
};
parts_left = foreign[cur_exchange][parts_left];
}
let best_amount = accumulator[n - 1][s];
(distribution, best_amount)
}
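// Complexity note (added): the dynamic program fills an n x (s+1) table and scans up to
// s predecessors per cell, so it runs in O(n * s^2) time and uses O(n * s) memory.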
| 42.666667 | 103 | 0.636936 |
f822e426503248da5bf23c83d6fac6a6dc24c090 | 349,269 | //! Persistent accounts are stored in below path location:
//! <path>/<pid>/data/
//!
//! The persistent store would allow for this mode of operation:
//! - Concurrent single thread append with many concurrent readers.
//!
//! The underlying memory is memory mapped to a file. The accounts would be
//! stored across multiple files and the mappings of file and offset of a
//! particular account would be stored in a shared index. This will allow for
//! concurrent commits without blocking reads, which will sequentially write
//! to memory, ssd or disk, and should be as fast as the hardware allows.
//! The only required in memory data structure with a write lock is the index,
//! which should be fast to update.
//!
//! AppendVec's only store accounts for single slots. To bootstrap the
//! index from a persistent store of AppendVec's, the entries include
//! a "write_version". A single global atomic `AccountsDb::write_version`
//! tracks the number of commits to the entire data store. So the latest
//! commit for each slot entry would be indexed.
use crate::{
accounts_cache::{AccountsCache, CachedAccount, SlotCache},
accounts_hash::{AccountsHash, CalculateHashIntermediate, HashStats},
accounts_index::{
AccountIndex, AccountsIndex, AccountsIndexRootsStats, Ancestors, IndexKey, IsCached,
SlotList, SlotSlice, ZeroLamport,
},
append_vec::{AppendVec, StoredAccountMeta, StoredMeta},
contains::Contains,
};
use blake3::traits::digest::Digest;
use dashmap::{
mapref::entry::Entry::{Occupied, Vacant},
DashMap, DashSet,
};
use lazy_static::lazy_static;
use log::*;
use rand::{prelude::SliceRandom, thread_rng, Rng};
use rayon::{prelude::*, ThreadPool};
use serde::{Deserialize, Serialize};
use solana_measure::measure::Measure;
use solana_rayon_threadlimit::get_thread_count;
use solana_sdk::{
account::Account,
clock::{Epoch, Slot},
genesis_config::ClusterType,
hash::{Hash, Hasher},
pubkey::Pubkey,
};
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
use std::{
borrow::Cow,
boxed::Box,
collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet},
convert::TryFrom,
io::{Error as IoError, Result as IoResult},
ops::RangeBounds,
path::{Path, PathBuf},
sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
sync::{Arc, Mutex, MutexGuard, RwLock},
time::Instant,
};
use tempfile::TempDir;
const PAGE_SIZE: u64 = 4 * 1024;
const MAX_RECYCLE_STORES: usize = 1000;
const STORE_META_OVERHEAD: usize = 256;
const MAX_CACHE_SLOTS: usize = 200;
const FLUSH_CACHE_RANDOM_THRESHOLD: usize = MAX_LOCKOUT_HISTORY;
const SCAN_SLOT_PAR_ITER_THRESHOLD: usize = 4000;
pub const DEFAULT_FILE_SIZE: u64 = PAGE_SIZE * 1024;
pub const DEFAULT_NUM_THREADS: u32 = 8;
pub const DEFAULT_NUM_DIRS: u32 = 4;
pub const SHRINK_RATIO: f64 = 0.80;
// A specially reserved storage id just for entries in the cache, so that
// operations that take a storage entry can maintain a common interface
// when interacting with cached accounts. This id is "virtual" in that it
// doesn't actually refer to an actual storage entry.
const CACHE_VIRTUAL_STORAGE_ID: usize = AppendVecId::MAX;
// A specially reserved write version (identifier for ordering writes in an AppendVec)
// for entries in the cache, so that operations that take a storage entry can maintain
// a common interface when interacting with cached accounts. This version is "virtual" in
// that it doesn't actually map to an entry in an AppendVec.
const CACHE_VIRTUAL_WRITE_VERSION: u64 = 0;
// A specially reserved offset (represents an offset into an AppendVec)
// for entries in the cache, so that operations that take a storage entry can maintain
// a common interface when interacting with cached accounts. This version is "virtual" in
// that it doesn't actually map to an entry in an AppendVec.
const CACHE_VIRTUAL_OFFSET: usize = 0;
const CACHE_VIRTUAL_STORED_SIZE: usize = 0;
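// Taken together, a cached account is described by the sentinel tuple
// (CACHE_VIRTUAL_STORAGE_ID, CACHE_VIRTUAL_OFFSET, CACHE_VIRTUAL_STORED_SIZE,
// CACHE_VIRTUAL_WRITE_VERSION), which lets cache hits flow through the same
// AccountInfo-shaped code paths as storage hits (see AccountInfo::is_cached below).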
type DashMapVersionHash = DashMap<Pubkey, (u64, Hash)>;
lazy_static! {
// FROZEN_ACCOUNT_PANIC is used to signal local_cluster that an AccountsDb panic has occurred,
// as |cargo test| cannot observe panics in other threads
pub static ref FROZEN_ACCOUNT_PANIC: Arc<AtomicBool> = Arc::new(AtomicBool::new(false));
}
pub enum ScanStorageResult<R, B> {
Cached(Vec<R>),
Stored(B),
}
#[derive(Debug, Default)]
pub struct ErrorCounters {
pub total: usize,
pub account_in_use: usize,
pub account_loaded_twice: usize,
pub account_not_found: usize,
pub blockhash_not_found: usize,
pub blockhash_too_old: usize,
pub call_chain_too_deep: usize,
pub duplicate_signature: usize,
pub instruction_error: usize,
pub insufficient_funds: usize,
pub invalid_account_for_fee: usize,
pub invalid_account_index: usize,
pub invalid_program_for_execution: usize,
pub not_allowed_during_cluster_maintenance: usize,
}
#[derive(Default, Debug, PartialEq, Clone)]
pub struct AccountInfo {
/// index identifying the append storage
store_id: AppendVecId,
/// offset into the storage
offset: usize,
/// needed to track shrink candidacy in bytes. Used to update the number
/// of alive bytes in an AppendVec as newer slots purge outdated entries
stored_size: usize,
/// lamports in the account used when squashing kept for optimization
/// purposes to remove accounts with zero balance.
lamports: u64,
}
impl IsCached for AccountInfo {
fn is_cached(&self) -> bool {
self.store_id == CACHE_VIRTUAL_STORAGE_ID
}
}
impl ZeroLamport for AccountInfo {
fn is_zero_lamport(&self) -> bool {
self.lamports == 0
}
}
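// An illustrative sketch of how these markers are used (`account_info` is a
// hypothetical `AccountInfo`; the branch mirrors what
// `get_account_accessor_from_cache_or_storage()` does later in this file):
//
//     if account_info.is_cached() {
//         // resolve the account via the AccountsCache using (slot, pubkey)
//     } else {
//         // resolve the account via the AppendVec at (store_id, offset)
//     }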
/// An offset into the AccountsDb::storage vector
pub type AppendVecId = usize;
pub type SnapshotStorage = Vec<Arc<AccountStorageEntry>>;
pub type SnapshotStorages = Vec<SnapshotStorage>;
// Each slot has a set of storage entries.
pub(crate) type SlotStores = Arc<RwLock<HashMap<usize, Arc<AccountStorageEntry>>>>;
type AccountSlots = HashMap<Pubkey, HashSet<Slot>>;
type AppendVecOffsets = HashMap<AppendVecId, HashSet<usize>>;
type ReclaimResult = (AccountSlots, AppendVecOffsets);
type StorageFinder<'a> = Box<dyn Fn(Slot, usize) -> Arc<AccountStorageEntry> + 'a>;
type ShrinkCandidates = HashMap<Slot, HashMap<AppendVecId, Arc<AccountStorageEntry>>>;
trait Versioned {
fn version(&self) -> u64;
}
impl Versioned for (u64, Hash) {
fn version(&self) -> u64 {
self.0
}
}
impl Versioned for (u64, AccountInfo) {
fn version(&self) -> u64 {
self.0
}
}
pub enum LoadedAccountAccessor<'a> {
Stored(Option<(Arc<AccountStorageEntry>, usize)>),
Cached((&'a AccountsCache, Slot, &'a Pubkey)),
}
impl<'a> LoadedAccountAccessor<'a> {
fn get_loaded_account(&self) -> Option<LoadedAccount> {
match self {
LoadedAccountAccessor::Stored(storage_entry) => {
// May not be present if slot was cleaned up in between
storage_entry.as_ref().and_then(|(storage_entry, offset)| {
storage_entry
.get_stored_account_meta(*offset)
.map(LoadedAccount::Stored)
})
}
LoadedAccountAccessor::Cached((cache, slot, pubkey)) => {
// May not be present if slot was cleaned up in between
cache.load(*slot, pubkey).map(|cached_account| {
LoadedAccount::Cached((**pubkey, Cow::Owned(cached_account)))
})
}
}
}
}
pub enum LoadedAccount<'a> {
Stored(StoredAccountMeta<'a>),
Cached((Pubkey, Cow<'a, CachedAccount>)),
}
impl<'a> LoadedAccount<'a> {
pub fn owner(&self) -> &Pubkey {
match self {
LoadedAccount::Stored(stored_account_meta) => &stored_account_meta.account_meta.owner,
LoadedAccount::Cached((_, cached_account)) => &cached_account.account.owner,
}
}
pub fn executable(&self) -> bool {
match self {
LoadedAccount::Stored(stored_account_meta) => {
stored_account_meta.account_meta.executable
}
LoadedAccount::Cached((_, cached_account)) => cached_account.account.executable,
}
}
pub fn loaded_hash(&self) -> &Hash {
match self {
LoadedAccount::Stored(stored_account_meta) => &stored_account_meta.hash,
LoadedAccount::Cached((_, cached_account)) => &cached_account.hash,
}
}
pub fn pubkey(&self) -> &Pubkey {
match self {
LoadedAccount::Stored(stored_account_meta) => &stored_account_meta.meta.pubkey,
LoadedAccount::Cached((pubkey, _)) => &pubkey,
}
}
pub fn write_version(&self) -> u64 {
match self {
LoadedAccount::Stored(stored_account_meta) => stored_account_meta.meta.write_version,
LoadedAccount::Cached(_) => CACHE_VIRTUAL_WRITE_VERSION,
}
}
pub fn compute_hash(&self, slot: Slot, cluster_type: &ClusterType, pubkey: &Pubkey) -> Hash {
match self {
LoadedAccount::Stored(stored_account_meta) => {
AccountsDb::hash_stored_account(slot, &stored_account_meta, cluster_type)
}
LoadedAccount::Cached((_, cached_account)) => {
AccountsDb::hash_account(slot, &cached_account.account, pubkey, cluster_type)
}
}
}
pub fn stored_size(&self) -> usize {
match self {
LoadedAccount::Stored(stored_account_meta) => stored_account_meta.stored_size,
LoadedAccount::Cached(_) => CACHE_VIRTUAL_STORED_SIZE,
}
}
pub fn lamports(&self) -> u64 {
match self {
LoadedAccount::Stored(stored_account_meta) => stored_account_meta.account_meta.lamports,
LoadedAccount::Cached((_, cached_account)) => cached_account.account.lamports,
}
}
pub fn account(self) -> Account {
match self {
LoadedAccount::Stored(stored_account_meta) => stored_account_meta.clone_account(),
LoadedAccount::Cached((_, cached_account)) => match cached_account {
Cow::Owned(cached_account) => cached_account.account,
Cow::Borrowed(cached_account) => cached_account.account.clone(),
},
}
}
}
#[derive(Clone, Default, Debug)]
pub struct AccountStorage(pub DashMap<Slot, SlotStores>);
impl AccountStorage {
fn get_account_storage_entry(
&self,
slot: Slot,
store_id: AppendVecId,
) -> Option<Arc<AccountStorageEntry>> {
self.get_slot_stores(slot)
.and_then(|storage_map| storage_map.read().unwrap().get(&store_id).cloned())
}
fn get_slot_stores(&self, slot: Slot) -> Option<SlotStores> {
self.0.get(&slot).map(|result| result.value().clone())
}
fn get_slot_storage_entries(&self, slot: Slot) -> Option<Vec<Arc<AccountStorageEntry>>> {
self.get_slot_stores(slot)
.map(|res| res.read().unwrap().values().cloned().collect())
}
fn slot_store_count(&self, slot: Slot, store_id: AppendVecId) -> Option<usize> {
self.get_account_storage_entry(slot, store_id)
.map(|store| store.count())
}
fn all_slots(&self) -> Vec<Slot> {
self.0.iter().map(|iter_item| *iter_item.key()).collect()
}
}
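// A minimal usage sketch, assuming a populated `AccountStorage` named `storage`
// and an in-scope `slot` (purely illustrative):
//
//     if let Some(slot_stores) = storage.get_slot_stores(slot) {
//         for store in slot_stores.read().unwrap().values() {
//             debug!("store {} holds {} accounts", store.append_vec_id(), store.count());
//         }
//     }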
#[derive(Debug, Eq, PartialEq, Copy, Clone, Deserialize, Serialize, AbiExample, AbiEnumVisitor)]
pub enum AccountStorageStatus {
Available = 0,
Full = 1,
Candidate = 2,
}
impl Default for AccountStorageStatus {
fn default() -> Self {
Self::Available
}
}
#[derive(Debug)]
pub enum BankHashVerificationError {
MismatchedAccountHash,
MismatchedBankHash,
MissingBankHash,
MismatchedTotalLamports(u64, u64),
}
#[derive(Default)]
struct CleanKeyTimings {
collect_delta_keys_us: u64,
delta_insert_us: u64,
hashset_to_vec_us: u64,
zero_lamport_key_clone_us: u64,
delta_key_count: u64,
zero_lamport_count: u64,
}
/// Persistent storage structure holding the accounts
#[derive(Debug)]
pub struct AccountStorageEntry {
pub(crate) id: AtomicUsize,
pub(crate) slot: AtomicU64,
/// storage holding the accounts
pub(crate) accounts: AppendVec,
    /// Keeps track of the number of accounts stored in a specific AppendVec.
    /// This is periodically checked to reuse the stores that do not have
    /// any accounts in them.
    /// The status corresponding to the storage lets us know whether the
    /// append_vec, once maxed out and then emptied, can be reclaimed.
count_and_status: RwLock<(usize, AccountStorageStatus)>,
    /// This is the total number of accounts stored ever since this entry was
    /// initialized, i.e. the lifetime count of all store operations. It differs
    /// from count_and_status in that this field is never decremented.
    ///
    /// This is used as a rough estimate for slot shrinking. Given that relaxed
    /// use case, this value is NOT strictly synchronized with count_and_status!
approx_store_count: AtomicUsize,
alive_bytes: AtomicUsize,
}
impl AccountStorageEntry {
pub fn new(path: &Path, slot: Slot, id: usize, file_size: u64) -> Self {
let tail = AppendVec::new_relative_path(slot, id);
let path = Path::new(path).join(&tail);
let accounts = AppendVec::new(&path, true, file_size as usize);
Self {
id: AtomicUsize::new(id),
slot: AtomicU64::new(slot),
accounts,
count_and_status: RwLock::new((0, AccountStorageStatus::Available)),
approx_store_count: AtomicUsize::new(0),
alive_bytes: AtomicUsize::new(0),
}
}
pub(crate) fn new_existing(
slot: Slot,
id: AppendVecId,
accounts: AppendVec,
num_accounts: usize,
) -> Self {
Self {
id: AtomicUsize::new(id),
slot: AtomicU64::new(slot),
accounts,
count_and_status: RwLock::new((0, AccountStorageStatus::Available)),
approx_store_count: AtomicUsize::new(num_accounts),
alive_bytes: AtomicUsize::new(0),
}
}
pub fn set_status(&self, mut status: AccountStorageStatus) {
let mut count_and_status = self.count_and_status.write().unwrap();
let count = count_and_status.0;
if status == AccountStorageStatus::Full && count == 0 {
// this case arises when the append_vec is full (store_ptrs fails),
// but all accounts have already been removed from the storage
//
// the only time it's safe to call reset() on an append_vec is when
// every account has been removed
// **and**
// the append_vec has previously been completely full
//
self.accounts.reset();
status = AccountStorageStatus::Available;
}
*count_and_status = (count, status);
}
pub fn recycle(&self, slot: Slot, id: usize) {
let mut count_and_status = self.count_and_status.write().unwrap();
self.accounts.reset();
*count_and_status = (0, AccountStorageStatus::Available);
self.slot.store(slot, Ordering::Release);
self.id.store(id, Ordering::Relaxed);
self.approx_store_count.store(0, Ordering::Relaxed);
self.alive_bytes.store(0, Ordering::Relaxed);
}
pub fn status(&self) -> AccountStorageStatus {
self.count_and_status.read().unwrap().1
}
pub fn count(&self) -> usize {
self.count_and_status.read().unwrap().0
}
pub fn approx_stored_count(&self) -> usize {
self.approx_store_count.load(Ordering::Relaxed)
}
pub fn alive_bytes(&self) -> usize {
self.alive_bytes.load(Ordering::SeqCst)
}
pub fn written_bytes(&self) -> u64 {
self.accounts.len() as u64
}
pub fn total_bytes(&self) -> u64 {
self.accounts.capacity()
}
pub fn has_accounts(&self) -> bool {
self.count() > 0
}
pub fn slot(&self) -> Slot {
self.slot.load(Ordering::Acquire)
}
pub fn append_vec_id(&self) -> AppendVecId {
self.id.load(Ordering::Relaxed)
}
pub fn flush(&self) -> Result<(), IoError> {
self.accounts.flush()
}
fn get_stored_account_meta(&self, offset: usize) -> Option<StoredAccountMeta> {
Some(self.accounts.get_account(offset)?.0)
}
fn add_account(&self, num_bytes: usize) {
let mut count_and_status = self.count_and_status.write().unwrap();
*count_and_status = (count_and_status.0 + 1, count_and_status.1);
self.approx_store_count.fetch_add(1, Ordering::Relaxed);
self.alive_bytes.fetch_add(num_bytes, Ordering::SeqCst);
}
fn try_available(&self) -> bool {
let mut count_and_status = self.count_and_status.write().unwrap();
let (count, status) = *count_and_status;
if status == AccountStorageStatus::Available {
*count_and_status = (count, AccountStorageStatus::Candidate);
true
} else {
false
}
}
pub fn all_accounts(&self) -> Vec<StoredAccountMeta> {
self.accounts.accounts(0)
}
fn remove_account(&self, num_bytes: usize, reset_accounts: bool) -> usize {
let mut count_and_status = self.count_and_status.write().unwrap();
let (mut count, mut status) = *count_and_status;
if count == 1 && status == AccountStorageStatus::Full && reset_accounts {
// this case arises when we remove the last account from the
// storage, but we've learned from previous write attempts that
// the storage is full
//
// the only time it's safe to call reset() on an append_vec is when
// every account has been removed
// **and**
// the append_vec has previously been completely full
//
// otherwise, the storage may be in flight with a store()
// call
self.accounts.reset();
status = AccountStorageStatus::Available;
}
        // Some code path is removing too many accounts; this may result in an
        // unintended reveal of old state for unrelated accounts.
assert!(
count > 0,
"double remove of account in slot: {}/store: {}!!",
self.slot(),
self.append_vec_id(),
);
self.alive_bytes.fetch_sub(num_bytes, Ordering::SeqCst);
count -= 1;
*count_and_status = (count, status);
count
}
pub fn get_relative_path(&self) -> Option<PathBuf> {
AppendVec::get_relative_path(self.accounts.get_path())
}
pub fn get_path(&self) -> PathBuf {
self.accounts.get_path()
}
}
pub fn get_temp_accounts_paths(count: u32) -> IoResult<(Vec<TempDir>, Vec<PathBuf>)> {
let temp_dirs: IoResult<Vec<TempDir>> = (0..count).map(|_| TempDir::new()).collect();
let temp_dirs = temp_dirs?;
let paths: Vec<PathBuf> = temp_dirs.iter().map(|t| t.path().to_path_buf()).collect();
Ok((temp_dirs, paths))
}
#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, AbiExample)]
pub struct BankHashStats {
pub num_updated_accounts: u64,
pub num_removed_accounts: u64,
pub num_lamports_stored: u64,
pub total_data_len: u64,
pub num_executable_accounts: u64,
}
impl BankHashStats {
pub fn update(&mut self, account: &Account) {
if account.lamports == 0 {
self.num_removed_accounts += 1;
} else {
self.num_updated_accounts += 1;
}
self.total_data_len = self.total_data_len.wrapping_add(account.data.len() as u64);
if account.executable {
self.num_executable_accounts += 1;
}
self.num_lamports_stored = self.num_lamports_stored.wrapping_add(account.lamports);
}
pub fn merge(&mut self, other: &BankHashStats) {
self.num_updated_accounts += other.num_updated_accounts;
self.num_removed_accounts += other.num_removed_accounts;
self.total_data_len = self.total_data_len.wrapping_add(other.total_data_len);
self.num_lamports_stored = self
.num_lamports_stored
.wrapping_add(other.num_lamports_stored);
self.num_executable_accounts += other.num_executable_accounts;
}
}
#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, AbiExample)]
pub struct BankHashInfo {
pub hash: Hash,
pub snapshot_hash: Hash,
pub stats: BankHashStats,
}
#[derive(Debug)]
struct FrozenAccountInfo {
pub hash: Hash, // Hash generated by hash_frozen_account_data()
pub lamports: u64, // Account balance cannot be lower than this amount
}
#[derive(Default)]
pub struct StoreAccountsTiming {
store_accounts_elapsed: u64,
update_index_elapsed: u64,
handle_reclaims_elapsed: u64,
}
#[derive(Debug, Default)]
struct RecycleStores {
entries: Vec<(Instant, Arc<AccountStorageEntry>)>,
total_bytes: u64,
}
// 30 min should be enough to be certain there won't be any prospective recycle
// uses for a given store entry. That's because, by then, roughly ~2500 slots and
// ~25 passes of AccountsBackgroundService will already have been processed.
pub const EXPIRATION_TTL_SECONDS: u64 = 1800;
impl RecycleStores {
fn add_entry(&mut self, new_entry: Arc<AccountStorageEntry>) {
self.total_bytes += new_entry.total_bytes();
self.entries.push((Instant::now(), new_entry))
}
fn iter(&self) -> std::slice::Iter<(Instant, Arc<AccountStorageEntry>)> {
self.entries.iter()
}
fn add_entries(&mut self, new_entries: Vec<Arc<AccountStorageEntry>>) {
self.total_bytes += new_entries.iter().map(|e| e.total_bytes()).sum::<u64>();
let now = Instant::now();
for new_entry in new_entries {
self.entries.push((now, new_entry));
}
}
fn expire_old_entries(&mut self) -> Vec<Arc<AccountStorageEntry>> {
let mut expired = vec![];
let now = Instant::now();
let mut expired_bytes = 0;
self.entries.retain(|(recycled_time, entry)| {
if now.duration_since(*recycled_time).as_secs() > EXPIRATION_TTL_SECONDS {
if Arc::strong_count(entry) >= 2 {
warn!(
"Expiring still in-use recycled StorageEntry anyway...: id: {} slot: {}",
entry.append_vec_id(),
entry.slot(),
);
}
expired_bytes += entry.total_bytes();
expired.push(entry.clone());
false
} else {
true
}
});
self.total_bytes -= expired_bytes;
expired
}
fn remove_entry(&mut self, index: usize) -> Arc<AccountStorageEntry> {
let (_added_time, removed_entry) = self.entries.swap_remove(index);
self.total_bytes -= removed_entry.total_bytes();
removed_entry
}
fn entry_count(&self) -> usize {
self.entries.len()
}
fn total_bytes(&self) -> u64 {
self.total_bytes
}
}
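// A lifecycle sketch (illustrative; `dead_store` stands in for an
// `Arc<AccountStorageEntry>` whose accounts have all been removed):
//
//     let mut recycle_stores = RecycleStores::default();
//     recycle_stores.add_entry(dead_store);
//     // A later pass either reuses a parked entry (see `try_recycle_store`
//     // below) or drops entries older than EXPIRATION_TTL_SECONDS:
//     let _expired = recycle_stores.expire_old_entries();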
// This structure handles the load/store of the accounts
#[derive(Debug)]
pub struct AccountsDb {
    /// Keeps track of the index into the AppendVecs on a per-slot basis
pub accounts_index: AccountsIndex<AccountInfo>,
pub storage: AccountStorage,
pub accounts_cache: AccountsCache,
recycle_stores: RwLock<RecycleStores>,
/// distribute the accounts across storage lists
pub next_id: AtomicUsize,
/// Set of shrinkable stores organized by map of slot to append_vec_id
pub shrink_candidate_slots: Mutex<ShrinkCandidates>,
/// Legacy shrink slots to support non-cached code-path.
pub shrink_candidate_slots_v1: Mutex<Vec<Slot>>,
pub(crate) write_version: AtomicU64,
/// Set of storage paths to pick from
pub(crate) paths: Vec<PathBuf>,
pub shrink_paths: RwLock<Option<Vec<PathBuf>>>,
/// Directory of paths this accounts_db needs to hold/remove
temp_paths: Option<Vec<TempDir>>,
/// Starting file size of appendvecs
file_size: u64,
    /// Accounts that will cause a panic! if their data is modified or their lamports decrease
frozen_accounts: HashMap<Pubkey, FrozenAccountInfo>,
/// Thread pool used for par_iter
pub thread_pool: ThreadPool,
pub thread_pool_clean: ThreadPool,
/// Number of append vecs to create to maximize parallelism when scanning
/// the accounts
min_num_stores: usize,
pub bank_hashes: RwLock<HashMap<Slot, BankHashInfo>>,
stats: AccountsStats,
clean_accounts_stats: CleanAccountsStats,
// Stats for purges called outside of clean_accounts()
external_purge_slots_stats: PurgeStats,
shrink_stats: ShrinkStats,
pub cluster_type: Option<ClusterType>,
pub account_indexes: HashSet<AccountIndex>,
pub caching_enabled: bool,
/// Set of unique keys per slot which is used
/// to drive clean_accounts
/// Generated by get_accounts_delta_hash
uncleaned_pubkeys: DashMap<Slot, Vec<Pubkey>>,
}
#[derive(Debug, Default)]
struct AccountsStats {
delta_hash_scan_time_total_us: AtomicU64,
delta_hash_accumulate_time_total_us: AtomicU64,
delta_hash_num: AtomicU64,
last_store_report: AtomicU64,
store_hash_accounts: AtomicU64,
store_accounts: AtomicU64,
store_update_index: AtomicU64,
store_handle_reclaims: AtomicU64,
store_append_accounts: AtomicU64,
store_find_store: AtomicU64,
store_num_accounts: AtomicU64,
store_total_data: AtomicU64,
recycle_store_count: AtomicU64,
create_store_count: AtomicU64,
store_get_slot_store: AtomicU64,
store_find_existing: AtomicU64,
dropped_stores: AtomicU64,
store_uncleaned_update: AtomicU64,
}
#[derive(Debug, Default)]
struct PurgeStats {
last_report: AtomicU64,
safety_checks_elapsed: AtomicU64,
remove_storages_elapsed: AtomicU64,
drop_storage_entries_elapsed: AtomicU64,
num_cached_slots_removed: AtomicUsize,
num_stored_slots_removed: AtomicUsize,
total_removed_storage_entries: AtomicUsize,
total_removed_cached_bytes: AtomicU64,
total_removed_stored_bytes: AtomicU64,
recycle_stores_write_elapsed: AtomicU64,
}
impl PurgeStats {
fn report(&self, metric_name: &'static str, report_interval_ms: Option<u64>) {
let should_report = report_interval_ms
.map(|report_interval_ms| {
let last = self.last_report.load(Ordering::Relaxed);
let now = solana_sdk::timing::timestamp();
now.saturating_sub(last) > report_interval_ms
&& self.last_report.compare_exchange(
last,
now,
Ordering::Relaxed,
Ordering::Relaxed,
) == Ok(last)
})
.unwrap_or(true);
if should_report {
datapoint_info!(
metric_name,
(
"safety_checks_elapsed",
self.safety_checks_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"remove_storages_elapsed",
self.remove_storages_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"drop_storage_entries_elapsed",
self.drop_storage_entries_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"num_cached_slots_removed",
self.num_cached_slots_removed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"num_stored_slots_removed",
self.num_stored_slots_removed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"total_removed_storage_entries",
self.total_removed_storage_entries
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"total_removed_cached_bytes",
self.total_removed_cached_bytes.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"total_removed_stored_bytes",
self.total_removed_stored_bytes.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"recycle_stores_write_elapsed",
self.recycle_stores_write_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
);
}
}
}
#[derive(Debug, Default)]
struct LatestAccountsIndexRootsStats {
roots_len: AtomicUsize,
uncleaned_roots_len: AtomicUsize,
previous_uncleaned_roots_len: AtomicUsize,
}
impl LatestAccountsIndexRootsStats {
fn update(&self, accounts_index_roots_stats: &AccountsIndexRootsStats) {
self.roots_len
.store(accounts_index_roots_stats.roots_len, Ordering::Relaxed);
self.uncleaned_roots_len.store(
accounts_index_roots_stats.uncleaned_roots_len,
Ordering::Relaxed,
);
self.previous_uncleaned_roots_len.store(
accounts_index_roots_stats.previous_uncleaned_roots_len,
Ordering::Relaxed,
);
}
fn report(&self) {
datapoint_info!(
"accounts_index_roots_len",
(
"roots_len",
self.roots_len.load(Ordering::Relaxed) as i64,
i64
),
(
"uncleaned_roots_len",
self.uncleaned_roots_len.load(Ordering::Relaxed) as i64,
i64
),
(
"previous_uncleaned_roots_len",
self.previous_uncleaned_roots_len.load(Ordering::Relaxed) as i64,
i64
),
);
// Don't need to reset since this tracks the latest updates, not a cumulative total
}
}
#[derive(Debug, Default)]
struct CleanAccountsStats {
purge_stats: PurgeStats,
latest_accounts_index_roots_stats: LatestAccountsIndexRootsStats,
}
impl CleanAccountsStats {
fn report(&self) {
self.purge_stats.report("clean_purge_slots_stats", None);
self.latest_accounts_index_roots_stats.report();
}
}
#[derive(Debug, Default)]
struct ShrinkStats {
last_report: AtomicU64,
num_slots_shrunk: AtomicUsize,
storage_read_elapsed: AtomicU64,
index_read_elapsed: AtomicU64,
find_alive_elapsed: AtomicU64,
create_and_insert_store_elapsed: AtomicU64,
store_accounts_elapsed: AtomicU64,
update_index_elapsed: AtomicU64,
handle_reclaims_elapsed: AtomicU64,
write_storage_elapsed: AtomicU64,
rewrite_elapsed: AtomicU64,
drop_storage_entries_elapsed: AtomicU64,
recycle_stores_write_elapsed: AtomicU64,
accounts_removed: AtomicUsize,
bytes_removed: AtomicU64,
}
impl ShrinkStats {
fn report(&self) {
let last = self.last_report.load(Ordering::Relaxed);
let now = solana_sdk::timing::timestamp();
let should_report = now.saturating_sub(last) > 1000
&& self
.last_report
.compare_exchange(last, now, Ordering::Relaxed, Ordering::Relaxed)
== Ok(last);
if should_report {
datapoint_info!(
"shrink_stats",
(
"num_slots_shrunk",
self.num_slots_shrunk.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"storage_read_elapsed",
self.storage_read_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"index_read_elapsed",
self.index_read_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"find_alive_elapsed",
self.find_alive_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"create_and_insert_store_elapsed",
self.create_and_insert_store_elapsed
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"store_accounts_elapsed",
self.store_accounts_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"update_index_elapsed",
self.update_index_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"handle_reclaims_elapsed",
self.handle_reclaims_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"write_storage_elapsed",
self.write_storage_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"rewrite_elapsed",
self.rewrite_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"drop_storage_entries_elapsed",
self.drop_storage_entries_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"recycle_stores_write_time",
self.recycle_stores_write_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"accounts_removed",
self.accounts_removed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"bytes_removed",
self.bytes_removed.swap(0, Ordering::Relaxed) as i64,
i64
),
);
}
}
}
pub fn make_min_priority_thread_pool() -> ThreadPool {
// Use lower thread count to reduce priority.
let num_threads = std::cmp::max(2, num_cpus::get() / 4);
rayon::ThreadPoolBuilder::new()
.thread_name(|i| format!("solana-accounts-cleanup-{}", i))
.num_threads(num_threads)
.build()
.unwrap()
}
#[cfg(all(test, RUSTC_WITH_SPECIALIZATION))]
impl solana_frozen_abi::abi_example::AbiExample for AccountsDb {
fn example() -> Self {
let accounts_db = AccountsDb::new_single();
let key = Pubkey::default();
let some_data_len = 5;
let some_slot: Slot = 0;
let account = Account::new(1, some_data_len, &key);
accounts_db.store_uncached(some_slot, &[(&key, &account)]);
accounts_db.add_root(0);
accounts_db
}
}
impl Default for AccountsDb {
fn default() -> Self {
let num_threads = get_thread_count();
let mut bank_hashes = HashMap::new();
bank_hashes.insert(0, BankHashInfo::default());
AccountsDb {
accounts_index: AccountsIndex::default(),
storage: AccountStorage::default(),
accounts_cache: AccountsCache::default(),
recycle_stores: RwLock::new(RecycleStores::default()),
uncleaned_pubkeys: DashMap::new(),
next_id: AtomicUsize::new(0),
shrink_candidate_slots_v1: Mutex::new(Vec::new()),
shrink_candidate_slots: Mutex::new(HashMap::new()),
write_version: AtomicU64::new(0),
paths: vec![],
shrink_paths: RwLock::new(None),
temp_paths: None,
file_size: DEFAULT_FILE_SIZE,
thread_pool: rayon::ThreadPoolBuilder::new()
.num_threads(num_threads)
.thread_name(|i| format!("solana-accounts-db-{}", i))
.build()
.unwrap(),
thread_pool_clean: make_min_priority_thread_pool(),
min_num_stores: num_threads,
bank_hashes: RwLock::new(bank_hashes),
frozen_accounts: HashMap::new(),
external_purge_slots_stats: PurgeStats::default(),
clean_accounts_stats: CleanAccountsStats::default(),
shrink_stats: ShrinkStats::default(),
stats: AccountsStats::default(),
cluster_type: None,
account_indexes: HashSet::new(),
caching_enabled: false,
}
}
}
impl AccountsDb {
pub fn new(paths: Vec<PathBuf>, cluster_type: &ClusterType) -> Self {
AccountsDb::new_with_config(paths, cluster_type, HashSet::new(), false)
}
pub fn new_with_config(
paths: Vec<PathBuf>,
cluster_type: &ClusterType,
account_indexes: HashSet<AccountIndex>,
caching_enabled: bool,
) -> Self {
let new = if !paths.is_empty() {
Self {
paths,
temp_paths: None,
cluster_type: Some(*cluster_type),
account_indexes,
caching_enabled,
..Self::default()
}
} else {
// Create a temporary set of accounts directories, used primarily
// for testing
let (temp_dirs, paths) = get_temp_accounts_paths(DEFAULT_NUM_DIRS).unwrap();
Self {
paths,
temp_paths: Some(temp_dirs),
cluster_type: Some(*cluster_type),
account_indexes,
caching_enabled,
..Self::default()
}
};
{
for path in new.paths.iter() {
std::fs::create_dir_all(path).expect("Create directory failed.");
}
}
new
}
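    // Construction sketch (paths and flags are illustrative):
    //
    //     let db = AccountsDb::new(vec![PathBuf::from("accounts")], &ClusterType::Development);
    //     // or, with secondary account indexes and the accounts cache enabled:
    //     let db = AccountsDb::new_with_config(paths, &ClusterType::Development, indexes, true);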
pub fn set_shrink_paths(&self, paths: Vec<PathBuf>) {
assert!(!paths.is_empty());
let mut shrink_paths = self.shrink_paths.write().unwrap();
for path in &paths {
std::fs::create_dir_all(path).expect("Create directory failed.");
}
*shrink_paths = Some(paths);
}
pub fn file_size(&self) -> u64 {
self.file_size
}
pub fn new_single() -> Self {
AccountsDb {
min_num_stores: 0,
..AccountsDb::new(Vec::new(), &ClusterType::Development)
}
}
fn new_storage_entry(&self, slot: Slot, path: &Path, size: u64) -> AccountStorageEntry {
AccountStorageEntry::new(
path,
slot,
self.next_id.fetch_add(1, Ordering::Relaxed),
size,
)
}
pub fn expected_cluster_type(&self) -> ClusterType {
self.cluster_type
.expect("Cluster type must be set at initialization")
}
// Reclaim older states of rooted accounts for AccountsDb bloat mitigation
fn clean_old_rooted_accounts(
&self,
purges_in_root: Vec<Pubkey>,
max_clean_root: Option<Slot>,
) -> ReclaimResult {
if purges_in_root.is_empty() {
return (HashMap::new(), HashMap::new());
}
        // This number isn't carefully chosen; it was just guessed such that
        // the hot loop runs on the order of ~X ms.
const INDEX_CLEAN_BULK_COUNT: usize = 4096;
let mut clean_rooted = Measure::start("clean_old_root-ms");
let reclaim_vecs =
purges_in_root
.par_chunks(INDEX_CLEAN_BULK_COUNT)
.map(|pubkeys: &[Pubkey]| {
let mut reclaims = Vec::new();
for pubkey in pubkeys {
self.accounts_index.clean_rooted_entries(
&pubkey,
&mut reclaims,
max_clean_root,
&self.account_indexes,
);
}
reclaims
});
let reclaims: Vec<_> = reclaim_vecs.flatten().collect();
clean_rooted.stop();
inc_new_counter_info!("clean-old-root-par-clean-ms", clean_rooted.as_ms() as usize);
let mut measure = Measure::start("clean_old_root_reclaims");
// Don't reset from clean, since the pubkeys in those stores may need to be unref'ed
// and those stores may be used for background hashing.
let reset_accounts = false;
let mut reclaim_result = (HashMap::new(), HashMap::new());
self.handle_reclaims(
&reclaims,
None,
false,
Some(&mut reclaim_result),
reset_accounts,
);
measure.stop();
debug!("{} {}", clean_rooted, measure);
inc_new_counter_info!("clean-old-root-reclaim-ms", measure.as_ms() as usize);
reclaim_result
}
fn do_reset_uncleaned_roots(&self, max_clean_root: Option<Slot>) {
self.accounts_index.reset_uncleaned_roots(max_clean_root);
}
fn calc_delete_dependencies(
purges: &HashMap<Pubkey, (SlotList<AccountInfo>, u64)>,
store_counts: &mut HashMap<AppendVecId, (usize, HashSet<Pubkey>)>,
) {
        // Another pass to check if there are some filtered accounts which
        // do not match the criteria of deleting all appendvecs which contain them;
        // if so, increment the storage count of those appendvecs.
let mut already_counted = HashSet::new();
for (pubkey, (account_infos, ref_count_from_storage)) in purges.iter() {
let no_delete = if account_infos.len() as u64 != *ref_count_from_storage {
debug!(
"calc_delete_dependencies(),
pubkey: {},
account_infos: {:?},
account_infos_len: {},
ref_count_from_storage: {}",
pubkey,
account_infos,
account_infos.len(),
ref_count_from_storage,
);
true
} else {
let mut no_delete = false;
for (_slot, account_info) in account_infos {
debug!(
"calc_delete_dependencies()
storage id: {},
count len: {}",
account_info.store_id,
store_counts.get(&account_info.store_id).unwrap().0,
);
if store_counts.get(&account_info.store_id).unwrap().0 != 0 {
no_delete = true;
break;
}
}
no_delete
};
if no_delete {
let mut pending_store_ids: HashSet<usize> = HashSet::new();
for (_slot_id, account_info) in account_infos {
if !already_counted.contains(&account_info.store_id) {
pending_store_ids.insert(account_info.store_id);
}
}
while !pending_store_ids.is_empty() {
let id = pending_store_ids.iter().next().cloned().unwrap();
pending_store_ids.remove(&id);
if already_counted.contains(&id) {
continue;
}
store_counts.get_mut(&id).unwrap().0 += 1;
already_counted.insert(id);
let affected_pubkeys = &store_counts.get(&id).unwrap().1;
for key in affected_pubkeys {
for (_slot, account_info) in &purges.get(&key).unwrap().0 {
if !already_counted.contains(&account_info.store_id) {
pending_store_ids.insert(account_info.store_id);
}
}
}
}
}
}
}
fn purge_keys_exact<'a, C: 'a>(
&'a self,
pubkey_to_slot_set: &'a [(Pubkey, C)],
) -> Vec<(u64, AccountInfo)>
where
C: Contains<'a, Slot>,
{
let mut reclaims = Vec::new();
let mut dead_keys = Vec::new();
for (pubkey, slots_set) in pubkey_to_slot_set {
let is_empty = self.accounts_index.purge_exact(
&pubkey,
slots_set,
&mut reclaims,
&self.account_indexes,
);
if is_empty {
dead_keys.push(pubkey);
}
}
self.accounts_index
.handle_dead_keys(&dead_keys, &self.account_indexes);
reclaims
}
fn max_clean_root(&self, proposed_clean_root: Option<Slot>) -> Option<Slot> {
match (
self.accounts_index.min_ongoing_scan_root(),
proposed_clean_root,
) {
(None, None) => None,
(Some(min_scan_root), None) => Some(min_scan_root),
(None, Some(proposed_clean_root)) => Some(proposed_clean_root),
(Some(min_scan_root), Some(proposed_clean_root)) => {
Some(std::cmp::min(min_scan_root, proposed_clean_root))
}
}
}
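    // For example: with an ongoing scan anchored at root 100 and a proposed
    // clean root of 150, cleaning is capped at min(100, 150) = 100, so the
    // in-flight scan still observes a consistent set of roots.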
fn collect_uncleaned_pubkeys_to_slot(&self, max_slot: Slot) -> (Vec<Vec<Pubkey>>, Slot) {
let mut max_slot_in_uncleaned_pubkeys = 0;
let slots: Vec<Slot> = self
.uncleaned_pubkeys
.iter()
.filter_map(|entry| {
let slot = entry.key();
max_slot_in_uncleaned_pubkeys = max_slot_in_uncleaned_pubkeys.max(*slot);
if *slot <= max_slot {
Some(*slot)
} else {
None
}
})
.collect();
(
slots
.into_iter()
.filter_map(|slot| {
let maybe_slot_keys = self.uncleaned_pubkeys.remove(&slot);
if self.accounts_index.is_root(slot) {
// Safe to unwrap on rooted slots since this is called from clean_accounts
// and only clean_accounts operates on rooted slots. purge_slots only
// operates on uncleaned_pubkeys
let (_slot, keys) = maybe_slot_keys.expect("Root slot should exist");
Some(keys)
} else {
None
}
})
.collect(),
max_slot_in_uncleaned_pubkeys,
)
}
// Construct a vec of pubkeys for cleaning from:
// uncleaned_pubkeys - the delta set of updated pubkeys in rooted slots from the last clean
// zero_lamport_pubkeys - set of all alive pubkeys containing 0-lamport updates
fn construct_candidate_clean_keys(
&self,
max_clean_root: Option<Slot>,
timings: &mut CleanKeyTimings,
) -> Vec<Pubkey> {
let mut zero_lamport_key_clone = Measure::start("zero_lamport_key");
let pubkeys = self.accounts_index.zero_lamport_pubkeys().clone();
timings.zero_lamport_count = pubkeys.len() as u64;
zero_lamport_key_clone.stop();
timings.zero_lamport_key_clone_us += zero_lamport_key_clone.as_us();
let mut collect_delta_keys = Measure::start("key_create");
let max_slot = max_clean_root.unwrap_or_else(|| self.accounts_index.max_root());
let (delta_keys, _max_slot) = self.collect_uncleaned_pubkeys_to_slot(max_slot);
collect_delta_keys.stop();
timings.collect_delta_keys_us += collect_delta_keys.as_us();
let mut delta_insert = Measure::start("delta_insert");
self.thread_pool_clean.install(|| {
delta_keys.par_iter().for_each(|keys| {
for key in keys {
pubkeys.insert(*key);
}
});
});
delta_insert.stop();
timings.delta_insert_us += delta_insert.as_us();
timings.delta_key_count = pubkeys.len() as u64;
let mut hashset_to_vec = Measure::start("flat_map");
let pubkeys: Vec<Pubkey> = pubkeys.into_iter().collect();
hashset_to_vec.stop();
timings.hashset_to_vec_us += hashset_to_vec.as_us();
pubkeys
}
// Purge zero lamport accounts and older rooted account states as garbage
// collection
// Only remove those accounts where the entire rooted history of the account
// can be purged because there are no live append vecs in the ancestors
pub fn clean_accounts(&self, max_clean_root: Option<Slot>) {
let max_clean_root = self.max_clean_root(max_clean_root);
        // hold a lock to prevent slot shrinking from running, because shrinking
        // might modify some rooted slot storages. That cannot be allowed while
        // we're cleaning accounts, since cleaning also modifies the rooted slot
        // storages!
let mut candidates_v1 = self.shrink_candidate_slots_v1.lock().unwrap();
self.report_store_stats();
let mut key_timings = CleanKeyTimings::default();
let pubkeys = self.construct_candidate_clean_keys(max_clean_root, &mut key_timings);
let total_keys_count = pubkeys.len();
let mut accounts_scan = Measure::start("accounts_scan");
// parallel scan the index.
let (mut purges, purges_in_root) = {
self.thread_pool_clean.install(|| {
pubkeys
.par_chunks(4096)
.map(|pubkeys: &[Pubkey]| {
let mut purges_in_root = Vec::new();
let mut purges = HashMap::new();
for pubkey in pubkeys {
if let Some((locked_entry, index)) =
self.accounts_index.get(pubkey, None, max_clean_root)
{
let slot_list = locked_entry.slot_list();
let (slot, account_info) = &slot_list[index];
if account_info.lamports == 0 {
purges.insert(
*pubkey,
self.accounts_index
.roots_and_ref_count(&locked_entry, max_clean_root),
);
}
// prune zero_lamport_pubkey set which should contain all 0-lamport
// keys whether rooted or not. A 0-lamport update may become rooted
// in the future.
let has_zero_lamport_accounts = slot_list
.iter()
.any(|(_slot, account_info)| account_info.lamports == 0);
if !has_zero_lamport_accounts {
self.accounts_index.remove_zero_lamport_key(pubkey);
}
// Release the lock
let slot = *slot;
drop(locked_entry);
if self.accounts_index.is_uncleaned_root(slot) {
                                // Assertion enforced by `accounts_index.get()`: the latest slot
                                // will not be greater than the given `max_clean_root`
if let Some(max_clean_root) = max_clean_root {
assert!(slot <= max_clean_root);
}
purges_in_root.push(*pubkey);
}
} else {
let r_accounts_index =
self.accounts_index.account_maps.read().unwrap();
if !r_accounts_index.contains_key(pubkey) {
self.accounts_index.remove_zero_lamport_key(pubkey);
}
}
}
(purges, purges_in_root)
})
.reduce(
|| (HashMap::new(), Vec::new()),
|mut m1, m2| {
// Collapse down the hashmaps/vecs into one.
m1.0.extend(m2.0);
m1.1.extend(m2.1);
m1
},
)
})
};
accounts_scan.stop();
let mut clean_old_rooted = Measure::start("clean_old_roots");
let (purged_account_slots, removed_accounts) =
self.clean_old_rooted_accounts(purges_in_root, max_clean_root);
if self.caching_enabled {
self.do_reset_uncleaned_roots(max_clean_root);
} else {
self.do_reset_uncleaned_roots_v1(&mut candidates_v1, max_clean_root);
}
clean_old_rooted.stop();
let mut store_counts_time = Measure::start("store_counts");
// Calculate store counts as if everything was purged
// Then purge if we can
let mut store_counts: HashMap<AppendVecId, (usize, HashSet<Pubkey>)> = HashMap::new();
for (key, (account_infos, ref_count)) in purges.iter_mut() {
if purged_account_slots.contains_key(&key) {
*ref_count = self.accounts_index.ref_count_from_storage(&key);
}
account_infos.retain(|(slot, account_info)| {
let was_slot_purged = purged_account_slots
.get(&key)
.map(|slots_removed| slots_removed.contains(slot))
.unwrap_or(false);
if was_slot_purged {
// No need to look up the slot storage below if the entire
// slot was purged
return false;
}
// Check if this update in `slot` to the account with `key` was reclaimed earlier by
// `clean_old_rooted_accounts()`
let was_reclaimed = removed_accounts
.get(&account_info.store_id)
.map(|store_removed| store_removed.contains(&account_info.offset))
.unwrap_or(false);
if was_reclaimed {
return false;
}
if let Some(store_count) = store_counts.get_mut(&account_info.store_id) {
store_count.0 -= 1;
store_count.1.insert(*key);
} else {
let mut key_set = HashSet::new();
key_set.insert(*key);
let count = self
.storage
.slot_store_count(*slot, account_info.store_id)
.unwrap()
- 1;
debug!(
"store_counts, inserting slot: {}, store id: {}, count: {}",
slot, account_info.store_id, count
);
store_counts.insert(account_info.store_id, (count, key_set));
}
true
});
}
store_counts_time.stop();
let mut calc_deps_time = Measure::start("calc_deps");
Self::calc_delete_dependencies(&purges, &mut store_counts);
calc_deps_time.stop();
// Only keep purges where the entire history of the account in the root set
// can be purged. All AppendVecs for those updates are dead.
let mut purge_filter = Measure::start("purge_filter");
purges.retain(|_pubkey, (account_infos, _ref_count)| {
for (_slot, account_info) in account_infos.iter() {
if store_counts.get(&account_info.store_id).unwrap().0 != 0 {
return false;
}
}
true
});
purge_filter.stop();
let mut reclaims_time = Measure::start("reclaims");
// Recalculate reclaims with new purge set
let pubkey_to_slot_set: Vec<_> = purges
.into_iter()
.map(|(key, (slots_list, _ref_count))| {
(
key,
slots_list
.into_iter()
.map(|(slot, _)| slot)
.collect::<HashSet<Slot>>(),
)
})
.collect();
let reclaims = self.purge_keys_exact(&pubkey_to_slot_set);
// Don't reset from clean, since the pubkeys in those stores may need to be unref'ed
// and those stores may be used for background hashing.
let reset_accounts = false;
self.handle_reclaims(&reclaims, None, false, None, reset_accounts);
reclaims_time.stop();
self.clean_accounts_stats.report();
datapoint_info!(
"clean_accounts",
(
"collect_delta_keys_us",
key_timings.collect_delta_keys_us,
i64
),
(
"zero_lamport_key_clone_us",
key_timings.zero_lamport_key_clone_us,
i64
),
("accounts_scan", accounts_scan.as_us() as i64, i64),
("clean_old_rooted", clean_old_rooted.as_us() as i64, i64),
("store_counts", store_counts_time.as_us() as i64, i64),
("purge_filter", purge_filter.as_us() as i64, i64),
("calc_deps", calc_deps_time.as_us() as i64, i64),
("reclaims", reclaims_time.as_us() as i64, i64),
("delta_key_count", key_timings.delta_key_count, i64),
("zero_lamport_count", key_timings.zero_lamport_count, i64),
("total_keys_count", total_keys_count, i64),
);
}
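    // Usage sketch: `db.clean_accounts(None)` cleans up to the current max root
    // (as clamped by `max_clean_root()`), while `db.clean_accounts(Some(slot))`
    // additionally caps cleaning at `slot`.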
/// Removes the accounts in the input `reclaims` from the tracked "count" of
/// their corresponding storage entries. Note this does not actually free
/// the memory from the storage entries until all the storage entries for
/// a given slot `S` are empty, at which point `process_dead_slots` will
/// remove all the storage entries for `S`.
///
/// # Arguments
/// * `reclaims` - The accounts to remove from storage entries' "count"
///
/// * `expected_single_dead_slot` - A correctness assertion. If this is equal to `Some(S)`,
/// then the function will check that the only slot being cleaned up in `reclaims`
/// is the slot == `S`. This is true for instance when `handle_reclaims` is called
/// from store or slot shrinking, as those should only touch the slot they are
/// currently storing to or shrinking.
///
    /// * `no_dead_slot` - A correctness assertion. If this is equal to
    ///    `true`, the function will check that no slots are cleaned up/removed via
    ///    `process_dead_slots`. For instance, on store, no slots should ever be cleaned up,
    ///    but the background clean does purge accounts from old rooted slots,
    ///    so outdated slots may be removed there.
/// * `reclaim_result` - Information about accounts that were removed from storage, does
/// not include accounts that were removed from the cache
/// * `reset_accounts` - Reset the append_vec store when the store is dead (count==0)
/// From the clean and shrink paths it should be false since there may be an in-progress
/// hash operation and the stores may hold accounts that need to be unref'ed.
fn handle_reclaims(
&self,
reclaims: SlotSlice<AccountInfo>,
expected_single_dead_slot: Option<Slot>,
no_dead_slot: bool,
reclaim_result: Option<&mut ReclaimResult>,
reset_accounts: bool,
) {
if reclaims.is_empty() {
return;
}
let (purged_account_slots, reclaimed_offsets) =
if let Some((ref mut x, ref mut y)) = reclaim_result {
(Some(x), Some(y))
} else {
(None, None)
};
let dead_slots = self.remove_dead_accounts(
reclaims,
expected_single_dead_slot,
reclaimed_offsets,
reset_accounts,
);
if no_dead_slot {
assert!(dead_slots.is_empty());
} else if let Some(expected_single_dead_slot) = expected_single_dead_slot {
assert!(dead_slots.len() <= 1);
if dead_slots.len() == 1 {
assert!(dead_slots.contains(&expected_single_dead_slot));
}
}
self.process_dead_slots(&dead_slots, purged_account_slots);
}
    // Must be kept private! Does sensitive cleanup that should only be called from
    // supported pipelines in AccountsDb
fn process_dead_slots(
&self,
dead_slots: &HashSet<Slot>,
purged_account_slots: Option<&mut AccountSlots>,
) {
if dead_slots.is_empty() {
return;
}
let mut clean_dead_slots = Measure::start("reclaims::clean_dead_slots");
self.clean_stored_dead_slots(&dead_slots, purged_account_slots);
clean_dead_slots.stop();
let mut purge_removed_slots = Measure::start("reclaims::purge_removed_slots");
self.purge_storage_slots(&dead_slots);
purge_removed_slots.stop();
// If the slot is dead, remove the need to shrink the storages as
// the storage entries will be purged.
for slot in dead_slots {
self.shrink_candidate_slots.lock().unwrap().remove(slot);
}
debug!(
"process_dead_slots({}): {} {} {:?}",
dead_slots.len(),
clean_dead_slots,
purge_removed_slots,
dead_slots,
);
}
fn do_shrink_slot_stores<'a, I>(&'a self, slot: Slot, stores: I)
where
I: Iterator<Item = &'a Arc<AccountStorageEntry>>,
{
struct FoundStoredAccount {
account: Account,
account_hash: Hash,
account_size: usize,
store_id: AppendVecId,
offset: usize,
write_version: u64,
}
debug!("do_shrink_slot_stores: slot: {}", slot);
let mut stored_accounts: HashMap<Pubkey, FoundStoredAccount> = HashMap::new();
let mut original_bytes = 0;
for store in stores {
let mut start = 0;
original_bytes += store.total_bytes();
while let Some((account, next)) = store.accounts.get_account(start) {
match stored_accounts.entry(account.meta.pubkey) {
Entry::Occupied(mut occupied_entry) => {
if account.meta.write_version > occupied_entry.get().write_version {
occupied_entry.insert(FoundStoredAccount {
account: account.clone_account(),
account_hash: *account.hash,
account_size: next - start,
store_id: store.append_vec_id(),
offset: account.offset,
write_version: account.meta.write_version,
});
}
}
Entry::Vacant(vacant_entry) => {
vacant_entry.insert(FoundStoredAccount {
account: account.clone_account(),
account_hash: *account.hash,
account_size: next - start,
store_id: store.append_vec_id(),
offset: account.offset,
write_version: account.meta.write_version,
});
}
}
start = next;
}
}
let mut index_read_elapsed = Measure::start("index_read_elapsed");
let mut alive_total = 0;
let alive_accounts: Vec<_> = {
stored_accounts
.iter()
.filter(|(pubkey, stored_account)| {
let FoundStoredAccount {
account_size,
store_id,
offset,
..
} = stored_account;
if let Some((locked_entry, _)) = self.accounts_index.get(pubkey, None, None) {
let is_alive = locked_entry
.slot_list()
.iter()
.any(|(_slot, i)| i.store_id == *store_id && i.offset == *offset);
if !is_alive {
// This pubkey was found in the storage, but no longer exists in the index.
// It would have had a ref to the storage from the initial store, but it will
// not exist in the re-written slot. Unref it to keep the index consistent with
// rewriting the storage entries.
locked_entry.unref()
} else {
alive_total += *account_size as u64;
}
is_alive
} else {
false
}
})
.collect()
};
index_read_elapsed.stop();
let aligned_total: u64 = self.page_align(alive_total);
let total_starting_accounts = stored_accounts.len();
let total_accounts_after_shrink = alive_accounts.len();
debug!(
"shrinking: slot: {}, total_starting_accounts: {} => total_accounts_after_shrink: {} ({} bytes; aligned to: {})",
slot,
total_starting_accounts,
total_accounts_after_shrink,
alive_total,
aligned_total
);
let mut rewrite_elapsed = Measure::start("rewrite_elapsed");
let mut dead_storages = vec![];
let mut find_alive_elapsed = 0;
let mut create_and_insert_store_elapsed = 0;
let mut write_storage_elapsed = 0;
let mut store_accounts_timing = StoreAccountsTiming::default();
if aligned_total > 0 {
let mut start = Measure::start("find_alive_elapsed");
let mut accounts = Vec::with_capacity(alive_accounts.len());
let mut hashes = Vec::with_capacity(alive_accounts.len());
let mut write_versions = Vec::with_capacity(alive_accounts.len());
for (pubkey, alive_account) in alive_accounts {
accounts.push((pubkey, &alive_account.account));
hashes.push(alive_account.account_hash);
write_versions.push(alive_account.write_version);
}
start.stop();
find_alive_elapsed = start.as_us();
let mut start = Measure::start("create_and_insert_store_elapsed");
let shrunken_store = if let Some(new_store) =
self.try_recycle_and_insert_store(slot, aligned_total, aligned_total + 1024)
{
new_store
} else {
let maybe_shrink_paths = self.shrink_paths.read().unwrap();
if let Some(ref shrink_paths) = *maybe_shrink_paths {
self.create_and_insert_store_with_paths(
slot,
aligned_total,
"shrink-w-path",
shrink_paths,
)
} else {
self.create_and_insert_store(slot, aligned_total, "shrink")
}
};
start.stop();
create_and_insert_store_elapsed = start.as_us();
            // here, we're writing back alive_accounts. That is effectively an atomic
            // operation, even without taking rather wide locks in this whole function,
            // because we're mutating rooted slots; there should be no other writers
            // to them.
store_accounts_timing = self.store_accounts_frozen(
slot,
&accounts,
&hashes,
Some(Box::new(move |_, _| shrunken_store.clone())),
Some(Box::new(write_versions.into_iter())),
);
// `store_accounts_frozen()` above may have purged accounts from some
// other storage entries (the ones that were just overwritten by this
// new storage entry). This means some of those stores might have caused
            // this slot to be added to `self.shrink_candidate_slots`, so remove
            // the slot from the candidate set here
self.shrink_candidate_slots.lock().unwrap().remove(&slot);
// Purge old, overwritten storage entries
let mut start = Measure::start("write_storage_elapsed");
if let Some(slot_stores) = self.storage.get_slot_stores(slot) {
slot_stores.write().unwrap().retain(|_key, store| {
if store.count() == 0 {
dead_storages.push(store.clone());
}
store.count() > 0
});
}
start.stop();
write_storage_elapsed = start.as_us();
}
rewrite_elapsed.stop();
let mut recycle_stores_write_elapsed = Measure::start("recycle_stores_write_time");
let mut recycle_stores = self.recycle_stores.write().unwrap();
recycle_stores_write_elapsed.stop();
let mut drop_storage_entries_elapsed = Measure::start("drop_storage_entries_elapsed");
if recycle_stores.entry_count() < MAX_RECYCLE_STORES {
recycle_stores.add_entries(dead_storages);
drop(recycle_stores);
} else {
self.stats
.dropped_stores
.fetch_add(dead_storages.len() as u64, Ordering::Relaxed);
drop(recycle_stores);
drop(dead_storages);
}
drop_storage_entries_elapsed.stop();
self.shrink_stats
.num_slots_shrunk
.fetch_add(1, Ordering::Relaxed);
self.shrink_stats
.index_read_elapsed
.fetch_add(index_read_elapsed.as_us(), Ordering::Relaxed);
self.shrink_stats
.find_alive_elapsed
.fetch_add(find_alive_elapsed, Ordering::Relaxed);
self.shrink_stats
.create_and_insert_store_elapsed
.fetch_add(create_and_insert_store_elapsed, Ordering::Relaxed);
self.shrink_stats.store_accounts_elapsed.fetch_add(
store_accounts_timing.store_accounts_elapsed,
Ordering::Relaxed,
);
self.shrink_stats.update_index_elapsed.fetch_add(
store_accounts_timing.update_index_elapsed,
Ordering::Relaxed,
);
self.shrink_stats.handle_reclaims_elapsed.fetch_add(
store_accounts_timing.handle_reclaims_elapsed,
Ordering::Relaxed,
);
self.shrink_stats
.write_storage_elapsed
.fetch_add(write_storage_elapsed, Ordering::Relaxed);
self.shrink_stats
.rewrite_elapsed
.fetch_add(rewrite_elapsed.as_us(), Ordering::Relaxed);
self.shrink_stats
.drop_storage_entries_elapsed
.fetch_add(drop_storage_entries_elapsed.as_us(), Ordering::Relaxed);
self.shrink_stats
.recycle_stores_write_elapsed
.fetch_add(recycle_stores_write_elapsed.as_us(), Ordering::Relaxed);
self.shrink_stats.accounts_removed.fetch_add(
total_starting_accounts - total_accounts_after_shrink,
Ordering::Relaxed,
);
self.shrink_stats.bytes_removed.fetch_add(
original_bytes.saturating_sub(aligned_total),
Ordering::Relaxed,
);
self.shrink_stats.report();
}
    // Reads all accounts in the given slot's AppendVecs, filters down to only the
    // alive ones, then creates a minimal AppendVec filled with those alive accounts.
fn shrink_slot_forced(&self, slot: Slot) -> usize {
debug!("shrink_slot_forced: slot: {}", slot);
if let Some(stores_lock) = self.storage.get_slot_stores(slot) {
let stores: Vec<Arc<AccountStorageEntry>> =
stores_lock.read().unwrap().values().cloned().collect();
let mut alive_count = 0;
let mut stored_count = 0;
for store in &stores {
alive_count += store.count();
stored_count += store.approx_stored_count();
}
if alive_count == stored_count && stores.len() == 1 {
trace!(
"shrink_slot_forced ({}): not able to shrink at all: alive/stored: {} / {}",
slot,
alive_count,
stored_count,
);
return 0;
}
self.do_shrink_slot_stores(slot, stores.iter());
alive_count
} else {
0
}
}
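    // Worked example: a slot with a single store where all 100 stored accounts
    // are alive (alive_count == stored_count) is skipped, since rewriting would
    // reclaim nothing; a store with 100 stored but only 20 alive accounts is
    // rewritten into a page-aligned AppendVec sized for the 20 survivors.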
fn all_slots_in_storage(&self) -> Vec<Slot> {
self.storage.all_slots()
}
fn all_root_slots_in_index(&self) -> Vec<Slot> {
self.accounts_index.all_roots()
}
pub fn shrink_candidate_slots(&self) -> usize {
let shrink_slots = std::mem::replace(
&mut *self.shrink_candidate_slots.lock().unwrap(),
HashMap::new(),
);
let num_candidates = shrink_slots.len();
for (slot, slot_shrink_candidates) in shrink_slots {
let mut measure = Measure::start("shrink_candidate_slots-ms");
self.do_shrink_slot_stores(slot, slot_shrink_candidates.values());
measure.stop();
inc_new_counter_info!("shrink_candidate_slots-ms", measure.as_ms() as usize);
}
num_candidates
}
pub fn shrink_all_slots(&self) {
for slot in self.all_slots_in_storage() {
if self.caching_enabled {
self.shrink_slot_forced(slot);
} else {
self.do_shrink_slot_forced_v1(slot);
}
}
}
pub fn scan_accounts<F, A>(&self, ancestors: &Ancestors, scan_func: F) -> A
where
F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>),
A: Default,
{
let mut collector = A::default();
self.accounts_index
.scan_accounts(ancestors, |pubkey, (account_info, slot)| {
let account_slot = self
.get_account_accessor_from_cache_or_storage(
slot,
pubkey,
account_info.store_id,
account_info.offset,
)
.get_loaded_account()
.map(|loaded_account| (pubkey, loaded_account.account(), slot));
scan_func(&mut collector, account_slot)
});
collector
}
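    // Usage sketch (the closure is illustrative): collect the lamports of every
    // account visible from `ancestors`:
    //
    //     let balances: Vec<(Pubkey, u64)> = db.scan_accounts(&ancestors, |acc, item| {
    //         if let Some((pubkey, account, _slot)) = item {
    //             acc.push((*pubkey, account.lamports));
    //         }
    //     });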
pub fn unchecked_scan_accounts<F, A>(
&self,
metric_name: &'static str,
ancestors: &Ancestors,
scan_func: F,
) -> A
where
F: Fn(&mut A, (&Pubkey, LoadedAccount, Slot)),
A: Default,
{
let mut collector = A::default();
self.accounts_index.unchecked_scan_accounts(
metric_name,
ancestors,
|pubkey, (account_info, slot)| {
if let Some(loaded_account) = self
.get_account_accessor_from_cache_or_storage(
slot,
pubkey,
account_info.store_id,
account_info.offset,
)
.get_loaded_account()
{
scan_func(&mut collector, (pubkey, loaded_account, slot));
}
},
);
collector
}
pub fn range_scan_accounts<F, A, R>(
&self,
metric_name: &'static str,
ancestors: &Ancestors,
range: R,
scan_func: F,
) -> A
where
F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>),
A: Default,
R: RangeBounds<Pubkey>,
{
let mut collector = A::default();
self.accounts_index.range_scan_accounts(
metric_name,
ancestors,
range,
|pubkey, (account_info, slot)| {
let account_slot = self
.get_account_accessor_from_cache_or_storage(
slot,
pubkey,
account_info.store_id,
account_info.offset,
)
.get_loaded_account()
.map(|loaded_account| (pubkey, loaded_account.account(), slot));
scan_func(&mut collector, account_slot)
},
);
collector
}
pub fn index_scan_accounts<F, A>(
&self,
ancestors: &Ancestors,
index_key: IndexKey,
scan_func: F,
) -> A
where
F: Fn(&mut A, Option<(&Pubkey, Account, Slot)>),
A: Default,
{
let mut collector = A::default();
self.accounts_index.index_scan_accounts(
ancestors,
index_key,
|pubkey, (account_info, slot)| {
let account_slot = self
.get_account_accessor_from_cache_or_storage(
slot,
pubkey,
account_info.store_id,
account_info.offset,
)
.get_loaded_account()
.map(|loaded_account| (pubkey, loaded_account.account(), slot));
scan_func(&mut collector, account_slot)
},
);
collector
}
/// Scan a specific slot through all the account storage in parallel
pub fn scan_account_storage<R, B>(
&self,
slot: Slot,
cache_map_func: impl Fn(LoadedAccount) -> Option<R> + Sync,
storage_scan_func: impl Fn(&B, LoadedAccount) + Sync,
) -> ScanStorageResult<R, B>
where
R: Send,
B: Send + Default + Sync,
{
if let Some(slot_cache) = self.accounts_cache.slot_cache(slot) {
// If we see the slot in the cache, then all the account information
// is in this cached slot
if slot_cache.len() > SCAN_SLOT_PAR_ITER_THRESHOLD {
ScanStorageResult::Cached(self.thread_pool.install(|| {
slot_cache
.par_iter()
.filter_map(|cached_account| {
cache_map_func(LoadedAccount::Cached((
*cached_account.key(),
Cow::Borrowed(cached_account.value()),
)))
})
.collect()
}))
} else {
ScanStorageResult::Cached(
slot_cache
.iter()
.filter_map(|cached_account| {
cache_map_func(LoadedAccount::Cached((
*cached_account.key(),
Cow::Borrowed(cached_account.value()),
)))
})
.collect(),
)
}
} else {
let retval = B::default();
// If the slot is not in the cache, then all the account information must have
// been flushed. This is guaranteed because we only remove the rooted slot from
// the cache *after* we've finished flushing in `flush_slot_cache`.
let storage_maps: Vec<Arc<AccountStorageEntry>> = self
.storage
.get_slot_storage_entries(slot)
.unwrap_or_default();
self.thread_pool.install(|| {
storage_maps
.par_iter()
.flat_map(|storage| storage.all_accounts())
.for_each(|account| storage_scan_func(&retval, LoadedAccount::Stored(account)));
});
ScanStorageResult::Stored(retval)
}
}
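    // Usage sketch (closures are illustrative): gather the pubkeys of executable
    // accounts in `slot`, whether the slot is still cached or already stored:
    //
    //     let result: ScanStorageResult<Pubkey, DashSet<Pubkey>> = db.scan_account_storage(
    //         slot,
    //         |loaded| if loaded.executable() { Some(*loaded.pubkey()) } else { None },
    //         |set, loaded| { if loaded.executable() { set.insert(*loaded.pubkey()); } },
    //     );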
pub fn set_hash(&self, slot: Slot, parent_slot: Slot) {
let mut bank_hashes = self.bank_hashes.write().unwrap();
if bank_hashes.get(&slot).is_some() {
error!(
"set_hash: already exists; multiple forks with shared slot {} as child (parent: {})!?",
slot, parent_slot,
);
return;
}
let new_hash_info = BankHashInfo {
hash: Hash::default(),
snapshot_hash: Hash::default(),
stats: BankHashStats::default(),
};
bank_hashes.insert(slot, new_hash_info);
}
pub fn load(&self, ancestors: &Ancestors, pubkey: &Pubkey) -> Option<(Account, Slot)> {
self.do_load(ancestors, pubkey, None)
}
fn do_load(
&self,
ancestors: &Ancestors,
pubkey: &Pubkey,
max_root: Option<Slot>,
) -> Option<(Account, Slot)> {
let (slot, store_id, offset) = {
let (lock, index) = self.accounts_index.get(pubkey, Some(ancestors), max_root)?;
let slot_list = lock.slot_list();
let (
slot,
AccountInfo {
store_id, offset, ..
},
) = slot_list[index];
(slot, store_id, offset)
// `lock` released here
};
//TODO: thread this as a ref
self.get_account_accessor_from_cache_or_storage(slot, pubkey, store_id, offset)
.get_loaded_account()
.map(|loaded_account| (loaded_account.account(), slot))
}
pub fn load_account_hash(&self, ancestors: &Ancestors, pubkey: &Pubkey) -> Hash {
let (slot, store_id, offset) = {
let (lock, index) = self
.accounts_index
.get(pubkey, Some(ancestors), None)
.unwrap();
let slot_list = lock.slot_list();
let (
slot,
AccountInfo {
store_id, offset, ..
},
) = slot_list[index];
(slot, store_id, offset)
// lock released here
};
self.get_account_accessor_from_cache_or_storage(slot, pubkey, store_id, offset)
.get_loaded_account()
.map(|loaded_account| *loaded_account.loaded_hash())
.unwrap()
}
pub fn load_slow(&self, ancestors: &Ancestors, pubkey: &Pubkey) -> Option<(Account, Slot)> {
self.load(ancestors, pubkey)
}
    // The `get_account_accessor_from_cache_or_storage() -> get_loaded_account()`
    // pattern is only safe while holding the AccountsIndex lock for `pubkey`; otherwise a cache
    // flush could happen between `get_account_accessor_from_cache_or_storage()` and
    // `get_loaded_account()`, and the `LoadedAccountAccessor::Cached((&self.accounts_cache, slot, pubkey))`
    // returned here won't be able to find a slot cache entry for that `slot`.
fn get_account_accessor_from_cache_or_storage<'a>(
&'a self,
slot: Slot,
pubkey: &'a Pubkey,
store_id: usize,
offset: usize,
) -> LoadedAccountAccessor<'a> {
if store_id == CACHE_VIRTUAL_STORAGE_ID {
LoadedAccountAccessor::Cached((&self.accounts_cache, slot, pubkey))
} else {
let account_storage_entry = self.storage.get_account_storage_entry(slot, store_id);
LoadedAccountAccessor::Stored(
account_storage_entry.map(|account_storage_entry| (account_storage_entry, offset)),
)
}
}
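    // A minimal sketch of the safe pattern described above (hypothetical helper,
    // compiled only for tests; not part of the real API): resolve the accessor and
    // load the account while the index lock for `pubkey` is still held, so a
    // concurrent cache flush cannot invalidate a `Cached` accessor in between.
    #[cfg(test)]
    fn load_while_index_locked(
        &self,
        ancestors: &Ancestors,
        pubkey: &Pubkey,
    ) -> Option<Account> {
        let (lock, index) = self.accounts_index.get(pubkey, Some(ancestors), None)?;
        let (
            slot,
            AccountInfo {
                store_id, offset, ..
            },
        ) = lock.slot_list()[index];
        // `lock` is intentionally kept alive across both calls below
        let account = self
            .get_account_accessor_from_cache_or_storage(slot, pubkey, store_id, offset)
            .get_loaded_account()
            .map(|loaded_account| loaded_account.account());
        drop(lock);
        account
    }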
fn try_recycle_and_insert_store(
&self,
slot: Slot,
min_size: u64,
max_size: u64,
) -> Option<Arc<AccountStorageEntry>> {
let store = self.try_recycle_store(slot, min_size, max_size)?;
self.insert_store(slot, store.clone());
Some(store)
}
fn try_recycle_store(
&self,
slot: Slot,
min_size: u64,
max_size: u64,
) -> Option<Arc<AccountStorageEntry>> {
let mut max = 0;
let mut min = std::u64::MAX;
let mut avail = 0;
let mut recycle_stores = self.recycle_stores.write().unwrap();
for (i, (_recycled_time, store)) in recycle_stores.iter().enumerate() {
if Arc::strong_count(store) == 1 {
max = std::cmp::max(store.accounts.capacity(), max);
min = std::cmp::min(store.accounts.capacity(), min);
avail += 1;
if store.accounts.capacity() >= min_size && store.accounts.capacity() < max_size {
let ret = recycle_stores.remove_entry(i);
drop(recycle_stores);
let old_id = ret.append_vec_id();
ret.recycle(slot, self.next_id.fetch_add(1, Ordering::Relaxed));
debug!(
"recycling store: {} {:?} old_id: {}",
ret.append_vec_id(),
ret.get_path(),
old_id
);
return Some(ret);
}
}
}
debug!(
"no recycle stores max: {} min: {} len: {} looking: {}, {} avail: {}",
max,
min,
recycle_stores.entry_count(),
min_size,
max_size,
avail,
);
None
}
fn find_storage_candidate(&self, slot: Slot, size: usize) -> Arc<AccountStorageEntry> {
let mut create_extra = false;
let mut get_slot_stores = Measure::start("get_slot_stores");
let slot_stores_lock = self.storage.get_slot_stores(slot);
get_slot_stores.stop();
self.stats
.store_get_slot_store
.fetch_add(get_slot_stores.as_us(), Ordering::Relaxed);
let mut find_existing = Measure::start("find_existing");
if let Some(slot_stores_lock) = slot_stores_lock {
let slot_stores = slot_stores_lock.read().unwrap();
if !slot_stores.is_empty() {
if slot_stores.len() <= self.min_num_stores {
let mut total_accounts = 0;
for store in slot_stores.values() {
total_accounts += store.count();
}
// Create more stores so that when scanning the storage all CPUs have work
if (total_accounts / 16) >= slot_stores.len() {
create_extra = true;
}
}
// pick an available store at random by iterating from a random point
let to_skip = thread_rng().gen_range(0, slot_stores.len());
for (i, store) in slot_stores.values().cycle().skip(to_skip).enumerate() {
if store.try_available() {
let ret = store.clone();
drop(slot_stores);
if create_extra {
if self
.try_recycle_and_insert_store(slot, size as u64, std::u64::MAX)
.is_none()
{
self.stats
.create_store_count
.fetch_add(1, Ordering::Relaxed);
self.create_and_insert_store(slot, self.file_size, "store extra");
} else {
self.stats
.recycle_store_count
.fetch_add(1, Ordering::Relaxed);
}
}
find_existing.stop();
self.stats
.store_find_existing
.fetch_add(find_existing.as_us(), Ordering::Relaxed);
return ret;
}
// looked at every store, bail...
if i == slot_stores.len() {
break;
}
}
}
}
find_existing.stop();
self.stats
.store_find_existing
.fetch_add(find_existing.as_us(), Ordering::Relaxed);
let store = if let Some(store) = self.try_recycle_store(slot, size as u64, std::u64::MAX) {
self.stats
.recycle_store_count
.fetch_add(1, Ordering::Relaxed);
store
} else {
self.stats
.create_store_count
.fetch_add(1, Ordering::Relaxed);
self.create_store(slot, self.file_size, "store", &self.paths)
};
// try_available is like taking a lock on the store,
// preventing other threads from using it.
// It must succeed here and happen before insert,
// otherwise another thread could also grab it from the index.
assert!(store.try_available());
self.insert_store(slot, store.clone());
store
}
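    /// Rounds `size` up to the next multiple of `PAGE_SIZE` with the usual
    /// power-of-two trick: add `PAGE_SIZE - 1`, then mask off the low bits.
    /// For example (a sketch, assuming a 4096-byte page size):
    /// ```ignore
    /// assert_eq!(db.page_align(0), 0);
    /// assert_eq!(db.page_align(1), 4096);
    /// assert_eq!(db.page_align(4096), 4096);
    /// assert_eq!(db.page_align(4097), 8192);
    /// ```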
fn page_align(&self, size: u64) -> u64 {
(size + (PAGE_SIZE - 1)) & !(PAGE_SIZE - 1)
}
fn has_space_available(&self, slot: Slot, size: u64) -> bool {
let slot_storage = self.storage.get_slot_stores(slot).unwrap();
let slot_storage_r = slot_storage.read().unwrap();
for (_id, store) in slot_storage_r.iter() {
if store.status() == AccountStorageStatus::Available
&& (store.accounts.capacity() - store.accounts.len() as u64) > size
{
return true;
}
}
false
}
fn create_store(
&self,
slot: Slot,
size: u64,
from: &str,
paths: &[PathBuf],
) -> Arc<AccountStorageEntry> {
let path_index = thread_rng().gen_range(0, paths.len());
let store = Arc::new(self.new_storage_entry(
slot,
&Path::new(&paths[path_index]),
self.page_align(size),
));
if store.append_vec_id() == CACHE_VIRTUAL_STORAGE_ID {
panic!("We've run out of storage ids!");
}
debug!(
"creating store: {} slot: {} len: {} size: {} from: {} path: {:?}",
store.append_vec_id(),
slot,
store.accounts.len(),
store.accounts.capacity(),
from,
store.accounts.get_path()
);
store
}
fn create_and_insert_store(
&self,
slot: Slot,
size: u64,
from: &str,
) -> Arc<AccountStorageEntry> {
self.create_and_insert_store_with_paths(slot, size, from, &self.paths)
}
fn create_and_insert_store_with_paths(
&self,
slot: Slot,
size: u64,
from: &str,
paths: &[PathBuf],
) -> Arc<AccountStorageEntry> {
let store = self.create_store(slot, size, from, paths);
let store_for_index = store.clone();
self.insert_store(slot, store_for_index);
store
}
fn insert_store(&self, slot: Slot, store: Arc<AccountStorageEntry>) {
let slot_storages: SlotStores = self.storage.get_slot_stores(slot).unwrap_or_else(||
// DashMap entry.or_insert() returns a RefMut, essentially a write lock,
// which is dropped after this block ends, minimizing time held by the lock.
            // However, we still want to keep a reference to the `SlotStores` behind
            // the lock, so we clone it out (`SlotStores` is an `Arc`, making the clone cheap).
self.storage
.0
.entry(slot)
.or_insert(Arc::new(RwLock::new(HashMap::new())))
.clone());
assert!(slot_storages
.write()
.unwrap()
.insert(store.append_vec_id(), store)
.is_none());
}
pub fn purge_slot(&self, slot: Slot) {
let mut slots = HashSet::new();
slots.insert(slot);
self.purge_slots(&slots);
}
fn recycle_slot_stores(
&self,
total_removed_storage_entries: usize,
slot_stores: &[SlotStores],
) -> u64 {
let mut recycled_count = 0;
let mut recycle_stores_write_elapsed = Measure::start("recycle_stores_write_elapsed");
let mut recycle_stores = self.recycle_stores.write().unwrap();
recycle_stores_write_elapsed.stop();
for slot_entries in slot_stores {
let entry = slot_entries.read().unwrap();
for (_store_id, stores) in entry.iter() {
if recycle_stores.entry_count() > MAX_RECYCLE_STORES {
let dropped_count = total_removed_storage_entries - recycled_count;
self.stats
.dropped_stores
.fetch_add(dropped_count as u64, Ordering::Relaxed);
return recycle_stores_write_elapsed.as_us();
}
recycle_stores.add_entry(stores.clone());
recycled_count += 1;
}
}
recycle_stores_write_elapsed.as_us()
}
fn do_purge_slots_from_cache_and_store<'a>(
&'a self,
can_exist_in_cache: bool,
removed_slots: impl Iterator<Item = &'a Slot>,
purge_stats: &PurgeStats,
) {
let mut remove_storages_elapsed = Measure::start("remove_storages_elapsed");
let mut all_removed_slot_storages = vec![];
let mut num_cached_slots_removed = 0;
let mut total_removed_cached_bytes = 0;
let mut total_removed_storage_entries = 0;
let mut total_removed_stored_bytes = 0;
for remove_slot in removed_slots {
if let Some(slot_cache) = self.accounts_cache.remove_slot(*remove_slot) {
                // If the slot is still in the cache, purge its cached accounts and
                // remove their entries from the Accounts Index
if !can_exist_in_cache {
panic!("The removed slot must alrady have been flushed from the cache");
}
num_cached_slots_removed += 1;
total_removed_cached_bytes += slot_cache.total_bytes();
self.purge_slot_cache(*remove_slot, slot_cache);
} else if let Some((_, slot_removed_storages)) = self.storage.0.remove(&remove_slot) {
// Because AccountsBackgroundService synchronously flushes from the accounts cache
// and handles all Bank::drop() (the cleanup function that leads to this
                // function call), we don't need to worry about an overlapping cache flush
                // with this function call. This means that if we reach this case, we can be
// confident that the entire state for this slot has been flushed to the storage
// already.
// Note this only cleans up the storage entries. The accounts index cleaning
// (removing from the slot list, decrementing the account ref count), is handled in
// clean_accounts() -> purge_older_root_entries()
{
let r_slot_removed_storages = slot_removed_storages.read().unwrap();
total_removed_storage_entries += r_slot_removed_storages.len();
total_removed_stored_bytes += r_slot_removed_storages
.values()
.map(|i| i.accounts.capacity())
.sum::<u64>();
}
all_removed_slot_storages.push(slot_removed_storages.clone());
}
            // It should not be possible for a slot to be in neither the cache nor storage. Even in
// a slot with all ticks, `Bank::new_from_parent()` immediately stores some sysvars
// on bank creation.
// Remove any delta pubkey set if existing.
self.uncleaned_pubkeys.remove(remove_slot);
}
remove_storages_elapsed.stop();
let num_stored_slots_removed = all_removed_slot_storages.len();
let recycle_stores_write_elapsed =
self.recycle_slot_stores(total_removed_storage_entries, &all_removed_slot_storages);
let mut drop_storage_entries_elapsed = Measure::start("drop_storage_entries_elapsed");
// Backing mmaps for removed storages entries explicitly dropped here outside
// of any locks
drop(all_removed_slot_storages);
drop_storage_entries_elapsed.stop();
purge_stats
.remove_storages_elapsed
.fetch_add(remove_storages_elapsed.as_us(), Ordering::Relaxed);
purge_stats
.drop_storage_entries_elapsed
.fetch_add(drop_storage_entries_elapsed.as_us(), Ordering::Relaxed);
purge_stats
.num_cached_slots_removed
.fetch_add(num_cached_slots_removed, Ordering::Relaxed);
purge_stats
.total_removed_cached_bytes
.fetch_add(total_removed_cached_bytes, Ordering::Relaxed);
purge_stats
.num_stored_slots_removed
.fetch_add(num_stored_slots_removed, Ordering::Relaxed);
purge_stats
.total_removed_storage_entries
.fetch_add(total_removed_storage_entries, Ordering::Relaxed);
purge_stats
.total_removed_stored_bytes
.fetch_add(total_removed_stored_bytes, Ordering::Relaxed);
purge_stats
.recycle_stores_write_elapsed
.fetch_add(recycle_stores_write_elapsed, Ordering::Relaxed);
}
fn purge_storage_slots(&self, removed_slots: &HashSet<Slot>) {
// Check all slots `removed_slots` are no longer rooted
let mut safety_checks_elapsed = Measure::start("safety_checks_elapsed");
for slot in removed_slots.iter() {
assert!(!self.accounts_index.is_root(*slot))
}
safety_checks_elapsed.stop();
self.clean_accounts_stats
.purge_stats
.safety_checks_elapsed
.fetch_add(safety_checks_elapsed.as_us(), Ordering::Relaxed);
self.do_purge_slots_from_cache_and_store(
false,
removed_slots.iter(),
&self.clean_accounts_stats.purge_stats,
);
}
fn purge_slot_cache(&self, purged_slot: Slot, slot_cache: SlotCache) {
let mut purged_slot_pubkeys: HashSet<(Slot, Pubkey)> = HashSet::new();
let pubkey_to_slot_set: Vec<(Pubkey, Slot)> = slot_cache
.iter()
.map(|account| {
purged_slot_pubkeys.insert((purged_slot, *account.key()));
(*account.key(), purged_slot)
})
.collect();
self.purge_slot_cache_pubkeys(purged_slot, purged_slot_pubkeys, pubkey_to_slot_set, true);
}
fn purge_slot_cache_pubkeys(
&self,
purged_slot: Slot,
purged_slot_pubkeys: HashSet<(Slot, Pubkey)>,
pubkey_to_slot_set: Vec<(Pubkey, Slot)>,
is_dead: bool,
) {
// Slot purged from cache should not exist in the backing store
assert!(self.storage.get_slot_stores(purged_slot).is_none());
let num_purged_keys = pubkey_to_slot_set.len();
let reclaims = self.purge_keys_exact(&pubkey_to_slot_set);
assert_eq!(reclaims.len(), num_purged_keys);
if is_dead {
self.finalize_dead_slot_removal(
std::iter::once(&purged_slot),
purged_slot_pubkeys,
None,
);
}
}
fn purge_slots(&self, slots: &HashSet<Slot>) {
// `add_root()` should be called first
let mut safety_checks_elapsed = Measure::start("safety_checks_elapsed");
let non_roots: Vec<&Slot> = slots
.iter()
.filter(|slot| !self.accounts_index.is_root(**slot))
.collect();
safety_checks_elapsed.stop();
self.external_purge_slots_stats
.safety_checks_elapsed
.fetch_add(safety_checks_elapsed.as_us(), Ordering::Relaxed);
self.do_purge_slots_from_cache_and_store(
true,
non_roots.into_iter(),
&self.external_purge_slots_stats,
);
self.external_purge_slots_stats
.report("external_purge_slots_stats", Some(1000));
}
// TODO: This is currently:
// 1. Unsafe with scan because it can remove a slot in the middle
// of a scan.
// 2. Doesn't handle cache flushes that happen during the slot deletion (see comment below).
pub fn remove_unrooted_slot(&self, remove_slot: Slot) {
if self.accounts_index.is_root(remove_slot) {
panic!("Trying to remove accounts for rooted slot {}", remove_slot);
}
if let Some(slot_cache) = self.accounts_cache.remove_slot(remove_slot) {
// If the slot is still in the cache, remove it from the cache
self.purge_slot_cache(remove_slot, slot_cache);
}
        // TODO: Handle the case where the slot was flushed to storage while we were removing
        // the cached slot above, i.e. it's possible the storage contains a partial version of
        // the current slot. One way to handle this is to augment slots with a "version". That way:
        // 1) We clean older versions via the natural clean() pipeline,
        // without having to call this function out of band.
        // 2) This deletion doesn't have to block on scan.
        // Reads will then always read the latest version of a slot. Scans will also know
        // which version their parents are on, because banks will also be augmented with this
        // version, which handles cases where a deletion of one version happens in the middle of a scan.
let scan_result: ScanStorageResult<Pubkey, DashSet<Pubkey>> = self.scan_account_storage(
remove_slot,
|loaded_account: LoadedAccount| Some(*loaded_account.pubkey()),
|accum: &DashSet<Pubkey>, loaded_account: LoadedAccount| {
accum.insert(*loaded_account.pubkey());
},
);
// Purge this slot from the accounts index
let purge_slot: HashSet<Slot> = vec![remove_slot].into_iter().collect();
let mut reclaims = vec![];
match scan_result {
ScanStorageResult::Cached(cached_keys) => {
for pubkey in cached_keys.iter() {
self.accounts_index.purge_exact(
pubkey,
&purge_slot,
&mut reclaims,
&self.account_indexes,
);
}
}
ScanStorageResult::Stored(stored_keys) => {
for set_ref in stored_keys.iter() {
self.accounts_index.purge_exact(
set_ref.key(),
&purge_slot,
&mut reclaims,
&self.account_indexes,
);
}
}
}
self.handle_reclaims(&reclaims, Some(remove_slot), false, None, false);
// After handling the reclaimed entries, this slot's
// storage entries should be purged from self.storage
assert!(self.storage.get_slot_stores(remove_slot).is_none());
}
fn include_owner(cluster_type: &ClusterType, slot: Slot) -> bool {
// When devnet was moved to stable release channel, it was done without
// hashing account.owner. That's because devnet's slot was lower than
// 5_800_000 and the release channel's gating lacked ClusterType at the time...
match cluster_type {
ClusterType::Devnet => slot >= 5_800_000,
_ => true,
}
}
pub fn hash_stored_account(
slot: Slot,
account: &StoredAccountMeta,
cluster_type: &ClusterType,
) -> Hash {
let include_owner = Self::include_owner(cluster_type, slot);
if slot > Self::get_blake3_slot(cluster_type) {
Self::blake3_hash_account_data(
slot,
account.account_meta.lamports,
&account.account_meta.owner,
account.account_meta.executable,
account.account_meta.rent_epoch,
account.data,
&account.meta.pubkey,
include_owner,
)
} else {
Self::hash_account_data(
slot,
account.account_meta.lamports,
&account.account_meta.owner,
account.account_meta.executable,
account.account_meta.rent_epoch,
account.data,
&account.meta.pubkey,
include_owner,
)
}
}
pub fn hash_account(
slot: Slot,
account: &Account,
pubkey: &Pubkey,
cluster_type: &ClusterType,
) -> Hash {
let include_owner = Self::include_owner(cluster_type, slot);
if slot > Self::get_blake3_slot(cluster_type) {
Self::blake3_hash_account_data(
slot,
account.lamports,
&account.owner,
account.executable,
account.rent_epoch,
&account.data,
pubkey,
include_owner,
)
} else {
Self::hash_account_data(
slot,
account.lamports,
&account.owner,
account.executable,
account.rent_epoch,
&account.data,
pubkey,
include_owner,
)
}
}
fn hash_frozen_account_data(account: &Account) -> Hash {
let mut hasher = Hasher::default();
hasher.hash(&account.data);
hasher.hash(&account.owner.as_ref());
if account.executable {
hasher.hash(&[1u8; 1]);
} else {
hasher.hash(&[0u8; 1]);
}
hasher.result()
}
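    /// Hashes the account fields in a fixed order (lamports, slot, rent epoch,
    /// data, executable flag, optionally the owner, then the pubkey); zero-lamport
    /// accounts hash to `Hash::default()`. A usage sketch (names illustrative):
    /// ```ignore
    /// let hash = AccountsDb::hash_account_data(
    ///     slot,
    ///     account.lamports,
    ///     &account.owner,
    ///     account.executable,
    ///     account.rent_epoch,
    ///     &account.data,
    ///     &pubkey,
    ///     /*include_owner=*/ true,
    /// );
    /// ```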
pub fn hash_account_data(
slot: Slot,
lamports: u64,
owner: &Pubkey,
executable: bool,
rent_epoch: Epoch,
data: &[u8],
pubkey: &Pubkey,
include_owner: bool,
) -> Hash {
if lamports == 0 {
return Hash::default();
}
let mut hasher = Hasher::default();
hasher.hash(&lamports.to_le_bytes());
hasher.hash(&slot.to_le_bytes());
hasher.hash(&rent_epoch.to_le_bytes());
hasher.hash(&data);
if executable {
hasher.hash(&[1u8; 1]);
} else {
hasher.hash(&[0u8; 1]);
}
if include_owner {
hasher.hash(&owner.as_ref());
}
hasher.hash(&pubkey.as_ref());
hasher.result()
}
pub fn blake3_hash_account_data(
slot: Slot,
lamports: u64,
owner: &Pubkey,
executable: bool,
rent_epoch: Epoch,
data: &[u8],
pubkey: &Pubkey,
include_owner: bool,
) -> Hash {
if lamports == 0 {
return Hash::default();
}
let mut hasher = blake3::Hasher::new();
hasher.update(&lamports.to_le_bytes());
hasher.update(&slot.to_le_bytes());
hasher.update(&rent_epoch.to_le_bytes());
hasher.update(&data);
if executable {
hasher.update(&[1u8; 1]);
} else {
hasher.update(&[0u8; 1]);
}
if include_owner {
hasher.update(&owner.as_ref());
}
hasher.update(&pubkey.as_ref());
Hash(<[u8; solana_sdk::hash::HASH_BYTES]>::try_from(hasher.finalize().as_slice()).unwrap())
}
fn get_blake3_slot(cluster_type: &ClusterType) -> Slot {
match cluster_type {
ClusterType::Development => 0,
// Epoch 400
ClusterType::Devnet => 3_276_800,
// Epoch 78
ClusterType::MainnetBeta => 33_696_000,
// Epoch 95
ClusterType::Testnet => 35_516_256,
}
}
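    /// Reserves `count` consecutive write versions with a single atomic
    /// fetch-add and returns the first one. A sketch of the intended use:
    /// ```ignore
    /// let base = db.bulk_assign_write_version(accounts.len());
    /// // this store may now use versions base..base + accounts.len()
    /// ```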
fn bulk_assign_write_version(&self, count: usize) -> u64 {
self.write_version
.fetch_add(count as u64, Ordering::Relaxed)
}
fn write_accounts_to_storage<F: FnMut(Slot, usize) -> Arc<AccountStorageEntry>>(
&self,
slot: Slot,
hashes: &[Hash],
mut storage_finder: F,
accounts_and_meta_to_store: &[(StoredMeta, &Account)],
) -> Vec<AccountInfo> {
assert_eq!(hashes.len(), accounts_and_meta_to_store.len());
let mut infos: Vec<AccountInfo> = Vec::with_capacity(accounts_and_meta_to_store.len());
let mut total_append_accounts_us = 0;
let mut total_storage_find_us = 0;
while infos.len() < accounts_and_meta_to_store.len() {
let mut storage_find = Measure::start("storage_finder");
let storage = storage_finder(
slot,
accounts_and_meta_to_store[infos.len()].1.data.len() + STORE_META_OVERHEAD,
);
storage_find.stop();
total_storage_find_us += storage_find.as_us();
let mut append_accounts = Measure::start("append_accounts");
let rvs = storage.accounts.append_accounts(
&accounts_and_meta_to_store[infos.len()..],
&hashes[infos.len()..],
);
assert!(!rvs.is_empty());
append_accounts.stop();
total_append_accounts_us += append_accounts.as_us();
if rvs.len() == 1 {
storage.set_status(AccountStorageStatus::Full);
// See if an account overflows the append vecs in the slot.
let data_len = (accounts_and_meta_to_store[infos.len()].1.data.len()
+ STORE_META_OVERHEAD) as u64;
if !self.has_space_available(slot, data_len) {
let special_store_size = std::cmp::max(data_len * 2, self.file_size);
if self
.try_recycle_and_insert_store(slot, special_store_size, std::u64::MAX)
.is_none()
{
self.stats
.create_store_count
.fetch_add(1, Ordering::Relaxed);
self.create_and_insert_store(slot, special_store_size, "large create");
} else {
self.stats
.recycle_store_count
.fetch_add(1, Ordering::Relaxed);
}
}
continue;
}
for (offsets, (_, account)) in rvs
.windows(2)
.zip(&accounts_and_meta_to_store[infos.len()..])
{
let stored_size = offsets[1] - offsets[0];
storage.add_account(stored_size);
infos.push(AccountInfo {
store_id: storage.append_vec_id(),
offset: offsets[0],
stored_size,
lamports: account.lamports,
});
}
// restore the state to available
storage.set_status(AccountStorageStatus::Available);
}
self.stats
.store_append_accounts
.fetch_add(total_append_accounts_us, Ordering::Relaxed);
self.stats
.store_find_store
.fetch_add(total_storage_find_us, Ordering::Relaxed);
infos
}
pub fn mark_slot_frozen(&self, slot: Slot) {
if let Some(slot_cache) = self.accounts_cache.slot_cache(slot) {
slot_cache.mark_slot_frozen();
slot_cache.report_slot_store_metrics();
}
self.accounts_cache.report_size();
}
pub fn expire_old_recycle_stores(&self) {
let mut recycle_stores_write_elapsed = Measure::start("recycle_stores_write_time");
let recycle_stores = self.recycle_stores.write().unwrap().expire_old_entries();
recycle_stores_write_elapsed.stop();
let mut drop_storage_entries_elapsed = Measure::start("drop_storage_entries_elapsed");
drop(recycle_stores);
drop_storage_entries_elapsed.stop();
self.clean_accounts_stats
.purge_stats
.drop_storage_entries_elapsed
.fetch_add(drop_storage_entries_elapsed.as_us(), Ordering::Relaxed);
self.clean_accounts_stats
.purge_stats
.recycle_stores_write_elapsed
.fetch_add(recycle_stores_write_elapsed.as_us(), Ordering::Relaxed);
}
// `force_flush` flushes all the cached roots `<= requested_flush_root`. It also then
// flushes:
// 1) Any remaining roots if there are > MAX_CACHE_SLOTS remaining slots in the cache,
    // 2) If there are still > MAX_CACHE_SLOTS remaining slots in the cache, the excess
    // unrooted slots
pub fn flush_accounts_cache(&self, force_flush: bool, requested_flush_root: Option<Slot>) {
#[cfg(not(test))]
assert!(requested_flush_root.is_some());
if !force_flush && self.accounts_cache.num_slots() <= MAX_CACHE_SLOTS {
return;
}
// Flush only the roots <= requested_flush_root, so that snapshotting has all
// the relevant roots in storage.
let mut flush_roots_elapsed = Measure::start("flush_roots_elapsed");
let mut account_bytes_saved = 0;
let mut num_accounts_saved = 0;
// Note even if force_flush is false, we will still flush all roots <= the
// given `requested_flush_root`, even if some of the later roots cannot be used for
// cleaning due to an ongoing scan
let (total_new_cleaned_roots, num_cleaned_roots_flushed) = self
.flush_rooted_accounts_cache(
requested_flush_root,
Some((&mut account_bytes_saved, &mut num_accounts_saved)),
);
flush_roots_elapsed.stop();
        // Note we don't purge unrooted slots here because there may be ongoing scans/references
        // for those slots; let the Bank::drop() implementation do the cleanup on dead
        // banks instead
// If there are > MAX_CACHE_SLOTS, then flush the excess ones to storage
let (total_new_excess_roots, num_excess_roots_flushed) =
if self.accounts_cache.num_slots() > MAX_CACHE_SLOTS {
// Start by flushing the roots
//
// Cannot do any cleaning on roots past `requested_flush_root` because future
// snapshots may need updates from those later slots, hence we pass `None`
// for `should_clean`.
self.flush_rooted_accounts_cache(None, None)
} else {
(0, 0)
};
let old_slots = self.accounts_cache.find_older_frozen_slots(MAX_CACHE_SLOTS);
let excess_slot_count = old_slots.len();
let mut unflushable_unrooted_slot_count = 0;
let max_flushed_root = self.accounts_cache.fetch_max_flush_root();
for old_slot in old_slots {
// Don't flush slots that are known to be unrooted
if old_slot > max_flushed_root {
self.flush_slot_cache(old_slot, None::<&mut fn(&_, &_) -> bool>);
} else {
unflushable_unrooted_slot_count += 1;
}
}
datapoint_info!(
"accounts_db-flush_accounts_cache",
("total_new_cleaned_roots", total_new_cleaned_roots, i64),
("num_cleaned_roots_flushed", num_cleaned_roots_flushed, i64),
("total_new_excess_roots", total_new_excess_roots, i64),
("num_excess_roots_flushed", num_excess_roots_flushed, i64),
("excess_slot_count", excess_slot_count, i64),
(
"unflushable_unrooted_slot_count",
unflushable_unrooted_slot_count,
i64
),
(
"flush_roots_elapsed",
flush_roots_elapsed.as_us() as i64,
i64
),
("account_bytes_saved", account_bytes_saved, i64),
("num_accounts_saved", num_accounts_saved, i64),
);
// Flush a random slot out after every force flush to catch any inconsistencies
// between cache and written state (i.e. should cause a hash mismatch between validators
// that flush and don't flush if such a bug exists).
let num_slots_remaining = self.accounts_cache.num_slots();
if force_flush && num_slots_remaining >= FLUSH_CACHE_RANDOM_THRESHOLD {
// Don't flush slots that are known to be unrooted
let mut frozen_slots = self.accounts_cache.find_older_frozen_slots(0);
frozen_slots.retain(|s| *s > max_flushed_root);
            // Pick a random frozen slot, i.e. a random index 0 <= i < `frozen_slots.len()`
let rand_slot = frozen_slots.choose(&mut thread_rng());
if let Some(rand_slot) = rand_slot {
info!(
"Flushing random slot: {}, num_remaining: {}",
*rand_slot, num_slots_remaining
);
self.flush_slot_cache(*rand_slot, None::<&mut fn(&_, &_) -> bool>);
}
}
}
fn flush_rooted_accounts_cache(
&self,
requested_flush_root: Option<Slot>,
should_clean: Option<(&mut usize, &mut usize)>,
) -> (usize, usize) {
let max_clean_root = should_clean.as_ref().and_then(|_| {
// If there is a long running scan going on, this could prevent any cleaning
// based on updates from slots > `max_clean_root`.
self.max_clean_root(requested_flush_root)
});
        // Use HashMap because HashSet doesn't provide an Entry API
let mut written_accounts = HashMap::new();
        // If `should_clean` is None, then `should_flush_f` is also None, which will cause
// `flush_slot_cache` to flush all accounts to storage without cleaning any accounts.
let mut should_flush_f = should_clean.map(|(account_bytes_saved, num_accounts_saved)| {
move |&pubkey: &Pubkey, account: &Account| {
use std::collections::hash_map::Entry::{Occupied, Vacant};
let should_flush = match written_accounts.entry(pubkey) {
Vacant(vacant_entry) => {
vacant_entry.insert(());
true
}
Occupied(_occupied_entry) => {
*account_bytes_saved += account.data.len();
*num_accounts_saved += 1;
// If a later root already wrote this account, no point
// in flushing it
false
}
};
should_flush
}
});
// Always flush up to `requested_flush_root`, which is necessary for things like snapshotting.
let cached_roots: BTreeSet<Slot> = self.accounts_cache.clear_roots(requested_flush_root);
        // Iterate from highest to lowest so that we don't need to flush outdated
        // updates in earlier roots
let mut num_roots_flushed = 0;
for &root in cached_roots.iter().rev() {
let should_flush_f = if let Some(max_clean_root) = max_clean_root {
if root > max_clean_root {
// Only if the root is greater than the `max_clean_root` do we
// have to prevent cleaning, otherwise, just default to `should_flush_f`
// for any slots <= `max_clean_root`
None
} else {
should_flush_f.as_mut()
}
} else {
should_flush_f.as_mut()
};
if self.flush_slot_cache(root, should_flush_f) {
num_roots_flushed += 1;
}
// Regardless of whether this slot was *just* flushed from the cache by the above
// `flush_slot_cache()`, we should update the `max_flush_root`.
// This is because some rooted slots may be flushed to storage *before* they are marked as root.
// This can occur for instance when:
            // 1) The cache was overwhelmed, so we flushed some yet-to-be-rooted frozen slots
// 2) Random evictions
// These slots may then *later* be marked as root, so we still need to handle updating the
// `max_flush_root` in the accounts cache.
self.accounts_cache.set_max_flush_root(root);
}
// Only add to the uncleaned roots set *after* we've flushed the previous roots,
// so that clean will actually be able to clean the slots.
let num_new_roots = cached_roots.len();
self.accounts_index.add_uncleaned_roots(cached_roots);
(num_new_roots, num_roots_flushed)
}
// `should_flush_f` is an optional closure that determines whether a given
// account should be flushed. Passing `None` will by default flush all
// accounts
fn flush_slot_cache(
&self,
slot: Slot,
mut should_flush_f: Option<&mut impl FnMut(&Pubkey, &Account) -> bool>,
) -> bool {
info!("flush_slot_cache slot: {}", slot);
let slot_cache = self.accounts_cache.slot_cache(slot);
if let Some(slot_cache) = slot_cache {
let iter_items: Vec<_> = slot_cache.iter().collect();
let mut total_size = 0;
let mut purged_slot_pubkeys: HashSet<(Slot, Pubkey)> = HashSet::new();
let mut pubkey_to_slot_set: Vec<(Pubkey, Slot)> = vec![];
let (accounts, hashes): (Vec<(&Pubkey, &Account)>, Vec<Hash>) = iter_items
.iter()
.filter_map(|iter_item| {
let key = iter_item.key();
let account = &iter_item.value().account;
let should_flush = should_flush_f
.as_mut()
.map(|should_flush_f| should_flush_f(key, account))
.unwrap_or(true);
if should_flush {
let hash = iter_item.value().hash;
total_size += (account.data.len() + STORE_META_OVERHEAD) as u64;
Some(((key, account), hash))
} else {
// If we don't flush, we have to remove the entry from the
// index, since it's equivalent to purging
purged_slot_pubkeys.insert((slot, *key));
pubkey_to_slot_set.push((*key, slot));
None
}
})
.unzip();
let is_dead_slot = accounts.is_empty();
// Remove the account index entries from earlier roots that are outdated by later roots.
// Safe because queries to the index will be reading updates from later roots.
self.purge_slot_cache_pubkeys(
slot,
purged_slot_pubkeys,
pubkey_to_slot_set,
is_dead_slot,
);
if !is_dead_slot {
let aligned_total_size = self.page_align(total_size);
                // This ensures that all updates are written to an AppendVec before any
                // updates to the index happen, so anybody that sees a real entry in the index
                // will be able to find the account in storage
let flushed_store =
self.create_and_insert_store(slot, aligned_total_size, "flush_slot_cache");
self.store_accounts_frozen(
slot,
&accounts,
&hashes,
Some(Box::new(move |_, _| flushed_store.clone())),
None,
);
// If the above sizing function is correct, just one AppendVec is enough to hold
// all the data for the slot
assert_eq!(
self.storage
.get_slot_stores(slot)
.unwrap()
.read()
.unwrap()
.len(),
1
);
}
            // Remove this slot from the cache; to AccountsDb readers this should look like an
            // atomic switch from the cache to storage
assert!(self.accounts_cache.remove_slot(slot).is_some());
true
} else {
false
}
}
fn write_accounts_to_cache(
&self,
slot: Slot,
hashes: &[Hash],
accounts_and_meta_to_store: &[(StoredMeta, &Account)],
) -> Vec<AccountInfo> {
assert_eq!(hashes.len(), accounts_and_meta_to_store.len());
accounts_and_meta_to_store
.iter()
.zip(hashes)
.map(|((meta, account), hash)| {
self.accounts_cache
.store(slot, &meta.pubkey, (**account).clone(), *hash);
AccountInfo {
store_id: CACHE_VIRTUAL_STORAGE_ID,
offset: CACHE_VIRTUAL_OFFSET,
stored_size: CACHE_VIRTUAL_STORED_SIZE,
lamports: account.lamports,
}
})
.collect()
}
fn store_accounts_to<
F: FnMut(Slot, usize) -> Arc<AccountStorageEntry>,
P: Iterator<Item = u64>,
>(
&self,
slot: Slot,
accounts: &[(&Pubkey, &Account)],
hashes: &[Hash],
storage_finder: F,
mut write_version_producer: P,
is_cached_store: bool,
) -> Vec<AccountInfo> {
let default_account = Account::default();
let accounts_and_meta_to_store: Vec<(StoredMeta, &Account)> = accounts
.iter()
.map(|(pubkey, account)| {
let account = if account.lamports == 0 {
&default_account
} else {
*account
};
let data_len = account.data.len() as u64;
let meta = StoredMeta {
write_version: write_version_producer.next().unwrap(),
pubkey: **pubkey,
data_len,
};
(meta, account)
})
.collect();
if self.caching_enabled && is_cached_store {
self.write_accounts_to_cache(slot, hashes, &accounts_and_meta_to_store)
} else {
self.write_accounts_to_storage(
slot,
hashes,
storage_finder,
&accounts_and_meta_to_store,
)
}
}
fn report_store_stats(&self) {
let mut total_count = 0;
let mut min = std::usize::MAX;
let mut min_slot = 0;
let mut max = 0;
let mut max_slot = 0;
let mut newest_slot = 0;
let mut oldest_slot = std::u64::MAX;
for iter_item in self.storage.0.iter() {
let slot = iter_item.key();
let slot_stores = iter_item.value().read().unwrap();
total_count += slot_stores.len();
if slot_stores.len() < min {
min = slot_stores.len();
min_slot = *slot;
}
if slot_stores.len() > max {
max = slot_stores.len();
max_slot = *slot;
}
if *slot > newest_slot {
newest_slot = *slot;
}
if *slot < oldest_slot {
oldest_slot = *slot;
}
}
info!("total_stores: {}, newest_slot: {}, oldest_slot: {}, max_slot: {} (num={}), min_slot: {} (num={})",
total_count, newest_slot, oldest_slot, max_slot, max, min_slot, min);
datapoint_info!(
"accounts_db-stores",
("total_count", total_count, i64),
(
"recycle_count",
self.recycle_stores.read().unwrap().entry_count() as u64,
i64
),
);
datapoint_info!(
"accounts_db-perf-stats",
(
"delta_hash_num",
self.stats.delta_hash_num.swap(0, Ordering::Relaxed),
i64
),
(
"delta_hash_scan_us",
self.stats
.delta_hash_scan_time_total_us
.swap(0, Ordering::Relaxed),
i64
),
(
"delta_hash_accumulate_us",
self.stats
.delta_hash_accumulate_time_total_us
.swap(0, Ordering::Relaxed),
i64
),
);
}
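    /// The capitalization sums below widen to `u128` before adding, then check
    /// the cast back to `u64`, so intermediate overflow cannot silently wrap.
    /// A sketch of both entry points:
    /// ```ignore
    /// let total = AccountsDb::checked_iterative_sum_for_capitalization(total_cap, new_cap);
    /// let sum = AccountsDb::checked_sum_for_capitalization(balances.iter().copied());
    /// ```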
pub fn checked_iterative_sum_for_capitalization(total_cap: u64, new_cap: u64) -> u64 {
let new_total = total_cap as u128 + new_cap as u128;
AccountsHash::checked_cast_for_capitalization(new_total)
}
pub fn checked_sum_for_capitalization<T: Iterator<Item = u64>>(balances: T) -> u64 {
AccountsHash::checked_cast_for_capitalization(balances.map(|b| b as u128).sum::<u128>())
}
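    /// With `simple_capitalization_enabled`, the balance counts as-is; otherwise
    /// specially retained accounts (executable native loaders and sysvars) have
    /// their initial 1 lamport excluded. A sketch (owner names illustrative):
    /// ```ignore
    /// // sysvar account under the legacy rules: the initial lamport is excluded
    /// assert_eq!(
    ///     AccountsDb::account_balance_for_capitalization(5, &sysvar_owner, false, false),
    ///     4
    /// );
    /// // any account under simple capitalization: counted as-is
    /// assert_eq!(
    ///     AccountsDb::account_balance_for_capitalization(5, &any_owner, false, true),
    ///     5
    /// );
    /// ```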
pub fn account_balance_for_capitalization(
lamports: u64,
owner: &Pubkey,
executable: bool,
simple_capitalization_enabled: bool,
) -> u64 {
if simple_capitalization_enabled {
return lamports;
}
let is_specially_retained = (solana_sdk::native_loader::check_id(owner) && executable)
|| solana_sdk::sysvar::check_id(owner);
if is_specially_retained {
// specially retained accounts always have an initial 1 lamport
// balance, but could be modified by transfers which increase
// the balance but don't affect the capitalization.
lamports - 1
} else {
lamports
}
}
fn calculate_accounts_hash(
&self,
slot: Slot,
ancestors: &Ancestors,
check_hash: bool,
simple_capitalization_enabled: bool,
) -> Result<(Hash, u64), BankHashVerificationError> {
use BankHashVerificationError::*;
let mut scan = Measure::start("scan");
let keys: Vec<_> = self
.accounts_index
.account_maps
.read()
.unwrap()
.keys()
.cloned()
.collect();
let mismatch_found = AtomicU64::new(0);
        // Pick a chunk size big enough to allow us to produce output vectors that are smaller than the overall size.
        // We'll also accumulate the lamports within each chunk; fewer chunks result in less contention when accumulating the sum.
let chunks = crate::accounts_hash::MERKLE_FANOUT.pow(4);
let total_lamports = Mutex::<u64>::new(0);
let hashes: Vec<Vec<Hash>> = {
self.thread_pool_clean.install(|| {
keys.par_chunks(chunks)
.map(|pubkeys| {
let mut sum = 0u128;
let result: Vec<Hash> = pubkeys
.iter()
.filter_map(|pubkey| {
if let Some((lock, index)) =
self.accounts_index.get(pubkey, Some(ancestors), Some(slot))
{
let (slot, account_info) = &lock.slot_list()[index];
if account_info.lamports != 0 {
self.get_account_accessor_from_cache_or_storage(
*slot,
pubkey,
account_info.store_id,
account_info.offset,
)
.get_loaded_account()
.and_then(
|loaded_account| {
let loaded_hash = loaded_account.loaded_hash();
let balance =
Self::account_balance_for_capitalization(
account_info.lamports,
loaded_account.owner(),
loaded_account.executable(),
simple_capitalization_enabled,
);
if check_hash {
let computed_hash = loaded_account
.compute_hash(
*slot,
&self.expected_cluster_type(),
pubkey,
);
if computed_hash != *loaded_hash {
mismatch_found
.fetch_add(1, Ordering::Relaxed);
return None;
}
}
sum += balance as u128;
Some(*loaded_hash)
},
)
} else {
None
}
} else {
None
}
})
.collect();
let mut total = total_lamports.lock().unwrap();
*total =
AccountsHash::checked_cast_for_capitalization(*total as u128 + sum);
result
})
.collect()
})
};
if mismatch_found.load(Ordering::Relaxed) > 0 {
warn!(
"{} mismatched account hash(es) found",
mismatch_found.load(Ordering::Relaxed)
);
return Err(MismatchedAccountHash);
}
scan.stop();
let total_lamports = *total_lamports.lock().unwrap();
let mut hash_time = Measure::start("hash");
let (accumulated_hash, hash_total) = AccountsHash::calculate_hash(hashes);
hash_time.stop();
datapoint_info!(
"update_accounts_hash",
("accounts_scan", scan.as_us(), i64),
("hash", hash_time.as_us(), i64),
("hash_total", hash_total, i64),
);
Ok((accumulated_hash, total_lamports))
}
pub fn get_accounts_hash(&self, slot: Slot) -> Hash {
let bank_hashes = self.bank_hashes.read().unwrap();
let bank_hash_info = bank_hashes.get(&slot).unwrap();
bank_hash_info.snapshot_hash
}
pub fn update_accounts_hash(
&self,
slot: Slot,
ancestors: &Ancestors,
simple_capitalization_enabled: bool,
) -> (Hash, u64) {
self.update_accounts_hash_with_index_option(
true,
false,
slot,
ancestors,
simple_capitalization_enabled,
None,
)
}
pub fn update_accounts_hash_test(
&self,
slot: Slot,
ancestors: &Ancestors,
simple_capitalization_enabled: bool,
) -> (Hash, u64) {
self.update_accounts_hash_with_index_option(
true,
true,
slot,
ancestors,
simple_capitalization_enabled,
None,
)
}
/// Scan through all the account storage in parallel
fn scan_account_storage_no_bank<F, B>(
snapshot_storages: &[SnapshotStorage],
stats: &mut crate::accounts_hash::HashStats,
scan_func: F,
) -> Vec<B>
where
F: Fn(LoadedAccount, &mut B, Slot) + Send + Sync,
B: Send + Default,
{
let mut time = Measure::start("flatten");
let items: Vec<_> = snapshot_storages.iter().flatten().collect();
time.stop();
stats.pre_scan_flatten_time_total_us += time.as_us();
// Without chunks, we end up with 1 output vec for each outer snapshot storage.
// This results in too many vectors to be efficient.
const MAX_ITEMS_PER_CHUNK: usize = 5_000;
items
.par_chunks(MAX_ITEMS_PER_CHUNK)
.map(|storages: &[&Arc<AccountStorageEntry>]| {
let mut retval = B::default();
for storage in storages {
let accounts = storage.accounts.accounts(0);
accounts.into_iter().for_each(|stored_account| {
scan_func(
LoadedAccount::Stored(stored_account),
&mut retval,
storage.slot(),
)
});
}
retval
})
.collect()
}
fn calculate_accounts_hash_helper(
&self,
use_index: bool,
slot: Slot,
ancestors: &Ancestors,
simple_capitalization_enabled: bool,
) -> (Hash, u64) {
if !use_index {
let combined_maps = self.get_snapshot_storages(slot);
Self::calculate_accounts_hash_without_index(
&combined_maps,
simple_capitalization_enabled,
Some(&self.thread_pool_clean),
)
} else {
self.calculate_accounts_hash(slot, ancestors, false, simple_capitalization_enabled)
.unwrap()
}
}
pub fn update_accounts_hash_with_index_option(
&self,
use_index: bool,
debug_verify: bool,
slot: Slot,
ancestors: &Ancestors,
simple_capitalization_enabled: bool,
expected_capitalization: Option<u64>,
) -> (Hash, u64) {
let (hash, total_lamports) = self.calculate_accounts_hash_helper(
use_index,
slot,
ancestors,
simple_capitalization_enabled,
);
if debug_verify {
// calculate the other way (store or non-store) and verify results match.
let (hash_other, total_lamports_other) = self.calculate_accounts_hash_helper(
!use_index,
slot,
ancestors,
simple_capitalization_enabled,
);
let success = hash == hash_other
&& total_lamports == total_lamports_other
&& total_lamports == expected_capitalization.unwrap_or(total_lamports);
assert!(success, "update_accounts_hash_with_index_option mismatch. hashes: {}, {}; lamports: {}, {}; expected lamports: {:?}, using index: {}, slot: {}", hash, hash_other, total_lamports, total_lamports_other, expected_capitalization, use_index, slot);
}
let mut bank_hashes = self.bank_hashes.write().unwrap();
let mut bank_hash_info = bank_hashes.get_mut(&slot).unwrap();
bank_hash_info.snapshot_hash = hash;
(hash, total_lamports)
}
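    /// Bins every account by the first byte of its pubkey:
    /// `bin = first_byte * bins / 256`, spreading keys evenly across the `bins`
    /// output vectors. For example, with `bins == 64` (a sketch of the arithmetic):
    /// ```ignore
    /// assert_eq!(0 * 64 / 256, 0);    // pubkeys starting 0x00..=0x03 -> bin 0
    /// assert_eq!(5 * 64 / 256, 1);    // 0x04..=0x07 -> bin 1
    /// assert_eq!(255 * 64 / 256, 63); // 0xfc..=0xff -> bin 63
    /// ```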
fn scan_snapshot_stores(
storage: &[SnapshotStorage],
simple_capitalization_enabled: bool,
mut stats: &mut crate::accounts_hash::HashStats,
bins: usize,
) -> Vec<Vec<Vec<CalculateHashIntermediate>>> {
let max_plus_1 = std::u8::MAX as usize + 1;
assert!(bins <= max_plus_1 && bins > 0);
let mut time = Measure::start("scan all accounts");
stats.num_snapshot_storage = storage.len();
let result: Vec<Vec<Vec<CalculateHashIntermediate>>> = Self::scan_account_storage_no_bank(
&storage,
&mut stats,
|loaded_account: LoadedAccount,
accum: &mut Vec<Vec<CalculateHashIntermediate>>,
slot: Slot| {
let version = loaded_account.write_version();
let raw_lamports = loaded_account.lamports();
let zero_raw_lamports = raw_lamports == 0;
let balance = if zero_raw_lamports {
crate::accounts_hash::ZERO_RAW_LAMPORTS_SENTINEL
} else {
Self::account_balance_for_capitalization(
raw_lamports,
loaded_account.owner(),
loaded_account.executable(),
simple_capitalization_enabled,
)
};
let pubkey = *loaded_account.pubkey();
let source_item = CalculateHashIntermediate::new(
version,
*loaded_account.loaded_hash(),
balance,
slot,
pubkey,
);
let rng_index = pubkey.as_ref()[0] as usize * bins / max_plus_1;
let max = accum.len();
if max == 0 {
accum.extend(vec![Vec::new(); bins]);
}
accum[rng_index].push(source_item);
},
);
time.stop();
stats.scan_time_total_us += time.as_us();
result
}
// modeled after get_accounts_delta_hash
// intended to be faster than calculate_accounts_hash
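    /// A sketch of the intended call pattern (assumes the storages were captured
    /// at `slot` via `get_snapshot_storages`; not a prescribed sequence):
    /// ```ignore
    /// let storages = db.get_snapshot_storages(slot);
    /// let (hash, total_lamports) = AccountsDb::calculate_accounts_hash_without_index(
    ///     &storages,
    ///     simple_capitalization_enabled,
    ///     Some(&db.thread_pool_clean),
    /// );
    /// ```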
pub fn calculate_accounts_hash_without_index(
storages: &[SnapshotStorage],
simple_capitalization_enabled: bool,
thread_pool: Option<&ThreadPool>,
) -> (Hash, u64) {
let scan_and_hash = || {
let mut stats = HashStats::default();
// When calculating hashes, it is helpful to break the pubkeys found into bins based on the pubkey value.
const PUBKEY_BINS_FOR_CALCULATING_HASHES: usize = 64;
let result = Self::scan_snapshot_stores(
storages,
simple_capitalization_enabled,
&mut stats,
PUBKEY_BINS_FOR_CALCULATING_HASHES,
);
AccountsHash::rest_of_hash_calculation(result, &mut stats)
};
if let Some(thread_pool) = thread_pool {
thread_pool.install(scan_and_hash)
} else {
scan_and_hash()
}
}
pub fn verify_bank_hash_and_lamports(
&self,
slot: Slot,
ancestors: &Ancestors,
total_lamports: u64,
simple_capitalization_enabled: bool,
) -> Result<(), BankHashVerificationError> {
use BankHashVerificationError::*;
let (calculated_hash, calculated_lamports) =
self.calculate_accounts_hash(slot, ancestors, true, simple_capitalization_enabled)?;
if calculated_lamports != total_lamports {
warn!(
"Mismatched total lamports: {} calculated: {}",
total_lamports, calculated_lamports
);
return Err(MismatchedTotalLamports(calculated_lamports, total_lamports));
}
let bank_hashes = self.bank_hashes.read().unwrap();
if let Some(found_hash_info) = bank_hashes.get(&slot) {
if calculated_hash == found_hash_info.snapshot_hash {
Ok(())
} else {
warn!(
"mismatched bank hash for slot {}: {} (calculated) != {} (expected)",
slot, calculated_hash, found_hash_info.snapshot_hash
);
Err(MismatchedBankHash)
}
} else {
Err(MissingBankHash)
}
}
pub fn get_accounts_delta_hash(&self, slot: Slot) -> Hash {
let mut scan = Measure::start("scan");
let scan_result: ScanStorageResult<(Pubkey, Hash), DashMapVersionHash> = self
.scan_account_storage(
slot,
|loaded_account: LoadedAccount| {
// Cache only has one version per key, don't need to worry about versioning
Some((*loaded_account.pubkey(), *loaded_account.loaded_hash()))
},
|accum: &DashMap<Pubkey, (u64, Hash)>, loaded_account: LoadedAccount| {
let loaded_write_version = loaded_account.write_version();
let loaded_hash = *loaded_account.loaded_hash();
let should_insert =
if let Some(existing_entry) = accum.get(loaded_account.pubkey()) {
loaded_write_version > existing_entry.value().version()
} else {
true
};
if should_insert {
                    // Insertion appears necessary, so grab the write lock to commit the write.
match accum.entry(*loaded_account.pubkey()) {
// Double check in case another thread interleaved a write between the read + write.
Occupied(mut occupied_entry) => {
if loaded_write_version > occupied_entry.get().version() {
occupied_entry.insert((loaded_write_version, loaded_hash));
}
}
Vacant(vacant_entry) => {
vacant_entry.insert((loaded_write_version, loaded_hash));
}
}
}
},
);
scan.stop();
let mut accumulate = Measure::start("accumulate");
let hashes: Vec<_> = match scan_result {
ScanStorageResult::Cached(cached_result) => cached_result,
ScanStorageResult::Stored(stored_result) => stored_result
.into_iter()
.map(|(pubkey, (_latest_write_version, hash))| (pubkey, hash))
.collect(),
};
let dirty_keys = hashes.iter().map(|(pubkey, _hash)| *pubkey).collect();
let ret = AccountsHash::accumulate_account_hashes(hashes);
accumulate.stop();
let mut uncleaned_time = Measure::start("uncleaned_index");
self.uncleaned_pubkeys.insert(slot, dirty_keys);
uncleaned_time.stop();
self.stats
.store_uncleaned_update
.fetch_add(uncleaned_time.as_us(), Ordering::Relaxed);
self.stats
.delta_hash_scan_time_total_us
.fetch_add(scan.as_us(), Ordering::Relaxed);
self.stats
.delta_hash_accumulate_time_total_us
.fetch_add(accumulate.as_us(), Ordering::Relaxed);
self.stats.delta_hash_num.fetch_add(1, Ordering::Relaxed);
ret
}
fn update_index(
&self,
slot: Slot,
infos: Vec<AccountInfo>,
accounts: &[(&Pubkey, &Account)],
) -> SlotList<AccountInfo> {
let mut reclaims = SlotList::<AccountInfo>::with_capacity(infos.len() * 2);
for (info, pubkey_account) in infos.into_iter().zip(accounts.iter()) {
let pubkey = pubkey_account.0;
self.accounts_index.upsert(
slot,
pubkey,
&pubkey_account.1.owner,
&pubkey_account.1.data,
&self.account_indexes,
info,
&mut reclaims,
);
}
reclaims
}
fn remove_dead_accounts(
&self,
reclaims: SlotSlice<AccountInfo>,
expected_slot: Option<Slot>,
mut reclaimed_offsets: Option<&mut AppendVecOffsets>,
reset_accounts: bool,
) -> HashSet<Slot> {
let mut dead_slots = HashSet::new();
let mut new_shrink_candidates: ShrinkCandidates = HashMap::new();
for (slot, account_info) in reclaims {
// No cached accounts should make it here
assert_ne!(account_info.store_id, CACHE_VIRTUAL_STORAGE_ID);
if let Some(ref mut reclaimed_offsets) = reclaimed_offsets {
reclaimed_offsets
.entry(account_info.store_id)
.or_default()
.insert(account_info.offset);
}
if let Some(expected_slot) = expected_slot {
assert_eq!(*slot, expected_slot);
}
if let Some(store) = self
.storage
.get_account_storage_entry(*slot, account_info.store_id)
{
assert_eq!(
*slot, store.slot(),
"AccountDB::accounts_index corrupted. Storage pointed to: {}, expected: {}, should only point to one slot",
store.slot(), *slot
);
let count = store.remove_account(account_info.stored_size, reset_accounts);
if count == 0 {
dead_slots.insert(*slot);
} else if self.caching_enabled
&& (self.page_align(store.alive_bytes() as u64) as f64
/ store.total_bytes() as f64)
< SHRINK_RATIO
{
// Checking that this single storage entry is ready for shrinking,
// should be a sufficient indication that the slot is ready to be shrunk
// because slots should only have one storage entry, namely the one that was
// created by `flush_slot_cache()`.
{
new_shrink_candidates
.entry(*slot)
.or_default()
.insert(store.append_vec_id(), store);
}
}
}
}
if self.caching_enabled {
{
let mut shrink_candidate_slots = self.shrink_candidate_slots.lock().unwrap();
for (slot, slot_shrink_candidates) in new_shrink_candidates {
for (store_id, store) in slot_shrink_candidates {
shrink_candidate_slots
.entry(slot)
.or_default()
.insert(store_id, store);
}
}
}
}
dead_slots.retain(|slot| {
if let Some(slot_stores) = self.storage.get_slot_stores(*slot) {
for x in slot_stores.read().unwrap().values() {
if x.count() != 0 {
return false;
}
}
}
true
});
dead_slots
}
fn finalize_dead_slot_removal<'a>(
&'a self,
dead_slots_iter: impl Iterator<Item = &'a Slot> + Clone,
purged_slot_pubkeys: HashSet<(Slot, Pubkey)>,
// Should only be `Some` for non-cached slots
purged_stored_account_slots: Option<&mut AccountSlots>,
) {
if let Some(purged_stored_account_slots) = purged_stored_account_slots {
for (slot, pubkey) in purged_slot_pubkeys {
purged_stored_account_slots
.entry(pubkey)
.or_default()
.insert(slot);
self.accounts_index.unref_from_storage(&pubkey);
}
}
let mut accounts_index_root_stats = AccountsIndexRootsStats::default();
for slot in dead_slots_iter.clone() {
info!("finalize_dead_slot_removal slot {}", slot);
if let Some(latest) = self.accounts_index.clean_dead_slot(*slot) {
accounts_index_root_stats = latest;
}
}
self.clean_accounts_stats
.latest_accounts_index_roots_stats
.update(&accounts_index_root_stats);
{
let mut bank_hashes = self.bank_hashes.write().unwrap();
for slot in dead_slots_iter {
bank_hashes.remove(slot);
}
}
}
fn clean_stored_dead_slots(
&self,
dead_slots: &HashSet<Slot>,
purged_account_slots: Option<&mut AccountSlots>,
) {
let mut measure = Measure::start("clean_stored_dead_slots-ms");
let mut stores: Vec<Arc<AccountStorageEntry>> = vec![];
for slot in dead_slots.iter() {
if let Some(slot_storage) = self.storage.get_slot_stores(*slot) {
for store in slot_storage.read().unwrap().values() {
stores.push(store.clone());
}
}
}
let purged_slot_pubkeys: HashSet<(Slot, Pubkey)> = {
self.thread_pool_clean.install(|| {
stores
.into_par_iter()
.map(|store| {
let accounts = store.all_accounts();
accounts
.into_iter()
.map(|account| (store.slot(), account.meta.pubkey))
.collect::<HashSet<(Slot, Pubkey)>>()
})
.reduce(HashSet::new, |mut reduced, store_pubkeys| {
reduced.extend(store_pubkeys);
reduced
})
})
};
self.finalize_dead_slot_removal(
dead_slots.iter(),
purged_slot_pubkeys,
purged_account_slots,
);
measure.stop();
inc_new_counter_info!("clean_stored_dead_slots-ms", measure.as_ms() as usize);
}
fn hash_accounts(
&self,
slot: Slot,
accounts: &[(&Pubkey, &Account)],
cluster_type: &ClusterType,
) -> Vec<Hash> {
let mut stats = BankHashStats::default();
let mut total_data = 0;
let hashes: Vec<_> = accounts
.iter()
.map(|(pubkey, account)| {
total_data += account.data.len();
stats.update(account);
Self::hash_account(slot, account, pubkey, cluster_type)
})
.collect();
self.stats
.store_total_data
.fetch_add(total_data as u64, Ordering::Relaxed);
let mut bank_hashes = self.bank_hashes.write().unwrap();
let slot_info = bank_hashes
.entry(slot)
.or_insert_with(BankHashInfo::default);
slot_info.stats.merge(&stats);
hashes
}
pub(crate) fn freeze_accounts(&mut self, ancestors: &Ancestors, account_pubkeys: &[Pubkey]) {
for account_pubkey in account_pubkeys {
if let Some((account, _slot)) = self.load_slow(ancestors, &account_pubkey) {
let frozen_account_info = FrozenAccountInfo {
hash: Self::hash_frozen_account_data(&account),
lamports: account.lamports,
};
warn!(
"Account {} is now frozen at lamports={}, hash={}",
account_pubkey, frozen_account_info.lamports, frozen_account_info.hash
);
self.frozen_accounts
.insert(*account_pubkey, frozen_account_info);
} else {
panic!(
"Unable to freeze an account that does not exist: {}",
account_pubkey
);
}
}
}
/// Cause a panic if frozen accounts would be affected by data in `accounts`
fn assert_frozen_accounts(&self, accounts: &[(&Pubkey, &Account)]) {
if self.frozen_accounts.is_empty() {
return;
}
for (account_pubkey, account) in accounts.iter() {
if let Some(frozen_account_info) = self.frozen_accounts.get(*account_pubkey) {
if account.lamports < frozen_account_info.lamports {
FROZEN_ACCOUNT_PANIC.store(true, Ordering::Relaxed);
panic!(
"Frozen account {} modified. Lamports decreased from {} to {}",
account_pubkey, frozen_account_info.lamports, account.lamports,
)
}
let hash = Self::hash_frozen_account_data(&account);
if hash != frozen_account_info.hash {
FROZEN_ACCOUNT_PANIC.store(true, Ordering::Relaxed);
panic!(
"Frozen account {} modified. Hash changed from {} to {}",
account_pubkey, frozen_account_info.hash, hash,
)
}
}
}
}
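    /// Stores through the accounts cache when caching is enabled. A usage sketch
    /// of the cached write path (a sketch, not a prescribed sequence):
    /// ```ignore
    /// db.store_cached(slot, &[(&pubkey, &account)]);
    /// db.add_root(slot);
    /// // later, the cached root is flushed into an AppendVec:
    /// db.flush_accounts_cache(true, Some(slot));
    /// ```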
pub fn store_cached(&self, slot: Slot, accounts: &[(&Pubkey, &Account)]) {
self.store(slot, accounts, self.caching_enabled);
}
/// Store the account update.
pub fn store_uncached(&self, slot: Slot, accounts: &[(&Pubkey, &Account)]) {
self.store(slot, accounts, false);
}
fn store(&self, slot: Slot, accounts: &[(&Pubkey, &Account)], is_cached_store: bool) {
// If all transactions in a batch are errored,
// it's possible to get a store with no accounts.
if accounts.is_empty() {
return;
}
self.assert_frozen_accounts(accounts);
let mut hash_time = Measure::start("hash_accounts");
let hashes = self.hash_accounts(slot, accounts, &self.expected_cluster_type());
hash_time.stop();
self.stats
.store_hash_accounts
.fetch_add(hash_time.as_us(), Ordering::Relaxed);
self.store_accounts_unfrozen(slot, accounts, &hashes, is_cached_store);
self.report_store_timings();
}
fn report_store_timings(&self) {
let last = self.stats.last_store_report.load(Ordering::Relaxed);
let now = solana_sdk::timing::timestamp();
if now.saturating_sub(last) > 1000
&& self.stats.last_store_report.compare_exchange(
last,
now,
Ordering::Relaxed,
Ordering::Relaxed,
) == Ok(last)
{
datapoint_info!(
"accounts_db_store_timings",
(
"hash_accounts",
self.stats.store_hash_accounts.swap(0, Ordering::Relaxed),
i64
),
(
"store_accounts",
self.stats.store_accounts.swap(0, Ordering::Relaxed),
i64
),
(
"update_index",
self.stats.store_update_index.swap(0, Ordering::Relaxed),
i64
),
(
"handle_reclaims",
self.stats.store_handle_reclaims.swap(0, Ordering::Relaxed),
i64
),
(
"append_accounts",
self.stats.store_append_accounts.swap(0, Ordering::Relaxed),
i64
),
(
"find_storage",
self.stats.store_find_store.swap(0, Ordering::Relaxed),
i64
),
(
"num_accounts",
self.stats.store_num_accounts.swap(0, Ordering::Relaxed),
i64
),
(
"total_data",
self.stats.store_total_data.swap(0, Ordering::Relaxed),
i64
),
);
let recycle_stores = self.recycle_stores.read().unwrap();
datapoint_info!(
"accounts_db_store_timings2",
(
"recycle_store_count",
self.stats.recycle_store_count.swap(0, Ordering::Relaxed),
i64
),
(
"current_recycle_store_count",
recycle_stores.entry_count(),
i64
),
(
"current_recycle_store_bytes",
recycle_stores.total_bytes(),
i64
),
(
"create_store_count",
self.stats.create_store_count.swap(0, Ordering::Relaxed),
i64
),
(
"store_get_slot_store",
self.stats.store_get_slot_store.swap(0, Ordering::Relaxed),
i64
),
(
"store_find_existing",
self.stats.store_find_existing.swap(0, Ordering::Relaxed),
i64
),
(
"dropped_stores",
self.stats.dropped_stores.swap(0, Ordering::Relaxed),
i64
),
);
}
}
fn store_accounts_unfrozen(
&self,
slot: Slot,
accounts: &[(&Pubkey, &Account)],
hashes: &[Hash],
is_cached_store: bool,
) {
// This path comes from a store to a non-frozen slot.
// If a store is dead here, then a newer update for
// each pubkey in the store must exist in another
// store in the slot. Thus it is safe to reset the store and
// re-use it for a future store op. The pubkey ref counts should still
// hold just 1 ref from this slot.
let reset_accounts = true;
self.store_accounts_custom(
slot,
accounts,
hashes,
None::<StorageFinder>,
None::<Box<dyn Iterator<Item = u64>>>,
is_cached_store,
reset_accounts,
);
}
fn store_accounts_frozen<'a>(
&'a self,
slot: Slot,
accounts: &[(&Pubkey, &Account)],
hashes: &[Hash],
storage_finder: Option<StorageFinder<'a>>,
write_version_producer: Option<Box<dyn Iterator<Item = u64>>>,
) -> StoreAccountsTiming {
// stores on a frozen slot should not reset
// the append vec so that hashing could happen on the store
// and accounts in the append_vec can be unrefed correctly
let reset_accounts = false;
let is_cached_store = false;
self.store_accounts_custom(
slot,
accounts,
hashes,
storage_finder,
write_version_producer,
is_cached_store,
reset_accounts,
)
}
fn store_accounts_custom<'a>(
&'a self,
slot: Slot,
accounts: &[(&Pubkey, &Account)],
hashes: &[Hash],
storage_finder: Option<StorageFinder<'a>>,
write_version_producer: Option<Box<dyn Iterator<Item = u64>>>,
is_cached_store: bool,
reset_accounts: bool,
) -> StoreAccountsTiming {
let storage_finder: StorageFinder<'a> = storage_finder
.unwrap_or_else(|| Box::new(move |slot, size| self.find_storage_candidate(slot, size)));
let write_version_producer: Box<dyn Iterator<Item = u64>> = write_version_producer
.unwrap_or_else(|| {
let mut current_version = self.bulk_assign_write_version(accounts.len());
Box::new(std::iter::from_fn(move || {
let ret = current_version;
current_version += 1;
Some(ret)
}))
});
self.stats
.store_num_accounts
.fetch_add(accounts.len() as u64, Ordering::Relaxed);
let mut store_accounts_time = Measure::start("store_accounts");
let infos = self.store_accounts_to(
slot,
accounts,
hashes,
storage_finder,
write_version_producer,
is_cached_store,
);
store_accounts_time.stop();
self.stats
.store_accounts
.fetch_add(store_accounts_time.as_us(), Ordering::Relaxed);
let mut update_index_time = Measure::start("update_index");
        // If the cache was flushed, then because `update_index` occurs
        // after the accounts are stored by the above `store_accounts_to`
        // call, all reads after this point will know not to check the
        // cache anymore
let mut reclaims = self.update_index(slot, infos, accounts);
// For each updated account, `reclaims` should only have at most one
// item (if the account was previously updated in this slot).
// filter out the cached reclaims as those don't actually map
// to anything that needs to be cleaned in the backing storage
// entries
if self.caching_enabled {
reclaims.retain(|(_, r)| r.store_id != CACHE_VIRTUAL_STORAGE_ID);
if is_cached_store {
assert!(reclaims.is_empty());
}
}
update_index_time.stop();
self.stats
.store_update_index
.fetch_add(update_index_time.as_us(), Ordering::Relaxed);
        // A store for a single slot should:
        // 1) Only make "reclaims" for the same slot
        // 2) Should not cause any slots to be removed from the storage
        //    database because
        //    a) this slot has at least one account (the one being stored),
        //    b) from 1) we know no other slots are included in the "reclaims"
        //
        // From 1) and 2) we know passing `Some(slot), true` below is safe
let mut handle_reclaims_time = Measure::start("handle_reclaims");
self.handle_reclaims(&reclaims, Some(slot), true, None, reset_accounts);
handle_reclaims_time.stop();
self.stats
.store_handle_reclaims
.fetch_add(handle_reclaims_time.as_us(), Ordering::Relaxed);
StoreAccountsTiming {
store_accounts_elapsed: store_accounts_time.as_us(),
update_index_elapsed: update_index_time.as_us(),
handle_reclaims_elapsed: handle_reclaims_time.as_us(),
}
}
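    /// Marks `slot` as a root in the accounts index (and in the accounts cache
    /// when caching is enabled). Rooted slots become eligible for snapshotting
    /// and for background cleaning/shrinking.
    ///
    /// A minimal usage sketch (not compiled; mirrors the development-cluster
    /// setup used by the tests in this file):
    ///
    /// ```ignore
    /// let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
    /// db.store_uncached(0, &[(&Pubkey::default(), &Account::new(1, 0, &Pubkey::default()))]);
    /// db.add_root(0); // slot 0 can now be cleaned and snapshotted
    /// ```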
pub fn add_root(&self, slot: Slot) {
self.accounts_index.add_root(slot, self.caching_enabled);
if self.caching_enabled {
self.accounts_cache.add_root(slot);
}
}
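    /// Collects, for every rooted slot at or below `snapshot_slot`, the storage
    /// entries that hold at least one account. Slots whose storages are all
    /// empty are omitted from the result.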
pub fn get_snapshot_storages(&self, snapshot_slot: Slot) -> SnapshotStorages {
self.storage
.0
.iter()
.filter(|iter_item| {
let slot = *iter_item.key();
slot <= snapshot_slot && self.accounts_index.is_root(slot)
})
.map(|iter_item| {
iter_item
.value()
.read()
.unwrap()
.values()
.filter(|x| x.has_accounts())
.cloned()
.collect()
})
.filter(|snapshot_storage: &SnapshotStorage| !snapshot_storage.is_empty())
.collect()
}
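    /// Rebuilds the accounts index from the on-disk append vecs (run at startup).
    /// Every stored write version is re-inserted (older versions keep ref counts
    /// consistent until they are purged), all slots are then rooted, and finally
    /// per-store alive byte and count bookkeeping is restored from the index.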
pub fn generate_index(&self) {
type AccountsMap<'a> = HashMap<Pubkey, BTreeMap<u64, (AppendVecId, StoredAccountMeta<'a>)>>;
let mut slots = self.storage.all_slots();
#[allow(clippy::stable_sort_primitive)]
slots.sort();
let mut last_log_update = Instant::now();
for (index, slot) in slots.iter().enumerate() {
let now = Instant::now();
if now.duration_since(last_log_update).as_secs() >= 2 {
info!("generating index: {}/{} slots...", index, slots.len());
last_log_update = now;
}
let storage_maps: Vec<Arc<AccountStorageEntry>> = self
.storage
.get_slot_storage_entries(*slot)
.unwrap_or_default();
let num_accounts = storage_maps
.iter()
.map(|storage| storage.approx_stored_count())
.sum();
let mut accounts_map: AccountsMap = AccountsMap::with_capacity(num_accounts);
storage_maps.iter().for_each(|storage| {
let accounts = storage.all_accounts();
accounts.into_iter().for_each(|stored_account| {
let entry = accounts_map
.entry(stored_account.meta.pubkey)
.or_insert_with(BTreeMap::new);
assert!(
// There should only be one update per write version for a specific slot
// and account
entry
.insert(
stored_account.meta.write_version,
(storage.append_vec_id(), stored_account)
)
.is_none()
);
})
});
            // Index entries must be restored even for older write versions, which
            // may be shielding other accounts. When those versions are later
            // purged, the original, no-longer-shielded account value becomes
            // visible again when the account is restored from the append-vec.
if !accounts_map.is_empty() {
let mut _reclaims: Vec<(u64, AccountInfo)> = vec![];
let dirty_keys = accounts_map.iter().map(|(pubkey, _info)| *pubkey).collect();
self.uncleaned_pubkeys.insert(*slot, dirty_keys);
for (pubkey, account_infos) in accounts_map.into_iter() {
for (_, (store_id, stored_account)) in account_infos.into_iter() {
let account_info = AccountInfo {
store_id,
offset: stored_account.offset,
stored_size: stored_account.stored_size,
lamports: stored_account.account_meta.lamports,
};
self.accounts_index.insert_new_if_missing(
*slot,
&pubkey,
&stored_account.account_meta.owner,
&stored_account.data,
&self.account_indexes,
account_info,
&mut _reclaims,
);
}
}
}
}
// Need to add these last, otherwise older updates will be cleaned
for slot in slots {
self.accounts_index.add_root(slot, false);
}
let mut stored_sizes_and_counts = HashMap::new();
for account_entry in self.accounts_index.account_maps.read().unwrap().values() {
for (_slot, account_entry) in account_entry.slot_list.read().unwrap().iter() {
let storage_entry_meta = stored_sizes_and_counts
.entry(account_entry.store_id)
.or_insert((0, 0));
storage_entry_meta.0 += account_entry.stored_size;
storage_entry_meta.1 += 1;
}
}
for slot_stores in self.storage.0.iter() {
for (id, store) in slot_stores.value().read().unwrap().iter() {
// Should be default at this point
assert_eq!(store.alive_bytes(), 0);
if let Some((stored_size, count)) = stored_sizes_and_counts.get(&id) {
trace!("id: {} setting count: {} cur: {}", id, count, store.count(),);
store.count_and_status.write().unwrap().0 = *count;
store.alive_bytes.store(*stored_size, Ordering::SeqCst);
} else {
trace!("id: {} clearing count", id);
store.count_and_status.write().unwrap().0 = 0;
}
}
}
}
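    /// Debug helper: logs index contents, per-store counts/status, and the
    /// recycle-store list.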
pub(crate) fn print_accounts_stats(&self, label: &str) {
self.print_index(label);
self.print_count_and_status(label);
info!("recycle_stores:");
let recycle_stores = self.recycle_stores.read().unwrap();
for (recycled_time, entry) in recycle_stores.iter() {
info!(
" slot: {} id: {} count_and_status: {:?} approx_store_count: {} len: {} capacity: {} (recycled: {:?})",
entry.slot(),
entry.append_vec_id(),
*entry.count_and_status.read().unwrap(),
entry.approx_store_count.load(Ordering::Relaxed),
entry.accounts.len(),
entry.accounts.capacity(),
recycled_time,
);
}
}
fn print_index(&self, label: &str) {
let mut roots: Vec<_> = self.accounts_index.all_roots();
#[allow(clippy::stable_sort_primitive)]
roots.sort();
info!("{}: accounts_index roots: {:?}", label, roots,);
for (pubkey, account_entry) in self.accounts_index.account_maps.read().unwrap().iter() {
info!(" key: {} ref_count: {}", pubkey, account_entry.ref_count(),);
info!(
" slots: {:?}",
*account_entry.slot_list.read().unwrap()
);
}
}
fn print_count_and_status(&self, label: &str) {
let mut slots: Vec<_> = self.storage.all_slots();
#[allow(clippy::stable_sort_primitive)]
slots.sort();
info!("{}: count_and status for {} slots:", label, slots.len());
for slot in &slots {
let slot_stores = self.storage.get_slot_stores(*slot).unwrap();
let r_slot_stores = slot_stores.read().unwrap();
let mut ids: Vec<_> = r_slot_stores.keys().cloned().collect();
#[allow(clippy::stable_sort_primitive)]
ids.sort();
for id in &ids {
let entry = r_slot_stores.get(id).unwrap();
info!(
" slot: {} id: {} count_and_status: {:?} approx_store_count: {} len: {} capacity: {}",
slot,
id,
*entry.count_and_status.read().unwrap(),
entry.approx_store_count.load(Ordering::Relaxed),
entry.accounts.len(),
entry.accounts.capacity(),
);
}
}
}
}
#[cfg(test)]
impl AccountsDb {
pub fn new_sized(paths: Vec<PathBuf>, file_size: u64) -> Self {
AccountsDb {
file_size,
..AccountsDb::new(paths, &ClusterType::Development)
}
}
pub fn new_sized_no_extra_stores(paths: Vec<PathBuf>, file_size: u64) -> Self {
AccountsDb {
file_size,
min_num_stores: 0,
..AccountsDb::new(paths, &ClusterType::Development)
}
}
pub fn get_append_vec_id(&self, pubkey: &Pubkey, slot: Slot) -> Option<AppendVecId> {
let ancestors = vec![(slot, 1)].into_iter().collect();
let result = self.accounts_index.get(&pubkey, Some(&ancestors), None);
result.map(|(list, index)| list.slot_list()[index].1.store_id)
}
pub fn alive_account_count_in_slot(&self, slot: Slot) -> usize {
self.storage
.get_slot_stores(slot)
.map(|storages| storages.read().unwrap().values().map(|s| s.count()).sum())
.unwrap_or(0)
}
}
/// Legacy shrink functions to support the non-cached path.
/// These can be deleted once the cache path is the only path.
impl AccountsDb {
    // Reads all accounts in the given slot's AppendVecs, filters down to the
    // alive ones, then creates a minimal AppendVec holding only those.
    // The v1 path shrinks all stores in the slot.
    //
    // Requires all stores in the slot to be re-written; otherwise the
    // accounts_index store ref counts could become incorrect.
fn do_shrink_slot_v1(&self, slot: Slot, forced: bool) -> usize {
struct FoundStoredAccount {
account: Account,
account_hash: Hash,
account_size: usize,
store_id: AppendVecId,
offset: usize,
write_version: u64,
}
trace!("shrink_stale_slot: slot: {}", slot);
let mut stored_accounts: HashMap<Pubkey, FoundStoredAccount> = HashMap::new();
let mut storage_read_elapsed = Measure::start("storage_read_elapsed");
{
if let Some(stores_lock) = self.storage.get_slot_stores(slot) {
let stores = stores_lock.read().unwrap();
let mut alive_count = 0;
let mut stored_count = 0;
let mut written_bytes = 0;
let mut total_bytes = 0;
for store in stores.values() {
alive_count += store.count();
stored_count += store.approx_stored_count();
written_bytes += store.written_bytes();
total_bytes += store.total_bytes();
}
if alive_count == stored_count && stores.values().len() == 1 {
trace!(
"shrink_stale_slot ({}): not able to shrink at all: alive/stored: {} / {} {}",
slot,
alive_count,
stored_count,
if forced { " (forced)" } else { "" },
);
return 0;
} else if !forced {
let sparse_by_count = (alive_count as f32 / stored_count as f32) <= 0.8;
let sparse_by_bytes = (written_bytes as f32 / total_bytes as f32) <= 0.8;
let not_sparse = !sparse_by_count && !sparse_by_bytes;
let too_small_to_shrink = total_bytes <= PAGE_SIZE;
if not_sparse || too_small_to_shrink {
return 0;
}
info!(
"shrink_stale_slot ({}): not_sparse: {} count: {}/{} byte: {}/{}",
slot, not_sparse, alive_count, stored_count, written_bytes, total_bytes,
);
}
for store in stores.values() {
let mut start = 0;
while let Some((account, next)) = store.accounts.get_account(start) {
match stored_accounts.entry(account.meta.pubkey) {
Entry::Occupied(mut occupied_entry) => {
if account.meta.write_version > occupied_entry.get().write_version {
occupied_entry.insert(FoundStoredAccount {
account: account.clone_account(),
account_hash: *account.hash,
account_size: next - start,
store_id: store.append_vec_id(),
offset: account.offset,
write_version: account.meta.write_version,
});
}
}
Entry::Vacant(vacant_entry) => {
vacant_entry.insert(FoundStoredAccount {
account: account.clone_account(),
account_hash: *account.hash,
account_size: next - start,
store_id: store.append_vec_id(),
offset: account.offset,
write_version: account.meta.write_version,
});
}
}
start = next;
}
}
}
}
storage_read_elapsed.stop();
let mut index_read_elapsed = Measure::start("index_read_elapsed");
let mut alive_total = 0;
let alive_accounts: Vec<_> = {
stored_accounts
.iter()
.filter(|(pubkey, stored_account)| {
let FoundStoredAccount {
account_size,
store_id,
offset,
..
} = stored_account;
if let Some((locked_entry, _)) = self.accounts_index.get(pubkey, None, None) {
let is_alive = locked_entry
.slot_list()
.iter()
.any(|(_slot, i)| i.store_id == *store_id && i.offset == *offset);
if !is_alive {
// This pubkey was found in the storage, but no longer exists in the index.
// It would have had a ref to the storage from the initial store, but it will
// not exist in the re-written slot. Unref it to keep the index consistent with
// rewriting the storage entries.
locked_entry.unref()
} else {
alive_total += *account_size as u64;
}
is_alive
} else {
false
}
})
.collect()
};
index_read_elapsed.stop();
let aligned_total: u64 = self.page_align(alive_total);
let alive_accounts_len = alive_accounts.len();
debug!(
"shrinking: slot: {}, stored_accounts: {} => alive_accounts: {} ({} bytes; aligned to: {})",
slot,
stored_accounts.len(),
alive_accounts_len,
alive_total,
aligned_total
);
let mut rewrite_elapsed = Measure::start("rewrite_elapsed");
let mut dead_storages = vec![];
let mut find_alive_elapsed = 0;
let mut create_and_insert_store_elapsed = 0;
let mut write_storage_elapsed = 0;
let mut store_accounts_timing = StoreAccountsTiming::default();
if aligned_total > 0 {
let mut start = Measure::start("find_alive_elapsed");
let mut accounts = Vec::with_capacity(alive_accounts_len);
let mut hashes = Vec::with_capacity(alive_accounts_len);
let mut write_versions = Vec::with_capacity(alive_accounts_len);
for (pubkey, alive_account) in alive_accounts {
accounts.push((pubkey, &alive_account.account));
hashes.push(alive_account.account_hash);
write_versions.push(alive_account.write_version);
}
start.stop();
find_alive_elapsed = start.as_us();
let mut start = Measure::start("create_and_insert_store_elapsed");
let shrunken_store = if let Some(new_store) =
self.try_recycle_and_insert_store(slot, aligned_total, aligned_total + 1024)
{
new_store
} else {
let maybe_shrink_paths = self.shrink_paths.read().unwrap();
if let Some(ref shrink_paths) = *maybe_shrink_paths {
self.create_and_insert_store_with_paths(
slot,
aligned_total,
"shrink-w-path",
shrink_paths,
)
} else {
self.create_and_insert_store(slot, aligned_total, "shrink")
}
};
start.stop();
create_and_insert_store_elapsed = start.as_us();
            // Here we write back alive_accounts. This can be done without taking
            // wide locks across this whole function because we are mutating rooted
            // slots; there should be no writers to them.
store_accounts_timing = self.store_accounts_frozen(
slot,
&accounts,
&hashes,
Some(Box::new(move |_, _| shrunken_store.clone())),
Some(Box::new(write_versions.into_iter())),
);
let mut start = Measure::start("write_storage_elapsed");
if let Some(slot_stores) = self.storage.get_slot_stores(slot) {
slot_stores.write().unwrap().retain(|_key, store| {
if store.count() == 0 {
dead_storages.push(store.clone());
}
store.count() > 0
});
}
start.stop();
write_storage_elapsed = start.as_us();
}
rewrite_elapsed.stop();
let mut recycle_stores_write_elapsed = Measure::start("recycle_stores_write_elapsed");
let mut recycle_stores = self.recycle_stores.write().unwrap();
recycle_stores_write_elapsed.stop();
let mut drop_storage_entries_elapsed = Measure::start("drop_storage_entries_elapsed");
if recycle_stores.entry_count() < MAX_RECYCLE_STORES {
recycle_stores.add_entries(dead_storages);
drop(recycle_stores);
} else {
self.stats
.dropped_stores
.fetch_add(dead_storages.len() as u64, Ordering::Relaxed);
drop(recycle_stores);
drop(dead_storages);
}
drop_storage_entries_elapsed.stop();
self.shrink_stats
.num_slots_shrunk
.fetch_add(1, Ordering::Relaxed);
self.shrink_stats
.storage_read_elapsed
.fetch_add(storage_read_elapsed.as_us(), Ordering::Relaxed);
self.shrink_stats
.index_read_elapsed
.fetch_add(index_read_elapsed.as_us(), Ordering::Relaxed);
self.shrink_stats
.find_alive_elapsed
.fetch_add(find_alive_elapsed, Ordering::Relaxed);
self.shrink_stats
.create_and_insert_store_elapsed
.fetch_add(create_and_insert_store_elapsed, Ordering::Relaxed);
self.shrink_stats.store_accounts_elapsed.fetch_add(
store_accounts_timing.store_accounts_elapsed,
Ordering::Relaxed,
);
self.shrink_stats.update_index_elapsed.fetch_add(
store_accounts_timing.update_index_elapsed,
Ordering::Relaxed,
);
self.shrink_stats.handle_reclaims_elapsed.fetch_add(
store_accounts_timing.handle_reclaims_elapsed,
Ordering::Relaxed,
);
self.shrink_stats
.write_storage_elapsed
.fetch_add(write_storage_elapsed, Ordering::Relaxed);
self.shrink_stats
.rewrite_elapsed
.fetch_add(rewrite_elapsed.as_us(), Ordering::Relaxed);
self.shrink_stats
.drop_storage_entries_elapsed
.fetch_add(drop_storage_entries_elapsed.as_us(), Ordering::Relaxed);
self.shrink_stats
.recycle_stores_write_elapsed
.fetch_add(recycle_stores_write_elapsed.as_us(), Ordering::Relaxed);
self.shrink_stats.report();
alive_accounts_len
}
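    // Drains roots that have not been cleaned yet (up to `max_clean_root`) from
    // the index into the v1 shrink candidate list.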
fn do_reset_uncleaned_roots_v1(
&self,
candidates: &mut MutexGuard<Vec<Slot>>,
max_clean_root: Option<Slot>,
) {
let previous_roots = self.accounts_index.reset_uncleaned_roots(max_clean_root);
candidates.extend(previous_roots);
}
#[cfg(test)]
fn reset_uncleaned_roots_v1(&self) {
self.do_reset_uncleaned_roots_v1(&mut self.shrink_candidate_slots_v1.lock().unwrap(), None);
}
fn do_shrink_stale_slot_v1(&self, slot: Slot) -> usize {
self.do_shrink_slot_v1(slot, false)
}
fn do_shrink_slot_forced_v1(&self, slot: Slot) {
self.do_shrink_slot_v1(slot, true);
}
fn shrink_stale_slot_v1(&self, candidates: &mut MutexGuard<Vec<Slot>>) -> usize {
let mut shrunken_account_total = 0;
let mut shrunk_slot_count = 0;
let start = Instant::now();
let num_roots = self.accounts_index.num_roots();
loop {
if let Some(slot) = self.do_next_shrink_slot_v1(candidates) {
shrunken_account_total += self.do_shrink_stale_slot_v1(slot);
} else {
return 0;
}
if start.elapsed().as_millis() > 100 || shrunk_slot_count > num_roots / 10 {
debug!(
"do_shrink_stale_slot_v1: {} {} {}us",
shrunk_slot_count,
candidates.len(),
start.elapsed().as_micros()
);
break;
}
shrunk_slot_count += 1;
}
shrunken_account_total
}
    // Infinitely returns root slots in cyclic order
fn do_next_shrink_slot_v1(&self, candidates: &mut MutexGuard<Vec<Slot>>) -> Option<Slot> {
        // At this point, the `candidates` lock is guaranteed to be held, keeping
        // do_reset_uncleaned_roots() (in clean_accounts()) from updating candidates.
        // The candidate list in the lock may also be swapped out here if it is empty.
let next = candidates.pop();
if next.is_some() {
next
} else {
let mut new_all_slots = self.all_root_slots_in_index();
let next = new_all_slots.pop();
// refresh candidates for later calls!
**candidates = new_all_slots;
next
}
}
#[cfg(test)]
fn next_shrink_slot_v1(&self) -> Option<Slot> {
let mut candidates = self.shrink_candidate_slots_v1.lock().unwrap();
self.do_next_shrink_slot_v1(&mut candidates)
}
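    /// Background entry point for the v1 shrink path: shrinks a small batch of
    /// stale slots (bounded by elapsed time and by a fraction of the root count)
    /// and returns the number of accounts rewritten, or 0 if the candidate lock
    /// is currently held by clean_accounts().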
pub fn process_stale_slot_v1(&self) -> usize {
let mut measure = Measure::start("stale_slot_shrink-ms");
let candidates = self.shrink_candidate_slots_v1.try_lock();
if candidates.is_err() {
            // Skip and return immediately if locked by clean_accounts();
            // the calling background thread will just retry later.
return 0;
}
// hold this lock as long as this shrinking process is running to avoid conflicts
// with clean_accounts().
let mut candidates = candidates.unwrap();
let count = self.shrink_stale_slot_v1(&mut candidates);
measure.stop();
inc_new_counter_info!("stale_slot_shrink-ms", measure.as_ms() as usize);
count
}
#[cfg(test)]
fn shrink_all_stale_slots_v1(&self) {
for slot in self.all_slots_in_storage() {
self.do_shrink_stale_slot_v1(slot);
}
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::{
accounts_hash::MERKLE_FANOUT, accounts_index::tests::*, accounts_index::RefCount,
append_vec::AccountMeta, inline_spl_token_v2_0,
};
use assert_matches::assert_matches;
use rand::{thread_rng, Rng};
use solana_sdk::{account::Account, hash::HASH_BYTES, pubkey::PUBKEY_BYTES};
use std::{
iter::FromIterator,
str::FromStr,
thread::{self, sleep, Builder, JoinHandle},
time::Duration,
};
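    // Test helper: builds an ancestors map for slots 0..end_slot where each
    // slot's parent is the previous slot.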
fn linear_ancestors(end_slot: u64) -> Ancestors {
let mut ancestors: Ancestors = vec![(0, 0)].into_iter().collect();
for i in 1..end_slot {
ancestors.insert(i, (i - 1) as usize);
}
ancestors
}
#[test]
#[should_panic(expected = "assertion failed: bins <= max_plus_1 && bins > 0")]
fn test_accountsdb_scan_snapshot_stores_illegal_bins2() {
let mut stats = HashStats::default();
AccountsDb::scan_snapshot_stores(&[], true, &mut stats, 257);
}
#[test]
#[should_panic(expected = "assertion failed: bins <= max_plus_1 && bins > 0")]
fn test_accountsdb_scan_snapshot_stores_illegal_bins() {
let mut stats = HashStats::default();
AccountsDb::scan_snapshot_stores(&[], true, &mut stats, 0);
}
fn sample_storages_and_accounts() -> (SnapshotStorages, Vec<CalculateHashIntermediate>) {
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey0 = Pubkey::new(&[0u8; 32]);
let pubkey127 = Pubkey::new(&[0x7fu8; 32]);
let pubkey128 = Pubkey::new(&[0x80u8; 32]);
let pubkey255 = Pubkey::new(&[0xffu8; 32]);
const SLOT: u64 = 0;
let raw_expected = vec![
CalculateHashIntermediate {
version: 0,
hash: Hash::from_str("2UXkyxNEXNRbLo793fkWcdqQDuU8zwFjVhH6sbrcptKH").unwrap(),
lamports: 1,
slot: 0,
pubkey: pubkey0,
},
CalculateHashIntermediate {
version: 1,
hash: Hash::from_str("E8cioj2q9T6QFhijrUPRnP8iy86NtQievPyRe3GY5TMC").unwrap(),
lamports: 128,
slot: 0,
pubkey: pubkey127,
},
CalculateHashIntermediate {
version: 2,
hash: Hash::from_str("9yaXmx2ruksV1465BuMffqspjW35ggH8nTs8SW2Lq6NK").unwrap(),
lamports: 129,
slot: 0,
pubkey: pubkey128,
},
CalculateHashIntermediate {
version: 3,
hash: Hash::from_str("7nhnUMjRsaA83HgvEJVv3YrDqHd1SCoVbvsWDTXzCgfh").unwrap(),
lamports: 256,
slot: 0,
pubkey: pubkey255,
},
];
accounts.store_uncached(
SLOT,
&[(
&pubkey0,
&Account::new(raw_expected[0].lamports, 1, &Account::default().owner),
)],
);
accounts.store_uncached(
SLOT,
&[(&pubkey127, &Account::new(128, 1, &Account::default().owner))],
);
accounts.store_uncached(
SLOT,
&[(&pubkey128, &Account::new(129, 1, &Account::default().owner))],
);
accounts.store_uncached(
SLOT,
&[(&pubkey255, &Account::new(256, 1, &Account::default().owner))],
);
accounts.add_root(SLOT);
let storages = accounts.get_snapshot_storages(SLOT);
(storages, raw_expected)
}
#[test]
fn test_accountsdb_scan_snapshot_stores() {
let (mut storages, raw_expected) = sample_storages_and_accounts();
let bins = 1;
let mut stats = HashStats::default();
let result = AccountsDb::scan_snapshot_stores(&storages, true, &mut stats, bins);
assert_eq!(result, vec![vec![raw_expected.clone()]]);
let bins = 2;
let result = AccountsDb::scan_snapshot_stores(&storages, true, &mut stats, bins);
let mut expected = vec![Vec::new(); bins];
expected[0].push(raw_expected[0].clone());
expected[0].push(raw_expected[1].clone());
expected[bins - 1].push(raw_expected[2].clone());
expected[bins - 1].push(raw_expected[3].clone());
assert_eq!(result, vec![expected]);
let bins = 4;
let result = AccountsDb::scan_snapshot_stores(&storages, true, &mut stats, bins);
let mut expected = vec![Vec::new(); bins];
expected[0].push(raw_expected[0].clone());
expected[1].push(raw_expected[1].clone());
expected[2].push(raw_expected[2].clone());
expected[bins - 1].push(raw_expected[3].clone());
assert_eq!(result, vec![expected]);
let bins = 256;
let result = AccountsDb::scan_snapshot_stores(&storages, true, &mut stats, bins);
let mut expected = vec![Vec::new(); bins];
expected[0].push(raw_expected[0].clone());
expected[127].push(raw_expected[1].clone());
expected[128].push(raw_expected[2].clone());
expected[bins - 1].push(raw_expected.last().unwrap().clone());
assert_eq!(result, vec![expected]);
// enough stores to get to 2nd chunk
let bins = 1;
let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap();
let slot_expected: Slot = 0;
let size: usize = 123;
let data = AccountStorageEntry::new(&paths[0], slot_expected, 0, size as u64);
let arc = Arc::new(data);
const MAX_ITEMS_PER_CHUNK: usize = 5_000;
storages[0].splice(0..0, vec![arc; MAX_ITEMS_PER_CHUNK]);
let mut stats = HashStats::default();
let result = AccountsDb::scan_snapshot_stores(&storages, true, &mut stats, bins);
assert_eq!(result.len(), 2); // 2 chunks
assert_eq!(result[0].len(), 0); // nothing found in first slots
assert_eq!(result[1].len(), bins);
assert_eq!(result[1], vec![raw_expected]);
}
#[test]
fn test_accountsdb_calculate_accounts_hash_without_index_simple() {
solana_logger::setup();
let (storages, _size, _slot_expected) = sample_storage();
let result = AccountsDb::calculate_accounts_hash_without_index(&storages, true, None);
let expected_hash = Hash::from_str("GKot5hBsd81kMupNCXHaqbhv3huEbxAFMLnpcX2hniwn").unwrap();
assert_eq!(result, (expected_hash, 0));
}
#[test]
fn test_accountsdb_calculate_accounts_hash_without_index() {
solana_logger::setup();
let (storages, raw_expected) = sample_storages_and_accounts();
let expected_hash =
AccountsHash::compute_merkle_root_loop(raw_expected.clone(), MERKLE_FANOUT, |item| {
item.hash
});
let sum = raw_expected.iter().map(|item| item.lamports).sum();
let result = AccountsDb::calculate_accounts_hash_without_index(&storages, true, None);
assert_eq!(result, (expected_hash, sum));
}
fn sample_storage() -> (SnapshotStorages, usize, Slot) {
let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap();
let slot_expected: Slot = 0;
let size: usize = 123;
let data = AccountStorageEntry::new(&paths[0], slot_expected, 0, size as u64);
let arc = Arc::new(data);
let storages = vec![vec![arc]];
(storages, size, slot_expected)
}
#[test]
fn test_accountsdb_scan_account_storage_no_bank() {
solana_logger::setup();
let expected = 1;
let tf = crate::append_vec::test_utils::get_append_vec_path(
"test_accountsdb_scan_account_storage_no_bank",
);
let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap();
let slot_expected: Slot = 0;
let size: usize = 123;
let mut data = AccountStorageEntry::new(&paths[0], slot_expected, 0, size as u64);
let av = AppendVec::new(&tf.path, true, 1024 * 1024);
data.accounts = av;
let arc = Arc::new(data);
let storages = vec![vec![arc]];
let pubkey = solana_sdk::pubkey::new_rand();
let acc = Account::new(1, 48, &Account::default().owner);
let sm = StoredMeta {
data_len: 1,
pubkey,
write_version: 1,
};
storages[0][0]
.accounts
.append_accounts(&[(sm, &acc)], &[Hash::default()]);
let calls = AtomicU64::new(0);
let result = AccountsDb::scan_account_storage_no_bank(
&storages,
&mut HashStats::default(),
|loaded_account: LoadedAccount, accum: &mut Vec<u64>, slot: Slot| {
calls.fetch_add(1, Ordering::Relaxed);
assert_eq!(loaded_account.pubkey(), &pubkey);
assert_eq!(slot_expected, slot);
accum.push(expected);
},
);
assert_eq!(calls.load(Ordering::Relaxed), 1);
assert_eq!(result, vec![vec![expected]]);
}
#[test]
fn test_accountsdb_add_root() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account0 = Account::new(1, 0, &key);
db.store_uncached(0, &[(&key, &account0)]);
db.add_root(0);
let ancestors = vec![(1, 1)].into_iter().collect();
assert_eq!(db.load_slow(&ancestors, &key), Some((account0, 0)));
}
#[test]
fn test_accountsdb_latest_ancestor() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account0 = Account::new(1, 0, &key);
db.store_uncached(0, &[(&key, &account0)]);
let account1 = Account::new(0, 0, &key);
db.store_uncached(1, &[(&key, &account1)]);
let ancestors = vec![(1, 1)].into_iter().collect();
assert_eq!(&db.load_slow(&ancestors, &key).unwrap().0, &account1);
let ancestors = vec![(1, 1), (0, 0)].into_iter().collect();
assert_eq!(&db.load_slow(&ancestors, &key).unwrap().0, &account1);
let accounts: Vec<Account> =
db.unchecked_scan_accounts("", &ancestors, |accounts: &mut Vec<Account>, option| {
accounts.push(option.1.account());
});
assert_eq!(accounts, vec![account1]);
}
#[test]
fn test_accountsdb_latest_ancestor_with_root() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account0 = Account::new(1, 0, &key);
db.store_uncached(0, &[(&key, &account0)]);
let account1 = Account::new(0, 0, &key);
db.store_uncached(1, &[(&key, &account1)]);
db.add_root(0);
let ancestors = vec![(1, 1)].into_iter().collect();
assert_eq!(&db.load_slow(&ancestors, &key).unwrap().0, &account1);
let ancestors = vec![(1, 1), (0, 0)].into_iter().collect();
assert_eq!(&db.load_slow(&ancestors, &key).unwrap().0, &account1);
}
#[test]
fn test_accountsdb_root_one_slot() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account0 = Account::new(1, 0, &key);
// store value 1 in the "root", i.e. db zero
db.store_uncached(0, &[(&key, &account0)]);
// now we have:
//
// root0 -> key.lamports==1
// / \
// / \
// key.lamports==0 <- slot1 \
// slot2 -> key.lamports==1
// (via root0)
// store value 0 in one child
let account1 = Account::new(0, 0, &key);
db.store_uncached(1, &[(&key, &account1)]);
        // masking accounts is done at the Accounts level; at the AccountsDb level
        // we see the original account (but could also accept "None", which is
        // implemented at the Accounts level)
let ancestors = vec![(0, 0), (1, 1)].into_iter().collect();
assert_eq!(&db.load_slow(&ancestors, &key).unwrap().0, &account1);
        // we should see 1 lamport in slot 2
let ancestors = vec![(0, 0), (2, 2)].into_iter().collect();
assert_eq!(&db.load_slow(&ancestors, &key).unwrap().0, &account0);
db.add_root(0);
let ancestors = vec![(1, 1)].into_iter().collect();
assert_eq!(db.load_slow(&ancestors, &key), Some((account1, 1)));
let ancestors = vec![(2, 2)].into_iter().collect();
assert_eq!(db.load_slow(&ancestors, &key), Some((account0, 0))); // original value
}
#[test]
fn test_accountsdb_add_root_many() {
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&db, &mut pubkeys, 0, 100, 0, 0);
for _ in 1..100 {
let idx = thread_rng().gen_range(0, 99);
let ancestors = vec![(0, 0)].into_iter().collect();
let account = db.load_slow(&ancestors, &pubkeys[idx]).unwrap();
let default_account = Account {
lamports: (idx + 1) as u64,
..Account::default()
};
assert_eq!((default_account, 0), account);
}
db.add_root(0);
// check that all the accounts appear with a new root
for _ in 1..100 {
let idx = thread_rng().gen_range(0, 99);
let ancestors = vec![(0, 0)].into_iter().collect();
let account0 = db.load_slow(&ancestors, &pubkeys[idx]).unwrap();
let ancestors = vec![(1, 1)].into_iter().collect();
let account1 = db.load_slow(&ancestors, &pubkeys[idx]).unwrap();
let default_account = Account {
lamports: (idx + 1) as u64,
..Account::default()
};
assert_eq!(&default_account, &account0.0);
assert_eq!(&default_account, &account1.0);
}
}
#[test]
fn test_accountsdb_count_stores() {
solana_logger::setup();
let db = AccountsDb::new_single();
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&db, &mut pubkeys, 0, 2, DEFAULT_FILE_SIZE as usize / 3, 0);
assert!(check_storage(&db, 0, 2));
let pubkey = solana_sdk::pubkey::new_rand();
let account = Account::new(1, DEFAULT_FILE_SIZE as usize / 3, &pubkey);
db.store_uncached(1, &[(&pubkey, &account)]);
db.store_uncached(1, &[(&pubkeys[0], &account)]);
{
let slot_0_stores = &db.storage.get_slot_stores(0).unwrap();
let slot_1_stores = &db.storage.get_slot_stores(1).unwrap();
let r_slot_0_stores = slot_0_stores.read().unwrap();
let r_slot_1_stores = slot_1_stores.read().unwrap();
assert_eq!(r_slot_0_stores.len(), 1);
assert_eq!(r_slot_1_stores.len(), 1);
assert_eq!(r_slot_0_stores.get(&0).unwrap().count(), 2);
assert_eq!(r_slot_1_stores[&1].count(), 2);
assert_eq!(r_slot_0_stores.get(&0).unwrap().approx_stored_count(), 2);
assert_eq!(r_slot_1_stores[&1].approx_stored_count(), 2);
}
// adding root doesn't change anything
db.get_accounts_delta_hash(1);
db.add_root(1);
{
let slot_0_stores = &db.storage.get_slot_stores(0).unwrap();
let slot_1_stores = &db.storage.get_slot_stores(1).unwrap();
let r_slot_0_stores = slot_0_stores.read().unwrap();
let r_slot_1_stores = slot_1_stores.read().unwrap();
assert_eq!(r_slot_0_stores.len(), 1);
assert_eq!(r_slot_1_stores.len(), 1);
assert_eq!(r_slot_0_stores.get(&0).unwrap().count(), 2);
assert_eq!(r_slot_1_stores[&1].count(), 2);
assert_eq!(r_slot_0_stores.get(&0).unwrap().approx_stored_count(), 2);
assert_eq!(r_slot_1_stores[&1].approx_stored_count(), 2);
}
// overwrite old rooted account version; only the r_slot_0_stores.count() should be
// decremented
db.store_uncached(2, &[(&pubkeys[0], &account)]);
db.clean_accounts(None);
{
let slot_0_stores = &db.storage.get_slot_stores(0).unwrap();
let slot_1_stores = &db.storage.get_slot_stores(1).unwrap();
let r_slot_0_stores = slot_0_stores.read().unwrap();
let r_slot_1_stores = slot_1_stores.read().unwrap();
assert_eq!(r_slot_0_stores.len(), 1);
assert_eq!(r_slot_1_stores.len(), 1);
assert_eq!(r_slot_0_stores.get(&0).unwrap().count(), 1);
assert_eq!(r_slot_1_stores[&1].count(), 2);
assert_eq!(r_slot_0_stores.get(&0).unwrap().approx_stored_count(), 2);
assert_eq!(r_slot_1_stores[&1].approx_stored_count(), 2);
}
}
#[test]
fn test_accounts_unsquashed() {
let key = Pubkey::default();
        // 1 lamport in the "root", i.e. db zero
let db0 = AccountsDb::new(Vec::new(), &ClusterType::Development);
let account0 = Account::new(1, 0, &key);
db0.store_uncached(0, &[(&key, &account0)]);
// 0 lamports in the child
let account1 = Account::new(0, 0, &key);
db0.store_uncached(1, &[(&key, &account1)]);
        // masking accounts is done at the Accounts level; at the AccountsDb level
        // we see the original account
let ancestors = vec![(0, 0), (1, 1)].into_iter().collect();
assert_eq!(db0.load_slow(&ancestors, &key), Some((account1, 1)));
let ancestors = vec![(0, 0)].into_iter().collect();
assert_eq!(db0.load_slow(&ancestors, &key), Some((account0, 0)));
}
#[test]
fn test_remove_unrooted_slot() {
let unrooted_slot = 9;
let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development);
db.caching_enabled = true;
let key = Pubkey::default();
let account0 = Account::new(1, 0, &key);
let ancestors: HashMap<_, _> = vec![(unrooted_slot, 1)].into_iter().collect();
db.store_cached(unrooted_slot, &[(&key, &account0)]);
db.bank_hashes
.write()
.unwrap()
.insert(unrooted_slot, BankHashInfo::default());
assert!(db
.accounts_index
.get(&key, Some(&ancestors), None)
.is_some());
assert_load_account(&db, unrooted_slot, key, 1);
// Purge the slot
db.remove_unrooted_slot(unrooted_slot);
assert!(db.load_slow(&ancestors, &key).is_none());
assert!(db.bank_hashes.read().unwrap().get(&unrooted_slot).is_none());
assert!(db.storage.0.get(&unrooted_slot).is_none());
assert!(db
.accounts_index
.get_account_read_entry(&key)
.map(|locked_entry| locked_entry.slot_list().is_empty())
.unwrap_or(true));
assert!(db
.accounts_index
.get(&key, Some(&ancestors), None)
.is_none());
// Test we can store for the same slot again and get the right information
let account0 = Account::new(2, 0, &key);
db.store_uncached(unrooted_slot, &[(&key, &account0)]);
assert_load_account(&db, unrooted_slot, key, 2);
}
#[test]
fn test_remove_unrooted_slot_snapshot() {
solana_logger::setup();
let unrooted_slot = 9;
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = solana_sdk::pubkey::new_rand();
let account0 = Account::new(1, 0, &key);
db.store_uncached(unrooted_slot, &[(&key, &account0)]);
// Purge the slot
db.remove_unrooted_slot(unrooted_slot);
// Add a new root
let key2 = solana_sdk::pubkey::new_rand();
let new_root = unrooted_slot + 1;
db.store_uncached(new_root, &[(&key2, &account0)]);
db.add_root(new_root);
// Simulate reconstruction from snapshot
let db = reconstruct_accounts_db_via_serialization(&db, new_root);
// Check root account exists
assert_load_account(&db, new_root, key2, 1);
// Check purged account stays gone
let unrooted_slot_ancestors: HashMap<_, _> = vec![(unrooted_slot, 1)].into_iter().collect();
assert!(db.load_slow(&unrooted_slot_ancestors, &key).is_none());
}
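    // Test helper: creates `num` system accounts (lamports = index + 1) and
    // `num_vote` vote-program accounts in `slot`, appending their pubkeys to
    // `pubkeys`.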
fn create_account(
accounts: &AccountsDb,
pubkeys: &mut Vec<Pubkey>,
slot: Slot,
num: usize,
space: usize,
num_vote: usize,
) {
let ancestors = vec![(slot, 0)].into_iter().collect();
for t in 0..num {
let pubkey = solana_sdk::pubkey::new_rand();
let account = Account::new((t + 1) as u64, space, &Account::default().owner);
pubkeys.push(pubkey);
assert!(accounts.load_slow(&ancestors, &pubkey).is_none());
accounts.store_uncached(slot, &[(&pubkey, &account)]);
}
for t in 0..num_vote {
let pubkey = solana_sdk::pubkey::new_rand();
let account = Account::new((num + t + 1) as u64, space, &solana_vote_program::id());
pubkeys.push(pubkey);
let ancestors = vec![(slot, 0)].into_iter().collect();
assert!(accounts.load_slow(&ancestors, &pubkey).is_none());
accounts.store_uncached(slot, &[(&pubkey, &account)]);
}
}
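    // Test helper: randomly picks stored accounts, bumps their lamports, and
    // verifies each update round-trips through load_slow.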
fn update_accounts(accounts: &AccountsDb, pubkeys: &[Pubkey], slot: Slot, range: usize) {
for _ in 1..1000 {
let idx = thread_rng().gen_range(0, range);
let ancestors = vec![(slot, 0)].into_iter().collect();
if let Some((mut account, _)) = accounts.load_slow(&ancestors, &pubkeys[idx]) {
account.lamports += 1;
accounts.store_uncached(slot, &[(&pubkeys[idx], &account)]);
if account.lamports == 0 {
let ancestors = vec![(slot, 0)].into_iter().collect();
assert!(accounts.load_slow(&ancestors, &pubkeys[idx]).is_none());
} else {
let default_account = Account {
lamports: account.lamports,
..Account::default()
};
assert_eq!(default_account, account);
}
}
}
}
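    // Test helper: asserts `slot` has exactly one storage entry, that the alive
    // account count across its stores equals `count`, and that the approximate
    // and actual stored counts agree.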
fn check_storage(accounts: &AccountsDb, slot: Slot, count: usize) -> bool {
assert_eq!(
accounts
.storage
.get_slot_stores(slot)
.unwrap()
.read()
.unwrap()
.len(),
1
);
let slot_storages = accounts.storage.get_slot_stores(slot).unwrap();
let mut total_count: usize = 0;
let r_slot_storages = slot_storages.read().unwrap();
for store in r_slot_storages.values() {
assert_eq!(store.status(), AccountStorageStatus::Available);
total_count += store.count();
}
assert_eq!(total_count, count);
let (expected_store_count, actual_store_count): (usize, usize) = (
r_slot_storages
.values()
.map(|s| s.approx_stored_count())
.sum(),
r_slot_storages
.values()
.map(|s| s.all_accounts().len())
.sum(),
);
assert_eq!(expected_store_count, actual_store_count);
total_count == count
}
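    // Test helper: spot-checks `num` random pubkeys, expecting each to hold
    // `index + count` lamports in `slot`.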
fn check_accounts(
accounts: &AccountsDb,
pubkeys: &[Pubkey],
slot: Slot,
num: usize,
count: usize,
) {
let ancestors = vec![(slot, 0)].into_iter().collect();
for _ in 0..num {
let idx = thread_rng().gen_range(0, num);
let account = accounts.load_slow(&ancestors, &pubkeys[idx]);
let account1 = Some((
Account::new((idx + count) as u64, 0, &Account::default().owner),
slot,
));
assert_eq!(account, account1);
}
}
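    // Test helper: rewrites the first `num` accounts in `slot` so that account
    // `idx` holds `idx + count` lamports.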
#[allow(clippy::needless_range_loop)]
fn modify_accounts(
accounts: &AccountsDb,
pubkeys: &[Pubkey],
slot: Slot,
num: usize,
count: usize,
) {
for idx in 0..num {
let account = Account::new((idx + count) as u64, 0, &Account::default().owner);
accounts.store_uncached(slot, &[(&pubkeys[idx], &account)]);
}
}
#[test]
fn test_account_one() {
let (_accounts_dirs, paths) = get_temp_accounts_paths(1).unwrap();
let db = AccountsDb::new(paths, &ClusterType::Development);
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&db, &mut pubkeys, 0, 1, 0, 0);
let ancestors = vec![(0, 0)].into_iter().collect();
let account = db.load_slow(&ancestors, &pubkeys[0]).unwrap();
let default_account = Account {
lamports: 1,
..Account::default()
};
assert_eq!((default_account, 0), account);
}
#[test]
fn test_account_many() {
let (_accounts_dirs, paths) = get_temp_accounts_paths(2).unwrap();
let db = AccountsDb::new(paths, &ClusterType::Development);
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&db, &mut pubkeys, 0, 100, 0, 0);
check_accounts(&db, &pubkeys, 0, 100, 1);
}
#[test]
fn test_account_update() {
let accounts = AccountsDb::new_single();
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&accounts, &mut pubkeys, 0, 100, 0, 0);
update_accounts(&accounts, &pubkeys, 0, 99);
        assert!(check_storage(&accounts, 0, 100));
}
#[test]
fn test_account_grow_many() {
let (_accounts_dir, paths) = get_temp_accounts_paths(2).unwrap();
let size = 4096;
let accounts = AccountsDb::new_sized(paths, size);
let mut keys = vec![];
for i in 0..9 {
let key = solana_sdk::pubkey::new_rand();
let account = Account::new(i + 1, size as usize / 4, &key);
accounts.store_uncached(0, &[(&key, &account)]);
keys.push(key);
}
let ancestors = vec![(0, 0)].into_iter().collect();
for (i, key) in keys.iter().enumerate() {
assert_eq!(
accounts.load_slow(&ancestors, &key).unwrap().0.lamports,
(i as u64) + 1
);
}
let mut append_vec_histogram = HashMap::new();
let mut all_storages = vec![];
for slot_storage in accounts.storage.0.iter() {
all_storages.extend(slot_storage.read().unwrap().values().cloned())
}
for storage in all_storages {
*append_vec_histogram.entry(storage.slot()).or_insert(0) += 1;
}
for count in append_vec_histogram.values() {
assert!(*count >= 2);
}
}
#[test]
fn test_account_grow() {
let accounts = AccountsDb::new_single();
let status = [AccountStorageStatus::Available, AccountStorageStatus::Full];
let pubkey1 = solana_sdk::pubkey::new_rand();
let account1 = Account::new(1, DEFAULT_FILE_SIZE as usize / 2, &pubkey1);
accounts.store_uncached(0, &[(&pubkey1, &account1)]);
{
let stores = &accounts.storage.get_slot_stores(0).unwrap();
let r_stores = stores.read().unwrap();
assert_eq!(r_stores.len(), 1);
assert_eq!(r_stores[&0].count(), 1);
assert_eq!(r_stores[&0].status(), AccountStorageStatus::Available);
}
let pubkey2 = solana_sdk::pubkey::new_rand();
let account2 = Account::new(1, DEFAULT_FILE_SIZE as usize / 2, &pubkey2);
accounts.store_uncached(0, &[(&pubkey2, &account2)]);
{
assert_eq!(accounts.storage.0.len(), 1);
let stores = &accounts.storage.get_slot_stores(0).unwrap();
let r_stores = stores.read().unwrap();
assert_eq!(r_stores.len(), 2);
assert_eq!(r_stores[&0].count(), 1);
assert_eq!(r_stores[&0].status(), AccountStorageStatus::Full);
assert_eq!(r_stores[&1].count(), 1);
assert_eq!(r_stores[&1].status(), AccountStorageStatus::Available);
}
let ancestors = vec![(0, 0)].into_iter().collect();
assert_eq!(
accounts.load_slow(&ancestors, &pubkey1).unwrap().0,
account1
);
assert_eq!(
accounts.load_slow(&ancestors, &pubkey2).unwrap().0,
account2
);
        // lots of stores, but a handful of storages (at most 5) should be enough for everything
for _ in 0..25 {
accounts.store_uncached(0, &[(&pubkey1, &account1)]);
{
assert_eq!(accounts.storage.0.len(), 1);
let stores = &accounts.storage.get_slot_stores(0).unwrap();
let r_stores = stores.read().unwrap();
assert!(r_stores.len() <= 5);
assert_eq!(r_stores[&0].status(), status[0]);
}
let ancestors = vec![(0, 0)].into_iter().collect();
assert_eq!(
accounts.load_slow(&ancestors, &pubkey1).unwrap().0,
account1
);
assert_eq!(
accounts.load_slow(&ancestors, &pubkey2).unwrap().0,
account2
);
}
}
#[test]
fn test_lazy_gc_slot() {
solana_logger::setup();
        // This test is pedantic.
        // A slot is purged when a non-root bank is cleaned up. If a slot is
        // behind the root but is not itself a root, we are retaining dead banks.
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey = solana_sdk::pubkey::new_rand();
let account = Account::new(1, 0, &Account::default().owner);
//store an account
accounts.store_uncached(0, &[(&pubkey, &account)]);
let ancestors = vec![(0, 0)].into_iter().collect();
let id = {
let (lock, idx) = accounts
.accounts_index
.get(&pubkey, Some(&ancestors), None)
.unwrap();
lock.slot_list()[idx].1.store_id
};
accounts.get_accounts_delta_hash(0);
accounts.add_root(1);
//slot is still there, since gc is lazy
assert!(accounts
.storage
.get_slot_stores(0)
.unwrap()
.read()
.unwrap()
.get(&id)
.is_some());
//store causes clean
accounts.store_uncached(1, &[(&pubkey, &account)]);
// generate delta state for slot 1, so clean operates on it.
accounts.get_accounts_delta_hash(1);
//slot is gone
accounts.print_accounts_stats("pre-clean");
accounts.clean_accounts(None);
assert!(accounts.storage.0.get(&0).is_none());
//new value is there
let ancestors = vec![(1, 1)].into_iter().collect();
assert_eq!(accounts.load_slow(&ancestors, &pubkey), Some((account, 1)));
}
impl AccountsDb {
fn all_account_count_in_append_vec(&self, slot: Slot) -> usize {
let slot_storage = self.storage.get_slot_stores(slot);
if let Some(slot_storage) = slot_storage {
let r_slot_storage = slot_storage.read().unwrap();
let count = r_slot_storage
.values()
.map(|store| store.all_accounts().len())
.sum();
let stored_count: usize = r_slot_storage
.values()
.map(|store| store.approx_stored_count())
.sum();
assert_eq!(stored_count, count);
count
} else {
0
}
}
fn ref_count_for_pubkey(&self, pubkey: &Pubkey) -> RefCount {
self.accounts_index.ref_count_from_storage(&pubkey)
}
}
#[test]
fn test_clean_zero_lamport_and_dead_slot() {
solana_logger::setup();
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey1 = solana_sdk::pubkey::new_rand();
let pubkey2 = solana_sdk::pubkey::new_rand();
let account = Account::new(1, 1, &Account::default().owner);
let zero_lamport_account = Account::new(0, 0, &Account::default().owner);
// Store two accounts
accounts.store_uncached(0, &[(&pubkey1, &account)]);
accounts.store_uncached(0, &[(&pubkey2, &account)]);
// Make sure both accounts are in the same AppendVec in slot 0, which
// will prevent pubkey1 from being cleaned up later even when it's a
// zero-lamport account
let ancestors: HashMap<Slot, usize> = vec![(0, 1)].into_iter().collect();
let (slot1, account_info1) = accounts
.accounts_index
.get(&pubkey1, Some(&ancestors), None)
.map(|(account_list1, index1)| account_list1.slot_list()[index1].clone())
.unwrap();
let (slot2, account_info2) = accounts
.accounts_index
.get(&pubkey2, Some(&ancestors), None)
.map(|(account_list2, index2)| account_list2.slot_list()[index2].clone())
.unwrap();
assert_eq!(slot1, 0);
assert_eq!(slot1, slot2);
assert_eq!(account_info1.store_id, account_info2.store_id);
// Update account 1 in slot 1
accounts.store_uncached(1, &[(&pubkey1, &account)]);
// Update account 1 as zero lamports account
accounts.store_uncached(2, &[(&pubkey1, &zero_lamport_account)]);
// Pubkey 1 was the only account in slot 1, and it was updated in slot 2, so
// slot 1 should be purged
accounts.add_root(0);
accounts.add_root(1);
accounts.add_root(2);
// Slot 1 should be removed, slot 0 cannot be removed because it still has
// the latest update for pubkey 2
accounts.clean_accounts(None);
assert!(accounts.storage.get_slot_stores(0).is_some());
assert!(accounts.storage.get_slot_stores(1).is_none());
        // Slot 1 should be cleaned because all of its accounts are
        // zero lamports, and are not present in any other slot's
        // storage entries
assert_eq!(accounts.alive_account_count_in_slot(1), 0);
}
#[test]
fn test_clean_zero_lamport_and_old_roots() {
solana_logger::setup();
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey = solana_sdk::pubkey::new_rand();
let account = Account::new(1, 0, &Account::default().owner);
let zero_lamport_account = Account::new(0, 0, &Account::default().owner);
// Store a zero-lamport account
accounts.store_uncached(0, &[(&pubkey, &account)]);
accounts.store_uncached(1, &[(&pubkey, &zero_lamport_account)]);
// Simulate rooting the zero-lamport account, should be a
// candidate for cleaning
accounts.add_root(0);
accounts.add_root(1);
// Slot 0 should be removed, and
// zero-lamport account should be cleaned
accounts.clean_accounts(None);
assert!(accounts.storage.get_slot_stores(0).is_none());
assert!(accounts.storage.get_slot_stores(1).is_none());
        // Slot 0 should be cleaned because all of its accounts have been
        // updated in the rooted slot 1
assert_eq!(accounts.alive_account_count_in_slot(0), 0);
        // Slot 1 should be cleaned because all of its accounts are
        // zero lamports, and are not present in any other slot's
        // storage entries
assert_eq!(accounts.alive_account_count_in_slot(1), 0);
// zero lamport account, should no longer exist in accounts index
// because it has been removed
assert!(accounts.accounts_index.get(&pubkey, None, None).is_none());
}
#[test]
fn test_clean_old_with_normal_account() {
solana_logger::setup();
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey = solana_sdk::pubkey::new_rand();
let account = Account::new(1, 0, &Account::default().owner);
//store an account
accounts.store_uncached(0, &[(&pubkey, &account)]);
accounts.store_uncached(1, &[(&pubkey, &account)]);
// simulate slots are rooted after while
accounts.get_accounts_delta_hash(0);
accounts.add_root(0);
accounts.get_accounts_delta_hash(1);
accounts.add_root(1);
//even if rooted, old state isn't cleaned up
assert_eq!(accounts.alive_account_count_in_slot(0), 1);
assert_eq!(accounts.alive_account_count_in_slot(1), 1);
accounts.clean_accounts(None);
//now old state is cleaned up
assert_eq!(accounts.alive_account_count_in_slot(0), 0);
assert_eq!(accounts.alive_account_count_in_slot(1), 1);
}
#[test]
fn test_clean_old_with_zero_lamport_account() {
solana_logger::setup();
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey1 = solana_sdk::pubkey::new_rand();
let pubkey2 = solana_sdk::pubkey::new_rand();
let normal_account = Account::new(1, 0, &Account::default().owner);
let zero_account = Account::new(0, 0, &Account::default().owner);
//store an account
accounts.store_uncached(0, &[(&pubkey1, &normal_account)]);
accounts.store_uncached(1, &[(&pubkey1, &zero_account)]);
accounts.store_uncached(0, &[(&pubkey2, &normal_account)]);
accounts.store_uncached(1, &[(&pubkey2, &normal_account)]);
//simulate slots are rooted after while
accounts.get_accounts_delta_hash(0);
accounts.add_root(0);
accounts.get_accounts_delta_hash(1);
accounts.add_root(1);
//even if rooted, old state isn't cleaned up
assert_eq!(accounts.alive_account_count_in_slot(0), 2);
assert_eq!(accounts.alive_account_count_in_slot(1), 2);
accounts.print_accounts_stats("");
accounts.clean_accounts(None);
//Old state behind zero-lamport account is cleaned up
assert_eq!(accounts.alive_account_count_in_slot(0), 0);
assert_eq!(accounts.alive_account_count_in_slot(1), 2);
}
#[test]
fn test_clean_old_with_both_normal_and_zero_lamport_accounts() {
solana_logger::setup();
let accounts = AccountsDb::new_with_config(
Vec::new(),
&ClusterType::Development,
spl_token_mint_index_enabled(),
false,
);
let pubkey1 = solana_sdk::pubkey::new_rand();
let pubkey2 = solana_sdk::pubkey::new_rand();
// Set up account to be added to secondary index
let mint_key = Pubkey::new_unique();
let mut account_data_with_mint =
vec![0; inline_spl_token_v2_0::state::Account::get_packed_len()];
account_data_with_mint[..PUBKEY_BYTES].clone_from_slice(&(mint_key.clone().to_bytes()));
let mut normal_account = Account::new(1, 0, &Account::default().owner);
normal_account.owner = inline_spl_token_v2_0::id();
normal_account.data = account_data_with_mint.clone();
let mut zero_account = Account::new(0, 0, &Account::default().owner);
zero_account.owner = inline_spl_token_v2_0::id();
zero_account.data = account_data_with_mint;
//store an account
accounts.store_uncached(0, &[(&pubkey1, &normal_account)]);
accounts.store_uncached(0, &[(&pubkey1, &normal_account)]);
accounts.store_uncached(1, &[(&pubkey1, &zero_account)]);
accounts.store_uncached(0, &[(&pubkey2, &normal_account)]);
accounts.store_uncached(2, &[(&pubkey2, &normal_account)]);
//simulate slots are rooted after while
accounts.get_accounts_delta_hash(0);
accounts.add_root(0);
accounts.get_accounts_delta_hash(1);
accounts.add_root(1);
accounts.get_accounts_delta_hash(2);
accounts.add_root(2);
//even if rooted, old state isn't cleaned up
assert_eq!(accounts.alive_account_count_in_slot(0), 2);
assert_eq!(accounts.alive_account_count_in_slot(1), 1);
assert_eq!(accounts.alive_account_count_in_slot(2), 1);
// Secondary index should still find both pubkeys
let mut found_accounts = HashSet::new();
accounts.accounts_index.index_scan_accounts(
&HashMap::new(),
IndexKey::SplTokenMint(mint_key),
|key, _| {
found_accounts.insert(*key);
},
);
assert_eq!(found_accounts.len(), 2);
assert!(found_accounts.contains(&pubkey1));
assert!(found_accounts.contains(&pubkey2));
accounts.clean_accounts(None);
//both zero lamport and normal accounts are cleaned up
assert_eq!(accounts.alive_account_count_in_slot(0), 0);
// The only store to slot 1 was a zero lamport account, should
// be purged by zero-lamport cleaning logic because slot 1 is
// rooted
assert_eq!(accounts.alive_account_count_in_slot(1), 0);
assert_eq!(accounts.alive_account_count_in_slot(2), 1);
// `pubkey1`, a zero lamport account, should no longer exist in accounts index
// because it has been removed by the clean
assert!(accounts.accounts_index.get(&pubkey1, None, None).is_none());
// Secondary index should have purged `pubkey1` as well
let mut found_accounts = vec![];
accounts.accounts_index.index_scan_accounts(
&HashMap::new(),
IndexKey::SplTokenMint(mint_key),
|key, _| found_accounts.push(*key),
);
assert_eq!(found_accounts, vec![pubkey2]);
}
#[test]
fn test_clean_max_slot_zero_lamport_account() {
solana_logger::setup();
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey = solana_sdk::pubkey::new_rand();
let account = Account::new(1, 0, &Account::default().owner);
let zero_account = Account::new(0, 0, &Account::default().owner);
// store an account, make it a zero lamport account
// in slot 1
accounts.store_uncached(0, &[(&pubkey, &account)]);
accounts.store_uncached(1, &[(&pubkey, &zero_account)]);
// simulate slots are rooted after while
accounts.add_root(0);
accounts.add_root(1);
        // Only clean up to slot 0; this should not purge slot 0 based on the
        // zero-lamport update in the later slot 1
assert_eq!(accounts.alive_account_count_in_slot(0), 1);
assert_eq!(accounts.alive_account_count_in_slot(1), 1);
accounts.clean_accounts(Some(0));
assert_eq!(accounts.alive_account_count_in_slot(0), 1);
assert_eq!(accounts.alive_account_count_in_slot(1), 1);
assert!(accounts.accounts_index.get(&pubkey, None, None).is_some());
// Now the account can be cleaned up
accounts.clean_accounts(Some(1));
assert_eq!(accounts.alive_account_count_in_slot(0), 0);
assert_eq!(accounts.alive_account_count_in_slot(1), 0);
        // The zero lamport account should no longer exist in accounts index
        // because it has been removed
assert!(accounts.accounts_index.get(&pubkey, None, None).is_none());
}
#[test]
fn test_uncleaned_roots_with_account() {
solana_logger::setup();
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey = solana_sdk::pubkey::new_rand();
let account = Account::new(1, 0, &Account::default().owner);
//store an account
accounts.store_uncached(0, &[(&pubkey, &account)]);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0);
// simulate slots are rooted after while
accounts.add_root(0);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 1);
//now uncleaned roots are cleaned up
accounts.clean_accounts(None);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0);
}
#[test]
fn test_uncleaned_roots_with_no_account() {
solana_logger::setup();
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0);
// simulate slots are rooted after while
accounts.add_root(0);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 1);
//now uncleaned roots are cleaned up
accounts.clean_accounts(None);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0);
}
#[test]
fn test_accounts_db_serialize1() {
solana_logger::setup();
let accounts = AccountsDb::new_single();
let mut pubkeys: Vec<Pubkey> = vec![];
// Create 100 accounts in slot 0
create_account(&accounts, &mut pubkeys, 0, 100, 0, 0);
accounts.clean_accounts(None);
check_accounts(&accounts, &pubkeys, 0, 100, 1);
// do some updates to those accounts and re-check
modify_accounts(&accounts, &pubkeys, 0, 100, 2);
        assert!(check_storage(&accounts, 0, 100));
check_accounts(&accounts, &pubkeys, 0, 100, 2);
accounts.get_accounts_delta_hash(0);
accounts.add_root(0);
let mut pubkeys1: Vec<Pubkey> = vec![];
// CREATE SLOT 1
let latest_slot = 1;
// Modify the first 10 of the accounts from slot 0 in slot 1
modify_accounts(&accounts, &pubkeys, latest_slot, 10, 3);
// Overwrite account 30 from slot 0 with lamports=0 into slot 1.
// Slot 1 should now have 10 + 1 = 11 accounts
let account = Account::new(0, 0, &Account::default().owner);
accounts.store_uncached(latest_slot, &[(&pubkeys[30], &account)]);
// Create 10 new accounts in slot 1, should now have 11 + 10 = 21
// accounts
create_account(&accounts, &mut pubkeys1, latest_slot, 10, 0, 0);
accounts.get_accounts_delta_hash(latest_slot);
accounts.add_root(latest_slot);
assert!(check_storage(&accounts, 1, 21));
// CREATE SLOT 2
let latest_slot = 2;
let mut pubkeys2: Vec<Pubkey> = vec![];
// Modify first 20 of the accounts from slot 0 in slot 2
modify_accounts(&accounts, &pubkeys, latest_slot, 20, 4);
accounts.clean_accounts(None);
// Overwrite account 31 from slot 0 with lamports=0 into slot 2.
// Slot 2 should now have 20 + 1 = 21 accounts
let account = Account::new(0, 0, &Account::default().owner);
accounts.store_uncached(latest_slot, &[(&pubkeys[31], &account)]);
// Create 10 new accounts in slot 2. Slot 2 should now have
// 21 + 10 = 31 accounts
create_account(&accounts, &mut pubkeys2, latest_slot, 10, 0, 0);
accounts.get_accounts_delta_hash(latest_slot);
accounts.add_root(latest_slot);
assert!(check_storage(&accounts, 2, 31));
accounts.clean_accounts(None);
// The first 20 accounts of slot 0 have been updated in slot 2, as well as
// accounts 30 and 31 (overwritten with zero-lamport accounts in slot 1 and
// slot 2 respectively), so only 78 accounts are left in slot 0's storage entries.
assert!(check_storage(&accounts, 0, 78));
        // 10 of the 21 accounts have been modified in slot 2, so only 11
        // accounts are left in slot 1.
assert!(check_storage(&accounts, 1, 11));
assert!(check_storage(&accounts, 2, 31));
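        // Round-trip the db through serialization; the reconstructed db must
        // observe the same state as the original at the latest root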
let daccounts = reconstruct_accounts_db_via_serialization(&accounts, latest_slot);
assert_eq!(
daccounts.write_version.load(Ordering::Relaxed),
accounts.write_version.load(Ordering::Relaxed)
);
assert_eq!(
daccounts.next_id.load(Ordering::Relaxed),
accounts.next_id.load(Ordering::Relaxed)
);
        // Get the hash for the latest slot; the deserialized AccountsDb's
        // bank_hashes map holds that entry plus the default entry for slot 0
assert_eq!(daccounts.bank_hashes.read().unwrap().len(), 2);
assert_eq!(
daccounts.bank_hashes.read().unwrap().get(&latest_slot),
accounts.bank_hashes.read().unwrap().get(&latest_slot)
);
daccounts.print_count_and_status("daccounts");
        // Don't check the first 35 accounts; some of them have been modified in
        // later slots
check_accounts(&daccounts, &pubkeys[35..], 0, 65, 37);
check_accounts(&daccounts, &pubkeys1, 1, 10, 1);
assert!(check_storage(&daccounts, 0, 100));
assert!(check_storage(&daccounts, 1, 21));
assert!(check_storage(&daccounts, 2, 31));
let ancestors = linear_ancestors(latest_slot);
assert_eq!(
daccounts.update_accounts_hash(latest_slot, &ancestors, true),
accounts.update_accounts_hash(latest_slot, &ancestors, true)
);
}
fn assert_load_account(
accounts: &AccountsDb,
slot: Slot,
pubkey: Pubkey,
expected_lamports: u64,
) {
let ancestors = vec![(slot, 0)].into_iter().collect();
        let (account, _) = accounts.load_slow(&ancestors, &pubkey).unwrap();
        assert_eq!(account.lamports, expected_lamports);
}
fn assert_not_load_account(accounts: &AccountsDb, slot: Slot, pubkey: Pubkey) {
let ancestors = vec![(slot, 0)].into_iter().collect();
assert!(accounts.load_slow(&ancestors, &pubkey).is_none());
}
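    /// Serializes `accounts` as of `slot` and deserializes it back into a fresh
    /// AccountsDb, simulating a snapshot save/restore cycle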
fn reconstruct_accounts_db_via_serialization(accounts: &AccountsDb, slot: Slot) -> AccountsDb {
let daccounts =
crate::serde_snapshot::reconstruct_accounts_db_via_serialization(accounts, slot);
daccounts.print_count_and_status("daccounts");
daccounts
}
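    /// Asserts that `slot` has no storage entries left (or only an empty set)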
fn assert_no_stores(accounts: &AccountsDb, slot: Slot) {
let slot_stores = accounts.storage.get_slot_stores(slot);
let r_slot_stores = slot_stores.as_ref().map(|slot_stores| {
let r_slot_stores = slot_stores.read().unwrap();
info!("{:?}", *r_slot_stores);
r_slot_stores
});
assert!(r_slot_stores.is_none() || r_slot_stores.unwrap().is_empty());
}
#[test]
fn test_accounts_db_purge_keep_live() {
solana_logger::setup();
let some_lamport = 223;
let zero_lamport = 0;
let no_data = 0;
let owner = Account::default().owner;
let account = Account::new(some_lamport, no_data, &owner);
let pubkey = solana_sdk::pubkey::new_rand();
let account2 = Account::new(some_lamport, no_data, &owner);
let pubkey2 = solana_sdk::pubkey::new_rand();
let zero_lamport_account = Account::new(zero_lamport, no_data, &owner);
let accounts = AccountsDb::new_single();
accounts.add_root(0);
// Step A
let mut current_slot = 1;
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
// Store another live account to slot 1 which will prevent any purge
// since the store count will not be zero
accounts.store_uncached(current_slot, &[(&pubkey2, &account2)]);
accounts.add_root(current_slot);
let (slot1, account_info1) = accounts
.accounts_index
.get(&pubkey, None, None)
.map(|(account_list1, index1)| account_list1.slot_list()[index1].clone())
.unwrap();
let (slot2, account_info2) = accounts
.accounts_index
.get(&pubkey2, None, None)
.map(|(account_list2, index2)| account_list2.slot_list()[index2].clone())
.unwrap();
assert_eq!(slot1, current_slot);
assert_eq!(slot1, slot2);
assert_eq!(account_info1.store_id, account_info2.store_id);
// Step B
current_slot += 1;
let zero_lamport_slot = current_slot;
accounts.store_uncached(current_slot, &[(&pubkey, &zero_lamport_account)]);
accounts.add_root(current_slot);
assert_load_account(&accounts, current_slot, pubkey, zero_lamport);
current_slot += 1;
accounts.add_root(current_slot);
accounts.print_accounts_stats("pre_purge");
accounts.clean_accounts(None);
accounts.print_accounts_stats("post_purge");
        // The earlier entry for pubkey in the accounts index is purged.
let (slot_list_len, index_slot) = {
let account_entry = accounts
.accounts_index
.get_account_read_entry(&pubkey)
.unwrap();
let slot_list = account_entry.slot_list();
(slot_list.len(), slot_list[0].0)
};
assert_eq!(slot_list_len, 1);
// Zero lamport entry was not the one purged
assert_eq!(index_slot, zero_lamport_slot);
// The ref count should still be 2 because no slots were purged
assert_eq!(accounts.ref_count_for_pubkey(&pubkey), 2);
// storage for slot 1 had 2 accounts, now has 1 after pubkey 1
// was reclaimed
check_storage(&accounts, 1, 1);
        // storage for slot 2 had 1 account, still has 1
check_storage(&accounts, 2, 1);
}
#[test]
fn test_accounts_db_purge1() {
solana_logger::setup();
let some_lamport = 223;
let zero_lamport = 0;
let no_data = 0;
let owner = Account::default().owner;
let account = Account::new(some_lamport, no_data, &owner);
let pubkey = solana_sdk::pubkey::new_rand();
let zero_lamport_account = Account::new(zero_lamport, no_data, &owner);
let accounts = AccountsDb::new_single();
accounts.add_root(0);
let mut current_slot = 1;
accounts.set_hash(current_slot, current_slot - 1);
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.set_hash(current_slot, current_slot - 1);
accounts.store_uncached(current_slot, &[(&pubkey, &zero_lamport_account)]);
accounts.add_root(current_slot);
assert_load_account(&accounts, current_slot, pubkey, zero_lamport);
// Otherwise slot 2 will not be removed
current_slot += 1;
accounts.set_hash(current_slot, current_slot - 1);
accounts.add_root(current_slot);
accounts.print_accounts_stats("pre_purge");
let ancestors = linear_ancestors(current_slot);
info!("ancestors: {:?}", ancestors);
let hash = accounts.update_accounts_hash_test(current_slot, &ancestors, true);
accounts.clean_accounts(None);
assert_eq!(
accounts.update_accounts_hash_test(current_slot, &ancestors, true),
hash
);
accounts.print_accounts_stats("post_purge");
        // Make sure the index entry for pubkey is cleared
assert!(accounts
.accounts_index
.get_account_read_entry(&pubkey)
.is_none());
// slot 1 & 2 should not have any stores
assert_no_stores(&accounts, 1);
assert_no_stores(&accounts, 2);
}
#[test]
fn test_accounts_db_serialize_zero_and_free() {
solana_logger::setup();
let some_lamport = 223;
let zero_lamport = 0;
let no_data = 0;
let owner = Account::default().owner;
let account = Account::new(some_lamport, no_data, &owner);
let pubkey = solana_sdk::pubkey::new_rand();
let zero_lamport_account = Account::new(zero_lamport, no_data, &owner);
let account2 = Account::new(some_lamport + 1, no_data, &owner);
let pubkey2 = solana_sdk::pubkey::new_rand();
let filler_account = Account::new(some_lamport, no_data, &owner);
let filler_account_pubkey = solana_sdk::pubkey::new_rand();
let accounts = AccountsDb::new_single();
let mut current_slot = 1;
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&pubkey, &zero_lamport_account)]);
accounts.store_uncached(current_slot, &[(&pubkey2, &account2)]);
// Store enough accounts such that an additional store for slot 2 is created.
while accounts
.storage
.get_slot_stores(current_slot)
.unwrap()
.read()
.unwrap()
.len()
< 2
{
accounts.store_uncached(current_slot, &[(&filler_account_pubkey, &filler_account)]);
}
accounts.add_root(current_slot);
assert_load_account(&accounts, current_slot, pubkey, zero_lamport);
accounts.print_accounts_stats("accounts");
accounts.clean_accounts(None);
accounts.print_accounts_stats("accounts_post_purge");
let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot);
accounts.print_accounts_stats("reconstructed");
assert_load_account(&accounts, current_slot, pubkey, zero_lamport);
}
fn with_chained_zero_lamport_accounts<F>(f: F)
where
F: Fn(AccountsDb, Slot) -> AccountsDb,
{
let some_lamport = 223;
let zero_lamport = 0;
let dummy_lamport = 999;
let no_data = 0;
let owner = Account::default().owner;
let account = Account::new(some_lamport, no_data, &owner);
let account2 = Account::new(some_lamport + 100_001, no_data, &owner);
let account3 = Account::new(some_lamport + 100_002, no_data, &owner);
let zero_lamport_account = Account::new(zero_lamport, no_data, &owner);
let pubkey = solana_sdk::pubkey::new_rand();
let purged_pubkey1 = solana_sdk::pubkey::new_rand();
let purged_pubkey2 = solana_sdk::pubkey::new_rand();
let dummy_account = Account::new(dummy_lamport, no_data, &owner);
let dummy_pubkey = Pubkey::default();
let accounts = AccountsDb::new_single();
let mut current_slot = 1;
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
accounts.store_uncached(current_slot, &[(&purged_pubkey1, &account2)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&purged_pubkey1, &zero_lamport_account)]);
accounts.store_uncached(current_slot, &[(&purged_pubkey2, &account3)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&purged_pubkey2, &zero_lamport_account)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&dummy_pubkey, &dummy_account)]);
accounts.add_root(current_slot);
accounts.print_accounts_stats("pre_f");
accounts.update_accounts_hash(4, &HashMap::default(), true);
let accounts = f(accounts, current_slot);
accounts.print_accounts_stats("post_f");
assert_load_account(&accounts, current_slot, pubkey, some_lamport);
assert_load_account(&accounts, current_slot, purged_pubkey1, 0);
assert_load_account(&accounts, current_slot, purged_pubkey2, 0);
assert_load_account(&accounts, current_slot, dummy_pubkey, dummy_lamport);
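        // 1222 == some_lamport (223) + dummy_lamport (999); the two purged
        // pubkeys hold zero lamports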
accounts
.verify_bank_hash_and_lamports(4, &HashMap::default(), 1222, true)
.unwrap();
}
#[test]
fn test_accounts_purge_chained_purge_before_snapshot_restore() {
solana_logger::setup();
with_chained_zero_lamport_accounts(|accounts, current_slot| {
accounts.clean_accounts(None);
reconstruct_accounts_db_via_serialization(&accounts, current_slot)
});
}
#[test]
fn test_accounts_purge_chained_purge_after_snapshot_restore() {
solana_logger::setup();
with_chained_zero_lamport_accounts(|accounts, current_slot| {
let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot);
accounts.print_accounts_stats("after_reconstruct");
accounts.clean_accounts(None);
reconstruct_accounts_db_via_serialization(&accounts, current_slot)
});
}
#[test]
#[ignore]
fn test_store_account_stress() {
let slot = 42;
let num_threads = 2;
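        // The smallest file that can still hold one account's metadata (the
        // accounts stored below carry no data)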
let min_file_bytes = std::mem::size_of::<StoredMeta>()
+ std::mem::size_of::<crate::append_vec::AccountMeta>();
let db = Arc::new(AccountsDb::new_sized(Vec::new(), min_file_bytes as u64));
db.add_root(slot);
let thread_hdls: Vec<_> = (0..num_threads)
.map(|_| {
let db = db.clone();
std::thread::Builder::new()
.name("account-writers".to_string())
.spawn(move || {
let pubkey = solana_sdk::pubkey::new_rand();
let mut account = Account::new(1, 0, &pubkey);
let mut i = 0;
loop {
let account_bal = thread_rng().gen_range(1, 99);
account.lamports = account_bal;
db.store_uncached(slot, &[(&pubkey, &account)]);
                            let (account, stored_slot) =
                                db.load_slow(&HashMap::new(), &pubkey).unwrap_or_else(|| {
                                    panic!("Could not fetch stored account {}, iter {}", pubkey, i)
                                });
                            assert_eq!(stored_slot, slot);
                            assert_eq!(account.lamports, account_bal);
i += 1;
}
})
.unwrap()
})
.collect();
for t in thread_hdls {
t.join().unwrap();
}
}
#[test]
fn test_accountsdb_scan_accounts() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let key0 = solana_sdk::pubkey::new_rand();
let account0 = Account::new(1, 0, &key);
db.store_uncached(0, &[(&key0, &account0)]);
let key1 = solana_sdk::pubkey::new_rand();
let account1 = Account::new(2, 0, &key);
db.store_uncached(1, &[(&key1, &account1)]);
let ancestors = vec![(0, 0)].into_iter().collect();
let accounts: Vec<Account> =
db.unchecked_scan_accounts("", &ancestors, |accounts: &mut Vec<Account>, option| {
accounts.push(option.1.account());
});
assert_eq!(accounts, vec![account0]);
let ancestors = vec![(1, 1), (0, 0)].into_iter().collect();
let accounts: Vec<Account> =
db.unchecked_scan_accounts("", &ancestors, |accounts: &mut Vec<Account>, option| {
accounts.push(option.1.account());
});
assert_eq!(accounts.len(), 2);
}
#[test]
fn test_cleanup_key_not_removed() {
solana_logger::setup();
let db = AccountsDb::new_single();
let key = Pubkey::default();
let key0 = solana_sdk::pubkey::new_rand();
let account0 = Account::new(1, 0, &key);
db.store_uncached(0, &[(&key0, &account0)]);
let key1 = solana_sdk::pubkey::new_rand();
let account1 = Account::new(2, 0, &key);
db.store_uncached(1, &[(&key1, &account1)]);
db.print_accounts_stats("pre");
let slots: HashSet<Slot> = vec![1].into_iter().collect();
let purge_keys = vec![(key1, slots)];
db.purge_keys_exact(&purge_keys);
let account2 = Account::new(3, 0, &key);
db.store_uncached(2, &[(&key1, &account2)]);
db.print_accounts_stats("post");
let ancestors = vec![(2, 0)].into_iter().collect();
assert_eq!(db.load_slow(&ancestors, &key1).unwrap().0.lamports, 3);
}
#[test]
fn test_store_large_account() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let data_len = DEFAULT_FILE_SIZE as usize + 7;
let account = Account::new(1, data_len, &key);
db.store_uncached(0, &[(&key, &account)]);
let ancestors = vec![(0, 0)].into_iter().collect();
let ret = db.load_slow(&ancestors, &key).unwrap();
assert_eq!(ret.0.data.len(), data_len);
}
#[test]
fn test_hash_frozen_account_data() {
let account = Account::new(1, 42, &Pubkey::default());
let hash = AccountsDb::hash_frozen_account_data(&account);
assert_ne!(hash, Hash::default()); // Better not be the default Hash
        // Lamports changes do not affect the hash
let mut account_modified = account.clone();
account_modified.lamports -= 1;
assert_eq!(
hash,
AccountsDb::hash_frozen_account_data(&account_modified)
);
        // Rent epoch changes do not affect the hash either
let mut account_modified = account.clone();
account_modified.rent_epoch += 1;
assert_eq!(
hash,
AccountsDb::hash_frozen_account_data(&account_modified)
);
// Account data may not be modified
let mut account_modified = account.clone();
account_modified.data[0] = 42;
assert_ne!(
hash,
AccountsDb::hash_frozen_account_data(&account_modified)
);
// Owner may not be modified
let mut account_modified = account.clone();
account_modified.owner =
Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap();
assert_ne!(
hash,
AccountsDb::hash_frozen_account_data(&account_modified)
);
// Executable may not be modified
let mut account_modified = account;
account_modified.executable = true;
assert_ne!(
hash,
AccountsDb::hash_frozen_account_data(&account_modified)
);
}
#[test]
fn test_frozen_account_lamport_increase() {
let frozen_pubkey =
Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap();
let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let mut account = Account::new(1, 42, &frozen_pubkey);
db.store_uncached(0, &[(&frozen_pubkey, &account)]);
let ancestors = vec![(0, 0)].into_iter().collect();
db.freeze_accounts(&ancestors, &[frozen_pubkey]);
// Store with no account changes is ok
db.store_uncached(0, &[(&frozen_pubkey, &account)]);
// Store with an increase in lamports is ok
account.lamports = 2;
db.store_uncached(0, &[(&frozen_pubkey, &account)]);
        // A store with a decrease that does not go below the frozen amount of lamports is tolerated
account.lamports = 1;
db.store_uncached(0, &[(&frozen_pubkey, &account)]);
        // Stores at or above the frozen value of '1' across different slots are also ok
account.lamports = 3;
db.store_uncached(1, &[(&frozen_pubkey, &account)]);
account.lamports = 2;
db.store_uncached(2, &[(&frozen_pubkey, &account)]);
account.lamports = 1;
db.store_uncached(3, &[(&frozen_pubkey, &account)]);
}
#[test]
#[should_panic(
expected = "Frozen account My11111111111111111111111111111111111111111 modified. Lamports decreased from 1 to 0"
)]
fn test_frozen_account_lamport_decrease() {
let frozen_pubkey =
Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap();
let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let mut account = Account::new(1, 42, &frozen_pubkey);
db.store_uncached(0, &[(&frozen_pubkey, &account)]);
let ancestors = vec![(0, 0)].into_iter().collect();
db.freeze_accounts(&ancestors, &[frozen_pubkey]);
// Store with a decrease below the frozen amount of lamports is not ok
account.lamports -= 1;
db.store_uncached(0, &[(&frozen_pubkey, &account)]);
}
#[test]
#[should_panic(
expected = "Unable to freeze an account that does not exist: My11111111111111111111111111111111111111111"
)]
fn test_frozen_account_nonexistent() {
let frozen_pubkey =
Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap();
let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let ancestors = vec![(0, 0)].into_iter().collect();
db.freeze_accounts(&ancestors, &[frozen_pubkey]);
}
#[test]
#[should_panic(
expected = "Frozen account My11111111111111111111111111111111111111111 modified. Hash changed from 8wHcxDkjiwdrkPAsDnmNrF1UDGJFAtZzPQBSVweY3yRA to JdscGYB1uczVssmYuJusDD1Bfe6wpNeeho8XjcH8inN"
)]
fn test_frozen_account_data_modified() {
let frozen_pubkey =
Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap();
let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let mut account = Account::new(1, 42, &frozen_pubkey);
db.store_uncached(0, &[(&frozen_pubkey, &account)]);
let ancestors = vec![(0, 0)].into_iter().collect();
db.freeze_accounts(&ancestors, &[frozen_pubkey]);
account.data[0] = 42;
db.store_uncached(0, &[(&frozen_pubkey, &account)]);
}
#[test]
fn test_hash_stored_account() {
        // This test uses an UNSAFE trick to detect most account field additions
        // and deletions that are made without updating the hashing code
const ACCOUNT_DATA_LEN: usize = 3;
// the type of InputTuple elements must not contain references;
// they should be simple scalars or data blobs
type InputTuple = (
Slot,
StoredMeta,
AccountMeta,
[u8; ACCOUNT_DATA_LEN],
usize, // for StoredAccountMeta::offset
Hash,
);
const INPUT_LEN: usize = std::mem::size_of::<InputTuple>();
type InputBlob = [u8; INPUT_LEN];
let mut blob: InputBlob = [0u8; INPUT_LEN];
        // spray memory with decreasing counts so that the data layout can be detected.
for (i, byte) in blob.iter_mut().enumerate() {
*byte = (INPUT_LEN - i) as u8;
}
        // UNSAFE: forcibly cast the special byte pattern to actual account fields.
let (slot, meta, account_meta, data, offset, hash): InputTuple =
unsafe { std::mem::transmute::<InputBlob, InputTuple>(blob) };
let stored_account = StoredAccountMeta {
meta: &meta,
account_meta: &account_meta,
data: &data,
offset,
stored_size: CACHE_VIRTUAL_STORED_SIZE,
hash: &hash,
};
let account = stored_account.clone_account();
let expected_account_hash =
Hash::from_str("4StuvYHFd7xuShVXB94uHHvpqGMCaacdZnYB74QQkPA1").unwrap();
assert_eq!(
AccountsDb::hash_stored_account(slot, &stored_account, &ClusterType::Development),
expected_account_hash,
"StoredAccountMeta's data layout might be changed; update hashing if needed."
);
assert_eq!(
AccountsDb::hash_account(
slot,
&account,
&stored_account.meta.pubkey,
&ClusterType::Development
),
expected_account_hash,
"Account-based hashing must be consistent with StoredAccountMeta-based one."
);
}
#[test]
fn test_bank_hash_stats() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let some_data_len = 5;
let some_slot: Slot = 0;
let account = Account::new(1, some_data_len, &key);
let ancestors = vec![(some_slot, 0)].into_iter().collect();
db.store_uncached(some_slot, &[(&key, &account)]);
let mut account = db.load_slow(&ancestors, &key).unwrap().0;
account.lamports -= 1;
account.executable = true;
db.store_uncached(some_slot, &[(&key, &account)]);
db.add_root(some_slot);
let bank_hashes = db.bank_hashes.read().unwrap();
let bank_hash = bank_hashes.get(&some_slot).unwrap();
assert_eq!(bank_hash.stats.num_updated_accounts, 1);
assert_eq!(bank_hash.stats.num_removed_accounts, 1);
assert_eq!(bank_hash.stats.num_lamports_stored, 1);
assert_eq!(bank_hash.stats.total_data_len, 2 * some_data_len as u64);
assert_eq!(bank_hash.stats.num_executable_accounts, 1);
}
#[test]
fn test_verify_bank_hash() {
use BankHashVerificationError::*;
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = solana_sdk::pubkey::new_rand();
let some_data_len = 0;
let some_slot: Slot = 0;
let account = Account::new(1, some_data_len, &key);
let ancestors = vec![(some_slot, 0)].into_iter().collect();
db.store_uncached(some_slot, &[(&key, &account)]);
db.add_root(some_slot);
db.update_accounts_hash_test(some_slot, &ancestors, true);
assert_matches!(
db.verify_bank_hash_and_lamports(some_slot, &ancestors, 1, true),
Ok(_)
);
db.bank_hashes.write().unwrap().remove(&some_slot).unwrap();
assert_matches!(
db.verify_bank_hash_and_lamports(some_slot, &ancestors, 1, true),
Err(MissingBankHash)
);
let some_bank_hash = Hash::new(&[0xca; HASH_BYTES]);
let bank_hash_info = BankHashInfo {
hash: some_bank_hash,
snapshot_hash: Hash::new(&[0xca; HASH_BYTES]),
stats: BankHashStats::default(),
};
db.bank_hashes
.write()
.unwrap()
.insert(some_slot, bank_hash_info);
assert_matches!(
db.verify_bank_hash_and_lamports(some_slot, &ancestors, 1, true),
Err(MismatchedBankHash)
);
}
#[test]
fn test_verify_bank_capitalization() {
use BankHashVerificationError::*;
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = solana_sdk::pubkey::new_rand();
let some_data_len = 0;
let some_slot: Slot = 0;
let account = Account::new(1, some_data_len, &key);
let ancestors = vec![(some_slot, 0)].into_iter().collect();
db.store_uncached(some_slot, &[(&key, &account)]);
db.add_root(some_slot);
db.update_accounts_hash_test(some_slot, &ancestors, true);
assert_matches!(
db.verify_bank_hash_and_lamports(some_slot, &ancestors, 1, true),
Ok(_)
);
let native_account_pubkey = solana_sdk::pubkey::new_rand();
db.store_uncached(
some_slot,
&[(
&native_account_pubkey,
&solana_sdk::native_loader::create_loadable_account("foo", 1),
)],
);
db.update_accounts_hash_test(some_slot, &ancestors, true);
assert_matches!(
db.verify_bank_hash_and_lamports(some_slot, &ancestors, 1, false),
Ok(_)
);
assert_matches!(
db.verify_bank_hash_and_lamports(some_slot, &ancestors, 2, true),
Ok(_)
);
assert_matches!(
db.verify_bank_hash_and_lamports(some_slot, &ancestors, 10, true),
Err(MismatchedTotalLamports(expected, actual)) if expected == 2 && actual == 10
);
}
#[test]
fn test_verify_bank_hash_no_account() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let some_slot: Slot = 0;
let ancestors = vec![(some_slot, 0)].into_iter().collect();
db.bank_hashes
.write()
.unwrap()
.insert(some_slot, BankHashInfo::default());
db.add_root(some_slot);
db.update_accounts_hash_test(some_slot, &ancestors, true);
assert_matches!(
db.verify_bank_hash_and_lamports(some_slot, &ancestors, 0, true),
Ok(_)
);
}
#[test]
fn test_verify_bank_hash_bad_account_hash() {
use BankHashVerificationError::*;
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let some_data_len = 0;
let some_slot: Slot = 0;
let account = Account::new(1, some_data_len, &key);
let ancestors = vec![(some_slot, 0)].into_iter().collect();
let accounts = &[(&key, &account)];
// update AccountsDb's bank hash but discard real account hashes
db.hash_accounts(some_slot, accounts, &ClusterType::Development);
// provide bogus account hashes
let some_hash = Hash::new(&[0xca; HASH_BYTES]);
db.store_accounts_unfrozen(some_slot, accounts, &[some_hash], false);
db.add_root(some_slot);
assert_matches!(
db.verify_bank_hash_and_lamports(some_slot, &ancestors, 1, true),
Err(MismatchedAccountHash)
);
}
#[test]
fn test_storage_finder() {
solana_logger::setup();
let db = AccountsDb::new_sized(Vec::new(), 16 * 1024);
let key = solana_sdk::pubkey::new_rand();
let lamports = 100;
let data_len = 8190;
let account = Account::new(lamports, data_len, &solana_sdk::pubkey::new_rand());
// pre-populate with a smaller empty store
db.create_and_insert_store(1, 8192, "test_storage_finder");
db.store_uncached(1, &[(&key, &account)]);
}
#[test]
fn test_get_snapshot_storages_empty() {
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
assert!(db.get_snapshot_storages(0).is_empty());
}
#[test]
fn test_get_snapshot_storages_only_older_than_or_equal_to_snapshot_slot() {
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account = Account::new(1, 0, &key);
let before_slot = 0;
let base_slot = before_slot + 1;
let after_slot = base_slot + 1;
db.add_root(base_slot);
db.store_uncached(base_slot, &[(&key, &account)]);
assert!(db.get_snapshot_storages(before_slot).is_empty());
assert_eq!(1, db.get_snapshot_storages(base_slot).len());
assert_eq!(1, db.get_snapshot_storages(after_slot).len());
}
#[test]
fn test_get_snapshot_storages_only_non_empty() {
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account = Account::new(1, 0, &key);
let base_slot = 0;
let after_slot = base_slot + 1;
db.store_uncached(base_slot, &[(&key, &account)]);
db.storage
.get_slot_stores(base_slot)
.unwrap()
.write()
.unwrap()
.clear();
db.add_root(base_slot);
assert!(db.get_snapshot_storages(after_slot).is_empty());
db.store_uncached(base_slot, &[(&key, &account)]);
assert_eq!(1, db.get_snapshot_storages(after_slot).len());
}
#[test]
fn test_get_snapshot_storages_only_roots() {
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account = Account::new(1, 0, &key);
let base_slot = 0;
let after_slot = base_slot + 1;
db.store_uncached(base_slot, &[(&key, &account)]);
assert!(db.get_snapshot_storages(after_slot).is_empty());
db.add_root(base_slot);
assert_eq!(1, db.get_snapshot_storages(after_slot).len());
}
#[test]
fn test_get_snapshot_storages_exclude_empty() {
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account = Account::new(1, 0, &key);
let base_slot = 0;
let after_slot = base_slot + 1;
db.store_uncached(base_slot, &[(&key, &account)]);
db.add_root(base_slot);
assert_eq!(1, db.get_snapshot_storages(after_slot).len());
db.storage
.get_slot_stores(0)
.unwrap()
.read()
.unwrap()
.values()
.next()
.unwrap()
.remove_account(0, true);
assert!(db.get_snapshot_storages(after_slot).is_empty());
}
#[test]
#[should_panic(expected = "double remove of account in slot: 0/store: 0!!")]
fn test_storage_remove_account_double_remove() {
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey = solana_sdk::pubkey::new_rand();
let account = Account::new(1, 0, &Account::default().owner);
accounts.store_uncached(0, &[(&pubkey, &account)]);
let storage_entry = accounts
.storage
.get_slot_stores(0)
.unwrap()
.read()
.unwrap()
.values()
.next()
.unwrap()
.clone();
storage_entry.remove_account(0, true);
storage_entry.remove_account(0, true);
}
#[test]
fn test_accounts_purge_long_chained_after_snapshot_restore() {
solana_logger::setup();
let old_lamport = 223;
let zero_lamport = 0;
let no_data = 0;
let owner = Account::default().owner;
let account = Account::new(old_lamport, no_data, &owner);
let account2 = Account::new(old_lamport + 100_001, no_data, &owner);
let account3 = Account::new(old_lamport + 100_002, no_data, &owner);
let dummy_account = Account::new(99_999_999, no_data, &owner);
let zero_lamport_account = Account::new(zero_lamport, no_data, &owner);
let pubkey = solana_sdk::pubkey::new_rand();
let dummy_pubkey = solana_sdk::pubkey::new_rand();
let purged_pubkey1 = solana_sdk::pubkey::new_rand();
let purged_pubkey2 = solana_sdk::pubkey::new_rand();
let mut current_slot = 0;
let accounts = AccountsDb::new_single();
        // create intermediate updates to purged_pubkey1 so that generate_index
        // must add all of the slots as roots at the end, all at once
current_slot += 1;
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
accounts.store_uncached(current_slot, &[(&purged_pubkey1, &account2)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&purged_pubkey1, &account2)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&purged_pubkey1, &account2)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&purged_pubkey1, &zero_lamport_account)]);
accounts.store_uncached(current_slot, &[(&purged_pubkey2, &account3)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&purged_pubkey2, &zero_lamport_account)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&dummy_pubkey, &dummy_account)]);
accounts.add_root(current_slot);
accounts.print_count_and_status("before reconstruct");
let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot);
accounts.print_count_and_status("before purge zero");
accounts.clean_accounts(None);
accounts.print_count_and_status("after purge zero");
assert_load_account(&accounts, current_slot, pubkey, old_lamport);
assert_load_account(&accounts, current_slot, purged_pubkey1, 0);
assert_load_account(&accounts, current_slot, purged_pubkey2, 0);
}
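    /// Exercises clean's ref count handling. `store1_first` controls the
    /// insertion order of pubkey1 and pubkey2 within the first slot;
    /// `store_size` controls whether both accounts fit in a single store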
fn do_full_clean_refcount(store1_first: bool, store_size: u64) {
let pubkey1 = Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap();
let pubkey2 = Pubkey::from_str("My22211111111111111111111111111111111111111").unwrap();
let pubkey3 = Pubkey::from_str("My33311111111111111111111111111111111111111").unwrap();
let old_lamport = 223;
let zero_lamport = 0;
let dummy_lamport = 999_999;
        // size the data so that only one account fits in a 4k store
let data_size = 2200;
let owner = Account::default().owner;
let account = Account::new(old_lamport, data_size, &owner);
let account2 = Account::new(old_lamport + 100_001, data_size, &owner);
let account3 = Account::new(old_lamport + 100_002, data_size, &owner);
let account4 = Account::new(dummy_lamport, data_size, &owner);
let zero_lamport_account = Account::new(zero_lamport, data_size, &owner);
let mut current_slot = 0;
let accounts = AccountsDb::new_sized_no_extra_stores(Vec::new(), store_size);
// A: Initialize AccountsDb with pubkey1 and pubkey2
current_slot += 1;
if store1_first {
accounts.store_uncached(current_slot, &[(&pubkey1, &account)]);
accounts.store_uncached(current_slot, &[(&pubkey2, &account)]);
} else {
accounts.store_uncached(current_slot, &[(&pubkey2, &account)]);
accounts.store_uncached(current_slot, &[(&pubkey1, &account)]);
}
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
info!("post A");
accounts.print_accounts_stats("Post-A");
// B: Test multiple updates to pubkey1 in a single slot/storage
current_slot += 1;
assert_eq!(0, accounts.alive_account_count_in_slot(current_slot));
assert_eq!(1, accounts.ref_count_for_pubkey(&pubkey1));
accounts.store_uncached(current_slot, &[(&pubkey1, &account2)]);
accounts.store_uncached(current_slot, &[(&pubkey1, &account2)]);
assert_eq!(1, accounts.alive_account_count_in_slot(current_slot));
        // Stores to the same pubkey in the same slot only count once towards
        // the ref count
assert_eq!(2, accounts.ref_count_for_pubkey(&pubkey1));
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
accounts.print_accounts_stats("Post-B pre-clean");
accounts.clean_accounts(None);
info!("post B");
accounts.print_accounts_stats("Post-B");
// C: more updates to trigger clean of previous updates
current_slot += 1;
assert_eq!(2, accounts.ref_count_for_pubkey(&pubkey1));
accounts.store_uncached(current_slot, &[(&pubkey1, &account3)]);
accounts.store_uncached(current_slot, &[(&pubkey2, &account3)]);
accounts.store_uncached(current_slot, &[(&pubkey3, &account4)]);
assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1));
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
info!("post C");
accounts.print_accounts_stats("Post-C");
        // D: Make all keys 0-lamport, which cleans all keys
current_slot += 1;
assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1));
accounts.store_uncached(current_slot, &[(&pubkey1, &zero_lamport_account)]);
accounts.store_uncached(current_slot, &[(&pubkey2, &zero_lamport_account)]);
accounts.store_uncached(current_slot, &[(&pubkey3, &zero_lamport_account)]);
let snapshot_stores = accounts.get_snapshot_storages(current_slot);
let total_accounts: usize = snapshot_stores
.iter()
.flatten()
.map(|s| s.all_accounts().len())
.sum();
assert!(!snapshot_stores.is_empty());
assert!(total_accounts > 0);
info!("post D");
accounts.print_accounts_stats("Post-D");
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
accounts.clean_accounts(None);
accounts.print_accounts_stats("Post-D clean");
let total_accounts_post_clean: usize = snapshot_stores
.iter()
.flatten()
.map(|s| s.all_accounts().len())
.sum();
assert_eq!(total_accounts, total_accounts_post_clean);
// should clean all 3 pubkeys
assert_eq!(accounts.ref_count_for_pubkey(&pubkey1), 0);
assert_eq!(accounts.ref_count_for_pubkey(&pubkey2), 0);
assert_eq!(accounts.ref_count_for_pubkey(&pubkey3), 0);
}
#[test]
fn test_full_clean_refcount() {
solana_logger::setup();
        // Set up 3 scenarios which try to differentiate between pubkey1 being in an
        // available slot or a full slot, which would cause a different reset behavior
        // when pubkey1 is cleaned and therefore cause the ref count to be incorrect,
        // preventing a removal of that key.
//
// do stores with a 4mb size so only 1 store is created per slot
do_full_clean_refcount(false, 4 * 1024 * 1024);
        // do stores with a 4k size and store pubkey1 second (store1_first == false)
do_full_clean_refcount(false, 4096);
        // do stores with a 4k size and store pubkey1 first (store1_first == true)
do_full_clean_refcount(true, 4096);
}
#[test]
fn test_accounts_clean_after_snapshot_restore_then_old_revives() {
solana_logger::setup();
let old_lamport = 223;
let zero_lamport = 0;
let no_data = 0;
let dummy_lamport = 999_999;
let owner = Account::default().owner;
let account = Account::new(old_lamport, no_data, &owner);
let account2 = Account::new(old_lamport + 100_001, no_data, &owner);
let account3 = Account::new(old_lamport + 100_002, no_data, &owner);
let dummy_account = Account::new(dummy_lamport, no_data, &owner);
let zero_lamport_account = Account::new(zero_lamport, no_data, &owner);
let pubkey1 = solana_sdk::pubkey::new_rand();
let pubkey2 = solana_sdk::pubkey::new_rand();
let dummy_pubkey = solana_sdk::pubkey::new_rand();
let mut current_slot = 0;
let accounts = AccountsDb::new_single();
// A: Initialize AccountsDb with pubkey1 and pubkey2
current_slot += 1;
accounts.store_uncached(current_slot, &[(&pubkey1, &account)]);
accounts.store_uncached(current_slot, &[(&pubkey2, &account)]);
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
// B: Test multiple updates to pubkey1 in a single slot/storage
current_slot += 1;
assert_eq!(0, accounts.alive_account_count_in_slot(current_slot));
assert_eq!(1, accounts.ref_count_for_pubkey(&pubkey1));
accounts.store_uncached(current_slot, &[(&pubkey1, &account2)]);
accounts.store_uncached(current_slot, &[(&pubkey1, &account2)]);
assert_eq!(1, accounts.alive_account_count_in_slot(current_slot));
        // Stores to the same pubkey in the same slot only count once towards
        // the ref count
assert_eq!(2, accounts.ref_count_for_pubkey(&pubkey1));
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
        // C: Yet more updates to trigger lazy clean of step A
current_slot += 1;
assert_eq!(2, accounts.ref_count_for_pubkey(&pubkey1));
accounts.store_uncached(current_slot, &[(&pubkey1, &account3)]);
assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1));
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
// D: Make pubkey1 0-lamport; also triggers clean of step B
current_slot += 1;
assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1));
accounts.store_uncached(current_slot, &[(&pubkey1, &zero_lamport_account)]);
accounts.clean_accounts(None);
assert_eq!(
// Removed one reference from the dead slot (reference only counted once
// even though there were two stores to the pubkey in that slot)
3, /* == 3 - 1 + 1 */
accounts.ref_count_for_pubkey(&pubkey1)
);
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
// E: Avoid missing bank hash error
current_slot += 1;
accounts.store_uncached(current_slot, &[(&dummy_pubkey, &dummy_account)]);
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
assert_load_account(&accounts, current_slot, pubkey1, zero_lamport);
assert_load_account(&accounts, current_slot, pubkey2, old_lamport);
assert_load_account(&accounts, current_slot, dummy_pubkey, dummy_lamport);
        // At this point, there are no index entries for A and B.
        // If steps C and D were purged, a snapshot restore would cause
        // pubkey1 to be revived with the state from step A.
        // So, the ref count exists to prevent that from happening.
accounts.clean_accounts(None);
let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot);
accounts.clean_accounts(None);
info!("pubkey: {}", pubkey1);
accounts.print_accounts_stats("pre_clean");
assert_load_account(&accounts, current_slot, pubkey1, zero_lamport);
assert_load_account(&accounts, current_slot, pubkey2, old_lamport);
assert_load_account(&accounts, current_slot, dummy_pubkey, dummy_lamport);
// F: Finally, make Step A cleanable
current_slot += 1;
accounts.store_uncached(current_slot, &[(&pubkey2, &account)]);
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
// Do clean
accounts.clean_accounts(None);
        // Ensure pubkey1 is finally cleaned from the index
assert_not_load_account(&accounts, current_slot, pubkey1);
assert_load_account(&accounts, current_slot, pubkey2, old_lamport);
assert_load_account(&accounts, current_slot, dummy_pubkey, dummy_lamport);
}
#[test]
fn test_clean_stored_dead_slots_empty() {
let accounts = AccountsDb::new_single();
let mut dead_slots = HashSet::new();
dead_slots.insert(10);
accounts.clean_stored_dead_slots(&dead_slots, None);
}
#[test]
fn test_shrink_all_slots_none() {
let accounts = AccountsDb::new_single();
for _ in 0..10 {
accounts.shrink_candidate_slots();
}
accounts.shrink_all_slots();
}
#[test]
fn test_shrink_next_slots() {
let mut accounts = AccountsDb::new_single();
accounts.caching_enabled = false;
let mut current_slot = 7;
assert_eq!(
vec![None, None, None],
(0..3)
.map(|_| accounts.next_shrink_slot_v1())
.collect::<Vec<_>>()
);
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
assert_eq!(
vec![Some(7), Some(7), Some(7)],
(0..3)
.map(|_| accounts.next_shrink_slot_v1())
.collect::<Vec<_>>()
);
current_slot += 1;
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
let slots = (0..6)
.map(|_| accounts.next_shrink_slot_v1())
.collect::<Vec<_>>();
// Because the origin of this data is HashMap (not BTreeMap), key order is arbitrary per cycle.
assert!(
vec![Some(7), Some(8), Some(7), Some(8), Some(7), Some(8)] == slots
|| vec![Some(8), Some(7), Some(8), Some(7), Some(8), Some(7)] == slots
);
}
#[test]
fn test_shrink_reset_uncleaned_roots() {
let mut accounts = AccountsDb::new_single();
accounts.caching_enabled = false;
accounts.reset_uncleaned_roots_v1();
assert_eq!(
*accounts.shrink_candidate_slots_v1.lock().unwrap(),
vec![] as Vec<Slot>
);
accounts.get_accounts_delta_hash(0);
accounts.add_root(0);
accounts.get_accounts_delta_hash(1);
accounts.add_root(1);
accounts.get_accounts_delta_hash(2);
accounts.add_root(2);
accounts.reset_uncleaned_roots_v1();
let actual_slots = accounts.shrink_candidate_slots_v1.lock().unwrap().clone();
assert_eq!(actual_slots, vec![] as Vec<Slot>);
accounts.reset_uncleaned_roots_v1();
let mut actual_slots = accounts.shrink_candidate_slots_v1.lock().unwrap().clone();
actual_slots.sort_unstable();
assert_eq!(actual_slots, vec![0, 1, 2]);
accounts.accounts_index.clear_roots();
let mut actual_slots = (0..5)
.map(|_| accounts.next_shrink_slot_v1())
.collect::<Vec<_>>();
actual_slots.sort();
assert_eq!(actual_slots, vec![None, None, Some(0), Some(1), Some(2)],);
}
#[test]
fn test_shrink_stale_slots_processed() {
solana_logger::setup();
let accounts = AccountsDb::new_single();
let pubkey_count = 100;
let pubkeys: Vec<_> = (0..pubkey_count)
.map(|_| solana_sdk::pubkey::new_rand())
.collect();
let some_lamport = 223;
let no_data = 0;
let owner = Account::default().owner;
let account = Account::new(some_lamport, no_data, &owner);
let mut current_slot = 0;
current_slot += 1;
for pubkey in &pubkeys {
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
}
let shrink_slot = current_slot;
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
current_slot += 1;
let pubkey_count_after_shrink = 10;
let updated_pubkeys = &pubkeys[0..pubkey_count - pubkey_count_after_shrink];
for pubkey in updated_pubkeys {
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
}
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
accounts.clean_accounts(None);
assert_eq!(
pubkey_count,
accounts.all_account_count_in_append_vec(shrink_slot)
);
accounts.shrink_all_slots();
assert_eq!(
pubkey_count_after_shrink,
accounts.all_account_count_in_append_vec(shrink_slot)
);
let no_ancestors = HashMap::default();
accounts.update_accounts_hash(current_slot, &no_ancestors, true);
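        // 22300 == pubkey_count (100) * some_lamport (223)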
accounts
.verify_bank_hash_and_lamports(current_slot, &no_ancestors, 22300, true)
.unwrap();
let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot);
accounts
.verify_bank_hash_and_lamports(current_slot, &no_ancestors, 22300, true)
.unwrap();
        // repeating should be a no-op
accounts.shrink_all_slots();
assert_eq!(
pubkey_count_after_shrink,
accounts.all_account_count_in_append_vec(shrink_slot)
);
}
#[test]
fn test_shrink_candidate_slots() {
solana_logger::setup();
let accounts = AccountsDb::new_single();
let pubkey_count = 30000;
let pubkeys: Vec<_> = (0..pubkey_count)
.map(|_| solana_sdk::pubkey::new_rand())
.collect();
let some_lamport = 223;
let no_data = 0;
let owner = Account::default().owner;
let account = Account::new(some_lamport, no_data, &owner);
let mut current_slot = 0;
current_slot += 1;
for pubkey in &pubkeys {
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
}
let shrink_slot = current_slot;
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
current_slot += 1;
let pubkey_count_after_shrink = 25000;
let updated_pubkeys = &pubkeys[0..pubkey_count - pubkey_count_after_shrink];
for pubkey in updated_pubkeys {
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
}
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
accounts.clean_accounts(None);
assert_eq!(
pubkey_count,
accounts.all_account_count_in_append_vec(shrink_slot)
);
        // Only try to shrink stale slots; nothing happens because 25000/30000
        // alive accounts is not a small enough fraction to warrant a shrink
accounts.shrink_candidate_slots();
assert_eq!(
pubkey_count,
accounts.all_account_count_in_append_vec(shrink_slot)
);
// Now, do full-shrink.
accounts.shrink_all_slots();
assert_eq!(
pubkey_count_after_shrink,
accounts.all_account_count_in_append_vec(shrink_slot)
);
}
#[test]
fn test_shrink_stale_slots_skipped() {
solana_logger::setup();
let mut accounts = AccountsDb::new_single();
accounts.caching_enabled = false;
let pubkey_count = 30000;
let pubkeys: Vec<_> = (0..pubkey_count)
.map(|_| solana_sdk::pubkey::new_rand())
.collect();
let some_lamport = 223;
let no_data = 0;
let owner = Account::default().owner;
let account = Account::new(some_lamport, no_data, &owner);
let mut current_slot = 0;
current_slot += 1;
for pubkey in &pubkeys {
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
}
let shrink_slot = current_slot;
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
current_slot += 1;
let pubkey_count_after_shrink = 25000;
let updated_pubkeys = &pubkeys[0..pubkey_count - pubkey_count_after_shrink];
for pubkey in updated_pubkeys {
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
}
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
accounts.clean_accounts(None);
assert_eq!(
pubkey_count,
accounts.all_account_count_in_append_vec(shrink_slot)
);
        // Only try to shrink stale slots.
accounts.shrink_all_stale_slots_v1();
assert_eq!(
pubkey_count,
accounts.all_account_count_in_append_vec(shrink_slot)
);
// Now, do full-shrink.
accounts.shrink_all_slots();
assert_eq!(
pubkey_count_after_shrink,
accounts.all_account_count_in_append_vec(shrink_slot)
);
}
#[test]
fn test_delete_dependencies() {
solana_logger::setup();
let accounts_index = AccountsIndex::default();
let key0 = Pubkey::new_from_array([0u8; 32]);
let key1 = Pubkey::new_from_array([1u8; 32]);
let key2 = Pubkey::new_from_array([2u8; 32]);
let info0 = AccountInfo {
store_id: 0,
offset: 0,
stored_size: 0,
lamports: 0,
};
let info1 = AccountInfo {
store_id: 1,
offset: 0,
stored_size: 0,
lamports: 0,
};
let info2 = AccountInfo {
store_id: 2,
offset: 0,
stored_size: 0,
lamports: 0,
};
let info3 = AccountInfo {
store_id: 3,
offset: 0,
stored_size: 0,
lamports: 0,
};
let mut reclaims = vec![];
accounts_index.upsert(
0,
&key0,
&Pubkey::default(),
&[],
&HashSet::new(),
info0,
&mut reclaims,
);
accounts_index.upsert(
1,
&key0,
&Pubkey::default(),
&[],
&HashSet::new(),
info1.clone(),
&mut reclaims,
);
accounts_index.upsert(
1,
&key1,
&Pubkey::default(),
&[],
&HashSet::new(),
info1,
&mut reclaims,
);
accounts_index.upsert(
2,
&key1,
&Pubkey::default(),
&[],
&HashSet::new(),
info2.clone(),
&mut reclaims,
);
accounts_index.upsert(
2,
&key2,
&Pubkey::default(),
&[],
&HashSet::new(),
info2,
&mut reclaims,
);
accounts_index.upsert(
3,
&key2,
&Pubkey::default(),
&[],
&HashSet::new(),
info3,
&mut reclaims,
);
accounts_index.add_root(0, false);
accounts_index.add_root(1, false);
accounts_index.add_root(2, false);
accounts_index.add_root(3, false);
let mut purges = HashMap::new();
let (key0_entry, _) = accounts_index.get(&key0, None, None).unwrap();
purges.insert(key0, accounts_index.roots_and_ref_count(&key0_entry, None));
let (key1_entry, _) = accounts_index.get(&key1, None, None).unwrap();
purges.insert(key1, accounts_index.roots_and_ref_count(&key1_entry, None));
let (key2_entry, _) = accounts_index.get(&key2, None, None).unwrap();
purges.insert(key2, accounts_index.roots_and_ref_count(&key2_entry, None));
for (key, (list, ref_count)) in &purges {
info!(" purge {} ref_count {} =>", key, ref_count);
for x in list {
info!(" {:?}", x);
}
}
let mut store_counts = HashMap::new();
store_counts.insert(0, (0, HashSet::from_iter(vec![key0])));
store_counts.insert(1, (0, HashSet::from_iter(vec![key0, key1])));
store_counts.insert(2, (0, HashSet::from_iter(vec![key1, key2])));
store_counts.insert(3, (1, HashSet::from_iter(vec![key2])));
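        // Store 3 still has a live account, so key2 cannot be purged; the
        // dependency chain key2 -> store 2 -> key1 -> store 1 -> key0 -> store 0
        // must then keep the earlier stores alive as well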
AccountsDb::calc_delete_dependencies(&purges, &mut store_counts);
let mut stores: Vec<_> = store_counts.keys().cloned().collect();
stores.sort_unstable();
for store in &stores {
info!(
"store: {:?} : {:?}",
store,
store_counts.get(&store).unwrap()
);
}
for x in 0..3 {
assert!(store_counts[&x].0 >= 1);
}
}
#[test]
fn test_account_balance_for_capitalization_normal() {
// system accounts
assert_eq!(
AccountsDb::account_balance_for_capitalization(10, &Pubkey::default(), false, true),
10
);
// any random program data accounts
assert_eq!(
AccountsDb::account_balance_for_capitalization(
10,
&solana_sdk::pubkey::new_rand(),
false,
true,
),
10
);
assert_eq!(
AccountsDb::account_balance_for_capitalization(
10,
&solana_sdk::pubkey::new_rand(),
false,
false,
),
10
);
}
#[test]
fn test_account_balance_for_capitalization_sysvar() {
let normal_sysvar = solana_sdk::account::create_account(
&solana_sdk::slot_history::SlotHistory::default(),
1,
);
assert_eq!(
AccountsDb::account_balance_for_capitalization(
normal_sysvar.lamports,
&normal_sysvar.owner,
normal_sysvar.executable,
false,
),
0
);
assert_eq!(
AccountsDb::account_balance_for_capitalization(
normal_sysvar.lamports,
&normal_sysvar.owner,
normal_sysvar.executable,
true,
),
1
);
        // currently, transactions can send any lamports to sysvars, although this is not sensible.
assert_eq!(
AccountsDb::account_balance_for_capitalization(
10,
&solana_sdk::sysvar::id(),
false,
false
),
9
);
assert_eq!(
AccountsDb::account_balance_for_capitalization(
10,
&solana_sdk::sysvar::id(),
false,
true
),
10
);
}
#[test]
fn test_account_balance_for_capitalization_native_program() {
let normal_native_program = solana_sdk::native_loader::create_loadable_account("foo", 1);
assert_eq!(
AccountsDb::account_balance_for_capitalization(
normal_native_program.lamports,
&normal_native_program.owner,
normal_native_program.executable,
false,
),
0
);
assert_eq!(
AccountsDb::account_balance_for_capitalization(
normal_native_program.lamports,
&normal_native_program.owner,
normal_native_program.executable,
true,
),
1
);
        // test a maliciously assigned bogus native loader account
assert_eq!(
AccountsDb::account_balance_for_capitalization(
1,
&solana_sdk::native_loader::id(),
false,
false,
),
1
);
assert_eq!(
AccountsDb::account_balance_for_capitalization(
1,
&solana_sdk::native_loader::id(),
false,
true,
),
1
);
}
#[test]
fn test_checked_sum_for_capitalization_normal() {
assert_eq!(
AccountsDb::checked_sum_for_capitalization(vec![1, 2].into_iter()),
3
);
}
#[test]
#[should_panic(expected = "overflow is detected while summing capitalization")]
fn test_checked_sum_for_capitalization_overflow() {
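        // The expected value is never actually compared; the sum panics first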
assert_eq!(
AccountsDb::checked_sum_for_capitalization(vec![1, u64::max_value()].into_iter()),
3
);
}
#[test]
fn test_store_overhead() {
solana_logger::setup();
let accounts = AccountsDb::new_single();
let account = Account::default();
let pubkey = solana_sdk::pubkey::new_rand();
accounts.store_uncached(0, &[(&pubkey, &account)]);
let slot_stores = accounts.storage.get_slot_stores(0).unwrap();
let mut total_len = 0;
for (_id, store) in slot_stores.read().unwrap().iter() {
total_len += store.accounts.len();
}
info!("total: {}", total_len);
assert!(total_len < STORE_META_OVERHEAD);
}
#[test]
fn test_store_reuse() {
solana_logger::setup();
let accounts = AccountsDb::new_sized(vec![], 4096);
let size = 100;
let num_accounts: usize = 100;
let mut keys = Vec::new();
for i in 0..num_accounts {
let account = Account::new((i + 1) as u64, size, &Pubkey::default());
let pubkey = solana_sdk::pubkey::new_rand();
accounts.store_uncached(0, &[(&pubkey, &account)]);
keys.push(pubkey);
}
accounts.add_root(0);
for (i, key) in keys[1..].iter().enumerate() {
let account = Account::new((1 + i + num_accounts) as u64, size, &Pubkey::default());
accounts.store_uncached(1, &[(key, &account)]);
}
accounts.add_root(1);
accounts.clean_accounts(None);
accounts.shrink_all_slots();
accounts.print_accounts_stats("post-shrink");
let num_stores = accounts.recycle_stores.read().unwrap().entry_count();
assert!(num_stores > 0);
let mut account_refs = Vec::new();
let num_to_store = 20;
for (i, key) in keys[..num_to_store].iter().enumerate() {
let account = Account::new(
(1 + i + 2 * num_accounts) as u64,
i + 20,
&Pubkey::default(),
);
accounts.store_uncached(2, &[(key, &account)]);
account_refs.push(account);
}
assert!(accounts.recycle_stores.read().unwrap().entry_count() < num_stores);
accounts.print_accounts_stats("post-store");
let mut ancestors = HashMap::new();
ancestors.insert(1, 0);
ancestors.insert(2, 1);
for (key, account_ref) in keys[..num_to_store].iter().zip(account_refs) {
assert_eq!(accounts.load_slow(&ancestors, key).unwrap().0, account_ref);
}
}
#[test]
fn test_zero_lamport_new_root_not_cleaned() {
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let account_key = Pubkey::new_unique();
let zero_lamport_account = Account::new(0, 0, &Account::default().owner);
// Store zero lamport account into slots 0 and 1, root both slots
db.store_uncached(0, &[(&account_key, &zero_lamport_account)]);
db.store_uncached(1, &[(&account_key, &zero_lamport_account)]);
db.get_accounts_delta_hash(0);
db.add_root(0);
db.get_accounts_delta_hash(1);
db.add_root(1);
// Only clean zero lamport accounts up to slot 0
db.clean_accounts(Some(0));
// Should still be able to find zero lamport account in slot 1
assert_eq!(
db.load_slow(&HashMap::new(), &account_key),
Some((zero_lamport_account, 1))
);
}
#[test]
fn test_store_load_cached() {
let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development);
db.caching_enabled = true;
let key = Pubkey::default();
let account0 = Account::new(1, 0, &key);
let slot = 0;
db.store_cached(slot, &[(&key, &account0)]);
// Load with no ancestors and no root will return nothing
assert!(db.load_slow(&HashMap::new(), &key).is_none());
// Load with ancestors not equal to `slot` will return nothing
let ancestors = vec![(slot + 1, 1)].into_iter().collect();
assert!(db.load_slow(&ancestors, &key).is_none());
// Load with ancestors equal to `slot` will return the account
let ancestors = vec![(slot, 1)].into_iter().collect();
assert_eq!(
db.load_slow(&ancestors, &key),
Some((account0.clone(), slot))
);
// Adding root will return the account even without ancestors
db.add_root(slot);
assert_eq!(db.load_slow(&HashMap::new(), &key), Some((account0, slot)));
}
#[test]
fn test_store_flush_load_cached() {
let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development);
db.caching_enabled = true;
let key = Pubkey::default();
let account0 = Account::new(1, 0, &key);
let slot = 0;
db.store_cached(slot, &[(&key, &account0)]);
db.mark_slot_frozen(slot);
        // No root was added yet, so an ancestor is required to find
        // the account
db.flush_accounts_cache(true, None);
let ancestors = vec![(slot, 1)].into_iter().collect();
assert_eq!(
db.load_slow(&ancestors, &key),
Some((account0.clone(), slot))
);
// Add root then flush
db.add_root(slot);
db.flush_accounts_cache(true, None);
assert_eq!(db.load_slow(&HashMap::new(), &key), Some((account0, slot)));
}
#[test]
fn test_flush_accounts_cache() {
let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development);
db.caching_enabled = true;
let account0 = Account::new(1, 0, &Pubkey::default());
let unrooted_slot = 4;
let root5 = 5;
let root6 = 6;
let unrooted_key = solana_sdk::pubkey::new_rand();
let key5 = solana_sdk::pubkey::new_rand();
let key6 = solana_sdk::pubkey::new_rand();
db.store_cached(unrooted_slot, &[(&unrooted_key, &account0)]);
db.store_cached(root5, &[(&key5, &account0)]);
db.store_cached(root6, &[(&key6, &account0)]);
for slot in &[unrooted_slot, root5, root6] {
db.mark_slot_frozen(*slot);
}
db.add_root(root5);
db.add_root(root6);
// Unrooted slot should be able to be fetched before the flush
let ancestors = vec![(unrooted_slot, 1)].into_iter().collect();
assert_eq!(
db.load_slow(&ancestors, &unrooted_key),
Some((account0.clone(), unrooted_slot))
);
db.flush_accounts_cache(true, None);
// After the flush, the unrooted slot is still in the cache
assert!(db.load_slow(&ancestors, &unrooted_key).is_some());
assert!(db
.accounts_index
.get_account_read_entry(&unrooted_key)
.is_some());
assert_eq!(db.accounts_cache.num_slots(), 1);
assert!(db.accounts_cache.slot_cache(unrooted_slot).is_some());
assert_eq!(
db.load_slow(&HashMap::new(), &key5),
Some((account0.clone(), root5))
);
assert_eq!(
db.load_slow(&HashMap::new(), &key6),
Some((account0, root6))
);
}
#[test]
fn test_flush_accounts_cache_if_needed() {
run_test_flush_accounts_cache_if_needed(0, 2 * MAX_CACHE_SLOTS);
run_test_flush_accounts_cache_if_needed(2 * MAX_CACHE_SLOTS, 0);
run_test_flush_accounts_cache_if_needed(MAX_CACHE_SLOTS - 1, 0);
run_test_flush_accounts_cache_if_needed(0, MAX_CACHE_SLOTS - 1);
run_test_flush_accounts_cache_if_needed(MAX_CACHE_SLOTS, 0);
run_test_flush_accounts_cache_if_needed(0, MAX_CACHE_SLOTS);
run_test_flush_accounts_cache_if_needed(2 * MAX_CACHE_SLOTS, 2 * MAX_CACHE_SLOTS);
run_test_flush_accounts_cache_if_needed(MAX_CACHE_SLOTS - 1, MAX_CACHE_SLOTS - 1);
run_test_flush_accounts_cache_if_needed(MAX_CACHE_SLOTS, MAX_CACHE_SLOTS);
}
fn run_test_flush_accounts_cache_if_needed(num_roots: usize, num_unrooted: usize) {
let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development);
db.caching_enabled = true;
let account0 = Account::new(1, 0, &Pubkey::default());
let mut keys = vec![];
let num_slots = 2 * MAX_CACHE_SLOTS;
for i in 0..num_roots + num_unrooted {
let key = Pubkey::new_unique();
db.store_cached(i as Slot, &[(&key, &account0)]);
keys.push(key);
db.mark_slot_frozen(i as Slot);
if i < num_roots {
db.add_root(i as Slot);
}
}
db.flush_accounts_cache(false, None);
let total_slots = num_roots + num_unrooted;
// If the total number of slots is <= the max cache size, nothing is flushed from the cache
if total_slots <= MAX_CACHE_SLOTS {
assert_eq!(db.accounts_cache.num_slots(), total_slots);
} else {
// Otherwise, all the roots are flushed, and only at most MAX_CACHE_SLOTS
// of the unrooted slots are kept in the cache
let expected_size = std::cmp::min(num_unrooted, MAX_CACHE_SLOTS);
if expected_size > 0 {
for unrooted_slot in total_slots - expected_size..total_slots {
assert!(db
.accounts_cache
.slot_cache(unrooted_slot as Slot)
.is_some());
}
}
}
// Should still be able to fetch all the accounts after flush
for (slot, key) in (0..num_slots as Slot).zip(keys) {
let ancestors = if slot < num_roots as Slot {
HashMap::new()
} else {
vec![(slot, 1)].into_iter().collect()
};
assert_eq!(
db.load_slow(&ancestors, &key),
Some((account0.clone(), slot))
);
}
}
fn slot_stores(db: &AccountsDb, slot: Slot) -> Vec<Arc<AccountStorageEntry>> {
db.storage
.get_slot_storage_entries(slot)
.unwrap_or_default()
}
#[test]
fn test_flush_cache_clean() {
let caching_enabled = true;
let db = Arc::new(AccountsDb::new_with_config(
Vec::new(),
&ClusterType::Development,
HashSet::new(),
caching_enabled,
));
let account_key = Pubkey::new_unique();
let zero_lamport_account = Account::new(0, 0, &Account::default().owner);
let slot1_account = Account::new(1, 1, &Account::default().owner);
db.store_cached(0, &[(&account_key, &zero_lamport_account)]);
db.store_cached(1, &[(&account_key, &slot1_account)]);
db.add_root(0);
db.add_root(1);
// Clean should not remove anything yet as nothing has been flushed
db.clean_accounts(None);
let account = db
.do_load(&Ancestors::default(), &account_key, Some(0))
.unwrap();
assert_eq!(account.0.lamports, 0);
// Flush, then clean again. Should not need another root to initiate the cleaning
// because `accounts_index.uncleaned_roots` should be correct
db.flush_accounts_cache(true, None);
db.clean_accounts(None);
assert!(db
.do_load(&Ancestors::default(), &account_key, Some(0))
.is_none());
}
#[test]
fn test_flush_cache_dont_clean_zero_lamport_account() {
let caching_enabled = true;
let db = Arc::new(AccountsDb::new_with_config(
Vec::new(),
&ClusterType::Development,
HashSet::new(),
caching_enabled,
));
let zero_lamport_account_key = Pubkey::new_unique();
let other_account_key = Pubkey::new_unique();
let original_lamports = 1;
let slot0_account = Account::new(original_lamports, 1, &Account::default().owner);
let zero_lamport_account = Account::new(0, 0, &Account::default().owner);
// Store into slot 0, and then flush the slot to storage
db.store_cached(0, &[(&zero_lamport_account_key, &slot0_account)]);
// Second key keeps other lamport account entry for slot 0 alive,
// preventing clean of the zero_lamport_account in slot 1.
db.store_cached(0, &[(&other_account_key, &slot0_account)]);
db.add_root(0);
db.flush_accounts_cache(true, None);
assert!(!db.storage.get_slot_storage_entries(0).unwrap().is_empty());
// Store into slot 1, a dummy slot that will be dead and purged before flush
db.store_cached(1, &[(&zero_lamport_account_key, &zero_lamport_account)]);
// Store into slot 2, which makes all updates from slot 1 outdated.
// This means slot 1 is a dead slot. Later, slot 1 will be cleaned/purged
// before it even reaches storage, but this purge of slot 1 should not affect
// the refcount of `zero_lamport_account_key` because cached keys do not bump
// the refcount in the index. This means clean should *not* remove
// `zero_lamport_account_key` from slot 2
db.store_cached(2, &[(&zero_lamport_account_key, &zero_lamport_account)]);
db.add_root(1);
db.add_root(2);
// Flush, then clean. Should not need another root to initiate the cleaning
// because `accounts_index.uncleaned_roots` should be correct
db.flush_accounts_cache(true, None);
db.clean_accounts(None);
// The `zero_lamport_account_key` is still alive in slot 1, so refcount for the
// pubkey should be 2
assert_eq!(
db.accounts_index
.ref_count_from_storage(&zero_lamport_account_key),
2
);
assert_eq!(
db.accounts_index.ref_count_from_storage(&other_account_key),
1
);
// The zero-lamport account in slot 2 should not be purged yet, because the
// entry in slot 1 is blocking cleanup of the zero-lamport account.
let max_root = None;
assert_eq!(
db.do_load(&Ancestors::default(), &zero_lamport_account_key, max_root,)
.unwrap()
.0
.lamports,
0
);
}
struct ScanTracker {
t_scan: JoinHandle<()>,
exit: Arc<AtomicBool>,
}
impl ScanTracker {
fn exit(self) -> thread::Result<()> {
self.exit.store(true, Ordering::Relaxed);
self.t_scan.join()
}
}
fn setup_scan(
db: Arc<AccountsDb>,
scan_ancestors: Arc<Ancestors>,
stall_key: Pubkey,
) -> ScanTracker {
let exit = Arc::new(AtomicBool::new(false));
let exit_ = exit.clone();
let ready = Arc::new(AtomicBool::new(false));
let ready_ = ready.clone();
let t_scan = Builder::new()
.name("scan".to_string())
.spawn(move || {
db.scan_accounts(
&scan_ancestors,
|_collector: &mut Vec<(Pubkey, Account)>, maybe_account| {
ready_.store(true, Ordering::Relaxed);
if let Some((pubkey, _, _)) = maybe_account {
if *pubkey == stall_key {
loop {
if exit_.load(Ordering::Relaxed) {
break;
} else {
sleep(Duration::from_millis(10));
}
}
}
}
},
);
})
.unwrap();
// Wait for scan to start
while !ready.load(Ordering::Relaxed) {
sleep(Duration::from_millis(10));
}
ScanTracker { t_scan, exit }
}
#[test]
fn test_scan_flush_accounts_cache_then_clean_drop() {
let caching_enabled = true;
let db = Arc::new(AccountsDb::new_with_config(
Vec::new(),
&ClusterType::Development,
HashSet::new(),
caching_enabled,
));
let account_key = Pubkey::new_unique();
let account_key2 = Pubkey::new_unique();
let zero_lamport_account = Account::new(0, 0, &Account::default().owner);
let slot1_account = Account::new(1, 1, &Account::default().owner);
let slot2_account = Account::new(2, 1, &Account::default().owner);
/*
    Store zero lamport account into slots 0, 1, 2 where
    root slots are 0, 2, and slot 1 is unrooted.
          0 (root)
         / \
        1   2 (root)
*/
db.store_cached(0, &[(&account_key, &zero_lamport_account)]);
db.store_cached(1, &[(&account_key, &slot1_account)]);
// Fodder for the scan so that the lock on `account_key` is not held
db.store_cached(1, &[(&account_key2, &slot1_account)]);
db.store_cached(2, &[(&account_key, &slot2_account)]);
db.get_accounts_delta_hash(0);
let max_scan_root = 0;
db.add_root(max_scan_root);
let scan_ancestors: Arc<Ancestors> = Arc::new(vec![(0, 1), (1, 1)].into_iter().collect());
let scan_tracker = setup_scan(db.clone(), scan_ancestors.clone(), account_key2);
// Add a new root 2
let new_root = 2;
db.get_accounts_delta_hash(new_root);
db.add_root(new_root);
// Check that the scan is properly set up
assert_eq!(
db.accounts_index.min_ongoing_scan_root().unwrap(),
max_scan_root
);
// If we specify a requested_flush_root == 2, then `slot 2 <= max_flush_slot` will
// be flushed even though `slot 2 > max_scan_root`. The unrooted slot 1 should
// remain in the cache
db.flush_accounts_cache(true, Some(new_root));
assert_eq!(db.accounts_cache.num_slots(), 1);
assert!(db.accounts_cache.slot_cache(1).is_some());
// Intra cache cleaning should not clean the entry for `account_key` from slot 0,
// even though it was updated in slot `2` because of the ongoing scan
let account = db
.do_load(&Ancestors::default(), &account_key, Some(0))
.unwrap();
assert_eq!(account.0.lamports, zero_lamport_account.lamports);
// Run clean, unrooted slot 1 should not be purged, and still readable from the cache,
// because we're still doing a scan on it.
db.clean_accounts(None);
let account = db
.do_load(&scan_ancestors, &account_key, Some(max_scan_root))
.unwrap();
assert_eq!(account.0.lamports, slot1_account.lamports);
// When the scan is over, clean should not panic and should not purge something
// still in the cache.
scan_tracker.exit().unwrap();
db.clean_accounts(None);
let account = db
.do_load(&scan_ancestors, &account_key, Some(max_scan_root))
.unwrap();
assert_eq!(account.0.lamports, slot1_account.lamports);
// Simulate dropping the bank, which finally removes the slot from the cache
db.purge_slot(1);
assert!(db
.do_load(&scan_ancestors, &account_key, Some(max_scan_root))
.is_none());
}
#[test]
fn test_alive_bytes() {
let caching_enabled = true;
let accounts_db = AccountsDb::new_with_config(
Vec::new(),
&ClusterType::Development,
HashSet::new(),
caching_enabled,
);
let slot: Slot = 0;
let num_keys = 10;
for data_size in 0..num_keys {
let account = Account::new(1, data_size, &Pubkey::default());
accounts_db.store_cached(slot, &[(&Pubkey::new_unique(), &account)]);
}
accounts_db.add_root(slot);
accounts_db.flush_accounts_cache(true, None);
let mut storage_maps: Vec<Arc<AccountStorageEntry>> = accounts_db
.storage
.get_slot_storage_entries(slot)
.unwrap_or_default();
// Flushing cache should only create one storage entry
assert_eq!(storage_maps.len(), 1);
let storage0 = storage_maps.pop().unwrap();
let accounts = storage0.all_accounts();
for account in accounts {
let before_size = storage0.alive_bytes.load(Ordering::Relaxed);
let account_info = accounts_db
.accounts_index
.get_account_read_entry(&account.meta.pubkey)
.map(|locked_entry| {
// Should only be one entry per key, since every key was only stored to slot 0
locked_entry.slot_list()[0].clone()
})
.unwrap();
let removed_data_size = account_info.1.stored_size;
// Fetching the account from storage should return the same
// stored size as in the index.
assert_eq!(removed_data_size, account.stored_size);
assert_eq!(account_info.0, slot);
let reclaims = vec![account_info];
accounts_db.remove_dead_accounts(&reclaims, None, None, true);
let after_size = storage0.alive_bytes.load(Ordering::Relaxed);
assert_eq!(before_size, after_size + account.stored_size);
}
}
fn setup_accounts_db_cache_clean(
num_slots: usize,
scan_slot: Option<Slot>,
) -> (Arc<AccountsDb>, Vec<Pubkey>, Vec<Slot>, Option<ScanTracker>) {
let caching_enabled = true;
let accounts_db = Arc::new(AccountsDb::new_with_config(
Vec::new(),
&ClusterType::Development,
HashSet::new(),
caching_enabled,
));
let slots: Vec<_> = (0..num_slots as Slot).into_iter().collect();
let stall_slot = num_slots as Slot;
let scan_stall_key = Pubkey::new_unique();
let keys: Vec<Pubkey> = std::iter::repeat_with(Pubkey::new_unique)
.take(num_slots)
.collect();
if scan_slot.is_some() {
accounts_db.store_cached(
// Store it in a slot that isn't returned in `slots`
stall_slot,
&[(&scan_stall_key, &Account::new(1, 0, &Pubkey::default()))],
);
}
// Store keys into slots 0..num_slots: slot `s` stores keys[s..], so key `i`
// is written to every slot in 0..=i
let mut scan_tracker = None;
for slot in &slots {
for key in &keys[*slot as usize..] {
accounts_db.store_cached(*slot, &[(key, &Account::new(1, 0, &Pubkey::default()))]);
}
accounts_db.add_root(*slot as Slot);
if Some(*slot) == scan_slot {
let ancestors = Arc::new(vec![(stall_slot, 1), (*slot, 1)].into_iter().collect());
scan_tracker = Some(setup_scan(accounts_db.clone(), ancestors, scan_stall_key));
assert_eq!(
accounts_db.accounts_index.min_ongoing_scan_root().unwrap(),
*slot
);
}
}
accounts_db.accounts_cache.remove_slot(stall_slot);
// If there are <= MAX_CACHE_SLOTS slots in the cache, none should be flushed
if accounts_db.accounts_cache.num_slots() <= MAX_CACHE_SLOTS {
accounts_db.flush_accounts_cache(false, None);
assert_eq!(accounts_db.accounts_cache.num_slots(), num_slots);
}
(accounts_db, keys, slots, scan_tracker)
}
#[test]
fn test_accounts_db_cache_clean_dead_slots() {
let num_slots = 10;
let (accounts_db, keys, mut slots, _) = setup_accounts_db_cache_clean(num_slots, None);
let last_dead_slot = (num_slots - 1) as Slot;
assert_eq!(*slots.last().unwrap(), last_dead_slot);
let alive_slot = last_dead_slot + 1;
slots.push(alive_slot);
for key in &keys {
// Store a slot that overwrites all previous keys, rendering all previous keys dead
accounts_db.store_cached(
alive_slot,
&[(key, &Account::new(1, 0, &Pubkey::default()))],
);
accounts_db.add_root(alive_slot);
}
// Before the flush, we can find entries in the database for slots < alive_slot if we specify
// a smaller max root
for key in &keys {
assert!(accounts_db
.do_load(&Ancestors::default(), key, Some(last_dead_slot))
.is_some());
}
// If no `max_clean_root` is specified, cleaning should purge all flushed slots
accounts_db.flush_accounts_cache(true, None);
assert_eq!(accounts_db.accounts_cache.num_slots(), 0);
let mut uncleaned_roots = accounts_db
.accounts_index
.clear_uncleaned_roots(None)
.into_iter()
.collect::<Vec<_>>();
uncleaned_roots.sort_unstable();
assert_eq!(uncleaned_roots, slots);
assert_eq!(
accounts_db.accounts_cache.fetch_max_flush_root(),
alive_slot,
);
// Specifying a max_root < alive_slot, should not return any more entries,
// as those have been purged from the accounts index for the dead slots.
for key in &keys {
assert!(accounts_db
.do_load(&Ancestors::default(), key, Some(last_dead_slot))
.is_none());
}
// Each dead slot should have no entries left in storage, since all their accounts
// were cleaned due to the later updates in `alive_slot`; `alive_slot` itself keeps every key
for slot in &slots {
if let ScanStorageResult::Stored(slot_accounts) = accounts_db.scan_account_storage(
*slot as Slot,
|_| Some(0),
|slot_accounts: &DashSet<Pubkey>, loaded_account: LoadedAccount| {
slot_accounts.insert(*loaded_account.pubkey());
},
) {
if *slot == alive_slot {
assert_eq!(slot_accounts.len(), keys.len());
} else {
assert!(slot_accounts.is_empty());
}
} else {
panic!("Expected slot to be in storage, not cache");
}
}
}
#[test]
fn test_accounts_db_cache_clean() {
let (accounts_db, keys, slots, _) = setup_accounts_db_cache_clean(10, None);
// If no `max_clean_root` is specified, cleaning should purge all flushed slots
accounts_db.flush_accounts_cache(true, None);
assert_eq!(accounts_db.accounts_cache.num_slots(), 0);
let mut uncleaned_roots = accounts_db
.accounts_index
.clear_uncleaned_roots(None)
.into_iter()
.collect::<Vec<_>>();
uncleaned_roots.sort_unstable();
assert_eq!(uncleaned_roots, slots);
assert_eq!(
accounts_db.accounts_cache.fetch_max_flush_root(),
*slots.last().unwrap()
);
// Each slot should only have one entry in the storage, since all other accounts were
// cleaned due to later updates
for slot in &slots {
if let ScanStorageResult::Stored(slot_account) = accounts_db.scan_account_storage(
*slot as Slot,
|_| Some(0),
|slot_account: &Arc<RwLock<Pubkey>>, loaded_account: LoadedAccount| {
*slot_account.write().unwrap() = *loaded_account.pubkey();
},
) {
assert_eq!(*slot_account.read().unwrap(), keys[*slot as usize]);
} else {
panic!("Everything should have been flushed")
}
}
}
fn run_test_accounts_db_cache_clean_max_root(
num_slots: usize,
requested_flush_root: Slot,
scan_root: Option<Slot>,
) {
assert!(requested_flush_root < (num_slots as Slot));
let (accounts_db, keys, slots, scan_tracker) =
setup_accounts_db_cache_clean(num_slots, scan_root);
let is_cache_at_limit = num_slots - requested_flush_root as usize - 1 > MAX_CACHE_SLOTS;
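// Worked example: the cache-limit tests below call this with
// `num_slots = MAX_CACHE_SLOTS + requested_flush_root + 2` and
// `requested_flush_root = 5`, so
// `num_slots - requested_flush_root - 1 == MAX_CACHE_SLOTS + 1 > MAX_CACHE_SLOTS`,
// making `is_cache_at_limit` true.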
// If:
// 1) `requested_flush_root` is specified, and
// 2) we are not at the cache limit, i.e. `is_cache_at_limit == false`, then
// `flush_accounts_cache()` should clean and flush only slots <= requested_flush_root.
accounts_db.flush_accounts_cache(true, Some(requested_flush_root));
if !is_cache_at_limit {
// Should flush all slots between 0..=requested_flush_root
assert_eq!(
accounts_db.accounts_cache.num_slots(),
slots.len() - requested_flush_root as usize - 1
);
} else {
// Otherwise, if we are at the cache limit, all roots will be flushed
assert_eq!(accounts_db.accounts_cache.num_slots(), 0,);
}
let mut uncleaned_roots = accounts_db
.accounts_index
.clear_uncleaned_roots(None)
.into_iter()
.collect::<Vec<_>>();
uncleaned_roots.sort_unstable();
let expected_max_flushed_root = if !is_cache_at_limit {
// Should flush all slots between 0..=requested_flush_root
requested_flush_root
} else {
// Otherwise, if we are at the cache limit, all roots will be flushed
num_slots as Slot - 1
};
assert_eq!(
uncleaned_roots,
slots[0..=expected_max_flushed_root as usize].to_vec()
);
assert_eq!(
accounts_db.accounts_cache.fetch_max_flush_root(),
expected_max_flushed_root,
);
for slot in &slots {
let slot_accounts = accounts_db.scan_account_storage(
*slot as Slot,
|loaded_account: LoadedAccount| {
if is_cache_at_limit {
panic!(
"When cache is at limit, all roots should have been flushed to storage"
);
}
// All slots <= requested_flush_root should have been flushed, regardless
// of ongoing scans
assert!(*slot > requested_flush_root);
Some(*loaded_account.pubkey())
},
|slot_accounts: &DashSet<Pubkey>, loaded_account: LoadedAccount| {
slot_accounts.insert(*loaded_account.pubkey());
if !is_cache_at_limit {
// Only true when the limit hasn't been reached and there are still
// slots left in the cache
assert!(*slot <= requested_flush_root);
}
},
);
let slot_accounts = match slot_accounts {
ScanStorageResult::Cached(slot_accounts) => {
slot_accounts.into_iter().collect::<HashSet<Pubkey>>()
}
ScanStorageResult::Stored(slot_accounts) => {
slot_accounts.into_iter().collect::<HashSet<Pubkey>>()
}
};
let expected_accounts =
if *slot >= requested_flush_root || *slot >= scan_root.unwrap_or(Slot::MAX) {
// 1) If slot > `requested_flush_root`, then either:
// a) If `is_cache_at_limit == false`, still in the cache
// b) if `is_cache_at_limit == true`, were not cleaned before being flushed to storage.
//
// In both cases all the *original* updates at index `slot` were uncleaned and thus
// should be discoverable by this scan.
//
// 2) If slot == `requested_flush_root`, the slot was not cleaned before being flushed to storage,
// so it also contains all the original updates.
//
// 3) If *slot >= scan_root, then we should not clean it either
keys[*slot as usize..]
.iter()
.cloned()
.collect::<HashSet<Pubkey>>()
} else {
// Slots less than `requested_flush_root` and `scan_root` were cleaned in the cache before being flushed
// to storage, should only contain one account
std::iter::once(keys[*slot as usize])
.into_iter()
.collect::<HashSet<Pubkey>>()
};
assert_eq!(slot_accounts, expected_accounts);
}
if let Some(scan_tracker) = scan_tracker {
scan_tracker.exit().unwrap();
}
}
#[test]
fn test_accounts_db_cache_clean_max_root() {
let requested_flush_root = 5;
run_test_accounts_db_cache_clean_max_root(10, requested_flush_root, None);
}
#[test]
fn test_accounts_db_cache_clean_max_root_with_scan() {
let requested_flush_root = 5;
run_test_accounts_db_cache_clean_max_root(
10,
requested_flush_root,
Some(requested_flush_root - 1),
);
run_test_accounts_db_cache_clean_max_root(
10,
requested_flush_root,
Some(requested_flush_root + 1),
);
}
#[test]
fn test_accounts_db_cache_clean_max_root_with_cache_limit_hit() {
let requested_flush_root = 5;
// Test that if there are > MAX_CACHE_SLOTS in the cache after flush, then more roots
// will be flushed
run_test_accounts_db_cache_clean_max_root(
MAX_CACHE_SLOTS + requested_flush_root as usize + 2,
requested_flush_root,
None,
);
}
#[test]
fn test_accounts_db_cache_clean_max_root_with_cache_limit_hit_and_scan() {
let requested_flush_root = 5;
// Test that if there are > MAX_CACHE_SLOTS in the cache after flush, then more roots
// will be flushed
run_test_accounts_db_cache_clean_max_root(
MAX_CACHE_SLOTS + requested_flush_root as usize + 2,
requested_flush_root,
Some(requested_flush_root - 1),
);
run_test_accounts_db_cache_clean_max_root(
MAX_CACHE_SLOTS + requested_flush_root as usize + 2,
requested_flush_root,
Some(requested_flush_root + 1),
);
}
fn run_flush_rooted_accounts_cache(should_clean: bool) {
let num_slots = 10;
let (accounts_db, keys, slots, _) = setup_accounts_db_cache_clean(num_slots, None);
let mut cleaned_bytes = 0;
let mut cleaned_accounts = 0;
let should_clean_tracker = if should_clean {
Some((&mut cleaned_bytes, &mut cleaned_accounts))
} else {
None
};
// If no cleaning is specified, then flush everything
accounts_db.flush_rooted_accounts_cache(None, should_clean_tracker);
for slot in &slots {
let slot_accounts = if let ScanStorageResult::Stored(slot_accounts) = accounts_db
.scan_account_storage(
*slot as Slot,
|_| Some(0),
|slot_account: &DashSet<Pubkey>, loaded_account: LoadedAccount| {
slot_account.insert(*loaded_account.pubkey());
},
) {
slot_accounts.into_iter().collect::<HashSet<Pubkey>>()
} else {
panic!("All roots should have been flushed to storage");
};
if !should_clean || slot == slots.last().unwrap() {
// The slot was not cleaned before being flushed to storage,
// so it also contains all the original updates.
assert_eq!(
slot_accounts,
keys[*slot as usize..]
.iter()
.cloned()
.collect::<HashSet<Pubkey>>()
);
} else {
// If clean was specified, only the latest slot should have all the updates.
// All these other slots have been cleaned before flush
assert_eq!(
slot_accounts,
std::iter::once(keys[*slot as usize])
.into_iter()
.collect::<HashSet<Pubkey>>()
);
}
}
}
#[test]
fn test_flush_rooted_accounts_cache_with_clean() {
run_flush_rooted_accounts_cache(true);
}
#[test]
fn test_flush_rooted_accounts_cache_without_clean() {
run_flush_rooted_accounts_cache(false);
}
fn run_test_shrink_unref(do_intra_cache_clean: bool) {
// Enable caching so that we use the straightforward implementation
// of shrink that will shrink all candidate slots
let caching_enabled = true;
let db = AccountsDb::new_with_config(
Vec::new(),
&ClusterType::Development,
HashSet::default(),
caching_enabled,
);
let account_key1 = Pubkey::new_unique();
let account_key2 = Pubkey::new_unique();
let account1 = Account::new(1, 0, &Account::default().owner);
// Store into slot 0
db.store_cached(0, &[(&account_key1, &account1)]);
db.store_cached(0, &[(&account_key2, &account1)]);
db.add_root(0);
if !do_intra_cache_clean {
// If we don't want the cache doing purges before flush,
// then we cannot flush multiple roots at once, otherwise the later
// roots will clean the earlier roots before they are stored.
// Thus flush the roots individually
db.flush_accounts_cache(true, None);
// Add an additional ref within the same slot to pubkey 1
db.store_uncached(0, &[(&account_key1, &account1)]);
}
// Make account_key1 in slot 0 outdated by updating in rooted slot 1
db.store_cached(1, &[(&account_key1, &account1)]);
db.add_root(1);
// Flushes all roots
db.flush_accounts_cache(true, None);
db.get_accounts_delta_hash(0);
db.get_accounts_delta_hash(1);
// Clean to remove outdated entry from slot 0
db.clean_accounts(Some(1));
// Shrink Slot 0
let mut slot0_stores = db.storage.get_slot_storage_entries(0).unwrap();
assert_eq!(slot0_stores.len(), 1);
let slot0_store = slot0_stores.pop().unwrap();
{
let mut shrink_candidate_slots = db.shrink_candidate_slots.lock().unwrap();
shrink_candidate_slots
.entry(0)
.or_default()
.insert(slot0_store.append_vec_id(), slot0_store);
}
db.shrink_candidate_slots();
// Make slot 0 dead by updating the remaining key
db.store_cached(2, &[(&account_key2, &account1)]);
db.add_root(2);
// Flushes all roots
db.flush_accounts_cache(true, None);
// Should be one store before clean for slot 0
assert_eq!(db.storage.get_slot_storage_entries(0).unwrap().len(), 1);
db.get_accounts_delta_hash(2);
db.clean_accounts(Some(2));
// No stores should exist for slot 0 after clean
assert!(db.storage.get_slot_storage_entries(0).is_none());
// Ref count for `account_key1` (account removed earlier by shrink)
// should be 1, since it was only stored in slots 0 and 1, and slot 0
// is now dead
assert_eq!(db.accounts_index.ref_count_from_storage(&account_key1), 1);
}
#[test]
fn test_shrink_unref() {
run_test_shrink_unref(false)
}
#[test]
fn test_shrink_unref_with_intra_slot_cleaning() {
run_test_shrink_unref(true)
}
#[test]
fn test_partial_clean() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let account_key1 = Pubkey::new_unique();
let account_key2 = Pubkey::new_unique();
let account1 = Account::new(1, 0, &Account::default().owner);
let account2 = Account::new(2, 0, &Account::default().owner);
let account3 = Account::new(3, 0, &Account::default().owner);
let account4 = Account::new(4, 0, &Account::default().owner);
// Store accounts into slots 0 and 1
db.store_uncached(0, &[(&account_key1, &account1)]);
db.store_uncached(0, &[(&account_key2, &account1)]);
db.store_uncached(1, &[(&account_key1, &account2)]);
db.get_accounts_delta_hash(0);
db.get_accounts_delta_hash(1);
db.print_accounts_stats("pre-clean1");
// clean accounts - no accounts should be cleaned, since no rooted slots
//
// Check that the uncleaned_pubkeys are not prematurely removed, so that
// when the slots are rooted and can actually be cleaned, the delta keys
// are still there.
db.clean_accounts(None);
db.print_accounts_stats("post-clean1");
// Check stores > 0
assert!(!slot_stores(&db, 0).is_empty());
assert!(!slot_stores(&db, 1).is_empty());
// root slot 0
db.add_root(0);
// store into slot 2
db.store_uncached(2, &[(&account_key2, &account3)]);
db.store_uncached(2, &[(&account_key1, &account3)]);
db.get_accounts_delta_hash(2);
db.clean_accounts(None);
db.print_accounts_stats("post-clean2");
// root slot 1
db.add_root(1);
db.clean_accounts(None);
db.print_accounts_stats("post-clean3");
db.store_uncached(3, &[(&account_key2, &account4)]);
db.get_accounts_delta_hash(3);
db.add_root(3);
// Check that we can clean where max_root=3 and slot=2 is not rooted
db.clean_accounts(None);
assert!(db.uncleaned_pubkeys.is_empty());
db.print_accounts_stats("post-clean4");
assert!(slot_stores(&db, 0).is_empty());
assert!(!slot_stores(&db, 1).is_empty());
}
#[test]
fn test_recycle_stores_expiration() {
solana_logger::setup();
let dummy_path = Path::new("");
let dummy_slot = 12;
let dummy_size = 1000;
let dummy_id1 = 22;
let entry1 = Arc::new(AccountStorageEntry::new(
&dummy_path,
dummy_slot,
dummy_id1,
dummy_size,
));
let dummy_id2 = 44;
let entry2 = Arc::new(AccountStorageEntry::new(
&dummy_path,
dummy_slot,
dummy_id2,
dummy_size,
));
let mut recycle_stores = RecycleStores::default();
recycle_stores.add_entry(entry1);
recycle_stores.add_entry(entry2);
assert_eq!(recycle_stores.entry_count(), 2);
// no expiration for newly added entries
let expired = recycle_stores.expire_old_entries();
assert_eq!(
expired
.iter()
.map(|e| e.append_vec_id())
.collect::<Vec<_>>(),
Vec::<AppendVecId>::new()
);
assert_eq!(
recycle_stores
.iter()
.map(|(_, e)| e.append_vec_id())
.collect::<Vec<_>>(),
vec![dummy_id1, dummy_id2]
);
assert_eq!(recycle_stores.entry_count(), 2);
assert_eq!(recycle_stores.total_bytes(), dummy_size * 2);
// expiration for only too old entries
recycle_stores.entries[0].0 =
Instant::now() - Duration::from_secs(EXPIRATION_TTL_SECONDS + 1);
let expired = recycle_stores.expire_old_entries();
assert_eq!(
expired
.iter()
.map(|e| e.append_vec_id())
.collect::<Vec<_>>(),
vec![dummy_id1]
);
assert_eq!(
recycle_stores
.iter()
.map(|(_, e)| e.append_vec_id())
.collect::<Vec<_>>(),
vec![dummy_id2]
);
assert_eq!(recycle_stores.entry_count(), 1);
assert_eq!(recycle_stores.total_bytes(), dummy_size);
}
}
| 38.225785 | 264 | 0.561513 |
ccb4f653cb3e884cb09c31d08b725acd08c2737b | 946 | use crate::parser::{KeywordToken, TrySet, attrs};
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub(crate) enum EnumErrorMode {
Default,
ReturnAllErrors,
ReturnUnexpectedError,
}
impl Default for EnumErrorMode {
fn default() -> Self {
Self::Default
}
}
impl From<attrs::ReturnAllErrors> for EnumErrorMode {
fn from(_: attrs::ReturnAllErrors) -> Self {
Self::ReturnAllErrors
}
}
impl From<attrs::ReturnUnexpectedError> for EnumErrorMode {
fn from(_: attrs::ReturnUnexpectedError) -> Self {
Self::ReturnUnexpectedError
}
}
impl<T: Into<EnumErrorMode> + KeywordToken> TrySet<EnumErrorMode> for T {
fn try_set(self, to: &mut EnumErrorMode) -> syn::Result<()> {
if *to == EnumErrorMode::Default {
*to = self.into();
Ok(())
} else {
Err(syn::Error::new(self.keyword_span(), "conflicting error handling keyword"))
}
}
}
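// Worked example of the conflict check above (hypothetical attribute input):
// parsing `#[br(return_all_errors, return_all_errors)]` calls `try_set` twice on
// the same `EnumErrorMode`. The first call moves it from `Default` to
// `ReturnAllErrors`; the second finds it already set away from `Default` and
// reports "conflicting error handling keyword" at the second keyword's span.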
| 24.894737 | 91 | 0.624736 |
29fdbae92aceb289108bd76a880dd7552672f73e | 150 | //! Tests auto-converted from "sass-spec/spec/non_conformant/errors/import/url/mixin/control-else"
#[allow(unused)]
use super::runner;
mod outside;
| 21.428571 | 98 | 0.76 |
6133af1bb1f1adcee7687768b5b0a58d36a97d90 | 30,209 | //! Generic WebSocket message stream.
pub mod frame;
pub mod payload;
mod message;
pub use self::{frame::CloseFrame, message::Message};
use log::*;
use std::{
collections::VecDeque,
io::{ErrorKind as IoErrorKind, Read, Write},
mem::replace,
};
use self::{
frame::{
coding::{CloseCode, Control as OpCtl, Data as OpData, OpCode},
Frame, FrameCodec,
},
message::{IncompleteMessage, IncompleteMessageType},
};
use crate::{
error::{Error, ProtocolError, Result},
util::NonBlockingResult,
};
/// Indicates a Client or Server role of the websocket
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Role {
/// This socket is a server
Server,
/// This socket is a client
Client,
}
/// The configuration for WebSocket connection.
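///
/// # Example
///
/// A minimal sketch: override one limit and keep the other defaults.
///
/// ```
/// use tungstenite::protocol::WebSocketConfig;
///
/// let config = WebSocketConfig {
///     max_message_size: Some(1 << 20), // cap messages at 1 MiB
///     ..WebSocketConfig::default()
/// };
/// assert_eq!(config.max_frame_size, Some(16 << 20)); // default kept
/// ```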
#[derive(Debug, Clone, Copy)]
pub struct WebSocketConfig {
/// The size of the send queue. You can use it to turn the backpressure features
/// on or off. `None` means that the size of the queue is unlimited. The default
/// is an unlimited queue.
pub max_send_queue: Option<usize>,
/// The maximum size of a message. `None` means no size limit. The default value is 64 MiB
/// which should be reasonably big for all normal use-cases but small enough to
/// prevent memory exhaustion caused by a malicious user.
pub max_message_size: Option<usize>,
/// The maximum size of a single message frame. `None` means no size limit. The limit is for
/// frame payload NOT including the frame header. The default value is 16 MiB which should
/// be reasonably big for all normal use-cases but small enough to prevent memory
/// exhaustion caused by a malicious user.
pub max_frame_size: Option<usize>,
/// When set to `true`, the server will accept and handle unmasked frames
/// from the client. According to the RFC 6455, the server must close the
/// connection to the client in such cases, however it seems like there are
/// some popular libraries that are sending unmasked frames, ignoring the RFC.
/// By default this option is set to `false`, i.e. according to RFC 6455.
pub accept_unmasked_frames: bool,
}
impl Default for WebSocketConfig {
fn default() -> Self {
WebSocketConfig {
max_send_queue: None,
max_message_size: Some(64 << 20),
max_frame_size: Some(16 << 20),
accept_unmasked_frames: false,
}
}
}
/// WebSocket input-output stream.
///
/// This is THE structure you want to create to be able to speak the WebSocket protocol.
/// It may be created by calling `connect`, `accept` or `client` functions.
#[derive(Debug)]
pub struct WebSocket<Stream> {
/// The underlying socket.
socket: Stream,
/// The context for managing a WebSocket.
context: WebSocketContext,
}
impl<Stream> WebSocket<Stream> {
/// Convert a raw socket into a WebSocket without performing a handshake.
///
/// Call this function if you're using Tungstenite as a part of a web framework
/// or together with an existing one. If you need an initial handshake, use
/// `connect()` or `accept()` functions of the crate to construct a websocket.
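///
/// # Example
///
/// A minimal sketch, assuming the WebSocket handshake has already been
/// completed on `stream` elsewhere:
///
/// ```no_run
/// use std::net::TcpStream;
/// use tungstenite::protocol::{Role, WebSocket};
///
/// let stream = TcpStream::connect("127.0.0.1:9001").unwrap();
/// let socket = WebSocket::from_raw_socket(stream, Role::Client, None);
/// ```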
pub fn from_raw_socket(stream: Stream, role: Role, config: Option<WebSocketConfig>) -> Self {
WebSocket { socket: stream, context: WebSocketContext::new(role, config) }
}
/// Convert a raw socket into a WebSocket without performing a handshake.
///
/// Call this function if you're using Tungstenite as a part of a web framework
/// or together with an existing one. If you need an initial handshake, use
/// `connect()` or `accept()` functions of the crate to construct a websocket.
pub fn from_partially_read(
stream: Stream,
part: Vec<u8>,
role: Role,
config: Option<WebSocketConfig>,
) -> Self {
WebSocket {
socket: stream,
context: WebSocketContext::from_partially_read(part, role, config),
}
}
/// Returns a shared reference to the inner stream.
pub fn get_ref(&self) -> &Stream {
&self.socket
}
/// Returns a mutable reference to the inner stream.
pub fn get_mut(&mut self) -> &mut Stream {
&mut self.socket
}
/// Change the configuration.
pub fn set_config(&mut self, set_func: impl FnOnce(&mut WebSocketConfig)) {
self.context.set_config(set_func)
}
/// Read the configuration.
pub fn get_config(&self) -> &WebSocketConfig {
self.context.get_config()
}
/// Check if it is possible to read messages.
///
/// Reading is impossible after receiving `Message::Close`. It is still possible after
/// sending close frame since the peer still may send some data before confirming close.
pub fn can_read(&self) -> bool {
self.context.can_read()
}
/// Check if it is possible to write messages.
///
/// Writing gets impossible immediately after sending or receiving `Message::Close`.
pub fn can_write(&self) -> bool {
self.context.can_write()
}
}
impl<Stream: Read + Write> WebSocket<Stream> {
/// Read a message from stream, if possible.
///
/// This will queue responses to ping and close messages to be sent. It will call
/// `write_pending` before trying to read in order to make sure that those responses
/// make progress even if you never call `write_pending`. That does mean that they
/// get sent out, at the earliest, on the next call to `read_message`, `write_message` or `write_pending`.
///
/// ## Closing the connection
/// When the remote endpoint decides to close the connection this will return
/// the close message with an optional close frame.
///
/// You should continue calling `read_message`, `write_message` or `write_pending` to drive
/// the reply to the close frame until [Error::ConnectionClosed] is returned. Once that happens
/// it is safe to drop the underlying connection.
pub fn read_message(&mut self) -> Result<Message> {
self.context.read_message(&mut self.socket)
}
/// Send a message to stream, if possible.
///
/// WebSocket will buffer a configurable number of messages at a time, except to reply to Ping
/// requests. A Pong reply will jump the queue because the
/// [websocket RFC](https://tools.ietf.org/html/rfc6455#section-5.5.2) specifies it should be sent
/// as soon as is practical.
///
/// Note that upon receiving a ping message, tungstenite queues a pong reply automatically.
/// When you call either `read_message`, `write_message` or `write_pending` next it will try to send
/// that pong out if the underlying connection can take more data. This means you should not
/// respond to ping frames manually.
///
/// You can however send pong frames manually in order to indicate a unidirectional heartbeat
/// as described in [RFC 6455](https://tools.ietf.org/html/rfc6455#section-5.5.3). Note that
/// if `read_message` returns a ping, you should call `write_pending` until it doesn't return
/// WouldBlock before passing a pong to `write_message`, otherwise the response to the
/// ping will not be sent, but rather replaced by your custom pong message.
///
/// ## Errors
/// - If the WebSocket's send queue is full, `SendQueueFull` will be returned
/// along with the passed message. Otherwise, the message is queued and Ok(()) is returned.
/// - If the connection is closed and should be dropped, this will return [Error::ConnectionClosed].
/// - If you try again after [Error::ConnectionClosed] was returned either from here or from `read_message`,
/// [Error::AlreadyClosed] will be returned. This indicates a program error on your part.
/// - [Error::Io] is returned if the underlying connection returns an error
/// (consider these fatal except for WouldBlock).
/// - [Error::Capacity] if your message size is bigger than the configured max message size.
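///
/// # Example
///
/// A minimal sketch of sending a text message on an established socket:
///
/// ```no_run
/// # use std::net::TcpStream;
/// # use tungstenite::protocol::{Role, WebSocket};
/// use tungstenite::protocol::Message;
///
/// # let stream = TcpStream::connect("127.0.0.1:9001").unwrap();
/// # let mut socket = WebSocket::from_raw_socket(stream, Role::Client, None);
/// socket.write_message(Message::Text("Hello WebSocket".into())).unwrap();
/// ```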
pub fn write_message(&mut self, message: Message) -> Result<()> {
self.context.write_message(&mut self.socket, message)
}
/// Flush the pending send queue.
pub fn write_pending(&mut self) -> Result<()> {
self.context.write_pending(&mut self.socket)
}
/// Close the connection.
///
/// This function guarantees that the close frame will be queued.
/// There is no need to call it again. Calling this function is
/// the same as calling `write_message(Message::Close(..))`.
///
/// After queuing the close frame you should continue calling `read_message` or
/// `write_pending` to drive the close handshake to completion.
///
/// The websocket RFC defines that the underlying connection should be closed
/// by the server. Tungstenite takes care of this asymmetry for you.
///
/// When the close handshake is finished (we have both sent and received
/// a close message), `read_message` or `write_pending` will return
/// [Error::ConnectionClosed] if this endpoint is the server.
///
/// If this endpoint is a client, [Error::ConnectionClosed] will only be
/// returned after the server has closed the underlying connection.
///
/// It is thus safe to drop the underlying connection as soon as [Error::ConnectionClosed]
/// is returned from `read_message` or `write_pending`.
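///
/// # Example
///
/// A minimal sketch of driving the close handshake to completion:
///
/// ```no_run
/// # use std::net::TcpStream;
/// # use tungstenite::protocol::{Role, WebSocket};
/// use tungstenite::error::Error;
///
/// # let stream = TcpStream::connect("127.0.0.1:9001").unwrap();
/// # let mut socket = WebSocket::from_raw_socket(stream, Role::Client, None);
/// socket.close(None).unwrap();
/// loop {
///     match socket.read_message() {
///         Ok(_) => {}                            // the peer may still send data
///         Err(Error::ConnectionClosed) => break, // now safe to drop the stream
///         Err(err) => panic!("{}", err),
///     }
/// }
/// ```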
pub fn close(&mut self, code: Option<CloseFrame>) -> Result<()> {
self.context.close(&mut self.socket, code)
}
}
/// A context for managing WebSocket stream.
#[derive(Debug)]
pub struct WebSocketContext {
/// Server or client?
role: Role,
/// encoder/decoder of frame.
frame: FrameCodec,
/// The state of processing, either "active" or "closing".
state: WebSocketState,
/// Receive: an incomplete message being processed.
incomplete: Option<IncompleteMessage>,
/// Send: a data send queue.
send_queue: VecDeque<Frame>,
/// Send: an OOB pong message.
pong: Option<Frame>,
/// The configuration for the websocket session.
config: WebSocketConfig,
}
impl WebSocketContext {
/// Create a WebSocket context that manages a post-handshake stream.
pub fn new(role: Role, config: Option<WebSocketConfig>) -> Self {
WebSocketContext {
role,
frame: FrameCodec::new(),
state: WebSocketState::Active,
incomplete: None,
send_queue: VecDeque::new(),
pong: None,
config: config.unwrap_or_else(WebSocketConfig::default),
}
}
/// Create a WebSocket context that manages a post-handshake stream.
pub fn from_partially_read(part: Vec<u8>, role: Role, config: Option<WebSocketConfig>) -> Self {
WebSocketContext {
frame: FrameCodec::from_partially_read(part),
..WebSocketContext::new(role, config)
}
}
/// Change the configuration.
pub fn set_config(&mut self, set_func: impl FnOnce(&mut WebSocketConfig)) {
set_func(&mut self.config)
}
/// Read the configuration.
pub fn get_config(&self) -> &WebSocketConfig {
&self.config
}
/// Check if it is possible to read messages.
///
/// Reading is impossible after receiving `Message::Close`. It is still possible after
/// sending close frame since the peer still may send some data before confirming close.
pub fn can_read(&self) -> bool {
self.state.can_read()
}
/// Check if it is possible to write messages.
///
/// Writing gets impossible immediately after sending or receiving `Message::Close`.
pub fn can_write(&self) -> bool {
self.state.is_active()
}
/// Read a message from the provided stream, if possible.
///
/// This function sends pong and close responses automatically.
/// However, it never blocks on write.
pub fn read_message<Stream>(&mut self, stream: &mut Stream) -> Result<Message>
where
Stream: Read + Write,
{
// Do not read from already closed connections.
self.state.check_active()?;
loop {
// Since we may get ping or close, we need to reply to the messages even during read.
// Thus we call write_pending() but ignore its blocking.
self.write_pending(stream).no_block()?;
// If we get here, either write blocks or we have nothing to write.
// Thus if read blocks, just let it return WouldBlock.
if let Some(message) = self.read_message_frame(stream)? {
trace!("Received message {}", message);
return Ok(message);
}
}
}
/// Send a message to the provided stream, if possible.
///
/// WebSocket will buffer a configurable number of messages at a time, except to reply to Ping
/// and Close requests. If the WebSocket's send queue is full, `SendQueueFull` will be returned
/// along with the passed message. Otherwise, the message is queued and Ok(()) is returned.
///
/// Note that only the most recent pong frame is stored: if multiple pong frames
/// are queued before a write, only the last one is sent.
pub fn write_message<Stream>(&mut self, stream: &mut Stream, message: Message) -> Result<()>
where
Stream: Read + Write,
{
// When terminated, return AlreadyClosed.
self.state.check_active()?;
// Do not write after sending a close frame.
if !self.state.is_active() {
return Err(Error::Protocol(ProtocolError::SendAfterClosing));
}
if let Some(max_send_queue) = self.config.max_send_queue {
if self.send_queue.len() >= max_send_queue {
// Try to make some room for the new message.
// Do not return here if write would block, ignore WouldBlock silently
// since we must queue the message anyway.
self.write_pending(stream).no_block()?;
}
if self.send_queue.len() >= max_send_queue {
return Err(Error::SendQueueFull(message));
}
}
let frame = match message {
Message::Text(data) => Frame::message(data.into(), OpCode::Data(OpData::Text), true),
Message::Binary(data) => Frame::message(data, OpCode::Data(OpData::Binary), true),
Message::Ping(data) => Frame::ping(data),
Message::Pong(data) => {
self.pong = Some(Frame::pong(data));
return self.write_pending(stream);
}
Message::Close(code) => return self.close(stream, code),
};
self.send_queue.push_back(frame);
self.write_pending(stream)
}
/// Flush the pending send queue.
pub fn write_pending<Stream>(&mut self, stream: &mut Stream) -> Result<()>
where
Stream: Read + Write,
{
// First, make sure we have no pending frame sending.
self.frame.write_pending(stream)?;
// Upon receipt of a Ping frame, an endpoint MUST send a Pong frame in
// response, unless it already received a Close frame. It SHOULD
// respond with Pong frame as soon as is practical. (RFC 6455)
if let Some(pong) = self.pong.take() {
trace!("Sending pong reply");
self.send_one_frame(stream, pong)?;
}
// If we have any unsent frames, send them.
trace!("Frames still in queue: {}", self.send_queue.len());
while let Some(data) = self.send_queue.pop_front() {
self.send_one_frame(stream, data)?;
}
// If we get to this point, the send queue is empty and the underlying socket is still
// willing to take more data.
// If we're closing and there is nothing to send anymore, we should close the connection.
if self.role == Role::Server && !self.state.can_read() {
// The underlying TCP connection, in most normal cases, SHOULD be closed
// first by the server, so that it holds the TIME_WAIT state and not the
// client (as this would prevent it from re-opening the connection for 2
// maximum segment lifetimes (2MSL), while there is no corresponding
// server impact as a TIME_WAIT connection is immediately reopened upon
// a new SYN with a higher seq number). (RFC 6455)
self.state = WebSocketState::Terminated;
Err(Error::ConnectionClosed)
} else {
Ok(())
}
}
/// Close the connection.
///
/// This function guarantees that the close frame will be queued.
/// There is no need to call it again. Calling this function is
/// the same as calling `write(Message::Close(..))`.
pub fn close<Stream>(&mut self, stream: &mut Stream, code: Option<CloseFrame>) -> Result<()>
where
Stream: Read + Write,
{
if let WebSocketState::Active = self.state {
self.state = WebSocketState::ClosedByUs;
let frame = Frame::close(code);
self.send_queue.push_back(frame);
} else {
// Already closed, nothing to do.
}
self.write_pending(stream)
}
/// Try to decode one message frame. May return None.
fn read_message_frame<Stream>(&mut self, stream: &mut Stream) -> Result<Option<Message>>
where
Stream: Read + Write,
{
if let Some(mut frame) = self
.frame
.read_frame(stream, self.config.max_frame_size)
.check_connection_reset(self.state)?
{
if !self.state.can_read() {
return Err(Error::Protocol(ProtocolError::ReceivedAfterClosing));
}
// MUST be 0 unless an extension is negotiated that defines meanings
// for non-zero values. If a nonzero value is received and none of
// the negotiated extensions defines the meaning of such a nonzero
// value, the receiving endpoint MUST _Fail the WebSocket
// Connection_.
{
let hdr = frame.header();
if hdr.rsv1 || hdr.rsv2 || hdr.rsv3 {
return Err(Error::Protocol(ProtocolError::NonZeroReservedBits));
}
}
match self.role {
Role::Server => {
if frame.is_masked() {
// A server MUST remove masking for data frames received from a client
// as described in Section 5.3. (RFC 6455)
frame.apply_mask()
} else if !self.config.accept_unmasked_frames {
// The server MUST close the connection upon receiving a
// frame that is not masked. (RFC 6455)
// The only exception here is if the user explicitly accepts given
// stream by setting WebSocketConfig.accept_unmasked_frames to true
return Err(Error::Protocol(ProtocolError::UnmaskedFrameFromClient));
}
}
Role::Client => {
if frame.is_masked() {
// A client MUST close a connection if it detects a masked frame. (RFC 6455)
return Err(Error::Protocol(ProtocolError::MaskedFrameFromServer));
}
}
}
match frame.header().opcode {
OpCode::Control(ctl) => {
match ctl {
// All control frames MUST have a payload length of 125 bytes or less
// and MUST NOT be fragmented. (RFC 6455)
_ if !frame.header().is_final => {
Err(Error::Protocol(ProtocolError::FragmentedControlFrame))
}
_ if frame.payload().len() > 125 => {
Err(Error::Protocol(ProtocolError::ControlFrameTooBig))
}
OpCtl::Close => Ok(self.do_close(frame.into_close()?).map(Message::Close)),
OpCtl::Reserved(i) => {
Err(Error::Protocol(ProtocolError::UnknownControlFrameType(i)))
}
OpCtl::Ping => {
let data = frame.into_data();
// No ping processing after we sent a close frame.
if self.state.is_active() {
self.pong = Some(Frame::pong(data.to_vec()));
}
Ok(Some(Message::Ping(data.into_vec())))
}
OpCtl::Pong => Ok(Some(Message::Pong(frame.into_data().into_vec()))),
}
}
OpCode::Data(data) => {
let fin = frame.header().is_final;
match data {
OpData::Continue => {
if let Some(ref mut msg) = self.incomplete {
msg.extend(frame.into_data().as_ref(), self.config.max_message_size)?;
} else {
return Err(Error::Protocol(
ProtocolError::UnexpectedContinueFrame,
));
}
if fin {
Ok(Some(self.incomplete.take().unwrap().complete()?))
} else {
Ok(None)
}
}
c if self.incomplete.is_some() => {
Err(Error::Protocol(ProtocolError::ExpectedFragment(c)))
}
OpData::Text | OpData::Binary => {
let msg = {
let message_type = match data {
OpData::Text => IncompleteMessageType::Text,
OpData::Binary => IncompleteMessageType::Binary,
_ => panic!("Bug: message is not text nor binary"),
};
let mut m = IncompleteMessage::new(message_type);
m.extend(frame.into_data().as_ref(), self.config.max_message_size)?;
m
};
if fin {
Ok(Some(msg.complete()?))
} else {
self.incomplete = Some(msg);
Ok(None)
}
}
OpData::Reserved(i) => {
Err(Error::Protocol(ProtocolError::UnknownDataFrameType(i)))
}
}
}
} // match opcode
} else {
// Connection closed by peer
match replace(&mut self.state, WebSocketState::Terminated) {
WebSocketState::ClosedByPeer | WebSocketState::CloseAcknowledged => {
Err(Error::ConnectionClosed)
}
_ => Err(Error::Protocol(ProtocolError::ResetWithoutClosingHandshake)),
}
}
}
/// Received a close frame. Tells if we need to return a close frame to the user.
#[allow(clippy::option_option)]
fn do_close<'t>(&mut self, close: Option<CloseFrame<'t>>) -> Option<Option<CloseFrame<'t>>> {
debug!("Received close frame: {:?}", close);
match self.state {
WebSocketState::Active => {
let close_code = close.as_ref().map(|f| f.code);
self.state = WebSocketState::ClosedByPeer;
let reply = if let Some(code) = close_code {
if code.is_allowed() {
Frame::close(Some(CloseFrame {
code: CloseCode::Normal,
reason: "".into(),
}))
} else {
Frame::close(Some(CloseFrame {
code: CloseCode::Protocol,
reason: "Protocol violation".into(),
}))
}
} else {
Frame::close(None)
};
debug!("Replying to close with {:?}", reply);
self.send_queue.push_back(reply);
Some(close)
}
WebSocketState::ClosedByPeer | WebSocketState::CloseAcknowledged => {
// It is already closed, just ignore.
None
}
WebSocketState::ClosedByUs => {
// We received a reply.
self.state = WebSocketState::CloseAcknowledged;
Some(close)
}
WebSocketState::Terminated => unreachable!(),
}
}
/// Send a single pending frame.
fn send_one_frame<Stream>(&mut self, stream: &mut Stream, mut frame: Frame) -> Result<()>
where
Stream: Read + Write,
{
match self.role {
Role::Server => {}
Role::Client => {
// 5. If the data is being sent by the client, the frame(s) MUST be
// masked as defined in Section 5.3. (RFC 6455)
frame.set_random_mask();
}
}
trace!("Sending frame: {:?}", frame);
self.frame.write_frame(stream, frame).check_connection_reset(self.state)
}
}
/// The current connection state.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
enum WebSocketState {
/// The connection is active.
Active,
/// We initiated a close handshake.
ClosedByUs,
/// The peer initiated a close handshake.
ClosedByPeer,
/// The peer replied to our close handshake.
CloseAcknowledged,
/// The connection does not exist anymore.
Terminated,
}
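// A sketch of the transitions driven by the code above: `close()` moves
// Active -> ClosedByUs; receiving the peer's close frame moves Active ->
// ClosedByPeer or ClosedByUs -> CloseAcknowledged; once the close handshake
// has finished and the connection can be dropped, the state becomes Terminated.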
impl WebSocketState {
/// Tell if we're allowed to process normal messages.
fn is_active(self) -> bool {
matches!(self, WebSocketState::Active)
}
/// Tell if we should process incoming data. Note that if we send a close frame
/// but the remote hasn't confirmed, they might have sent data before they receive our
/// close frame, so we should still pass those to client code, hence ClosedByUs is valid.
fn can_read(self) -> bool {
matches!(self, WebSocketState::Active | WebSocketState::ClosedByUs)
}
/// Check if the state is active, return error if not.
fn check_active(self) -> Result<()> {
match self {
WebSocketState::Terminated => Err(Error::AlreadyClosed),
_ => Ok(()),
}
}
}
/// Translate "Connection reset by peer" into `ConnectionClosed` if appropriate.
trait CheckConnectionReset {
fn check_connection_reset(self, state: WebSocketState) -> Self;
}
impl<T> CheckConnectionReset for Result<T> {
fn check_connection_reset(self, state: WebSocketState) -> Self {
match self {
Err(Error::Io(io_error)) => Err({
if !state.can_read() && io_error.kind() == IoErrorKind::ConnectionReset {
Error::ConnectionClosed
} else {
Error::Io(io_error)
}
}),
x => x,
}
}
}
#[cfg(test)]
mod tests {
use super::{Message, Role, WebSocket, WebSocketConfig};
use crate::error::{CapacityError, Error};
use std::{io, io::Cursor};
struct WriteMoc<Stream>(Stream);
impl<Stream> io::Write for WriteMoc<Stream> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<Stream: io::Read> io::Read for WriteMoc<Stream> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.0.read(buf)
}
}
#[test]
fn receive_messages() {
let incoming = Cursor::new(vec![
0x89, 0x02, 0x01, 0x02, 0x8a, 0x01, 0x03, 0x01, 0x07, 0x48, 0x65, 0x6c, 0x6c, 0x6f,
0x2c, 0x20, 0x80, 0x06, 0x57, 0x6f, 0x72, 0x6c, 0x64, 0x21, 0x82, 0x03, 0x01, 0x02,
0x03,
]);
let mut socket = WebSocket::from_raw_socket(WriteMoc(incoming), Role::Client, None);
assert_eq!(socket.read_message().unwrap(), Message::Ping(vec![1, 2]));
assert_eq!(socket.read_message().unwrap(), Message::Pong(vec![3]));
assert_eq!(socket.read_message().unwrap(), Message::Text("Hello, World!".into()));
assert_eq!(socket.read_message().unwrap(), Message::binary(vec![0x01, 0x02, 0x03]));
}
#[test]
fn size_limiting_text_fragmented() {
let incoming = Cursor::new(vec![
0x01, 0x07, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x2c, 0x20, 0x80, 0x06, 0x57, 0x6f, 0x72,
0x6c, 0x64, 0x21,
]);
let limit = WebSocketConfig { max_message_size: Some(10), ..WebSocketConfig::default() };
let mut socket = WebSocket::from_raw_socket(WriteMoc(incoming), Role::Client, Some(limit));
assert!(matches!(
socket.read_message(),
Err(Error::Capacity(CapacityError::MessageTooLong { size: 13, max_size: 10 }))
));
}
#[test]
fn size_limiting_binary() {
let incoming = Cursor::new(vec![0x82, 0x03, 0x01, 0x02, 0x03]);
let limit = WebSocketConfig { max_message_size: Some(2), ..WebSocketConfig::default() };
let mut socket = WebSocket::from_raw_socket(WriteMoc(incoming), Role::Client, Some(limit));
assert!(matches!(
socket.read_message(),
Err(Error::Capacity(CapacityError::MessageTooLong { size: 3, max_size: 2 }))
));
}
}
| 41.10068 | 112 | 0.576418 |
8fa6010ebb25d034935bb0e174368c8170cb03c3 | 11,600 | use crate::chain::{ChainController, ChainService};
use ckb_chain_spec::consensus::Consensus;
use ckb_core::block::Block;
use ckb_core::block::BlockBuilder;
use ckb_core::cell::{resolve_transaction, OverlayCellProvider, TransactionsProvider};
use ckb_core::header::{Header, HeaderBuilder};
use ckb_core::transaction::{CellInput, CellOutput, OutPoint, Transaction, TransactionBuilder};
use ckb_core::{capacity_bytes, Bytes, Capacity};
use ckb_dao::DaoCalculator;
use ckb_dao_utils::genesis_dao_data;
use ckb_db::memorydb::MemoryKeyValueDB;
use ckb_notify::NotifyService;
use ckb_shared::shared::Shared;
use ckb_shared::shared::SharedBuilder;
use ckb_store::{ChainKVStore, ChainStore};
use ckb_test_chain_utils::{build_block, create_always_success_cell};
use ckb_traits::chain_provider::ChainProvider;
use fnv::FnvHashSet;
use numext_fixed_hash::H256;
use numext_fixed_uint::U256;
use std::sync::Arc;
pub use ckb_test_chain_utils::MockStore;
const MIN_CAP: Capacity = capacity_bytes!(60);
pub(crate) fn create_always_success_tx() -> Transaction {
let (ref always_success_cell, ref script) = create_always_success_cell();
TransactionBuilder::default()
.witness(script.clone().into_witness())
.input(CellInput::new(OutPoint::null(), 0))
.output(always_success_cell.clone())
.build()
}
// NOTE: this is quite a waste of resources, but the alternative is to modify 100+
// invocations; let's stick with this approach until it becomes a real problem
pub(crate) fn create_always_success_out_point() -> OutPoint {
OutPoint::new_cell(create_always_success_tx().hash().to_owned(), 0)
}
pub(crate) fn start_chain(
consensus: Option<Consensus>,
) -> (
ChainController,
Shared<ChainKVStore<MemoryKeyValueDB>>,
Header,
) {
let builder = SharedBuilder::<MemoryKeyValueDB>::new();
let consensus = consensus.unwrap_or_else(|| {
let tx = create_always_success_tx();
let dao = genesis_dao_data(&tx).unwrap();
let header_builder = HeaderBuilder::default().dao(dao);
let genesis_block = BlockBuilder::from_header_builder(header_builder)
.transaction(tx)
.build();
Consensus::default()
.set_cellbase_maturity(0)
.set_genesis_block(genesis_block)
});
let shared = builder.consensus(consensus).build().unwrap();
let notify = NotifyService::default().start::<&str>(None);
let chain_service = ChainService::new(shared.clone(), notify);
let chain_controller = chain_service.start::<&str>(None);
let parent = shared
.store()
.get_block_header(&shared.store().get_block_hash(0).unwrap())
.unwrap();
(chain_controller, shared, parent)
}
pub(crate) fn calculate_reward(
store: &mut MockStore,
consensus: &Consensus,
parent: &Header,
) -> Capacity {
let number = parent.number() + 1;
let target_number = consensus.finalize_target(number).unwrap();
let target = store.0.get_ancestor(parent.hash(), target_number).unwrap();
let calculator = DaoCalculator::new(consensus, Arc::clone(&store.0));
calculator
.primary_block_reward(&target)
.unwrap()
.safe_add(calculator.secondary_block_reward(&target).unwrap())
.unwrap()
}
pub(crate) fn create_cellbase(
store: &mut MockStore,
consensus: &Consensus,
parent: &Header,
) -> Transaction {
let (_, always_success_script) = create_always_success_cell();
let capacity = calculate_reward(store, consensus, parent);
TransactionBuilder::default()
.input(CellInput::new_cellbase_input(parent.number() + 1))
.output(CellOutput::new(
capacity,
Bytes::default(),
always_success_script.clone(),
None,
))
.witness(always_success_script.clone().into_witness())
.build()
}
// A more flexible mock function for building non-full-dead-cell test cases
pub(crate) fn create_multi_outputs_transaction(
parent: &Transaction,
indices: Vec<usize>,
output_len: usize,
data: Vec<u8>,
) -> Transaction {
let (_, always_success_script) = create_always_success_cell();
let always_success_out_point = create_always_success_out_point();
let parent_outputs = parent.outputs();
let total_capacity = indices
.iter()
.map(|i| parent_outputs[*i].capacity)
.try_fold(Capacity::zero(), Capacity::safe_add)
.unwrap();
let output_capacity = Capacity::shannons(total_capacity.as_u64() / output_len as u64);
let remainder = Capacity::shannons(total_capacity.as_u64() % output_len as u64);
assert!(output_capacity > MIN_CAP);
let data = Bytes::from(data);
let outputs = (0..output_len).map(|i| {
let capacity = if i == output_len - 1 {
            output_capacity.safe_add(remainder).unwrap()
} else {
output_capacity
};
CellOutput::new(capacity, data.clone(), always_success_script.clone(), None)
});
let parent_pts = parent.output_pts();
let inputs = indices
.iter()
.map(|i| CellInput::new(parent_pts[*i].clone(), 0));
TransactionBuilder::default()
.outputs(outputs)
.inputs(inputs)
.dep(always_success_out_point)
.build()
}
pub(crate) fn create_transaction(parent: &H256, unique_data: u8) -> Transaction {
create_transaction_with_out_point(OutPoint::new_cell(parent.to_owned(), 0), unique_data)
}
pub(crate) fn create_transaction_with_out_point(
out_point: OutPoint,
unique_data: u8,
) -> Transaction {
let (_, always_success_script) = create_always_success_cell();
let always_success_out_point = create_always_success_out_point();
TransactionBuilder::default()
.output(CellOutput::new(
capacity_bytes!(100),
Bytes::from(vec![unique_data]),
always_success_script.clone(),
None,
))
.input(CellInput::new(out_point, 0))
.dep(always_success_out_point)
.build()
}
#[derive(Clone)]
pub struct MockChain<'a> {
blocks: Vec<Block>,
parent: Header,
consensus: &'a Consensus,
}
impl<'a> MockChain<'a> {
pub fn new(parent: Header, consensus: &'a Consensus) -> Self {
Self {
blocks: vec![],
parent,
consensus,
}
}
pub fn gen_block_with_proposal_txs(&mut self, txs: Vec<Transaction>, store: &mut MockStore) {
let difficulty = self.difficulty();
let parent = self.tip_header();
let cellbase = create_cellbase(store, self.consensus, &parent);
let dao = dao_data(
&self.consensus,
&parent,
&[cellbase.to_owned()],
store,
false,
);
let new_block = build_block!(
from_header_builder: {
parent_hash: parent.hash().to_owned(),
number: parent.number() + 1,
difficulty: difficulty + U256::from(100u64),
dao: dao,
},
transaction: cellbase,
proposals: txs.iter().map(Transaction::proposal_short_id),
);
store.insert_block(&new_block, self.consensus.genesis_epoch_ext());
self.blocks.push(new_block);
}
pub fn gen_empty_block_with_difficulty(&mut self, difficulty: u64, store: &mut MockStore) {
let parent = self.tip_header();
let cellbase = create_cellbase(store, self.consensus, &parent);
let dao = dao_data(
&self.consensus,
&parent,
&[cellbase.to_owned()],
store,
false,
);
let new_block = build_block!(
from_header_builder: {
parent_hash: parent.hash().to_owned(),
number: parent.number() + 1,
difficulty: U256::from(difficulty),
dao: dao,
},
transaction: cellbase,
);
store.insert_block(&new_block, self.consensus.genesis_epoch_ext());
self.blocks.push(new_block);
}
pub fn gen_empty_block(&mut self, diff: u64, store: &mut MockStore) {
let difficulty = self.difficulty();
let parent = self.tip_header();
let cellbase = create_cellbase(store, self.consensus, &parent);
let dao = dao_data(
&self.consensus,
&parent,
&[cellbase.to_owned()],
store,
false,
);
let new_block = build_block!(
from_header_builder: {
parent_hash: parent.hash().to_owned(),
number: parent.number() + 1,
difficulty: difficulty + U256::from(diff),
dao: dao,
},
transaction: cellbase,
);
store.insert_block(&new_block, self.consensus.genesis_epoch_ext());
self.blocks.push(new_block);
}
pub fn gen_block_with_commit_txs(
&mut self,
txs: Vec<Transaction>,
store: &mut MockStore,
ignore_resolve_error: bool,
) {
let difficulty = self.difficulty();
let parent = self.tip_header();
let cellbase = create_cellbase(store, self.consensus, &parent);
let mut txs_to_resolve = vec![cellbase.to_owned()];
txs_to_resolve.extend_from_slice(&txs);
let dao = dao_data(
&self.consensus,
&parent,
&txs_to_resolve,
store,
ignore_resolve_error,
);
let new_block = build_block!(
from_header_builder: {
parent_hash: parent.hash().to_owned(),
number: parent.number() + 1,
difficulty: difficulty + U256::from(100u64),
dao: dao,
},
transaction: cellbase,
transactions: txs,
);
store.insert_block(&new_block, self.consensus.genesis_epoch_ext());
self.blocks.push(new_block);
}
pub fn tip_header(&self) -> &Header {
self.blocks.last().map_or(&self.parent, |b| b.header())
}
pub fn tip(&self) -> &Block {
self.blocks.last().expect("should have tip")
}
pub fn difficulty(&self) -> U256 {
self.tip_header().difficulty().to_owned()
}
pub fn blocks(&self) -> &Vec<Block> {
&self.blocks
}
pub fn total_difficulty(&self) -> U256 {
self.blocks()
.iter()
.fold(U256::from(0u64), |sum, b| sum + b.header().difficulty())
}
}
pub fn dao_data(
consensus: &Consensus,
parent: &Header,
txs: &[Transaction],
store: &mut MockStore,
ignore_resolve_error: bool,
) -> Bytes {
let mut seen_inputs = FnvHashSet::default();
    // In case of resolve errors we just output a dummy DAO field, since those
    // should be the cases where we are testing invalid blocks
let transactions_provider = TransactionsProvider::new(txs);
let overlay_cell_provider = OverlayCellProvider::new(&transactions_provider, store);
let rtxs = txs.iter().try_fold(vec![], |mut rtxs, tx| {
let rtx = resolve_transaction(tx, &mut seen_inputs, &overlay_cell_provider, store);
match rtx {
Ok(rtx) => {
rtxs.push(rtx);
Ok(rtxs)
}
Err(e) => Err(e),
}
});
let rtxs = if ignore_resolve_error {
rtxs.unwrap_or_else(|_| vec![])
} else {
rtxs.unwrap()
};
let calculator = DaoCalculator::new(consensus, Arc::clone(&store.0));
calculator.dao_field(&rtxs, &parent).unwrap()
}
| 33.048433 | 97 | 0.616379 |
db1fbe05fa38f646a1fc0ab0906274a95ee960f6 | 1,319 | // Author: AlbinoGazelle
// Purpose: Print the "Twelve Days of Christmas" carol.
fn num_day(n: i32) {
match n {
1 => print!("first "),
2 => print!("second "),
3 => print!("third "),
4 => print!("fourth "),
5 => print!("fifth "),
6 => print!("sixth "),
7 => print!("seventh "),
8 => print!("eighth "),
9 => print!("nineth "),
10 => print!("tenth "),
11 => print!("eleventh "),
12 => print!("twelfth "),
_ => print!(""),
}
}
fn items(n: i32) {
let item = match n {
1 => "A partridge in a pear tree",
2 => "Two turtle doves, and",
3 => "Three french hens",
4 => "Four calling birds",
5 => "Five golden rings",
6 => "Six geese a-laying",
7 => "Seven swans a-swimming",
8 => "Eight maids a-milking",
9 => "Nine ladies dancing",
10 => "Ten lords a-leaping",
11 => "Eleven pipers piping",
12 => "Twelve drummers drumming",
_ => "",
};
println!("{}",item);
}
fn main() {
    for number in 1..13 {
        print!("On the ");
        num_day(number);
        println!("day of Christmas my true love gave to me:");
        for days in (1..=number).rev() {
items(days);
}
}
}
| 25.862745 | 62 | 0.457165 |
1cbe8827381b8651ed84f5af170977c723d11473 | 1,183 | extern crate unbase;
use unbase::subject::*;
#[test]
fn basic_record_retrieval() {
let net = unbase::Network::create_new_system();
let slab_a = unbase::Slab::new(&net);
let context_a = slab_a.create_context();
let record_id;
{
        let record = Subject::new_kv(&context_a, "animal_type", "Cat").unwrap();
        println!("Record {:?}", record);
        record_id = record.id;
}
let record_retrieved = context_a.get_subject_by_id(record_id);
assert!(record_retrieved.is_ok(), "Failed to retrieve record")
}
#[test]
fn basic_record_retrieval_simulator() {
let net = unbase::Network::create_new_system();
let simulator = unbase::network::transport::Simulator::new();
    net.add_transport(Box::new(simulator.clone()));
let slab_a = unbase::Slab::new(&net);
let context_a = slab_a.create_context();
let record_id;
{
        let record = Subject::new_kv(&context_a, "animal_type", "Cat").unwrap();
        println!("Record {:?}", record);
        record_id = record.id;
}
let record_retrieved = context_a.get_subject_by_id(record_id);
assert!(record_retrieved.is_ok(), "Failed to retrieve record")
}
| 25.170213 | 79 | 0.651733 |
f4480327a3741a3e257e92513d2f76a5f7b226e1 | 3,557 | use crate::rtweekend::random_int;
use crate::vec3::Point3;
use crate::Vec3;
use std::vec::Vec;
pub struct Perlin {
point_count: i32,
ranvec: Vec<Vec3>,
perm_x: Vec<i32>,
perm_y: Vec<i32>,
perm_z: Vec<i32>,
}
impl Perlin {
pub fn perlin_generate_perm(&mut self, axis: i32) {
let tmp = match axis {
0 => &mut self.perm_x,
1 => &mut self.perm_y,
_ => &mut self.perm_z,
};
for i in 0..self.point_count {
tmp.push(i);
}
Perlin::permut(tmp, self.point_count);
}
pub fn permut(p: &mut Vec<i32>, n: i32) {
for i in 1..n {
let target = random_int(0, n - i) as usize;
let j = i as usize;
p.swap(j, target);
}
}
pub fn new() -> Self {
Self {
point_count: 256,
ranvec: Vec::new(),
perm_x: Vec::new(),
perm_y: Vec::new(),
perm_z: Vec::new(),
}
}
pub fn init(&mut self) {
for _i in 0..self.point_count {
self.ranvec.push(Vec3::random_range(-1.0, 1.0));
}
self.perlin_generate_perm(0);
self.perlin_generate_perm(1);
self.perlin_generate_perm(2);
}
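    /// Samples smoothed gradient noise at `p`.
    ///
    /// A hedged usage sketch (assumes a `Point3::new(x, y, z)` constructor,
    /// mirroring the `Vec3::new` calls used elsewhere in this file):
    ///
    /// ```ignore
    /// let mut perlin = Perlin::new();
    /// perlin.init();
    /// let n = perlin.noise(Point3::new(1.2, 3.4, 5.6));
    /// ```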
pub fn noise(&self, p: Point3) -> f64 {
        let _u = p.x - p.x.floor();
        let _v = p.y - p.y.floor();
        let _w = p.z - p.z.floor();
let _i = p.x.floor();
let _j = p.y.floor();
let _k = p.z.floor();
let mut c: [[[Vec3; 2]; 2]; 2] = [[[Vec3::zero(); 2]; 2]; 2];
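        // Gather the gradient vectors at the 8 corners of the unit lattice cell
        // containing `p`; the float loop counters double as corner offsets.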
let mut di = 0.0;
while di < 2.0 {
let mut dj = 0.0;
while dj < 2.0 {
let mut dk = 0.0;
while dk < 2.0 {
let id_x = (255 & (_i + di) as i32) as usize;
let id_y = (255 & (_j + dj) as i32) as usize;
let id_z = (255 & (_k + dk) as i32) as usize;
let tmp = (self.perm_x[id_x] ^ self.perm_y[id_y] ^ self.perm_z[id_z]) as usize;
c[di as usize][dj as usize][dk as usize] = self.ranvec[tmp];
dk += 1.0
}
dj += 1.0
}
di += 1.0
}
Self::trilinear_interp(c, _u, _v, _w)
}
pub fn trilinear_interp(c: [[[Vec3; 2]; 2]; 2], _u: f64, _v: f64, _w: f64) -> f64 {
let mut accum = 0.0;
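        // Hermite smoothing of the interpolation weights: t * t * (3 - 2t).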
let uu = _u * _u * (3.0 - 2.0 * _u);
let vv = _v * _v * (3.0 - 2.0 * _v);
let ww = _w * _w * (3.0 - 2.0 * _w);
let mut _i = 0.0;
while _i < 2.0 {
let mut _j = 0.0;
while _j < 2.0 {
let mut _k = 0.0;
while _k < 2.0 {
let weight_v = Vec3::new(_u - _i, _v - _j, _w - _k);
accum += (_i * uu + (1.0 - _i) * (1.0 - uu))
* (_j * vv + (1.0 - _j) * (1.0 - vv))
* (_k * ww + (1.0 - _k) * (1.0 - ww))
* Vec3::dot(c[_i as usize][_j as usize][_k as usize], weight_v);
_k += 1.0;
}
_j += 1.0;
}
_i += 1.0;
}
accum
}
pub fn turb(&self, p: Point3, depth: i32) -> f64 {
let mut accum = 0.0;
let mut temp_p = p;
let mut weight = 1.0;
for _i in 0..depth {
accum += weight * self.noise(temp_p);
weight *= 0.5;
temp_p *= 2.0;
}
accum.abs()
}
}
| 28.007874 | 99 | 0.409896 |
f7190fb7338afdca2589d3c84a9b079d4c6254ff | 5,275 | // This file is part of the uutils coreutils package.
//
// (c) Michael Gehring <[email protected]>
//
// For the full copyright and license information, please view the LICENSE
// file that was distributed with this source code.
// spell-checker:ignore (ToDO) delim mkdelim
#[macro_use]
extern crate uucore;
use std::cmp::Ordering;
use std::fs::File;
use std::io::{self, stdin, BufRead, BufReader, Stdin};
use std::path::Path;
use uucore::InvalidEncodingHandling;
use clap::{crate_version, App, Arg, ArgMatches};
static ABOUT: &str = "compare two sorted files line by line";
static LONG_HELP: &str = "";
mod options {
pub const COLUMN_1: &str = "1";
pub const COLUMN_2: &str = "2";
pub const COLUMN_3: &str = "3";
pub const DELIMITER: &str = "output-delimiter";
pub const DELIMITER_DEFAULT: &str = "\t";
pub const FILE_1: &str = "FILE1";
pub const FILE_2: &str = "FILE2";
}
fn get_usage() -> String {
format!("{} [OPTION]... FILE1 FILE2", executable!())
}
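// Builds the delimiter prefix for output column `col` (1-based): one copy of
// the output delimiter for each earlier column that has not been suppressed.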
fn mkdelim(col: usize, opts: &ArgMatches) -> String {
let mut s = String::new();
let delim = opts.value_of(options::DELIMITER).unwrap();
if col > 1 && !opts.is_present(options::COLUMN_1) {
s.push_str(delim.as_ref());
}
if col > 2 && !opts.is_present(options::COLUMN_2) {
s.push_str(delim.as_ref());
}
s
}
fn ensure_nl(line: &mut String) {
match line.chars().last() {
Some('\n') => (),
_ => line.push('\n'),
}
}
enum LineReader {
Stdin(Stdin),
FileIn(BufReader<File>),
}
impl LineReader {
fn read_line(&mut self, buf: &mut String) -> io::Result<usize> {
match *self {
LineReader::Stdin(ref mut r) => r.read_line(buf),
LineReader::FileIn(ref mut r) => r.read_line(buf),
}
}
}
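// Walks both inputs in lockstep, assuming each is sorted: lines unique to the
// first file go to column 1, lines unique to the second to column 2, and lines
// common to both to column 3, subject to the -1/-2/-3 suppression flags.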
fn comm(a: &mut LineReader, b: &mut LineReader, opts: &ArgMatches) {
let delim: Vec<String> = (0..4).map(|col| mkdelim(col, opts)).collect();
let ra = &mut String::new();
let mut na = a.read_line(ra);
let rb = &mut String::new();
let mut nb = b.read_line(rb);
while na.is_ok() || nb.is_ok() {
let ord = match (na.is_ok(), nb.is_ok()) {
(false, true) => Ordering::Greater,
(true, false) => Ordering::Less,
(true, true) => match (&na, &nb) {
(&Ok(0), &Ok(0)) => break,
(&Ok(0), _) => Ordering::Greater,
(_, &Ok(0)) => Ordering::Less,
_ => ra.cmp(&rb),
},
_ => unreachable!(),
};
match ord {
Ordering::Less => {
if !opts.is_present(options::COLUMN_1) {
ensure_nl(ra);
print!("{}{}", delim[1], ra);
}
ra.clear();
na = a.read_line(ra);
}
Ordering::Greater => {
if !opts.is_present(options::COLUMN_2) {
ensure_nl(rb);
print!("{}{}", delim[2], rb);
}
rb.clear();
nb = b.read_line(rb);
}
Ordering::Equal => {
if !opts.is_present(options::COLUMN_3) {
ensure_nl(ra);
print!("{}{}", delim[3], ra);
}
ra.clear();
rb.clear();
na = a.read_line(ra);
nb = b.read_line(rb);
}
}
}
}
fn open_file(name: &str) -> io::Result<LineReader> {
match name {
"-" => Ok(LineReader::Stdin(stdin())),
_ => {
let f = File::open(&Path::new(name))?;
Ok(LineReader::FileIn(BufReader::new(f)))
}
}
}
pub fn uumain(args: impl uucore::Args) -> i32 {
let usage = get_usage();
let args = args
.collect_str(InvalidEncodingHandling::ConvertLossy)
.accept_any();
let matches = App::new(executable!())
.version(crate_version!())
.about(ABOUT)
.usage(&usage[..])
.after_help(LONG_HELP)
.arg(
Arg::with_name(options::COLUMN_1)
.short(options::COLUMN_1)
.help("suppress column 1 (lines unique to FILE1)"),
)
.arg(
Arg::with_name(options::COLUMN_2)
.short(options::COLUMN_2)
.help("suppress column 2 (lines unique to FILE2)"),
)
.arg(
Arg::with_name(options::COLUMN_3)
.short(options::COLUMN_3)
.help("suppress column 3 (lines that appear in both files)"),
)
.arg(
Arg::with_name(options::DELIMITER)
.long(options::DELIMITER)
.help("separate columns with STR")
.value_name("STR")
.default_value(options::DELIMITER_DEFAULT)
.hide_default_value(true),
)
.arg(Arg::with_name(options::FILE_1).required(true))
.arg(Arg::with_name(options::FILE_2).required(true))
.get_matches_from(args);
let mut f1 = open_file(matches.value_of(options::FILE_1).unwrap()).unwrap();
let mut f2 = open_file(matches.value_of(options::FILE_2).unwrap()).unwrap();
comm(&mut f1, &mut f2, &matches);
0
}
| 29.305556 | 80 | 0.515829 |
08e885ffa08d76c63f8db505dd96618089773f1d | 375 | #[macro_export]
macro_rules! column {
($($x:expr),* $(,)?) => {
geng::ui::column(vec![$(Box::new($x)),*])
};
}
#[macro_export]
macro_rules! stack {
($($x:expr),* $(,)?) => {
geng::ui::stack(vec![$(Box::new($x)),*])
};
}
#[macro_export]
macro_rules! row {
($($x:expr),* $(,)?) => {
geng::ui::row(vec![$(Box::new($x)),*])
};
}
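// A hedged usage sketch: each macro just boxes its arguments into a
// `Vec<Box<_>>`, so invocations nest naturally (the widget names below are
// illustrative):
//
//     let layout = column![row![left, right], footer];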
| 17.857143 | 49 | 0.426667 |
5d32cfbcca777ee24f3a08f162a297909752eaf5 | 1,905 | // * This file is part of the uutils coreutils package.
// *
// * (c) Jordi Boggiano <[email protected]>
// *
// * For the full copyright and license information, please view the LICENSE
// * file that was distributed with this source code.
/* last synced with: printenv (GNU coreutils) 8.13 */
use clap::{crate_version, App, Arg};
use std::env;
static ABOUT: &str = "Display the values of the specified environment VARIABLE(s), or (with no VARIABLE) display name and value pairs for them all.";
static OPT_NULL: &str = "null";
static ARG_VARIABLES: &str = "variables";
fn usage() -> String {
format!("{0} [VARIABLE]... [OPTION]...", uucore::execution_phrase())
}
pub fn uumain(args: impl uucore::Args) -> i32 {
let usage = usage();
let matches = uu_app().usage(&usage[..]).get_matches_from(args);
let variables: Vec<String> = matches
.values_of(ARG_VARIABLES)
.map(|v| v.map(ToString::to_string).collect())
.unwrap_or_default();
let separator = if matches.is_present(OPT_NULL) {
"\x00"
} else {
"\n"
};
if variables.is_empty() {
for (env_var, value) in env::vars() {
print!("{}={}{}", env_var, value, separator);
}
return 0;
}
for env_var in variables {
if let Ok(var) = env::var(env_var) {
print!("{}{}", var, separator);
}
}
0
}
pub fn uu_app() -> App<'static, 'static> {
App::new(uucore::util_name())
.version(crate_version!())
.about(ABOUT)
.arg(
Arg::with_name(OPT_NULL)
.short("0")
.long(OPT_NULL)
.help("end each output line with 0 byte rather than newline"),
)
.arg(
Arg::with_name(ARG_VARIABLES)
.multiple(true)
.takes_value(true)
.min_values(1),
)
}
| 26.830986 | 149 | 0.56168 |
aced55445f4f55846bbebaea072df9be5ba24de3 | 38,541 |
#![cfg_attr(feature = "clippy", feature(plugin))]
#![cfg_attr(feature = "clippy", plugin(clippy))]
#![cfg_attr(feature = "clippy", deny(clippy))]
use std::borrow::Borrow;
use std::cmp::Ordering;
use std::collections::hash_map::{self, HashMap};
use std::fmt;
use std::hash::{BuildHasher, Hash, Hasher};
use std::iter;
use std::marker;
use std::mem;
use std::ops::{Index, IndexMut};
use std::ptr;
struct KeyRef<K> { k: *const K }
struct Node<K, V> {
next: *mut Node<K, V>,
prev: *mut Node<K, V>,
key: K,
value: V,
}
/// A linked hash map.
pub struct LinkedHashMap<K, V, S = hash_map::RandomState> {
map: HashMap<KeyRef<K>, *mut Node<K, V>, S>,
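    // Guard node of a circular doubly linked list; `head.next` is the most
    // recently inserted entry and `head.prev` the least recently inserted.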
head: *mut Node<K, V>,
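    // Singly chained (via `next`) list of detached nodes recycled by `insert`.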
free: *mut Node<K, V>,
}
impl<K: Hash> Hash for KeyRef<K> {
fn hash<H: Hasher>(&self, state: &mut H) {
unsafe { (*self.k).hash(state) }
}
}
impl<K: PartialEq> PartialEq for KeyRef<K> {
fn eq(&self, other: &Self) -> bool {
        unsafe { (*self.k).eq(&*other.k) }
}
}
impl<K: Eq> Eq for KeyRef<K> {}
// This type exists only to support borrowing `KeyRef`s, which cannot be borrowed to `Q` directly
// due to conflicting implementations of `Borrow`. The layout of `&Qey<Q>` must be identical to
// `&Q` in order to support transmuting in the `Qey::from_ref` method.
#[derive(Hash, PartialEq, Eq)]
struct Qey<Q: ?Sized>(Q);
impl<Q: ?Sized> Qey<Q> {
fn from_ref(q: &Q) -> &Self { unsafe { mem::transmute(q) } }
}
impl<K, Q: ?Sized> Borrow<Qey<Q>> for KeyRef<K> where K: Borrow<Q> {
fn borrow(&self) -> &Qey<Q> {
Qey::from_ref(unsafe { (*self.k).borrow() })
}
}
impl<K, V> Node<K, V> {
fn new(k: K, v: V) -> Self {
Node {
key: k,
value: v,
next: ptr::null_mut(),
prev: ptr::null_mut(),
}
}
}
unsafe fn drop_empty_node<K, V>(the_box: *mut Node<K, V>) {
// Prevent compiler from trying to drop the un-initialized key and values in the node.
let Node { key, value, .. } = *Box::from_raw(the_box);
mem::forget(key);
mem::forget(value);
}
impl<K: Hash + Eq, V> LinkedHashMap<K, V> {
/// Creates a linked hash map.
pub fn new() -> Self { Self::with_map(HashMap::new()) }
/// Creates an empty linked hash map with the given initial capacity.
pub fn with_capacity(capacity: usize) -> Self {
Self::with_map(HashMap::with_capacity(capacity))
}
}
impl<K, V, S> LinkedHashMap<K, V, S> {
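    // Unlinks `node` from the circular list. The node's own pointers are left
    // dangling until it is re-attached or pushed onto the free list.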
#[inline]
fn detach(&mut self, node: *mut Node<K, V>) {
unsafe {
(*(*node).prev).next = (*node).next;
(*(*node).next).prev = (*node).prev;
}
}
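    // Links `node` in directly after the guard node, i.e. at the
    // most-recently-inserted end of the list.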
#[inline]
fn attach(&mut self, node: *mut Node<K, V>) {
unsafe {
(*node).next = (*self.head).next;
(*node).prev = self.head;
(*self.head).next = node;
(*(*node).next).prev = node;
}
}
// Caller must check `!self.head.is_null()`
unsafe fn drop_entries(&mut self) {
let mut cur = (*self.head).next;
while cur != self.head {
let next = (*cur).next;
            drop(Box::from_raw(cur));
cur = next;
}
}
fn clear_free_list(&mut self) {
unsafe {
let mut free = self.free;
            while !free.is_null() {
let next_free = (*free).next;
drop_empty_node(free);
free = next_free;
}
self.free = ptr::null_mut();
}
}
fn ensure_guard_node(&mut self) {
if self.head.is_null() {
// allocate the guard node if not present
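            // The guard node's key and value are deliberately left
            // uninitialized; it must only ever be freed via `drop_empty_node`.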
unsafe {
let node_layout = std::alloc::Layout::new::<Node<K, V>>();
self.head = std::alloc::alloc(node_layout) as *mut Node<K, V>;
(*self.head).next = self.head;
(*self.head).prev = self.head;
}
}
}
}
impl<K: Hash + Eq, V, S: BuildHasher> LinkedHashMap<K, V, S> {
fn with_map(map: HashMap<KeyRef<K>, *mut Node<K, V>, S>) -> Self {
LinkedHashMap {
map,
head: ptr::null_mut(),
free: ptr::null_mut(),
}
}
/// Creates an empty linked hash map with the given initial hash builder.
pub fn with_hasher(hash_builder: S) -> Self {
Self::with_map(HashMap::with_hasher(hash_builder))
}
/// Creates an empty linked hash map with the given initial capacity and hash builder.
pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> Self {
Self::with_map(HashMap::with_capacity_and_hasher(capacity, hash_builder))
}
/// Reserves capacity for at least `additional` more elements to be inserted into the map. The
/// map may reserve more space to avoid frequent allocations.
///
/// # Panics
///
    /// Panics if the new allocation size overflows `usize`.
pub fn reserve(&mut self, additional: usize) { self.map.reserve(additional); }
/// Shrinks the capacity of the map as much as possible. It will drop down as much as possible
/// while maintaining the internal rules and possibly leaving some space in accordance with the
/// resize policy.
pub fn shrink_to_fit(&mut self) {
self.map.shrink_to_fit();
self.clear_free_list();
}
/// Gets the given key's corresponding entry in the map for in-place manipulation.
///
/// # Examples
///
/// ```
/// use polodb_bson::linked_hash_map::LinkedHashMap;
///
/// let mut letters = LinkedHashMap::new();
///
/// for ch in "a short treatise on fungi".chars() {
/// let counter = letters.entry(ch).or_insert(0);
/// *counter += 1;
/// }
///
/// assert_eq!(letters[&'s'], 2);
/// assert_eq!(letters[&'t'], 3);
/// assert_eq!(letters[&'u'], 1);
/// assert_eq!(letters.get(&'y'), None);
/// ```
pub fn entry(&mut self, k: K) -> Entry<K, V, S> {
let self_ptr: *mut Self = self;
if let Some(entry) = self.map.get_mut(&KeyRef{k: &k}) {
return Entry::Occupied(OccupiedEntry {
entry: *entry,
map: self_ptr,
marker: marker::PhantomData,
});
}
Entry::Vacant(VacantEntry {
key: k,
map: self,
})
}
/// Returns an iterator visiting all entries in insertion order.
/// Iterator element type is `OccupiedEntry<K, V, S>`. Allows for removal
/// as well as replacing the entry.
///
/// # Examples
/// ```
/// use polodb_bson::linked_hash_map::LinkedHashMap;
///
/// let mut map = LinkedHashMap::new();
/// map.insert("a", 10);
/// map.insert("c", 30);
/// map.insert("b", 20);
///
/// {
/// let mut iter = map.entries();
/// let mut entry = iter.next().unwrap();
/// assert_eq!(&"a", entry.key());
/// *entry.get_mut() = 17;
/// }
///
/// assert_eq!(&17, map.get(&"a").unwrap());
/// ```
pub fn entries(&mut self) -> Entries<K, V, S> {
        let head = if !self.head.is_null() {
unsafe { (*self.head).prev }
} else {
ptr::null_mut()
};
Entries {
head,
map: self,
remaining: self.len(),
marker: marker::PhantomData,
}
}
/// Inserts a key-value pair into the map. If the key already existed, the old value is
/// returned.
///
/// # Examples
///
/// ```
/// use polodb_bson::linked_hash_map::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
///
/// map.insert(1, "a");
/// map.insert(2, "b");
/// assert_eq!(map[&1], "a");
/// assert_eq!(map[&2], "b");
/// ```
pub fn insert(&mut self, k: K, v: V) -> Option<V> {
self.ensure_guard_node();
let (node, old_val) = match self.map.get(&KeyRef{k: &k}) {
Some(node) => {
let old_val = unsafe { ptr::replace(&mut (**node).value, v) };
(*node, Some(old_val))
}
None => {
let node = if self.free.is_null() {
Box::into_raw(Box::new(Node::new(k, v)))
} else {
// use a recycled box
unsafe {
let free = self.free;
self.free = (*free).next;
ptr::write(free, Node::new(k, v));
free
}
};
(node, None)
}
};
match old_val {
Some(_) => {
// Existing node, just update LRU position
self.detach(node);
self.attach(node);
}
None => {
let keyref = unsafe { &(*node).key };
self.map.insert(KeyRef{k: keyref}, node);
self.attach(node);
}
}
old_val
}
/// Checks if the map contains the given key.
pub fn contains_key<Q: ?Sized>(&self, k: &Q) -> bool where K: Borrow<Q>, Q: Eq + Hash {
self.map.contains_key(Qey::from_ref(k))
}
/// Returns the value corresponding to the key in the map.
///
/// # Examples
///
/// ```
/// use polodb_bson::linked_hash_map::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
///
/// map.insert(1, "a");
/// map.insert(2, "b");
/// map.insert(2, "c");
/// map.insert(3, "d");
///
/// assert_eq!(map.get(&1), Some(&"a"));
/// assert_eq!(map.get(&2), Some(&"c"));
/// ```
pub fn get<Q: ?Sized>(&self, k: &Q) -> Option<&V> where K: Borrow<Q>, Q: Eq + Hash {
self.map.get(Qey::from_ref(k)).map(|e| unsafe { &(**e).value })
}
/// Returns the mutable reference corresponding to the key in the map.
///
/// # Examples
///
/// ```
/// use polodb_bson::linked_hash_map::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
///
/// map.insert(1, "a");
/// map.insert(2, "b");
///
/// *map.get_mut(&1).unwrap() = "c";
/// assert_eq!(map.get(&1), Some(&"c"));
/// ```
pub fn get_mut<Q: ?Sized>(&mut self, k: &Q) -> Option<&mut V> where K: Borrow<Q>, Q: Eq + Hash {
self.map.get(Qey::from_ref(k)).map(|e| unsafe { &mut (**e).value })
}
/// Returns the value corresponding to the key in the map.
///
/// If value is found, it is moved to the end of the list.
    /// This operation can be used in the implementation of an LRU cache.
///
/// # Examples
///
/// ```
/// use polodb_bson::linked_hash_map::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
///
/// map.insert(1, "a");
/// map.insert(2, "b");
/// map.insert(3, "d");
///
/// assert_eq!(map.get_refresh(&2), Some(&mut "b"));
///
/// assert_eq!((&2, &"b"), map.iter().rev().next().unwrap());
/// ```
pub fn get_refresh<Q: ?Sized>(&mut self, k: &Q) -> Option<&mut V> where K: Borrow<Q>, Q: Eq + Hash {
let (value, node_ptr_opt) = match self.map.get(Qey::from_ref(k)) {
None => (None, None),
Some(node) => {
(Some(unsafe { &mut (**node).value }), Some(*node))
}
};
if let Some(node_ptr) = node_ptr_opt {
self.detach(node_ptr);
self.attach(node_ptr);
}
value
}
/// Removes and returns the value corresponding to the key from the map.
///
/// # Examples
///
/// ```
/// use polodb_bson::linked_hash_map::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
///
/// map.insert(2, "a");
///
/// assert_eq!(map.remove(&1), None);
/// assert_eq!(map.remove(&2), Some("a"));
/// assert_eq!(map.remove(&2), None);
/// assert_eq!(map.len(), 0);
/// ```
pub fn remove<Q: ?Sized>(&mut self, k: &Q) -> Option<V> where K: Borrow<Q>, Q: Eq + Hash {
let removed = self.map.remove(Qey::from_ref(k));
removed.map(|node| {
self.detach(node);
unsafe {
// add to free list
(*node).next = self.free;
self.free = node;
// drop the key and return the value
drop(ptr::read(&(*node).key));
ptr::read(&(*node).value)
}
})
}
/// Returns the maximum number of key-value pairs the map can hold without reallocating.
///
/// # Examples
///
/// ```
/// use polodb_bson::linked_hash_map::LinkedHashMap;
/// let mut map: LinkedHashMap<i32, &str> = LinkedHashMap::new();
/// let capacity = map.capacity();
/// ```
pub fn capacity(&self) -> usize {
self.map.capacity()
}
/// Removes the first entry.
///
    /// Can be used in the implementation of an LRU cache.
///
/// # Examples
///
/// ```
/// use polodb_bson::linked_hash_map::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
/// map.insert(1, 10);
/// map.insert(2, 20);
/// map.pop_front();
/// assert_eq!(map.get(&1), None);
/// assert_eq!(map.get(&2), Some(&20));
/// ```
#[inline]
pub fn pop_front(&mut self) -> Option<(K, V)> {
if self.is_empty() {
return None
}
let lru = unsafe { (*self.head).prev };
self.detach(lru);
self.map
.remove(&KeyRef{k: unsafe { &(*lru).key }})
.map(|e| {
let e = *unsafe { Box::from_raw(e) };
(e.key, e.value)
})
}
/// Gets the first entry.
///
/// # Examples
///
/// ```
/// use polodb_bson::linked_hash_map::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
/// map.insert(1, 10);
/// map.insert(2, 20);
/// assert_eq!(map.front(), Some((&1, &10)));
/// ```
#[inline]
pub fn front(&self) -> Option<(&K, &V)> {
if self.is_empty() {
return None
}
let lru = unsafe { (*self.head).prev };
self.map
.get(&KeyRef{k: unsafe { &(*lru).key }})
.map(|e| unsafe { (&(**e).key, &(**e).value) })
}
/// Removes the last entry.
///
/// # Examples
///
/// ```
/// use polodb_bson::linked_hash_map::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
/// map.insert(1, 10);
/// map.insert(2, 20);
/// map.pop_back();
/// assert_eq!(map.get(&1), Some(&10));
/// assert_eq!(map.get(&2), None);
/// ```
#[inline]
pub fn pop_back(&mut self) -> Option<(K, V)> {
if self.is_empty() {
return None
}
let mru = unsafe { (*self.head).next };
self.detach(mru);
self.map
.remove(&KeyRef{k: unsafe { &(*mru).key }})
.map(|e| {
let e = *unsafe { Box::from_raw(e) };
(e.key, e.value)
})
}
/// Gets the last entry.
///
/// # Examples
///
/// ```
/// use polodb_bson::linked_hash_map::LinkedHashMap;
/// let mut map = LinkedHashMap::new();
/// map.insert(1, 10);
/// map.insert(2, 20);
/// assert_eq!(map.back(), Some((&2, &20)));
/// ```
#[inline]
pub fn back(&mut self) -> Option<(&K, &V)> {
if self.is_empty() {
return None
}
let mru = unsafe { (*self.head).next };
self.map
.get(&KeyRef{k: unsafe { &(*mru).key }})
.map(|e| unsafe { (&(**e).key, &(**e).value) })
}
/// Returns the number of key-value pairs in the map.
pub fn len(&self) -> usize { self.map.len() }
/// Returns whether the map is currently empty.
pub fn is_empty(&self) -> bool { self.len() == 0 }
/// Returns a reference to the map's hasher.
pub fn hasher(&self) -> &S {
self.map.hasher()
}
/// Clears the map of all key-value pairs.
pub fn clear(&mut self) {
self.map.clear();
// update the guard node if present
        if !self.head.is_null() {
unsafe {
self.drop_entries();
(*self.head).prev = self.head;
(*self.head).next = self.head;
}
}
}
/// Returns a double-ended iterator visiting all key-value pairs in order of insertion.
/// Iterator element type is `(&'a K, &'a V)`
///
/// # Examples
/// ```
/// use polodb_bson::linked_hash_map::LinkedHashMap;
///
/// let mut map = LinkedHashMap::new();
/// map.insert("a", 10);
/// map.insert("c", 30);
/// map.insert("b", 20);
///
/// let mut iter = map.iter();
/// assert_eq!((&"a", &10), iter.next().unwrap());
/// assert_eq!((&"c", &30), iter.next().unwrap());
/// assert_eq!((&"b", &20), iter.next().unwrap());
/// assert_eq!(None, iter.next());
/// ```
pub fn iter(&self) -> Iter<K, V> {
let head = if self.head.is_null() {
ptr::null_mut()
} else {
unsafe { (*self.head).prev }
};
Iter {
head,
tail: self.head,
remaining: self.len(),
marker: marker::PhantomData,
}
}
/// Returns a double-ended iterator visiting all key-value pairs in order of insertion.
/// Iterator element type is `(&'a K, &'a mut V)`
/// # Examples
/// ```
/// use polodb_bson::linked_hash_map::LinkedHashMap;
///
/// let mut map = LinkedHashMap::new();
/// map.insert("a", 10);
/// map.insert("c", 30);
/// map.insert("b", 20);
///
/// {
/// let mut iter = map.iter_mut();
/// let mut entry = iter.next().unwrap();
/// assert_eq!(&"a", entry.0);
/// *entry.1 = 17;
/// }
///
/// assert_eq!(&17, map.get(&"a").unwrap());
/// ```
pub fn iter_mut(&mut self) -> IterMut<K, V> {
let head = if self.head.is_null() {
ptr::null_mut()
} else {
unsafe { (*self.head).prev }
};
IterMut {
head,
tail: self.head,
remaining: self.len(),
marker: marker::PhantomData,
}
}
/// Returns a double-ended iterator visiting all key in order of insertion.
///
/// # Examples
/// ```
/// use polodb_bson::linked_hash_map::LinkedHashMap;
///
/// let mut map = LinkedHashMap::new();
/// map.insert('a', 10);
/// map.insert('c', 30);
/// map.insert('b', 20);
///
/// let mut keys = map.keys();
/// assert_eq!(&'a', keys.next().unwrap());
/// assert_eq!(&'c', keys.next().unwrap());
/// assert_eq!(&'b', keys.next().unwrap());
/// assert_eq!(None, keys.next());
/// ```
pub fn keys(&self) -> Keys<K, V> {
Keys { inner: self.iter() }
}
/// Returns a double-ended iterator visiting all values in order of insertion.
///
/// # Examples
/// ```
/// use polodb_bson::linked_hash_map::LinkedHashMap;
///
/// let mut map = LinkedHashMap::new();
/// map.insert('a', 10);
/// map.insert('c', 30);
/// map.insert('b', 20);
///
/// let mut values = map.values();
/// assert_eq!(&10, values.next().unwrap());
/// assert_eq!(&30, values.next().unwrap());
/// assert_eq!(&20, values.next().unwrap());
/// assert_eq!(None, values.next());
/// ```
pub fn values(&self) -> Values<K, V> {
Values { inner: self.iter() }
}
}
impl<'a, K, V, S, Q: ?Sized> Index<&'a Q> for LinkedHashMap<K, V, S>
where K: Hash + Eq + Borrow<Q>, S: BuildHasher, Q: Eq + Hash
{
type Output = V;
fn index(&self, index: &'a Q) -> &V {
self.get(index).expect("no entry found for key")
}
}
impl<'a, K, V, S, Q: ?Sized> IndexMut<&'a Q> for LinkedHashMap<K, V, S>
where K: Hash + Eq + Borrow<Q>, S: BuildHasher, Q: Eq + Hash
{
fn index_mut(&mut self, index: &'a Q) -> &mut V {
self.get_mut(index).expect("no entry found for key")
}
}
impl<K: Hash + Eq + Clone, V: Clone, S: BuildHasher + Clone> Clone for LinkedHashMap<K, V, S> {
fn clone(&self) -> Self {
let mut map = Self::with_hasher(self.map.hasher().clone());
map.extend(self.iter().map(|(k, v)| (k.clone(), v.clone())));
map
}
}
impl<K: Hash + Eq, V, S: BuildHasher + Default> Default for LinkedHashMap<K, V, S> {
fn default() -> Self { Self::with_hasher(S::default()) }
}
impl<K: Hash + Eq, V, S: BuildHasher> Extend<(K, V)> for LinkedHashMap<K, V, S> {
fn extend<I: IntoIterator<Item=(K, V)>>(&mut self, iter: I) {
for (k, v) in iter {
self.insert(k, v);
}
}
}
impl<'a, K, V, S> Extend<(&'a K, &'a V)> for LinkedHashMap<K, V, S>
where K: 'a + Hash + Eq + Copy, V: 'a + Copy, S: BuildHasher,
{
fn extend<I: IntoIterator<Item = (&'a K, &'a V)>>(&mut self, iter: I) {
for (&k, &v) in iter {
self.insert(k, v);
}
}
}
impl<K: Hash + Eq, V, S: BuildHasher + Default> iter::FromIterator<(K, V)> for LinkedHashMap<K, V, S> {
fn from_iter<I: IntoIterator<Item=(K, V)>>(iter: I) -> Self {
let iter = iter.into_iter();
let mut map = Self::with_capacity_and_hasher(iter.size_hint().0, S::default());
map.extend(iter);
map
}
}
impl<A: fmt::Debug + Hash + Eq, B: fmt::Debug, S: BuildHasher> fmt::Debug for LinkedHashMap<A, B, S> {
/// Returns a string that lists the key-value pairs in insertion order.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_map().entries(self).finish()
}
}
impl<K: Hash + Eq, V: PartialEq, S: BuildHasher> PartialEq for LinkedHashMap<K, V, S> {
fn eq(&self, other: &Self) -> bool {
self.len() == other.len() && self.iter().eq(other)
}
}
impl<K: Hash + Eq, V: Eq, S: BuildHasher> Eq for LinkedHashMap<K, V, S> {}
impl<K: Hash + Eq + PartialOrd, V: PartialOrd, S: BuildHasher> PartialOrd for LinkedHashMap<K, V, S> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.iter().partial_cmp(other)
}
fn lt(&self, other: &Self) -> bool {
self.iter().lt(other)
}
fn le(&self, other: &Self) -> bool {
self.iter().le(other)
}
fn ge(&self, other: &Self) -> bool {
self.iter().ge(other)
}
fn gt(&self, other: &Self) -> bool {
self.iter().gt(other)
}
}
impl<K: Hash + Eq + Ord, V: Ord, S: BuildHasher> Ord for LinkedHashMap<K, V, S> {
fn cmp(&self, other: &Self) -> Ordering {
self.iter().cmp(other)
}
}
impl<K: Hash + Eq, V: Hash, S: BuildHasher> Hash for LinkedHashMap<K, V, S> {
fn hash<H: Hasher>(&self, h: &mut H) { for e in self.iter() { e.hash(h); } }
}
unsafe impl<K: Send, V: Send, S: Send> Send for LinkedHashMap<K, V, S> {}
unsafe impl<K: Sync, V: Sync, S: Sync> Sync for LinkedHashMap<K, V, S> {}
impl<K, V, S> Drop for LinkedHashMap<K, V, S> {
fn drop(&mut self) {
if !self.head.is_null() {
unsafe {
self.drop_entries();
drop_empty_node(self.head);
}
}
self.clear_free_list();
}
}
/// An insertion-order iterator over a `LinkedHashMap`'s entries, with immutable references to the
/// values.
pub struct Iter<'a, K: 'a, V: 'a> {
head: *const Node<K, V>,
tail: *const Node<K, V>,
remaining: usize,
marker: marker::PhantomData<(&'a K, &'a V)>,
}
/// An insertion-order iterator over a `LinkedHashMap`'s entries, with mutable references to the
/// values.
pub struct IterMut<'a, K: 'a, V: 'a> {
head: *mut Node<K, V>,
tail: *mut Node<K, V>,
remaining: usize,
marker: marker::PhantomData<(&'a K, &'a mut V)>,
}
/// A consuming insertion-order iterator over a `LinkedHashMap`'s entries.
pub struct IntoIter<K, V> {
head: *mut Node<K, V>,
tail: *mut Node<K, V>,
remaining: usize,
marker: marker::PhantomData<(K, V)>,
}
/// An insertion-order iterator over a `LinkedHashMap`'s entries represented as
/// an `OccupiedEntry`.
pub struct Entries<'a, K: 'a, V: 'a, S: 'a = hash_map::RandomState> {
map: *mut LinkedHashMap<K, V, S>,
head: *mut Node<K, V>,
remaining: usize,
marker: marker::PhantomData<(&'a K, &'a mut V, &'a S)>,
}
unsafe impl<'a, K, V> Send for Iter<'a, K, V> where K: Send, V: Send {}
unsafe impl<'a, K, V> Send for IterMut<'a, K, V> where K: Send, V: Send {}
unsafe impl<K, V> Send for IntoIter<K, V> where K: Send, V: Send {}
unsafe impl<'a, K, V, S> Send for Entries<'a, K, V, S> where K: Send, V: Send, S: Send {}
unsafe impl<'a, K, V> Sync for Iter<'a, K, V> where K: Sync, V: Sync {}
unsafe impl<'a, K, V> Sync for IterMut<'a, K, V> where K: Sync, V: Sync {}
unsafe impl<K, V> Sync for IntoIter<K, V> where K: Sync, V: Sync {}
unsafe impl<'a, K, V, S> Sync for Entries<'a, K, V, S> where K: Sync, V: Sync, S: Sync {}
impl<'a, K, V> Clone for Iter<'a, K, V> {
fn clone(&self) -> Self { Iter { ..*self } }
}
impl<K, V> Clone for IntoIter<K, V> where K: Clone, V: Clone {
fn clone(&self) -> Self {
if self.remaining == 0 {
return IntoIter { ..*self }
}
fn clone_node<K, V>(e: *mut Node<K, V>) -> *mut Node<K, V>
where K: Clone, V: Clone,
{
Box::into_raw(Box::new(Node::new(
unsafe { (*e).key.clone() }, unsafe { (*e).value.clone() }
)))
}
let mut cur = self.head;
let head = clone_node(cur);
let mut tail = head;
for _ in 1..self.remaining {
unsafe {
(*tail).prev = clone_node((*cur).prev);
(*(*tail).prev).next = tail;
tail = (*tail).prev;
cur = (*cur).prev;
}
}
IntoIter {
head,
tail,
remaining: self.remaining,
marker: marker::PhantomData,
}
}
}
impl<'a, K, V> Iterator for Iter<'a, K, V> {
type Item = (&'a K, &'a V);
fn next(&mut self) -> Option<(&'a K, &'a V)> {
if self.head == self.tail {
None
} else {
self.remaining -= 1;
unsafe {
let r = Some((&(*self.head).key, &(*self.head).value));
self.head = (*self.head).prev;
r
}
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.remaining, Some(self.remaining))
}
}
impl<'a, K, V> Iterator for IterMut<'a, K, V> {
type Item = (&'a K, &'a mut V);
fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
if self.head == self.tail {
None
} else {
self.remaining -= 1;
unsafe {
let r = Some((&(*self.head).key, &mut (*self.head).value));
self.head = (*self.head).prev;
r
}
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.remaining, Some(self.remaining))
}
}
impl<K, V> Iterator for IntoIter<K, V> {
type Item = (K, V);
fn next(&mut self) -> Option<(K, V)> {
if self.remaining == 0 {
return None
}
self.remaining -= 1;
unsafe {
let prev = (*self.head).prev;
let e = *Box::from_raw(self.head);
self.head = prev;
Some((e.key, e.value))
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.remaining, Some(self.remaining))
}
}
impl<'a, K, V, S: BuildHasher> Iterator for Entries<'a, K, V, S> {
type Item = OccupiedEntry<'a, K, V, S>;
fn next(&mut self) -> Option<OccupiedEntry<'a, K, V, S>> {
if self.remaining == 0 {
None
} else {
self.remaining -= 1;
unsafe {
let r = Some(OccupiedEntry {
map: self.map,
entry: self.head,
marker: marker::PhantomData,
});
self.head = (*self.head).prev;
r
}
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
(self.remaining, Some(self.remaining))
}
}
impl<'a, K, V> DoubleEndedIterator for Iter<'a, K, V> {
fn next_back(&mut self) -> Option<(&'a K, &'a V)> {
if self.head == self.tail {
None
} else {
self.remaining -= 1;
unsafe {
self.tail = (*self.tail).next;
Some((&(*self.tail).key, &(*self.tail).value))
}
}
}
}
impl<'a, K, V> DoubleEndedIterator for IterMut<'a, K, V> {
fn next_back(&mut self) -> Option<(&'a K, &'a mut V)> {
if self.head == self.tail {
None
} else {
self.remaining -= 1;
unsafe {
self.tail = (*self.tail).next;
Some((&(*self.tail).key, &mut (*self.tail).value))
}
}
}
}
impl<K, V> DoubleEndedIterator for IntoIter<K, V> {
fn next_back(&mut self) -> Option<(K, V)> {
if self.remaining == 0 {
return None
}
self.remaining -= 1;
unsafe {
let next = (*self.tail).next;
let e = *Box::from_raw(self.tail);
self.tail = next;
Some((e.key, e.value))
}
}
}
impl<'a, K, V> ExactSizeIterator for Iter<'a, K, V> {
fn len(&self) -> usize { self.remaining }
}
impl<'a, K, V> ExactSizeIterator for IterMut<'a, K, V> {
fn len(&self) -> usize { self.remaining }
}
impl<K, V> ExactSizeIterator for IntoIter<K, V> {
fn len(&self) -> usize { self.remaining }
}
impl<K, V> Drop for IntoIter<K, V> {
fn drop(&mut self) {
for _ in 0..self.remaining {
unsafe {
let next = (*self.tail).next;
                drop(Box::from_raw(self.tail));
self.tail = next;
}
}
}
}
/// An insertion-order iterator over a `LinkedHashMap`'s keys.
pub struct Keys<'a, K: 'a, V: 'a> {
inner: Iter<'a, K, V>,
}
impl<'a, K, V> Clone for Keys<'a, K, V> {
fn clone(&self) -> Self { Keys { inner: self.inner.clone() } }
}
impl<'a, K, V> Iterator for Keys<'a, K, V> {
type Item = &'a K;
#[inline] fn next(&mut self) -> Option<&'a K> { self.inner.next().map(|e| e.0) }
#[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
}
impl<'a, K, V> DoubleEndedIterator for Keys<'a, K, V> {
#[inline] fn next_back(&mut self) -> Option<&'a K> { self.inner.next_back().map(|e| e.0) }
}
impl<'a, K, V> ExactSizeIterator for Keys<'a, K, V> {
fn len(&self) -> usize { self.inner.len() }
}
/// An insertion-order iterator over a `LinkedHashMap`'s values.
pub struct Values<'a, K: 'a, V: 'a> {
inner: Iter<'a, K, V>,
}
impl<'a, K, V> Clone for Values<'a, K, V> {
fn clone(&self) -> Self { Values { inner: self.inner.clone() } }
}
impl<'a, K, V> Iterator for Values<'a, K, V> {
type Item = &'a V;
#[inline] fn next(&mut self) -> Option<&'a V> { self.inner.next().map(|e| e.1) }
#[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() }
}
impl<'a, K, V> DoubleEndedIterator for Values<'a, K, V> {
#[inline] fn next_back(&mut self) -> Option<&'a V> { self.inner.next_back().map(|e| e.1) }
}
impl<'a, K, V> ExactSizeIterator for Values<'a, K, V> {
fn len(&self) -> usize { self.inner.len() }
}
impl<'a, K: Hash + Eq, V, S: BuildHasher> IntoIterator for &'a LinkedHashMap<K, V, S> {
type Item = (&'a K, &'a V);
type IntoIter = Iter<'a, K, V>;
fn into_iter(self) -> Iter<'a, K, V> { self.iter() }
}
impl<'a, K: Hash + Eq, V, S: BuildHasher> IntoIterator for &'a mut LinkedHashMap<K, V, S> {
type Item = (&'a K, &'a mut V);
type IntoIter = IterMut<'a, K, V>;
fn into_iter(self) -> IterMut<'a, K, V> { self.iter_mut() }
}
impl<K: Hash + Eq, V, S: BuildHasher> IntoIterator for LinkedHashMap<K, V, S> {
type Item = (K, V);
type IntoIter = IntoIter<K, V>;
fn into_iter(mut self) -> IntoIter<K, V> {
let (head, tail) = if !self.head.is_null() {
unsafe { ((*self.head).prev, (*self.head).next) }
} else {
(ptr::null_mut(), ptr::null_mut())
};
let len = self.len();
if !self.head.is_null() {
unsafe { drop_empty_node(self.head) }
}
self.clear_free_list();
// drop the HashMap but not the LinkedHashMap
unsafe { ptr::drop_in_place(&mut self.map); }
mem::forget(self);
IntoIter {
head,
tail,
remaining: len,
marker: marker::PhantomData,
}
}
}
/// A view into a single location in a map, which may be vacant or occupied.
pub enum Entry<'a, K: 'a, V: 'a, S: 'a = hash_map::RandomState> {
/// An occupied Entry.
Occupied(OccupiedEntry<'a, K, V, S>),
/// A vacant Entry.
Vacant(VacantEntry<'a, K, V, S>),
}
/// A view into a single occupied location in a `LinkedHashMap`.
pub struct OccupiedEntry<'a, K: 'a, V: 'a, S: 'a = hash_map::RandomState> {
entry: *mut Node<K, V>,
map: *mut LinkedHashMap<K, V, S>,
marker: marker::PhantomData<&'a K>,
}
/// A view into a single empty location in a `LinkedHashMap`.
pub struct VacantEntry<'a, K: 'a, V: 'a, S: 'a = hash_map::RandomState> {
key: K,
map: &'a mut LinkedHashMap<K, V, S>,
}
impl<'a, K: Hash + Eq, V, S: BuildHasher> Entry<'a, K, V, S> {
/// Returns the entry key
///
/// # Examples
///
/// ```
/// use polodb_bson::linked_hash_map::LinkedHashMap;
///
/// let mut map = LinkedHashMap::<String, u32>::new();
///
/// assert_eq!("hello", map.entry("hello".to_string()).key());
/// ```
pub fn key(&self) -> &K {
match *self {
Entry::Occupied(ref e) => e.key(),
Entry::Vacant(ref e) => e.key(),
}
}
/// Ensures a value is in the entry by inserting the default if empty, and returns
/// a mutable reference to the value in the entry.
pub fn or_insert(self, default: V) -> &'a mut V {
match self {
Entry::Occupied(entry) => entry.into_mut(),
Entry::Vacant(entry) => entry.insert(default),
}
}
/// Ensures a value is in the entry by inserting the result of the default function if empty,
/// and returns a mutable reference to the value in the entry.
pub fn or_insert_with<F: FnOnce() -> V>(self, default: F) -> &'a mut V {
match self {
Entry::Occupied(entry) => entry.into_mut(),
Entry::Vacant(entry) => entry.insert(default()),
}
}
}
impl<'a, K: Hash + Eq, V, S: BuildHasher> OccupiedEntry<'a, K, V, S> {
/// Gets a reference to the entry key
///
/// # Examples
///
/// ```
/// use polodb_bson::linked_hash_map::LinkedHashMap;
///
/// let mut map = LinkedHashMap::new();
///
/// map.insert("foo".to_string(), 1);
/// assert_eq!("foo", map.entry("foo".to_string()).key());
/// ```
pub fn key(&self) -> &K {
unsafe { &(*self.entry).key }
}
/// Gets a reference to the value in the entry.
pub fn get(&self) -> &V {
unsafe { &(*self.entry).value }
}
/// Gets a mutable reference to the value in the entry.
pub fn get_mut(&mut self) -> &mut V {
unsafe { &mut (*self.entry).value }
}
/// Converts the OccupiedEntry into a mutable reference to the value in the entry
/// with a lifetime bound to the map itself
pub fn into_mut(self) -> &'a mut V {
unsafe { &mut (*self.entry).value }
}
/// Sets the value of the entry, and returns the entry's old value
pub fn insert(&mut self, value: V) -> V {
unsafe {
(*self.map).ensure_guard_node();
let old_val = mem::replace(&mut (*self.entry).value, value);
let node_ptr: *mut Node<K, V> = self.entry;
// Existing node, just update LRU position
(*self.map).detach(node_ptr);
(*self.map).attach(node_ptr);
old_val
}
}
/// Takes the value out of the entry, and returns it
pub fn remove(self) -> V {
unsafe { (*self.map).remove(&(*self.entry).key) }.unwrap()
}
}
impl<'a, K: 'a + Hash + Eq, V: 'a, S: BuildHasher> VacantEntry<'a, K, V, S> {
/// Gets a reference to the entry key
///
/// # Examples
///
/// ```
/// use polodb_bson::linked_hash_map::LinkedHashMap;
///
/// let mut map = LinkedHashMap::<String, u32>::new();
///
/// assert_eq!("foo", map.entry("foo".to_string()).key());
/// ```
pub fn key(&self) -> &K {
&self.key
}
/// Sets the value of the entry with the VacantEntry's key,
/// and returns a mutable reference to it
pub fn insert(self, value: V) -> &'a mut V {
self.map.ensure_guard_node();
let node = if self.map.free.is_null() {
Box::into_raw(Box::new(Node::new(self.key, value)))
} else {
// use a recycled box
unsafe {
let free = self.map.free;
self.map.free = (*free).next;
ptr::write(free, Node::new(self.key, value));
free
}
};
let keyref = unsafe { &(*node).key };
self.map.attach(node);
let ret = self.map.map.entry(KeyRef{k: keyref}).or_insert(node);
unsafe { &mut (**ret).value }
}
}
#[cfg(all(feature = "nightly", test))]
mod bench {
extern crate test;
use super::LinkedHashMap;
#[bench]
fn not_recycled_cycling(b: &mut test::Bencher) {
let mut hash_map = LinkedHashMap::with_capacity(1000);
for i in 0usize..1000 {
hash_map.insert(i, i);
}
b.iter(|| {
for i in 0usize..1000 {
hash_map.remove(&i);
}
hash_map.clear_free_list();
for i in 0usize..1000 {
hash_map.insert(i, i);
}
})
}
#[bench]
fn recycled_cycling(b: &mut test::Bencher) {
let mut hash_map = LinkedHashMap::with_capacity(1000);
for i in 0usize..1000 {
hash_map.insert(i, i);
}
b.iter(|| {
for i in 0usize..1000 {
hash_map.remove(&i);
}
for i in 0usize..1000 {
hash_map.insert(i, i);
}
})
}
}
| 29.488141 | 104 | 0.507615 |
234ec176f583bc2b0c880915277e595a5de1fc70 | 12,527 | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
#![forbid(unsafe_code)]
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::{
collections::HashMap,
convert::{TryFrom, TryInto},
};
use thiserror::Error;
use ureq;
#[derive(Debug, Error, PartialEq)]
pub enum Error {
#[error("Http error: {1}")]
HttpError(u16, String),
#[error("Internal error: {0}")]
InternalError(String),
#[error("Missing field {0}")]
MissingField(String),
#[error("404: Not Found: {0}/{1}")]
NotFound(String, String),
#[error("Serialization error: {0}")]
SerializationError(String),
}
impl From<std::io::Error> for Error {
fn from(error: std::io::Error) -> Self {
Self::SerializationError(format!("{}", error))
}
}
impl From<ureq::Response> for Error {
fn from(response: ureq::Response) -> Self {
Error::HttpError(response.status(), response.status_line().into())
}
}
impl From<serde_json::Error> for Error {
fn from(error: serde_json::Error) -> Self {
Self::SerializationError(format!("{}", error))
}
}
/// `Client` wraps the RESTful interface to a Vault service. Learn more
/// here: https://www.vaultproject.io/api-docs/
///
/// A brief overview of Vault:
///
/// * Vault stores data in various paths, in the case of a WebAPI, different URLs. So, for example,
/// both a secret and a policy are hosted at distinct paths. Policies are then used to define which
/// actors can access those paths and with what actions.
/// * Vault uses a KV store separated into various containers or secrets. By analogy with a file
/// system, a secret is like a folder, keys are like files, and values are the file contents.
/// Policies are only applied at the folder level.
/// * Data is accessed in Vault via tokens. Policies can only be granted during creation of a
/// token, but policies can be amended afterward. So you cannot add new policies to a token, but
/// you can increase the token's abilities by modifying the underlying policies.
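///
/// A minimal usage sketch (hedged: the address and token below are placeholders
/// for a reachable Vault server and a sufficiently privileged token):
///
/// ```ignore
/// let client = Client::new("http://127.0.0.1:8200".into(), "root-token".into());
/// client.write_secret("app", "api_key", "s3cr3t")?;
/// assert_eq!(client.read_secret("app", "api_key")?, "s3cr3t");
/// ```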
pub struct Client {
host: String,
token: String,
}
impl Client {
pub fn new(host: String, token: String) -> Self {
Self { host, token }
}
/// Create a new policy in Vault, see the explanation for Policy for how the data is
    /// structured. Vault does not distinguish between a create and an update. An update must
    /// first read the existing policy, amend the contents, and then be applied via this API.
pub fn set_policy(&self, policy_name: &str, policy: &Policy) -> Result<(), Error> {
let response = ureq::post(&format!("{}/v1/sys/policy/{}", self.host, policy_name))
.set("X-Vault-Token", &self.token)
.timeout_connect(10_000)
.send_json(policy.try_into()?);
if response.ok() {
Ok(())
} else {
Err(response.into())
}
}
/// Retrieves the policy at the given policy name.
pub fn read_policy(&self, policy_name: &str) -> Result<Policy, Error> {
let response = ureq::get(&format!("{}/v1/sys/policy/{}", self.host, policy_name))
.set("X-Vault-Token", &self.token)
.timeout_connect(10_000)
.call();
match response.status() {
200 => Ok(Policy::try_from(response.into_json()?)?),
_ => Err(response.into()),
}
}
/// Creates a new token or identity for accessing Vault. The token will have access to anything
    /// under the default policy and any prescribed policies.
pub fn create_token(&self, policies: Vec<&str>) -> Result<String, Error> {
let response = ureq::post(&format!("{}/v1/auth/token/create", self.host))
.set("X-Vault-Token", &self.token)
.timeout_connect(10_000)
.send_json(json!({ "policies": policies }));
if response.ok() {
let response: CreateTokenResponse = serde_json::from_str(&response.into_string()?)?;
Ok(response.auth.client_token)
} else {
Err(response.into())
}
}
/// List all stored secrets
pub fn list_secrets(&self, secret: &str) -> Result<Vec<String>, Error> {
let response = ureq::request(
"LIST",
&format!("{}/v1/secret/metadata/{}", self.host, secret),
)
.set("X-Vault-Token", &self.token)
.timeout_connect(10_000)
.call();
match response.status() {
200 => {
let response: ReadSecretListResponse =
serde_json::from_str(&response.into_string()?)?;
Ok(response.data.keys)
}
// There are no secrets.
404 => Ok(vec![]),
_ => Err(response.into()),
}
}
/// Delete a specific secret store
pub fn delete_secret(&self, secret: &str) -> Result<(), Error> {
let response = ureq::delete(&format!("{}/v1/secret/metadata/{}", self.host, secret))
.set("X-Vault-Token", &self.token)
.timeout_connect(10_000)
.call();
if response.ok() {
Ok(())
} else {
Err(response.into())
}
}
/// Read a key/value pair from a given secret store.
pub fn read_secret(&self, secret: &str, key: &str) -> Result<String, Error> {
let response = ureq::get(&format!("{}/v1/secret/data/{}", self.host, secret))
.set("X-Vault-Token", &self.token)
.timeout_connect(10_000)
.call();
match response.status() {
200 => {
let mut response: ReadSecretResponse =
serde_json::from_str(&response.into_string()?)?;
let value = response
.data
.data
.remove(key)
.ok_or_else(|| Error::NotFound(secret.into(), key.into()))?;
Ok(value)
}
404 => Err(Error::NotFound(secret.into(), key.into())),
_ => Err(response.into()),
}
}
/// Returns whether or not the vault is unsealed (can be read from / written to). This can be
/// queried without authentication.
pub fn unsealed(&self) -> Result<bool, Error> {
let response = ureq::get(&format!("{}/v1/sys/seal-status", self.host))
.timeout_connect(10_000)
.call();
match response.status() {
200 => {
let response: SealStatusResponse = serde_json::from_str(&response.into_string()?)?;
Ok(!response.sealed)
}
_ => Err(response.into()),
}
}
/// Create or update a key/value pair in a given secret store.
pub fn write_secret(&self, secret: &str, key: &str, value: &str) -> Result<(), Error> {
let response = ureq::put(&format!("{}/v1/secret/data/{}", self.host, secret))
.set("X-Vault-Token", &self.token)
.timeout_connect(10_000)
.send_json(json!({ "data": { key: value } }));
match response.status() {
200 => Ok(()),
_ => Err(response.into()),
}
}
}
/// Below is a sample output of a CreateTokenResponse. Only the fields leveraged by this framework
/// are decoded.
/// {
/// "request_id": "f00341c1-fad5-f6e6-13fd-235617f858a1",
/// "lease_id": "",
/// "renewable": false,
/// "lease_duration": 0,
/// "data": null,
/// "wrap_info": null,
/// "warnings": [
/// "Policy \"stage\" does not exist",
/// "Policy \"web\" does not exist"
/// ],
/// "auth": {
/// "client_token": "s.wOrq9dO9kzOcuvB06CMviJhZ",
/// "accessor": "B6oixijqmeR4bsLOJH88Ska9",
/// "policies": ["default", "stage", "web"],
/// "token_policies": ["default", "stage", "web"],
/// "metadata": {
/// "user": "armon"
/// },
/// "lease_duration": 3600,
/// "renewable": true,
/// "entity_id": "",
/// "token_type": "service",
/// "orphan": false
/// }
/// }
#[derive(Debug, Deserialize, PartialEq, Serialize)]
struct CreateTokenResponse {
auth: CreateTokenAuth,
}
/// See CreateTokenResponse
#[derive(Debug, Deserialize, PartialEq, Serialize)]
struct CreateTokenAuth {
client_token: String,
}
/// Below is a sample output of ReadSecretListResponse. All fields are decoded and used.
/// Note: in the case that a secret contains a subpath, that will be returned. Vault does
/// not automatically recurse.
/// {
/// "data": {
/// "keys": ["foo", "foo/"]
/// }
/// }
#[derive(Debug, Deserialize, PartialEq, Serialize)]
struct ReadSecretListResponse {
data: ReadSecretListData,
}
/// See ReadSecretListResponse
#[derive(Debug, Deserialize, PartialEq, Serialize)]
struct ReadSecretListData {
keys: Vec<String>,
}
/// Below is a sample output of ReadSecretResponse. Note, this returns all keys within a secret.
/// Only fields leveraged by this framework are decoded.
/// {
/// "data": {
/// "data": {
/// "foo": "bar"
/// },
/// "metadata": {
/// "created_time": "2018-03-22T02:24:06.945319214Z",
/// "deletion_time": "",
/// "destroyed": false,
/// "version": 1
/// }
/// }
/// }
#[derive(Debug, Deserialize, PartialEq, Serialize)]
struct ReadSecretResponse {
data: ReadSecretData,
}
/// See ReadPolicyResponse
#[derive(Debug, Deserialize, PartialEq, Serialize)]
struct ReadSecretData {
data: HashMap<String, String>,
}
/// This data structure is used to represent both policies read from Vault and written to Vault.
/// Thus a Policy that was read can be amended and written back. Vault stores the rules, or
/// per-path policies, as an encoded JSON string, which effectively means JSON within JSON, hence
/// the unusual semantics below.
/// {
/// rules: json!{
/// path: {
/// 'auth/*': { capabilities: ['create', 'read', 'update', 'delete', 'list', 'sudo'] },
/// 'sys/auth/*': { capabilities: ['create', 'read', 'update', 'delete', 'sudo'] },
/// }
/// }
/// }
/// Note: Vault claims `rules` is deprecated and `policy` should be used instead, but that
/// doesn't seem to work and would make reading asymmetrical with writing.
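///
/// # Examples
///
/// A minimal sketch of building a policy and installing it (hedged: the policy
/// path and name are illustrative, and `client` is a `Client` from this module):
///
/// ```ignore
/// let mut policy = Policy::new();
/// policy.add_policy("secret/*", vec![Capability::Read, Capability::List]);
/// client.set_policy("reader", &policy)?;
/// ```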
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct Policy {
#[serde(skip)]
internal_rules: PolicyPaths,
rules: String,
}
impl Policy {
pub fn new() -> Self {
Self {
internal_rules: PolicyPaths {
path: HashMap::new(),
},
rules: "".to_string(),
}
}
pub fn add_policy(&mut self, path: &str, capabilities: Vec<Capability>) {
let path_policy = PathPolicy { capabilities };
self.internal_rules
.path
.insert(path.to_string(), path_policy);
}
}
impl TryFrom<serde_json::Value> for Policy {
type Error = serde_json::Error;
fn try_from(value: serde_json::Value) -> Result<Self, Self::Error> {
let mut policy: Self = serde_json::from_value(value)?;
policy.internal_rules = serde_json::from_str(&policy.rules)?;
Ok(policy)
}
}
impl TryFrom<&Policy> for serde_json::Value {
type Error = serde_json::Error;
fn try_from(policy: &Policy) -> Result<Self, Self::Error> {
Ok(json!({"rules": serde_json::to_string(&policy.internal_rules)?}))
}
}
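// A minimal sketch of the read/amend/write cycle described above, exercising
// the json-within-json encoding of `rules` through the TryFrom impls:
#[cfg(test)]
mod policy_round_trip_test {
    use super::*;
    use std::convert::TryFrom;

    #[test]
    fn policy_survives_round_trip() {
        let mut policy = Policy::new();
        policy.add_policy("auth/*", vec![Capability::Read, Capability::List]);
        // Encode for writing: {"rules": "<json-encoded PolicyPaths>"}.
        let encoded = serde_json::Value::try_from(&policy).expect("encode policy");
        // Decode as if read back from Vault; internal_rules is re-parsed from
        // the embedded `rules` string.
        let decoded = Policy::try_from(encoded).expect("decode policy");
        assert_eq!(policy.internal_rules, decoded.internal_rules);
    }
}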
/// Represents the policy for a given path.
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct PolicyPaths {
path: HashMap<String, PathPolicy>,
}
/// Represents the set of capabilities used within a policy.
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct PathPolicy {
capabilities: Vec<Capability>,
}
/// The various set of capabilities available to a policy within Vault.
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum Capability {
Create,
Delete,
Deny,
List,
Read,
Sudo,
Update,
}
/// Below is an example of SealStatusResponse. Only the fields leveraged by this framework are
/// decoded.
/// {
/// "type": "shamir",
/// "sealed": false,
/// "t": 3,
/// "n": 5,
/// "progress": 0,
/// "version": "0.9.0",
/// "cluster_name": "vault-cluster-d6ec3c7f",
/// "cluster_id": "3e8b3fec-3749-e056-ba41-b62a63b997e8",
/// "nonce": "ef05d55d-4d2c-c594-a5e8-55bc88604c24"
/// }
#[derive(Debug, Deserialize, PartialEq, Serialize)]
struct SealStatusResponse {
sealed: bool,
}
| 32.965789 | 99 | 0.595673 |
5699931d16ffbc38a12b720b3f957b920822f67c | 459 | extern crate rosalind;
use rosalind::{algorithmic_heights, utility::parser};
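/// Reads the dataset named by the first CLI argument and returns the last
/// list parsed from it, which holds the numbers to sort.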
fn parse_input() -> Vec<i32> {
let filename = format!("input_dataset/{}", &parser::cmdline_arguments()[1]);
let mut input_vec = parser::list_of_things(filename).unwrap();
let nums = input_vec.pop().unwrap();
    nums
}
fn main() {
let mut nums = parse_input();
algorithmic_heights::sorting::ms(&mut nums);
let res = parser::vec_to_string(nums);
println!("{}", res);
} | 27 | 77 | 0.697168 |
3896a96c3e5f12cf143a64eaa4acd610dbc89044 | 1,002 | use enumset::EnumSetType;
use enum_map::Enum;
use strum::EnumIter;
#[derive(Debug, Copy, Clone)]
pub enum ButtonStates {
Colour1 = 0x01,
Colour2 = 0x00,
DimmedColour1 = 0x02,
DimmedColour2 = 0x04,
Flashing = 0x03,
}
#[derive(EnumSetType, Enum, EnumIter, Debug)]
pub enum Buttons {
// These are all the buttons from the GoXLR Mini.
Fader1Mute = 4,
Fader2Mute = 9,
Fader3Mute = 14,
Fader4Mute = 19,
Bleep = 22,
MicrophoneMute = 23,
// The rest are GoXLR Full Buttons. On the mini, they will simply be ignored.
EffectSelect1 = 0,
EffectSelect2 = 5,
EffectSelect3 = 10,
EffectSelect4 = 15,
EffectSelect5 = 1,
EffectSelect6 = 6,
EffectFx = 21,
EffectMegaphone = 20,
EffectRobot = 11,
EffectHardTune = 16,
SamplerSelectA = 2,
SamplerSelectB = 7,
SamplerSelectC = 12,
SamplerTopLeft = 3,
SamplerTopRight = 8,
SamplerBottomLeft = 17,
SamplerBottomRight = 13,
SamplerClear = 18,
}
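// A minimal sketch of how these derives are typically combined (EnumSetType
// makes `|` build an enumset::EnumSet of buttons; the discriminants above are
// assumed to be the device's button bit positions):
//
//     let pressed: enumset::EnumSet<Buttons> = Buttons::Fader1Mute | Buttons::Bleep;
//     assert!(pressed.contains(Buttons::Bleep));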
| 21.319149 | 81 | 0.641717 |
1c226cce2c1b5ac70f0d796b5dadb2abfcfab079 | 16,985 | use std::usize;
use std::io;
use http::buf::MemSlice;
use http::io::MemRead;
use self::Kind::{Length, Chunked, Eof};
/// Decoders to handle different Transfer-Encodings.
///
/// If a message body does not include a Transfer-Encoding, it *should*
/// include a Content-Length header.
#[derive(Debug, Clone)]
pub struct Decoder {
kind: Kind,
}
impl Decoder {
    pub fn length(len: u64) -> Decoder {
        Decoder { kind: Kind::Length(len) }
    }
pub fn chunked() -> Decoder {
Decoder { kind: Kind::Chunked(ChunkedState::Size, 0) }
}
pub fn eof() -> Decoder {
Decoder { kind: Kind::Eof(false) }
}
}
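// A minimal sketch of driving a Decoder (runnable with the test-only MemRead
// impl for `&[u8]` further down): decode() is called repeatedly until it
// yields an empty slice, which marks the end of the body.
//
//     let mut body = &b"5\r\nhello\r\n0\r\n\r\n"[..];
//     let mut decoder = Decoder::chunked();
//     loop {
//         let chunk = try!(decoder.decode(&mut body));
//         if chunk.is_empty() { break; }
//         // ... consume chunk ...
//     }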
#[derive(Debug, Clone)]
enum Kind {
/// A Reader used when a Content-Length header is passed with a positive integer.
Length(u64),
/// A Reader used when Transfer-Encoding is `chunked`.
Chunked(ChunkedState, u64),
/// A Reader used for responses that don't indicate a length or chunked.
///
    /// Note: This should only be used for `Response`s. It is illegal for a
/// `Request` to be made with both `Content-Length` and
/// `Transfer-Encoding: chunked` missing, as explained from the spec:
///
/// > If a Transfer-Encoding header field is present in a response and
/// > the chunked transfer coding is not the final encoding, the
/// > message body length is determined by reading the connection until
/// > it is closed by the server. If a Transfer-Encoding header field
/// > is present in a request and the chunked transfer coding is not
/// > the final encoding, the message body length cannot be determined
/// > reliably; the server MUST respond with the 400 (Bad Request)
/// > status code and then close the connection.
Eof(bool),
}
#[derive(Debug, PartialEq, Clone)]
enum ChunkedState {
Size,
SizeLws,
Extension,
SizeLf,
Body,
BodyCr,
BodyLf,
EndCr,
EndLf,
End,
}
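// For reference, the chunked framing these states walk through, e.g. for the
// body "foobar" sent as two chunks:
//
//     3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n
//
// Size/SizeLws/Extension/SizeLf consume a size line such as "3\r\n",
// Body/BodyCr/BodyLf consume the chunk data plus its trailing CRLF, and
// EndCr/EndLf consume the final CRLF after the zero-sized last chunk.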
impl Decoder {
pub fn is_eof(&self) -> bool {
trace!("is_eof? {:?}", self);
match self.kind {
Length(0) |
Chunked(ChunkedState::End, _) |
Eof(true) => true,
_ => false,
}
}
}
impl Decoder {
pub fn decode<R: MemRead>(&mut self, body: &mut R) -> io::Result<MemSlice> {
match self.kind {
Length(ref mut remaining) => {
trace!("Sized read, remaining={:?}", remaining);
if *remaining == 0 {
Ok(MemSlice::empty())
} else {
let to_read = *remaining as usize;
let buf = try!(body.read_mem(to_read));
let num = buf.as_ref().len() as u64;
trace!("Length read: {}", num);
if num > *remaining {
*remaining = 0;
} else if num == 0 {
return Err(io::Error::new(io::ErrorKind::Other, "early eof"));
} else {
*remaining -= num;
}
Ok(buf)
}
}
Chunked(ref mut state, ref mut size) => {
loop {
let mut buf = None;
// advances the chunked state
*state = try!(state.step(body, size, &mut buf));
if *state == ChunkedState::End {
trace!("end of chunked");
return Ok(MemSlice::empty());
}
if let Some(buf) = buf {
return Ok(buf);
}
}
}
Eof(ref mut is_eof) => {
if *is_eof {
Ok(MemSlice::empty())
} else {
                    // 8192 chosen because it's about 2 packets; there probably
                    // won't be that much available, so don't make MemReaders
                    // allocate buffers that are too big
match body.read_mem(8192) {
Ok(slice) => {
*is_eof = slice.is_empty();
Ok(slice)
}
other => other,
}
}
}
}
}
}
macro_rules! byte (
($rdr:ident) => ({
let buf = try!($rdr.read_mem(1));
if !buf.is_empty() {
buf[0]
} else {
return Err(io::Error::new(io::ErrorKind::UnexpectedEof,
"Unexpected eof during chunk size line"));
}
})
);
impl ChunkedState {
fn step<R: MemRead>(&self,
body: &mut R,
size: &mut u64,
buf: &mut Option<MemSlice>)
-> io::Result<ChunkedState> {
use self::ChunkedState::*;
Ok(match *self {
Size => try!(ChunkedState::read_size(body, size)),
SizeLws => try!(ChunkedState::read_size_lws(body)),
Extension => try!(ChunkedState::read_extension(body)),
SizeLf => try!(ChunkedState::read_size_lf(body, size)),
Body => try!(ChunkedState::read_body(body, size, buf)),
BodyCr => try!(ChunkedState::read_body_cr(body)),
BodyLf => try!(ChunkedState::read_body_lf(body)),
EndCr => try!(ChunkedState::read_end_cr(body)),
EndLf => try!(ChunkedState::read_end_lf(body)),
End => ChunkedState::End,
})
}
fn read_size<R: MemRead>(rdr: &mut R, size: &mut u64) -> io::Result<ChunkedState> {
trace!("Read chunk hex size");
let radix = 16;
match byte!(rdr) {
b @ b'0'...b'9' => {
*size *= radix;
*size += (b - b'0') as u64;
}
b @ b'a'...b'f' => {
*size *= radix;
*size += (b + 10 - b'a') as u64;
}
b @ b'A'...b'F' => {
*size *= radix;
*size += (b + 10 - b'A') as u64;
}
b'\t' | b' ' => return Ok(ChunkedState::SizeLws),
b';' => return Ok(ChunkedState::Extension),
b'\r' => return Ok(ChunkedState::SizeLf),
_ => {
return Err(io::Error::new(io::ErrorKind::InvalidInput,
"Invalid chunk size line: Invalid Size"));
}
}
Ok(ChunkedState::Size)
}
fn read_size_lws<R: MemRead>(rdr: &mut R) -> io::Result<ChunkedState> {
trace!("read_size_lws");
match byte!(rdr) {
// LWS can follow the chunk size, but no more digits can come
b'\t' | b' ' => Ok(ChunkedState::SizeLws),
b';' => Ok(ChunkedState::Extension),
            b'\r' => Ok(ChunkedState::SizeLf),
_ => {
Err(io::Error::new(io::ErrorKind::InvalidInput,
"Invalid chunk size linear white space"))
}
}
}
fn read_extension<R: MemRead>(rdr: &mut R) -> io::Result<ChunkedState> {
trace!("read_extension");
        match byte!(rdr) {
            b'\r' => Ok(ChunkedState::SizeLf),
            _ => Ok(ChunkedState::Extension), // no supported extensions
        }
}
fn read_size_lf<R: MemRead>(rdr: &mut R, size: &mut u64) -> io::Result<ChunkedState> {
trace!("Chunk size is {:?}", size);
match byte!(rdr) {
b'\n' if *size > 0 => Ok(ChunkedState::Body),
b'\n' if *size == 0 => Ok(ChunkedState::EndCr),
_ => Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid chunk size LF")),
}
}
fn read_body<R: MemRead>(rdr: &mut R,
rem: &mut u64,
buf: &mut Option<MemSlice>)
-> io::Result<ChunkedState> {
trace!("Chunked read, remaining={:?}", rem);
// cap remaining bytes at the max capacity of usize
let rem_cap = match *rem {
r if r > usize::MAX as u64 => usize::MAX,
r => r as usize,
};
        let slice = try!(rdr.read_mem(rem_cap));
let count = slice.len();
if count == 0 {
*rem = 0;
return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "early eof"));
}
*buf = Some(slice);
*rem -= count as u64;
if *rem > 0 {
Ok(ChunkedState::Body)
} else {
Ok(ChunkedState::BodyCr)
}
}
fn read_body_cr<R: MemRead>(rdr: &mut R) -> io::Result<ChunkedState> {
match byte!(rdr) {
b'\r' => Ok(ChunkedState::BodyLf),
_ => Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid chunk body CR")),
}
}
fn read_body_lf<R: MemRead>(rdr: &mut R) -> io::Result<ChunkedState> {
match byte!(rdr) {
b'\n' => Ok(ChunkedState::Size),
_ => Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid chunk body LF")),
}
}
fn read_end_cr<R: MemRead>(rdr: &mut R) -> io::Result<ChunkedState> {
match byte!(rdr) {
b'\r' => Ok(ChunkedState::EndLf),
_ => Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid chunk end CR")),
}
}
fn read_end_lf<R: MemRead>(rdr: &mut R) -> io::Result<ChunkedState> {
match byte!(rdr) {
b'\n' => Ok(ChunkedState::End),
_ => Err(io::Error::new(io::ErrorKind::InvalidInput, "Invalid chunk end LF")),
}
}
}
#[cfg(test)]
mod tests {
use std::error::Error;
use std::io;
use std::io::Write;
use super::Decoder;
use super::ChunkedState;
use http::io::MemRead;
use http::buf::{MemBuf, MemSlice};
use mock::AsyncIo;
impl<'a> MemRead for &'a [u8] {
fn read_mem(&mut self, len: usize) -> io::Result<MemSlice> {
let n = ::std::cmp::min(len, self.len());
if n > 0 {
let mut buf = MemBuf::with_capacity(n);
buf.read_from(self).unwrap();
Ok(buf.slice(n))
} else {
Ok(MemSlice::empty())
}
}
}
#[test]
fn test_read_chunk_size() {
use std::io::ErrorKind::{UnexpectedEof, InvalidInput};
fn read(s: &str) -> u64 {
let mut state = ChunkedState::Size;
let mut rdr = &mut s.as_bytes();
let mut size = 0;
loop {
let result = state.step(rdr, &mut size, &mut None);
let desc = format!("read_size failed for {:?}", s);
state = result.expect(desc.as_str());
if state == ChunkedState::Body || state == ChunkedState::EndCr {
break;
}
}
size
}
fn read_err(s: &str, expected_err: io::ErrorKind) {
let mut state = ChunkedState::Size;
let mut rdr = &mut s.as_bytes();
let mut size = 0;
loop {
let result = state.step(rdr, &mut size, &mut None);
state = match result {
Ok(s) => s,
Err(e) => {
assert!(expected_err == e.kind(), "Reading {:?}, expected {:?}, but got {:?}",
s, expected_err, e.kind());
return;
}
};
if state == ChunkedState::Body || state == ChunkedState::End {
                panic!("Was Ok. Expected Err for {:?}", s);
}
}
}
assert_eq!(1, read("1\r\n"));
assert_eq!(1, read("01\r\n"));
assert_eq!(0, read("0\r\n"));
assert_eq!(0, read("00\r\n"));
assert_eq!(10, read("A\r\n"));
assert_eq!(10, read("a\r\n"));
assert_eq!(255, read("Ff\r\n"));
assert_eq!(255, read("Ff \r\n"));
// Missing LF or CRLF
read_err("F\rF", InvalidInput);
read_err("F", UnexpectedEof);
// Invalid hex digit
read_err("X\r\n", InvalidInput);
read_err("1X\r\n", InvalidInput);
read_err("-\r\n", InvalidInput);
read_err("-1\r\n", InvalidInput);
// Acceptable (if not fully valid) extensions do not influence the size
assert_eq!(1, read("1;extension\r\n"));
assert_eq!(10, read("a;ext name=value\r\n"));
assert_eq!(1, read("1;extension;extension2\r\n"));
assert_eq!(1, read("1;;; ;\r\n"));
assert_eq!(2, read("2; extension...\r\n"));
assert_eq!(3, read("3 ; extension=123\r\n"));
assert_eq!(3, read("3 ;\r\n"));
assert_eq!(3, read("3 ; \r\n"));
// Invalid extensions cause an error
read_err("1 invalid extension\r\n", InvalidInput);
read_err("1 A\r\n", InvalidInput);
read_err("1;no CRLF", UnexpectedEof);
}
#[test]
fn test_read_sized_early_eof() {
let mut bytes = &b"foo bar"[..];
let mut decoder = Decoder::length(10);
assert_eq!(decoder.decode(&mut bytes).unwrap().len(), 7);
let e = decoder.decode(&mut bytes).unwrap_err();
assert_eq!(e.kind(), io::ErrorKind::Other);
assert_eq!(e.description(), "early eof");
}
#[test]
fn test_read_chunked_early_eof() {
let mut bytes = &b"\
9\r\n\
foo bar\
"[..];
let mut decoder = Decoder::chunked();
assert_eq!(decoder.decode(&mut bytes).unwrap().len(), 7);
let e = decoder.decode(&mut bytes).unwrap_err();
assert_eq!(e.kind(), io::ErrorKind::UnexpectedEof);
assert_eq!(e.description(), "early eof");
}
#[test]
fn test_read_chunked_single_read() {
let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\n"[..];
let buf = Decoder::chunked().decode(&mut mock_buf).expect("decode");
assert_eq!(16, buf.len());
let result = String::from_utf8(buf.as_ref().to_vec()).expect("decode String");
assert_eq!("1234567890abcdef", &result);
}
#[test]
fn test_read_chunked_after_eof() {
let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\n\r\n"[..];
let mut decoder = Decoder::chunked();
// normal read
let buf = decoder.decode(&mut mock_buf).expect("decode");
assert_eq!(16, buf.len());
let result = String::from_utf8(buf.as_ref().to_vec()).expect("decode String");
assert_eq!("1234567890abcdef", &result);
// eof read
let buf = decoder.decode(&mut mock_buf).expect("decode");
assert_eq!(0, buf.len());
// ensure read after eof also returns eof
let buf = decoder.decode(&mut mock_buf).expect("decode");
assert_eq!(0, buf.len());
}
// perform an async read using a custom buffer size and causing a blocking
// read at the specified byte
fn read_async(mut decoder: Decoder,
content: &[u8],
block_at: usize)
-> String {
let content_len = content.len();
let mut ins = AsyncIo::new(content, block_at);
let mut outs = Vec::new();
loop {
match decoder.decode(&mut ins) {
Ok(buf) => {
if buf.is_empty() {
break; // eof
}
outs.write(buf.as_ref()).expect("write buffer");
}
Err(e) => match e.kind() {
io::ErrorKind::WouldBlock => {
ins.block_in(content_len); // we only block once
},
_ => panic!("unexpected decode error: {}", e),
}
};
}
String::from_utf8(outs).expect("decode String")
}
// iterate over the different ways that this async read could go.
// tests blocking a read at each byte along the content - The shotgun approach
fn all_async_cases(content: &str, expected: &str, decoder: Decoder) {
let content_len = content.len();
for block_at in 0..content_len {
let actual = read_async(decoder.clone(), content.as_bytes(), block_at);
assert_eq!(expected, &actual, "Failed async. Blocking at {}", block_at);
}
}
#[test]
fn test_read_length_async() {
let content = "foobar";
all_async_cases(content, content, Decoder::length(content.len() as u64));
}
#[test]
fn test_read_chunked_async() {
let content = "3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n";
let expected = "foobar";
all_async_cases(content, expected, Decoder::chunked());
}
#[test]
fn test_read_eof_async() {
let content = "foobar";
all_async_cases(content, content, Decoder::eof());
}
}
| 35.092975 | 102 | 0.493082 |
263c5f352d68239f299dcdcaf1ca25893a43bfbe | 93 | mod path;
mod query;
pub use path::Path;
pub use path::PathComponent;
pub use query::Query;
| 13.285714 | 28 | 0.731183 |
6910857e0d5de9788c617cd451acbbe42425e9ff | 3,159 | use cell::{Cell, CellId, CellIdGenerator, CellPosition};
use cell_vm::Facing;
use genome::Genome;
use random_generator::RandomGenerator;
use super::{INFLOW_RATE_BASE, POND_HEIGHT, POND_WIDTH};
pub struct CellPond {
    grid: Vec<Vec<Cell>>,
}
impl CellPond {
pub fn new(id_generator: &mut CellIdGenerator, generator: &mut RandomGenerator) -> CellPond {
        let mut grid = Vec::with_capacity(POND_WIDTH);
        for i in 0..POND_WIDTH {
            grid.push(Vec::with_capacity(POND_HEIGHT));
            for _ in 0..POND_HEIGHT {
                grid[i].push(Cell::random(id_generator, generator));
            }
        }
        CellPond {
            grid,
}
}
#[inline]
pub fn replace(&mut self, position: &CellPosition, new_id: CellId, genome: Genome) {
        let cell = &mut self.grid[position.0][position.1];
cell.id = new_id.clone();
cell.parent_id = None;
cell.lineage = new_id;
cell.generation = 0;
cell.energy += INFLOW_RATE_BASE;
cell.genome = genome;
}
#[inline]
pub(crate) fn cell(&mut self, position: &CellPosition) -> &mut Cell {
        &mut self.grid[position.0][position.1]
}
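    /// Returns the neighbor of `position` in the given direction. The pond is
    /// a torus: indices wrap around at the grid edges in both dimensions.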
#[inline]
pub(crate) fn get_neighbor(&mut self, position: &CellPosition, facing: &Facing) -> &mut Cell {
match facing {
            Facing::Left => {
                let x = if position.0 == 0 {
                    POND_WIDTH - 1
                } else {
                    position.0 - 1
                };
                &mut self.grid[x][position.1]
            },
Facing::Right => {
let x = (position.0 + 1) % POND_WIDTH;
                &mut self.grid[x][position.1]
},
Facing::Up => {
let y = (position.1 + 1) % POND_HEIGHT;
                &mut self.grid[position.0][y]
},
Facing::Down => {
let y = if position.1 == 0 {
POND_HEIGHT-1
} else {
position.1-1
};
                &mut self.grid[position.0][y]
},
}
}
#[inline]
pub fn total_energy(&self) -> usize {
self.perform_on_active(0, |acc, cell| cell.energy + acc)
}
#[inline]
pub fn total_active_cells(&self) -> usize {
self.perform_on_active(0, |acc, _| acc+1)
}
#[inline]
pub fn total_viable_replicators(&self) -> usize {
self.perform_on_active(
0, |acc, cell| if cell.generation > 2 { acc + 1 } else { acc })
}
#[inline]
pub fn max_generation(&self) -> usize {
self.perform_on_active(0, |g, cell|
if cell.generation > g {
cell.generation
} else {
g
})
}
fn perform_on_active<R, T: Fn(R, &Cell) -> R>(&self, zero: R, op: T) -> R {
let mut acc = zero;
for x in 0..POND_WIDTH {
for y in 0..POND_HEIGHT {
                let c = &self.grid[x][y];
if c.energy > 0 {
acc = op(acc, c);
}
}
}
acc
}
} | 28.981651 | 98 | 0.487813 |
abfb00a24a115364b7f825a1ae5d1f7c1117ac1b | 79,973 | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::AnnNode::*;
use syntax::abi::Abi;
use syntax::ast;
use syntax::codemap::{CodeMap, Spanned};
use syntax::parse::ParseSess;
use syntax::parse::lexer::comments;
use syntax::print::pp::{self, Breaks};
use syntax::print::pp::Breaks::{Consistent, Inconsistent};
use syntax::print::pprust::PrintState;
use syntax::ptr::P;
use syntax::symbol::keywords;
use syntax_pos::{self, BytePos};
use hir;
use hir::{PatKind, RegionTyParamBound, TraitTyParamBound, TraitBoundModifier, RangeEnd};
use std::cell::Cell;
use std::io::{self, Write, Read};
use std::iter::Peekable;
use std::vec;
pub enum AnnNode<'a> {
NodeName(&'a ast::Name),
NodeBlock(&'a hir::Block),
NodeItem(&'a hir::Item),
NodeSubItem(ast::NodeId),
NodeExpr(&'a hir::Expr),
NodePat(&'a hir::Pat),
}
pub enum Nested {
Item(hir::ItemId),
TraitItem(hir::TraitItemId),
ImplItem(hir::ImplItemId),
Body(hir::BodyId),
BodyArgPat(hir::BodyId, usize)
}
pub trait PpAnn {
fn nested(&self, _state: &mut State, _nested: Nested) -> io::Result<()> {
Ok(())
}
fn pre(&self, _state: &mut State, _node: AnnNode) -> io::Result<()> {
Ok(())
}
fn post(&self, _state: &mut State, _node: AnnNode) -> io::Result<()> {
Ok(())
}
}
pub struct NoAnn;
impl PpAnn for NoAnn {}
pub const NO_ANN: &'static PpAnn = &NoAnn;
impl PpAnn for hir::Crate {
fn nested(&self, state: &mut State, nested: Nested) -> io::Result<()> {
match nested {
Nested::Item(id) => state.print_item(self.item(id.id)),
Nested::TraitItem(id) => state.print_trait_item(self.trait_item(id)),
Nested::ImplItem(id) => state.print_impl_item(self.impl_item(id)),
Nested::Body(id) => state.print_expr(&self.body(id).value),
Nested::BodyArgPat(id, i) => state.print_pat(&self.body(id).arguments[i].pat)
}
}
}
pub struct State<'a> {
pub s: pp::Printer<'a>,
cm: Option<&'a CodeMap>,
comments: Option<Vec<comments::Comment>>,
literals: Peekable<vec::IntoIter<comments::Literal>>,
cur_cmnt: usize,
boxes: Vec<pp::Breaks>,
ann: &'a (PpAnn + 'a),
}
impl<'a> PrintState<'a> for State<'a> {
fn writer(&mut self) -> &mut pp::Printer<'a> {
&mut self.s
}
fn boxes(&mut self) -> &mut Vec<pp::Breaks> {
&mut self.boxes
}
fn comments(&mut self) -> &mut Option<Vec<comments::Comment>> {
&mut self.comments
}
fn cur_cmnt(&mut self) -> &mut usize {
&mut self.cur_cmnt
}
fn cur_lit(&mut self) -> Option<&comments::Literal> {
self.literals.peek()
}
fn bump_lit(&mut self) -> Option<comments::Literal> {
self.literals.next()
}
}
#[allow(non_upper_case_globals)]
pub const indent_unit: usize = 4;
#[allow(non_upper_case_globals)]
pub const default_columns: usize = 78;
/// Requires you to pass an input filename and reader so that
/// it can scan the input text for comments and literals to
/// copy forward.
pub fn print_crate<'a>(cm: &'a CodeMap,
sess: &ParseSess,
krate: &hir::Crate,
filename: String,
input: &mut Read,
out: Box<Write + 'a>,
ann: &'a PpAnn,
is_expanded: bool)
-> io::Result<()> {
let mut s = State::new_from_input(cm, sess, filename, input, out, ann, is_expanded);
// When printing the AST, we sometimes need to inject `#[no_std]` here.
// Since you can't compile the HIR, it's not necessary.
s.print_mod(&krate.module, &krate.attrs)?;
s.print_remaining_comments()?;
s.s.eof()
}
impl<'a> State<'a> {
pub fn new_from_input(cm: &'a CodeMap,
sess: &ParseSess,
filename: String,
input: &mut Read,
out: Box<Write + 'a>,
ann: &'a PpAnn,
is_expanded: bool)
-> State<'a> {
let (cmnts, lits) = comments::gather_comments_and_literals(sess, filename, input);
State::new(cm,
out,
ann,
Some(cmnts),
// If the code is post expansion, don't use the table of
// literals, since it doesn't correspond with the literals
// in the AST anymore.
if is_expanded {
None
} else {
Some(lits)
})
}
pub fn new(cm: &'a CodeMap,
out: Box<Write + 'a>,
ann: &'a PpAnn,
comments: Option<Vec<comments::Comment>>,
literals: Option<Vec<comments::Literal>>)
-> State<'a> {
State {
s: pp::mk_printer(out, default_columns),
cm: Some(cm),
comments: comments.clone(),
literals: literals.unwrap_or_default().into_iter().peekable(),
cur_cmnt: 0,
boxes: Vec::new(),
ann,
}
}
}
pub fn to_string<F>(ann: &PpAnn, f: F) -> String
where F: FnOnce(&mut State) -> io::Result<()>
{
let mut wr = Vec::new();
{
let mut printer = State {
s: pp::mk_printer(Box::new(&mut wr), default_columns),
cm: None,
comments: None,
literals: vec![].into_iter().peekable(),
cur_cmnt: 0,
boxes: Vec::new(),
ann,
};
f(&mut printer).unwrap();
printer.s.eof().unwrap();
}
String::from_utf8(wr).unwrap()
}
pub fn visibility_qualified(vis: &hir::Visibility, w: &str) -> String {
to_string(NO_ANN, |s| {
s.print_visibility(vis)?;
s.s.word(w)
})
}
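// For example, `visibility_qualified(&hir::Public, "struct")` renders as
// "pub struct".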
fn needs_parentheses(expr: &hir::Expr) -> bool {
match expr.node {
hir::ExprAssign(..) |
hir::ExprBinary(..) |
hir::ExprClosure(..) |
hir::ExprAssignOp(..) |
hir::ExprCast(..) |
hir::ExprType(..) => true,
_ => false,
}
}
impl<'a> State<'a> {
pub fn cbox(&mut self, u: usize) -> io::Result<()> {
self.boxes.push(pp::Breaks::Consistent);
self.s.cbox(u)
}
pub fn nbsp(&mut self) -> io::Result<()> {
self.s.word(" ")
}
pub fn word_nbsp(&mut self, w: &str) -> io::Result<()> {
self.s.word(w)?;
self.nbsp()
}
pub fn head(&mut self, w: &str) -> io::Result<()> {
// outer-box is consistent
self.cbox(indent_unit)?;
// head-box is inconsistent
self.ibox(w.len() + 1)?;
// keyword that starts the head
if !w.is_empty() {
self.word_nbsp(w)?;
}
Ok(())
}
pub fn bopen(&mut self) -> io::Result<()> {
self.s.word("{")?;
self.end() // close the head-box
}
pub fn bclose_(&mut self, span: syntax_pos::Span, indented: usize) -> io::Result<()> {
self.bclose_maybe_open(span, indented, true)
}
pub fn bclose_maybe_open(&mut self,
span: syntax_pos::Span,
indented: usize,
close_box: bool)
-> io::Result<()> {
self.maybe_print_comment(span.hi)?;
self.break_offset_if_not_bol(1, -(indented as isize))?;
self.s.word("}")?;
if close_box {
self.end()?; // close the outer-box
}
Ok(())
}
pub fn bclose(&mut self, span: syntax_pos::Span) -> io::Result<()> {
self.bclose_(span, indent_unit)
}
pub fn in_cbox(&self) -> bool {
match self.boxes.last() {
Some(&last_box) => last_box == pp::Breaks::Consistent,
None => false,
}
}
pub fn space_if_not_bol(&mut self) -> io::Result<()> {
if !self.is_bol() {
self.s.space()?;
}
Ok(())
}
pub fn break_offset_if_not_bol(&mut self, n: usize, off: isize) -> io::Result<()> {
if !self.is_bol() {
self.s.break_offset(n, off)
} else {
if off != 0 && self.s.last_token().is_hardbreak_tok() {
// We do something pretty sketchy here: tuck the nonzero
// offset-adjustment we were going to deposit along with the
// break into the previous hardbreak.
self.s.replace_last_token(pp::Printer::hardbreak_tok_offset(off));
}
Ok(())
}
}
// Synthesizes a comment that was not textually present in the original source
// file.
pub fn synth_comment(&mut self, text: String) -> io::Result<()> {
self.s.word("/*")?;
self.s.space()?;
self.s.word(&text[..])?;
self.s.space()?;
self.s.word("*/")
}
pub fn commasep_cmnt<T, F, G>(&mut self,
b: Breaks,
elts: &[T],
mut op: F,
mut get_span: G)
-> io::Result<()>
where F: FnMut(&mut State, &T) -> io::Result<()>,
G: FnMut(&T) -> syntax_pos::Span
{
self.rbox(0, b)?;
let len = elts.len();
let mut i = 0;
for elt in elts {
self.maybe_print_comment(get_span(elt).hi)?;
op(self, elt)?;
i += 1;
if i < len {
self.s.word(",")?;
self.maybe_print_trailing_comment(get_span(elt), Some(get_span(&elts[i]).hi))?;
self.space_if_not_bol()?;
}
}
self.end()
}
pub fn commasep_exprs(&mut self, b: Breaks, exprs: &[hir::Expr]) -> io::Result<()> {
self.commasep_cmnt(b, exprs, |s, e| s.print_expr(&e), |e| e.span)
}
pub fn print_mod(&mut self, _mod: &hir::Mod, attrs: &[ast::Attribute]) -> io::Result<()> {
self.print_inner_attributes(attrs)?;
for &item_id in &_mod.item_ids {
self.ann.nested(self, Nested::Item(item_id))?;
}
Ok(())
}
pub fn print_foreign_mod(&mut self,
nmod: &hir::ForeignMod,
attrs: &[ast::Attribute])
-> io::Result<()> {
self.print_inner_attributes(attrs)?;
for item in &nmod.items {
self.print_foreign_item(item)?;
}
Ok(())
}
pub fn print_opt_lifetime(&mut self, lifetime: &hir::Lifetime) -> io::Result<()> {
if !lifetime.is_elided() {
self.print_lifetime(lifetime)?;
self.nbsp()?;
}
Ok(())
}
pub fn print_type(&mut self, ty: &hir::Ty) -> io::Result<()> {
self.maybe_print_comment(ty.span.lo)?;
self.ibox(0)?;
match ty.node {
hir::TySlice(ref ty) => {
self.s.word("[")?;
self.print_type(&ty)?;
self.s.word("]")?;
}
hir::TyPtr(ref mt) => {
self.s.word("*")?;
match mt.mutbl {
hir::MutMutable => self.word_nbsp("mut")?,
hir::MutImmutable => self.word_nbsp("const")?,
}
self.print_type(&mt.ty)?;
}
hir::TyRptr(ref lifetime, ref mt) => {
self.s.word("&")?;
self.print_opt_lifetime(lifetime)?;
self.print_mt(mt)?;
}
hir::TyNever => {
self.s.word("!")?;
},
hir::TyTup(ref elts) => {
self.popen()?;
self.commasep(Inconsistent, &elts[..], |s, ty| s.print_type(&ty))?;
if elts.len() == 1 {
self.s.word(",")?;
}
self.pclose()?;
}
hir::TyBareFn(ref f) => {
let generics = hir::Generics {
lifetimes: f.lifetimes.clone(),
ty_params: hir::HirVec::new(),
where_clause: hir::WhereClause {
id: ast::DUMMY_NODE_ID,
predicates: hir::HirVec::new(),
},
span: syntax_pos::DUMMY_SP,
};
self.print_ty_fn(f.abi, f.unsafety, &f.decl, None, &generics)?;
}
hir::TyPath(ref qpath) => {
self.print_qpath(qpath, false)?
}
hir::TyTraitObject(ref bounds, ref lifetime) => {
let mut first = true;
for bound in bounds {
self.nbsp()?;
if first {
first = false;
} else {
self.word_space("+")?;
}
self.print_poly_trait_ref(bound)?;
}
if !lifetime.is_elided() {
self.word_space("+")?;
self.print_lifetime(lifetime)?;
}
}
hir::TyImplTrait(ref bounds) => {
self.print_bounds("impl ", &bounds[..])?;
}
hir::TyArray(ref ty, v) => {
self.s.word("[")?;
self.print_type(&ty)?;
self.s.word("; ")?;
self.ann.nested(self, Nested::Body(v))?;
self.s.word("]")?;
}
hir::TyTypeof(e) => {
self.s.word("typeof(")?;
self.ann.nested(self, Nested::Body(e))?;
self.s.word(")")?;
}
hir::TyInfer => {
self.s.word("_")?;
}
hir::TyErr => {
self.s.word("?")?;
}
}
self.end()
}
pub fn print_foreign_item(&mut self, item: &hir::ForeignItem) -> io::Result<()> {
self.hardbreak_if_not_bol()?;
self.maybe_print_comment(item.span.lo)?;
self.print_outer_attributes(&item.attrs)?;
match item.node {
hir::ForeignItemFn(ref decl, ref arg_names, ref generics) => {
self.head("")?;
self.print_fn(decl,
hir::Unsafety::Normal,
hir::Constness::NotConst,
Abi::Rust,
Some(item.name),
generics,
&item.vis,
arg_names,
None)?;
self.end()?; // end head-ibox
self.s.word(";")?;
self.end() // end the outer fn box
}
hir::ForeignItemStatic(ref t, m) => {
self.head(&visibility_qualified(&item.vis, "static"))?;
if m {
self.word_space("mut")?;
}
self.print_name(item.name)?;
self.word_space(":")?;
self.print_type(&t)?;
self.s.word(";")?;
self.end()?; // end the head-ibox
self.end() // end the outer cbox
}
}
}
fn print_associated_const(&mut self,
name: ast::Name,
ty: &hir::Ty,
default: Option<hir::BodyId>,
vis: &hir::Visibility)
-> io::Result<()> {
self.s.word(&visibility_qualified(vis, ""))?;
self.word_space("const")?;
self.print_name(name)?;
self.word_space(":")?;
self.print_type(ty)?;
if let Some(expr) = default {
self.s.space()?;
self.word_space("=")?;
self.ann.nested(self, Nested::Body(expr))?;
}
self.s.word(";")
}
fn print_associated_type(&mut self,
name: ast::Name,
bounds: Option<&hir::TyParamBounds>,
ty: Option<&hir::Ty>)
-> io::Result<()> {
self.word_space("type")?;
self.print_name(name)?;
if let Some(bounds) = bounds {
self.print_bounds(":", bounds)?;
}
if let Some(ty) = ty {
self.s.space()?;
self.word_space("=")?;
self.print_type(ty)?;
}
self.s.word(";")
}
/// Pretty-print an item
pub fn print_item(&mut self, item: &hir::Item) -> io::Result<()> {
self.hardbreak_if_not_bol()?;
self.maybe_print_comment(item.span.lo)?;
self.print_outer_attributes(&item.attrs)?;
self.ann.pre(self, NodeItem(item))?;
match item.node {
hir::ItemExternCrate(ref optional_path) => {
self.head(&visibility_qualified(&item.vis, "extern crate"))?;
if let Some(p) = *optional_path {
let val = p.as_str();
if val.contains("-") {
self.print_string(&val, ast::StrStyle::Cooked)?;
} else {
self.print_name(p)?;
}
self.s.space()?;
self.s.word("as")?;
self.s.space()?;
}
self.print_name(item.name)?;
self.s.word(";")?;
self.end()?; // end inner head-block
self.end()?; // end outer head-block
}
hir::ItemUse(ref path, kind) => {
self.head(&visibility_qualified(&item.vis, "use"))?;
self.print_path(path, false)?;
match kind {
hir::UseKind::Single => {
if path.segments.last().unwrap().name != item.name {
self.s.space()?;
self.word_space("as")?;
self.print_name(item.name)?;
}
self.s.word(";")?;
}
hir::UseKind::Glob => self.s.word("::*;")?,
hir::UseKind::ListStem => self.s.word("::{};")?
}
self.end()?; // end inner head-block
self.end()?; // end outer head-block
}
hir::ItemStatic(ref ty, m, expr) => {
self.head(&visibility_qualified(&item.vis, "static"))?;
if m == hir::MutMutable {
self.word_space("mut")?;
}
self.print_name(item.name)?;
self.word_space(":")?;
self.print_type(&ty)?;
self.s.space()?;
self.end()?; // end the head-ibox
self.word_space("=")?;
self.ann.nested(self, Nested::Body(expr))?;
self.s.word(";")?;
self.end()?; // end the outer cbox
}
hir::ItemConst(ref ty, expr) => {
self.head(&visibility_qualified(&item.vis, "const"))?;
self.print_name(item.name)?;
self.word_space(":")?;
self.print_type(&ty)?;
self.s.space()?;
self.end()?; // end the head-ibox
self.word_space("=")?;
self.ann.nested(self, Nested::Body(expr))?;
self.s.word(";")?;
self.end()?; // end the outer cbox
}
hir::ItemFn(ref decl, unsafety, constness, abi, ref typarams, body) => {
self.head("")?;
self.print_fn(decl,
unsafety,
constness,
abi,
Some(item.name),
typarams,
&item.vis,
&[],
Some(body))?;
self.s.word(" ")?;
self.end()?; // need to close a box
self.end()?; // need to close a box
self.ann.nested(self, Nested::Body(body))?;
}
hir::ItemMod(ref _mod) => {
self.head(&visibility_qualified(&item.vis, "mod"))?;
self.print_name(item.name)?;
self.nbsp()?;
self.bopen()?;
self.print_mod(_mod, &item.attrs)?;
self.bclose(item.span)?;
}
hir::ItemForeignMod(ref nmod) => {
self.head("extern")?;
self.word_nbsp(&nmod.abi.to_string())?;
self.bopen()?;
self.print_foreign_mod(nmod, &item.attrs)?;
self.bclose(item.span)?;
}
hir::ItemGlobalAsm(ref ga) => {
self.head(&visibility_qualified(&item.vis, "global asm"))?;
self.s.word(&ga.asm.as_str())?;
self.end()?
}
hir::ItemTy(ref ty, ref params) => {
self.ibox(indent_unit)?;
self.ibox(0)?;
self.word_nbsp(&visibility_qualified(&item.vis, "type"))?;
self.print_name(item.name)?;
self.print_generics(params)?;
self.end()?; // end the inner ibox
self.print_where_clause(¶ms.where_clause)?;
self.s.space()?;
self.word_space("=")?;
self.print_type(&ty)?;
self.s.word(";")?;
self.end()?; // end the outer ibox
}
hir::ItemEnum(ref enum_definition, ref params) => {
self.print_enum_def(enum_definition, params, item.name, item.span, &item.vis)?;
}
hir::ItemStruct(ref struct_def, ref generics) => {
self.head(&visibility_qualified(&item.vis, "struct"))?;
self.print_struct(struct_def, generics, item.name, item.span, true)?;
}
hir::ItemUnion(ref struct_def, ref generics) => {
self.head(&visibility_qualified(&item.vis, "union"))?;
self.print_struct(struct_def, generics, item.name, item.span, true)?;
}
hir::ItemDefaultImpl(unsafety, ref trait_ref) => {
self.head("")?;
self.print_visibility(&item.vis)?;
self.print_unsafety(unsafety)?;
self.word_nbsp("impl")?;
self.print_trait_ref(trait_ref)?;
self.s.space()?;
self.word_space("for")?;
self.word_space("..")?;
self.bopen()?;
self.bclose(item.span)?;
}
hir::ItemImpl(unsafety,
polarity,
defaultness,
ref generics,
ref opt_trait,
ref ty,
ref impl_items) => {
self.head("")?;
self.print_visibility(&item.vis)?;
self.print_defaultness(defaultness)?;
self.print_unsafety(unsafety)?;
self.word_nbsp("impl")?;
if generics.is_parameterized() {
self.print_generics(generics)?;
self.s.space()?;
}
match polarity {
hir::ImplPolarity::Negative => {
self.s.word("!")?;
}
_ => {}
}
match opt_trait {
&Some(ref t) => {
self.print_trait_ref(t)?;
self.s.space()?;
self.word_space("for")?;
}
&None => {}
}
self.print_type(&ty)?;
self.print_where_clause(&generics.where_clause)?;
self.s.space()?;
self.bopen()?;
self.print_inner_attributes(&item.attrs)?;
for impl_item in impl_items {
self.ann.nested(self, Nested::ImplItem(impl_item.id))?;
}
self.bclose(item.span)?;
}
hir::ItemTrait(unsafety, ref generics, ref bounds, ref trait_items) => {
self.head("")?;
self.print_visibility(&item.vis)?;
self.print_unsafety(unsafety)?;
self.word_nbsp("trait")?;
self.print_name(item.name)?;
self.print_generics(generics)?;
let mut real_bounds = Vec::with_capacity(bounds.len());
for b in bounds.iter() {
if let TraitTyParamBound(ref ptr, hir::TraitBoundModifier::Maybe) = *b {
self.s.space()?;
self.word_space("for ?")?;
self.print_trait_ref(&ptr.trait_ref)?;
} else {
real_bounds.push(b.clone());
}
}
self.print_bounds(":", &real_bounds[..])?;
self.print_where_clause(&generics.where_clause)?;
self.s.word(" ")?;
self.bopen()?;
for trait_item in trait_items {
self.ann.nested(self, Nested::TraitItem(trait_item.id))?;
}
self.bclose(item.span)?;
}
}
self.ann.post(self, NodeItem(item))
}
pub fn print_trait_ref(&mut self, t: &hir::TraitRef) -> io::Result<()> {
self.print_path(&t.path, false)
}
fn print_formal_lifetime_list(&mut self, lifetimes: &[hir::LifetimeDef]) -> io::Result<()> {
if !lifetimes.is_empty() {
self.s.word("for<")?;
let mut comma = false;
for lifetime_def in lifetimes {
if comma {
self.word_space(",")?
}
self.print_lifetime_def(lifetime_def)?;
comma = true;
}
self.s.word(">")?;
}
Ok(())
}
fn print_poly_trait_ref(&mut self, t: &hir::PolyTraitRef) -> io::Result<()> {
self.print_formal_lifetime_list(&t.bound_lifetimes)?;
self.print_trait_ref(&t.trait_ref)
}
pub fn print_enum_def(&mut self,
enum_definition: &hir::EnumDef,
generics: &hir::Generics,
name: ast::Name,
span: syntax_pos::Span,
visibility: &hir::Visibility)
-> io::Result<()> {
self.head(&visibility_qualified(visibility, "enum"))?;
self.print_name(name)?;
self.print_generics(generics)?;
self.print_where_clause(&generics.where_clause)?;
self.s.space()?;
self.print_variants(&enum_definition.variants, span)
}
pub fn print_variants(&mut self,
variants: &[hir::Variant],
span: syntax_pos::Span)
-> io::Result<()> {
self.bopen()?;
for v in variants {
self.space_if_not_bol()?;
self.maybe_print_comment(v.span.lo)?;
self.print_outer_attributes(&v.node.attrs)?;
self.ibox(indent_unit)?;
self.print_variant(v)?;
self.s.word(",")?;
self.end()?;
self.maybe_print_trailing_comment(v.span, None)?;
}
self.bclose(span)
}
pub fn print_visibility(&mut self, vis: &hir::Visibility) -> io::Result<()> {
match *vis {
hir::Public => self.word_nbsp("pub"),
hir::Visibility::Crate => self.word_nbsp("pub(crate)"),
hir::Visibility::Restricted { ref path, .. } => {
self.s.word("pub(")?;
self.print_path(path, false)?;
self.word_nbsp(")")
}
hir::Inherited => Ok(()),
}
}
pub fn print_defaultness(&mut self, defaultness: hir::Defaultness) -> io::Result<()> {
match defaultness {
hir::Defaultness::Default { .. } => self.word_nbsp("default")?,
hir::Defaultness::Final => (),
}
Ok(())
}
pub fn print_struct(&mut self,
struct_def: &hir::VariantData,
generics: &hir::Generics,
name: ast::Name,
span: syntax_pos::Span,
print_finalizer: bool)
-> io::Result<()> {
self.print_name(name)?;
self.print_generics(generics)?;
if !struct_def.is_struct() {
if struct_def.is_tuple() {
self.popen()?;
self.commasep(Inconsistent, struct_def.fields(), |s, field| {
s.maybe_print_comment(field.span.lo)?;
s.print_outer_attributes(&field.attrs)?;
s.print_visibility(&field.vis)?;
s.print_type(&field.ty)
})?;
self.pclose()?;
}
self.print_where_clause(&generics.where_clause)?;
if print_finalizer {
self.s.word(";")?;
}
self.end()?;
self.end() // close the outer-box
} else {
self.print_where_clause(&generics.where_clause)?;
self.nbsp()?;
self.bopen()?;
self.hardbreak_if_not_bol()?;
for field in struct_def.fields() {
self.hardbreak_if_not_bol()?;
self.maybe_print_comment(field.span.lo)?;
self.print_outer_attributes(&field.attrs)?;
self.print_visibility(&field.vis)?;
self.print_name(field.name)?;
self.word_nbsp(":")?;
self.print_type(&field.ty)?;
self.s.word(",")?;
}
self.bclose(span)
}
}
pub fn print_variant(&mut self, v: &hir::Variant) -> io::Result<()> {
self.head("")?;
let generics = hir::Generics::empty();
self.print_struct(&v.node.data, &generics, v.node.name, v.span, false)?;
if let Some(d) = v.node.disr_expr {
self.s.space()?;
self.word_space("=")?;
self.ann.nested(self, Nested::Body(d))?;
}
Ok(())
}
pub fn print_method_sig(&mut self,
name: ast::Name,
m: &hir::MethodSig,
vis: &hir::Visibility,
arg_names: &[Spanned<ast::Name>],
body_id: Option<hir::BodyId>)
-> io::Result<()> {
self.print_fn(&m.decl,
m.unsafety,
m.constness,
m.abi,
Some(name),
&m.generics,
vis,
arg_names,
body_id)
}
pub fn print_trait_item(&mut self, ti: &hir::TraitItem) -> io::Result<()> {
self.ann.pre(self, NodeSubItem(ti.id))?;
self.hardbreak_if_not_bol()?;
self.maybe_print_comment(ti.span.lo)?;
self.print_outer_attributes(&ti.attrs)?;
match ti.node {
hir::TraitItemKind::Const(ref ty, default) => {
self.print_associated_const(ti.name, &ty, default, &hir::Inherited)?;
}
hir::TraitItemKind::Method(ref sig, hir::TraitMethod::Required(ref arg_names)) => {
self.print_method_sig(ti.name, sig, &hir::Inherited, arg_names, None)?;
self.s.word(";")?;
}
hir::TraitItemKind::Method(ref sig, hir::TraitMethod::Provided(body)) => {
self.head("")?;
self.print_method_sig(ti.name, sig, &hir::Inherited, &[], Some(body))?;
self.nbsp()?;
self.end()?; // need to close a box
self.end()?; // need to close a box
self.ann.nested(self, Nested::Body(body))?;
}
hir::TraitItemKind::Type(ref bounds, ref default) => {
self.print_associated_type(ti.name,
Some(bounds),
default.as_ref().map(|ty| &**ty))?;
}
}
self.ann.post(self, NodeSubItem(ti.id))
}
pub fn print_impl_item(&mut self, ii: &hir::ImplItem) -> io::Result<()> {
self.ann.pre(self, NodeSubItem(ii.id))?;
self.hardbreak_if_not_bol()?;
self.maybe_print_comment(ii.span.lo)?;
self.print_outer_attributes(&ii.attrs)?;
self.print_defaultness(ii.defaultness)?;
match ii.node {
hir::ImplItemKind::Const(ref ty, expr) => {
self.print_associated_const(ii.name, &ty, Some(expr), &ii.vis)?;
}
hir::ImplItemKind::Method(ref sig, body) => {
self.head("")?;
self.print_method_sig(ii.name, sig, &ii.vis, &[], Some(body))?;
self.nbsp()?;
self.end()?; // need to close a box
self.end()?; // need to close a box
self.ann.nested(self, Nested::Body(body))?;
}
hir::ImplItemKind::Type(ref ty) => {
self.print_associated_type(ii.name, None, Some(ty))?;
}
}
self.ann.post(self, NodeSubItem(ii.id))
}
pub fn print_stmt(&mut self, st: &hir::Stmt) -> io::Result<()> {
self.maybe_print_comment(st.span.lo)?;
match st.node {
hir::StmtDecl(ref decl, _) => {
self.print_decl(&decl)?;
}
hir::StmtExpr(ref expr, _) => {
self.space_if_not_bol()?;
self.print_expr(&expr)?;
}
hir::StmtSemi(ref expr, _) => {
self.space_if_not_bol()?;
self.print_expr(&expr)?;
self.s.word(";")?;
}
}
if stmt_ends_with_semi(&st.node) {
self.s.word(";")?;
}
self.maybe_print_trailing_comment(st.span, None)
}
pub fn print_block(&mut self, blk: &hir::Block) -> io::Result<()> {
self.print_block_with_attrs(blk, &[])
}
pub fn print_block_unclosed(&mut self, blk: &hir::Block) -> io::Result<()> {
self.print_block_unclosed_indent(blk, indent_unit)
}
pub fn print_block_unclosed_indent(&mut self,
blk: &hir::Block,
indented: usize)
-> io::Result<()> {
self.print_block_maybe_unclosed(blk, indented, &[], false)
}
pub fn print_block_with_attrs(&mut self,
blk: &hir::Block,
attrs: &[ast::Attribute])
-> io::Result<()> {
self.print_block_maybe_unclosed(blk, indent_unit, attrs, true)
}
pub fn print_block_maybe_unclosed(&mut self,
blk: &hir::Block,
indented: usize,
attrs: &[ast::Attribute],
close_box: bool)
-> io::Result<()> {
match blk.rules {
hir::UnsafeBlock(..) => self.word_space("unsafe")?,
hir::PushUnsafeBlock(..) => self.word_space("push_unsafe")?,
hir::PopUnsafeBlock(..) => self.word_space("pop_unsafe")?,
hir::DefaultBlock => (),
}
self.maybe_print_comment(blk.span.lo)?;
self.ann.pre(self, NodeBlock(blk))?;
self.bopen()?;
self.print_inner_attributes(attrs)?;
for st in &blk.stmts {
self.print_stmt(st)?;
}
match blk.expr {
Some(ref expr) => {
self.space_if_not_bol()?;
self.print_expr(&expr)?;
self.maybe_print_trailing_comment(expr.span, Some(blk.span.hi))?;
}
_ => (),
}
self.bclose_maybe_open(blk.span, indented, close_box)?;
self.ann.post(self, NodeBlock(blk))
}
fn print_else(&mut self, els: Option<&hir::Expr>) -> io::Result<()> {
match els {
Some(_else) => {
match _else.node {
// "another else-if"
hir::ExprIf(ref i, ref then, ref e) => {
self.cbox(indent_unit - 1)?;
self.ibox(0)?;
self.s.word(" else if ")?;
self.print_expr(&i)?;
self.s.space()?;
self.print_expr(&then)?;
self.print_else(e.as_ref().map(|e| &**e))
}
// "final else"
hir::ExprBlock(ref b) => {
self.cbox(indent_unit - 1)?;
self.ibox(0)?;
self.s.word(" else ")?;
self.print_block(&b)
}
// BLEAH, constraints would be great here
_ => {
panic!("print_if saw if with weird alternative");
}
}
}
_ => Ok(()),
}
}
pub fn print_if(&mut self,
test: &hir::Expr,
blk: &hir::Expr,
elseopt: Option<&hir::Expr>)
-> io::Result<()> {
self.head("if")?;
self.print_expr(test)?;
self.s.space()?;
self.print_expr(blk)?;
self.print_else(elseopt)
}
pub fn print_if_let(&mut self,
pat: &hir::Pat,
expr: &hir::Expr,
blk: &hir::Block,
elseopt: Option<&hir::Expr>)
-> io::Result<()> {
self.head("if let")?;
self.print_pat(pat)?;
self.s.space()?;
self.word_space("=")?;
self.print_expr(expr)?;
self.s.space()?;
self.print_block(blk)?;
self.print_else(elseopt)
}
fn print_call_post(&mut self, args: &[hir::Expr]) -> io::Result<()> {
self.popen()?;
self.commasep_exprs(Inconsistent, args)?;
self.pclose()
}
pub fn print_expr_maybe_paren(&mut self, expr: &hir::Expr) -> io::Result<()> {
let needs_par = needs_parentheses(expr);
if needs_par {
self.popen()?;
}
self.print_expr(expr)?;
if needs_par {
self.pclose()?;
}
Ok(())
}
fn print_expr_vec(&mut self, exprs: &[hir::Expr]) -> io::Result<()> {
self.ibox(indent_unit)?;
self.s.word("[")?;
self.commasep_exprs(Inconsistent, exprs)?;
self.s.word("]")?;
self.end()
}
fn print_expr_repeat(&mut self, element: &hir::Expr, count: hir::BodyId) -> io::Result<()> {
self.ibox(indent_unit)?;
self.s.word("[")?;
self.print_expr(element)?;
self.word_space(";")?;
self.ann.nested(self, Nested::Body(count))?;
self.s.word("]")?;
self.end()
}
fn print_expr_struct(&mut self,
qpath: &hir::QPath,
fields: &[hir::Field],
wth: &Option<P<hir::Expr>>)
-> io::Result<()> {
self.print_qpath(qpath, true)?;
self.s.word("{")?;
self.commasep_cmnt(Consistent,
&fields[..],
|s, field| {
s.ibox(indent_unit)?;
if !field.is_shorthand {
s.print_name(field.name.node)?;
s.word_space(":")?;
}
s.print_expr(&field.expr)?;
s.end()
},
|f| f.span)?;
match *wth {
Some(ref expr) => {
self.ibox(indent_unit)?;
if !fields.is_empty() {
self.s.word(",")?;
self.s.space()?;
}
self.s.word("..")?;
self.print_expr(&expr)?;
self.end()?;
}
_ => if !fields.is_empty() {
self.s.word(",")?
},
}
self.s.word("}")?;
Ok(())
}
fn print_expr_tup(&mut self, exprs: &[hir::Expr]) -> io::Result<()> {
self.popen()?;
self.commasep_exprs(Inconsistent, exprs)?;
if exprs.len() == 1 {
self.s.word(",")?;
}
self.pclose()
}
fn print_expr_call(&mut self, func: &hir::Expr, args: &[hir::Expr]) -> io::Result<()> {
self.print_expr_maybe_paren(func)?;
self.print_call_post(args)
}
fn print_expr_method_call(&mut self,
segment: &hir::PathSegment,
args: &[hir::Expr])
-> io::Result<()> {
let base_args = &args[1..];
self.print_expr(&args[0])?;
self.s.word(".")?;
self.print_name(segment.name)?;
if !segment.parameters.lifetimes().is_empty() ||
!segment.parameters.types().is_empty() ||
!segment.parameters.bindings().is_empty() {
self.print_path_parameters(&segment.parameters, true)?;
}
self.print_call_post(base_args)
}
fn print_expr_binary(&mut self,
op: hir::BinOp,
lhs: &hir::Expr,
rhs: &hir::Expr)
-> io::Result<()> {
self.print_expr(lhs)?;
self.s.space()?;
self.word_space(op.node.as_str())?;
self.print_expr(rhs)
}
fn print_expr_unary(&mut self, op: hir::UnOp, expr: &hir::Expr) -> io::Result<()> {
self.s.word(op.as_str())?;
self.print_expr_maybe_paren(expr)
}
fn print_expr_addr_of(&mut self,
mutability: hir::Mutability,
expr: &hir::Expr)
-> io::Result<()> {
self.s.word("&")?;
self.print_mutability(mutability)?;
self.print_expr_maybe_paren(expr)
}
pub fn print_expr(&mut self, expr: &hir::Expr) -> io::Result<()> {
self.maybe_print_comment(expr.span.lo)?;
self.print_outer_attributes(&expr.attrs)?;
self.ibox(indent_unit)?;
self.ann.pre(self, NodeExpr(expr))?;
match expr.node {
hir::ExprBox(ref expr) => {
self.word_space("box")?;
self.print_expr(expr)?;
}
hir::ExprArray(ref exprs) => {
self.print_expr_vec(exprs)?;
}
hir::ExprRepeat(ref element, count) => {
self.print_expr_repeat(&element, count)?;
}
hir::ExprStruct(ref qpath, ref fields, ref wth) => {
self.print_expr_struct(qpath, &fields[..], wth)?;
}
hir::ExprTup(ref exprs) => {
self.print_expr_tup(exprs)?;
}
hir::ExprCall(ref func, ref args) => {
self.print_expr_call(&func, args)?;
}
hir::ExprMethodCall(ref segment, _, ref args) => {
self.print_expr_method_call(segment, args)?;
}
hir::ExprBinary(op, ref lhs, ref rhs) => {
self.print_expr_binary(op, &lhs, &rhs)?;
}
hir::ExprUnary(op, ref expr) => {
self.print_expr_unary(op, &expr)?;
}
hir::ExprAddrOf(m, ref expr) => {
self.print_expr_addr_of(m, &expr)?;
}
hir::ExprLit(ref lit) => {
self.print_literal(&lit)?;
}
hir::ExprCast(ref expr, ref ty) => {
self.print_expr(&expr)?;
self.s.space()?;
self.word_space("as")?;
self.print_type(&ty)?;
}
hir::ExprType(ref expr, ref ty) => {
self.print_expr(&expr)?;
self.word_space(":")?;
self.print_type(&ty)?;
}
hir::ExprIf(ref test, ref blk, ref elseopt) => {
self.print_if(&test, &blk, elseopt.as_ref().map(|e| &**e))?;
}
hir::ExprWhile(ref test, ref blk, opt_sp_name) => {
if let Some(sp_name) = opt_sp_name {
self.print_name(sp_name.node)?;
self.word_space(":")?;
}
self.head("while")?;
self.print_expr(&test)?;
self.s.space()?;
self.print_block(&blk)?;
}
hir::ExprLoop(ref blk, opt_sp_name, _) => {
if let Some(sp_name) = opt_sp_name {
self.print_name(sp_name.node)?;
self.word_space(":")?;
}
self.head("loop")?;
self.s.space()?;
self.print_block(&blk)?;
}
hir::ExprMatch(ref expr, ref arms, _) => {
self.cbox(indent_unit)?;
self.ibox(4)?;
self.word_nbsp("match")?;
self.print_expr(&expr)?;
self.s.space()?;
self.bopen()?;
for arm in arms {
self.print_arm(arm)?;
}
self.bclose_(expr.span, indent_unit)?;
}
hir::ExprClosure(capture_clause, ref decl, body, _fn_decl_span) => {
self.print_capture_clause(capture_clause)?;
self.print_closure_args(&decl, body)?;
self.s.space()?;
// this is a bare expression
self.ann.nested(self, Nested::Body(body))?;
self.end()?; // need to close a box
// a box will be closed by print_expr, but we didn't want an overall
// wrapper so we closed the corresponding opening. so create an
// empty box to satisfy the close.
self.ibox(0)?;
}
hir::ExprBlock(ref blk) => {
// containing cbox, will be closed by print-block at }
self.cbox(indent_unit)?;
// head-box, will be closed by print-block after {
self.ibox(0)?;
self.print_block(&blk)?;
}
hir::ExprAssign(ref lhs, ref rhs) => {
self.print_expr(&lhs)?;
self.s.space()?;
self.word_space("=")?;
self.print_expr(&rhs)?;
}
hir::ExprAssignOp(op, ref lhs, ref rhs) => {
self.print_expr(&lhs)?;
self.s.space()?;
self.s.word(op.node.as_str())?;
self.word_space("=")?;
self.print_expr(&rhs)?;
}
hir::ExprField(ref expr, name) => {
self.print_expr(&expr)?;
self.s.word(".")?;
self.print_name(name.node)?;
}
hir::ExprTupField(ref expr, id) => {
self.print_expr(&expr)?;
self.s.word(".")?;
self.print_usize(id.node)?;
}
hir::ExprIndex(ref expr, ref index) => {
self.print_expr(&expr)?;
self.s.word("[")?;
self.print_expr(&index)?;
self.s.word("]")?;
}
hir::ExprPath(ref qpath) => {
self.print_qpath(qpath, true)?
}
hir::ExprBreak(label, ref opt_expr) => {
self.s.word("break")?;
self.s.space()?;
if let Some(label_ident) = label.ident {
self.print_name(label_ident.node.name)?;
self.s.space()?;
}
if let Some(ref expr) = *opt_expr {
self.print_expr(expr)?;
self.s.space()?;
}
}
hir::ExprAgain(label) => {
self.s.word("continue")?;
self.s.space()?;
if let Some(label_ident) = label.ident {
self.print_name(label_ident.node.name)?;
self.s.space()?
}
}
hir::ExprRet(ref result) => {
self.s.word("return")?;
match *result {
Some(ref expr) => {
self.s.word(" ")?;
self.print_expr(&expr)?;
}
_ => (),
}
}
hir::ExprInlineAsm(ref a, ref outputs, ref inputs) => {
self.s.word("asm!")?;
self.popen()?;
self.print_string(&a.asm.as_str(), a.asm_str_style)?;
self.word_space(":")?;
let mut out_idx = 0;
self.commasep(Inconsistent, &a.outputs, |s, out| {
let constraint = out.constraint.as_str();
let mut ch = constraint.chars();
match ch.next() {
Some('=') if out.is_rw => {
s.print_string(&format!("+{}", ch.as_str()),
ast::StrStyle::Cooked)?
}
_ => s.print_string(&constraint, ast::StrStyle::Cooked)?,
}
s.popen()?;
s.print_expr(&outputs[out_idx])?;
s.pclose()?;
out_idx += 1;
Ok(())
})?;
self.s.space()?;
self.word_space(":")?;
let mut in_idx = 0;
self.commasep(Inconsistent, &a.inputs, |s, co| {
s.print_string(&co.as_str(), ast::StrStyle::Cooked)?;
s.popen()?;
s.print_expr(&inputs[in_idx])?;
s.pclose()?;
in_idx += 1;
Ok(())
})?;
self.s.space()?;
self.word_space(":")?;
self.commasep(Inconsistent, &a.clobbers, |s, co| {
s.print_string(&co.as_str(), ast::StrStyle::Cooked)?;
Ok(())
})?;
let mut options = vec![];
if a.volatile {
options.push("volatile");
}
if a.alignstack {
options.push("alignstack");
}
if a.dialect == ast::AsmDialect::Intel {
options.push("intel");
}
if !options.is_empty() {
self.s.space()?;
self.word_space(":")?;
self.commasep(Inconsistent, &options, |s, &co| {
s.print_string(co, ast::StrStyle::Cooked)?;
Ok(())
})?;
}
self.pclose()?;
}
}
self.ann.post(self, NodeExpr(expr))?;
self.end()
}
pub fn print_local_decl(&mut self, loc: &hir::Local) -> io::Result<()> {
self.print_pat(&loc.pat)?;
if let Some(ref ty) = loc.ty {
self.word_space(":")?;
self.print_type(&ty)?;
}
Ok(())
}
pub fn print_decl(&mut self, decl: &hir::Decl) -> io::Result<()> {
self.maybe_print_comment(decl.span.lo)?;
match decl.node {
hir::DeclLocal(ref loc) => {
self.space_if_not_bol()?;
self.ibox(indent_unit)?;
self.word_nbsp("let")?;
self.ibox(indent_unit)?;
self.print_local_decl(&loc)?;
self.end()?;
if let Some(ref init) = loc.init {
self.nbsp()?;
self.word_space("=")?;
self.print_expr(&init)?;
}
self.end()
}
hir::DeclItem(item) => {
self.ann.nested(self, Nested::Item(item))
}
}
}
pub fn print_usize(&mut self, i: usize) -> io::Result<()> {
self.s.word(&i.to_string())
}
pub fn print_name(&mut self, name: ast::Name) -> io::Result<()> {
self.s.word(&name.as_str())?;
self.ann.post(self, NodeName(&name))
}
pub fn print_for_decl(&mut self, loc: &hir::Local, coll: &hir::Expr) -> io::Result<()> {
self.print_local_decl(loc)?;
self.s.space()?;
self.word_space("in")?;
self.print_expr(coll)
}
pub fn print_path(&mut self,
path: &hir::Path,
colons_before_params: bool)
-> io::Result<()> {
self.maybe_print_comment(path.span.lo)?;
for (i, segment) in path.segments.iter().enumerate() {
if i > 0 {
self.s.word("::")?
}
if segment.name != keywords::CrateRoot.name() &&
segment.name != keywords::DollarCrate.name() {
self.print_name(segment.name)?;
self.print_path_parameters(&segment.parameters, colons_before_params)?;
}
}
Ok(())
}
pub fn print_qpath(&mut self,
qpath: &hir::QPath,
colons_before_params: bool)
-> io::Result<()> {
match *qpath {
hir::QPath::Resolved(None, ref path) => {
self.print_path(path, colons_before_params)
}
hir::QPath::Resolved(Some(ref qself), ref path) => {
self.s.word("<")?;
self.print_type(qself)?;
self.s.space()?;
self.word_space("as")?;
for (i, segment) in path.segments[..path.segments.len() - 1].iter().enumerate() {
if i > 0 {
self.s.word("::")?
}
if segment.name != keywords::CrateRoot.name() &&
segment.name != keywords::DollarCrate.name() {
self.print_name(segment.name)?;
self.print_path_parameters(&segment.parameters, colons_before_params)?;
}
}
self.s.word(">")?;
self.s.word("::")?;
let item_segment = path.segments.last().unwrap();
self.print_name(item_segment.name)?;
self.print_path_parameters(&item_segment.parameters, colons_before_params)
}
hir::QPath::TypeRelative(ref qself, ref item_segment) => {
self.s.word("<")?;
self.print_type(qself)?;
self.s.word(">")?;
self.s.word("::")?;
self.print_name(item_segment.name)?;
self.print_path_parameters(&item_segment.parameters, colons_before_params)
}
}
}
fn print_path_parameters(&mut self,
parameters: &hir::PathParameters,
colons_before_params: bool)
-> io::Result<()> {
match *parameters {
hir::AngleBracketedParameters(ref data) => {
let start = if colons_before_params { "::<" } else { "<" };
let empty = Cell::new(true);
let start_or_comma = |this: &mut Self| {
if empty.get() {
empty.set(false);
this.s.word(start)
} else {
this.word_space(",")
}
};
if !data.lifetimes.iter().all(|lt| lt.is_elided()) {
for lifetime in &data.lifetimes {
start_or_comma(self)?;
self.print_lifetime(lifetime)?;
}
}
if !data.types.is_empty() {
start_or_comma(self)?;
self.commasep(Inconsistent, &data.types, |s, ty| s.print_type(&ty))?;
}
// FIXME(eddyb) This would leak into error messages, e.g.:
// "non-exhaustive patterns: `Some::<..>(_)` not covered".
if data.infer_types && false {
start_or_comma(self)?;
self.s.word("..")?;
}
for binding in data.bindings.iter() {
start_or_comma(self)?;
self.print_name(binding.name)?;
self.s.space()?;
self.word_space("=")?;
self.print_type(&binding.ty)?;
}
if !empty.get() {
self.s.word(">")?
}
}
hir::ParenthesizedParameters(ref data) => {
self.s.word("(")?;
self.commasep(Inconsistent, &data.inputs, |s, ty| s.print_type(&ty))?;
self.s.word(")")?;
if let Some(ref ty) = data.output {
self.space_if_not_bol()?;
self.word_space("->")?;
self.print_type(&ty)?;
}
}
}
Ok(())
}
pub fn print_pat(&mut self, pat: &hir::Pat) -> io::Result<()> {
self.maybe_print_comment(pat.span.lo)?;
self.ann.pre(self, NodePat(pat))?;
// Pat isn't normalized, but the beauty of it
// is that it doesn't matter
match pat.node {
PatKind::Wild => self.s.word("_")?,
PatKind::Binding(binding_mode, _, ref path1, ref sub) => {
match binding_mode {
hir::BindingAnnotation::Ref => {
self.word_nbsp("ref")?;
self.print_mutability(hir::MutImmutable)?;
}
hir::BindingAnnotation::RefMut => {
self.word_nbsp("ref")?;
self.print_mutability(hir::MutMutable)?;
}
hir::BindingAnnotation::Unannotated => {}
hir::BindingAnnotation::Mutable => {
self.word_nbsp("mut")?;
}
}
self.print_name(path1.node)?;
if let Some(ref p) = *sub {
self.s.word("@")?;
self.print_pat(&p)?;
}
}
PatKind::TupleStruct(ref qpath, ref elts, ddpos) => {
self.print_qpath(qpath, true)?;
self.popen()?;
if let Some(ddpos) = ddpos {
self.commasep(Inconsistent, &elts[..ddpos], |s, p| s.print_pat(&p))?;
if ddpos != 0 {
self.word_space(",")?;
}
self.s.word("..")?;
if ddpos != elts.len() {
self.s.word(",")?;
self.commasep(Inconsistent, &elts[ddpos..], |s, p| s.print_pat(&p))?;
}
} else {
self.commasep(Inconsistent, &elts[..], |s, p| s.print_pat(&p))?;
}
self.pclose()?;
}
PatKind::Path(ref qpath) => {
self.print_qpath(qpath, true)?;
}
PatKind::Struct(ref qpath, ref fields, etc) => {
self.print_qpath(qpath, true)?;
self.nbsp()?;
self.word_space("{")?;
self.commasep_cmnt(Consistent,
&fields[..],
|s, f| {
s.cbox(indent_unit)?;
if !f.node.is_shorthand {
s.print_name(f.node.name)?;
s.word_nbsp(":")?;
}
s.print_pat(&f.node.pat)?;
s.end()
},
|f| f.node.pat.span)?;
if etc {
if !fields.is_empty() {
self.word_space(",")?;
}
self.s.word("..")?;
}
self.s.space()?;
self.s.word("}")?;
}
PatKind::Tuple(ref elts, ddpos) => {
self.popen()?;
if let Some(ddpos) = ddpos {
self.commasep(Inconsistent, &elts[..ddpos], |s, p| s.print_pat(&p))?;
if ddpos != 0 {
self.word_space(",")?;
}
self.s.word("..")?;
if ddpos != elts.len() {
self.s.word(",")?;
self.commasep(Inconsistent, &elts[ddpos..], |s, p| s.print_pat(&p))?;
}
} else {
self.commasep(Inconsistent, &elts[..], |s, p| s.print_pat(&p))?;
if elts.len() == 1 {
self.s.word(",")?;
}
}
self.pclose()?;
}
PatKind::Box(ref inner) => {
self.s.word("box ")?;
self.print_pat(&inner)?;
}
PatKind::Ref(ref inner, mutbl) => {
self.s.word("&")?;
if mutbl == hir::MutMutable {
self.s.word("mut ")?;
}
self.print_pat(&inner)?;
}
PatKind::Lit(ref e) => self.print_expr(&e)?,
PatKind::Range(ref begin, ref end, ref end_kind) => {
self.print_expr(&begin)?;
self.s.space()?;
match *end_kind {
RangeEnd::Included => self.s.word("...")?,
RangeEnd::Excluded => self.s.word("..")?,
}
self.print_expr(&end)?;
}
PatKind::Slice(ref before, ref slice, ref after) => {
self.s.word("[")?;
self.commasep(Inconsistent, &before[..], |s, p| s.print_pat(&p))?;
if let Some(ref p) = *slice {
if !before.is_empty() {
self.word_space(",")?;
}
if p.node != PatKind::Wild {
self.print_pat(&p)?;
}
self.s.word("..")?;
if !after.is_empty() {
self.word_space(",")?;
}
}
self.commasep(Inconsistent, &after[..], |s, p| s.print_pat(&p))?;
self.s.word("]")?;
}
}
self.ann.post(self, NodePat(pat))
}
fn print_arm(&mut self, arm: &hir::Arm) -> io::Result<()> {
// I have no idea why this check is necessary, but here it
// is :(
if arm.attrs.is_empty() {
self.s.space()?;
}
self.cbox(indent_unit)?;
self.ibox(0)?;
self.print_outer_attributes(&arm.attrs)?;
let mut first = true;
for p in &arm.pats {
if first {
first = false;
} else {
self.s.space()?;
self.word_space("|")?;
}
self.print_pat(&p)?;
}
self.s.space()?;
if let Some(ref e) = arm.guard {
self.word_space("if")?;
self.print_expr(&e)?;
self.s.space()?;
}
self.word_space("=>")?;
match arm.body.node {
hir::ExprBlock(ref blk) => {
// the block will close the pattern's ibox
self.print_block_unclosed_indent(&blk, indent_unit)?;
// If it is a user-provided unsafe block, print a comma after it
if let hir::UnsafeBlock(hir::UserProvided) = blk.rules {
self.s.word(",")?;
}
}
_ => {
self.end()?; // close the ibox for the pattern
self.print_expr(&arm.body)?;
self.s.word(",")?;
}
}
self.end() // close enclosing cbox
}
pub fn print_fn(&mut self,
decl: &hir::FnDecl,
unsafety: hir::Unsafety,
constness: hir::Constness,
abi: Abi,
name: Option<ast::Name>,
generics: &hir::Generics,
vis: &hir::Visibility,
arg_names: &[Spanned<ast::Name>],
body_id: Option<hir::BodyId>)
-> io::Result<()> {
self.print_fn_header_info(unsafety, constness, abi, vis)?;
if let Some(name) = name {
self.nbsp()?;
self.print_name(name)?;
}
self.print_generics(generics)?;
self.popen()?;
let mut i = 0;
// Make sure we aren't supplied *both* `arg_names` and `body_id`.
assert!(arg_names.is_empty() || body_id.is_none());
self.commasep(Inconsistent, &decl.inputs, |s, ty| {
s.ibox(indent_unit)?;
if let Some(name) = arg_names.get(i) {
s.s.word(&name.node.as_str())?;
s.s.word(":")?;
s.s.space()?;
} else if let Some(body_id) = body_id {
s.ann.nested(s, Nested::BodyArgPat(body_id, i))?;
s.s.word(":")?;
s.s.space()?;
}
i += 1;
s.print_type(ty)?;
s.end()
})?;
if decl.variadic {
self.s.word(", ...")?;
}
self.pclose()?;
self.print_fn_output(decl)?;
self.print_where_clause(&generics.where_clause)
}
fn print_closure_args(&mut self, decl: &hir::FnDecl, body_id: hir::BodyId) -> io::Result<()> {
self.s.word("|")?;
let mut i = 0;
self.commasep(Inconsistent, &decl.inputs, |s, ty| {
s.ibox(indent_unit)?;
s.ann.nested(s, Nested::BodyArgPat(body_id, i))?;
i += 1;
if ty.node != hir::TyInfer {
s.s.word(":")?;
s.s.space()?;
s.print_type(ty)?;
}
s.end()
})?;
self.s.word("|")?;
if let hir::DefaultReturn(..) = decl.output {
return Ok(());
}
self.space_if_not_bol()?;
self.word_space("->")?;
match decl.output {
hir::Return(ref ty) => {
self.print_type(&ty)?;
self.maybe_print_comment(ty.span.lo)
}
hir::DefaultReturn(..) => unreachable!(),
}
}
pub fn print_capture_clause(&mut self, capture_clause: hir::CaptureClause) -> io::Result<()> {
match capture_clause {
hir::CaptureByValue => self.word_space("move"),
hir::CaptureByRef => Ok(()),
}
}
pub fn print_bounds(&mut self, prefix: &str, bounds: &[hir::TyParamBound]) -> io::Result<()> {
        if bounds.is_empty() {
            return Ok(());
        }
        self.s.word(prefix)?;
        let mut first = true;
        for bound in bounds {
            self.nbsp()?;
            if first {
                first = false;
            } else {
                self.word_space("+")?;
            }
            match *bound {
                TraitTyParamBound(ref tref, TraitBoundModifier::None) => {
                    self.print_poly_trait_ref(tref)
                }
                TraitTyParamBound(ref tref, TraitBoundModifier::Maybe) => {
                    self.s.word("?")?;
                    self.print_poly_trait_ref(tref)
                }
                RegionTyParamBound(ref lt) => {
                    self.print_lifetime(lt)
                }
            }?
        }
        Ok(())
}
pub fn print_lifetime(&mut self, lifetime: &hir::Lifetime) -> io::Result<()> {
self.print_name(lifetime.name)
}
pub fn print_lifetime_def(&mut self, lifetime: &hir::LifetimeDef) -> io::Result<()> {
self.print_lifetime(&lifetime.lifetime)?;
let mut sep = ":";
for v in &lifetime.bounds {
self.s.word(sep)?;
self.print_lifetime(v)?;
sep = "+";
}
Ok(())
}
pub fn print_generics(&mut self, generics: &hir::Generics) -> io::Result<()> {
let total = generics.lifetimes.len() + generics.ty_params.len();
if total == 0 {
return Ok(());
}
self.s.word("<")?;
        let ints: Vec<usize> = (0..total).collect();
self.commasep(Inconsistent, &ints[..], |s, &idx| {
if idx < generics.lifetimes.len() {
let lifetime = &generics.lifetimes[idx];
s.print_lifetime_def(lifetime)
} else {
let idx = idx - generics.lifetimes.len();
let param = &generics.ty_params[idx];
s.print_ty_param(param)
}
})?;
self.s.word(">")?;
Ok(())
}
pub fn print_ty_param(&mut self, param: &hir::TyParam) -> io::Result<()> {
self.print_name(param.name)?;
self.print_bounds(":", ¶m.bounds)?;
match param.default {
Some(ref default) => {
self.s.space()?;
self.word_space("=")?;
self.print_type(&default)
}
_ => Ok(()),
}
}
pub fn print_where_clause(&mut self, where_clause: &hir::WhereClause) -> io::Result<()> {
if where_clause.predicates.is_empty() {
return Ok(());
}
self.s.space()?;
self.word_space("where")?;
for (i, predicate) in where_clause.predicates.iter().enumerate() {
if i != 0 {
self.word_space(",")?;
}
match predicate {
&hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate{ref bound_lifetimes,
ref bounded_ty,
ref bounds,
..}) => {
self.print_formal_lifetime_list(bound_lifetimes)?;
self.print_type(&bounded_ty)?;
self.print_bounds(":", bounds)?;
}
&hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate{ref lifetime,
ref bounds,
..}) => {
self.print_lifetime(lifetime)?;
self.s.word(":")?;
                    for (i, bound) in bounds.iter().enumerate() {
                        self.nbsp()?;
                        if i != 0 {
                            self.word_space("+")?;
                        }
                        self.print_lifetime(bound)?;
                    }
}
&hir::WherePredicate::EqPredicate(hir::WhereEqPredicate{ref lhs_ty,
ref rhs_ty,
..}) => {
self.print_type(lhs_ty)?;
self.s.space()?;
self.word_space("=")?;
self.print_type(rhs_ty)?;
}
}
}
Ok(())
}
pub fn print_mutability(&mut self, mutbl: hir::Mutability) -> io::Result<()> {
match mutbl {
hir::MutMutable => self.word_nbsp("mut"),
hir::MutImmutable => Ok(()),
}
}
pub fn print_mt(&mut self, mt: &hir::MutTy) -> io::Result<()> {
self.print_mutability(mt.mutbl)?;
self.print_type(&mt.ty)
}
pub fn print_fn_output(&mut self, decl: &hir::FnDecl) -> io::Result<()> {
if let hir::DefaultReturn(..) = decl.output {
return Ok(());
}
self.space_if_not_bol()?;
self.ibox(indent_unit)?;
self.word_space("->")?;
match decl.output {
hir::DefaultReturn(..) => unreachable!(),
hir::Return(ref ty) => self.print_type(&ty)?,
}
self.end()?;
match decl.output {
hir::Return(ref output) => self.maybe_print_comment(output.span.lo),
_ => Ok(()),
}
}
pub fn print_ty_fn(&mut self,
abi: Abi,
unsafety: hir::Unsafety,
decl: &hir::FnDecl,
name: Option<ast::Name>,
generics: &hir::Generics)
-> io::Result<()> {
self.ibox(indent_unit)?;
if !generics.lifetimes.is_empty() || !generics.ty_params.is_empty() {
self.s.word("for")?;
self.print_generics(generics)?;
}
let generics = hir::Generics {
lifetimes: hir::HirVec::new(),
ty_params: hir::HirVec::new(),
where_clause: hir::WhereClause {
id: ast::DUMMY_NODE_ID,
predicates: hir::HirVec::new(),
},
span: syntax_pos::DUMMY_SP,
};
self.print_fn(decl,
unsafety,
hir::Constness::NotConst,
abi,
name,
&generics,
&hir::Inherited,
&[],
None)?;
self.end()
}
pub fn maybe_print_trailing_comment(&mut self,
span: syntax_pos::Span,
next_pos: Option<BytePos>)
-> io::Result<()> {
let cm = match self.cm {
Some(cm) => cm,
_ => return Ok(()),
};
if let Some(ref cmnt) = self.next_comment() {
if (*cmnt).style != comments::Trailing {
return Ok(());
}
let span_line = cm.lookup_char_pos(span.hi);
let comment_line = cm.lookup_char_pos((*cmnt).pos);
let mut next = (*cmnt).pos + BytePos(1);
if let Some(p) = next_pos {
next = p;
}
if span.hi < (*cmnt).pos && (*cmnt).pos < next &&
span_line.line == comment_line.line {
self.print_comment(cmnt)?;
}
}
Ok(())
}
pub fn print_remaining_comments(&mut self) -> io::Result<()> {
// If there aren't any remaining comments, then we need to manually
// make sure there is a line break at the end.
if self.next_comment().is_none() {
self.s.hardbreak()?;
}
        while let Some(ref cmnt) = self.next_comment() {
            self.print_comment(cmnt)?;
        }
Ok(())
}
pub fn print_opt_abi_and_extern_if_nondefault(&mut self,
opt_abi: Option<Abi>)
-> io::Result<()> {
match opt_abi {
Some(Abi::Rust) => Ok(()),
Some(abi) => {
self.word_nbsp("extern")?;
self.word_nbsp(&abi.to_string())
}
None => Ok(()),
}
}
pub fn print_extern_opt_abi(&mut self, opt_abi: Option<Abi>) -> io::Result<()> {
match opt_abi {
Some(abi) => {
self.word_nbsp("extern")?;
self.word_nbsp(&abi.to_string())
}
None => Ok(()),
}
}
pub fn print_fn_header_info(&mut self,
unsafety: hir::Unsafety,
constness: hir::Constness,
abi: Abi,
vis: &hir::Visibility)
-> io::Result<()> {
self.s.word(&visibility_qualified(vis, ""))?;
self.print_unsafety(unsafety)?;
match constness {
hir::Constness::NotConst => {}
hir::Constness::Const => self.word_nbsp("const")?,
}
if abi != Abi::Rust {
self.word_nbsp("extern")?;
self.word_nbsp(&abi.to_string())?;
}
self.s.word("fn")
}
pub fn print_unsafety(&mut self, s: hir::Unsafety) -> io::Result<()> {
match s {
hir::Unsafety::Normal => Ok(()),
hir::Unsafety::Unsafe => self.word_nbsp("unsafe"),
}
}
}
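// Illustrative sketch (not part of the original source): the `start_or_comma`
// pattern from `print_path_parameters` above, reduced to plain strings. The
// opening bracket is emitted lazily on the first element, so an empty
// parameter list prints nothing and no `<>` cleanup pass is needed. The
// original uses a `Cell<bool>` because the flag is shared with a closure
// that is invoked from several loops.
#[cfg(test)]
mod lazy_separator_sketch {
    use std::cell::Cell;

    fn join_angle(items: &[&str]) -> String {
        let mut out = String::new();
        let empty = Cell::new(true);
        for item in items {
            if empty.get() {
                empty.set(false);
                out.push('<');
            } else {
                out.push_str(", ");
            }
            out.push_str(item);
        }
        if !empty.get() {
            out.push('>');
        }
        out
    }

    #[test]
    fn lazy_open_bracket() {
        assert_eq!(join_angle(&[]), "");
        assert_eq!(join_angle(&["'a", "T"]), "<'a, T>");
    }
}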
// Dup'ed from parse::classify, but adapted for the HIR.
/// Does this expression require a semicolon to be treated
/// as a statement? The negation of this: 'can this expression
/// be used as a statement without a semicolon' -- is used
/// as an early-bail-out in the parser so that, for instance,
/// if true {...} else {...}
/// |x| 5
/// isn't parsed as (if true {...} else {...} | x) | 5
fn expr_requires_semi_to_be_stmt(e: &hir::Expr) -> bool {
match e.node {
hir::ExprIf(..) |
hir::ExprMatch(..) |
hir::ExprBlock(_) |
hir::ExprWhile(..) |
hir::ExprLoop(..) => false,
_ => true,
}
}
/// This statement requires a semicolon after it.
/// Note that in one case (`StmtSemi`), we've already
/// seen the semicolon, and thus don't need another.
fn stmt_ends_with_semi(stmt: &hir::Stmt_) -> bool {
match *stmt {
hir::StmtDecl(ref d, _) => {
match d.node {
hir::DeclLocal(_) => true,
hir::DeclItem(_) => false,
}
}
hir::StmtExpr(ref e, _) => {
expr_requires_semi_to_be_stmt(&e)
}
hir::StmtSemi(..) => {
false
}
}
}
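// Worked examples (illustrative, not part of the original source):
// `expr_requires_semi_to_be_stmt(if c { 1 } else { 2 })` is `false`, so the
// parser may treat the `if` as a complete statement without `;` -- the early
// bail-out that keeps `if true {...} else {...} |x| 5` from being parsed as
// one `|` expression. For `a + b` it is `true`, and accordingly
// `stmt_ends_with_semi` reports `true` for the `StmtExpr` wrapping it.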
| 35.527765 | 98 | 0.431596 |
14f277d1767bbc3faf3915ddec606226c46d6327 | 12,992 | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use rustc::hir;
use rustc::hir::def_id::{DefId, LOCAL_CRATE};
use rustc::mir::*;
use rustc::mir::transform::{MirSuite, MirPassIndex, MirSource};
use rustc::ty::TyCtxt;
use rustc::ty::item_path;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::indexed_vec::{Idx};
use std::fmt::Display;
use std::fs;
use std::io::{self, Write};
use std::path::{PathBuf, Path};
const INDENT: &'static str = " ";
/// Alignment for lining up comments following MIR statements
const ALIGN: usize = 40;
/// If the session is properly configured, dumps a human-readable
/// representation of the mir into:
///
/// ```text
/// rustc.node<node_id>.<pass_num>.<pass_name>.<disambiguator>
/// ```
///
/// Output from this function is controlled by passing `-Z dump-mir=<filter>`,
/// where `<filter>` takes the following forms:
///
/// - `all` -- dump MIR for all fns, all passes, all everything
/// - `substring1&substring2,...` -- `&`-separated list of substrings
/// that can appear in the pass-name or the `item_path_str` for the given
/// node-id. If any one of the substrings match, the data is dumped out.
pub fn dump_mir<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
pass_num: Option<(MirSuite, MirPassIndex)>,
pass_name: &str,
disambiguator: &Display,
source: MirSource,
mir: &Mir<'tcx>) {
if !dump_enabled(tcx, pass_name, source) {
return;
}
let node_path = item_path::with_forced_impl_filename_line(|| { // see notes on #41697 below
tcx.item_path_str(tcx.hir.local_def_id(source.item_id()))
});
dump_matched_mir_node(tcx, pass_num, pass_name, &node_path,
disambiguator, source, mir);
for (index, promoted_mir) in mir.promoted.iter_enumerated() {
let promoted_source = MirSource::Promoted(source.item_id(), index);
dump_matched_mir_node(tcx, pass_num, pass_name, &node_path, disambiguator,
promoted_source, promoted_mir);
}
}
pub fn dump_enabled<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
pass_name: &str,
source: MirSource)
-> bool {
let filters = match tcx.sess.opts.debugging_opts.dump_mir {
None => return false,
Some(ref filters) => filters,
};
let node_id = source.item_id();
let node_path = item_path::with_forced_impl_filename_line(|| { // see notes on #41697 below
tcx.item_path_str(tcx.hir.local_def_id(node_id))
});
filters.split("&")
.any(|filter| {
filter == "all" ||
pass_name.contains(filter) ||
node_path.contains(filter)
})
}
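// Illustrative sketch (not part of the original source): the filter
// semantics documented on `dump_mir`, extracted into a pure function so the
// matching rules can be unit tested without a `TyCtxt`.
#[cfg(test)]
mod dump_filter_sketch {
    fn matches(filters: &str, pass_name: &str, node_path: &str) -> bool {
        filters.split('&').any(|filter| {
            filter == "all" || pass_name.contains(filter) || node_path.contains(filter)
        })
    }

    #[test]
    fn filter_forms() {
        // `all` matches everything.
        assert!(matches("all", "SimplifyCfg", "foo::bar"));
        // Any one `&`-separated substring hitting the pass name suffices...
        assert!(matches("Simplify&unrelated", "SimplifyCfg", "foo::bar"));
        // ...or hitting the item path.
        assert!(matches("foo::", "SimplifyCfg", "foo::bar"));
        assert!(!matches("NoSuchPass", "SimplifyCfg", "foo::bar"));
    }
}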
// #41697 -- we use `with_forced_impl_filename_line()` because
// `item_path_str()` would otherwise trigger `type_of`, and this can
// run while we are already attempting to evaluate `type_of`.
fn dump_matched_mir_node<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
pass_num: Option<(MirSuite, MirPassIndex)>,
pass_name: &str,
node_path: &str,
disambiguator: &Display,
source: MirSource,
mir: &Mir<'tcx>) {
let promotion_id = match source {
MirSource::Promoted(_, id) => format!("-{:?}", id),
_ => String::new()
};
let pass_num = if tcx.sess.opts.debugging_opts.dump_mir_exclude_pass_number {
format!("")
} else {
match pass_num {
None => format!(".-------"),
Some((suite, pass_num)) => format!(".{:03}-{:03}", suite.0, pass_num.0),
}
};
let mut file_path = PathBuf::new();
if let Some(ref file_dir) = tcx.sess.opts.debugging_opts.dump_mir_dir {
let p = Path::new(file_dir);
file_path.push(p);
};
let file_name = format!("rustc.node{}{}{}.{}.{}.mir",
source.item_id(), promotion_id, pass_num, pass_name, disambiguator);
file_path.push(&file_name);
let _ = fs::File::create(&file_path).and_then(|mut file| {
writeln!(file, "// MIR for `{}`", node_path)?;
writeln!(file, "// source = {:?}", source)?;
writeln!(file, "// pass_name = {}", pass_name)?;
writeln!(file, "// disambiguator = {}", disambiguator)?;
writeln!(file, "")?;
write_mir_fn(tcx, source, mir, &mut file)?;
Ok(())
});
}
/// Write out a human-readable textual representation for the given MIR.
pub fn write_mir_pretty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
single: Option<DefId>,
w: &mut Write)
-> io::Result<()>
{
writeln!(w, "// WARNING: This output format is intended for human consumers only")?;
writeln!(w, "// and is subject to change without notice. Knock yourself out.")?;
let mut first = true;
for def_id in dump_mir_def_ids(tcx, single) {
let mir = &tcx.optimized_mir(def_id);
if first {
first = false;
} else {
// Put empty lines between all items
writeln!(w, "")?;
}
let id = tcx.hir.as_local_node_id(def_id).unwrap();
let src = MirSource::from_node(tcx, id);
write_mir_fn(tcx, src, mir, w)?;
for (i, mir) in mir.promoted.iter_enumerated() {
writeln!(w, "")?;
write_mir_fn(tcx, MirSource::Promoted(id, i), mir, w)?;
}
}
Ok(())
}
pub fn write_mir_fn<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
src: MirSource,
mir: &Mir<'tcx>,
w: &mut Write)
-> io::Result<()> {
write_mir_intro(tcx, src, mir, w)?;
for block in mir.basic_blocks().indices() {
write_basic_block(tcx, block, mir, w)?;
if block.index() + 1 != mir.basic_blocks().len() {
writeln!(w, "")?;
}
}
writeln!(w, "}}")?;
Ok(())
}
/// Write out a human-readable textual representation for the given basic block.
fn write_basic_block(tcx: TyCtxt,
block: BasicBlock,
mir: &Mir,
w: &mut Write)
-> io::Result<()> {
let data = &mir[block];
// Basic block label at the top.
writeln!(w, "{}{:?}: {{", INDENT, block)?;
// List of statements in the middle.
let mut current_location = Location { block: block, statement_index: 0 };
for statement in &data.statements {
let indented_mir = format!("{0}{0}{1:?};", INDENT, statement);
writeln!(w, "{0:1$} // {2}",
indented_mir,
ALIGN,
comment(tcx, statement.source_info))?;
current_location.statement_index += 1;
}
// Terminator at the bottom.
let indented_terminator = format!("{0}{0}{1:?};", INDENT, data.terminator().kind);
writeln!(w, "{0:1$} // {2}",
indented_terminator,
ALIGN,
comment(tcx, data.terminator().source_info))?;
writeln!(w, "{}}}", INDENT)
}
fn comment(tcx: TyCtxt, SourceInfo { span, scope }: SourceInfo) -> String {
format!("scope {} at {}", scope.index(), tcx.sess.codemap().span_to_string(span))
}
/// Prints user-defined variables in a scope tree.
///
/// Returns the total number of variables printed.
fn write_scope_tree(tcx: TyCtxt,
mir: &Mir,
scope_tree: &FxHashMap<VisibilityScope, Vec<VisibilityScope>>,
w: &mut Write,
parent: VisibilityScope,
depth: usize)
-> io::Result<()> {
let indent = depth * INDENT.len();
let children = match scope_tree.get(&parent) {
        Some(children) => children,
None => return Ok(()),
};
for &child in children {
let data = &mir.visibility_scopes[child];
assert_eq!(data.parent_scope, Some(parent));
writeln!(w, "{0:1$}scope {2} {{", "", indent, child.index())?;
// User variable types (including the user's name in a comment).
for local in mir.vars_iter() {
let var = &mir.local_decls[local];
let (name, source_info) = if var.source_info.scope == child {
(var.name.unwrap(), var.source_info)
} else {
// Not a variable or not declared in this scope.
continue;
};
let mut_str = if var.mutability == Mutability::Mut {
"mut "
} else {
""
};
let indent = indent + INDENT.len();
let indented_var = format!("{0:1$}let {2}{3:?}: {4};",
INDENT,
indent,
mut_str,
local,
var.ty);
writeln!(w, "{0:1$} // \"{2}\" in {3}",
indented_var,
ALIGN,
name,
comment(tcx, source_info))?;
}
write_scope_tree(tcx, mir, scope_tree, w, child, depth + 1)?;
writeln!(w, "{0:1$}}}", "", depth * INDENT.len())?;
}
Ok(())
}
/// Write out a human-readable textual representation of the MIR's `fn` type and the types of its
/// local variables (both user-defined bindings and compiler temporaries).
fn write_mir_intro<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
src: MirSource,
mir: &Mir,
w: &mut Write)
-> io::Result<()> {
write_mir_sig(tcx, src, mir, w)?;
writeln!(w, " {{")?;
// construct a scope tree and write it out
let mut scope_tree: FxHashMap<VisibilityScope, Vec<VisibilityScope>> = FxHashMap();
for (index, scope_data) in mir.visibility_scopes.iter().enumerate() {
if let Some(parent) = scope_data.parent_scope {
scope_tree.entry(parent)
.or_insert(vec![])
.push(VisibilityScope::new(index));
} else {
// Only the argument scope has no parent, because it's the root.
assert_eq!(index, ARGUMENT_VISIBILITY_SCOPE.index());
}
}
// Print return pointer
let indented_retptr = format!("{}let mut {:?}: {};",
INDENT,
RETURN_POINTER,
mir.return_ty);
writeln!(w, "{0:1$} // return pointer",
indented_retptr,
ALIGN)?;
write_scope_tree(tcx, mir, &scope_tree, w, ARGUMENT_VISIBILITY_SCOPE, 1)?;
write_temp_decls(mir, w)?;
// Add an empty line before the first block is printed.
writeln!(w, "")?;
Ok(())
}
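// Illustrative sketch (not part of the original source): the parent ->
// children map built in `write_mir_intro`, restated over plain `usize` scope
// ids with std's HashMap.
#[cfg(test)]
mod scope_tree_sketch {
    use std::collections::HashMap;

    #[test]
    fn build_tree() {
        // (scope, parent); scope 0 is the root argument scope with no parent.
        let scopes = [(1usize, Some(0usize)), (2, Some(0)), (3, Some(1))];
        let mut tree: HashMap<usize, Vec<usize>> = HashMap::new();
        for &(scope, parent) in &scopes {
            if let Some(parent) = parent {
                tree.entry(parent).or_insert_with(Vec::new).push(scope);
            }
        }
        assert_eq!(tree[&0], vec![1, 2]);
        assert_eq!(tree[&1], vec![3]);
    }
}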
fn write_mir_sig(tcx: TyCtxt, src: MirSource, mir: &Mir, w: &mut Write)
-> io::Result<()>
{
match src {
MirSource::Fn(_) => write!(w, "fn")?,
MirSource::Const(_) => write!(w, "const")?,
MirSource::Static(_, hir::MutImmutable) => write!(w, "static")?,
MirSource::Static(_, hir::MutMutable) => write!(w, "static mut")?,
MirSource::Promoted(_, i) => write!(w, "{:?} in", i)?
}
write!(w, " {}", tcx.node_path_str(src.item_id()))?;
if let MirSource::Fn(_) = src {
write!(w, "(")?;
// fn argument types.
for (i, arg) in mir.args_iter().enumerate() {
if i != 0 {
write!(w, ", ")?;
}
write!(w, "{:?}: {}", Lvalue::Local(arg), mir.local_decls[arg].ty)?;
}
write!(w, ") -> {}", mir.return_ty)
} else {
assert_eq!(mir.arg_count, 0);
write!(w, ": {} =", mir.return_ty)
}
}
fn write_temp_decls(mir: &Mir, w: &mut Write) -> io::Result<()> {
// Compiler-introduced temporary types.
for temp in mir.temps_iter() {
writeln!(w, "{}let mut {:?}: {};", INDENT, temp, mir.local_decls[temp].ty)?;
}
Ok(())
}
pub fn dump_mir_def_ids(tcx: TyCtxt, single: Option<DefId>) -> Vec<DefId> {
if let Some(i) = single {
vec![i]
} else {
tcx.mir_keys(LOCAL_CRATE).iter().cloned().collect()
}
}
| 35.790634 | 97 | 0.525092 |
8ad960b3f1b45ccb441196d4f576fe8e8954f285 | 786 | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test transitive analysis for associated types. Collected types
// should be normalized and new obligations generated.
trait Foo {
type A;
fn foo(&self) {}
}
impl Foo for usize {
type A = usize;
}
struct Bar<T: Foo> { inner: T::A }
fn is_send<T: Send>() {}
fn main() {
is_send::<Bar<usize>>();
}
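// Why this compiles (illustrative note, not part of the original test):
// proving `Bar<usize>: Send` requires `<usize as Foo>::A: Send`; the
// collected associated type normalizes to `usize`, which is `Send`, so the
// generated obligation is discharged.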
| 26.2 | 68 | 0.697201 |
1ed4338fcf82d4e406182cc70ed3e6e5ca4a9413 | 353 | /* automatically generated by rust-bindgen */
#![allow(
dead_code,
non_snake_case,
non_camel_case_types,
non_upper_case_globals
)]
pub mod Foo {
pub type Type = u32;
pub const Bar: Type = 0;
pub const Qux: Type = 1;
}
pub mod Neg {
pub type Type = i32;
pub const MinusOne: Type = -1;
pub const One: Type = 1;
}
| 17.65 | 45 | 0.623229 |
48458811ffca0f88ba9b2b664d9403394a2ad837 | 4,744 | //! File descriptor operations
//!
//! - open(at)
//! - close
//! - dup2
//! - pipe
use super::*;
use alloc::string::String;
impl Syscall<'_> {
/// Opens or creates a file, depending on the flags passed to the call. Returns an integer with the file descriptor.
pub fn sys_open(&self, path: UserInPtr<u8>, flags: usize, mode: usize) -> SysResult {
self.sys_openat(FileDesc::CWD, path, flags, mode)
}
/// open file relative to directory file descriptor
pub fn sys_openat(
&self,
dir_fd: FileDesc,
path: UserInPtr<u8>,
flags: usize,
mode: usize,
) -> SysResult {
let proc = self.linux_process();
let path = path.read_cstring()?;
let flags = OpenFlags::from_bits_truncate(flags);
info!(
"openat: dir_fd={:?}, path={:?}, flags={:?}, mode={:#o}",
dir_fd, path, flags, mode
);
let inode = if flags.contains(OpenFlags::CREATE) {
let (dir_path, file_name) = split_path(&path);
// relative to cwd
let dir_inode = proc.lookup_inode_at(dir_fd, dir_path, true)?;
match dir_inode.find(file_name) {
Ok(file_inode) => {
if flags.contains(OpenFlags::EXCLUSIVE) {
return Err(LxError::EEXIST);
}
file_inode
}
Err(FsError::EntryNotFound) => {
dir_inode.create(file_name, FileType::File, mode as u32)?
}
Err(e) => return Err(LxError::from(e)),
}
} else {
proc.lookup_inode_at(dir_fd, &path, true)?
};
let file = File::new(inode, flags.to_options(), path);
let fd = proc.add_file(file)?;
Ok(fd.into())
}
/// Closes a file descriptor, so that it no longer refers to any file and may be reused.
pub fn sys_close(&self, fd: FileDesc) -> SysResult {
info!("close: fd={:?}", fd);
let proc = self.linux_process();
proc.close_file(fd)?;
Ok(0)
}
    /// Creates a copy of the file descriptor `fd1`, making `fd2` refer to the same file.
pub fn sys_dup2(&self, fd1: FileDesc, fd2: FileDesc) -> SysResult {
info!("dup2: from {:?} to {:?}", fd1, fd2);
let proc = self.linux_process();
        // close fd2 first if it is already open
let _ = proc.close_file(fd2);
let file_like = proc.get_file_like(fd1)?;
proc.add_file_at(fd2, file_like);
Ok(fd2.into())
}
/// Creates a pipe, a unidirectional data channel that can be used for interprocess communication.
pub fn sys_pipe(&self, mut fds: UserOutPtr<[i32; 2]>) -> SysResult {
info!("pipe: fds={:?}", fds);
let proc = self.linux_process();
let (read, write) = Pipe::create_pair();
let read_fd = proc.add_file(File::new(
Arc::new(read),
OpenOptions {
read: true,
write: false,
append: false,
nonblock: false,
},
String::from("pipe_r:[]"),
))?;
let write_fd = proc.add_file(File::new(
Arc::new(write),
OpenOptions {
read: false,
write: true,
append: false,
nonblock: false,
},
String::from("pipe_w:[]"),
))?;
fds.write([read_fd.into(), write_fd.into()])?;
info!(
"pipe: created rfd={:?} wfd={:?} fds={:?}",
read_fd, write_fd, fds
);
Ok(0)
}
}
bitflags! {
struct OpenFlags: usize {
/// read only
const RDONLY = 0;
/// write only
const WRONLY = 1;
/// read write
const RDWR = 2;
/// create file if it does not exist
const CREATE = 1 << 6;
/// error if CREATE and the file exists
const EXCLUSIVE = 1 << 7;
/// truncate file upon open
const TRUNCATE = 1 << 9;
/// append on each write
const APPEND = 1 << 10;
}
}
impl OpenFlags {
/// check if the OpenFlags is readable
fn readable(self) -> bool {
let b = self.bits() & 0b11;
b == Self::RDONLY.bits() || b == Self::RDWR.bits()
}
/// check if the OpenFlags is writable
fn writable(self) -> bool {
let b = self.bits() & 0b11;
b == Self::WRONLY.bits() || b == Self::RDWR.bits()
}
/// convert OpenFlags to OpenOptions
fn to_options(self) -> OpenOptions {
OpenOptions {
read: self.readable(),
write: self.writable(),
append: self.contains(Self::APPEND),
nonblock: false,
}
}
}
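// Illustrative sketch (not part of the original source): how the low two
// bits select the access mode, exercised through the helpers above.
#[cfg(test)]
mod open_flags_sketch {
    use super::OpenFlags;

    #[test]
    fn access_mode_bits() {
        assert!(OpenFlags::RDONLY.readable() && !OpenFlags::RDONLY.writable());
        assert!(!OpenFlags::WRONLY.readable() && OpenFlags::WRONLY.writable());
        assert!(OpenFlags::RDWR.readable() && OpenFlags::RDWR.writable());

        let opts = (OpenFlags::WRONLY | OpenFlags::APPEND).to_options();
        assert!(!opts.read && opts.write && opts.append && !opts.nonblock);
    }
}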
| 30.410256 | 120 | 0.509486 |
623fff6ecbd71c28de4fef83071f55d234bbb608 | 11,696 | // Copyright (c) 2019 - 2020 ESRLabs
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod cgroups;
pub mod config;
mod console;
#[allow(unused)]
mod device_mapper;
mod error;
mod keys;
mod loopdev;
mod minijail;
mod mount;
mod process;
mod state;
use crate::{api, api::Notification};
use config::Config;
use console::Request;
use error::Error;
use log::{debug, info, Level};
use nix::{
sys::stat,
unistd::{self, pipe},
};
use npk::manifest::Name;
use process::ExitStatus;
use state::State;
use std::{
future::Future,
io,
path::Path,
pin::Pin,
task::{Context, Poll},
};
use sync::mpsc;
use tokio::{
fs,
sync::{self, oneshot},
task,
};
pub(crate) type EventTx = mpsc::Sender<Event>;
#[allow(clippy::large_enum_variant)]
#[derive(Debug)]
pub(crate) enum Event {
    /// Incoming command
Console(Request, oneshot::Sender<api::Message>),
    /// An instance exited with a return code
Exit(Name, ExitStatus),
    /// An out-of-memory event occurred
Oom(Name),
/// North shall shut down
Shutdown,
/// Stdout and stderr of child processes
ChildOutput { name: Name, fd: i32, line: String },
/// Notification events
Notification(Notification),
}
/// Result of a Runtime action
pub type RuntimeResult = Result<(), Error>;
/// Handle to the Northstar runtime
pub struct Runtime {
    /// Channel to receive a stop signal for the runtime.
    /// Drop the tx part to gracefully shut down the main loop.
stop: Option<oneshot::Sender<()>>,
// Channel to signal the runtime exit status to the caller of `start`
// When the runtime is shut down the result of shutdown is sent to this
    // channel. If an error happens during normal operation the error is also
// sent to this channel.
stopped: oneshot::Receiver<RuntimeResult>,
event_tx: mpsc::Sender<Event>,
}
impl Runtime {
pub async fn start(config: Config) -> Result<Runtime, Error> {
let (stop_tx, stop_rx) = oneshot::channel();
let (stopped_tx, stopped_rx) = oneshot::channel();
        // Initialize minijail's static functionality
minijail_init().await?;
        // Ensure the configured data and run directories exist
mkdir_p_rw(&config.directories.data_dir).await?;
mkdir_p_rw(&config.directories.run_dir).await?;
        // Northstar runs in an event loop. Modules get a Sender<Event> to the main loop.
let (event_tx, event_rx) = mpsc::channel::<Event>(100);
// Start a task that drives the main loop and wait for shutdown results
{
let event_tx = event_tx.clone();
task::spawn(async move {
stopped_tx
.send(runtime_task(config, event_tx, event_rx, stop_rx).await)
                    .ok(); // Ignore error if caller dropped the handle
});
}
Ok(Runtime {
stop: Some(stop_tx),
stopped: stopped_rx,
event_tx,
})
}
/// Stop the runtime
pub fn stop(mut self) {
// Drop the sending part of the stop handle
self.stop.take();
}
/// Stop the runtime and wait for the termination
pub fn stop_wait(mut self) -> impl Future<Output = RuntimeResult> {
self.stop.take();
self
}
/// Send a request to the runtime directly
pub async fn request(&self, request: api::Request) -> Result<api::Response, Error> {
let (response_tx, response_rx) = oneshot::channel::<api::Message>();
let request = api::Message::new(api::Payload::Request(request));
self.event_tx
.send(Event::Console(
console::Request::Message(request),
response_tx,
))
.await
.ok();
match response_rx.await.ok().map(|message| message.payload) {
Some(api::Payload::Response(response)) => Ok(response),
Some(_) => unreachable!(),
None => panic!("Internal channel error"),
}
}
}
impl Future for Runtime {
type Output = RuntimeResult;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match Pin::new(&mut self.stopped).poll(cx) {
Poll::Ready(r) => match r {
Ok(r) => Poll::Ready(r),
// Channel error -> tx side dropped
Err(_) => Poll::Ready(Ok(())),
},
Poll::Pending => Poll::Pending,
}
}
}
async fn runtime_task(
config: Config,
event_tx: mpsc::Sender<Event>,
mut event_rx: mpsc::Receiver<Event>,
stop: oneshot::Receiver<()>,
) -> Result<(), Error> {
let mut state = State::new(&config, event_tx.clone()).await?;
    // Iterate over all files in config.directories.container_dirs and try
// to mount the content.
for registry in &config.directories.container_dirs {
let mounted_containers = mount::mount_npk_dir(
&config.directories.run_dir,
&state.signing_keys,
&config.devices.device_mapper_dev,
&config.devices.device_mapper,
&config.devices.loop_control,
&config.devices.loop_dev,
®istry,
)
.await
.map_err(Error::Mount)?;
for container in mounted_containers {
state.add(container)?;
}
}
info!(
"Mounted {} containers",
state.applications.len() + state.resources.len()
);
// Autostart flagged containers. Each container with the `autostart` option
// set to true in the manifest is started.
let autostart_apps = state
.applications
.values()
.filter(|app| app.manifest().autostart.unwrap_or_default())
.map(|app| app.name().to_string())
.collect::<Vec<Name>>();
for app in &autostart_apps {
info!("Autostarting {}", app);
state.start(&app).await.ok();
}
// Initialize console
let console = console::Console::new(&config.console_address, &event_tx);
// Start to listen for incoming connections
console.listen().await.map_err(Error::Console)?;
    // Wait for an external shutdown request
let shutdown_tx = event_tx.clone();
task::spawn(async move {
stop.await.ok();
shutdown_tx.send(Event::Shutdown).await.ok();
});
// Enter main loop
loop {
let result = match event_rx.recv().await.unwrap() {
Event::ChildOutput { name, fd, line } => {
on_child_output(&mut state, &name, fd, &line).await;
Ok(())
}
// Debug console commands are handled via the main loop in order to get access
// to the global state. Therefore the console server receives a tx handle to the
// main loop and issues `Event::Console`. Processing of the command takes place
// in the console module but with access to `state`.
Event::Console(msg, txr) => {
console.process(&mut state, &msg, txr).await;
Ok(())
}
// The OOM event is signaled by the cgroup memory monitor if configured in a manifest.
            // If an out-of-memory condition occurs this is signaled with `Event::Oom` which
            // carries the id of the container that ran out of memory.
Event::Oom(id) => state.on_oom(&id).await,
            // A container process exited. Check `process::wait_exit` for details.
Event::Exit(ref name, ref exit_status) => state.on_exit(name, exit_status).await,
            // The runtime is commanded to shut down and exit.
Event::Shutdown => break state.shutdown().await,
// Forward notifications to console
Event::Notification(notification) => {
console.notification(notification).await;
Ok(())
}
};
// Break if a error happens in the runtime
if result.is_err() {
break result;
}
}
}
// TODO: Where to send this?
async fn on_child_output(state: &mut State, name: &str, fd: i32, line: &str) {
if let Some(p) = state.application(name) {
if let Some(p) = p.process_context() {
debug!("[{}] {}: {}: {}", p.process().pid(), name, fd, line);
}
}
}
/// Create the path if it does not exist. Ensure that it is
/// readable and writable.
async fn mkdir_p_rw(path: &Path) -> Result<(), Error> {
if path.exists() && !is_rw(&path) {
        let context = format!("Directory {} is not readable and writable", path.display());
Err(Error::Io(
context.clone(),
io::Error::new(io::ErrorKind::PermissionDenied, context),
))
} else {
debug!("Creating {}", path.display());
fs::create_dir_all(&path).await.map_err(|error| {
Error::Io(
format!("Failed to create directory {}", path.display()),
error,
)
})
}
}
/// Return true if the path is readable and writable
fn is_rw(path: &Path) -> bool {
match stat::stat(path.as_os_str()) {
Ok(stat) => {
let same_uid = stat.st_uid == unistd::getuid().as_raw();
let same_gid = stat.st_gid == unistd::getgid().as_raw();
let mode = stat::Mode::from_bits_truncate(stat.st_mode);
let is_readable = (same_uid && mode.contains(stat::Mode::S_IRUSR))
|| (same_gid && mode.contains(stat::Mode::S_IRGRP))
|| mode.contains(stat::Mode::S_IROTH);
let is_writable = (same_uid && mode.contains(stat::Mode::S_IWUSR))
|| (same_gid && mode.contains(stat::Mode::S_IWGRP))
|| mode.contains(stat::Mode::S_IWOTH);
is_readable && is_writable
}
Err(_) => false,
}
}
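// Illustrative sketch (not part of the original source): the mode-bit logic
// in `is_rw`, restated over raw octal permissions so it can be checked
// without touching the filesystem. The constants mirror `nix::sys::stat::Mode`.
#[cfg(test)]
mod is_rw_sketch {
    const S_IRUSR: u32 = 0o400;
    const S_IWUSR: u32 = 0o200;
    const S_IROTH: u32 = 0o004;
    const S_IWOTH: u32 = 0o002;

    fn owner_rw(mode: u32) -> bool {
        mode & S_IRUSR != 0 && mode & S_IWUSR != 0
    }

    #[test]
    fn mode_bits() {
        assert!(owner_rw(0o755)); // rwxr-xr-x: owner may read and write
        assert!(!owner_rw(0o444)); // r--r--r--: read-only for everyone
        // World-accessible paths pass even for a non-owner.
        assert!(0o666 & S_IROTH != 0 && 0o666 & S_IWOTH != 0);
    }
}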
/// Initialize minijail logging
pub async fn minijail_init() -> Result<(), Error> {
use std::{io::BufRead, os::unix::io::FromRawFd};
#[allow(non_camel_case_types)]
#[allow(dead_code)]
#[repr(i32)]
enum SyslogLevel {
LOG_EMERG = 0,
LOG_ALERT = 1,
LOG_CRIT = 2,
LOG_ERR = 3,
LOG_WARNING = 4,
LOG_NOTICE = 5,
LOG_INFO = 6,
LOG_DEBUG = 7,
MAX = i32::MAX,
}
if let Some(log_level) = log::max_level().to_level() {
let minijail_log_level = match log_level {
Level::Error => SyslogLevel::LOG_ERR,
Level::Warn => SyslogLevel::LOG_WARNING,
Level::Info => SyslogLevel::LOG_INFO,
Level::Debug => SyslogLevel::LOG_DEBUG,
Level::Trace => SyslogLevel::MAX,
};
let (readfd, writefd) =
pipe().map_err(|e| Error::Os("Failed to create pipe".to_string(), e))?;
let pipe = unsafe { std::fs::File::from_raw_fd(readfd) };
::minijail::Minijail::log_to_fd(writefd, minijail_log_level as i32);
let mut lines = std::io::BufReader::new(pipe).lines();
task::spawn_blocking(move || {
while let Some(Ok(line)) = lines.next() {
// TODO: Format the logs to make them seemless
log::log!(log_level, "{}", line);
}
});
}
Ok(())
}
| 32.579387 | 98 | 0.583191 |
e83c3b8baf631c3c73e6a80960f920a7b832813b | 283 | extern crate libc;
extern crate nix;
extern crate crossbeam;
extern crate serde_json;
mod alias_tree;
mod chunk;
mod device;
mod backup_file;
mod chunk_tracker;
mod change_logger;
mod writer;
pub mod copier;
mod quick_io;
pub mod control;
pub mod server;
pub mod cli;
pub mod lock;
| 14.894737 | 24 | 0.784452 |
d99615d72ee836d1093a2207cb8ce8f5bdef9559 | 5,884 | mod event;
mod input;
use std::cell::Cell;
use crate::{
lexer::Token,
parser_api::Parser,
parser_impl::{
event::{Event, EventProcessor},
input::{InputPosition, ParserInput},
},
SmolStr,
syntax_node::syntax_error::{
ParseError,
SyntaxError,
},
};
use crate::SyntaxKind::{self, EOF, TOMBSTONE};
pub(crate) trait Sink {
type Tree;
    /// Adds a new leaf to the current branch.
    fn leaf(&mut self, kind: SyntaxKind, text: SmolStr);
    /// Starts a new branch and makes it current.
    fn start_branch(&mut self, kind: SyntaxKind);
    /// Finishes the current branch and restores the previous
    /// branch as current.
fn finish_branch(&mut self);
fn error(&mut self, error: SyntaxError);
/// Complete tree building. Make sure that
/// `start_branch` and `finish_branch` calls
/// are paired!
fn finish(self) -> Self::Tree;
}
/// Parse a sequence of tokens into the representative node tree
pub(crate) fn parse_with<S: Sink>(
sink: S,
text: &str,
tokens: &[Token],
parser: fn(&mut Parser),
) -> S::Tree {
let mut events = {
let input = input::ParserInput::new(text, tokens);
let parser_impl = ParserImpl::new(&input);
let mut parser_api = Parser(parser_impl);
parser(&mut parser_api);
parser_api.0.into_events()
};
EventProcessor::new(sink, text, tokens, &mut events).process().finish()
}
/// Implementation details of `Parser`, extracted
/// to a separate struct in order not to pollute
/// the public API of the `Parser`.
pub(crate) struct ParserImpl<'t> {
parser_input: &'t ParserInput<'t>,
pos: InputPosition,
events: Vec<Event>,
steps: Cell<u32>,
}
impl<'t> ParserImpl<'t> {
pub(crate) fn new(inp: &'t ParserInput<'t>) -> ParserImpl<'t> {
ParserImpl {
parser_input: inp,
pos: InputPosition::new(),
events: Vec::new(),
steps: Cell::new(0),
}
}
pub(crate) fn into_events(self) -> Vec<Event> {
assert_eq!(self.nth(0), EOF);
self.events
}
pub(super) fn next2(&self) -> Option<(SyntaxKind, SyntaxKind)> {
let c1 = self.parser_input.kind(self.pos);
let c2 = self.parser_input.kind(self.pos + 1);
if self.parser_input.token_start_at(self.pos + 1)
== self.parser_input.token_start_at(self.pos) + self.parser_input.token_len(self.pos)
{
Some((c1, c2))
} else {
None
}
}
pub(super) fn next3(&self) -> Option<(SyntaxKind, SyntaxKind, SyntaxKind)> {
let c1 = self.parser_input.kind(self.pos);
let c2 = self.parser_input.kind(self.pos + 1);
let c3 = self.parser_input.kind(self.pos + 2);
if self.parser_input.token_start_at(self.pos + 1)
== self.parser_input.token_start_at(self.pos) + self.parser_input.token_len(self.pos)
&& self.parser_input.token_start_at(self.pos + 2)
== self.parser_input.token_start_at(self.pos + 1)
+ self.parser_input.token_len(self.pos + 1)
{
Some((c1, c2, c3))
} else {
None
}
}
/// Get the syntax kind of the nth token.
pub(super) fn nth(&self, n: u32) -> SyntaxKind {
let steps = self.steps.get();
assert!(steps <= 10_000_000, "the parser seems stuck");
self.steps.set(steps + 1);
self.parser_input.kind(self.pos + n)
}
pub(super) fn at_kw(&self, t: &str) -> bool {
self.parser_input.token_text(self.pos) == t
}
/// Start parsing right behind the last event.
pub(super) fn start(&mut self) -> u32 {
let pos = self.events.len() as u32;
self.push_event(Event::tombstone());
pos
}
/// Advances the parser by one token unconditionally.
pub(super) fn bump(&mut self) {
let kind = self.nth(0);
if kind == EOF {
return;
}
self.do_bump(kind, 1);
}
pub(super) fn bump_remap(&mut self, kind: SyntaxKind) {
if self.nth(0) == EOF {
// TODO: panic!?
return;
}
self.do_bump(kind, 1);
}
pub(super) fn bump_compound(&mut self, kind: SyntaxKind, n: u8) {
self.do_bump(kind, n);
}
fn do_bump(&mut self, kind: SyntaxKind, n_raw_tokens: u8) {
self.pos += u32::from(n_raw_tokens);
self.push_event(Event::Token { kind, n_raw_tokens });
}
/// Append one Error event to the back of events.
pub(super) fn error(&mut self, msg: String) {
self.push_event(Event::Error { msg: ParseError(msg) })
}
    /// Complete an event by appending a `Finish` event.
pub(super) fn complete(&mut self, pos: u32, kind: SyntaxKind) {
match self.events[pos as usize] {
Event::Start { kind: ref mut slot, .. } => {
*slot = kind;
}
_ => unreachable!(),
}
self.push_event(Event::Finish);
}
    /// Abandon the dummy `Start` event, removing it if it was the last event pushed.
pub(super) fn abandon(&mut self, pos: u32) {
let idx = pos as usize;
if idx == self.events.len() - 1 {
match self.events.pop() {
Some(Event::Start { kind: TOMBSTONE, forward_parent: None }) => (),
_ => unreachable!(),
}
}
}
/// Save the relative distance of a completed event to its forward_parent.
pub(super) fn precede(&mut self, pos: u32) -> u32 {
let new_pos = self.start();
match self.events[pos as usize] {
Event::Start { ref mut forward_parent, .. } => {
*forward_parent = Some(new_pos - pos);
}
_ => unreachable!(),
}
new_pos
}
fn push_event(&mut self, event: Event) {
self.events.push(event)
}
}
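// Illustrative sketch (not part of the original source): the adjacency check
// behind `next2`/`next3`, restated over (start, len) pairs. Two raw tokens
// may only be combined into a compound token (e.g. `>` + `>` -> `>>`) when
// the second starts exactly where the first ends.
#[cfg(test)]
mod joint_token_sketch {
    fn joint(start1: u32, len1: u32, start2: u32) -> bool {
        start2 == start1 + len1
    }

    #[test]
    fn adjacency() {
        assert!(joint(10, 1, 11)); // `>>` with no gap can be combined
        assert!(!joint(10, 1, 12)); // `> >` has whitespace in between
    }
}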
| 29.128713 | 97 | 0.563902 |
147b31716fe805c1869dbdb622512bbebe3d63b8 | 421 | // if1.rs
pub fn bigger(a: i32, b: i32) -> i32 {
    // The if/else is an expression, so no explicit `return` (and no
    // parentheses around the condition) is needed.
    if a > b {
        a
    } else {
        b
    }
}
// Don't mind this for now :)
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn ten_is_bigger_than_eight() {
assert_eq!(10, bigger(10, 8));
}
#[test]
fn fortytwo_is_bigger_than_thirtytwo() {
assert_eq!(42, bigger(32, 42));
}
}
| 15.592593 | 44 | 0.508314 |
f4c08665a6916e3d6c2ce8231df7e66f484a4f63 | 625 | //! **UNSTABLE:** Structured key-value pairs.
//!
//! This module is unstable and breaking changes may be made
//! at any time. See [the tracking issue](https://github.com/rust-lang-nursery/log/issues/328)
//! for more details.
//!
//! Add the `kv_unstable` feature to your `Cargo.toml` to enable
//! this module:
//!
//! ```toml
//! [dependencies.log]
//! features = ["kv_unstable"]
//! ```
mod error;
mod key;
mod source;
pub mod value;
pub use self::error::Error;
pub use self::key::{Key, ToKey};
pub use self::source::{Source, Visitor};
#[doc(inline)]
pub use self::value::{ToValue, Value};
| 23.148148 | 95 | 0.6336 |
eddeaa5e882eefa89bd8a19e3ce64314203309e3 | 8,598 | use std::collections::BTreeMap;
use std::io;
use chrono::{DateTime, Utc};
use rand::{CryptoRng, Rng};
use crate::armor;
use crate::composed::key::{PublicKey, PublicSubkey};
use crate::composed::signed_key::{SignedKeyDetails, SignedPublicSubKey};
use crate::crypto::hash::HashAlgorithm;
use crate::crypto::public_key::PublicKeyAlgorithm;
use crate::errors::Result;
use crate::packet::{self, write_packet, SignatureType};
use crate::ser::Serialize;
use crate::types::{KeyId, KeyTrait, Mpi, PublicKeyTrait, SecretKeyRepr, SecretKeyTrait};
/// Represents a secret signed PGP key.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct SignedSecretKey {
pub primary_key: packet::SecretKey,
pub details: SignedKeyDetails,
pub public_subkeys: Vec<SignedPublicSubKey>,
pub secret_subkeys: Vec<SignedSecretSubKey>,
}
key_parser!(
SignedSecretKey,
SignedSecretKeyParser,
Tag::SecretKey,
packet::SecretKey,
// secret keys, can contain both public and secret subkeys
(
PublicSubkey,
packet::PublicSubkey,
SignedPublicSubKey,
public_subkeys
),
(
SecretSubkey,
packet::SecretSubkey,
SignedSecretSubKey,
secret_subkeys
)
);
impl SignedSecretKey {
pub fn new(
primary_key: packet::SecretKey,
details: SignedKeyDetails,
mut public_subkeys: Vec<SignedPublicSubKey>,
mut secret_subkeys: Vec<SignedSecretSubKey>,
) -> Self {
public_subkeys.retain(|key| {
if key.signatures.is_empty() {
warn!("ignoring unsigned {:?}", key.key);
false
} else {
true
}
});
secret_subkeys.retain(|key| {
if key.signatures.is_empty() {
warn!("ignoring unsigned {:?}", key.key);
false
} else {
true
}
});
SignedSecretKey {
primary_key,
details,
public_subkeys,
secret_subkeys,
}
}
/// Get the secret key expiration as a date.
pub fn expires_at(&self) -> Option<DateTime<Utc>> {
let expiration = self.details.key_expiration_time()?;
Some(*self.primary_key.created_at() + expiration)
}
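    // Illustrative example (not part of the original source): a primary key
    // created on 2020-01-01 whose signed details carry a 90-day
    // key_expiration_time expires on 2020-03-31; a key without that
    // subpacket yields `None`.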
fn verify_public_subkeys(&self) -> Result<()> {
for subkey in &self.public_subkeys {
subkey.verify(&self.primary_key)?;
}
Ok(())
}
fn verify_secret_subkeys(&self) -> Result<()> {
for subkey in &self.secret_subkeys {
subkey.verify(&self.primary_key)?;
}
Ok(())
}
pub fn verify(&self) -> Result<()> {
self.details.verify(&self.primary_key)?;
self.verify_public_subkeys()?;
self.verify_secret_subkeys()?;
Ok(())
}
pub fn to_armored_writer(
&self,
writer: &mut impl io::Write,
headers: Option<&BTreeMap<String, String>>,
) -> Result<()> {
armor::write(self, armor::BlockType::PrivateKey, writer, headers)
}
pub fn to_armored_bytes(&self, headers: Option<&BTreeMap<String, String>>) -> Result<Vec<u8>> {
let mut buf = Vec::new();
self.to_armored_writer(&mut buf, headers)?;
Ok(buf)
}
pub fn to_armored_string(&self, headers: Option<&BTreeMap<String, String>>) -> Result<String> {
Ok(::std::str::from_utf8(&self.to_armored_bytes(headers)?)?.to_string())
}
}
impl KeyTrait for SignedSecretKey {
/// Returns the fingerprint of the associated primary key.
fn fingerprint(&self) -> Vec<u8> {
self.primary_key.fingerprint()
}
/// Returns the Key ID of the associated primary key.
fn key_id(&self) -> KeyId {
self.primary_key.key_id()
}
fn algorithm(&self) -> PublicKeyAlgorithm {
self.primary_key.algorithm()
}
}
impl Serialize for SignedSecretKey {
fn to_writer<W: io::Write>(&self, writer: &mut W) -> Result<()> {
write_packet(writer, &self.primary_key)?;
self.details.to_writer(writer)?;
for ps in &self.public_subkeys {
ps.to_writer(writer)?;
}
for ps in &self.secret_subkeys {
ps.to_writer(writer)?;
}
Ok(())
}
}
impl SecretKeyTrait for SignedSecretKey {
type PublicKey = PublicKey;
fn unlock<F, G>(&self, pw: F, work: G) -> Result<()>
where
F: FnOnce() -> String,
G: FnOnce(&SecretKeyRepr) -> Result<()>,
{
self.primary_key.unlock(pw, work)
}
fn create_signature<F>(&self, key_pw: F, hash: HashAlgorithm, data: &[u8]) -> Result<Vec<Mpi>>
where
F: FnOnce() -> String,
{
self.primary_key.create_signature(key_pw, hash, data)
}
fn public_key(&self) -> Self::PublicKey {
let mut subkeys: Vec<PublicSubkey> = self
.public_subkeys
.iter()
.map(SignedPublicSubKey::as_unsigned)
.collect();
let sec_subkeys = self.secret_subkeys.iter().map(SecretKeyTrait::public_key);
subkeys.extend(sec_subkeys);
PublicKey::new(
self.primary_key.public_key(),
self.details.as_unsigned(),
subkeys,
)
}
}
impl PublicKeyTrait for SignedSecretKey {
fn verify_signature(&self, hash: HashAlgorithm, data: &[u8], sig: &[Mpi]) -> Result<()> {
self.primary_key.verify_signature(hash, data, sig)
}
fn encrypt<R: Rng + CryptoRng>(&self, rng: &mut R, plain: &[u8]) -> Result<Vec<Mpi>> {
self.primary_key.encrypt(rng, plain)
}
fn to_writer_old(&self, writer: &mut impl io::Write) -> Result<()> {
self.primary_key.to_writer_old(writer)
}
}
/// Represents a composed secret PGP SubKey.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct SignedSecretSubKey {
pub key: packet::SecretSubkey,
pub signatures: Vec<packet::Signature>,
}
impl SignedSecretSubKey {
pub fn new(key: packet::SecretSubkey, mut signatures: Vec<packet::Signature>) -> Self {
signatures.retain(|sig| {
if sig.typ() != SignatureType::SubkeyBinding
&& sig.typ() != SignatureType::SubkeyRevocation
{
warn!(
"ignoring unexpected signature {:?} after Subkey packet",
sig.typ()
);
false
} else {
true
}
});
SignedSecretSubKey { key, signatures }
}
pub fn verify(&self, key: &impl PublicKeyTrait) -> Result<()> {
ensure!(!self.signatures.is_empty(), "missing subkey bindings");
for sig in &self.signatures {
sig.verify_key_binding(key, &self.key)?;
}
Ok(())
}
}
impl KeyTrait for SignedSecretSubKey {
/// Returns the fingerprint of the key.
fn fingerprint(&self) -> Vec<u8> {
self.key.fingerprint()
}
/// Returns the Key ID of the key.
fn key_id(&self) -> KeyId {
self.key.key_id()
}
fn algorithm(&self) -> PublicKeyAlgorithm {
self.key.algorithm()
}
}
impl Serialize for SignedSecretSubKey {
fn to_writer<W: io::Write>(&self, writer: &mut W) -> Result<()> {
write_packet(writer, &self.key)?;
for sig in &self.signatures {
write_packet(writer, sig)?;
}
Ok(())
}
}
impl SecretKeyTrait for SignedSecretSubKey {
type PublicKey = PublicSubkey;
fn unlock<F, G>(&self, pw: F, work: G) -> Result<()>
where
F: FnOnce() -> String,
G: FnOnce(&SecretKeyRepr) -> Result<()>,
{
self.key.unlock(pw, work)
}
fn create_signature<F>(&self, key_pw: F, hash: HashAlgorithm, data: &[u8]) -> Result<Vec<Mpi>>
where
F: FnOnce() -> String,
{
self.key.create_signature(key_pw, hash, data)
}
fn public_key(&self) -> Self::PublicKey {
let keyflags = self
.signatures
.first()
.expect("invalid signed subkey")
.key_flags();
PublicSubkey::new(self.key.public_key(), keyflags)
}
}
impl PublicKeyTrait for SignedSecretSubKey {
fn verify_signature(&self, hash: HashAlgorithm, data: &[u8], sig: &[Mpi]) -> Result<()> {
self.key.verify_signature(hash, data, sig)
}
fn encrypt<R: Rng + CryptoRng>(&self, rng: &mut R, plain: &[u8]) -> Result<Vec<Mpi>> {
self.key.encrypt(rng, plain)
}
fn to_writer_old(&self, writer: &mut impl io::Write) -> Result<()> {
self.key.to_writer_old(writer)
}
}
| 27.037736 | 99 | 0.580251 |
64026e54ec9f2ae79e607f76a758cd3221ea40de | 1,650 | // Copyright Materialize, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
use std::fmt;
use serde::{Deserialize, Serialize};
use lowertest::MzStructReflect;
use repr::adt::char::{format_str_pad, Char};
use repr::{ColumnType, ScalarType};
use crate::scalar::func::EagerUnaryFunc;
/// All Char data is stored in Datum::String with its blank padding removed
/// (i.e. trimmed), so this function provides a means of restoring any
/// removed padding.
#[derive(
Ord, PartialOrd, Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Hash, MzStructReflect,
)]
pub struct PadChar {
pub length: Option<usize>,
}
impl<'a> EagerUnaryFunc<'a> for PadChar {
type Input = &'a str;
type Output = Char<String>;
fn call(&self, a: &'a str) -> Char<String> {
Char(format_str_pad(a, self.length))
}
fn output_type(&self, input: ColumnType) -> ColumnType {
ScalarType::Char {
length: self.length,
}
.nullable(input.nullable)
}
}
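// Illustrative sketch (not part of the original source): the pad-restore
// idea in plain std terms -- trailing blanks are trimmed for storage and
// re-added on read. `format_str_pad` is assumed to behave like `restore`
// below for values shorter than the declared length.
#[cfg(test)]
mod pad_char_sketch {
    fn restore(trimmed: &str, length: Option<usize>) -> String {
        match length {
            Some(len) if len > trimmed.chars().count() => {
                format!("{}{}", trimmed, " ".repeat(len - trimmed.chars().count()))
            }
            _ => trimmed.to_string(),
        }
    }

    #[test]
    fn restores_blank_padding() {
        assert_eq!(restore("ab", Some(5)), "ab   ");
        assert_eq!(restore("ab", None), "ab");
    }
}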
impl fmt::Display for PadChar {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("padchar")
}
}
// This function simply allows the expression of changing a's type from char to string
sqlfunc!(
#[sqlname = "chartostr"]
#[preserves_uniqueness = true]
fn cast_char_to_string<'a>(a: Char<&'a str>) -> &'a str {
a.0
}
);
| 27.5 | 96 | 0.666667 |
ff6cbec70d84d075071683ccd24e04685fea1c97 | 15,683 | use super::{stars, DifficultyAttributes};
use crate::{Beatmap, Mods, PpResult, StarResult};
/// Calculator for pp on osu!ctb maps.
///
/// # Example
///
/// ```
/// # use rosu_pp::{FruitsPP, PpResult, Beatmap};
/// # /*
/// let map: Beatmap = ...
/// # */
/// # let map = Beatmap::default();
/// let pp_result: PpResult = FruitsPP::new(&map)
/// .mods(8 + 64) // HDDT
/// .combo(1234)
/// .misses(1)
/// .accuracy(98.5)
/// .calculate();
///
/// println!("PP: {} | Stars: {}", pp_result.pp(), pp_result.stars());
///
/// let next_result = FruitsPP::new(&map)
/// .attributes(pp_result) // reusing previous results for performance
/// .mods(8 + 64) // has to be the same to reuse attributes
/// .accuracy(99.5)
/// .calculate();
///
/// println!("PP: {} | Stars: {}", next_result.pp(), next_result.stars());
/// ```
#[derive(Clone, Debug)]
#[allow(clippy::upper_case_acronyms)]
pub struct FruitsPP<'m> {
map: &'m Beatmap,
attributes: Option<DifficultyAttributes>,
mods: u32,
combo: Option<usize>,
n_fruits: Option<usize>,
n_droplets: Option<usize>,
n_tiny_droplets: Option<usize>,
n_tiny_droplet_misses: Option<usize>,
n_misses: usize,
passed_objects: Option<usize>,
}
impl<'m> FruitsPP<'m> {
#[inline]
pub fn new(map: &'m Beatmap) -> Self {
Self {
map,
attributes: None,
mods: 0,
combo: None,
n_fruits: None,
n_droplets: None,
n_tiny_droplets: None,
n_tiny_droplet_misses: None,
n_misses: 0,
passed_objects: None,
}
}
/// [`FruitsAttributeProvider`] is implemented by [`DifficultyAttributes`](crate::fruits::DifficultyAttributes),
/// [`StarResult`](crate::StarResult), and by [`PpResult`](crate::PpResult) meaning you can give the
/// result of a star calculation or a pp calculation.
/// If you already calculated the attributes for the current map-mod combination,
/// be sure to put them in here so that they don't have to be recalculated.
#[inline]
pub fn attributes(mut self, attributes: impl FruitsAttributeProvider) -> Self {
if let Some(attributes) = attributes.attributes() {
self.attributes.replace(attributes);
}
self
}
/// Specify mods through their bit values.
///
/// See [https://github.com/ppy/osu-api/wiki#mods](https://github.com/ppy/osu-api/wiki#mods)
#[inline]
pub fn mods(mut self, mods: u32) -> Self {
self.mods = mods;
self
}
/// Specify the max combo of the play.
#[inline]
pub fn combo(mut self, combo: usize) -> Self {
self.combo.replace(combo);
self
}
/// Specify the amount of fruits of a play i.e. n300.
#[inline]
pub fn fruits(mut self, n_fruits: usize) -> Self {
self.n_fruits.replace(n_fruits);
self
}
/// Specify the amount of droplets of a play i.e. n100.
#[inline]
pub fn droplets(mut self, n_droplets: usize) -> Self {
self.n_droplets.replace(n_droplets);
self
}
/// Specify the amount of tiny droplets of a play i.e. n50.
#[inline]
pub fn tiny_droplets(mut self, n_tiny_droplets: usize) -> Self {
self.n_tiny_droplets.replace(n_tiny_droplets);
self
}
/// Specify the amount of tiny droplet misses of a play i.e. n_katu.
#[inline]
pub fn tiny_droplet_misses(mut self, n_tiny_droplet_misses: usize) -> Self {
self.n_tiny_droplet_misses.replace(n_tiny_droplet_misses);
self
}
/// Specify the amount of fruit / droplet misses of the play.
#[inline]
pub fn misses(mut self, n_misses: usize) -> Self {
self.n_misses = n_misses;
self
}
/// Amount of passed objects for partial plays, e.g. a fail.
#[inline]
pub fn passed_objects(mut self, passed_objects: usize) -> Self {
self.passed_objects.replace(passed_objects);
self
}
/// Generate the hit results with respect to the given accuracy between `0` and `100`.
///
/// Be sure to set `misses` beforehand! Also, if available, set `attributes` beforehand.
pub fn accuracy(mut self, mut acc: f32) -> Self {
if self.attributes.is_none() {
self.attributes.replace(
stars(self.map, self.mods, self.passed_objects)
.attributes()
.unwrap(),
);
}
let attributes = self.attributes.as_ref().unwrap();
let n_droplets = self
.n_droplets
.unwrap_or_else(|| attributes.n_droplets.saturating_sub(self.n_misses));
let n_fruits = self.n_fruits.unwrap_or_else(|| {
attributes
.max_combo
.saturating_sub(self.n_misses)
.saturating_sub(n_droplets)
});
let max_tiny_droplets = attributes.n_tiny_droplets;
acc /= 100.0;
let n_tiny_droplets = self.n_tiny_droplets.unwrap_or_else(|| {
((acc * (attributes.max_combo + max_tiny_droplets) as f32).round() as usize)
.saturating_sub(n_fruits)
.saturating_sub(n_droplets)
});
let n_tiny_droplet_misses = max_tiny_droplets.saturating_sub(n_tiny_droplets);
self.n_fruits.replace(n_fruits);
self.n_droplets.replace(n_droplets);
self.n_tiny_droplets.replace(n_tiny_droplets);
self.n_tiny_droplet_misses.replace(n_tiny_droplet_misses);
self
}
fn assert_hitresults(&mut self, attributes: &DifficultyAttributes) {
let correct_combo_hits = self
.n_fruits
.and_then(|f| self.n_droplets.map(|d| f + d + self.n_misses))
.filter(|h| *h == attributes.max_combo);
let correct_fruits = self
.n_fruits
.filter(|f| *f >= attributes.n_fruits.saturating_sub(self.n_misses));
let correct_droplets = self
.n_droplets
.filter(|d| *d >= attributes.n_droplets.saturating_sub(self.n_misses));
let correct_tinies = self
.n_tiny_droplets
.and_then(|t| self.n_tiny_droplet_misses.map(|m| t + m))
.filter(|h| *h == attributes.n_tiny_droplets);
if correct_combo_hits
.and(correct_fruits)
.and(correct_droplets)
.and(correct_tinies)
.is_none()
{
let mut n_fruits = self.n_fruits.unwrap_or(0);
let mut n_droplets = self.n_droplets.unwrap_or(0);
let mut n_tiny_droplets = self.n_tiny_droplets.unwrap_or(0);
let n_tiny_droplet_misses = self.n_tiny_droplet_misses.unwrap_or(0);
let missing = attributes
.max_combo
.saturating_sub(n_fruits)
.saturating_sub(n_droplets)
.saturating_sub(self.n_misses);
let missing_fruits =
missing.saturating_sub(attributes.n_droplets.saturating_sub(n_droplets));
n_fruits += missing_fruits;
n_droplets += missing.saturating_sub(missing_fruits);
n_tiny_droplets += attributes
.n_tiny_droplets
.saturating_sub(n_tiny_droplets)
.saturating_sub(n_tiny_droplet_misses);
self.n_fruits.replace(n_fruits);
self.n_droplets.replace(n_droplets);
self.n_tiny_droplets.replace(n_tiny_droplets);
self.n_tiny_droplet_misses.replace(n_tiny_droplet_misses);
}
}
/// Returns an object which contains the pp and [`DifficultyAttributes`](crate::fruits::DifficultyAttributes)
/// containing stars and other attributes.
pub fn calculate(mut self) -> PpResult {
let attributes = self.attributes.take().unwrap_or_else(|| {
stars(self.map, self.mods, self.passed_objects)
.attributes()
.unwrap()
});
// Make sure all objects are set
self.assert_hitresults(&attributes);
let stars = attributes.stars;
// Relying heavily on aim
let mut pp = (5.0 * (stars / 0.0049).max(1.0) - 4.0).powi(2) / 100_000.0;
let mut combo_hits = self.combo_hits();
if combo_hits == 0 {
combo_hits = attributes.max_combo;
}
// Longer maps are worth more
let len_bonus = 0.95
+ 0.3 * (combo_hits as f32 / 2500.0).min(1.0)
+ (combo_hits > 2500) as u8 as f32 * (combo_hits as f32 / 2500.0).log10() * 0.475;
pp *= len_bonus;
// Penalize misses exponentially
pp *= 0.97_f32.powi(self.n_misses as i32);
// Combo scaling
if let Some(combo) = self.combo.filter(|_| attributes.max_combo > 0) {
pp *= (combo as f32 / attributes.max_combo as f32)
.powf(0.8)
.min(1.0);
}
// AR scaling
let ar = attributes.ar;
let mut ar_factor = 1.0;
if ar > 9.0 {
ar_factor += 0.1 * (ar - 9.0) + (ar > 10.0) as u8 as f32 * 0.1 * (ar - 10.0);
} else if ar < 8.0 {
ar_factor += 0.025 * (8.0 - ar);
}
pp *= ar_factor;
// HD bonus
if self.mods.hd() {
if ar <= 10.0 {
pp *= 1.05 + 0.075 * (10.0 - ar);
} else if ar > 10.0 {
pp *= 1.01 + 0.04 * (11.0 - ar.min(11.0));
}
}
// FL bonus
if self.mods.fl() {
pp *= 1.35 * len_bonus;
}
// Accuracy scaling
pp *= self.acc().powf(5.5);
// NF penalty
if self.mods.nf() {
pp *= 0.9;
}
PpResult {
pp,
attributes: StarResult::Fruits(attributes),
}
}
#[inline]
fn combo_hits(&self) -> usize {
self.n_fruits.unwrap_or(0) + self.n_droplets.unwrap_or(0) + self.n_misses
}
#[inline]
fn successful_hits(&self) -> usize {
self.n_fruits.unwrap_or(0)
+ self.n_droplets.unwrap_or(0)
+ self.n_tiny_droplets.unwrap_or(0)
}
#[inline]
fn total_hits(&self) -> usize {
self.successful_hits() + self.n_tiny_droplet_misses.unwrap_or(0) + self.n_misses
}
#[inline]
fn acc(&self) -> f32 {
let total_hits = self.total_hits();
if total_hits == 0 {
1.0
} else {
(self.successful_hits() as f32 / total_hits as f32)
.max(0.0)
.min(1.0)
}
}
}
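// A hedged end-to-end sketch (on a default, empty map the numeric result is
// not meaningful, but it shows the intended builder flow): set mods and
// misses, derive the remaining hitresults from an accuracy, then calculate.
//
//     let map = Beatmap::default();
//     let result = FruitsPP::new(&map)
//         .mods(8) // HD
//         .misses(2)
//         .accuracy(98.5)
//         .calculate();
//     println!("pp: {}", result.pp);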
pub trait FruitsAttributeProvider {
fn attributes(self) -> Option<DifficultyAttributes>;
}
impl FruitsAttributeProvider for DifficultyAttributes {
#[inline]
fn attributes(self) -> Option<DifficultyAttributes> {
Some(self)
}
}
impl FruitsAttributeProvider for StarResult {
#[inline]
fn attributes(self) -> Option<DifficultyAttributes> {
#[allow(irrefutable_let_patterns)]
if let Self::Fruits(attributes) = self {
Some(attributes)
} else {
None
}
}
}
impl FruitsAttributeProvider for PpResult {
#[inline]
fn attributes(self) -> Option<DifficultyAttributes> {
self.attributes.attributes()
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::Beatmap;
fn attributes() -> DifficultyAttributes {
DifficultyAttributes {
n_fruits: 1234,
n_droplets: 567,
n_tiny_droplets: 2345,
max_combo: 1234 + 567,
..Default::default()
}
}
#[test]
fn fruits_only_accuracy() {
let map = Beatmap::default();
let attributes = attributes();
let total_objects = attributes.n_fruits + attributes.n_droplets;
let target_acc = 97.5;
let calculator = FruitsPP::new(&map)
.attributes(attributes)
.passed_objects(total_objects)
.accuracy(target_acc);
let numerator = calculator.n_fruits.unwrap_or(0)
+ calculator.n_droplets.unwrap_or(0)
+ calculator.n_tiny_droplets.unwrap_or(0);
let denominator =
numerator + calculator.n_tiny_droplet_misses.unwrap_or(0) + calculator.n_misses;
let acc = 100.0 * numerator as f32 / denominator as f32;
assert!(
(target_acc - acc).abs() < 1.0,
"Expected: {} | Actual: {}",
target_acc,
acc
);
}
#[test]
fn fruits_accuracy_droplets_and_tiny_droplets() {
let map = Beatmap::default();
let attributes = attributes();
let total_objects = attributes.n_fruits + attributes.n_droplets;
let target_acc = 97.5;
let n_droplets = 550;
let n_tiny_droplets = 2222;
let calculator = FruitsPP::new(&map)
.attributes(attributes)
.passed_objects(total_objects)
.droplets(n_droplets)
.tiny_droplets(n_tiny_droplets)
.accuracy(target_acc);
assert_eq!(
n_droplets,
calculator.n_droplets.unwrap(),
"Expected: {} | Actual: {}",
n_droplets,
calculator.n_droplets.unwrap()
);
let numerator = calculator.n_fruits.unwrap_or(0)
+ calculator.n_droplets.unwrap_or(0)
+ calculator.n_tiny_droplets.unwrap_or(0);
let denominator =
numerator + calculator.n_tiny_droplet_misses.unwrap_or(0) + calculator.n_misses;
let acc = 100.0 * numerator as f32 / denominator as f32;
assert!(
(target_acc - acc).abs() < 1.0,
"Expected: {} | Actual: {}",
target_acc,
acc
);
}
#[test]
fn fruits_missing_objects() {
let map = Beatmap::default();
let attributes = attributes();
let total_objects = attributes.n_fruits + attributes.n_droplets;
let n_fruits = attributes.n_fruits - 10;
let n_droplets = attributes.n_droplets - 5;
let n_tiny_droplets = attributes.n_tiny_droplets - 50;
let n_tiny_droplet_misses = 20;
let n_misses = 2;
let mut calculator = FruitsPP::new(&map)
.attributes(attributes.clone())
.passed_objects(total_objects)
.fruits(n_fruits)
.droplets(n_droplets)
.tiny_droplets(n_tiny_droplets)
.tiny_droplet_misses(n_tiny_droplet_misses)
.misses(n_misses);
calculator.assert_hitresults(&attributes);
assert!(
(attributes.n_fruits as i32 - calculator.n_fruits.unwrap() as i32).abs()
<= n_misses as i32,
"Expected: {} | Actual: {} [+/- {} misses]",
attributes.n_fruits,
calculator.n_fruits.unwrap(),
n_misses
);
assert_eq!(
attributes.n_droplets,
calculator.n_droplets.unwrap()
- (n_misses - (attributes.n_fruits - calculator.n_fruits.unwrap())),
"Expected: {} | Actual: {}",
attributes.n_droplets,
calculator.n_droplets.unwrap()
- (n_misses - (attributes.n_fruits - calculator.n_fruits.unwrap())),
);
assert_eq!(
attributes.n_tiny_droplets,
calculator.n_tiny_droplets.unwrap() + calculator.n_tiny_droplet_misses.unwrap(),
"Expected: {} | Actual: {}",
attributes.n_tiny_droplets,
calculator.n_tiny_droplets.unwrap() + calculator.n_tiny_droplet_misses.unwrap(),
);
}
}
| 30.511673 | 116 | 0.569279 |
7a64071b89ff11b2cd3dba2cb2a6b805ac55e1f2 | 1,644 | use super::BoxTokenStream;
use super::{Token, TokenStream, Tokenizer};
use std::str::CharIndices;
/// Tokenize the text by splitting on whitespaces and punctuation.
#[derive(Clone)]
pub struct SimpleTokenizer;
pub struct SimpleTokenStream<'a> {
text: &'a str,
chars: CharIndices<'a>,
token: Token,
}
impl Tokenizer for SimpleTokenizer {
fn token_stream<'a>(&self, text: &'a str) -> BoxTokenStream<'a> {
BoxTokenStream::from(SimpleTokenStream {
text,
chars: text.char_indices(),
token: Token::default(),
})
}
}
impl<'a> SimpleTokenStream<'a> {
// search for the end of the current token.
fn search_token_end(&mut self) -> usize {
(&mut self.chars)
.filter(|&(_, ref c)| !c.is_alphanumeric())
.map(|(offset, _)| offset)
.next()
.unwrap_or_else(|| self.text.len())
}
}
impl<'a> TokenStream for SimpleTokenStream<'a> {
fn advance(&mut self) -> bool {
self.token.text.clear();
self.token.position = self.token.position.wrapping_add(1);
while let Some((offset_from, c)) = self.chars.next() {
if c.is_alphanumeric() {
let offset_to = self.search_token_end();
self.token.offset_from = offset_from;
self.token.offset_to = offset_to;
self.token.text.push_str(&self.text[offset_from..offset_to]);
return true;
}
}
false
}
fn token(&self) -> &Token {
&self.token
}
fn token_mut(&mut self) -> &mut Token {
&mut self.token
}
}
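// A minimal check of the tokenizer above. This assumes `BoxTokenStream`
// derefs to `TokenStream` (as in tantivy) and that `Token` exposes a
// `text: String` field, both consistent with the code in this file.
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn splits_on_non_alphanumeric() {
        let tokenizer = SimpleTokenizer;
        let mut stream = tokenizer.token_stream("hello, world");
        let mut texts = Vec::new();
        while stream.advance() {
            texts.push(stream.token().text.clone());
        }
        assert_eq!(texts, vec!["hello".to_string(), "world".to_string()]);
    }
}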
| 27.4 | 77 | 0.57056 |
9156344ea6bfc133b992be9efdc51b2223d820c7 | 3,946 | #[doc = "Register `BUF_09_ID` reader"]
pub struct R(crate::R<BUF_09_ID_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<BUF_09_ID_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<BUF_09_ID_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<BUF_09_ID_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `BUF_09_ID` writer"]
pub struct W(crate::W<BUF_09_ID_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<BUF_09_ID_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<BUF_09_ID_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<BUF_09_ID_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `EID` reader - "]
pub struct EID_R(crate::FieldReader<u32, u32>);
impl EID_R {
pub(crate) fn new(bits: u32) -> Self {
EID_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for EID_R {
type Target = crate::FieldReader<u32, u32>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `EID` writer - "]
pub struct EID_W<'a> {
w: &'a mut W,
}
impl<'a> EID_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u32) -> &'a mut W {
self.w.bits = (self.w.bits & !0x0003_ffff) | (value as u32 & 0x0003_ffff);
self.w
}
}
#[doc = "Field `SID` reader - "]
pub struct SID_R(crate::FieldReader<u16, u16>);
impl SID_R {
pub(crate) fn new(bits: u16) -> Self {
SID_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for SID_R {
type Target = crate::FieldReader<u16, u16>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `SID` writer - "]
pub struct SID_W<'a> {
w: &'a mut W,
}
impl<'a> SID_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x07ff << 18)) | ((value as u32 & 0x07ff) << 18);
self.w
}
}
impl R {
#[doc = "Bits 0:17"]
#[inline(always)]
pub fn eid(&self) -> EID_R {
EID_R::new((self.bits & 0x0003_ffff) as u32)
}
#[doc = "Bits 18:28"]
#[inline(always)]
pub fn sid(&self) -> SID_R {
SID_R::new(((self.bits >> 18) & 0x07ff) as u16)
}
}
impl W {
#[doc = "Bits 0:17"]
#[inline(always)]
pub fn eid(&mut self) -> EID_W {
EID_W { w: self }
}
#[doc = "Bits 18:28"]
#[inline(always)]
pub fn sid(&mut self) -> SID_W {
SID_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "CAN Buffer ID Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [buf_09_id](index.html) module"]
pub struct BUF_09_ID_SPEC;
impl crate::RegisterSpec for BUF_09_ID_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [buf_09_id::R](R) reader structure"]
impl crate::Readable for BUF_09_ID_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [buf_09_id::W](W) writer structure"]
impl crate::Writable for BUF_09_ID_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets BUF_09_ID to value 0"]
impl crate::Resettable for BUF_09_ID_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
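// A hedged usage sketch (the `can0` peripheral path is hypothetical and
// depends on the generated PAC; `bits` is `unsafe` because raw field values
// are not range-checked beyond masking):
//
//     can0.buf_09_id.modify(|_, w| unsafe { w.sid().bits(0x123).eid().bits(0x2_0000) });
//     let sid = can0.buf_09_id.read().sid().bits();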
| 28.388489 | 412 | 0.581095 |
0307fd649c83554c9e2669b4a8d1ab531978ad85 | 2,227 | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use rustc::mir::repr as mir;
use trans::common::BlockAndBuilder;
use super::MirContext;
use super::TempRef;
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
pub fn trans_statement(&mut self,
bcx: BlockAndBuilder<'bcx, 'tcx>,
statement: &mir::Statement<'tcx>)
-> BlockAndBuilder<'bcx, 'tcx> {
debug!("trans_statement(statement={:?})", statement);
match statement.kind {
mir::StatementKind::Assign(ref lvalue, ref rvalue) => {
match *lvalue {
mir::Lvalue::Temp(index) => {
let index = index as usize;
match self.temps[index as usize] {
TempRef::Lvalue(tr_dest) => {
self.trans_rvalue(bcx, tr_dest, rvalue)
}
TempRef::Operand(None) => {
let (bcx, operand) = self.trans_rvalue_operand(bcx, rvalue);
self.temps[index] = TempRef::Operand(Some(operand));
bcx
}
TempRef::Operand(Some(_)) => {
bcx.tcx().sess.span_bug(
statement.span,
&format!("operand {:?} already assigned", rvalue));
}
}
}
_ => {
let tr_dest = self.trans_lvalue(&bcx, lvalue);
self.trans_rvalue(bcx, tr_dest, rvalue)
}
}
}
}
}
}
| 41.240741 | 92 | 0.465649 |
237cfe19dc49b8218121fe77b13cde5e6f1c9fd4 | 828 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
pub fn main() {
let x: isize = 15;
let y: isize = 5;
assert_eq!(x / 5, 3);
assert_eq!(x / 4, 3);
assert_eq!(x / 3, 5);
assert_eq!(x / y, 3);
assert_eq!(15 / y, 3);
assert_eq!(x % 5, 0);
assert_eq!(x % 4, 3);
assert_eq!(x % 3, 0);
assert_eq!(x % y, 0);
assert_eq!(15 % y, 0);
}
| 27.6 | 68 | 0.63285 |
79112ceae534ea2350e50c2b6b1f53310b5db2c8 | 18,525 | use itertools::Itertools;
use tracing::{debug, error, warn};
use ibc::{
ics02_client::client_state::{ClientState, IdentifiedAnyClientState},
ics03_connection::connection::{IdentifiedConnectionEnd, State as ConnectionState},
ics04_channel::channel::{IdentifiedChannelEnd, State as ChannelState},
ics24_host::identifier::{ChainId, ConnectionId},
Height,
};
use ibc_proto::ibc::core::{
channel::v1::QueryConnectionChannelsRequest, client::v1::QueryClientStatesRequest,
connection::v1::QueryClientConnectionsRequest,
};
use crate::{
chain::{
counterparty::{channel_on_destination, connection_state_on_destination},
handle::ChainHandle,
},
config::Config,
object::{Channel, Client, Connection, Object, Packet},
registry::Registry,
supervisor::client_state_filter::{FilterPolicy, Permission},
supervisor::error::Error as SupervisorError,
worker::WorkerMap,
};
use super::{Error, RwArc};
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum SpawnMode {
Startup,
Reload,
}
/// A context for spawning workers within the [`crate::supervisor::Supervisor`].
pub struct SpawnContext<'a> {
config: &'a RwArc<Config>,
registry: &'a mut Registry,
workers: &'a mut WorkerMap,
client_state_filter: &'a mut FilterPolicy,
mode: SpawnMode,
}
impl<'a> SpawnContext<'a> {
pub fn new(
config: &'a RwArc<Config>,
registry: &'a mut Registry,
client_state_filter: &'a mut FilterPolicy,
workers: &'a mut WorkerMap,
mode: SpawnMode,
) -> Self {
Self {
config,
registry,
workers,
client_state_filter,
mode,
}
}
fn client_filter_enabled(&self) -> bool {
// Currently just a wrapper over the global filter.
self.config.read().expect("poisoned lock").global.filter
}
pub fn spawn_workers(&mut self) {
let chain_ids = self
.config
.read()
.expect("poisoned lock")
.chains
.iter()
.map(|c| &c.id)
.cloned()
.collect_vec();
for chain_id in chain_ids {
self.spawn_workers_for_chain(&chain_id);
}
}
pub fn spawn_workers_from_chain_to_chain(
&mut self,
from_chain_id: &ChainId,
to_chain_id: &ChainId,
) {
let clients_req = QueryClientStatesRequest {
pagination: ibc_proto::cosmos::base::query::pagination::all(),
};
let chain = match self.registry.get_or_spawn(from_chain_id) {
Ok(chain_handle) => chain_handle,
Err(e) => {
error!(
"skipping workers for chain {}, reason: failed to spawn chain runtime with error: {}",
from_chain_id, e
);
return;
}
};
let clients = match chain.query_clients(clients_req) {
Ok(clients) => clients,
Err(e) => {
error!(
"skipping workers for chain {}, reason: failed to query clients with error: {}",
from_chain_id, e
);
return;
}
};
for client in clients {
if &client.client_state.chain_id() == to_chain_id {
self.spawn_workers_for_client(chain.clone(), client);
}
}
}
pub fn spawn_workers_for_chain(&mut self, chain_id: &ChainId) {
let clients_req = QueryClientStatesRequest {
pagination: ibc_proto::cosmos::base::query::pagination::all(),
};
let chain = match self.registry.get_or_spawn(chain_id) {
Ok(chain_handle) => chain_handle,
Err(e) => {
error!(
"skipping workers for chain {}, reason: failed to spawn chain runtime with error: {}",
chain_id, e
);
return;
}
};
let clients = match chain.query_clients(clients_req) {
Ok(clients) => clients,
Err(e) => {
error!(
"skipping workers for chain {}, reason: failed to query clients with error: {}",
chain_id, e
);
return;
}
};
for client in clients {
self.spawn_workers_for_client(chain.clone(), client);
}
if self.mode != SpawnMode::Reload {
return;
}
let chain_ids = self
.config
.read()
.expect("poisoned lock")
.chains
.iter()
.map(|c| &c.id)
.cloned()
.collect_vec();
for id in chain_ids {
if chain_id == &id {
continue;
}
self.spawn_workers_from_chain_to_chain(&id, chain_id);
}
}
pub fn spawn_workers_for_client(
&mut self,
chain: Box<dyn ChainHandle>,
client: IdentifiedAnyClientState,
) {
// Potentially ignore the client
if self.client_filter_enabled()
&& matches!(
self.client_state_filter.control_client(
&chain.id(),
&client.client_id,
&client.client_state
),
Permission::Deny
)
{
warn!(
"skipping workers for chain {}, client {}. \
reason: client is not allowed (client trust level={:?})",
chain.id(),
client.client_id,
client.client_state.trust_threshold()
);
return;
}
let counterparty_chain_id = client.client_state.chain_id();
let has_counterparty = self
.config
.read()
.expect("poisoned lock")
.has_chain(&counterparty_chain_id);
if !has_counterparty {
debug!(
"skipping client worker for client {} on chain {} has its counterparty ({}) is not present in config",
client.client_id, chain.id(), counterparty_chain_id
);
return;
}
let chain_id = chain.id();
let conns_req = QueryClientConnectionsRequest {
client_id: client.client_id.to_string(),
};
let client_connections = match chain.query_client_connections(conns_req) {
Ok(connections) => connections,
Err(e) => {
error!(
"skipping workers for chain {}, reason: failed to query client connections for client {}: {}",
chain_id, client.client_id, e
);
return;
}
};
for connection_id in client_connections {
self.spawn_workers_for_connection(chain.clone(), &client, connection_id);
}
}
pub fn spawn_workers_for_connection(
&mut self,
chain: Box<dyn ChainHandle>,
client: &IdentifiedAnyClientState,
connection_id: ConnectionId,
) {
let chain_id = chain.id();
let connection_end = match chain.query_connection(&connection_id, Height::zero()) {
Ok(connection_end) => connection_end,
Err(e) => {
error!(
"skipping workers for chain {} and connection {}, reason: failed to query connection end: {}",
chain_id, connection_id, e
);
return;
}
};
let connection = IdentifiedConnectionEnd {
connection_id: connection_id.clone(),
connection_end: connection_end.clone(),
};
// Apply the client state filter
if self.client_filter_enabled() {
match self.client_state_filter.control_connection_end_and_client(
&mut self.registry,
&chain_id,
&client.client_state,
&connection_end,
&connection_id,
) {
Ok(Permission::Deny) => {
warn!(
"skipping workers for chain {}, client {} & conn {}. \
reason: client or counterparty client is not allowed",
chain_id, client.client_id, connection_id
);
return;
}
Err(e) => {
error!("skipping workers for chain {}. reason: {}", chain_id, e);
return;
}
_ => {} // allowed
}
}
match self.spawn_connection_workers(chain.clone(), client.clone(), connection.clone()) {
Ok(()) => debug!(
"done spawning workers for connection {} on chain {}",
connection.connection_id,
chain.id(),
),
Err(e) => error!(
"skipped workers for connection {} on chain {} due to error {}",
chain.id(),
connection.connection_id,
e
),
}
if !connection_end.is_open() {
debug!(
"connection {} not open, skip workers for channels over this connection",
connection.connection_id
);
return;
}
let connection = IdentifiedConnectionEnd {
connection_id: connection_id.clone(),
connection_end: connection_end.clone(),
};
match self.counterparty_connection_state(client.clone(), connection.clone()) {
Err(e) => {
debug!("error with counterparty: reason {}", e);
return;
}
Ok(state) => {
if !state.eq(&ConnectionState::Open) {
debug!(
"connection {} not open, skip workers for channels over this connection",
connection.connection_id
);
debug!(
"drop connection {} because its counterparty is not open",
connection_id
);
return;
}
}
};
let chans_req = QueryConnectionChannelsRequest {
connection: connection_id.to_string(),
pagination: ibc_proto::cosmos::base::query::pagination::all(),
};
let connection_channels = match chain.query_connection_channels(chans_req) {
Ok(channels) => channels,
Err(e) => {
error!(
"skipping workers for chain {} and connection {}, reason: failed to query its channels: {}",
chain.id(), connection_id, e
);
return;
}
};
let connection = IdentifiedConnectionEnd::new(connection_id, connection_end);
for channel in connection_channels {
let channel_id = channel.channel_id.clone();
match self.spawn_workers_for_channel(chain.clone(), client, &connection, channel) {
Ok(()) => debug!(
"done spawning workers for chain {} and channel {}",
chain.id(),
channel_id,
),
Err(e) => error!(
"skipped workers for chain {} and channel {} due to error {}",
chain.id(),
channel_id,
e
),
}
}
}
fn counterparty_connection_state(
&mut self,
client: IdentifiedAnyClientState,
connection: IdentifiedConnectionEnd,
) -> Result<ConnectionState, Error> {
let counterparty_chain = self
.registry
.get_or_spawn(&client.client_state.chain_id())
.map_err(Error::spawn)?;
connection_state_on_destination(connection, counterparty_chain.as_ref())
}
fn spawn_connection_workers(
&mut self,
chain: Box<dyn ChainHandle>,
client: IdentifiedAnyClientState,
connection: IdentifiedConnectionEnd,
) -> Result<(), Error> {
let handshake_enabled = self
.config
.read()
.expect("poisoned lock")
.handshake_enabled();
let counterparty_chain = self
.registry
.get_or_spawn(&client.client_state.chain_id())
.map_err(Error::spawn)?;
let conn_state_src = connection.connection_end.state;
let conn_state_dst =
connection_state_on_destination(connection.clone(), counterparty_chain.as_ref())?;
debug!(
"connection {} on chain {} is: {:?}, state on dest. chain ({}) is: {:?}",
connection.connection_id,
chain.id(),
conn_state_src,
counterparty_chain.id(),
conn_state_dst
);
if conn_state_src.is_open() && conn_state_dst.is_open() {
debug!(
"connection {} on chain {} is already open, not spawning Client worker",
connection.connection_id,
chain.id()
);
} else if !conn_state_dst.is_open()
&& conn_state_dst.less_or_equal_progress(conn_state_src)
&& handshake_enabled
{
// create worker for connection handshake that will advance the remote state
let connection_object = Object::Connection(Connection {
dst_chain_id: client.client_state.chain_id(),
src_chain_id: chain.id(),
src_connection_id: connection.connection_id,
});
self.workers
.spawn(
chain.clone(),
counterparty_chain.clone(),
&connection_object,
&self.config.read().expect("poisoned lock"),
)
.then(|| {
debug!(
"spawning Connection worker: {}",
connection_object.short_name()
);
});
}
Ok(())
}
/// Spawns all the [`Worker`]s that will handle a given channel for a given source chain.
pub fn spawn_workers_for_channel(
&mut self,
chain: Box<dyn ChainHandle>,
client: &IdentifiedAnyClientState,
connection: &IdentifiedConnectionEnd,
channel: IdentifiedChannelEnd,
) -> Result<(), Error> {
let handshake_enabled = self
.config
.read()
.expect("poisoned lock")
.handshake_enabled();
let counterparty_chain = self
.registry
.get_or_spawn(&client.client_state.chain_id())
.map_err(SupervisorError::spawn)?;
let counterparty_channel =
channel_on_destination(&channel, connection, counterparty_chain.as_ref())?;
let chan_state_src = channel.channel_end.state;
let chan_state_dst = counterparty_channel
.as_ref()
.map_or(ChannelState::Uninitialized, |c| c.state);
debug!(
"channel {} on chain {} is: {}; state on dest. chain ({}) is: {}",
channel.channel_id,
chain.id(),
chan_state_src,
counterparty_chain.id(),
chan_state_dst
);
if chan_state_src.is_open()
&& chan_state_dst.is_open()
&& self.relay_packets_on_channel(chain.as_ref(), &channel)
{
// spawn the client worker
let client_object = Object::Client(Client {
dst_client_id: client.client_id.clone(),
dst_chain_id: chain.id(),
src_chain_id: client.client_state.chain_id(),
});
self.workers
.spawn(
counterparty_chain.clone(),
chain.clone(),
&client_object,
&self.config.read().expect("poisoned lock"),
)
.then(|| debug!("spawned Client worker: {}", client_object.short_name()));
// create the Packet object and spawn worker
let path_object = Object::Packet(Packet {
dst_chain_id: counterparty_chain.id(),
src_chain_id: chain.id(),
src_channel_id: channel.channel_id,
src_port_id: channel.port_id,
});
self.workers
.spawn(
chain.clone(),
counterparty_chain.clone(),
&path_object,
&self.config.read().expect("poisoned lock"),
)
.then(|| debug!("spawned Path worker: {}", path_object.short_name()));
} else if !chan_state_dst.is_open()
&& chan_state_dst.less_or_equal_progress(chan_state_src)
&& handshake_enabled
{
// create worker for channel handshake that will advance the remote state
let channel_object = Object::Channel(Channel {
dst_chain_id: counterparty_chain.id(),
src_chain_id: chain.id(),
src_channel_id: channel.channel_id,
src_port_id: channel.port_id,
});
self.workers
.spawn(
chain,
counterparty_chain,
&channel_object,
&self.config.read().expect("poisoned lock"),
)
.then(|| debug!("spawned Channel worker: {}", channel_object.short_name()));
}
Ok(())
}
fn relay_packets_on_channel(
&mut self,
chain: &dyn ChainHandle,
channel: &IdentifiedChannelEnd,
) -> bool {
let config = self.config.read().expect("poisoned lock");
config.packets_on_channel_allowed(&chain.id(), &channel.port_id, &channel.channel_id)
}
pub fn shutdown_workers_for_chain(&mut self, chain_id: &ChainId) {
let affected_workers = self.workers.objects_for_chain(chain_id);
for object in affected_workers {
self.workers.shutdown_worker(&object);
}
}
}
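// A hedged sketch of how a supervisor might drive this context (construction
// of `Registry`, `WorkerMap`, and `FilterPolicy` is defined elsewhere in the
// crate; argument order mirrors `SpawnContext::new` above):
//
//     let mut ctx = SpawnContext::new(
//         &config, &mut registry, &mut client_state_filter, &mut workers,
//         SpawnMode::Startup,
//     );
//     ctx.spawn_workers(); // chains -> clients -> connections -> channels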
| 32.329843 | 118 | 0.5139 |
87c3a0f47891f9c7d5059f0b42ca824144604145 | 324 | use algorithms::IterativeAlgorithm;
pub trait Execute: IterativeAlgorithm {
fn execute(self) -> Self::Result;
}
impl<T: IterativeAlgorithm> Execute for T {
fn execute(mut self) -> T::Result {
while !self.has_converged() {
self.next_iteration();
}
return self.result();
}
}
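// Blanket-impl usage sketch (`Bisection` is a hypothetical type implementing
// `IterativeAlgorithm`): any such algorithm gains `execute` for free, looping
// `next_iteration` until `has_converged` and returning `result`.
//
//     let root = Bisection::new(f, 0.0, 1.0).execute();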
| 20.25 | 43 | 0.617284 |
622b330c0ef86925065829d64f3b5e00785e2b9b | 13,398 | #[doc = "Reader of register INTCLR"]
pub type R = crate::R<u32, super::INTCLR>;
#[doc = "Writer for register INTCLR"]
pub type W = crate::W<u32, super::INTCLR>;
#[doc = "Register INTCLR `reset()`'s with value 0"]
impl crate::ResetValue for super::INTCLR {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Window comparator voltage incursion interrupt.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum WCINC_A {
#[doc = "1: Window comparitor voltage incursion interrupt."]
WCINCINT = 1,
}
impl From<WCINC_A> for bool {
#[inline(always)]
fn from(variant: WCINC_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `WCINC`"]
pub type WCINC_R = crate::R<bool, WCINC_A>;
impl WCINC_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> crate::Variant<bool, WCINC_A> {
use crate::Variant::*;
match self.bits {
true => Val(WCINC_A::WCINCINT),
i => Res(i),
}
}
#[doc = "Checks if the value of the field is `WCINCINT`"]
#[inline(always)]
pub fn is_wcincint(&self) -> bool {
*self == WCINC_A::WCINCINT
}
}
#[doc = "Write proxy for field `WCINC`"]
pub struct WCINC_W<'a> {
w: &'a mut W,
}
impl<'a> WCINC_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: WCINC_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Window comparitor voltage incursion interrupt."]
#[inline(always)]
pub fn wcincint(self) -> &'a mut W {
self.variant(WCINC_A::WCINCINT)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5);
self.w
}
}
#[doc = "Window comparator voltage excursion interrupt.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum WCEXC_A {
#[doc = "1: Window comparitor voltage excursion interrupt."]
WCEXCINT = 1,
}
impl From<WCEXC_A> for bool {
#[inline(always)]
fn from(variant: WCEXC_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `WCEXC`"]
pub type WCEXC_R = crate::R<bool, WCEXC_A>;
impl WCEXC_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> crate::Variant<bool, WCEXC_A> {
use crate::Variant::*;
match self.bits {
true => Val(WCEXC_A::WCEXCINT),
i => Res(i),
}
}
#[doc = "Checks if the value of the field is `WCEXCINT`"]
#[inline(always)]
pub fn is_wcexcint(&self) -> bool {
*self == WCEXC_A::WCEXCINT
}
}
#[doc = "Write proxy for field `WCEXC`"]
pub struct WCEXC_W<'a> {
w: &'a mut W,
}
impl<'a> WCEXC_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: WCEXC_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "Window comparitor voltage excursion interrupt."]
#[inline(always)]
pub fn wcexcint(self) -> &'a mut W {
self.variant(WCEXC_A::WCEXCINT)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
self.w
}
}
#[doc = "FIFO 100 percent full interrupt.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum FIFOOVR2_A {
#[doc = "1: FIFO 100 percent full interrupt."]
FIFOFULLINT = 1,
}
impl From<FIFOOVR2_A> for bool {
#[inline(always)]
fn from(variant: FIFOOVR2_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `FIFOOVR2`"]
pub type FIFOOVR2_R = crate::R<bool, FIFOOVR2_A>;
impl FIFOOVR2_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> crate::Variant<bool, FIFOOVR2_A> {
use crate::Variant::*;
match self.bits {
true => Val(FIFOOVR2_A::FIFOFULLINT),
i => Res(i),
}
}
#[doc = "Checks if the value of the field is `FIFOFULLINT`"]
#[inline(always)]
pub fn is_fifofullint(&self) -> bool {
*self == FIFOOVR2_A::FIFOFULLINT
}
}
#[doc = "Write proxy for field `FIFOOVR2`"]
pub struct FIFOOVR2_W<'a> {
w: &'a mut W,
}
impl<'a> FIFOOVR2_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: FIFOOVR2_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "FIFO 100 percent full interrupt."]
#[inline(always)]
pub fn fifofullint(self) -> &'a mut W {
self.variant(FIFOOVR2_A::FIFOFULLINT)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3);
self.w
}
}
#[doc = "FIFO 75 percent full interrupt.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum FIFOOVR1_A {
#[doc = "1: FIFO 75 percent full interrupt."]
FIFO75INT = 1,
}
impl From<FIFOOVR1_A> for bool {
#[inline(always)]
fn from(variant: FIFOOVR1_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `FIFOOVR1`"]
pub type FIFOOVR1_R = crate::R<bool, FIFOOVR1_A>;
impl FIFOOVR1_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> crate::Variant<bool, FIFOOVR1_A> {
use crate::Variant::*;
match self.bits {
true => Val(FIFOOVR1_A::FIFO75INT),
i => Res(i),
}
}
#[doc = "Checks if the value of the field is `FIFO75INT`"]
#[inline(always)]
pub fn is_fifo75int(&self) -> bool {
*self == FIFOOVR1_A::FIFO75INT
}
}
#[doc = "Write proxy for field `FIFOOVR1`"]
pub struct FIFOOVR1_W<'a> {
w: &'a mut W,
}
impl<'a> FIFOOVR1_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: FIFOOVR1_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "FIFO 75 percent full interrupt."]
#[inline(always)]
pub fn fifo75int(self) -> &'a mut W {
self.variant(FIFOOVR1_A::FIFO75INT)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
self.w
}
}
#[doc = "ADC scan complete interrupt.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum SCNCMP_A {
#[doc = "1: ADC scan complete interrupt."]
SCNCMPINT = 1,
}
impl From<SCNCMP_A> for bool {
#[inline(always)]
fn from(variant: SCNCMP_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `SCNCMP`"]
pub type SCNCMP_R = crate::R<bool, SCNCMP_A>;
impl SCNCMP_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> crate::Variant<bool, SCNCMP_A> {
use crate::Variant::*;
match self.bits {
true => Val(SCNCMP_A::SCNCMPINT),
i => Res(i),
}
}
#[doc = "Checks if the value of the field is `SCNCMPINT`"]
#[inline(always)]
pub fn is_scncmpint(&self) -> bool {
*self == SCNCMP_A::SCNCMPINT
}
}
#[doc = "Write proxy for field `SCNCMP`"]
pub struct SCNCMP_W<'a> {
w: &'a mut W,
}
impl<'a> SCNCMP_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: SCNCMP_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "ADC scan complete interrupt."]
#[inline(always)]
pub fn scncmpint(self) -> &'a mut W {
self.variant(SCNCMP_A::SCNCMPINT)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
self.w
}
}
#[doc = "ADC conversion complete interrupt.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CNVCMP_A {
#[doc = "1: ADC conversion complete interrupt."]
CNVCMPINT = 1,
}
impl From<CNVCMP_A> for bool {
#[inline(always)]
fn from(variant: CNVCMP_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `CNVCMP`"]
pub type CNVCMP_R = crate::R<bool, CNVCMP_A>;
impl CNVCMP_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> crate::Variant<bool, CNVCMP_A> {
use crate::Variant::*;
match self.bits {
true => Val(CNVCMP_A::CNVCMPINT),
i => Res(i),
}
}
#[doc = "Checks if the value of the field is `CNVCMPINT`"]
#[inline(always)]
pub fn is_cnvcmpint(&self) -> bool {
*self == CNVCMP_A::CNVCMPINT
}
}
#[doc = "Write proxy for field `CNVCMP`"]
pub struct CNVCMP_W<'a> {
w: &'a mut W,
}
impl<'a> CNVCMP_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: CNVCMP_A) -> &'a mut W {
{
self.bit(variant.into())
}
}
#[doc = "ADC conversion complete interrupt."]
#[inline(always)]
pub fn cnvcmpint(self) -> &'a mut W {
self.variant(CNVCMP_A::CNVCMPINT)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
self.w
}
}
impl R {
#[doc = "Bit 5 - Window comparator voltage incursion interrupt."]
#[inline(always)]
pub fn wcinc(&self) -> WCINC_R {
WCINC_R::new(((self.bits >> 5) & 0x01) != 0)
}
#[doc = "Bit 4 - Window comparator voltage excursion interrupt."]
#[inline(always)]
pub fn wcexc(&self) -> WCEXC_R {
WCEXC_R::new(((self.bits >> 4) & 0x01) != 0)
}
#[doc = "Bit 3 - FIFO 100 percent full interrupt."]
#[inline(always)]
pub fn fifoovr2(&self) -> FIFOOVR2_R {
FIFOOVR2_R::new(((self.bits >> 3) & 0x01) != 0)
}
#[doc = "Bit 2 - FIFO 75 percent full interrupt."]
#[inline(always)]
pub fn fifoovr1(&self) -> FIFOOVR1_R {
FIFOOVR1_R::new(((self.bits >> 2) & 0x01) != 0)
}
#[doc = "Bit 1 - ADC scan complete interrupt."]
#[inline(always)]
pub fn scncmp(&self) -> SCNCMP_R {
SCNCMP_R::new(((self.bits >> 1) & 0x01) != 0)
}
#[doc = "Bit 0 - ADC conversion complete interrupt."]
#[inline(always)]
pub fn cnvcmp(&self) -> CNVCMP_R {
CNVCMP_R::new((self.bits & 0x01) != 0)
}
}
impl W {
#[doc = "Bit 5 - Window comparator voltage incursion interrupt."]
#[inline(always)]
pub fn wcinc(&mut self) -> WCINC_W {
WCINC_W { w: self }
}
#[doc = "Bit 4 - Window comparator voltage excursion interrupt."]
#[inline(always)]
pub fn wcexc(&mut self) -> WCEXC_W {
WCEXC_W { w: self }
}
#[doc = "Bit 3 - FIFO 100 percent full interrupt."]
#[inline(always)]
pub fn fifoovr2(&mut self) -> FIFOOVR2_W {
FIFOOVR2_W { w: self }
}
#[doc = "Bit 2 - FIFO 75 percent full interrupt."]
#[inline(always)]
pub fn fifoovr1(&mut self) -> FIFOOVR1_W {
FIFOOVR1_W { w: self }
}
#[doc = "Bit 1 - ADC scan complete interrupt."]
#[inline(always)]
pub fn scncmp(&mut self) -> SCNCMP_W {
SCNCMP_W { w: self }
}
#[doc = "Bit 0 - ADC conversion complete interrupt."]
#[inline(always)]
pub fn cnvcmp(&mut self) -> CNVCMP_W {
CNVCMP_W { w: self }
}
}
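// A hedged usage sketch (the `adc` peripheral path is hypothetical): this is
// a write-1-to-clear style register, so setting a field clears the matching
// interrupt.
//
//     adc.intclr.write(|w| w.cnvcmp().cnvcmpint().scncmp().scncmpint());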
| 29.062907 | 84 | 0.556202 |
fc52357d220ec90a871340bbc59687c7a86e59f1 | 7,884 | // Copyright 2016 Pierre Talbot (IRCAM)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This is an example of scheduling several robots that perform the same tasks.
// Schedule `num_robot` robots; each robot takes something, waits some time,
// puts the thing somewhere, and goes to park.
// Robot tasks: Loading (L), Take (T), Wait (W), Put (P), go to park / End (E).
// Constraints for one robot:
// For each task, a start and a duration variable are defined.
// Task start times lie in the time domain [0, max_time].
// Constraints:
// Ls = 0 // loading of the first robot starts at 0.
// Ld in [10, 15] // loading takes from 10 to 15 seconds.
// Ts > Ls + Ld // take starts after loading has finished (Ls + Ld).
// Td in [25, 35]
// Ws > Ts + Td
// Wd in [100, 250]
// Ps > Ws + Wd
// Pd in [25, 35]
// Es > Ps + Pd
// Es ends before max_time.
//
// Cumulative constraints:
// Two resources, each with a capacity of one:
// * Pipetting unit, used by the Loading and Put operations.
// * Mixing unit, used by the Take and Put operations.
#![allow(unused_variables, non_snake_case, dead_code)]
extern crate test;
//use env_logger;
use pcp::kernel::*;
use pcp::propagators::*;
use pcp::search::*;
use pcp::search::branching::*;
use pcp::search::engine::one_solution::*;
use pcp::search::debugger::*;
use pcp::search::propagation::*;
use gcollections::VectorStack;
use interval::interval_set::*;
use interval::ops::Range;
use gcollections::ops::*;
use pcp::term::*;
use pcp::term::ops::*;
use pcp::propagators::cumulative::Cumulative;
use pcp::model::*;
use std::fmt::{Formatter, Display, Error};
pub type Bound = i32;
pub type Domain = IntervalSet<Bound>;
pub struct Robot {
tasks: Vec<usize>,
cumultasks: Vec<(usize, usize)>
}
pub struct RobotScheduling {
pub robots: Vec<Robot>,
pub max_time: usize,
pub start: Vec<Identity<Domain>>,
pub durations: Vec<usize>,
pub pipeting_start: Vec<Box<Identity<Domain>>>,
pub pipeting_duration: Vec<Box<Identity<Domain>>>,
pub pipeting_resource: Vec<Box<Constant<Bound>>>,
pub model: Model,
pub space: FDSpace,
pub status: Status<FDSpace>,
}
//static TASKS: usize = 5;
//static DTASKS: usize = 4;
static L: usize = 0; // Loading
static T: usize = 1; // Take
static W: usize = 2; // Wait
static P: usize = 3; // Put
static E: usize = 4; // End, go to park
impl RobotScheduling
{
pub fn new(num_robot: usize, max_time: usize, Lduration: usize, Tduration: usize, Wduration: usize, Pduration: usize) -> Self {
let mut robotschel = RobotScheduling {
robots: vec!(),
max_time: max_time,
start: vec![],
durations: vec!(Lduration, Tduration, Wduration, Pduration),
pipeting_start: vec![],
pipeting_duration: vec![],
pipeting_resource: vec![],
model: Model::new(),
space: FDSpace::empty(),
status: Status::Unsatisfiable,
};
//create robot
for i in 0 .. num_robot {
let robot = if i % 2 == 0 { //robot with wait
Robot {
tasks: vec!(L, T, W, P, E),
cumultasks: vec!((0,0), (3,3)),
}
} else {
Robot {
tasks: vec!(L, T, P, E),
cumultasks: vec!((0,0), (2,2)),
}
};
robotschel.robots.push(robot)
}
robotschel.initialize();
robotschel
}
fn initialize(&mut self) {
let time_dom = IntervalSet::new(1, self.max_time as i32);
let cumul_tasks = vec![L, P];
// Start date for the different tasks.
self.model.open_group("r");
let mut task_counter = 0;
for (i, robot) in self.robots.iter().enumerate() {
self.model.open_group("s");
for _ in 0..robot.tasks.len() {
self.start.push(self.model.alloc_var(&mut self.space.vstore, time_dom.clone()));
}
self.model.close_group();
for &(t,d) in robot.cumultasks.iter() {
self.pipeting_start.push(Box::new(self.start[task_counter + t].clone()));
self.pipeting_duration.push(Box::new(Identity::new(self.durations[d])));
}
// Ensure that every task starts after the end time of the previous task. (S' >= S + D).
self.model.inc_group();
task_counter += robot.tasks.len();
}
self.model.close_group();
// Ls = 0 for the first robot to force it to start first
self.space.cstore.alloc(Box::new(XEqY::new(self.start[0].clone(), Constant::new(1))));
for i in 0..self.robots.len()*2 {
self.pipeting_resource.push(Box::new(Constant::new(1)));
}
let mut cumulative_pipeting = Cumulative::new(
self.pipeting_start.clone(),
self.pipeting_duration.clone(),
self.pipeting_resource.clone(),
Box::new(Constant::new(1))
);
cumulative_pipeting.join(&mut self.space.vstore, &mut self.space.cstore);
let inter_tasks = cumulative_pipeting.intermediate_vars();
for (nti, ti) in inter_tasks.into_iter().enumerate() {
for (ntj, var) in ti.into_iter().enumerate() {
self.model.register_var(
var,
format!("t{}_{}", nti, ntj));
}
}
self.space.vstore.display(&self.model);
self.space.cstore.display(&self.model);
println!(" fin \n");
}
pub fn solve(mut self) -> Self {
println!(" solve deb \n");
let search =
OneSolution::<_, VectorStack<_>, FDSpace>::new(
// Debugger::new(self.model.clone(),
Propagation::new(
Brancher::new(InputOrder, MinVal, Enumerate)));
let mut search = Box::new(search);
search.start(&self.space);
let (frozen_space, status) = search.enter(self.space);
self.space = frozen_space.unfreeze();
self.status = status;
println!(" solve fin \n");
self
}
fn start_at(&self, task: usize) -> i32 {
self.start[task].read(&self.space.vstore).lower()
}
}
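// A hedged usage sketch (duration values are illustrative, picked inside the
// ranges documented at the top of this file):
//
//     let sched = RobotScheduling::new(2, 500, 12, 30, 150, 30).solve();
//     println!("{}", sched);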
impl Display for RobotScheduling
{
fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> {
use pcp::search::search_tree_visitor::Status::*;
match self.status {
Unsatisfiable => fmt.write_fmt(format_args!("{}-robot scheduling problem is unsatisfiable.", self.robots.len()))?,
EndOfSearch => fmt.write_str("Search terminated or was interrupted.")?,
Unknown(_) => unreachable!(
"After the search step, the problem instance should be either satisfiable or unsatisfiable."),
Satisfiable => {
fmt.write_fmt(format_args!("{}-robot scheduling is satisfiable. The first solution is:\n", self.robots.len()))?;
fmt.write_fmt(format_args!("tasks : {:<8}{:<8}{:<8}{:<8}{:<8}\n", 'L', 'T', 'W', 'P', 'E'))?;
let mut task_counter = 0;
for (i, robot) in self.robots.iter().enumerate() {
fmt.write_fmt(format_args!("start time robot {}: ", i+1))?;
for j in 0 .. robot.tasks.len() {
fmt.write_fmt(format_args!("{:<8}", self.start_at(j + task_counter)))?;
}
fmt.write_str("\n")?;
task_counter += robot.tasks.len();
}
for (i, robot) in self.robots.iter().enumerate() {
fmt.write_fmt(format_args!("duration robot {} : ", i+1))?;
for dur in self.durations.iter() {
fmt.write_fmt(format_args!("{:<8}", dur))?;
}
fmt.write_str("\n")?;
}
}
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use test::Bencher;
#[bench]
fn bench_schedule_2(b: &mut Bencher) {
// `new` takes six arguments; durations here are illustrative values inside
// the ranges documented at the top of this file.
b.iter(|| RobotScheduling::new(2, 500, 12, 30, 150, 30).solve());
}
}
| 31.162055 | 129 | 0.625824 |
89a5140d43bd8d8d7315807ce0e9ea85b4fbb960 | 2,646 | // Copyright 2022 RisingLight Project Authors. Licensed under Apache-2.0.
use bitvec::prelude::BitVec;
use smallvec::SmallVec;
use crate::array::{ArrayImpl, DataChunk};
pub type PackedVec<T> = SmallVec<[T; 16]>;
/// Similar to [`DataChunk`], in the storage system we use [`StorageChunk`]
/// to represent a set of columns. A [`StorageChunk`] contains pointers to
/// arrays and a visibility map. It generally corresponds to a batch read
/// from a `RowSet`. Every constructed [`StorageChunk`] has at least one
/// visible element.
#[derive(Clone)]
pub struct StorageChunk {
/// Whether each row is visible in this chunk. The data comes from the delete map.
visibility: Option<BitVec>,
/// Plain arrays read from the blocks.
arrays: PackedVec<ArrayImpl>,
/// Number of accessible rows.
cardinality: usize,
}
impl StorageChunk {
/// Construct a [`StorageChunk`] from `visibility` and `arrays`. If there are
/// no visible elements in the chunk, the function returns `None`.
pub fn construct(
visibility: Option<BitVec>,
arrays: SmallVec<[ArrayImpl; 16]>,
) -> Option<Self> {
assert!(!arrays.is_empty());
let first_length = arrays[0].len();
for array in &arrays {
assert_eq!(first_length, array.len());
}
let cardinality = if let Some(ref visibility) = visibility {
assert_eq!(visibility.len(), first_length);
visibility.count_ones()
} else {
first_length
};
if cardinality > 0 {
Some(Self {
visibility,
arrays,
cardinality,
})
} else {
None
}
}
pub fn cardinality(&self) -> usize {
self.cardinality
}
pub fn row_count(&self) -> usize {
self.array_at(0).len()
}
pub fn column_count(&self) -> usize {
self.arrays.len()
}
pub fn arrays(&self) -> &[ArrayImpl] {
&self.arrays
}
pub fn array_at(&self, idx: usize) -> &ArrayImpl {
&self.arrays[idx]
}
pub fn visibility(&self) -> &Option<BitVec> {
&self.visibility
}
pub fn to_data_chunk(self) -> DataChunk {
match self.visibility {
Some(visibility) => DataChunk::from_iter(
self.arrays
.iter()
.map(|a| a.filter(visibility.iter().map(|x| *x))),
),
None => DataChunk::from_iter(
self.arrays
.iter()
.map(|a| a.filter([true].iter().cycle().cloned())),
),
}
}
}
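// A hedged usage sketch (construction of the `ArrayImpl` columns and the
// `BitVec` visibility map is elided):
//
//     let chunk = StorageChunk::construct(Some(vis), arrays)
//         .expect("chunk has at least one visible row");
//     let batch: DataChunk = chunk.to_data_chunk(); // applies the visibility map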
| 27.5625 | 99 | 0.55291 |
e9af2c1df657cd68b7abfa4eae93d3dbaba62056 | 2,102 | // Copyright 2019 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
extern crate rocksdb;
use rocksdb::{
backup::{BackupEngine, BackupEngineOptions, RestoreOptions},
Options, DB,
};
#[test]
fn backup_restore() {
// create backup
let path = "_rust_rocksdb_backup_test";
let restore_path = "_rust_rocksdb_restore_from_backup_path";
let mut opts = Options::default();
opts.create_if_missing(true);
{
let db = DB::open(&opts, path).unwrap();
assert!(db.put(b"k1", b"v1111").is_ok());
let value = db.get(b"k1");
assert_eq!(value.unwrap().unwrap(), b"v1111");
{
let backup_path = "_rust_rocksdb_backup_path";
let backup_opts = BackupEngineOptions::default();
let mut backup_engine = BackupEngine::open(&backup_opts, &backup_path).unwrap();
assert!(backup_engine.create_new_backup(&db).is_ok());
let mut restore_option = RestoreOptions::default();
restore_option.set_keep_log_files(false); // true to keep log files
let restore_status = backup_engine.restore_from_latest_backup(
&restore_path,
&restore_path,
&restore_option,
);
assert!(restore_status.is_ok());
let db_restore = DB::open_default(restore_path).unwrap();
let value = db_restore.get(b"k1");
assert_eq!(value.unwrap().unwrap(), b"v1111");
}
}
assert!(DB::destroy(&opts, restore_path).is_ok());
assert!(DB::destroy(&opts, path).is_ok());
}
| 36.877193 | 92 | 0.6451 |
09f86e2e6702dd013e810ed05f77c68c780667aa | 852 | use serde::{Deserialize, Serialize};
use nom::{branch::alt, bytes::complete::tag_no_case, combinator::map, error::ParseError, IResult};
use std::fmt;
/// Sip URI Schema.
#[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)]
pub enum UriSchema {
Sip,
Sips,
}
impl fmt::Display for UriSchema {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
UriSchema::Sip => write!(f, "sip"),
UriSchema::Sips => write!(f, "sips"),
}
}
}
/// Parse SIP URI schema. Only Accepts 'sip' and 'sips'.
pub fn parse_schema<'a, E: ParseError<&'a [u8]>>(
input: &'a [u8],
) -> IResult<&'a [u8], UriSchema, E> {
alt::<_, _, E, _>((
map(tag_no_case::<_, _, E>("sips"), |_| UriSchema::Sips),
map(tag_no_case::<_, _, E>("sip"), |_| UriSchema::Sip),
))(input)
}
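// Minimal round-trip checks for the parser and `Display` impl above;
// `nom::error::VerboseError` is just one valid choice for the error type.
#[cfg(test)]
mod tests {
    use super::*;
    use nom::error::VerboseError;
    #[test]
    fn parses_sip_and_sips() {
        let (_, s) = parse_schema::<VerboseError<&[u8]>>(b"sip").unwrap();
        assert_eq!(s, UriSchema::Sip);
        // Matching is case-insensitive and tries the longer "sips" tag first.
        let (_, s) = parse_schema::<VerboseError<&[u8]>>(b"SIPS").unwrap();
        assert_eq!(s, UriSchema::Sips);
        assert_eq!(UriSchema::Sip.to_string(), "sip");
    }
}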
| 27.483871 | 98 | 0.57277 |
e55b8e43841c42ca28d5e1963e6dda86dca808c2 | 4,183 | use std::collections::HashMap;
use std::sync::Arc;
use std::sync::Mutex;
use std::fs::File;
use std::io::Write;
use std::io::Read;
use std::thread;
use std::time::Duration;
use serenity::model::id::UserId;
use serenity::model::id::ChannelId;
use serenity::model::id::GuildId;
use serenity::model::voice::VoiceState;
use serenity::model::channel::ChannelType;
use serenity::Error;
use serenity::prelude::*;
use crate::models::managed_channel::ManagedChannel;
/// Tracks user voice states in order to maintain a list of "temporary" channels
/// that are to be deleted by the bot when the last person leaves
pub struct ChannelManager {
managed_channels: Arc<Mutex<Vec<ManagedChannel>>>,
voice_states: HashMap<UserId, VoiceState>
}
impl TypeMapKey for ChannelManager {
type Value = Self;
}
impl ChannelManager {
pub fn new() -> Self {
let managed_channels =
match File::open("persistence/managed_channels.bin") {
Ok(mut f) => {
let mut bin = Vec::new();
let _ = f.read_to_end(&mut bin);
match bincode::deserialize::<Vec<ManagedChannel>>(&bin) {
Ok(heap) => Arc::new(Mutex::new(heap)),
Err(_) => Arc::new(Mutex::new(Vec::new()))
}
}
Err(_) => Arc::new(Mutex::new(Vec::new()))
};
{
let managed_channels = managed_channels.clone();
thread::spawn(move || {
loop {
thread::sleep(Duration::from_secs(10));
if let Ok(lock) = managed_channels.lock() {
if let Ok(bin) = bincode::serialize(&*lock) {
if let Ok(mut f) = File::create("persistence/managed_channels.bin") {
let _ = f.write(&bin);
}
}
}
}
});
}
ChannelManager {
managed_channels,
voice_states: HashMap::new()
}
}
pub fn new_managed_channel(&mut self, guild_id: &GuildId, name: &str) -> Result<ChannelId, Error> {
let new_channel =
guild_id.create_channel(name, ChannelType::Voice, None)?;
match self.managed_channels.lock() {
Ok(mut lock) => lock.push(ManagedChannel::from(new_channel.id)),
Err(_) => return Err(Error::Other(""))
}
println!("Creating new managed channel");
Ok(new_channel.id)
}
pub fn user_current_channel(&self, user: &UserId) -> Option<(GuildId, ChannelId)> {
let voice_state = self.voice_states.get(user)?;
let channel_id = voice_state.channel_id?;
if let Ok(channel) = channel_id.to_channel() {
return Some((channel.guild()?.read().guild_id, channel_id));
}
None
}
pub fn get_all_users_in_channel(&self, channel_id: &ChannelId) -> Vec<UserId> {
self.voice_states
.iter()
.filter(|item| item.1.channel_id == Some(*channel_id))
.map(|item| item.0.clone())
.collect()
}
pub fn refresh_voice_states(&mut self, new_states: HashMap<UserId, VoiceState>) {
self.voice_states = new_states;
self.prune_channels();
}
pub fn voice_state_update(&mut self, new_state: VoiceState) {
println!("New voice state for {:?}: {:?}", new_state.user_id, new_state.channel_id);
self.voice_states.insert(new_state.user_id, new_state);
self.prune_channels();
}
fn prune_channels(&mut self) {
let states = self.voice_states.values().collect::<Vec<&VoiceState>>();
if let Ok(mut lock) = self.managed_channels.lock() {
lock.retain(|channel| {
for voice_state in states.iter() {
if voice_state.channel_id == Some(channel.channel_id()) {
return true;
}
}
println!("Deleting channel {:?}", channel.channel_id().name());
false
});
}
}
}
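// A hedged wiring sketch for serenity's shared data map (client construction
// elided; relies on the `TypeMapKey` impl above):
//
//     let mut data = client.data.write();
//     data.insert::<ChannelManager>(ChannelManager::new());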
| 33.733871 | 103 | 0.545781 |
08d605b010eb83a4d41faf2b50e285782f112381 | 5,019 | use std::convert::TryFrom;
use std::io;
use async_trait::async_trait;
use crate::{proxy::*, session::*};
pub struct Handler {
pub actors: Vec<AnyOutboundHandler>,
}
impl Handler {
fn next_connect_addr(&self, start: usize) -> OutboundConnect {
for a in self.actors[start..].iter() {
match a.udp() {
Ok(h) => {
if self.unreliable_chain(start + 1) {
let oc = h.connect_addr();
if let OutboundConnect::Next = oc {
continue;
}
return oc;
} else {
match a.tcp() {
Ok(h) => {
let oc = h.connect_addr();
if let OutboundConnect::Next = oc {
continue;
}
return oc;
}
_ => (),
}
}
}
_ => match a.tcp() {
Ok(h) => {
let oc = h.connect_addr();
if let OutboundConnect::Next = oc {
continue;
}
return oc;
}
_ => (),
},
}
}
OutboundConnect::Unknown
}
fn next_session(&self, mut sess: Session, start: usize) -> Session {
if let OutboundConnect::Proxy(_, address, port) = self.next_connect_addr(start) {
if let Ok(addr) = SocksAddr::try_from((address, port)) {
sess.destination = addr;
}
}
sess
}
fn unreliable_chain(&self, start: usize) -> bool {
for a in self.actors[start..].iter() {
if let Ok(uh) = a.udp() {
if uh.transport_type() != DatagramTransportType::Unreliable {
return false;
}
} else {
return false;
}
}
true
}
async fn handle<'a>(
&'a self,
sess: &'a Session,
mut stream: Option<Box<dyn ProxyStream>>,
mut dgram: Option<Box<dyn OutboundDatagram>>,
) -> io::Result<Box<dyn OutboundDatagram>> {
for (i, a) in self.actors.iter().enumerate() {
let new_sess = self.next_session(sess.clone(), i + 1);
if let Ok(uh) = a.udp() {
if let Some(d) = dgram.take() {
dgram.replace(
uh.handle(&new_sess, Some(OutboundTransport::Datagram(d)))
.await?,
);
} else if let Some(s) = stream.take() {
// Check whether all subsequent handlers can use unreliable
// transport, otherwise we must not convert the stream to
// a datagram.
if self.unreliable_chain(i + 1) {
dgram.replace(
uh.handle(&new_sess, Some(OutboundTransport::Stream(s)))
.await?,
);
} else {
stream.replace(a.tcp()?.handle(&new_sess, Some(s)).await?);
}
                } else if self.unreliable_chain(i + 1) {
                    dgram.replace(uh.handle(&new_sess, None).await?);
                } else {
                    stream.replace(a.tcp()?.handle(&new_sess, None).await?);
                }
} else {
let s = stream.take();
stream.replace(a.tcp()?.handle(&new_sess, s).await?);
}
}
        dgram.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "no datagram"))
}
}
#[async_trait]
impl UdpOutboundHandler for Handler {
fn connect_addr(&self) -> OutboundConnect {
self.next_connect_addr(0)
}
fn transport_type(&self) -> DatagramTransportType {
        self.actors
            .first()
            .map(|actor| {
                actor
                    .udp()
                    .map(|h| h.transport_type())
                    .unwrap_or(DatagramTransportType::Unknown)
            })
            .unwrap_or(DatagramTransportType::Unknown)
}
async fn handle<'a>(
&'a self,
sess: &'a Session,
transport: Option<AnyOutboundTransport>,
) -> io::Result<AnyOutboundDatagram> {
match transport {
Some(transport) => match transport {
OutboundTransport::Datagram(dgram) => self.handle(sess, None, Some(dgram)).await,
OutboundTransport::Stream(stream) => self.handle(sess, Some(stream), None).await,
},
None => self.handle(sess, None, None).await,
}
}
}
| 33.912162 | 97 | 0.421598 |
56ecc6e68a98c8af8f1f233985b06ab89c84ab5f | 66,692 | use std::any::Any;
use std::cell::{Cell, RefCell};
use std::collections::BTreeSet;
use std::env;
use std::ffi::OsStr;
use std::fmt::Debug;
use std::fs;
use std::hash::Hash;
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::process::Command;
use std::time::{Duration, Instant};
use build_helper::{output, t};
use crate::cache::{Cache, Interned, INTERNER};
use crate::check;
use crate::compile;
use crate::config::TargetSelection;
use crate::dist;
use crate::doc;
use crate::flags::{Color, Subcommand};
use crate::install;
use crate::native;
use crate::run;
use crate::test;
use crate::tool::{self, SourceType};
use crate::util::{self, add_dylib_path, add_link_lib_path, exe, libdir};
use crate::{Build, DocTests, GitRepo, Mode};
pub use crate::Compiler;
// FIXME: replace with std::lazy after it gets stabilized and reaches beta
use once_cell::sync::Lazy;
pub struct Builder<'a> {
pub build: &'a Build,
pub top_stage: u32,
pub kind: Kind,
cache: Cache,
stack: RefCell<Vec<Box<dyn Any>>>,
time_spent_on_dependencies: Cell<Duration>,
pub paths: Vec<PathBuf>,
}
impl<'a> Deref for Builder<'a> {
type Target = Build;
fn deref(&self) -> &Self::Target {
self.build
}
}
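/// A single node in the build graph: a step declares which command-line
/// paths select it (`should_run`), how to construct itself from a matched
/// path (`make_run`), and what to do once scheduled (`run`).
///
/// A minimal sketch of an implementor (the `HelloDocs` step and its path are
/// hypothetical, shown only to illustrate the shape of the trait):
///
/// ```ignore
/// #[derive(Debug, Clone, PartialEq, Eq, Hash)]
/// struct HelloDocs;
///
/// impl Step for HelloDocs {
///     type Output = ();
///     const DEFAULT: bool = true;
///
///     fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
///         // Selected by e.g. `./x.py doc src/doc/hello`.
///         run.path("src/doc/hello")
///     }
///
///     fn make_run(run: RunConfig<'_>) {
///         run.builder.ensure(HelloDocs);
///     }
///
///     fn run(self, _builder: &Builder<'_>) {
///         println!("hello from a custom step");
///     }
/// }
/// ```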
pub trait Step: 'static + Clone + Debug + PartialEq + Eq + Hash {
    /// The output type of this step. This is used in a few places to return a
    /// `PathBuf` when directories are created or to return a `Compiler` once
/// it's been assembled.
type Output: Clone;
/// Whether this step is run by default as part of its respective phase.
/// `true` here can still be overwritten by `should_run` calling `default_condition`.
const DEFAULT: bool = false;
/// If true, then this rule should be skipped if --target was specified, but --host was not
const ONLY_HOSTS: bool = false;
/// Primary function to execute this rule. Can call `builder.ensure()`
/// with other steps to run those.
fn run(self, builder: &Builder<'_>) -> Self::Output;
/// When bootstrap is passed a set of paths, this controls whether this rule
/// will execute. However, it does not get called in a "default" context
/// when we are not passed any paths; in that case, `make_run` is called
/// directly.
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_>;
/// Builds up a "root" rule, either as a default rule or from a path passed
/// to us.
///
/// When path is `None`, we are executing in a context where no paths were
/// passed. When `./x.py build` is run, for example, this rule could get
/// called if it is in the correct list below with a path of `None`.
fn make_run(_run: RunConfig<'_>) {
// It is reasonable to not have an implementation of make_run for rules
// who do not want to get called from the root context. This means that
// they are likely dependencies (e.g., sysroot creation) or similar, and
// as such calling them from ./x.py isn't logical.
unimplemented!()
}
}
pub struct RunConfig<'a> {
pub builder: &'a Builder<'a>,
pub target: TargetSelection,
pub path: PathBuf,
}
impl RunConfig<'_> {
pub fn build_triple(&self) -> TargetSelection {
self.builder.build.build
}
}
struct StepDescription {
default: bool,
only_hosts: bool,
should_run: fn(ShouldRun<'_>) -> ShouldRun<'_>,
make_run: fn(RunConfig<'_>),
name: &'static str,
}
/// Collection of paths used to match a task rule.
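///
/// A hedged sketch of the matching rules (the paths are hypothetical):
///
/// ```ignore
/// let set = PathSet::one("src/libstd");
/// assert!(set.has(Path::new("libstd"))); // suffix match
///
/// let suite = PathSet::Suite(PathBuf::from("src/test/ui"));
/// assert!(suite.has(Path::new("ui"))); // also a suffix match
/// // Prefix matching for suites happens in `ShouldRun::is_suite_path`.
/// ```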
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub enum PathSet {
/// A collection of individual paths.
///
/// These are generally matched as a path suffix. For example, a
/// command-line value of `libstd` will match if `src/libstd` is in the
/// set.
Set(BTreeSet<PathBuf>),
/// A "suite" of paths.
///
/// These can match as a path suffix (like `Set`), or as a prefix. For
/// example, a command-line value of `src/test/ui/abi/variadic-ffi.rs`
/// will match `src/test/ui`. A command-line value of `ui` would also
/// match `src/test/ui`.
Suite(PathBuf),
}
impl PathSet {
fn empty() -> PathSet {
PathSet::Set(BTreeSet::new())
}
fn one<P: Into<PathBuf>>(path: P) -> PathSet {
let mut set = BTreeSet::new();
set.insert(path.into());
PathSet::Set(set)
}
fn has(&self, needle: &Path) -> bool {
match self {
PathSet::Set(set) => set.iter().any(|p| p.ends_with(needle)),
PathSet::Suite(suite) => suite.ends_with(needle),
}
}
fn path(&self, builder: &Builder<'_>) -> PathBuf {
match self {
PathSet::Set(set) => set.iter().next().unwrap_or(&builder.build.src).to_path_buf(),
PathSet::Suite(path) => PathBuf::from(path),
}
}
}
impl StepDescription {
fn from<S: Step>() -> StepDescription {
StepDescription {
default: S::DEFAULT,
only_hosts: S::ONLY_HOSTS,
should_run: S::should_run,
make_run: S::make_run,
name: std::any::type_name::<S>(),
}
}
fn maybe_run(&self, builder: &Builder<'_>, pathset: &PathSet) {
if builder.config.exclude.iter().any(|e| pathset.has(e)) {
eprintln!("Skipping {:?} because it is excluded", pathset);
return;
} else if !builder.config.exclude.is_empty() {
eprintln!(
"{:?} not skipped for {:?} -- not in {:?}",
pathset, self.name, builder.config.exclude
);
}
// Determine the targets participating in this rule.
let targets = if self.only_hosts { &builder.hosts } else { &builder.targets };
for target in targets {
let run = RunConfig { builder, path: pathset.path(builder), target: *target };
(self.make_run)(run);
}
}
fn run(v: &[StepDescription], builder: &Builder<'_>, paths: &[PathBuf]) {
let should_runs =
v.iter().map(|desc| (desc.should_run)(ShouldRun::new(builder))).collect::<Vec<_>>();
// sanity checks on rules
for (desc, should_run) in v.iter().zip(&should_runs) {
assert!(
!should_run.paths.is_empty(),
"{:?} should have at least one pathset",
desc.name
);
}
if paths.is_empty() || builder.config.include_default_paths {
for (desc, should_run) in v.iter().zip(&should_runs) {
if desc.default && should_run.is_really_default() {
for pathset in &should_run.paths {
desc.maybe_run(builder, pathset);
}
}
}
}
for path in paths {
// strip CurDir prefix if present
let path = match path.strip_prefix(".") {
Ok(p) => p,
Err(_) => path,
};
let mut attempted_run = false;
for (desc, should_run) in v.iter().zip(&should_runs) {
if let Some(suite) = should_run.is_suite_path(path) {
attempted_run = true;
desc.maybe_run(builder, suite);
} else if let Some(pathset) = should_run.pathset_for_path(path) {
attempted_run = true;
desc.maybe_run(builder, pathset);
}
}
if !attempted_run {
panic!("error: no rules matched {}", path.display());
}
}
}
}
enum ReallyDefault<'a> {
Bool(bool),
Lazy(Lazy<bool, Box<dyn Fn() -> bool + 'a>>),
}
pub struct ShouldRun<'a> {
pub builder: &'a Builder<'a>,
// use a BTreeSet to maintain sort order
paths: BTreeSet<PathSet>,
// If this is a default rule, this is an additional constraint placed on
// its run. Generally something like compiler docs being enabled.
is_really_default: ReallyDefault<'a>,
}
impl<'a> ShouldRun<'a> {
fn new(builder: &'a Builder<'_>) -> ShouldRun<'a> {
ShouldRun {
builder,
paths: BTreeSet::new(),
is_really_default: ReallyDefault::Bool(true), // by default no additional conditions
}
}
pub fn default_condition(mut self, cond: bool) -> Self {
self.is_really_default = ReallyDefault::Bool(cond);
self
}
pub fn lazy_default_condition(mut self, lazy_cond: Box<dyn Fn() -> bool + 'a>) -> Self {
self.is_really_default = ReallyDefault::Lazy(Lazy::new(lazy_cond));
self
}
pub fn is_really_default(&self) -> bool {
match &self.is_really_default {
ReallyDefault::Bool(val) => *val,
ReallyDefault::Lazy(lazy) => *lazy.deref(),
}
}
/// Indicates it should run if the command-line selects the given crate or
/// any of its (local) dependencies.
///
/// Compared to `krate`, this treats the dependencies as aliases for the
/// same job. Generally it is preferred to use `krate`, and treat each
/// individual path separately. For example `./x.py test src/liballoc`
/// (which uses `krate`) will test just `liballoc`. However, `./x.py check
/// src/liballoc` (which uses `all_krates`) will check all of `libtest`.
/// `all_krates` should probably be removed at some point.
pub fn all_krates(mut self, name: &str) -> Self {
let mut set = BTreeSet::new();
for krate in self.builder.in_tree_crates(name, None) {
let path = krate.local_path(self.builder);
set.insert(path);
}
self.paths.insert(PathSet::Set(set));
self
}
/// Indicates it should run if the command-line selects the given crate or
/// any of its (local) dependencies.
///
/// `make_run` will be called separately for each matching command-line path.
pub fn krate(mut self, name: &str) -> Self {
for krate in self.builder.in_tree_crates(name, None) {
let path = krate.local_path(self.builder);
self.paths.insert(PathSet::one(path));
}
self
}
// single, non-aliased path
pub fn path(self, path: &str) -> Self {
self.paths(&[path])
}
// multiple aliases for the same job
pub fn paths(mut self, paths: &[&str]) -> Self {
self.paths.insert(PathSet::Set(paths.iter().map(PathBuf::from).collect()));
self
}
pub fn is_suite_path(&self, path: &Path) -> Option<&PathSet> {
self.paths.iter().find(|pathset| match pathset {
PathSet::Suite(p) => path.starts_with(p),
PathSet::Set(_) => false,
})
}
pub fn suite_path(mut self, suite: &str) -> Self {
self.paths.insert(PathSet::Suite(PathBuf::from(suite)));
self
}
// allows being more explicit about why should_run in Step returns the value passed to it
pub fn never(mut self) -> ShouldRun<'a> {
self.paths.insert(PathSet::empty());
self
}
fn pathset_for_path(&self, path: &Path) -> Option<&PathSet> {
self.paths.iter().find(|pathset| pathset.has(path))
}
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum Kind {
Build,
Check,
Clippy,
Fix,
Format,
Test,
Bench,
Dist,
Doc,
Install,
Run,
}
impl<'a> Builder<'a> {
fn get_step_descriptions(kind: Kind) -> Vec<StepDescription> {
macro_rules! describe {
($($rule:ty),+ $(,)?) => {{
vec![$(StepDescription::from::<$rule>()),+]
}};
}
match kind {
Kind::Build => describe!(
compile::Std,
compile::Rustc,
compile::CodegenBackend,
compile::StartupObjects,
tool::BuildManifest,
tool::Rustbook,
tool::ErrorIndex,
tool::UnstableBookGen,
tool::Tidy,
tool::Linkchecker,
tool::CargoTest,
tool::Compiletest,
tool::RemoteTestServer,
tool::RemoteTestClient,
tool::RustInstaller,
tool::Cargo,
tool::Rls,
tool::RustAnalyzer,
tool::RustDemangler,
tool::Rustdoc,
tool::Clippy,
tool::CargoClippy,
native::Llvm,
native::Sanitizers,
tool::Rustfmt,
tool::Miri,
tool::CargoMiri,
native::Lld,
native::CrtBeginEnd
),
Kind::Check | Kind::Clippy { .. } | Kind::Fix | Kind::Format => describe!(
check::Std,
check::Rustc,
check::Rustdoc,
check::CodegenBackend,
check::Clippy,
check::Miri,
check::Rls,
check::Rustfmt,
check::Bootstrap
),
Kind::Test => describe!(
crate::toolstate::ToolStateCheck,
test::ExpandYamlAnchors,
test::Tidy,
test::Ui,
test::RunPassValgrind,
test::MirOpt,
test::Codegen,
test::CodegenUnits,
test::Assembly,
test::Incremental,
test::Debuginfo,
test::UiFullDeps,
test::Rustdoc,
test::Pretty,
test::Crate,
test::CrateLibrustc,
test::CrateRustdoc,
test::CrateRustdocJsonTypes,
test::Linkcheck,
test::TierCheck,
test::Cargotest,
test::Cargo,
test::Rls,
test::ErrorIndex,
test::Distcheck,
test::RunMakeFullDeps,
test::Nomicon,
test::Reference,
test::RustdocBook,
test::RustByExample,
test::TheBook,
test::UnstableBook,
test::RustcBook,
test::LintDocs,
test::RustcGuide,
test::EmbeddedBook,
test::EditionGuide,
test::Rustfmt,
test::Miri,
test::Clippy,
test::RustDemangler,
test::CompiletestTest,
test::RustdocJSStd,
test::RustdocJSNotStd,
test::RustdocGUI,
test::RustdocTheme,
test::RustdocUi,
test::RustdocJson,
test::HtmlCheck,
// Run bootstrap close to the end as it's unlikely to fail
test::Bootstrap,
// Run run-make last, since these won't pass without make on Windows
test::RunMake,
),
Kind::Bench => describe!(test::Crate, test::CrateLibrustc),
Kind::Doc => describe!(
doc::UnstableBook,
doc::UnstableBookGen,
doc::TheBook,
doc::Standalone,
doc::Std,
doc::Rustc,
doc::Rustdoc,
doc::Rustfmt,
doc::ErrorIndex,
doc::Nomicon,
doc::Reference,
doc::RustdocBook,
doc::RustByExample,
doc::RustcBook,
doc::CargoBook,
doc::EmbeddedBook,
doc::EditionGuide,
),
Kind::Dist => describe!(
dist::Docs,
dist::RustcDocs,
dist::Mingw,
dist::Rustc,
dist::DebuggerScripts,
dist::Std,
dist::RustcDev,
dist::Analysis,
dist::Src,
dist::PlainSourceTarball,
dist::Cargo,
dist::Rls,
dist::RustAnalyzer,
dist::Rustfmt,
dist::RustDemangler,
dist::Clippy,
dist::Miri,
dist::LlvmTools,
dist::RustDev,
dist::Extended,
dist::BuildManifest,
dist::ReproducibleArtifacts,
),
Kind::Install => describe!(
install::Docs,
install::Std,
install::Cargo,
install::Rls,
install::RustAnalyzer,
install::Rustfmt,
install::RustDemangler,
install::Clippy,
install::Miri,
install::Analysis,
install::Src,
install::Rustc
),
Kind::Run => describe!(run::ExpandYamlAnchors, run::BuildManifest),
}
}
pub fn get_help(build: &Build, subcommand: &str) -> Option<String> {
let kind = match subcommand {
"build" => Kind::Build,
"doc" => Kind::Doc,
"test" => Kind::Test,
"bench" => Kind::Bench,
"dist" => Kind::Dist,
"install" => Kind::Install,
_ => return None,
};
let builder = Self::new_internal(build, kind, vec![]);
let builder = &builder;
let mut should_run = ShouldRun::new(builder);
for desc in Builder::get_step_descriptions(builder.kind) {
should_run = (desc.should_run)(should_run);
}
let mut help = String::from("Available paths:\n");
let mut add_path = |path: &Path| {
help.push_str(&format!(" ./x.py {} {}\n", subcommand, path.display()));
};
for pathset in should_run.paths {
match pathset {
PathSet::Set(set) => {
for path in set {
add_path(&path);
}
}
PathSet::Suite(path) => {
add_path(&path.join("..."));
}
}
}
Some(help)
}
fn new_internal(build: &Build, kind: Kind, paths: Vec<PathBuf>) -> Builder<'_> {
Builder {
build,
top_stage: build.config.stage,
kind,
cache: Cache::new(),
stack: RefCell::new(Vec::new()),
time_spent_on_dependencies: Cell::new(Duration::new(0, 0)),
paths,
}
}
pub fn new(build: &Build) -> Builder<'_> {
let (kind, paths) = match build.config.cmd {
Subcommand::Build { ref paths } => (Kind::Build, &paths[..]),
Subcommand::Check { ref paths, all_targets: _ } => (Kind::Check, &paths[..]),
Subcommand::Clippy { ref paths, .. } => (Kind::Clippy, &paths[..]),
Subcommand::Fix { ref paths } => (Kind::Fix, &paths[..]),
Subcommand::Doc { ref paths, .. } => (Kind::Doc, &paths[..]),
Subcommand::Test { ref paths, .. } => (Kind::Test, &paths[..]),
Subcommand::Bench { ref paths, .. } => (Kind::Bench, &paths[..]),
Subcommand::Dist { ref paths } => (Kind::Dist, &paths[..]),
Subcommand::Install { ref paths } => (Kind::Install, &paths[..]),
Subcommand::Run { ref paths } => (Kind::Run, &paths[..]),
Subcommand::Format { .. } | Subcommand::Clean { .. } | Subcommand::Setup { .. } => {
panic!()
}
};
Self::new_internal(build, kind, paths.to_owned())
}
pub fn execute_cli(&self) {
self.run_step_descriptions(&Builder::get_step_descriptions(self.kind), &self.paths);
}
pub fn default_doc(&self, paths: &[PathBuf]) {
self.run_step_descriptions(&Builder::get_step_descriptions(Kind::Doc), paths);
}
/// NOTE: keep this in sync with `rustdoc::clean::utils::doc_rust_lang_org_channel`, or tests will fail on beta/stable.
pub fn doc_rust_lang_org_channel(&self) -> String {
let channel = match &*self.config.channel {
"stable" => &self.version,
"beta" => "beta",
"nightly" | "dev" => "nightly",
// custom build of rustdoc maybe? link to the latest stable docs just in case
_ => "stable",
};
"https://doc.rust-lang.org/".to_owned() + channel
}
fn run_step_descriptions(&self, v: &[StepDescription], paths: &[PathBuf]) {
StepDescription::run(v, self, paths);
}
/// Obtain a compiler at a given stage and for a given host. Explicitly does
/// not take `Compiler` since all `Compiler` instances are meant to be
/// obtained through this function, since it ensures that they are valid
/// (i.e., built and assembled).
pub fn compiler(&self, stage: u32, host: TargetSelection) -> Compiler {
self.ensure(compile::Assemble { target_compiler: Compiler { stage, host } })
}
/// Similar to `compiler`, except handles the full-bootstrap option to
/// silently use the stage1 compiler instead of a stage2 compiler if one is
/// requested.
///
/// Note that this does *not* have the side effect of creating
/// `compiler(stage, host)`, unlike `compiler` above which does have such
/// a side effect. The returned compiler here can only be used to compile
/// new artifacts, it can't be used to rely on the presence of a particular
/// sysroot.
///
/// See `force_use_stage1` for documentation on what each argument is.
pub fn compiler_for(
&self,
stage: u32,
host: TargetSelection,
target: TargetSelection,
) -> Compiler {
if self.build.force_use_stage1(Compiler { stage, host }, target) {
self.compiler(1, self.config.build)
} else {
self.compiler(stage, host)
}
}
pub fn sysroot(&self, compiler: Compiler) -> Interned<PathBuf> {
self.ensure(compile::Sysroot { compiler })
}
/// Returns the libdir where the standard library and other artifacts are
/// found for a compiler's sysroot.
pub fn sysroot_libdir(&self, compiler: Compiler, target: TargetSelection) -> Interned<PathBuf> {
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
struct Libdir {
compiler: Compiler,
target: TargetSelection,
}
impl Step for Libdir {
type Output = Interned<PathBuf>;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.never()
}
fn run(self, builder: &Builder<'_>) -> Interned<PathBuf> {
let lib = builder.sysroot_libdir_relative(self.compiler);
let sysroot = builder
.sysroot(self.compiler)
.join(lib)
.join("rustlib")
.join(self.target.triple)
.join("lib");
// Avoid deleting the rustlib/ directory we just copied
// (in `impl Step for Sysroot`).
if !builder.config.download_rustc {
let _ = fs::remove_dir_all(&sysroot);
t!(fs::create_dir_all(&sysroot));
}
INTERNER.intern_path(sysroot)
}
}
self.ensure(Libdir { compiler, target })
}
pub fn sysroot_codegen_backends(&self, compiler: Compiler) -> PathBuf {
self.sysroot_libdir(compiler, compiler.host).with_file_name("codegen-backends")
}
/// Returns the compiler's libdir where it stores the dynamic libraries that
/// it itself links against.
///
/// For example this returns `<sysroot>/lib` on Unix and `<sysroot>/bin` on
/// Windows.
pub fn rustc_libdir(&self, compiler: Compiler) -> PathBuf {
if compiler.is_snapshot(self) {
self.rustc_snapshot_libdir()
} else {
match self.config.libdir_relative() {
Some(relative_libdir) if compiler.stage >= 1 => {
self.sysroot(compiler).join(relative_libdir)
}
_ => self.sysroot(compiler).join(libdir(compiler.host)),
}
}
}
/// Returns the compiler's relative libdir where it stores the dynamic libraries that
/// it itself links against.
///
/// For example this returns `lib` on Unix and `bin` on
/// Windows.
pub fn libdir_relative(&self, compiler: Compiler) -> &Path {
if compiler.is_snapshot(self) {
libdir(self.config.build).as_ref()
} else {
match self.config.libdir_relative() {
Some(relative_libdir) if compiler.stage >= 1 => relative_libdir,
_ => libdir(compiler.host).as_ref(),
}
}
}
/// Returns the compiler's relative libdir where the standard library and other artifacts are
/// found for a compiler's sysroot.
///
/// For example this returns `lib` on Unix and Windows.
pub fn sysroot_libdir_relative(&self, compiler: Compiler) -> &Path {
match self.config.libdir_relative() {
Some(relative_libdir) if compiler.stage >= 1 => relative_libdir,
_ if compiler.stage == 0 => &self.build.initial_libdir,
_ => Path::new("lib"),
}
}
/// Adds the compiler's directory of dynamic libraries to `cmd`'s dynamic
/// library lookup path.
pub fn add_rustc_lib_path(&self, compiler: Compiler, cmd: &mut Command) {
// Windows doesn't need dylib path munging because the dlls for the
// compiler live next to the compiler and the system will find them
// automatically.
if cfg!(windows) {
return;
}
let mut dylib_dirs = vec![self.rustc_libdir(compiler)];
// Ensure that the downloaded LLVM libraries can be found.
if self.config.llvm_from_ci {
let ci_llvm_lib = self.out.join(&*compiler.host.triple).join("ci-llvm").join("lib");
dylib_dirs.push(ci_llvm_lib);
}
add_dylib_path(dylib_dirs, cmd);
}
/// Gets a path to the compiler specified.
pub fn rustc(&self, compiler: Compiler) -> PathBuf {
if compiler.is_snapshot(self) {
self.initial_rustc.clone()
} else {
self.sysroot(compiler).join("bin").join(exe("rustc", compiler.host))
}
}
/// Gets the paths to all of the compiler's codegen backends.
fn codegen_backends(&self, compiler: Compiler) -> impl Iterator<Item = PathBuf> {
fs::read_dir(self.sysroot_codegen_backends(compiler))
.into_iter()
.flatten()
.filter_map(Result::ok)
.map(|entry| entry.path())
}
pub fn rustdoc(&self, compiler: Compiler) -> PathBuf {
self.ensure(tool::Rustdoc { compiler })
}
pub fn rustdoc_cmd(&self, compiler: Compiler) -> Command {
let mut cmd = Command::new(&self.out.join("bootstrap/debug/rustdoc"));
cmd.env("RUSTC_STAGE", compiler.stage.to_string())
.env("RUSTC_SYSROOT", self.sysroot(compiler))
// Note that this is *not* the sysroot_libdir because rustdoc must be linked
// equivalently to rustc.
.env("RUSTDOC_LIBDIR", self.rustc_libdir(compiler))
.env("CFG_RELEASE_CHANNEL", &self.config.channel)
.env("RUSTDOC_REAL", self.rustdoc(compiler))
.env("RUSTC_BOOTSTRAP", "1");
cmd.arg("-Wrustdoc::invalid_codeblock_attributes");
if self.config.deny_warnings {
cmd.arg("-Dwarnings");
}
cmd.arg("-Znormalize-docs");
// Remove make-related flags that can cause jobserver problems.
cmd.env_remove("MAKEFLAGS");
cmd.env_remove("MFLAGS");
if let Some(linker) = self.linker(compiler.host) {
cmd.env("RUSTDOC_LINKER", linker);
}
if self.is_fuse_ld_lld(compiler.host) {
cmd.env("RUSTDOC_FUSE_LD_LLD", "1");
}
cmd
}
/// Return the path to `llvm-config` for the target, if it exists.
///
/// Note that this returns `None` if LLVM is disabled, or if we're in a
/// check build or dry-run, where there's no need to build all of LLVM.
fn llvm_config(&self, target: TargetSelection) -> Option<PathBuf> {
if self.config.llvm_enabled() && self.kind != Kind::Check && !self.config.dry_run {
let llvm_config = self.ensure(native::Llvm { target });
if llvm_config.is_file() {
return Some(llvm_config);
}
}
None
}
/// Prepares an invocation of `cargo` to be run.
///
/// This will create a `Command` that represents a pending execution of
/// Cargo. This cargo will be configured to use `compiler` as the actual
/// rustc compiler, its output will be scoped by `mode`'s output directory,
/// it will pass the `--target` flag for the specified `target`, and will be
/// executing the Cargo command `cmd`.
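    ///
    /// A hedged sketch of a typical call site (the step invoking it is
    /// hypothetical):
    ///
    /// ```ignore
    /// let mut cargo = builder.cargo(compiler, Mode::ToolBootstrap, SourceType::InTree, target, "build");
    /// cargo.arg("--package").arg("some-tool");
    /// builder.run(&mut Command::from(cargo));
    /// ```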
pub fn cargo(
&self,
compiler: Compiler,
mode: Mode,
source_type: SourceType,
target: TargetSelection,
cmd: &str,
) -> Cargo {
let mut cargo = Command::new(&self.initial_cargo);
let out_dir = self.stage_out(compiler, mode);
// Codegen backends are not yet tracked by -Zbinary-dep-depinfo,
// so we need to explicitly clear out if they've been updated.
for backend in self.codegen_backends(compiler) {
self.clear_if_dirty(&out_dir, &backend);
}
if cmd == "doc" || cmd == "rustdoc" {
let my_out = match mode {
// This is the intended out directory for compiler documentation.
Mode::Rustc | Mode::ToolRustc => self.compiler_doc_out(target),
Mode::Std => out_dir.join(target.triple).join("doc"),
_ => panic!("doc mode {:?} not expected", mode),
};
let rustdoc = self.rustdoc(compiler);
self.clear_if_dirty(&my_out, &rustdoc);
}
cargo.env("CARGO_TARGET_DIR", &out_dir).arg(cmd);
let profile_var = |name: &str| {
let profile = if self.config.rust_optimize { "RELEASE" } else { "DEV" };
format!("CARGO_PROFILE_{}_{}", profile, name)
};
// See comment in rustc_llvm/build.rs for why this is necessary, largely llvm-config
// needs to not accidentally link to libLLVM in stage0/lib.
cargo.env("REAL_LIBRARY_PATH_VAR", &util::dylib_path_var());
if let Some(e) = env::var_os(util::dylib_path_var()) {
cargo.env("REAL_LIBRARY_PATH", e);
}
// Found with `rg "init_env_logger\("`. If anyone uses `init_env_logger`
// from out of tree it shouldn't matter, since x.py is only used for
// building in-tree.
let color_logs = ["RUSTDOC_LOG_COLOR", "RUSTC_LOG_COLOR", "RUST_LOG_COLOR"];
match self.build.config.color {
Color::Always => {
cargo.arg("--color=always");
for log in &color_logs {
cargo.env(log, "always");
}
}
Color::Never => {
cargo.arg("--color=never");
for log in &color_logs {
cargo.env(log, "never");
}
}
Color::Auto => {} // nothing to do
}
if cmd != "install" {
cargo.arg("--target").arg(target.rustc_target_arg());
} else {
assert_eq!(target, compiler.host);
}
// Set a flag for `check`/`clippy`/`fix`, so that certain build
// scripts can do less work (i.e. not building/requiring LLVM).
if cmd == "check" || cmd == "clippy" || cmd == "fix" {
// If we've not yet built LLVM, or it's stale, then bust
// the rustc_llvm cache. That will always work, even though it
// may mean that on the next non-check build we'll need to rebuild
// rustc_llvm. But if LLVM is stale, that'll be a tiny amount
// of work comparitively, and we'd likely need to rebuild it anyway,
// so that's okay.
if crate::native::prebuilt_llvm_config(self, target).is_err() {
cargo.env("RUST_CHECK", "1");
}
}
let stage = if compiler.stage == 0 && self.local_rebuild {
// Assume the local-rebuild rustc already has stage1 features.
1
} else {
compiler.stage
};
let mut rustflags = Rustflags::new(target);
if stage != 0 {
if let Ok(s) = env::var("CARGOFLAGS_NOT_BOOTSTRAP") {
cargo.args(s.split_whitespace());
}
rustflags.env("RUSTFLAGS_NOT_BOOTSTRAP");
} else {
if let Ok(s) = env::var("CARGOFLAGS_BOOTSTRAP") {
cargo.args(s.split_whitespace());
}
rustflags.env("RUSTFLAGS_BOOTSTRAP");
if cmd == "clippy" {
// clippy overwrites sysroot if we pass it to cargo.
// Pass it directly to clippy instead.
// NOTE: this can't be fixed in clippy because we explicitly don't set `RUSTC`,
// so it has no way of knowing the sysroot.
rustflags.arg("--sysroot");
rustflags.arg(
self.sysroot(compiler)
.as_os_str()
.to_str()
.expect("sysroot must be valid UTF-8"),
);
// Only run clippy on a very limited subset of crates (in particular, not build scripts).
cargo.arg("-Zunstable-options");
// Explicitly does *not* set `--cfg=bootstrap`, since we're using a nightly clippy.
let host_version = Command::new("rustc").arg("--version").output().map_err(|_| ());
let output = host_version.and_then(|output| {
if output.status.success() {
Ok(output)
} else {
Err(())
}
}).unwrap_or_else(|_| {
eprintln!(
"error: `x.py clippy` requires a host `rustc` toolchain with the `clippy` component"
);
eprintln!("help: try `rustup component add clippy`");
std::process::exit(1);
});
if !t!(std::str::from_utf8(&output.stdout)).contains("nightly") {
rustflags.arg("--cfg=bootstrap");
}
} else {
rustflags.arg("--cfg=bootstrap");
}
}
if self.config.rust_new_symbol_mangling {
rustflags.arg("-Zsymbol-mangling-version=v0");
}
// FIXME: It might be better to use the same value for both `RUSTFLAGS` and `RUSTDOCFLAGS`,
// but this breaks CI. At the very least, stage0 `rustdoc` needs `--cfg bootstrap`. See
// #71458.
let mut rustdocflags = rustflags.clone();
rustdocflags.propagate_cargo_env("RUSTDOCFLAGS");
if stage == 0 {
rustdocflags.env("RUSTDOCFLAGS_BOOTSTRAP");
} else {
rustdocflags.env("RUSTDOCFLAGS_NOT_BOOTSTRAP");
}
if let Ok(s) = env::var("CARGOFLAGS") {
cargo.args(s.split_whitespace());
}
match mode {
Mode::Std | Mode::ToolBootstrap | Mode::ToolStd => {}
Mode::Rustc | Mode::Codegen | Mode::ToolRustc => {
// Build proc macros both for the host and the target
if target != compiler.host && cmd != "check" {
cargo.arg("-Zdual-proc-macros");
rustflags.arg("-Zdual-proc-macros");
}
}
}
// This tells Cargo (and in turn, rustc) to output more complete
// dependency information. Most importantly for rustbuild, this
// includes sysroot artifacts, like libstd, which means that we don't
// need to track those in rustbuild (an error prone process!). This
// feature is currently unstable as there may be some bugs and such, but
// it represents a big improvement in rustbuild's reliability on
// rebuilds, so we're using it here.
//
// For some additional context, see #63470 (the PR originally adding
// this), as well as #63012 which is the tracking issue for this
// feature on the rustc side.
cargo.arg("-Zbinary-dep-depinfo");
cargo.arg("-j").arg(self.jobs().to_string());
// Remove make-related flags to ensure Cargo can correctly set things up
cargo.env_remove("MAKEFLAGS");
cargo.env_remove("MFLAGS");
// FIXME: Temporary fix for https://github.com/rust-lang/cargo/issues/3005
// Force cargo to output binaries with disambiguating hashes in the name
let mut metadata = if compiler.stage == 0 {
// Treat stage0 like a special channel, whether it's a normal prior-
// release rustc or a local rebuild with the same version, so we
// never mix these libraries by accident.
"bootstrap".to_string()
} else {
self.config.channel.to_string()
};
// We want to make sure that none of the dependencies between
// std/test/rustc unify with one another. This is done for weird linkage
// reasons but the gist of the problem is that if librustc, libtest, and
// libstd all depend on libc from crates.io (which they actually do) we
// want to make sure they all get distinct versions. Things get really
// weird if we try to unify all these dependencies right now, namely
// around how many times the library is linked in dynamic libraries and
// such. If rustc were a static executable or if we didn't ship dylibs
// this wouldn't be a problem, but we do, so it is. This is in general
// just here to make sure things build right. If you can remove this and
// things still build right, please do!
match mode {
Mode::Std => metadata.push_str("std"),
// When we're building rustc tools, they're built with a search path
// that contains things built during the rustc build. For example,
// bitflags is built during the rustc build, and is a dependency of
// rustdoc as well. We're building rustdoc in a different target
// directory, though, which means that Cargo will rebuild the
// dependency. When we go on to build rustdoc, we'll look for
// bitflags, and find two different copies: one built during the
// rustc step and one that we just built. This isn't always a
// problem, somehow -- not really clear why -- but we know that this
// fixes things.
Mode::ToolRustc => metadata.push_str("tool-rustc"),
// Same for codegen backends.
Mode::Codegen => metadata.push_str("codegen"),
_ => {}
}
cargo.env("__CARGO_DEFAULT_LIB_METADATA", &metadata);
if cmd == "clippy" {
rustflags.arg("-Zforce-unstable-if-unmarked");
}
rustflags.arg("-Zmacro-backtrace");
let want_rustdoc = self.doc_tests != DocTests::No;
// We synthetically interpret a stage0 compiler used to build tools as a
// "raw" compiler in that it's the exact snapshot we download. Normally
// the stage0 build means it uses libraries build by the stage0
// compiler, but for tools we just use the precompiled libraries that
// we've downloaded
let use_snapshot = mode == Mode::ToolBootstrap;
assert!(!use_snapshot || stage == 0 || self.local_rebuild);
let maybe_sysroot = self.sysroot(compiler);
let sysroot = if use_snapshot { self.rustc_snapshot_sysroot() } else { &maybe_sysroot };
let libdir = self.rustc_libdir(compiler);
// Clear the output directory if the real rustc we're using has changed;
// Cargo cannot detect this as it thinks rustc is bootstrap/debug/rustc.
//
// Avoid doing this during dry run as that usually means the relevant
// compiler is not yet linked/copied properly.
//
// Only clear out the directory if we're compiling std; otherwise, we
        // should let Cargo take care of things for us (via dep-info)
if !self.config.dry_run && mode == Mode::Std && cmd == "build" {
self.clear_if_dirty(&out_dir, &self.rustc(compiler));
}
// Customize the compiler we're running. Specify the compiler to cargo
// as our shim and then pass it some various options used to configure
// how the actual compiler itself is called.
//
// These variables are primarily all read by
// src/bootstrap/bin/{rustc.rs,rustdoc.rs}
cargo
.env("RUSTBUILD_NATIVE_DIR", self.native_dir(target))
.env("RUSTC_REAL", self.rustc(compiler))
.env("RUSTC_STAGE", stage.to_string())
.env("RUSTC_SYSROOT", &sysroot)
.env("RUSTC_LIBDIR", &libdir)
.env("RUSTDOC", self.out.join("bootstrap/debug/rustdoc"))
.env(
"RUSTDOC_REAL",
if cmd == "doc" || cmd == "rustdoc" || (cmd == "test" && want_rustdoc) {
self.rustdoc(compiler)
} else {
PathBuf::from("/path/to/nowhere/rustdoc/not/required")
},
)
.env("RUSTC_ERROR_METADATA_DST", self.extended_error_dir())
.env("RUSTC_BREAK_ON_ICE", "1");
// Clippy support is a hack and uses the default `cargo-clippy` in path.
// Don't override RUSTC so that the `cargo-clippy` in path will be run.
if cmd != "clippy" {
cargo.env("RUSTC", self.out.join("bootstrap/debug/rustc"));
}
// Dealing with rpath here is a little special, so let's go into some
// detail. First off, `-rpath` is a linker option on Unix platforms
// which adds to the runtime dynamic loader path when looking for
// dynamic libraries. We use this by default on Unix platforms to ensure
// that our nightlies behave the same on Windows, that is they work out
// of the box. This can be disabled, of course, but basically that's why
// we're gated on RUSTC_RPATH here.
//
// Ok, so the astute might be wondering "why isn't `-C rpath` used
// here?" and that is indeed a good question to ask. This codegen
// option is the compiler's current interface to generating an rpath.
// Unfortunately it doesn't quite suffice for us. The flag currently
// takes no value as an argument, so the compiler calculates what it
// should pass to the linker as `-rpath`. This unfortunately is based on
// the **compile time** directory structure which when building with
// Cargo will be very different than the runtime directory structure.
//
// All that's a really long winded way of saying that if we use
// `-Crpath` then the executables generated have the wrong rpath of
// something like `$ORIGIN/deps` when in fact the way we distribute
// rustc requires the rpath to be `$ORIGIN/../lib`.
//
// So, all in all, to set up the correct rpath we pass the linker
// argument manually via `-C link-args=-Wl,-rpath,...`. Plus isn't it
// fun to pass a flag to a tool to pass a flag to pass a flag to a tool
// to change a flag in a binary?
if self.config.rust_rpath && util::use_host_linker(target) {
let rpath = if target.contains("apple") {
// Note that we need to take one extra step on macOS to also pass
                // `-Wl,-install_name,@rpath/...` to get things to work right. To
// do that we pass a weird flag to the compiler to get it to do
// so. Note that this is definitely a hack, and we should likely
// flesh out rpath support more fully in the future.
rustflags.arg("-Zosx-rpath-install-name");
Some("-Wl,-rpath,@loader_path/../lib")
} else if !target.contains("windows") {
Some("-Wl,-rpath,$ORIGIN/../lib")
} else {
None
};
if let Some(rpath) = rpath {
rustflags.arg(&format!("-Clink-args={}", rpath));
}
}
if let Some(host_linker) = self.linker(compiler.host) {
cargo.env("RUSTC_HOST_LINKER", host_linker);
}
if self.is_fuse_ld_lld(compiler.host) {
cargo.env("RUSTC_HOST_FUSE_LD_LLD", "1");
cargo.env("RUSTDOC_FUSE_LD_LLD", "1");
}
if let Some(target_linker) = self.linker(target) {
let target = crate::envify(&target.triple);
cargo.env(&format!("CARGO_TARGET_{}_LINKER", target), target_linker);
}
if self.is_fuse_ld_lld(target) {
rustflags.arg("-Clink-args=-fuse-ld=lld");
}
self.lld_flags(target).for_each(|flag| {
rustdocflags.arg(&flag);
});
if !(["build", "check", "clippy", "fix", "rustc"].contains(&cmd)) && want_rustdoc {
cargo.env("RUSTDOC_LIBDIR", self.rustc_libdir(compiler));
}
let debuginfo_level = match mode {
Mode::Rustc | Mode::Codegen => self.config.rust_debuginfo_level_rustc,
Mode::Std => self.config.rust_debuginfo_level_std,
Mode::ToolBootstrap | Mode::ToolStd | Mode::ToolRustc => {
self.config.rust_debuginfo_level_tools
}
};
cargo.env(profile_var("DEBUG"), debuginfo_level.to_string());
cargo.env(
profile_var("DEBUG_ASSERTIONS"),
if mode == Mode::Std {
self.config.rust_debug_assertions_std.to_string()
} else {
self.config.rust_debug_assertions.to_string()
},
);
// `dsymutil` adds time to builds on Apple platforms for no clear benefit, and also makes
// it more difficult for debuggers to find debug info. The compiler currently defaults to
// running `dsymutil` to preserve its historical default, but when compiling the compiler
// itself, we skip it by default since we know it's safe to do so in that case.
// See https://github.com/rust-lang/rust/issues/79361 for more info on this flag.
if target.contains("apple") {
if self.config.rust_run_dsymutil {
rustflags.arg("-Csplit-debuginfo=packed");
} else {
rustflags.arg("-Csplit-debuginfo=unpacked");
}
}
if self.config.cmd.bless() {
// Bless `expect!` tests.
cargo.env("UPDATE_EXPECT", "1");
}
if !mode.is_tool() {
cargo.env("RUSTC_FORCE_UNSTABLE", "1");
}
if let Some(x) = self.crt_static(target) {
if x {
rustflags.arg("-Ctarget-feature=+crt-static");
} else {
rustflags.arg("-Ctarget-feature=-crt-static");
}
}
if let Some(x) = self.crt_static(compiler.host) {
cargo.env("RUSTC_HOST_CRT_STATIC", x.to_string());
}
if let Some(map_to) = self.build.debuginfo_map_to(GitRepo::Rustc) {
let map = format!("{}={}", self.build.src.display(), map_to);
cargo.env("RUSTC_DEBUGINFO_MAP", map);
// `rustc` needs to know the virtual `/rustc/$hash` we're mapping to,
// in order to opportunistically reverse it later.
cargo.env("CFG_VIRTUAL_RUST_SOURCE_BASE_DIR", map_to);
}
// Enable usage of unstable features
cargo.env("RUSTC_BOOTSTRAP", "1");
self.add_rust_test_threads(&mut cargo);
// Almost all of the crates that we compile as part of the bootstrap may
// have a build script, including the standard library. To compile a
// build script, however, it itself needs a standard library! This
// introduces a bit of a pickle when we're compiling the standard
// library itself.
//
// To work around this we actually end up using the snapshot compiler
// (stage0) for compiling build scripts of the standard library itself.
// The stage0 compiler is guaranteed to have a libstd available for use.
//
// For other crates, however, we know that we've already got a standard
// library up and running, so we can use the normal compiler to compile
// build scripts in that situation.
if mode == Mode::Std {
cargo
.env("RUSTC_SNAPSHOT", &self.initial_rustc)
.env("RUSTC_SNAPSHOT_LIBDIR", self.rustc_snapshot_libdir());
} else {
cargo
.env("RUSTC_SNAPSHOT", self.rustc(compiler))
.env("RUSTC_SNAPSHOT_LIBDIR", self.rustc_libdir(compiler));
}
// Tools that use compiler libraries may inherit the `-lLLVM` link
// requirement, but the `-L` library path is not propagated across
// separate Cargo projects. We can add LLVM's library path to the
// platform-specific environment variable as a workaround.
if mode == Mode::ToolRustc {
if let Some(llvm_config) = self.llvm_config(target) {
let llvm_libdir = output(Command::new(&llvm_config).arg("--libdir"));
add_link_lib_path(vec![llvm_libdir.trim().into()], &mut cargo);
}
}
// Compile everything except libraries and proc macros with the more
// efficient initial-exec TLS model. This doesn't work with `dlopen`,
// so we can't use it by default in general, but we can use it for tools
// and our own internal libraries.
if !mode.must_support_dlopen() {
rustflags.arg("-Ztls-model=initial-exec");
}
if self.config.incremental {
cargo.env("CARGO_INCREMENTAL", "1");
} else {
// Don't rely on any default setting for incr. comp. in Cargo
cargo.env("CARGO_INCREMENTAL", "0");
}
if let Some(ref on_fail) = self.config.on_fail {
cargo.env("RUSTC_ON_FAIL", on_fail);
}
if self.config.print_step_timings {
cargo.env("RUSTC_PRINT_STEP_TIMINGS", "1");
}
if self.config.print_step_rusage {
cargo.env("RUSTC_PRINT_STEP_RUSAGE", "1");
}
if self.config.backtrace_on_ice {
cargo.env("RUSTC_BACKTRACE_ON_ICE", "1");
}
cargo.env("RUSTC_VERBOSE", self.verbosity.to_string());
if source_type == SourceType::InTree {
let mut lint_flags = Vec::new();
// When extending this list, add the new lints to the RUSTFLAGS of the
// build_bootstrap function of src/bootstrap/bootstrap.py as well as
// some code doesn't go through this `rustc` wrapper.
lint_flags.push("-Wrust_2018_idioms");
lint_flags.push("-Wunused_lifetimes");
lint_flags.push("-Wsemicolon_in_expressions_from_macros");
if self.config.deny_warnings {
lint_flags.push("-Dwarnings");
rustdocflags.arg("-Dwarnings");
}
// FIXME(#58633) hide "unused attribute" errors in incremental
// builds of the standard library, as the underlying checks are
// not yet properly integrated with incremental recompilation.
if mode == Mode::Std && compiler.stage == 0 && self.config.incremental {
lint_flags.push("-Aunused-attributes");
}
// This does not use RUSTFLAGS due to caching issues with Cargo.
// Clippy is treated as an "in tree" tool, but shares the same
// cache as other "submodule" tools. With these options set in
// RUSTFLAGS, that causes *every* shared dependency to be rebuilt.
// By injecting this into the rustc wrapper, this circumvents
// Cargo's fingerprint detection. This is fine because lint flags
// are always ignored in dependencies. Eventually this should be
// fixed via better support from Cargo.
cargo.env("RUSTC_LINT_FLAGS", lint_flags.join(" "));
rustdocflags.arg("-Wrustdoc::invalid_codeblock_attributes");
}
if mode == Mode::Rustc {
rustflags.arg("-Zunstable-options");
rustflags.arg("-Wrustc::internal");
}
// Throughout the build Cargo can execute a number of build scripts
// compiling C/C++ code and we need to pass compilers, archivers, flags, etc
// obtained previously to those build scripts.
// Build scripts use either the `cc` crate or `configure/make` so we pass
// the options through environment variables that are fetched and understood by both.
//
// FIXME: the guard against msvc shouldn't need to be here
if target.contains("msvc") {
if let Some(ref cl) = self.config.llvm_clang_cl {
cargo.env("CC", cl).env("CXX", cl);
}
} else {
let ccache = self.config.ccache.as_ref();
let ccacheify = |s: &Path| {
let ccache = match ccache {
Some(ref s) => s,
None => return s.display().to_string(),
};
// FIXME: the cc-rs crate only recognizes the literal strings
// `ccache` and `sccache` when doing caching compilations, so we
// mirror that here. It should probably be fixed upstream to
// accept a new env var or otherwise work with custom ccache
// vars.
match &ccache[..] {
"ccache" | "sccache" => format!("{} {}", ccache, s.display()),
_ => s.display().to_string(),
}
};
let cc = ccacheify(&self.cc(target));
cargo.env(format!("CC_{}", target.triple), &cc);
let cflags = self.cflags(target, GitRepo::Rustc).join(" ");
cargo.env(format!("CFLAGS_{}", target.triple), &cflags);
if let Some(ar) = self.ar(target) {
let ranlib = format!("{} s", ar.display());
cargo
.env(format!("AR_{}", target.triple), ar)
.env(format!("RANLIB_{}", target.triple), ranlib);
}
if let Ok(cxx) = self.cxx(target) {
let cxx = ccacheify(&cxx);
cargo
.env(format!("CXX_{}", target.triple), &cxx)
.env(format!("CXXFLAGS_{}", target.triple), cflags);
}
}
if mode == Mode::Std && self.config.extended && compiler.is_final_stage(self) {
rustflags.arg("-Zsave-analysis");
cargo.env(
"RUST_SAVE_ANALYSIS_CONFIG",
"{\"output_file\": null,\"full_docs\": false,\
\"pub_only\": true,\"reachable_only\": false,\
\"distro_crate\": true,\"signatures\": false,\"borrow_data\": false}",
);
}
// If Control Flow Guard is enabled, pass the `control-flow-guard` flag to rustc
// when compiling the standard library, since this might be linked into the final outputs
// produced by rustc. Since this mitigation is only available on Windows, only enable it
// for the standard library in case the compiler is run on a non-Windows platform.
// This is not needed for stage 0 artifacts because these will only be used for building
// the stage 1 compiler.
if cfg!(windows)
&& mode == Mode::Std
&& self.config.control_flow_guard
&& compiler.stage >= 1
{
rustflags.arg("-Ccontrol-flow-guard");
}
// For `cargo doc` invocations, make rustdoc print the Rust version into the docs
// This replaces spaces with newlines because RUSTDOCFLAGS does not
// support arguments with regular spaces. Hopefully someday Cargo will
// have space support.
let rust_version = self.rust_version().replace(' ', "\n");
rustdocflags.arg("--crate-version").arg(&rust_version);
// Environment variables *required* throughout the build
//
// FIXME: should update code to not require this env var
cargo.env("CFG_COMPILER_HOST_TRIPLE", target.triple);
// Set this for all builds to make sure doc builds also get it.
cargo.env("CFG_RELEASE_CHANNEL", &self.config.channel);
// This one's a bit tricky. As of the time of this writing the compiler
// links to the `winapi` crate on crates.io. This crate provides raw
// bindings to Windows system functions, sort of like libc does for
// Unix. This crate also, however, provides "import libraries" for the
// MinGW targets. There's an import library per dll in the windows
// distribution which is what's linked to. These custom import libraries
// are used because the winapi crate can reference Windows functions not
// present in the MinGW import libraries.
//
// For example MinGW may ship libdbghelp.a, but it may not have
// references to all the functions in the dbghelp dll. Instead the
// custom import library for dbghelp in the winapi crates has all this
// information.
//
// Unfortunately for us though the import libraries are linked by
// default via `-ldylib=winapi_foo`. That is, they're linked with the
// `dylib` type with a `winapi_` prefix (so the winapi ones don't
// conflict with the system MinGW ones). This consequently means that
// the binaries we ship of things like rustc_codegen_llvm (aka the rustc_codegen_llvm
// DLL) when linked against *again*, for example with procedural macros
// or plugins, will trigger the propagation logic of `-ldylib`, passing
// `-lwinapi_foo` to the linker again. This isn't actually available in
// our distribution, however, so the link fails.
//
// To solve this problem we tell winapi to not use its bundled import
// libraries. This means that it will link to the system MinGW import
// libraries by default, and the `-ldylib=foo` directives will still get
// passed to the final linker, but they'll look like `-lfoo` which can
// be resolved because MinGW has the import library. The downside is we
// don't get newer functions from Windows, but we don't use any of them
// anyway.
if !mode.is_tool() {
cargo.env("WINAPI_NO_BUNDLED_LIBRARIES", "1");
}
for _ in 1..self.verbosity {
cargo.arg("-v");
}
match (mode, self.config.rust_codegen_units_std, self.config.rust_codegen_units) {
(Mode::Std, Some(n), _) | (_, _, Some(n)) => {
cargo.env(profile_var("CODEGEN_UNITS"), n.to_string());
}
_ => {
// Don't set anything
}
}
if self.config.rust_optimize {
// FIXME: cargo bench/install do not accept `--release`
if cmd != "bench" && cmd != "install" {
cargo.arg("--release");
}
}
if self.config.locked_deps {
cargo.arg("--locked");
}
if self.config.vendor || self.is_sudo {
cargo.arg("--frozen");
}
// Try to use a sysroot-relative bindir, in case it was configured absolutely.
cargo.env("RUSTC_INSTALL_BINDIR", self.config.bindir_relative());
self.ci_env.force_coloring_in_ci(&mut cargo);
// When we build Rust dylibs they're all intended for intermediate
// usage, so make sure we pass the -Cprefer-dynamic flag instead of
// linking all deps statically into the dylib.
if matches!(mode, Mode::Std | Mode::Rustc) {
rustflags.arg("-Cprefer-dynamic");
}
// When building incrementally we default to a lower ThinLTO import limit
// (unless explicitly specified otherwise). This will produce a somewhat
// slower code but give way better compile times.
{
let limit = match self.config.rust_thin_lto_import_instr_limit {
Some(limit) => Some(limit),
None if self.config.incremental => Some(10),
_ => None,
};
if let Some(limit) = limit {
rustflags.arg(&format!("-Cllvm-args=-import-instr-limit={}", limit));
}
}
Cargo { command: cargo, rustflags, rustdocflags }
}
/// Ensure that a given step is built, returning its output. This will
/// cache the step, so it is safe (and good!) to call this as often as
/// needed to ensure that all dependencies are built.
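    ///
    /// ```ignore
    /// // Memoized: a second `ensure` with an equal step value returns the
    /// // cached output instead of running the step again.
    /// let sysroot = builder.ensure(compile::Sysroot { compiler });
    /// ```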
pub fn ensure<S: Step>(&'a self, step: S) -> S::Output {
{
let mut stack = self.stack.borrow_mut();
for stack_step in stack.iter() {
                // Keep scanning unless this exact step is already on the
                // stack, which would indicate a dependency cycle.
if stack_step.downcast_ref::<S>().map_or(true, |stack_step| *stack_step != step) {
continue;
}
let mut out = String::new();
out += &format!("\n\nCycle in build detected when adding {:?}\n", step);
for el in stack.iter().rev() {
out += &format!("\t{:?}\n", el);
}
panic!("{}", out);
}
if let Some(out) = self.cache.get(&step) {
self.verbose(&format!("{}c {:?}", " ".repeat(stack.len()), step));
return out;
}
self.verbose(&format!("{}> {:?}", " ".repeat(stack.len()), step));
stack.push(Box::new(step.clone()));
}
let (out, dur) = {
let start = Instant::now();
let zero = Duration::new(0, 0);
let parent = self.time_spent_on_dependencies.replace(zero);
let out = step.clone().run(self);
let dur = start.elapsed();
let deps = self.time_spent_on_dependencies.replace(parent + dur);
(out, dur - deps)
};
if self.config.print_step_timings && !self.config.dry_run {
println!("[TIMING] {:?} -- {}.{:03}", step, dur.as_secs(), dur.subsec_millis());
}
{
let mut stack = self.stack.borrow_mut();
let cur_step = stack.pop().expect("step stack empty");
assert_eq!(cur_step.downcast_ref(), Some(&step));
}
self.verbose(&format!("{}< {:?}", " ".repeat(self.stack.borrow().len()), step));
self.cache.put(step, out.clone());
out
}
}
#[cfg(test)]
mod tests;
#[derive(Debug, Clone)]
struct Rustflags(String, TargetSelection);
impl Rustflags {
fn new(target: TargetSelection) -> Rustflags {
let mut ret = Rustflags(String::new(), target);
ret.propagate_cargo_env("RUSTFLAGS");
ret
}
/// By default, cargo will pick up on various variables in the environment. However, bootstrap
    /// reuses those variables to pass additional flags to rustdoc, so by default they get overridden.
/// Explicitly add back any previous value in the environment.
///
/// `prefix` is usually `RUSTFLAGS` or `RUSTDOCFLAGS`.
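    ///
    /// For example, with a target of `x86_64-unknown-linux-gnu` and a prefix
    /// of `RUSTFLAGS`, this reads both `RUSTFLAGS` and
    /// `CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUSTFLAGS`.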
fn propagate_cargo_env(&mut self, prefix: &str) {
// Inherit `RUSTFLAGS` by default ...
self.env(prefix);
// ... and also handle target-specific env RUSTFLAGS if they're configured.
let target_specific = format!("CARGO_TARGET_{}_{}", crate::envify(&self.1.triple), prefix);
self.env(&target_specific);
}
fn env(&mut self, env: &str) {
if let Ok(s) = env::var(env) {
for part in s.split(' ') {
self.arg(part);
}
}
}
fn arg(&mut self, arg: &str) -> &mut Self {
assert_eq!(arg.split(' ').count(), 1);
if !self.0.is_empty() {
self.0.push(' ');
}
self.0.push_str(arg);
self
}
}
#[derive(Debug)]
pub struct Cargo {
command: Command,
rustflags: Rustflags,
rustdocflags: Rustflags,
}
impl Cargo {
pub fn rustdocflag(&mut self, arg: &str) -> &mut Cargo {
self.rustdocflags.arg(arg);
self
}
pub fn rustflag(&mut self, arg: &str) -> &mut Cargo {
self.rustflags.arg(arg);
self
}
pub fn arg(&mut self, arg: impl AsRef<OsStr>) -> &mut Cargo {
self.command.arg(arg.as_ref());
self
}
pub fn args<I, S>(&mut self, args: I) -> &mut Cargo
where
I: IntoIterator<Item = S>,
S: AsRef<OsStr>,
{
for arg in args {
self.arg(arg.as_ref());
}
self
}
pub fn env(&mut self, key: impl AsRef<OsStr>, value: impl AsRef<OsStr>) -> &mut Cargo {
// These are managed through rustflag/rustdocflag interfaces.
assert_ne!(key.as_ref(), "RUSTFLAGS");
assert_ne!(key.as_ref(), "RUSTDOCFLAGS");
self.command.env(key.as_ref(), value.as_ref());
self
}
pub fn add_rustc_lib_path(&mut self, builder: &Builder<'_>, compiler: Compiler) {
builder.add_rustc_lib_path(compiler, &mut self.command);
}
pub fn current_dir(&mut self, dir: &Path) -> &mut Cargo {
self.command.current_dir(dir);
self
}
}
impl From<Cargo> for Command {
fn from(mut cargo: Cargo) -> Command {
let rustflags = &cargo.rustflags.0;
if !rustflags.is_empty() {
cargo.command.env("RUSTFLAGS", rustflags);
}
let rustdocflags = &cargo.rustdocflags.0;
if !rustdocflags.is_empty() {
cargo.command.env("RUSTDOCFLAGS", rustdocflags);
}
cargo.command
}
}
| 39.346313 | 123 | 0.557398 |
d5ecd861b0b33267a5e0662d03d43803feba2715 | 10,254 | use crate::config;
use crate::elasticsearch;
use crate::error::ResponseError;
use crate::gcp::*;
use crate::guards;
use crate::models::ImageUploadResponse;
use crate::responders::{Cached, Image, JpegReqwestStream, WebpReqwestStream};
use crate::rolodex_client;
use instrumented::instrument;
use libc::{c_float, c_int, size_t};
use rocket_contrib::json::Json;
use std::collections::HashMap;
pub struct ImageUpload(Vec<u8>);
impl rocket::data::FromDataSimple for ImageUpload {
type Error = std::io::Error;
// from https://api.rocket.rs/v0.4/rocket/data/trait.FromDataSimple.html
// see discussion at https://api.rocket.rs/v0.4/rocket/data/trait.FromData.html#provided-implementations
#[inline(always)]
fn from_data(
_: &rocket::Request,
data: rocket::Data,
) -> rocket::data::Outcome<Self, Self::Error> {
use std::io::Read;
const LIMIT: u64 = 10 * 1024 * 1024; // 10MiB
let mut bytes = Vec::new();
match data.open().take(LIMIT).read_to_end(&mut bytes) {
Ok(_) => rocket::Outcome::Success(Self(bytes)),
Err(e) => rocket::Outcome::Failure((rocket::http::Status::BadRequest, e)),
}
}
}
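/// Thumbnail renditions of an uploaded image, keyed by size name: "big"
/// (2048px), "medium" (1024px), "small" (256px) and "tiny" (64px), generated
/// in parallel with rayon.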
struct Thumbnails<'a> {
thumbs: HashMap<&'a str, image::DynamicImage>,
}
impl<'a> Thumbnails<'a> {
fn from_buffer(image: &[u8]) -> Result<Self, ResponseError> {
use image::*;
use rayon::prelude::*;
let img = load_from_memory_with_format(&image, ImageFormat::JPEG)?;
Ok(Self {
thumbs: [
("big", (img.clone(), 2048u32, 2048u32)),
("medium", (img.clone(), 1024u32, 1024u32)),
("small", (img.clone(), 256u32, 256u32)),
("tiny", (img, 64u32, 64u32)),
]
.par_iter()
.map(|(key, val)| {
let (img, width, height): &(DynamicImage, u32, u32) = val;
let result = img.thumbnail(*width, *height);
(*key, result)
})
.collect(),
})
}
}
#[link(name = "webp")]
extern "C" {
// size_t WebPEncodeBGR(const uint8_t* rgb, int width, int height, int stride, float quality_factor, uint8_t** output);
fn WebPEncodeBGR(
rgb: *const u8,
width: c_int,
height: c_int,
stride: c_int,
quality_factor: c_float,
        output: *mut *mut u8,
    ) -> size_t;
    // void WebPFree(void* ptr);
    fn WebPFree(ptr: *mut libc::c_void);
}
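/// Encodes a BGR pixel buffer as lossy WebP (quality factor 80) by calling
/// straight into libwebp, which returns a malloc'd output buffer that must
/// be released with `WebPFree`.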
fn webp_encode(img: image::ImageBuffer<image::Bgr<u8>, Vec<u8>>) -> Vec<u8> {
let width = img.width();
let height = img.height();
let stride = width * 3;
let quality: c_float = 80.0;
let mut output: *mut u8 = std::ptr::null_mut();
let raw = img.into_raw();
let mut result: Vec<u8> = vec![];
unsafe {
let length = WebPEncodeBGR(
raw.as_ptr(),
width as c_int,
height as c_int,
stride as c_int,
quality,
&mut output,
);
        // Copy the encoded bytes into a Rust-owned Vec, then release the
        // encoder's buffer with WebPFree(). Wrapping the raw pointer in
        // Vec::from_raw_parts would hand libwebp's malloc'd memory to
        // Rust's allocator on drop, which is unsound.
        if !output.is_null() && length > 0 {
            result.extend_from_slice(std::slice::from_raw_parts(output, length));
            WebPFree(output as *mut libc::c_void);
        }
    }
result
}
struct EncodedImages<'a> {
images: HashMap<&'a str, Vec<u8>>,
}
impl<'a> EncodedImages<'a> {
fn new(thumbnails: &'a Thumbnails, format: image::ImageFormat) -> Self {
use rayon::prelude::*;
match format {
image::ImageFormat::WEBP => Self {
images: thumbnails
.thumbs
.par_iter()
.map(|(key, val)| {
let vec = webp_encode(val.to_bgr());
(*key, vec)
})
.collect(),
},
_ => Self {
images: thumbnails
.thumbs
.par_iter()
.map(|(key, val)| {
let mut vec: Vec<u8> = vec![];
val.write_to(&mut vec, format).expect("couldn't encode jpg");
(*key, vec)
})
.collect(),
},
}
}
}
#[instrument(INFO)]
fn encode_image_and_upload(kind: &str, client_id: &str, image: &[u8]) -> Result<(), ResponseError> {
use rayon::prelude::*;
let thumbnails = Thumbnails::from_buffer(image)?;
let mut jpegs = EncodedImages::new(&thumbnails, image::ImageFormat::JPEG);
let mut webps = EncodedImages::new(&thumbnails, image::ImageFormat::WEBP);
let prefix = format!("{}/{}/{}", kind, client_id.get(0..2).unwrap(), client_id);
let (_, mut errors): (Vec<_>, Vec<_>) = webps
.images
.par_iter_mut()
.map(|(key, val)| {
post_to_gcs(
&format!("{}/{}.webp", prefix, key),
val.drain(0..).collect(),
)
})
.partition(Result::is_ok);
if !errors.is_empty() {
            // Bail out with one of the upload errors (the last one popped)
return errors.pop().unwrap();
}
let (_, mut errors): (Vec<_>, Vec<_>) = jpegs
.images
.par_iter_mut()
.map(|(key, val)| post_to_gcs(&format!("{}/{}.jpg", prefix, key), val.drain(0..).collect()))
.partition(Result::is_ok);
if !errors.is_empty() {
            // Bail out with one of the upload errors (the last one popped)
return errors.pop().unwrap();
}
Ok(())
}
#[post("/img/<kind>/<client_id>", data = "<image>", format = "image/jpeg")]
pub fn post_client_image(
kind: String,
client_id: String,
image: ImageUpload,
calling_client: guards::Client,
_ratelimited: guards::RateLimited,
) -> Result<Json<ImageUploadResponse>, ResponseError> {
// check if calling client is authorized
if calling_client.client_id != client_id {
return Err(ResponseError::unauthorized("Not authorized"));
}
match kind.as_ref() {
"avatar" => {
encode_image_and_upload(&kind, &client_id, &image.0)?;
// fetch client info
let rolodex_client = rolodex_client::Client::new(&config::CONFIG);
// increment avatar version
rolodex_client.increment_client_avatar(
rolodex_grpc::proto::IncrementClientAvatarRequest {
client_id: calling_client.client_id.clone(),
increment_by: 1,
},
)?;
let client = rolodex_client::get_client_for(
&rolodex_client,
&calling_client.client_id,
&calling_client.client_id,
)?;
// Update the index in elasticsearch. This is launched on a separate thread
// so it doesn't block.
let elastic_doc: elasticsearch::ClientProfileDocument = client.clone().into();
std::thread::spawn(move || {
let elastic = elasticsearch::ElasticSearchClient::new();
elastic.update(elastic_doc);
});
let handle = if client.handle.is_empty() {
None
} else {
Some(client.handle)
};
let _res = invalidate_cdn_cache_for_client(&client.client_id, &handle);
Ok(Json(ImageUploadResponse {}))
}
_ => Err(ResponseError::bad_request("Invalid 'kind' parameter")),
}
}
#[get("/img/<kind>/<client_id>/<name>")]
pub fn get_client_image(
kind: String,
client_id: String,
name: String,
_ratelimited: guards::RateLimited,
) -> Result<Cached<Image>, ResponseError> {
use rocket::response::Stream;
if client_id.len() != 32 {
return Err(ResponseError::not_found("client_id"));
}
let object = format!(
"{}/{}/{}/{}",
kind,
client_id.get(0..2).unwrap(),
client_id,
name
);
let splat: Vec<&str> = name.split('.').collect();
if splat.len() != 2 {
return Err(ResponseError::not_found("name"));
}
match kind.as_ref() {
"avatar" => match splat[0] {
// match first part, should be one of these options
"big" | "medium" | "small" | "tiny" => match splat[1] {
// match second part
"jpg" => Ok(Cached::from(
Image::Jpeg(JpegReqwestStream(Stream::from(get_from_gcs(&object)?))),
30 * 24 * 3600, // 30 days
)),
"webp" => Ok(Cached::from(
Image::Webp(WebpReqwestStream(Stream::from(get_from_gcs(&object)?))),
30 * 24 * 3600, // 30 days
)),
_ => Err(ResponseError::not_found("format")),
},
_ => Err(ResponseError::not_found("size")),
},
_ => Err(ResponseError::not_found("kind")),
}
}
#[cfg(test)]
mod tests {
// Note this useful idiom: importing names from outer (for mod tests) scope.
use super::*;
fn read_into_vec(name: &str) -> std::io::Result<Vec<u8>> {
use std::fs::File;
use std::io::Read;
let mut file = File::open(name)?;
let mut data = Vec::new();
file.read_to_end(&mut data)?;
        Ok(data)
}
#[test]
fn test_thumbnail() {
use std::fs::File;
use std::io::Write;
let image = read_into_vec(&format!(
"{}/src/testdata/myface.jpg",
env!("CARGO_MANIFEST_DIR"),
))
.expect("couldn't read image file");
let thumbnails = Thumbnails::from_buffer(&image).expect("couldn't generate thumbnails");
let jpgs = EncodedImages::new(&thumbnails, image::ImageFormat::JPEG);
for (key, val) in jpgs.images.iter() {
let mut f = File::create(format!("{}/{}.jpg", env!("OUT_DIR"), key))
.expect("couldn't create file");
            f.write_all(val).expect("couldn't write to file");
}
let webps = EncodedImages::new(&thumbnails, image::ImageFormat::WEBP);
for (key, val) in webps.images.iter() {
let mut f = File::create(format!("{}/{}.webp", env!("OUT_DIR"), key))
.expect("couldn't create file");
            f.write_all(val).expect("couldn't write to file");
}
}
}
| 31.550769 | 123 | 0.532768 |
dd948d2f2c02f2341653013d26173cb1378d69fe | 39 | mod color;
pub use self::color::Color;
| 13 | 27 | 0.717949 |
dd708e6cf512bae3777be09ead191e13c9810910 | 4,760 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
pub mod context;
use context::Context;
pub mod application;
pub mod connection_close;
pub mod early;
pub mod interest;
pub use crate::contexts::WriteContext;
pub use interest::Interest;
/// re-export core
pub use s2n_quic_core::transmission::*;
use crate::{
endpoint, path,
space::TxPacketNumbers,
transmission::{self, interest::Provider as _},
};
use core::{marker::PhantomData, ops::RangeInclusive};
use s2n_codec::{Encoder, EncoderBuffer};
use s2n_quic_core::{
event,
frame::Padding,
packet::{
encoding::PacketPayloadEncoder,
number::{PacketNumber, PacketNumberSpace},
stateless_reset,
},
time::Timestamp,
};
pub trait Payload: interest::Provider {
fn size_hint(&self, payload_range: RangeInclusive<usize>) -> usize;
fn on_transmit<W: WriteContext>(&mut self, context: &mut W);
fn packet_number_space(&self) -> PacketNumberSpace;
}
pub struct Transmission<'a, 'sub, Config: endpoint::Config, P: Payload> {
pub config: PhantomData<Config>,
pub outcome: &'a mut transmission::Outcome,
pub payload: P,
pub packet_number: PacketNumber,
pub timestamp: Timestamp,
pub transmission_constraint: transmission::Constraint,
pub transmission_mode: transmission::Mode,
pub tx_packet_numbers: &'a mut TxPacketNumbers,
pub path_id: path::Id,
pub publisher: &'a mut event::ConnectionPublisherSubscriber<
'sub,
<Config as endpoint::Config>::EventSubscriber,
>,
pub packet_interceptor: &'a mut <Config as endpoint::Config>::PacketInterceptor,
}
impl<'a, 'sub, Config: endpoint::Config, P: Payload> PacketPayloadEncoder
for Transmission<'a, 'sub, Config, P>
{
fn encoding_size_hint<E: Encoder>(&mut self, encoder: &E, minimum_len: usize) -> usize {
if self.has_transmission_interest() {
self.payload.size_hint(minimum_len..=encoder.capacity())
} else {
0
}
}
fn encode(
&mut self,
buffer: &mut EncoderBuffer,
minimum_len: usize,
header_len: usize,
tag_len: usize,
) {
debug_assert!(
buffer.is_empty(),
"the implementation assumes an empty buffer"
);
let mut context: Context<Config> = Context {
outcome: self.outcome,
buffer,
packet_number: self.packet_number,
transmission_constraint: self.transmission_constraint,
transmission_mode: self.transmission_mode,
timestamp: self.timestamp,
header_len,
tag_len,
config: Default::default(),
path_id: self.path_id,
publisher: self.publisher,
};
self.payload.on_transmit(&mut context);
if !context.buffer.is_empty() {
// Add padding up to minimum_len
let mut length = minimum_len.saturating_sub(context.buffer.len());
            // If only a few bytes are left in the buffer, we may as well pad
            // it to full capacity.
let remaining_capacity = context.buffer.remaining_capacity();
if remaining_capacity < stateless_reset::min_indistinguishable_packet_len(tag_len) {
length = remaining_capacity;
}
if length > 0 {
// Use `write_frame_forced` to bypass congestion controller checks
// since we still want to send this packet despite Padding being
// congestion controlled.
context.write_frame_forced(&Padding { length });
}
{
use s2n_quic_core::{
event::ConnectionPublisher,
packet::interceptor::{Interceptor, Packet},
};
// intercept the payload before it is encrypted
self.packet_interceptor.intercept_tx_payload(
self.publisher.subject(),
Packet {
number: self.packet_number,
timestamp: self.timestamp,
},
buffer,
);
}
self.tx_packet_numbers.on_transmit(self.packet_number);
self.outcome.bytes_sent = header_len + tag_len + buffer.len();
}
}
}
impl<'a, 'sub, Config: endpoint::Config, P: Payload> transmission::interest::Provider
for Transmission<'a, 'sub, Config, P>
{
fn transmission_interest<Q: transmission::interest::Query>(
&self,
query: &mut Q,
) -> transmission::interest::Result {
self.payload.transmission_interest(query)
}
}
| 31.946309 | 96 | 0.607563 |
724d7400b97f2289ddf7193da3ac11248782bdbc | 13,318 | mod guild;
mod channel;
mod user;
pub use guild::*;
pub use channel::*;
pub use user::*;
use crate::gateway::*;
use std::collections::HashMap;
use std::any::{TypeId, Any};
use crate::{Identifiable, Snowflake};
use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
pub trait Stored {
type Storage: Storage;
}
pub trait Storage: Send + Sync {}
pub struct StorageContainer {
    /// Initializer callbacks; `clone` replays these to build fresh storages.
    init: Vec<Box<dyn Fn(&mut StorageContainer) + Send + Sync>>,
    /// Type-erased storages (each a `RwLock<T::Storage>`), keyed by stored type.
    storages: HashMap<TypeId, Box<dyn Any + Send + Sync>>,
}
/// This implementation of `Clone` is a bit special: the storages
/// themselves cannot be cloned, since they sit behind locks, so it
/// instead replays the `init` callbacks to populate the `storages`
/// field of the new container.
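///
/// A minimal sketch of the effect (`User` here stands for any `Stored` type):
/// ```ignore
/// let mut original = StorageContainer::for_initialization();
/// original.add_initializer(|c| c.initialize::<User>());
/// // The clone re-runs the initializer, so it gets its own empty
/// // `User` storage rather than a copy of `original`'s contents.
/// let copy = original.clone();
/// ```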
impl Clone for StorageContainer {
fn clone(&self) -> Self {
let mut container = StorageContainer::for_use(self.init.len());
for callback in &self.init {
callback(&mut container);
}
container
}
}
impl StorageContainer {
pub(crate) fn for_initialization() -> StorageContainer {
StorageContainer {
init: Vec::with_capacity(5),
storages: HashMap::new(),
}
}
pub(crate) fn for_use(capacity: usize) -> StorageContainer {
StorageContainer {
init: Vec::new(),
storages: HashMap::with_capacity(capacity),
}
}
pub(crate) fn add_initializer<F: Fn(&mut StorageContainer) + Send + Sync + 'static>(&mut self, initializer: F) {
if !self.storages.is_empty() {
panic!("Adding initializer to an already initialized storage");
}
self.init.push(Box::new(initializer));
}
/// Initialize the storage with a default
/// empty storage.
pub fn initialize<T: Stored + 'static>(&mut self) where T::Storage: Default {
self.storages.insert(TypeId::of::<T>(), Box::new(RwLock::new(T::Storage::default())));
}
/// Initialize the storage with the provided
/// existing storage instance.
pub fn existing<T: Stored + 'static>(&mut self, storage: T::Storage) {
self.storages.insert(TypeId::of::<T>(), Box::new(RwLock::new(storage)));
}
pub async fn read<T: Stored + 'static>(&self) -> RwLockReadGuard<'_, T::Storage> {
self.storages
.get(&TypeId::of::<T>()).expect("Storage has never been initialized")
.downcast_ref::<RwLock<T::Storage>>().expect("Failed to downcast storage")
.read().await
}
pub async fn write<T: Stored + 'static>(&self) -> RwLockWriteGuard<'_, T::Storage> {
self.storages
.get(&TypeId::of::<T>()).expect("Storage has never been initialized")
.downcast_ref::<RwLock<T::Storage>>().expect("Failed to downcast storage")
.write().await
}
}
/// Implementation of the utility functions
/// to insert objects
impl StorageContainer {
/// Insert a guild, its channels and users in
/// the respective storages.
#[inline]
async fn insert_guild(&mut self, guild: &Guild) {
{
let mut guilds = self.write::<Guild>().await;
let mut new_guild = Clone::clone(guild);
//channels and members are not sent for guild updates, so transfer them from
//the previous guild object
if let Some(guild) = guilds.remove(guild.id) {
new_guild.members = guild.members;
new_guild.channels = guild.channels;
}
guilds.insert(new_guild);
}
{
let mut channels = self.write::<Channel>().await;
for channel in guild.channels.values() {
channels.insert(Channel::from_guild(channel));
}
}
{
let mut users = self.write::<User>().await;
for member in guild.members.values() {
Self::insert_user(&mut users, &member.user, Some(guild.id))
}
}
}
/// Adds a channel and insert its recipients in
/// the user storage if it is a group channel.
#[inline]
async fn insert_channel(&mut self, channel: &Channel) {
self.write::<Channel>().await.insert(Clone::clone(&channel));
//insert group channel recipients
if let Channel::Group(channel) = &channel {
let mut users = self.write::<User>().await;
for user in channel.recipients.values() {
Self::insert_user(&mut users, user, None)
}
}
}
/// Adds a new role to its guild.
#[inline]
async fn insert_role(&mut self, role: &Role, guild: Snowflake) {
let mut guilds = self.write::<Guild>().await;
if let Some(guild) = guilds.get_mut(guild) {
guild.roles.insert(role.id, Clone::clone(&role));
}
}
/// Inserts the user and add it to the given guild
/// if the user was not already saved, else just add
/// the guild to the currently saved user.
#[inline]
fn insert_user(storage: &mut RwLockWriteGuard<'_, UserStorage>, user: &User, guild: Option<Snowflake>) {
if let Some(user) = storage.get_mut(user.id) {
if let Some(guild) = guild {
user.guilds.insert(guild);
}
} else {
let mut user = Clone::clone(user);
if let Some(guild) = guild {
user.guilds.insert(guild);
}
storage.insert(user);
}
}
}
impl StorageContainer {
pub async fn on_ready(&mut self, event: &ReadyDispatch) {
self.initialize::<Guild>();
self.initialize::<Channel>();
self.initialize::<User>();
{
let mut channels = self.write::<Channel>().await;
for channel in &event.private_channels {
channels.insert(Channel::from_private(channel));
}
}
self.write::<User>().await.insert(event.user.clone());
}
pub async fn on_channel_create(&mut self, event: &ChannelCreateDispatch) {
self.insert_channel(&event.0).await;
}
pub async fn on_channel_update(&mut self, event: &ChannelUpdateDispatch) {
self.insert_channel(&event.0).await;
}
pub async fn on_channel_delete(&mut self, event: &ChannelDeleteDispatch) {
self.write::<Channel>().await.remove(event.0.id());
}
pub async fn on_channel_pins_update(&mut self, event: &ChannelPinsUpdateDispatch) {
let mut channels = self.write::<Channel>().await;
match channels.get_mut(event.channel_id) {
Some(Channel::Text(c)) => c.last_pin_timestamp = event.last_pin_timestamp.clone(),
Some(Channel::News(c)) => c.last_pin_timestamp = event.last_pin_timestamp.clone(),
Some(Channel::Direct(c)) => c.last_pin_timestamp = event.last_pin_timestamp.clone(),
Some(Channel::Group(c)) => c.last_pin_timestamp = event.last_pin_timestamp.clone(),
None => (), //the DM was not loaded yet so we can't update it
_ => panic!("Message-less channel received a pin update")
};
}
pub async fn on_guild_create(&mut self, event: &GuildCreateDispatch) {
self.insert_guild(&event.0).await;
}
pub async fn on_guild_update(&mut self, event: &GuildUpdateDispatch) {
self.insert_guild(&event.0).await;
}
pub async fn on_guild_delete(&mut self, event: &GuildDeleteDispatch) {
let id = event.id;
let guild: Guild = Guild::clone(self.read::<Guild>().await.get(id));
{
let mut channels = self.write::<Channel>().await;
for channel in guild.channels.keys() {
channels.remove(*channel);
}
}
self.write::<Guild>().await.remove(id);
}
/// Removal of the user will be handled
/// by an upcoming member remove event.
pub async fn on_guild_ban_add(&mut self, _event: &GuildBanAddDispatch) {}
pub async fn on_guild_ban_remove(&mut self, _event: &GuildBanRemoveDispatch) {}
pub async fn on_guild_emojis_update(&mut self, event: &GuildEmojisUpdateDispatch) {
let mut guilds = self.write::<Guild>().await;
if let Some(guild) = guilds.get_mut(event.guild_id) {
guild.emojis = event.emojis.clone();
}
}
pub async fn on_guild_integrations_update(&mut self, _event: &GuildIntegrationsUpdateDispatch) {}
pub async fn on_guild_member_add(&mut self, event: &GuildMemberAddDispatch) {
Self::insert_user(&mut self.write::<User>().await, &event.member.user, Some(event.guild_id));
let mut guilds = self.write::<Guild>().await;
if let Some(guild) = guilds.get_mut(event.guild_id) {
guild.members.insert(event.member.user.id, event.member.clone());
}
}
pub async fn on_guild_member_remove(&mut self, event: &GuildMemberRemoveDispatch) {
let mut guilds = self.write::<Guild>().await;
if let Some(guild) = guilds.get_mut(event.guild_id) {
guild.members.remove(&event.user.id);
}
}
pub async fn on_guild_member_update(&mut self, event: &GuildMemberUpdateDispatch) {
let mut guilds = self.write::<Guild>().await;
if let Some(guild) = guilds.get_mut(event.guild_id) {
if let Some(member) = guild.members.get_mut(&event.user.id) {
member.user = event.user.clone();
member.nick = event.nick.clone();
member.roles = event.roles.clone();
member.premium_since = event.premium_since;
}
}
}
pub async fn on_guild_members_chunk(&mut self, _event: &GuildMembersChunkDispatch) {}
pub async fn on_guild_role_create(&mut self, event: &GuildRoleCreateDispatch) {
self.insert_role(&event.role, event.guild_id).await;
}
pub async fn on_guild_role_update(&mut self, event: &GuildRoleUpdateDispatch) {
self.insert_role(&event.role, event.guild_id).await;
}
pub async fn on_guild_role_delete(&mut self, event: &GuildRoleDeleteDispatch) {
let mut guilds = self.write::<Guild>().await;
if let Some(guild) = guilds.get_mut(event.guild_id) {
guild.roles.remove(&event.role_id);
}
}
pub async fn on_invite_create(&mut self, _event: &InviteCreateDispatch) {}
pub async fn on_invite_delete(&mut self, _event: &InviteDeleteDispatch) {}
pub async fn on_message_create(&mut self, _event: &MessageCreateDispatch) {}
pub async fn on_message_update(&mut self, _event: &MessageUpdateDispatch) {}
pub async fn on_message_delete(&mut self, _event: &MessageDeleteDispatch) {}
pub async fn on_message_delete_bulk(&mut self, _event: &MessageDeleteBulkDispatch) {}
pub async fn on_reaction_add(&mut self, _event: &MessageReactionAddDispatch) {}
pub async fn on_reaction_remove(&mut self, _event: &MessageReactionRemoveDispatch) {}
pub async fn on_reaction_remove_all(&mut self, _event: &MessageReactionRemoveAllDispatch) {}
pub async fn on_reaction_remove_emoji(&mut self, _event: &MessageReactionRemoveEmojiDispatch) {}
pub async fn on_presence_update(&mut self, event: &PresenceUpdateDispatch) {
let update = &event.0;
let mut guilds = self.write::<Guild>().await;
if let Some(guild) = guilds.get_mut(event.guild_id) {
if let Some(member) = guild.members.get_mut(&update.user.id) {
member.roles = update.roles.clone();
member.premium_since = update.premium_since;
if let Some(nick) = &update.nick {
member.nick = nick.clone();
}
}
}
}
pub async fn on_typing_start(&mut self, _event: &TypingStartDispatch) {}
pub async fn on_user_update(&mut self, event: &UserUpdateDispatch) {
let user = &event.0;
{
let users = self.read::<User>().await;
let in_guilds = &users.get(user.id).guilds;
//update the guild member's user
let mut guilds = self.write::<Guild>().await;
for guild in in_guilds {
if let Some(guild) = guilds.get_mut(*guild) {
if let Some(member) = guild.members.get_mut(&user.id) {
member.user = Clone::clone(&user);
}
}
}
}
let mut users = self.write::<User>().await;
if let Some(current_user) = users.get_mut(user.id) {
current_user.username = user.username.clone();
current_user.discriminator = user.discriminator.clone();
current_user.avatar = user.avatar.clone();
current_user.bot = user.bot;
current_user.mfa_enabled = user.mfa_enabled;
current_user.locale = user.locale.clone();
current_user.verified = user.verified;
current_user.email = user.email.clone();
current_user.flags = user.flags;
current_user.premium_type = user.premium_type;
}
}
pub async fn on_voice_state_update(&mut self, _event: &VoiceStateUpdateDispatch) {}
pub async fn on_voice_server_update(&mut self, _event: &VoiceServerUpdateDispatch) {}
pub async fn on_webhooks_update(&mut self, _event: &WebhooksUpdateDispatch) {}
}
| 35.705094 | 116 | 0.609626 |
1456a054e58b10503117f03d80324e97bf681fa7 | 696 | use std::io::{self, Cursor, Read, Write};
#[derive(Debug, Default)]
pub struct Packet {
buffer: Cursor<Vec<u8>>,
}
impl From<&[u8]> for Packet {
fn from(buf: &[u8]) -> Self {
Packet {
buffer: Cursor::new(Vec::from(buf)),
}
}
}
impl Read for Packet {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.buffer.read(buf)
}
}
impl Write for Packet {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.buffer.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
self.buffer.flush()
}
}
impl AsRef<[u8]> for Packet {
fn as_ref(&self) -> &[u8] {
self.buffer.get_ref()
}
}
| 18.810811 | 61 | 0.525862 |
61b578d0e78cca2c7fdaeea1238eb8a2a96d45ae | 1,858 | /* automatically generated by rust-bindgen */
extern "C" {
pub fn cublasHgemm(
handle: cublasHandle_t,
transa: cublasOperation_t,
transb: cublasOperation_t,
m: ::std::os::raw::c_int,
n: ::std::os::raw::c_int,
k: ::std::os::raw::c_int,
alpha: *const __half,
A: *const __half,
lda: ::std::os::raw::c_int,
B: *const __half,
ldb: ::std::os::raw::c_int,
beta: *const __half,
C: *mut __half,
ldc: ::std::os::raw::c_int,
) -> cublasStatus_t;
}
extern "C" {
pub fn cublasHgemmBatched(
handle: cublasHandle_t,
transa: cublasOperation_t,
transb: cublasOperation_t,
m: ::std::os::raw::c_int,
n: ::std::os::raw::c_int,
k: ::std::os::raw::c_int,
alpha: *const __half,
Aarray: *const *const __half,
lda: ::std::os::raw::c_int,
Barray: *const *const __half,
ldb: ::std::os::raw::c_int,
beta: *const __half,
Carray: *const *mut __half,
ldc: ::std::os::raw::c_int,
batchCount: ::std::os::raw::c_int,
) -> cublasStatus_t;
}
extern "C" {
pub fn cublasHgemmStridedBatched(
handle: cublasHandle_t,
transa: cublasOperation_t,
transb: cublasOperation_t,
m: ::std::os::raw::c_int,
n: ::std::os::raw::c_int,
k: ::std::os::raw::c_int,
alpha: *const __half,
A: *const __half,
lda: ::std::os::raw::c_int,
strideA: ::std::os::raw::c_longlong,
B: *const __half,
ldb: ::std::os::raw::c_int,
strideB: ::std::os::raw::c_longlong,
beta: *const __half,
C: *mut __half,
ldc: ::std::os::raw::c_int,
strideC: ::std::os::raw::c_longlong,
batchCount: ::std::os::raw::c_int,
) -> cublasStatus_t;
}
| 29.967742 | 45 | 0.527987 |
76c409d4dc72ee3180d0cba052ab730c7f87db54 | 762 | mod available_datasets;
pub mod component;
pub mod meal;
pub use available_datasets::AvailableDatasets;
use std::collections::BTreeMap;
use uuid::Uuid;
/// Struct to hold all application data
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub struct Data {
/// The year the data applies to.
pub year: i32,
/// The month the data applies to.
pub month: u32,
/// The available meal components.
pub components: BTreeMap<Uuid, component::Component>,
/// The available meals.
pub meals: BTreeMap<Uuid, meal::Meal>,
}
impl Data {
pub fn new(year: i32, month: u32) -> Self {
Self {
year,
month,
components: BTreeMap::new(),
meals: BTreeMap::new(),
}
}
}
| 23.8125 | 57 | 0.622047 |
ff192179d8e733f3f22774d9a37e3162325e5085 | 245 | use ron::de::from_str;
#[test]
fn test_inf_and_nan() {
assert_eq!(from_str("inf"), Ok(std::f64::INFINITY));
assert_eq!(from_str("-inf"), Ok(std::f64::NEG_INFINITY));
assert_eq!(from_str::<f64>("NaN").map(|n| n.is_nan()), Ok(true))
} | 30.625 | 68 | 0.628571 |
ef65fa5ea14a9d6d8e083e6cf6e1dfefb71dbcaa | 1,828 | // iterators2.rs
// In this exercise, you'll learn some of the unique advantages that iterators
// can offer. Follow the steps to complete the exercise.
// As always, there are hints if you execute `rustlings hint iterators2`!
// Step 1.
// Complete the `capitalize_first` function.
// "hello" -> "Hello"
pub fn capitalize_first(input: &str) -> String {
let mut c = input.chars();
match c.next() {
None => String::new(),
Some(first) => {
let f = first.to_uppercase().next().unwrap();
String::from(f) + c.as_str()
}
}
}
// Step 2.
// Apply the `capitalize_first` function to a slice of string slices.
// Return a vector of strings.
// ["hello", "world"] -> ["Hello", "World"]
pub fn capitalize_words_vector(words: &[&str]) -> Vec<String> {
words
.to_vec()
.iter()
.map(|&word| capitalize_first(word))
.collect()
}
// Step 3.
// Apply the `capitalize_first` function again to a slice of string slices.
// Return a single string.
// ["hello", " ", "world"] -> "Hello World"
pub fn capitalize_words_string(words: &[&str]) -> String {
words
.iter()
.map(|word| capitalize_first(word))
.collect::<Vec<String>>()
.join("")
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_success() {
assert_eq!(capitalize_first("hello"), "Hello");
}
#[test]
fn test_empty() {
assert_eq!(capitalize_first(""), "");
}
#[test]
fn test_iterate_string_vec() {
let words = vec!["hello", "world"];
assert_eq!(capitalize_words_vector(&words), ["Hello", "World"]);
}
#[test]
fn test_iterate_into_string() {
let words = vec!["hello", " ", "world"];
assert_eq!(capitalize_words_string(&words), "Hello World");
}
}
| 26.114286 | 78 | 0.582057 |
e9e813465b21638600d68c3b5b7046c792f9208c | 1,148 | /// Benchmark regular indexing flow (using JSONRPC), don't persist the resulting index.
extern crate electrs_tapyrus;
extern crate error_chain;
#[macro_use]
extern crate log;
use electrs_tapyrus::{
cache::BlockTxIDsCache, config::Config, daemon::Daemon, errors::*, fake::FakeStore,
index::Index, metrics::Metrics, signal::Waiter,
};
use error_chain::ChainedError;
use std::sync::Arc;
fn run() -> Result<()> {
let signal = Waiter::start();
let config = Config::from_args();
let metrics = Metrics::new(config.monitoring_addr);
metrics.start();
let cache = Arc::new(BlockTxIDsCache::new(0, &metrics));
let daemon = Daemon::new(
&config.daemon_dir,
config.daemon_rpc_addr,
config.cookie_getter(),
config.network_type,
config.network_id.clone(),
signal.clone(),
cache,
&metrics,
)?;
let fake_store = FakeStore {};
let index = Index::load(&fake_store, &daemon, &metrics, config.index_batch_size)?;
index.update(&fake_store, &signal)?;
Ok(())
}
fn main() {
if let Err(e) = run() {
error!("{}", e.display_chain());
}
}
| 26.697674 | 87 | 0.635017 |
cc046d0a398ff0cb6e4f9b2e929d9e7edbb9448c | 2,467 | use crate::config::*;
use crate::bitvector::BitVector;
use crate::popcount::{popcount_linear};
pub struct BitvectorRank {
bitvec: BitVector,
basic_block_size: position_t,
rank_lut: Vec<position_t>,
}
impl BitvectorRank {
pub fn new(
basic_block_size: position_t,
bitvector_per_level: &Vec<Vec<word_t>>,
num_bits_per_level: &Vec<position_t>,
start_level: level_t,
end_level: level_t,
) -> BitvectorRank {
let mut rank = BitvectorRank {
bitvec: BitVector::new(bitvector_per_level, num_bits_per_level, start_level, end_level),
            basic_block_size,
rank_lut: Vec::new(),
};
rank.init_rank_lut();
rank
}
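    // Precomputes, per basic block, the number of set bits before the block
    // starts; rank() then needs one table lookup plus a popcount over at
    // most one basic block.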
fn init_rank_lut(&mut self) {
let word_per_basic_block: position_t = self.basic_block_size / K_WORD_SIZE;
let num_blocks: position_t = self.bitvec.get_num_bits() / self.basic_block_size + 1;
let mut rank_lut = vec![0; num_blocks];
let mut cumu_rank: position_t = 0;
for i in 0..(num_blocks-1) {
rank_lut[i] = cumu_rank;
cumu_rank += popcount_linear(self.bitvec.get_bits(), (i * word_per_basic_block) as u64, self.basic_block_size as u64) as usize;
}
rank_lut[num_blocks - 1] = cumu_rank;
self.rank_lut = rank_lut;
}
pub fn get_bitvec(&self) -> &BitVector {
&self.bitvec
}
// Counts the number of 1's in the bitvector up to position pos.
// pos is zero-based; count is one-based.
// E.g., for bitvector: 100101000, rank(3) = 2
pub fn rank(&self, pos: position_t) -> position_t {
let word_per_basic_block: position_t = self.basic_block_size / K_WORD_SIZE;
let block_id: position_t = pos / self.basic_block_size;
let offset: position_t = pos & (self.basic_block_size - 1);
return self.rank_lut[block_id] + popcount_linear(self.bitvec.get_bits(), (block_id * word_per_basic_block) as u64, (offset + 1) as u64) as position_t;
}
pub fn prefetch(&self, pos: position_t) {
unsafe {
let bits_pointer = self.bitvec.get_bits().as_ptr();
core::intrinsics::prefetch_read_data(bits_pointer.offset((pos / K_WORD_SIZE) as isize), 0);
let rank_lut_pointer = self.rank_lut.as_ptr();
core::intrinsics::prefetch_read_data(rank_lut_pointer.offset((pos / self.basic_block_size) as isize), 0);
}
}
}
| 37.378788 | 158 | 0.642886 |
39441ec5eb934974c8b400b679de2aee57682056 | 5,330 | //! Types for context representation
//! See [ctx attribute](super::attributes#ctx) for more information.
use crate::error::DekuError;
use core::marker::PhantomData;
use core::str::FromStr;
#[cfg(feature = "alloc")]
use alloc::format;
/// An endian
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum Endian {
/// Little endian
Little,
/// Big endian
Big,
}
/// Error returned when parsing a `Endian` using [`from_str`]
///
/// [`from_str`]: Endian::from_str()
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ParseEndianError {}
impl Endian {
/// [`Endian::default`], but const.
///
/// [`Endian::default`]: Endian::default()
pub const fn new() -> Self {
#[cfg(target_endian = "little")]
let endian = Endian::Little;
#[cfg(target_endian = "big")]
let endian = Endian::Big;
endian
}
/// Is it little endian
pub fn is_le(self) -> bool {
self == Endian::Little
}
/// Is it big endian
pub fn is_be(self) -> bool {
self == Endian::Big
}
}
impl Default for Endian {
/// Return the endianness of the target's CPU.
fn default() -> Self {
Self::new()
}
}
impl FromStr for Endian {
type Err = ParseEndianError;
/// Parse a `Endian` from a string.
/// # Examples
/// ```rust
/// use std::str::FromStr;
/// use deku::ctx::Endian;
/// assert_eq!(FromStr::from_str("little"), Ok(Endian::Little));
/// assert_eq!(FromStr::from_str("big"), Ok(Endian::Big));
/// assert!(<Endian as FromStr>::from_str("not an endian").is_err());
/// ```
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"little" => Ok(Endian::Little),
"big" => Ok(Endian::Big),
_ => Err(ParseEndianError {}),
}
}
}
/// A limit placed on a container's elements
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub enum Limit<T, Predicate: FnMut(&T) -> bool> {
/// Read a specific count of elements
Count(usize),
/// Read until a given predicate holds true
Until(Predicate, PhantomData<T>),
/// Read until a given quantity of bits have been read
Size(Size),
}
impl<T> From<usize> for Limit<T, fn(&T) -> bool> {
fn from(n: usize) -> Self {
Limit::Count(n)
}
}
impl<T, Predicate: for<'a> FnMut(&'a T) -> bool> From<Predicate> for Limit<T, Predicate> {
fn from(predicate: Predicate) -> Self {
Limit::Until(predicate, PhantomData)
}
}
impl<T> From<Size> for Limit<T, fn(&T) -> bool> {
fn from(size: Size) -> Self {
Limit::Size(size)
}
}
impl<T, Predicate: for<'a> FnMut(&'a T) -> bool> Limit<T, Predicate> {
/// Constructs a new Limit that reads until the given predicate returns true
/// The predicate is given a reference to the latest read value and must return
/// true to stop reading
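    ///
    /// A short sketch, assuming `u8` elements:
    /// ```rust
    /// use deku::ctx::Limit;
    /// // Read elements until a NUL byte is produced.
    /// let limit = Limit::new_until(|v: &u8| *v == 0);
    /// ```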
pub fn new_until(predicate: Predicate) -> Self {
predicate.into()
}
}
impl<T> Limit<T, fn(&T) -> bool> {
/// Constructs a new Limit that reads until the given number of elements are read
pub fn new_count(count: usize) -> Self {
count.into()
}
/// Constructs a new Limit that reads until the given size
pub fn new_size(size: Size) -> Self {
size.into()
}
}
/// The size of a field
#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub enum Size {
/// bit size
Bits(usize),
/// byte size
Bytes(usize),
}
impl Size {
/// Convert the size in bytes to a bit size.
///
    /// # Panics
    /// Panics if `byte_size * 8` is greater than `usize::MAX`.
fn bits_from_bytes(byte_size: usize) -> Self {
Self::Bits(byte_size.checked_mul(8).expect("bit size overflow"))
}
/// Returns the bit size of a type.
/// # Examples
/// ```rust
/// # use deku::ctx::Size;
///
/// assert_eq!(Size::of::<i32>(), Size::Bits(4 * 8));
/// ```
///
/// # Panics
    /// Panics if the bit size of the given type is greater than `usize::MAX`.
pub fn of<T>() -> Self {
Self::bits_from_bytes(core::mem::size_of::<T>())
}
/// Returns the bit size of the pointed-to value
pub fn of_val<T: ?Sized>(val: &T) -> Self {
Self::bits_from_bytes(core::mem::size_of_val(val))
}
/// Returns the size in bits of a Size
///
/// # Panics
    /// Panics if the bit size of `Size::Bytes(n)` is greater than `usize::MAX`.
pub fn bit_size(&self) -> usize {
match *self {
Size::Bits(size) => size,
Size::Bytes(size) => size.checked_mul(8).expect("bit size overflow"),
}
}
/// Returns the size in bytes of a Size
///
/// # Panics
    /// Panics if the bit size of `Size::Bytes(n)` is greater than `usize::MAX`.
pub fn byte_size(&self) -> Result<usize, DekuError> {
match *self {
Size::Bits(size) => {
if size % 8 == 0 {
Ok(size / 8)
} else {
                    Err(DekuError::InvalidParam(format!(
                        "Bit size of {} is not a multiple of 8; cannot be represented in bytes",
                        size
                    )))
}
}
Size::Bytes(size) => Ok(size),
}
}
}
| 26.65 | 90 | 0.552908 |
87649340d7b3af0a91510819319456841e7fe3bb | 76 | /*
! Select the test to run with:
* `cargo test --test {name of file}`
*/
| 15.2 | 39 | 0.552632 |