Dataset schema (column, type, length/class stats):

  file_name   large_string   lengths 4 - 140
  prefix      large_string   lengths 0 - 39k
  suffix      large_string   lengths 0 - 36.1k
  middle      large_string   lengths 0 - 29.4k
  fim_type    large_string   4 classes
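Each row below is a fill-in-the-middle (FIM) example: a source file is split into prefix, middle, and suffix, where middle is the span to be predicted and fim_type records how the split was chosen (random_line_split, identifier_body, conditional_block, ...). As a minimal sketch — assuming the three string fields really are a plain three-way split of the original file, which the rows below suggest but the schema does not state, and using an illustrative row rather than real dataset values — a file can be reassembled like this:

    # Minimal sketch: rebuild a source file from one FIM row.
    # Assumes row["prefix"] + row["middle"] + row["suffix"] is the complete file text.
    def reassemble(row: dict) -> str:
        return row["prefix"] + row["middle"] + row["suffix"]

    # Hypothetical identifier_body row: the prefix ends at a function signature
    # and the middle is the function body (mirroring the importer.go rows below).
    example = {
        "file_name": "importer.go",
        "prefix": "func contains(key string, search []string) bool ",
        "middle": "{\n\tfor _, val := range search {\n\t\tif val == key {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
        "suffix": "\n",
        "fim_type": "identifier_body",
    }
    print(reassemble(example))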
Transformer_prac.py
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import matplotlib.pyplot as plt

dtype = torch.FloatTensor

sentence = ['ich mochte ein bier P', 'S i want a beer', 'i want a beer E']
# S : symbol that marks the start of the decoder input
# E : symbol that marks the end of the decoder output
# P : padding symbol used when the current batch is shorter than the number of time steps

# Transformer parameters
src_voca = {'P': 0, 'ich': 1, 'mochte': 2, 'ein': 3, 'bier': 4}  # dictionary; P is padding = 0
src_voca_size = len(src_voca)
tgt_voca = {'P': 0, 'i': 1, 'want': 2, 'a': 3, 'beer': 4, 'S': 5, 'E': 6}
number_dict = {i: w for i, w in enumerate(tgt_voca)}
tgt_voca_size = len(tgt_voca)
src_len = 5
tgt_len = 5
d_model = 512    # embedding size
d_ff = 2048      # feed-forward dimension
n_layers = 6     # number of encoder/decoder layers
n_head = 8       # number of heads in multi-head attention
d_k = d_v = 64   # dimension of K (= Q, they must match) and of V

def make_batch(sentence):
    input_batch = [[src_voca[n] for n in sentence[0].split()]]   # build as a list
    output_batch = [[tgt_voca[n] for n in sentence[1].split()]]
    target_batch = [[tgt_voca[n] for n in sentence[2].split()]]
    return (Variable(torch.LongTensor(input_batch)),
            Variable(torch.LongTensor(output_batch)),
            Variable(torch.LongTensor(target_batch)))

# Variable = autograd wrapper: requires_grad defaults to False and it supports every tensor API.
# With x = Variable(torch.ones(2, 2), requires_grad=True):
#   the loss computed by the loss function is kept as a Variable so that the model parameter x can be trained;
#   calling loss.backward(), which computes dloss/dx, makes PyTorch store the gradient on x.
# requires_grad says whether the variable is trainable, so with the default (False) the tensor above is not.

def get_sinusoid_encoding_table(n_position, d_model):  # positional encoding
    def cal_angle(position, hid_idx):
        return position / np.power(10000, 2 * (hid_idx // 2) / d_model)  # pos / 10000^(2i/d_model)

    def get_posi_angle_vec(position):
        return [cal_angle(position, hid_j) for hid_j in range(d_model)]  # hid_j runs over 0..d_model-1

    sinusoid_table = np.array([get_posi_angle_vec(pos_i) for pos_i in range(n_position)])
    sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])
    # x[start:end:step] takes every step-th element, e.g. list(range(20))[1::3] == [1, 4, 7, 10, 13, 16, 19]
    sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])
    return torch.FloatTensor(sinusoid_table)

def get_attn_pad_mask(seq_q, seq_k):
    batch_size, len_q = seq_q.size()
    batch_size, len_k = seq_k.size()
    pad_attn_mask = seq_k.data.eq(0).unsqueeze(1)
    # eq: element-wise equality (True where the token is the padding index 0)
    # unsqueeze: for x = torch.tensor([1, 2, 3, 4]),
    #   torch.unsqueeze(x, 0) -> tensor([[1, 2, 3, 4]])
    #   torch.unsqueeze(x, 1) -> tensor([[1], [2], [3], [4]])
    return pad_attn_mask.expand(batch_size, len_q, len_k)
    # expand: for x = torch.tensor([[1], [2], [3]]) with x.size() == torch.Size([3, 1]),
    #   x.expand(3, 4)  -> tensor([[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]])
    #   x.expand(-1, 4) -> same result; -1 means that dimension keeps its size

def get_attn_subsequent_mask(seq):
    attn_shape = [seq.size(0), seq.size(1), seq.size(1)]
    subsequent_mask = np.triu(np.ones(attn_shape), k=1)
    # np.triu zeroes everything below the k-th diagonal and keeps the rest, e.g.
    # np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) ->
    #   [[ 1,  2,  3], [ 4,  5,  6], [ 0,  8,  9], [ 0,  0, 12]]
    subsequent_mask = torch.from_numpy(subsequent_mask).byte()  # convert the numpy array to a torch tensor
    return subsequent_mask

class ScaledDotProduct(nn.Module):
    def __init__(self):
        super(ScaledDotProduct, self).__init__()
        self.softmax = nn.Softmax(dim=-1)  # dim = dimension along which softmax is computed (NLLLoss would need LogSoftmax instead)
        self.const = np.sqrt(d_k)          # scaling constant sqrt(d_k)

    def forward(self, Q, K, V, att_mask):
        score = torch.matmul(Q, K.transpose(-1, -2)) / self.const  # transpose swaps the given dim0 and dim1
        score.masked_fill_(att_mask, -1e9)
        # masked_fill_(mask, value): mask is boolean; fills value wherever the mask is True
        attn = self.softmax(score)       # attention distribution
        context = torch.matmul(attn, V)
        return context, attn

############################################################
# What is `self`?
# class Foo:
#     def func1():        # defining it without self raises no error
#         print("func1")
#     def func2(self):
#         print("func2")
# f = Foo()               # create an instance of the class
# f.func2()  -> prints normally: the only argument of func2 is self and
#               Python passes it automatically, so no input is needed
# f.func1()  -> raises an error: func1 declares no parameters, but Python
#               still passes the instance automatically
# Inside a class, self refers to the instance of the class itself!
############################################################

class MultiHeadAttention(nn.Module):
    def __init__(self):
        super(MultiHeadAttention, self).__init__()
        # d_v = d_k; each projection is d_k * n_head because the n_head heads
        # run in parallel and are concatenated afterwards
        self.W_Q = nn.Linear(d_model, d_k * n_head)
        self.W_K = nn.Linear(d_model, d_k * n_head)
        self.W_V = nn.Linear(d_model, d_k * n_head)
        self.linear = nn.Linear(n_head * d_v, d_model)
        self.layer_norm = nn.LayerNorm(d_model)

    def forward(self, Q, K, V, att_mask):
        # encoder self-attention: Q, K, V are all the same;
        # decoder cross-attention: K and V are the same, Q differs
        residual = Q
        batch_size = Q.size(0)
        q_s = self.W_Q(Q).view(batch_size, -1, n_head, d_k).transpose(1, 2)
        k_s = self.W_K(K).view(batch_size, -1, n_head, d_k).transpose(1, 2)
        v_s = self.W_V(V).view(batch_size, -1, n_head, d_v).transpose(1, 2)
        att_mask = att_mask.unsqueeze(1).repeat(1, n_head, 1, 1)  # insert a head dimension and repeat the mask per head
        context, attn = ScaledDotProduct()(q_s, k_s, v_s, att_mask)
        context = context.transpose(1, 2).contiguous().view(batch_size, -1, n_head * d_v)
        # contiguous(): returns a tensor with the same data laid out contiguously in memory (needed before view after transpose)
        output = self.linear(context)  # pass the concatenated heads through one more weight matrix
        return self.layer_norm(output + residual), attn

class PositionwiseFFNN(nn.Module):
    def __init__(self):
        super(PositionwiseFFNN, self).__init__()
        # Conv1d with kernel_size=1 acts as a position-wise linear layer
        self.linear1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)  # W1: d_model x d_ff
        self.linear2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)  # W2: d_ff x d_model
        self.relu = nn.ReLU()
        self.layer_norm = nn.LayerNorm(d_model)

    def forward(self, input):
        residual = input
        output = self.linear1(input.transpose(1, 2))
        output = self.relu(output)
        output = self.linear2(output).transpose(1, 2)
        return self.layer_norm(output + residual)

class EncoderLayer(nn.Module):
    def __init__(self):
        super(EncoderLayer, self).__init__()
        self.enc_self_attn = MultiHeadAttention()
        self.PWfeedforward = PositionwiseFFNN()

    def forward(self, enc_input, enc_self_attn_mask):
        enc_output, attn = self.enc_self_attn(enc_input, enc_input, enc_input, enc_self_attn_mask)
        enc_output = self.PWfeedforward(enc_output)
        return enc_output, attn

class Encoder(nn.Module):
    def __init__(self):
        super(Encoder, self).__init__()
        self.src_emb = nn.Embedding(src_voca_size, d_model)  # Embedding: a lookup table used for embedding
        self.pos_emb = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(src_len + 1, d_model), freeze=True)
        self.layer = nn.ModuleList([EncoderLayer() for _ in range(n_layers)])

    def forward(self, enc_input):
        enc_output = self.src_emb(enc_input) + self.pos_emb(torch.LongTensor([[1, 2, 3, 4, 0]]))
        enc_self_attn_mask = get_attn_pad_mask(enc_input, enc_input)
        enc_self_attns = []
        for layer in self.layer:
            enc_output, enc_self_attn = layer(enc_output, enc_self_attn_mask)
            enc_self_attns.append(enc_self_attn)  # collect the per-layer attention maps
        return enc_output, enc_self_attns

class DecoderLayer(nn.Module):
    def __init__(self):
        super(DecoderLayer, self).__init__()
        self.dec_self_attn = MultiHeadAttention()
        self.dec_enc_attn = MultiHeadAttention()
        self.PWfeedforward = PositionwiseFFNN()

    def forward(self, dec_input, enc_output, dec_self_attn_mask, dec_enc_attn_mask):
        dec_output, dec_self_attn = self.dec_self_attn(dec_input, dec_input, dec_input, dec_self_attn_mask)
        dec_output, dec_enc_attn = self.dec_enc_attn(dec_output, enc_output, enc_output, dec_enc_attn_mask)
        dec_output = self.PWfeedforward(dec_output)
        return dec_output, dec_self_attn, dec_enc_attn

class Decoder(nn.Module):
    def __init__(self):
        super(Decoder, self).__init__()
        self.tgt_emb = nn.Embedding(tgt_voca_size, d_model)
        self.pos_emb = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(tgt_len + 1, d_model), freeze=True)
        self.layers = nn.ModuleList([DecoderLayer() for _ in range(n_layers)])

    def forward(self, dec_input, enc_input, enc_output):
        dec_output = self.tgt_emb(dec_input) + self.pos_emb(torch.LongTensor([[5, 1, 2, 3, 4]]))
        dec_self_attn_pad_mask = get_attn_pad_mask(dec_input, dec_input)
        dec_self_attn_subsequent_mask = get_attn_subsequent_mask(dec_input)
        dec_self_attn_mask = torch.gt((dec_self_attn_pad_mask + dec_self_attn_subsequent_mask), 0)
        dec_enc_attn_mask = get_attn_pad_mask(dec_input, enc_input)
        dec_self_attns, dec_enc_attns = [], []
        for layer in self.layers:
            dec_output, dec_self_attn, dec_enc_attn = layer(dec_output, enc_output, dec_self_attn_mask, dec_enc_attn_mask)
            dec_self_attns.append(dec_self_attn)
            dec_enc_attns.append(dec_enc_attn)
        return dec_output, dec_self_attns, dec_enc_attns

class Transformer(nn.Module):
    def __init__(self):
        super(Transformer, self).__init__()
        self.encoder = Encoder()
        self.decoder = Decoder()
        self.projection = nn.Linear(d_model, tgt_voca_size, bias=False)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, enc_input, dec_input):
        enc_output, enc_self_attn = self.encoder(enc_input)
        dec_output, dec_self_attn, dec_enc_attn = self.decoder(dec_input, enc_input, enc_output)
        dec_logit = self.projection(dec_output)
        return dec_logit.view(-1, dec_logit.size(-1)), enc_self_attn, dec_self_attn, dec_enc_attn

model = Transformer()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

for epoch in range(20):
    optimizer.zero_grad()
optimizer.step()
enc_input, dec_input, target_batch = make_batch(sentence)
outputs, enc_self_attns, dec_self_attns, dec_enc_attns = model(enc_input, dec_input)
loss = criterion(outputs, target_batch.contiguous().view(-1))
print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))
loss.backward()
random_line_split
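For reference, the get_sinusoid_encoding_table function in the Transformer_prac.py row above builds the standard sinusoidal positional-encoding table: sin on even embedding indices and cos on odd ones, matching the 10000^(2i/d_model) comment in the code. Written out, with pos the position and i the index of the sin/cos pair:

    PE(pos, 2i)   = sin(pos / 10000^(2i / d_model))
    PE(pos, 2i+1) = cos(pos / 10000^(2i / d_model))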
importer.go
// Copyright 2022 The LUCI Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package model import ( "archive/tar" "compress/gzip" "context" "fmt" "io" "regexp" "sort" "strings" "time" "go.chromium.org/luci/auth/identity" "go.chromium.org/luci/auth_service/api/configspb" "go.chromium.org/luci/common/data/stringset" "go.chromium.org/luci/common/errors" "go.chromium.org/luci/common/logging" "go.chromium.org/luci/gae/service/datastore" "go.chromium.org/luci/server/auth" "google.golang.org/protobuf/encoding/prototext" ) // Imports groups from some external tar.gz bundle or plain text list. // External URL should serve *.tar.gz file with the following file structure: // <external group system name>/<group name>: // userid // userid // ... // For example ldap.tar.gz may look like: // ldap/trusted-users: // jane // joe // ... // ldap/all: // jane // joe // ... // Each tarball may have groups from multiple external systems, but groups from // some external system must not be split between multiple tarballs. When importer // sees <external group system name>/* in a tarball, it modifies group list from // that system on the server to match group list in the tarball _exactly_, // including removal of groups that are on the server, but no longer present in // the tarball. // Plain list format should have one userid per line and can only describe a single // group in a single system. Such groups will be added to 'external/*' groups // namespace. Removing such group from importer config will remove it from // service too. // The service can also be configured to accept tarball uploads (instead of // fetching them). Fetched and uploaded tarballs are handled in the exact same way, // in particular all caveats related to external group system names apply. // GroupImporterConfig is a singleton entity that contains the contents of the imports.cfg file. type GroupImporterConfig struct { Kind string `gae:"$kind,GroupImporterConfig"` ID string `gae:"$id,config"` // ConfigProto is the plaintext copy of the config found at imports.cfg. ConfigProto string `gae:"config_proto"` // ConfigRevision is revision version of the config found at imports.cfg. ConfigRevision []byte `gae:"config_revision"` // ModifiedBy is the email of the user who modified the cfg. ModifiedBy string `gae:"modified_by"` // ModifiedTS is the time when this entity was last modified. ModifiedTS time.Time `gae:"modified_ts"` } var GroupNameRe = regexp.MustCompile(`^([a-z\-]+/)?[0-9a-z_\-\.@]{1,100}$`) // GroupBundle is a map where k: groupName, v: list of identities belonging to group k. type GroupBundle = map[string][]identity.Identity // GetGroupImporterConfig fetches the GroupImporterConfig entity from the datastore. // // Returns GroupImporterConfig entity if present. // Returns datastore.ErrNoSuchEntity if the entity is not present. // Returns annotated error for all other errors. 
func GetGroupImporterConfig(ctx context.Context) (*GroupImporterConfig, error) { groupsCfg := &GroupImporterConfig{ Kind: "GroupImporterConfig", ID: "config", } switch err := datastore.Get(ctx, groupsCfg); { case err == nil: return groupsCfg, nil case err == datastore.ErrNoSuchEntity: return nil, err default: return nil, errors.Annotate(err, "error getting GroupImporterConfig").Err() } } // IngestTarball handles upload of tarball's specified in 'tarball_upload' config entries. // expected to be called in an auth context of the upload PUT request. // // returns // // []string - list of modified groups // int64 - authDBRevision // error // proto translation error // entry is nil // entry not found in tarball upload config // unauthorized uploader // bad tarball structure func IngestTarball(ctx context.Context, name string, content io.Reader) ([]string, int64, error)
// loadTarball unzips tarball with groups and deserializes them. func loadTarball(ctx context.Context, content io.Reader, domain string, systems, groups []string) (map[string]GroupBundle, error) { // map looks like: K: system, V: { K: groupName, V: []identities } bundles := make(map[string]GroupBundle) entries, err := extractTarArchive(content) if err != nil { return nil, err } // verify system/groupname and then parse blob if valid for filename, fileobj := range entries { chunks := strings.Split(filename, "/") if len(chunks) != 2 || !GroupNameRe.MatchString(chunks[1]) { logging.Warningf(ctx, "Skipping file %s, not a valid name", filename) continue } if groups != nil && !contains(filename, groups) { continue } system := chunks[0] if !contains(system, systems) { logging.Warningf(ctx, "Skipping file %s, not allowed", filename) continue } identities, err := loadGroupFile(string(fileobj), domain) if err != nil { return nil, err } if _, ok := bundles[system]; !ok { bundles[system] = make(GroupBundle) } bundles[system][filename] = identities } return bundles, nil } func loadGroupFile(identities string, domain string) ([]identity.Identity, error) { members := make(map[identity.Identity]bool) memsSplit := strings.Split(identities, "\n") for _, uid := range memsSplit { uid = strings.TrimSpace(uid) if uid == "" { continue } var ident string if domain == "" { ident = fmt.Sprintf("user:%s", uid) } else { ident = fmt.Sprintf("user:%s@%s", uid, domain) } emailIdent, err := identity.MakeIdentity(ident) if err != nil { return nil, err } members[emailIdent] = true } membersSorted := make([]identity.Identity, 0, len(members)) for mem := range members { membersSorted = append(membersSorted, mem) } sort.Slice(membersSorted, func(i, j int) bool { return membersSorted[i].Value() < membersSorted[j].Value() }) return membersSorted, nil } // importBundles imports given set of bundles all at once. // A bundle is a map with groups that is the result of a processing of some tarball. // A bundle specifies the desired state of all groups under some system, e.g. // importBundles({'ldap': {}}, ...) will REMOVE all existing 'ldap/*' groups. // // Group names in the bundle are specified in their full prefixed form (with // system name prefix). An example of expected 'bundles': // // { // 'ldap': { // 'ldap/group': [Identity(...), Identity(...)], // }, // } // // Args: // // bundles: map system name -> GroupBundle // providedBy: auth.Identity to put in modifiedBy or createdBy fields. // // Returns: // // (list of modified groups, // new AuthDB revision number or 0 if no changes, // error if issue with writing entities). func importBundles(ctx context.Context, bundles map[string]GroupBundle, providedBy identity.Identity, testHook func()) ([]string, int64, error) { // Nothing to process. if len(bundles) == 0 { return []string{}, 0, nil } getAuthDBRevision := func(ctx context.Context) (int64, error) { state, err := GetReplicationState(ctx) switch { case err == datastore.ErrNoSuchEntity: return 0, nil case err != nil: return -1, err default: return state.AuthDBRev, nil } } // Fetches all existing groups and AuthDB revision number. 
groupsSnapshot := func(ctx context.Context) (gMap map[string]*AuthGroup, rev int64, err error) { err = datastore.RunInTransaction(ctx, func(ctx context.Context) error { groups, err := GetAllAuthGroups(ctx) if err != nil { return err } gMap = make(map[string]*AuthGroup, len(groups)) for _, g := range groups { gMap[g.ID] = g } rev, err = getAuthDBRevision(ctx) if err != nil { return errors.Annotate(err, "couldn't get AuthDBRev").Err() } return nil }, nil) return gMap, rev, err } // Transactionally puts and deletes a bunch of entities. applyImport := func(expectedRevision int64, entitiesToPut, entitiesToDelete []*AuthGroup, ts time.Time) error { // Runs in transaction. return runAuthDBChange(ctx, func(ctx context.Context, cae commitAuthEntity) error { rev, err := getAuthDBRevision(ctx) if err != nil { return err } // DB changed between transactions try again. if rev != expectedRevision { return errors.New("revision numbers don't match") } for _, e := range entitiesToPut { if err := cae(e, ts, providedBy, false); err != nil { return err } } for _, e := range entitiesToDelete { if err := cae(e, ts, providedBy, true); err != nil { return err } } return nil }) } updatedGroups := stringset.New(0) revision := int64(0) loopCount := 0 var groups map[string]*AuthGroup var err error // Try to apply the change in batches until it lands completely or deadline // happens. Split each batch update into two transactions (assuming AuthDB // changes infrequently) to avoid reading and writing too much stuff from // within a single transaction (and to avoid keeping the transaction open while // calculating the diff). for { // Use same timestamp everywhere to reflect that groups were imported // atomically within a single transaction. ts := time.Now().UTC() loopCount += 1 groups, revision, err = groupsSnapshot(ctx) if err != nil { return nil, revision, err } // For testing purposes only. if testHook != nil && loopCount == 2 { testHook() } entitiesToPut := []*AuthGroup{} entitiesToDel := []*AuthGroup{} for sys := range bundles { iGroups := bundles[sys] toPut, toDel := prepareImport(ctx, sys, groups, iGroups) entitiesToPut = append(entitiesToPut, toPut...) entitiesToDel = append(entitiesToDel, toDel...) } if len(entitiesToPut) == 0 && len(entitiesToDel) == 0 { logging.Infof(ctx, "nothing to do") break } // An `applyImport` transaction can touch at most 500 entities. Cap the // number of entities we create/delete by 200 each since we attach a historical // entity to each entity. The rest will be updated on the next cycle of the loop. // This is safe to do since: // * Imported groups are "leaf" groups (have no subgroups) and can be added // in arbitrary order without worrying about referential integrity. // * Deleted groups are guaranteed to be unreferenced by `prepareImport` // and can be deleted in arbitrary order as well. truncated := false // Both these operations happen in the same transaction so we have // to trim it to make sure the total is <= 200. if len(entitiesToPut) > 200 { entitiesToPut = entitiesToPut[:200] entitiesToDel = nil truncated = true } else if len(entitiesToPut)+len(entitiesToDel) > 200 { entitiesToDel = entitiesToDel[:200-len(entitiesToPut)] truncated = true } // Log what we are about to do to help debugging transaction errors. 
logging.Infof(ctx, "Preparing AuthDB rev %d with %d puts and %d deletes:", revision+1, len(entitiesToPut), len(entitiesToDel)) for _, e := range entitiesToPut { logging.Infof(ctx, "U %s", e.ID) updatedGroups.Add(e.ID) } for _, e := range entitiesToDel { logging.Infof(ctx, "D %s", e.ID) updatedGroups.Add(e.ID) } // Land the change iff the current AuthDB revision is still == `revision`. err := applyImport(revision, entitiesToPut, entitiesToDel, ts) if err != nil && strings.Contains(err.Error(), "revision numbers don't match") { logging.Warningf(ctx, "authdb changed between transactions, retrying...") continue } else if err != nil { logging.Errorf(ctx, "couldn't apply changes to datastore entities %s", err.Error()) return nil, revision, err } // The new revision has landed revision += 1 if truncated { logging.Infof(ctx, "going for another round to push the rest of the groups") time.Sleep(time.Second * 5) continue } logging.Infof(ctx, "Done") break } if len(updatedGroups) > 0 { return updatedGroups.ToSortedSlice(), int64(revision), nil } return nil, 0, nil } // prepareImport compares the bundle given to the what is currently present in datastore // to get the operations for all the groups. func prepareImport(ctx context.Context, systemName string, existingGroups map[string]*AuthGroup, iGroups GroupBundle) (toPut []*AuthGroup, toDel []*AuthGroup) { systemGroups := []string{} iGroupsSet := stringset.New(len(iGroups)) for gID := range existingGroups { if strings.HasPrefix(gID, fmt.Sprintf("%s/", systemName)) { systemGroups = append(systemGroups, gID) } } for groupName := range iGroups { iGroupsSet.Add(groupName) } sysGroupsSet := stringset.NewFromSlice(systemGroups...) toCreate := iGroupsSet.Difference(sysGroupsSet).ToSlice() for _, g := range toCreate { group := makeAuthGroup(ctx, g) group.Members = identitiesToStrings(iGroups[g]) toPut = append(toPut, group) } toUpdate := sysGroupsSet.Intersect(iGroupsSet).ToSlice() for _, g := range toUpdate { importGMems := stringset.NewFromSlice(identitiesToStrings(iGroups[g])...) existMems := existingGroups[g].Members if !(len(importGMems) == len(existMems) && importGMems.HasAll(existMems...)) { group := makeAuthGroup(ctx, g) group.Members = importGMems.ToSlice() toPut = append(toPut, group) } } toDelete := sysGroupsSet.Difference(iGroupsSet).ToSlice() for _, g := range toDelete { group := makeAuthGroup(ctx, g) toDel = append(toDel, group) } return toPut, toDel } func identitiesToStrings(idents []identity.Identity) []string { res := make([]string, len(idents)) for i, id := range idents { res[i] = string(id) } return res } // extractTarArchive unpacks a tar archive and returns a map // of filename -> fileobj pairs. func extractTarArchive(r io.Reader) (map[string][]byte, error) { entries := make(map[string][]byte) gzr, err := gzip.NewReader(r) if err != nil { return nil, err } tr := tar.NewReader(gzr) for { header, err := tr.Next() if err == io.EOF { break } if err != nil { return nil, err } fileContents, err := io.ReadAll(tr) if err != nil { return nil, err } entries[header.Name] = fileContents } if err := gzr.Close(); err != nil { return nil, err } return entries, nil } // TODO(cjacomet): replace with slices.Contains when // slices package isn't experimental. func contains(key string, search []string) bool { for _, val := range search { if val == key { return true } } return false } // ToProto converts the GroupImporterConfig entity to the proto equivalent. 
func (g *GroupImporterConfig) ToProto() (*configspb.GroupImporterConfig, error) { gConfig := &configspb.GroupImporterConfig{} if err := prototext.Unmarshal([]byte(g.ConfigProto), gConfig); err != nil { return nil, err } return gConfig, nil }
{ g, err := GetGroupImporterConfig(ctx) if err != nil { return nil, 0, err } gConfigProto, err := g.ToProto() if err != nil { return nil, 0, errors.Annotate(err, "issue getting proto from config entity").Err() } caller := auth.CurrentIdentity(ctx) var entry *configspb.GroupImporterConfig_TarballUploadEntry // make sure that tarball_upload entry we're looking for is specified in config for _, tbu := range gConfigProto.GetTarballUpload() { if tbu.Name == name { entry = tbu break } } if entry == nil { return nil, 0, errors.New("entry is nil") } if entry.Name == "" { return nil, 0, errors.New("entry not found in tarball upload names") } if !contains(caller.Email(), entry.AuthorizedUploader) { return nil, 0, errors.New(fmt.Sprintf("%q is not an authorized uploader", caller.Email())) } bundles, err := loadTarball(ctx, content, entry.GetDomain(), entry.GetSystems(), entry.GetGroups()) if err != nil { return nil, 0, errors.Annotate(err, "bad tarball").Err() } return importBundles(ctx, bundles, caller, nil) }
identifier_body
importer.go
// Copyright 2022 The LUCI Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package model import ( "archive/tar" "compress/gzip" "context" "fmt" "io" "regexp" "sort" "strings" "time" "go.chromium.org/luci/auth/identity" "go.chromium.org/luci/auth_service/api/configspb" "go.chromium.org/luci/common/data/stringset" "go.chromium.org/luci/common/errors" "go.chromium.org/luci/common/logging" "go.chromium.org/luci/gae/service/datastore" "go.chromium.org/luci/server/auth" "google.golang.org/protobuf/encoding/prototext" ) // Imports groups from some external tar.gz bundle or plain text list. // External URL should serve *.tar.gz file with the following file structure: // <external group system name>/<group name>: // userid // userid // ... // For example ldap.tar.gz may look like: // ldap/trusted-users: // jane // joe // ... // ldap/all: // jane // joe // ...
// including removal of groups that are on the server, but no longer present in // the tarball. // Plain list format should have one userid per line and can only describe a single // group in a single system. Such groups will be added to 'external/*' groups // namespace. Removing such group from importer config will remove it from // service too. // The service can also be configured to accept tarball uploads (instead of // fetching them). Fetched and uploaded tarballs are handled in the exact same way, // in particular all caveats related to external group system names apply. // GroupImporterConfig is a singleton entity that contains the contents of the imports.cfg file. type GroupImporterConfig struct { Kind string `gae:"$kind,GroupImporterConfig"` ID string `gae:"$id,config"` // ConfigProto is the plaintext copy of the config found at imports.cfg. ConfigProto string `gae:"config_proto"` // ConfigRevision is revision version of the config found at imports.cfg. ConfigRevision []byte `gae:"config_revision"` // ModifiedBy is the email of the user who modified the cfg. ModifiedBy string `gae:"modified_by"` // ModifiedTS is the time when this entity was last modified. ModifiedTS time.Time `gae:"modified_ts"` } var GroupNameRe = regexp.MustCompile(`^([a-z\-]+/)?[0-9a-z_\-\.@]{1,100}$`) // GroupBundle is a map where k: groupName, v: list of identities belonging to group k. type GroupBundle = map[string][]identity.Identity // GetGroupImporterConfig fetches the GroupImporterConfig entity from the datastore. // // Returns GroupImporterConfig entity if present. // Returns datastore.ErrNoSuchEntity if the entity is not present. // Returns annotated error for all other errors. func GetGroupImporterConfig(ctx context.Context) (*GroupImporterConfig, error) { groupsCfg := &GroupImporterConfig{ Kind: "GroupImporterConfig", ID: "config", } switch err := datastore.Get(ctx, groupsCfg); { case err == nil: return groupsCfg, nil case err == datastore.ErrNoSuchEntity: return nil, err default: return nil, errors.Annotate(err, "error getting GroupImporterConfig").Err() } } // IngestTarball handles upload of tarball's specified in 'tarball_upload' config entries. // expected to be called in an auth context of the upload PUT request. 
// // returns // // []string - list of modified groups // int64 - authDBRevision // error // proto translation error // entry is nil // entry not found in tarball upload config // unauthorized uploader // bad tarball structure func IngestTarball(ctx context.Context, name string, content io.Reader) ([]string, int64, error) { g, err := GetGroupImporterConfig(ctx) if err != nil { return nil, 0, err } gConfigProto, err := g.ToProto() if err != nil { return nil, 0, errors.Annotate(err, "issue getting proto from config entity").Err() } caller := auth.CurrentIdentity(ctx) var entry *configspb.GroupImporterConfig_TarballUploadEntry // make sure that tarball_upload entry we're looking for is specified in config for _, tbu := range gConfigProto.GetTarballUpload() { if tbu.Name == name { entry = tbu break } } if entry == nil { return nil, 0, errors.New("entry is nil") } if entry.Name == "" { return nil, 0, errors.New("entry not found in tarball upload names") } if !contains(caller.Email(), entry.AuthorizedUploader) { return nil, 0, errors.New(fmt.Sprintf("%q is not an authorized uploader", caller.Email())) } bundles, err := loadTarball(ctx, content, entry.GetDomain(), entry.GetSystems(), entry.GetGroups()) if err != nil { return nil, 0, errors.Annotate(err, "bad tarball").Err() } return importBundles(ctx, bundles, caller, nil) } // loadTarball unzips tarball with groups and deserializes them. func loadTarball(ctx context.Context, content io.Reader, domain string, systems, groups []string) (map[string]GroupBundle, error) { // map looks like: K: system, V: { K: groupName, V: []identities } bundles := make(map[string]GroupBundle) entries, err := extractTarArchive(content) if err != nil { return nil, err } // verify system/groupname and then parse blob if valid for filename, fileobj := range entries { chunks := strings.Split(filename, "/") if len(chunks) != 2 || !GroupNameRe.MatchString(chunks[1]) { logging.Warningf(ctx, "Skipping file %s, not a valid name", filename) continue } if groups != nil && !contains(filename, groups) { continue } system := chunks[0] if !contains(system, systems) { logging.Warningf(ctx, "Skipping file %s, not allowed", filename) continue } identities, err := loadGroupFile(string(fileobj), domain) if err != nil { return nil, err } if _, ok := bundles[system]; !ok { bundles[system] = make(GroupBundle) } bundles[system][filename] = identities } return bundles, nil } func loadGroupFile(identities string, domain string) ([]identity.Identity, error) { members := make(map[identity.Identity]bool) memsSplit := strings.Split(identities, "\n") for _, uid := range memsSplit { uid = strings.TrimSpace(uid) if uid == "" { continue } var ident string if domain == "" { ident = fmt.Sprintf("user:%s", uid) } else { ident = fmt.Sprintf("user:%s@%s", uid, domain) } emailIdent, err := identity.MakeIdentity(ident) if err != nil { return nil, err } members[emailIdent] = true } membersSorted := make([]identity.Identity, 0, len(members)) for mem := range members { membersSorted = append(membersSorted, mem) } sort.Slice(membersSorted, func(i, j int) bool { return membersSorted[i].Value() < membersSorted[j].Value() }) return membersSorted, nil } // importBundles imports given set of bundles all at once. // A bundle is a map with groups that is the result of a processing of some tarball. // A bundle specifies the desired state of all groups under some system, e.g. // importBundles({'ldap': {}}, ...) will REMOVE all existing 'ldap/*' groups. 
// // Group names in the bundle are specified in their full prefixed form (with // system name prefix). An example of expected 'bundles': // // { // 'ldap': { // 'ldap/group': [Identity(...), Identity(...)], // }, // } // // Args: // // bundles: map system name -> GroupBundle // providedBy: auth.Identity to put in modifiedBy or createdBy fields. // // Returns: // // (list of modified groups, // new AuthDB revision number or 0 if no changes, // error if issue with writing entities). func importBundles(ctx context.Context, bundles map[string]GroupBundle, providedBy identity.Identity, testHook func()) ([]string, int64, error) { // Nothing to process. if len(bundles) == 0 { return []string{}, 0, nil } getAuthDBRevision := func(ctx context.Context) (int64, error) { state, err := GetReplicationState(ctx) switch { case err == datastore.ErrNoSuchEntity: return 0, nil case err != nil: return -1, err default: return state.AuthDBRev, nil } } // Fetches all existing groups and AuthDB revision number. groupsSnapshot := func(ctx context.Context) (gMap map[string]*AuthGroup, rev int64, err error) { err = datastore.RunInTransaction(ctx, func(ctx context.Context) error { groups, err := GetAllAuthGroups(ctx) if err != nil { return err } gMap = make(map[string]*AuthGroup, len(groups)) for _, g := range groups { gMap[g.ID] = g } rev, err = getAuthDBRevision(ctx) if err != nil { return errors.Annotate(err, "couldn't get AuthDBRev").Err() } return nil }, nil) return gMap, rev, err } // Transactionally puts and deletes a bunch of entities. applyImport := func(expectedRevision int64, entitiesToPut, entitiesToDelete []*AuthGroup, ts time.Time) error { // Runs in transaction. return runAuthDBChange(ctx, func(ctx context.Context, cae commitAuthEntity) error { rev, err := getAuthDBRevision(ctx) if err != nil { return err } // DB changed between transactions try again. if rev != expectedRevision { return errors.New("revision numbers don't match") } for _, e := range entitiesToPut { if err := cae(e, ts, providedBy, false); err != nil { return err } } for _, e := range entitiesToDelete { if err := cae(e, ts, providedBy, true); err != nil { return err } } return nil }) } updatedGroups := stringset.New(0) revision := int64(0) loopCount := 0 var groups map[string]*AuthGroup var err error // Try to apply the change in batches until it lands completely or deadline // happens. Split each batch update into two transactions (assuming AuthDB // changes infrequently) to avoid reading and writing too much stuff from // within a single transaction (and to avoid keeping the transaction open while // calculating the diff). for { // Use same timestamp everywhere to reflect that groups were imported // atomically within a single transaction. ts := time.Now().UTC() loopCount += 1 groups, revision, err = groupsSnapshot(ctx) if err != nil { return nil, revision, err } // For testing purposes only. if testHook != nil && loopCount == 2 { testHook() } entitiesToPut := []*AuthGroup{} entitiesToDel := []*AuthGroup{} for sys := range bundles { iGroups := bundles[sys] toPut, toDel := prepareImport(ctx, sys, groups, iGroups) entitiesToPut = append(entitiesToPut, toPut...) entitiesToDel = append(entitiesToDel, toDel...) } if len(entitiesToPut) == 0 && len(entitiesToDel) == 0 { logging.Infof(ctx, "nothing to do") break } // An `applyImport` transaction can touch at most 500 entities. Cap the // number of entities we create/delete by 200 each since we attach a historical // entity to each entity. 
The rest will be updated on the next cycle of the loop. // This is safe to do since: // * Imported groups are "leaf" groups (have no subgroups) and can be added // in arbitrary order without worrying about referential integrity. // * Deleted groups are guaranteed to be unreferenced by `prepareImport` // and can be deleted in arbitrary order as well. truncated := false // Both these operations happen in the same transaction so we have // to trim it to make sure the total is <= 200. if len(entitiesToPut) > 200 { entitiesToPut = entitiesToPut[:200] entitiesToDel = nil truncated = true } else if len(entitiesToPut)+len(entitiesToDel) > 200 { entitiesToDel = entitiesToDel[:200-len(entitiesToPut)] truncated = true } // Log what we are about to do to help debugging transaction errors. logging.Infof(ctx, "Preparing AuthDB rev %d with %d puts and %d deletes:", revision+1, len(entitiesToPut), len(entitiesToDel)) for _, e := range entitiesToPut { logging.Infof(ctx, "U %s", e.ID) updatedGroups.Add(e.ID) } for _, e := range entitiesToDel { logging.Infof(ctx, "D %s", e.ID) updatedGroups.Add(e.ID) } // Land the change iff the current AuthDB revision is still == `revision`. err := applyImport(revision, entitiesToPut, entitiesToDel, ts) if err != nil && strings.Contains(err.Error(), "revision numbers don't match") { logging.Warningf(ctx, "authdb changed between transactions, retrying...") continue } else if err != nil { logging.Errorf(ctx, "couldn't apply changes to datastore entities %s", err.Error()) return nil, revision, err } // The new revision has landed revision += 1 if truncated { logging.Infof(ctx, "going for another round to push the rest of the groups") time.Sleep(time.Second * 5) continue } logging.Infof(ctx, "Done") break } if len(updatedGroups) > 0 { return updatedGroups.ToSortedSlice(), int64(revision), nil } return nil, 0, nil } // prepareImport compares the bundle given to the what is currently present in datastore // to get the operations for all the groups. func prepareImport(ctx context.Context, systemName string, existingGroups map[string]*AuthGroup, iGroups GroupBundle) (toPut []*AuthGroup, toDel []*AuthGroup) { systemGroups := []string{} iGroupsSet := stringset.New(len(iGroups)) for gID := range existingGroups { if strings.HasPrefix(gID, fmt.Sprintf("%s/", systemName)) { systemGroups = append(systemGroups, gID) } } for groupName := range iGroups { iGroupsSet.Add(groupName) } sysGroupsSet := stringset.NewFromSlice(systemGroups...) toCreate := iGroupsSet.Difference(sysGroupsSet).ToSlice() for _, g := range toCreate { group := makeAuthGroup(ctx, g) group.Members = identitiesToStrings(iGroups[g]) toPut = append(toPut, group) } toUpdate := sysGroupsSet.Intersect(iGroupsSet).ToSlice() for _, g := range toUpdate { importGMems := stringset.NewFromSlice(identitiesToStrings(iGroups[g])...) existMems := existingGroups[g].Members if !(len(importGMems) == len(existMems) && importGMems.HasAll(existMems...)) { group := makeAuthGroup(ctx, g) group.Members = importGMems.ToSlice() toPut = append(toPut, group) } } toDelete := sysGroupsSet.Difference(iGroupsSet).ToSlice() for _, g := range toDelete { group := makeAuthGroup(ctx, g) toDel = append(toDel, group) } return toPut, toDel } func identitiesToStrings(idents []identity.Identity) []string { res := make([]string, len(idents)) for i, id := range idents { res[i] = string(id) } return res } // extractTarArchive unpacks a tar archive and returns a map // of filename -> fileobj pairs. 
func extractTarArchive(r io.Reader) (map[string][]byte, error) { entries := make(map[string][]byte) gzr, err := gzip.NewReader(r) if err != nil { return nil, err } tr := tar.NewReader(gzr) for { header, err := tr.Next() if err == io.EOF { break } if err != nil { return nil, err } fileContents, err := io.ReadAll(tr) if err != nil { return nil, err } entries[header.Name] = fileContents } if err := gzr.Close(); err != nil { return nil, err } return entries, nil } // TODO(cjacomet): replace with slices.Contains when // slices package isn't experimental. func contains(key string, search []string) bool { for _, val := range search { if val == key { return true } } return false } // ToProto converts the GroupImporterConfig entity to the proto equivalent. func (g *GroupImporterConfig) ToProto() (*configspb.GroupImporterConfig, error) { gConfig := &configspb.GroupImporterConfig{} if err := prototext.Unmarshal([]byte(g.ConfigProto), gConfig); err != nil { return nil, err } return gConfig, nil }
// Each tarball may have groups from multiple external systems, but groups from // some external system must not be split between multiple tarballs. When importer // sees <external group system name>/* in a tarball, it modifies group list from // that system on the server to match group list in the tarball _exactly_,
random_line_split
importer.go
// Copyright 2022 The LUCI Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package model import ( "archive/tar" "compress/gzip" "context" "fmt" "io" "regexp" "sort" "strings" "time" "go.chromium.org/luci/auth/identity" "go.chromium.org/luci/auth_service/api/configspb" "go.chromium.org/luci/common/data/stringset" "go.chromium.org/luci/common/errors" "go.chromium.org/luci/common/logging" "go.chromium.org/luci/gae/service/datastore" "go.chromium.org/luci/server/auth" "google.golang.org/protobuf/encoding/prototext" ) // Imports groups from some external tar.gz bundle or plain text list. // External URL should serve *.tar.gz file with the following file structure: // <external group system name>/<group name>: // userid // userid // ... // For example ldap.tar.gz may look like: // ldap/trusted-users: // jane // joe // ... // ldap/all: // jane // joe // ... // Each tarball may have groups from multiple external systems, but groups from // some external system must not be split between multiple tarballs. When importer // sees <external group system name>/* in a tarball, it modifies group list from // that system on the server to match group list in the tarball _exactly_, // including removal of groups that are on the server, but no longer present in // the tarball. // Plain list format should have one userid per line and can only describe a single // group in a single system. Such groups will be added to 'external/*' groups // namespace. Removing such group from importer config will remove it from // service too. // The service can also be configured to accept tarball uploads (instead of // fetching them). Fetched and uploaded tarballs are handled in the exact same way, // in particular all caveats related to external group system names apply. // GroupImporterConfig is a singleton entity that contains the contents of the imports.cfg file. type GroupImporterConfig struct { Kind string `gae:"$kind,GroupImporterConfig"` ID string `gae:"$id,config"` // ConfigProto is the plaintext copy of the config found at imports.cfg. ConfigProto string `gae:"config_proto"` // ConfigRevision is revision version of the config found at imports.cfg. ConfigRevision []byte `gae:"config_revision"` // ModifiedBy is the email of the user who modified the cfg. ModifiedBy string `gae:"modified_by"` // ModifiedTS is the time when this entity was last modified. ModifiedTS time.Time `gae:"modified_ts"` } var GroupNameRe = regexp.MustCompile(`^([a-z\-]+/)?[0-9a-z_\-\.@]{1,100}$`) // GroupBundle is a map where k: groupName, v: list of identities belonging to group k. type GroupBundle = map[string][]identity.Identity // GetGroupImporterConfig fetches the GroupImporterConfig entity from the datastore. // // Returns GroupImporterConfig entity if present. // Returns datastore.ErrNoSuchEntity if the entity is not present. // Returns annotated error for all other errors. 
func GetGroupImporterConfig(ctx context.Context) (*GroupImporterConfig, error) { groupsCfg := &GroupImporterConfig{ Kind: "GroupImporterConfig", ID: "config", } switch err := datastore.Get(ctx, groupsCfg); { case err == nil: return groupsCfg, nil case err == datastore.ErrNoSuchEntity: return nil, err default: return nil, errors.Annotate(err, "error getting GroupImporterConfig").Err() } } // IngestTarball handles upload of tarball's specified in 'tarball_upload' config entries. // expected to be called in an auth context of the upload PUT request. // // returns // // []string - list of modified groups // int64 - authDBRevision // error // proto translation error // entry is nil // entry not found in tarball upload config // unauthorized uploader // bad tarball structure func IngestTarball(ctx context.Context, name string, content io.Reader) ([]string, int64, error) { g, err := GetGroupImporterConfig(ctx) if err != nil { return nil, 0, err } gConfigProto, err := g.ToProto() if err != nil { return nil, 0, errors.Annotate(err, "issue getting proto from config entity").Err() } caller := auth.CurrentIdentity(ctx) var entry *configspb.GroupImporterConfig_TarballUploadEntry // make sure that tarball_upload entry we're looking for is specified in config for _, tbu := range gConfigProto.GetTarballUpload() { if tbu.Name == name { entry = tbu break } } if entry == nil { return nil, 0, errors.New("entry is nil") } if entry.Name == "" { return nil, 0, errors.New("entry not found in tarball upload names") } if !contains(caller.Email(), entry.AuthorizedUploader) { return nil, 0, errors.New(fmt.Sprintf("%q is not an authorized uploader", caller.Email())) } bundles, err := loadTarball(ctx, content, entry.GetDomain(), entry.GetSystems(), entry.GetGroups()) if err != nil { return nil, 0, errors.Annotate(err, "bad tarball").Err() } return importBundles(ctx, bundles, caller, nil) } // loadTarball unzips tarball with groups and deserializes them. 
func loadTarball(ctx context.Context, content io.Reader, domain string, systems, groups []string) (map[string]GroupBundle, error) { // map looks like: K: system, V: { K: groupName, V: []identities } bundles := make(map[string]GroupBundle) entries, err := extractTarArchive(content) if err != nil { return nil, err } // verify system/groupname and then parse blob if valid for filename, fileobj := range entries { chunks := strings.Split(filename, "/") if len(chunks) != 2 || !GroupNameRe.MatchString(chunks[1]) { logging.Warningf(ctx, "Skipping file %s, not a valid name", filename) continue } if groups != nil && !contains(filename, groups) { continue } system := chunks[0] if !contains(system, systems) { logging.Warningf(ctx, "Skipping file %s, not allowed", filename) continue } identities, err := loadGroupFile(string(fileobj), domain) if err != nil { return nil, err } if _, ok := bundles[system]; !ok { bundles[system] = make(GroupBundle) } bundles[system][filename] = identities } return bundles, nil } func loadGroupFile(identities string, domain string) ([]identity.Identity, error) { members := make(map[identity.Identity]bool) memsSplit := strings.Split(identities, "\n") for _, uid := range memsSplit { uid = strings.TrimSpace(uid) if uid == "" { continue } var ident string if domain == "" { ident = fmt.Sprintf("user:%s", uid) } else { ident = fmt.Sprintf("user:%s@%s", uid, domain) } emailIdent, err := identity.MakeIdentity(ident) if err != nil { return nil, err } members[emailIdent] = true } membersSorted := make([]identity.Identity, 0, len(members)) for mem := range members { membersSorted = append(membersSorted, mem) } sort.Slice(membersSorted, func(i, j int) bool { return membersSorted[i].Value() < membersSorted[j].Value() }) return membersSorted, nil } // importBundles imports given set of bundles all at once. // A bundle is a map with groups that is the result of a processing of some tarball. // A bundle specifies the desired state of all groups under some system, e.g. // importBundles({'ldap': {}}, ...) will REMOVE all existing 'ldap/*' groups. // // Group names in the bundle are specified in their full prefixed form (with // system name prefix). An example of expected 'bundles': // // { // 'ldap': { // 'ldap/group': [Identity(...), Identity(...)], // }, // } // // Args: // // bundles: map system name -> GroupBundle // providedBy: auth.Identity to put in modifiedBy or createdBy fields. // // Returns: // // (list of modified groups, // new AuthDB revision number or 0 if no changes, // error if issue with writing entities). func importBundles(ctx context.Context, bundles map[string]GroupBundle, providedBy identity.Identity, testHook func()) ([]string, int64, error) { // Nothing to process. if len(bundles) == 0 { return []string{}, 0, nil } getAuthDBRevision := func(ctx context.Context) (int64, error) { state, err := GetReplicationState(ctx) switch { case err == datastore.ErrNoSuchEntity: return 0, nil case err != nil: return -1, err default: return state.AuthDBRev, nil } } // Fetches all existing groups and AuthDB revision number. 
groupsSnapshot := func(ctx context.Context) (gMap map[string]*AuthGroup, rev int64, err error) { err = datastore.RunInTransaction(ctx, func(ctx context.Context) error { groups, err := GetAllAuthGroups(ctx) if err != nil { return err } gMap = make(map[string]*AuthGroup, len(groups)) for _, g := range groups { gMap[g.ID] = g } rev, err = getAuthDBRevision(ctx) if err != nil { return errors.Annotate(err, "couldn't get AuthDBRev").Err() } return nil }, nil) return gMap, rev, err } // Transactionally puts and deletes a bunch of entities. applyImport := func(expectedRevision int64, entitiesToPut, entitiesToDelete []*AuthGroup, ts time.Time) error { // Runs in transaction. return runAuthDBChange(ctx, func(ctx context.Context, cae commitAuthEntity) error { rev, err := getAuthDBRevision(ctx) if err != nil { return err } // DB changed between transactions try again. if rev != expectedRevision { return errors.New("revision numbers don't match") } for _, e := range entitiesToPut { if err := cae(e, ts, providedBy, false); err != nil { return err } } for _, e := range entitiesToDelete { if err := cae(e, ts, providedBy, true); err != nil { return err } } return nil }) } updatedGroups := stringset.New(0) revision := int64(0) loopCount := 0 var groups map[string]*AuthGroup var err error // Try to apply the change in batches until it lands completely or deadline // happens. Split each batch update into two transactions (assuming AuthDB // changes infrequently) to avoid reading and writing too much stuff from // within a single transaction (and to avoid keeping the transaction open while // calculating the diff). for { // Use same timestamp everywhere to reflect that groups were imported // atomically within a single transaction. ts := time.Now().UTC() loopCount += 1 groups, revision, err = groupsSnapshot(ctx) if err != nil { return nil, revision, err } // For testing purposes only. if testHook != nil && loopCount == 2 { testHook() } entitiesToPut := []*AuthGroup{} entitiesToDel := []*AuthGroup{} for sys := range bundles { iGroups := bundles[sys] toPut, toDel := prepareImport(ctx, sys, groups, iGroups) entitiesToPut = append(entitiesToPut, toPut...) entitiesToDel = append(entitiesToDel, toDel...) } if len(entitiesToPut) == 0 && len(entitiesToDel) == 0 { logging.Infof(ctx, "nothing to do") break } // An `applyImport` transaction can touch at most 500 entities. Cap the // number of entities we create/delete by 200 each since we attach a historical // entity to each entity. The rest will be updated on the next cycle of the loop. // This is safe to do since: // * Imported groups are "leaf" groups (have no subgroups) and can be added // in arbitrary order without worrying about referential integrity. // * Deleted groups are guaranteed to be unreferenced by `prepareImport` // and can be deleted in arbitrary order as well. truncated := false // Both these operations happen in the same transaction so we have // to trim it to make sure the total is <= 200. if len(entitiesToPut) > 200 { entitiesToPut = entitiesToPut[:200] entitiesToDel = nil truncated = true } else if len(entitiesToPut)+len(entitiesToDel) > 200 { entitiesToDel = entitiesToDel[:200-len(entitiesToPut)] truncated = true } // Log what we are about to do to help debugging transaction errors. 
logging.Infof(ctx, "Preparing AuthDB rev %d with %d puts and %d deletes:", revision+1, len(entitiesToPut), len(entitiesToDel)) for _, e := range entitiesToPut { logging.Infof(ctx, "U %s", e.ID) updatedGroups.Add(e.ID) } for _, e := range entitiesToDel { logging.Infof(ctx, "D %s", e.ID) updatedGroups.Add(e.ID) } // Land the change iff the current AuthDB revision is still == `revision`. err := applyImport(revision, entitiesToPut, entitiesToDel, ts) if err != nil && strings.Contains(err.Error(), "revision numbers don't match") { logging.Warningf(ctx, "authdb changed between transactions, retrying...") continue } else if err != nil { logging.Errorf(ctx, "couldn't apply changes to datastore entities %s", err.Error()) return nil, revision, err } // The new revision has landed revision += 1 if truncated { logging.Infof(ctx, "going for another round to push the rest of the groups") time.Sleep(time.Second * 5) continue } logging.Infof(ctx, "Done") break } if len(updatedGroups) > 0 { return updatedGroups.ToSortedSlice(), int64(revision), nil } return nil, 0, nil } // prepareImport compares the bundle given to the what is currently present in datastore // to get the operations for all the groups. func prepareImport(ctx context.Context, systemName string, existingGroups map[string]*AuthGroup, iGroups GroupBundle) (toPut []*AuthGroup, toDel []*AuthGroup) { systemGroups := []string{} iGroupsSet := stringset.New(len(iGroups)) for gID := range existingGroups { if strings.HasPrefix(gID, fmt.Sprintf("%s/", systemName))
} for groupName := range iGroups { iGroupsSet.Add(groupName) } sysGroupsSet := stringset.NewFromSlice(systemGroups...) toCreate := iGroupsSet.Difference(sysGroupsSet).ToSlice() for _, g := range toCreate { group := makeAuthGroup(ctx, g) group.Members = identitiesToStrings(iGroups[g]) toPut = append(toPut, group) } toUpdate := sysGroupsSet.Intersect(iGroupsSet).ToSlice() for _, g := range toUpdate { importGMems := stringset.NewFromSlice(identitiesToStrings(iGroups[g])...) existMems := existingGroups[g].Members if !(len(importGMems) == len(existMems) && importGMems.HasAll(existMems...)) { group := makeAuthGroup(ctx, g) group.Members = importGMems.ToSlice() toPut = append(toPut, group) } } toDelete := sysGroupsSet.Difference(iGroupsSet).ToSlice() for _, g := range toDelete { group := makeAuthGroup(ctx, g) toDel = append(toDel, group) } return toPut, toDel } func identitiesToStrings(idents []identity.Identity) []string { res := make([]string, len(idents)) for i, id := range idents { res[i] = string(id) } return res } // extractTarArchive unpacks a tar archive and returns a map // of filename -> fileobj pairs. func extractTarArchive(r io.Reader) (map[string][]byte, error) { entries := make(map[string][]byte) gzr, err := gzip.NewReader(r) if err != nil { return nil, err } tr := tar.NewReader(gzr) for { header, err := tr.Next() if err == io.EOF { break } if err != nil { return nil, err } fileContents, err := io.ReadAll(tr) if err != nil { return nil, err } entries[header.Name] = fileContents } if err := gzr.Close(); err != nil { return nil, err } return entries, nil } // TODO(cjacomet): replace with slices.Contains when // slices package isn't experimental. func contains(key string, search []string) bool { for _, val := range search { if val == key { return true } } return false } // ToProto converts the GroupImporterConfig entity to the proto equivalent. func (g *GroupImporterConfig) ToProto() (*configspb.GroupImporterConfig, error) { gConfig := &configspb.GroupImporterConfig{} if err := prototext.Unmarshal([]byte(g.ConfigProto), gConfig); err != nil { return nil, err } return gConfig, nil }
{ systemGroups = append(systemGroups, gID) }
conditional_block
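The loop above lands the import optimistically: it snapshots the existing groups together with the AuthDB revision, prepares at most 200 puts/deletes per round, commits only if the revision has not moved in the meantime, and otherwise recomputes the diff and retries (sleeping briefly between rounds when the batch was truncated). Below is a minimal, hypothetical Python sketch of that same pattern; it is not part of importer.go, and compute_changes, get_revision and apply_batch are placeholder callables (the changes are assumed to be plain group names).

import time

MAX_BATCH = 200  # mirrors the 200-entity cap discussed in the comments above

def import_in_batches(compute_changes, get_revision, apply_batch, pause=5):
    """Diff and apply capped batches until nothing is left to push."""
    updated = set()
    while True:
        revision = get_revision()            # read the current revision outside the write txn
        changes = compute_changes()          # diff desired state against a fresh snapshot
        if not changes:
            break
        batch, truncated = changes[:MAX_BATCH], len(changes) > MAX_BATCH
        try:
            apply_batch(batch, expected_revision=revision)
        except RuntimeError:
            continue                         # revision moved underneath us: retry with a new snapshot
        updated.update(batch)
        if truncated:
            time.sleep(pause)                # push the remaining entities on the next round
            continue
        break
    return sorted(updated)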
importer.go
// Copyright 2022 The LUCI Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package model import ( "archive/tar" "compress/gzip" "context" "fmt" "io" "regexp" "sort" "strings" "time" "go.chromium.org/luci/auth/identity" "go.chromium.org/luci/auth_service/api/configspb" "go.chromium.org/luci/common/data/stringset" "go.chromium.org/luci/common/errors" "go.chromium.org/luci/common/logging" "go.chromium.org/luci/gae/service/datastore" "go.chromium.org/luci/server/auth" "google.golang.org/protobuf/encoding/prototext" ) // Imports groups from some external tar.gz bundle or plain text list. // External URL should serve *.tar.gz file with the following file structure: // <external group system name>/<group name>: // userid // userid // ... // For example ldap.tar.gz may look like: // ldap/trusted-users: // jane // joe // ... // ldap/all: // jane // joe // ... // Each tarball may have groups from multiple external systems, but groups from // some external system must not be split between multiple tarballs. When importer // sees <external group system name>/* in a tarball, it modifies group list from // that system on the server to match group list in the tarball _exactly_, // including removal of groups that are on the server, but no longer present in // the tarball. // Plain list format should have one userid per line and can only describe a single // group in a single system. Such groups will be added to 'external/*' groups // namespace. Removing such group from importer config will remove it from // service too. // The service can also be configured to accept tarball uploads (instead of // fetching them). Fetched and uploaded tarballs are handled in the exact same way, // in particular all caveats related to external group system names apply. // GroupImporterConfig is a singleton entity that contains the contents of the imports.cfg file. type GroupImporterConfig struct { Kind string `gae:"$kind,GroupImporterConfig"` ID string `gae:"$id,config"` // ConfigProto is the plaintext copy of the config found at imports.cfg. ConfigProto string `gae:"config_proto"` // ConfigRevision is revision version of the config found at imports.cfg. ConfigRevision []byte `gae:"config_revision"` // ModifiedBy is the email of the user who modified the cfg. ModifiedBy string `gae:"modified_by"` // ModifiedTS is the time when this entity was last modified. ModifiedTS time.Time `gae:"modified_ts"` } var GroupNameRe = regexp.MustCompile(`^([a-z\-]+/)?[0-9a-z_\-\.@]{1,100}$`) // GroupBundle is a map where k: groupName, v: list of identities belonging to group k. type GroupBundle = map[string][]identity.Identity // GetGroupImporterConfig fetches the GroupImporterConfig entity from the datastore. // // Returns GroupImporterConfig entity if present. // Returns datastore.ErrNoSuchEntity if the entity is not present. // Returns annotated error for all other errors. func
(ctx context.Context) (*GroupImporterConfig, error) { groupsCfg := &GroupImporterConfig{ Kind: "GroupImporterConfig", ID: "config", } switch err := datastore.Get(ctx, groupsCfg); { case err == nil: return groupsCfg, nil case err == datastore.ErrNoSuchEntity: return nil, err default: return nil, errors.Annotate(err, "error getting GroupImporterConfig").Err() } } // IngestTarball handles upload of tarball's specified in 'tarball_upload' config entries. // expected to be called in an auth context of the upload PUT request. // // returns // // []string - list of modified groups // int64 - authDBRevision // error // proto translation error // entry is nil // entry not found in tarball upload config // unauthorized uploader // bad tarball structure func IngestTarball(ctx context.Context, name string, content io.Reader) ([]string, int64, error) { g, err := GetGroupImporterConfig(ctx) if err != nil { return nil, 0, err } gConfigProto, err := g.ToProto() if err != nil { return nil, 0, errors.Annotate(err, "issue getting proto from config entity").Err() } caller := auth.CurrentIdentity(ctx) var entry *configspb.GroupImporterConfig_TarballUploadEntry // make sure that tarball_upload entry we're looking for is specified in config for _, tbu := range gConfigProto.GetTarballUpload() { if tbu.Name == name { entry = tbu break } } if entry == nil { return nil, 0, errors.New("entry is nil") } if entry.Name == "" { return nil, 0, errors.New("entry not found in tarball upload names") } if !contains(caller.Email(), entry.AuthorizedUploader) { return nil, 0, errors.New(fmt.Sprintf("%q is not an authorized uploader", caller.Email())) } bundles, err := loadTarball(ctx, content, entry.GetDomain(), entry.GetSystems(), entry.GetGroups()) if err != nil { return nil, 0, errors.Annotate(err, "bad tarball").Err() } return importBundles(ctx, bundles, caller, nil) } // loadTarball unzips tarball with groups and deserializes them. 
func loadTarball(ctx context.Context, content io.Reader, domain string, systems, groups []string) (map[string]GroupBundle, error) { // map looks like: K: system, V: { K: groupName, V: []identities } bundles := make(map[string]GroupBundle) entries, err := extractTarArchive(content) if err != nil { return nil, err } // verify system/groupname and then parse blob if valid for filename, fileobj := range entries { chunks := strings.Split(filename, "/") if len(chunks) != 2 || !GroupNameRe.MatchString(chunks[1]) { logging.Warningf(ctx, "Skipping file %s, not a valid name", filename) continue } if groups != nil && !contains(filename, groups) { continue } system := chunks[0] if !contains(system, systems) { logging.Warningf(ctx, "Skipping file %s, not allowed", filename) continue } identities, err := loadGroupFile(string(fileobj), domain) if err != nil { return nil, err } if _, ok := bundles[system]; !ok { bundles[system] = make(GroupBundle) } bundles[system][filename] = identities } return bundles, nil } func loadGroupFile(identities string, domain string) ([]identity.Identity, error) { members := make(map[identity.Identity]bool) memsSplit := strings.Split(identities, "\n") for _, uid := range memsSplit { uid = strings.TrimSpace(uid) if uid == "" { continue } var ident string if domain == "" { ident = fmt.Sprintf("user:%s", uid) } else { ident = fmt.Sprintf("user:%s@%s", uid, domain) } emailIdent, err := identity.MakeIdentity(ident) if err != nil { return nil, err } members[emailIdent] = true } membersSorted := make([]identity.Identity, 0, len(members)) for mem := range members { membersSorted = append(membersSorted, mem) } sort.Slice(membersSorted, func(i, j int) bool { return membersSorted[i].Value() < membersSorted[j].Value() }) return membersSorted, nil } // importBundles imports given set of bundles all at once. // A bundle is a map with groups that is the result of a processing of some tarball. // A bundle specifies the desired state of all groups under some system, e.g. // importBundles({'ldap': {}}, ...) will REMOVE all existing 'ldap/*' groups. // // Group names in the bundle are specified in their full prefixed form (with // system name prefix). An example of expected 'bundles': // // { // 'ldap': { // 'ldap/group': [Identity(...), Identity(...)], // }, // } // // Args: // // bundles: map system name -> GroupBundle // providedBy: auth.Identity to put in modifiedBy or createdBy fields. // // Returns: // // (list of modified groups, // new AuthDB revision number or 0 if no changes, // error if issue with writing entities). func importBundles(ctx context.Context, bundles map[string]GroupBundle, providedBy identity.Identity, testHook func()) ([]string, int64, error) { // Nothing to process. if len(bundles) == 0 { return []string{}, 0, nil } getAuthDBRevision := func(ctx context.Context) (int64, error) { state, err := GetReplicationState(ctx) switch { case err == datastore.ErrNoSuchEntity: return 0, nil case err != nil: return -1, err default: return state.AuthDBRev, nil } } // Fetches all existing groups and AuthDB revision number. 
groupsSnapshot := func(ctx context.Context) (gMap map[string]*AuthGroup, rev int64, err error) { err = datastore.RunInTransaction(ctx, func(ctx context.Context) error { groups, err := GetAllAuthGroups(ctx) if err != nil { return err } gMap = make(map[string]*AuthGroup, len(groups)) for _, g := range groups { gMap[g.ID] = g } rev, err = getAuthDBRevision(ctx) if err != nil { return errors.Annotate(err, "couldn't get AuthDBRev").Err() } return nil }, nil) return gMap, rev, err } // Transactionally puts and deletes a bunch of entities. applyImport := func(expectedRevision int64, entitiesToPut, entitiesToDelete []*AuthGroup, ts time.Time) error { // Runs in transaction. return runAuthDBChange(ctx, func(ctx context.Context, cae commitAuthEntity) error { rev, err := getAuthDBRevision(ctx) if err != nil { return err } // DB changed between transactions try again. if rev != expectedRevision { return errors.New("revision numbers don't match") } for _, e := range entitiesToPut { if err := cae(e, ts, providedBy, false); err != nil { return err } } for _, e := range entitiesToDelete { if err := cae(e, ts, providedBy, true); err != nil { return err } } return nil }) } updatedGroups := stringset.New(0) revision := int64(0) loopCount := 0 var groups map[string]*AuthGroup var err error // Try to apply the change in batches until it lands completely or deadline // happens. Split each batch update into two transactions (assuming AuthDB // changes infrequently) to avoid reading and writing too much stuff from // within a single transaction (and to avoid keeping the transaction open while // calculating the diff). for { // Use same timestamp everywhere to reflect that groups were imported // atomically within a single transaction. ts := time.Now().UTC() loopCount += 1 groups, revision, err = groupsSnapshot(ctx) if err != nil { return nil, revision, err } // For testing purposes only. if testHook != nil && loopCount == 2 { testHook() } entitiesToPut := []*AuthGroup{} entitiesToDel := []*AuthGroup{} for sys := range bundles { iGroups := bundles[sys] toPut, toDel := prepareImport(ctx, sys, groups, iGroups) entitiesToPut = append(entitiesToPut, toPut...) entitiesToDel = append(entitiesToDel, toDel...) } if len(entitiesToPut) == 0 && len(entitiesToDel) == 0 { logging.Infof(ctx, "nothing to do") break } // An `applyImport` transaction can touch at most 500 entities. Cap the // number of entities we create/delete by 200 each since we attach a historical // entity to each entity. The rest will be updated on the next cycle of the loop. // This is safe to do since: // * Imported groups are "leaf" groups (have no subgroups) and can be added // in arbitrary order without worrying about referential integrity. // * Deleted groups are guaranteed to be unreferenced by `prepareImport` // and can be deleted in arbitrary order as well. truncated := false // Both these operations happen in the same transaction so we have // to trim it to make sure the total is <= 200. if len(entitiesToPut) > 200 { entitiesToPut = entitiesToPut[:200] entitiesToDel = nil truncated = true } else if len(entitiesToPut)+len(entitiesToDel) > 200 { entitiesToDel = entitiesToDel[:200-len(entitiesToPut)] truncated = true } // Log what we are about to do to help debugging transaction errors. 
logging.Infof(ctx, "Preparing AuthDB rev %d with %d puts and %d deletes:", revision+1, len(entitiesToPut), len(entitiesToDel)) for _, e := range entitiesToPut { logging.Infof(ctx, "U %s", e.ID) updatedGroups.Add(e.ID) } for _, e := range entitiesToDel { logging.Infof(ctx, "D %s", e.ID) updatedGroups.Add(e.ID) } // Land the change iff the current AuthDB revision is still == `revision`. err := applyImport(revision, entitiesToPut, entitiesToDel, ts) if err != nil && strings.Contains(err.Error(), "revision numbers don't match") { logging.Warningf(ctx, "authdb changed between transactions, retrying...") continue } else if err != nil { logging.Errorf(ctx, "couldn't apply changes to datastore entities %s", err.Error()) return nil, revision, err } // The new revision has landed revision += 1 if truncated { logging.Infof(ctx, "going for another round to push the rest of the groups") time.Sleep(time.Second * 5) continue } logging.Infof(ctx, "Done") break } if len(updatedGroups) > 0 { return updatedGroups.ToSortedSlice(), int64(revision), nil } return nil, 0, nil } // prepareImport compares the bundle given to the what is currently present in datastore // to get the operations for all the groups. func prepareImport(ctx context.Context, systemName string, existingGroups map[string]*AuthGroup, iGroups GroupBundle) (toPut []*AuthGroup, toDel []*AuthGroup) { systemGroups := []string{} iGroupsSet := stringset.New(len(iGroups)) for gID := range existingGroups { if strings.HasPrefix(gID, fmt.Sprintf("%s/", systemName)) { systemGroups = append(systemGroups, gID) } } for groupName := range iGroups { iGroupsSet.Add(groupName) } sysGroupsSet := stringset.NewFromSlice(systemGroups...) toCreate := iGroupsSet.Difference(sysGroupsSet).ToSlice() for _, g := range toCreate { group := makeAuthGroup(ctx, g) group.Members = identitiesToStrings(iGroups[g]) toPut = append(toPut, group) } toUpdate := sysGroupsSet.Intersect(iGroupsSet).ToSlice() for _, g := range toUpdate { importGMems := stringset.NewFromSlice(identitiesToStrings(iGroups[g])...) existMems := existingGroups[g].Members if !(len(importGMems) == len(existMems) && importGMems.HasAll(existMems...)) { group := makeAuthGroup(ctx, g) group.Members = importGMems.ToSlice() toPut = append(toPut, group) } } toDelete := sysGroupsSet.Difference(iGroupsSet).ToSlice() for _, g := range toDelete { group := makeAuthGroup(ctx, g) toDel = append(toDel, group) } return toPut, toDel } func identitiesToStrings(idents []identity.Identity) []string { res := make([]string, len(idents)) for i, id := range idents { res[i] = string(id) } return res } // extractTarArchive unpacks a tar archive and returns a map // of filename -> fileobj pairs. func extractTarArchive(r io.Reader) (map[string][]byte, error) { entries := make(map[string][]byte) gzr, err := gzip.NewReader(r) if err != nil { return nil, err } tr := tar.NewReader(gzr) for { header, err := tr.Next() if err == io.EOF { break } if err != nil { return nil, err } fileContents, err := io.ReadAll(tr) if err != nil { return nil, err } entries[header.Name] = fileContents } if err := gzr.Close(); err != nil { return nil, err } return entries, nil } // TODO(cjacomet): replace with slices.Contains when // slices package isn't experimental. func contains(key string, search []string) bool { for _, val := range search { if val == key { return true } } return false } // ToProto converts the GroupImporterConfig entity to the proto equivalent. 
func (g *GroupImporterConfig) ToProto() (*configspb.GroupImporterConfig, error) { gConfig := &configspb.GroupImporterConfig{} if err := prototext.Unmarshal([]byte(g.ConfigProto), gConfig); err != nil { return nil, err } return gConfig, nil }
GetGroupImporterConfig
identifier_name
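The header comments of importer.go describe the bundle layout the importer expects: a gzipped tarball whose entries are named <system>/<group> and contain one userid per line. A small Python sketch that produces such a payload follows; it could serve, for instance, as a test fixture for an upload handler. The system, group names and members are made up for illustration.

import io
import tarfile

def build_group_tarball(groups):
    """groups: {'ldap/trusted-users': ['jane', 'joe'], ...} -> gzipped tar bytes."""
    buf = io.BytesIO()
    with tarfile.open(fileobj=buf, mode='w:gz') as tar:
        for name, members in groups.items():
            data = ('\n'.join(members) + '\n').encode('utf-8')
            info = tarfile.TarInfo(name=name)       # entry name is "<system>/<group>"
            info.size = len(data)
            tar.addfile(info, io.BytesIO(data))
    return buf.getvalue()

# Example payload matching the ldap.tar.gz layout sketched in the comments.
payload = build_group_tarball({
    'ldap/trusted-users': ['jane', 'joe'],
    'ldap/all': ['jane', 'joe'],
})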
storeserv.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # (C) Copyright 2017-2020 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ .. module:: hpestorapi.storeserv :synopsis: Module with HPE 3PAR disk array wrapper .. moduleauthor:: Ivan Smirnov <[email protected]>, HPE Pointnext DACH & Russia """ import logging import warnings from urllib.parse import quote import requests from requests.packages.urllib3.exceptions import InsecureRequestWarning if __name__ == "__main__": pass LOG = logging.getLogger('hpestorapi.storeserv') class StoreServ: """ HPE 3PAR array implementation class. """ def __init__(self, address, username, password, port=None, ssl=True, verify=True): """ HPE 3PAR constructor. :param str address: Hostname or IP address of HPE 3PAR array (management address). Web Services API should be enabled for this array (disabled by default). To enable Web Services API you should check 3PAR OS command: showwsapi. :param str username: User name for 3PAR Web Services API. Its recommended to create dedicated user with limited rights. For example, if you dont need to create/modify/delete objects on disk array, you should create new user with "browse" role. Of coarse, your script can work with "3paradm" user ("super" role), but its a bad idea. To create new user, you should check 3PAR OS command: createuser. :param str password: Password for 3PAR Web Services API. :param int port: (optional) Custom port number for 3PAR Web Services API. :param bool ssl: (optional) Use secure https (True) or plain text http (False). :param bool|string verify: (optional) Either a boolean, in which case it controls whether we verify the Rest serverโ€™s TLS certificate, or a string, in which case it must be a path to a CA bundle to use. By default: True. :return: None """ self._address = address self._username = username self._password = password self._port = port self._ssl = ssl self._verify = verify # Session key. None, if there is not active session. self._key = None # Default timeouts: # ConnectionTimeout = 1 second # ReadTimeout = infinity self._timeout = (1, None) # Default request headers self._headers = { 'Accept': 'application/json', 'Content-Type': 'application/json', 'Accept-Language': 'en' } def __del__(self): # Perform session close if self._key is not None: self.close() def _query(self, url, method, **kwargs): """ Perform HTTP request to HPE 3PAR array. :param str url: URL address. For example: 'system' or 'volumes'. Static part of url is generated automatically. :param str method: HTTP method. Could be 'GET', 'POST', 'DELETE' or 'PUT'. :param float|tuple timeout: (optional) Like :attr:`StoreServ.timeout` but only for one query. :rtype: tuple(int, dict) :return: Dictionary with HTTP status code and json data. For example: dict('status':200, 'data':{'key':'value'}). 
Second value may be None if 3PAR array returns no message body, """ # Set connection and read timeout (if not set by user for current request) timeout = kwargs.pop('timeout', self._timeout) # Add default and auth headers to parameter list kwargs.setdefault('headers', dict()) kwargs['headers'].update(self._headers) # Prepare request path = '%s/%s' % (self._base_url, url.strip('/')) request = requests.Request(method, path, **kwargs) prep = request.prepare() LOG.debug('%s(`%s`)', method, prep.url) LOG.debug('Request body = `%s`', prep.body) # Perform request with runtime measuring with warnings.catch_warnings(): warnings.filterwarnings('ignore', category=InsecureRequestWarning) try: session = requests.Session() resp = session.send(prep, timeout=timeout, verify=self._verify) deltafmt = '%d.%d sec' % (resp.elapsed.seconds, resp.elapsed.microseconds // 1000) except Exception as error: LOG.fatal('Cannot connect to StoreServ device. %s', repr(error)) raise # Check Rest service response if resp.status_code not in [200, 201, 202, 204]: LOG.warning('Return code %s, response delay %s', resp.status_code, deltafmt) LOG.warning('resp.content=%s', resp.content) LOG.warning('resp.reason=%s', resp.reason) else: LOG.debug('StoreServ return status %s, delay %s', resp.status_code, deltafmt) # Check response JSON body is exist try: jdata = resp.json() except ValueError: if resp.content: LOG.warning('Cannot decode JSON. Source string: "%s"', resp.content) return resp.status_code, None # (status, data) # Check wsapi session timeout error if (resp.status_code == 403) and (jdata.get('code', None) == 6): if self._key is not None: LOG.info('Session timeout occurs. Session key is invalid. ' 'Try to get new one.') # Just forget about current (inactive) session self._headers.pop('X-HP3PAR-WSAPI-SessionKey', None) self._key = None # Generate new session and replay last query try: self.open() replay = self._query(url, method, **kwargs) except Exception as error: LOG.fatal('Cannot open new WSAPI session. Exception: %s', repr(error)) raise else: LOG.debug('Request replay success.') return replay return resp.status_code, jdata def open(self): """ Open new Rest API session for HPE 3PAR array. You should call it prior any other requests. Do not forget to call :meth:`StoreServ.close` if you don't plan to use session anymore, because 3PAR array has active sessions limit. If some troubles occurs you should manually check: * 3PAR Web services API are enabled on array (3PAR OS command: 'showwsapi') * Array credentials (username and password) * 3PAR array management address is correct and available * Debug logs generated by python logging module :return: None """ auth = {'user': self._username, 'password': self._password} status, data = self.post('credentials', body=auth) if status == 201: # 201 (created) => Session succefully created self._headers.update({'X-HP3PAR-WSAPI-SessionKey': data['key']}) self._key = data['key'] elif status == 403: # 403 (forbidden) => Wrong user or password raise AuthError('Cannot connect to StoreServ. ' 'Authentification error: %s', data['desc']) def close(self): """ Close Rest API session. :return: None """ # There isnt active session if self._key is None: LOG.debug('There isnt active session - skipping session close.') return # Try to close active session path = 'credentials/' + self._key try: self.delete(path) except Exception as error: LOG.warning('Cannot close StoreServ 3PAR session ' 'gracefully. 
Exception occurred: %s', repr(error))
        else:
            self._headers.pop('X-HP3PAR-WSAPI-SessionKey')
            self._key = None

    def get(self, url, query=None):
        ""
def post(self, url, body): """ Perform HTTP POST request to HPE 3PAR array. Method used to create new objects. :param str url: URL address. Base part of url address is generated automatically and you should not care about it. Example of valid url: 'system' or 'volumes'. All available url's, request parameters and results are described in "HPE 3PAR Web Services API Developer's Guide" :param dict body: Request parameter, used to create new array object. :rtype: tuple (int, dict) :return: Tuple with HTTP status code and dict with request result. For example: (201, {'key':'value'}). Second value may be None if 3PAR array returns no message body. """ return self._query(url, 'POST', json=body) def delete(self, url): """ Perform HTTP DELETE request to HPE 3PAR array. :param str url: URL address. Base part of url address is generated automatically and you should not care about it. Example of valid url: 'system' or 'volumes'. All available url's, request parameters and results are described in "HPE 3PAR Web Services API Developer's Guide" :return: Tuple with HTTP status code and dict with request result. For example: (200, {'key':'value'}). Second value may be None if 3PAR array returns no message body. """ return self._query(url, 'DELETE') def put(self, url, body): """ Perform HTTP PUT request to HPE 3PAR array. :param str url: URL address. Base part of url address is generated automatically and you should not care about it. Example of valid url: 'system' or 'volumes'. All available url's, request parameters and results are described in "HPE 3PAR Web Services API Developer's Guide" :param dict body: Request parameter, used to modify array object. :rtype: tuple(int, dict) :return: Tuple with HTTP status code and dict with request result. For example: (200, {'key':'value'}). Second value may be None if 3PAR array returns no message body. """ return self._query(url, 'PUT', json=body) def _set_timeout(self, timeout): if isinstance(timeout, (float, int)): self._timeout = (timeout, timeout) elif isinstance(timeout, tuple): self._timeout = timeout def _get_timeout(self): return self._timeout timeout = property(_get_timeout, _set_timeout) """ :var float|tuple timeout: Number of seconds that Rest API client waits for response from HPE StoreServ before timeout exception generation. You can use different timeouts for connection setup and for getting first piece of data. In this case, you should use tuple(float, float) with first value - connection timeout and the second value - read timeout. Or if you want to use same values for both type of timeouts, you can use one float value. 'None' value can be used instead to wait forever for a device response. Default value: (1, None) """ @property def _base_url(self): """ Generate static part of URL. :rtype: str :return: Static part of URL """ # URL Protocol proto = 'https' if self._ssl else 'http' # Device port number if self._port is None: port = 8080 if self._ssl else 8008 else: port = self._port return f'{proto}://{self._address}:{port}/api/v1' def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): if self._key is not None: self.close() class AuthError(Exception): """ Authentification error """
" Perform HTTP GET request to HPE 3PAR array. Method used to get information about objects. :param str url: URL address. Base part of url address is generated automatically and you should not care about it. Example of valid url: 'system' or 'volumes'. All available url's and requests result are described in "HPE 3PAR Web Services API Developer's Guide" :param str query: (optional) Query filter specification (see "WSAPI query syntax" in "HPE 3PAR Web Services API Developer's Guide"). :rtype: tuple(int, dict) :return: Tuple with HTTP status code and dict with request result. For example: (200, {'key':'value'}). """ # Perform get request with query filter if query is not None: return self._query(url, 'GET', params=quote(f'query="{query}"')) # Perform simple get request return self._query(url, 'GET')
identifier_body
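The docstrings above spell out the intended call sequence for this client: construct it, open() a WSAPI session, issue requests, and close() when done (the context-manager methods later in the file close the session automatically on exit). A minimal usage sketch with a placeholder array address and credentials:

from hpestorapi.storeserv import StoreServ

with StoreServ('3par.example.com', 'browse_user', 'secret') as array:
    array.open()                          # create a WSAPI session (POST /credentials)
    status, data = array.get('system')    # returns (HTTP status, decoded JSON or None)
    if status == 200:
        print(data)
# __exit__ closes the session if a key is still active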
storeserv.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # (C) Copyright 2017-2020 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ .. module:: hpestorapi.storeserv :synopsis: Module with HPE 3PAR disk array wrapper .. moduleauthor:: Ivan Smirnov <[email protected]>, HPE Pointnext DACH & Russia """ import logging import warnings from urllib.parse import quote import requests from requests.packages.urllib3.exceptions import InsecureRequestWarning if __name__ == "__main__": pass LOG = logging.getLogger('hpestorapi.storeserv') class StoreServ: """ HPE 3PAR array implementation class. """ def __init__(self, address, username, password, port=None, ssl=True, verify=True): """ HPE 3PAR constructor. :param str address: Hostname or IP address of HPE 3PAR array (management address). Web Services API should be enabled for this array (disabled by default). To enable Web Services API you should check 3PAR OS command: showwsapi. :param str username: User name for 3PAR Web Services API. Its recommended to create dedicated user with limited rights. For example, if you dont need to create/modify/delete objects on disk array, you should create new user with "browse" role. Of coarse, your script can work with "3paradm" user ("super" role), but its a bad idea. To create new user, you should check 3PAR OS command: createuser. :param str password: Password for 3PAR Web Services API. :param int port: (optional) Custom port number for 3PAR Web Services API. :param bool ssl: (optional) Use secure https (True) or plain text http (False). :param bool|string verify: (optional) Either a boolean, in which case it controls whether we verify the Rest serverโ€™s TLS certificate, or a string, in which case it must be a path to a CA bundle to use. By default: True. :return: None """ self._address = address self._username = username self._password = password self._port = port self._ssl = ssl self._verify = verify # Session key. None, if there is not active session. self._key = None # Default timeouts: # ConnectionTimeout = 1 second # ReadTimeout = infinity self._timeout = (1, None) # Default request headers self._headers = { 'Accept': 'application/json', 'Content-Type': 'application/json', 'Accept-Language': 'en' } def __del__(self): # Perform session close if self._key is not None: self.close() def _query(self, url, method, **kwargs): """ Perform HTTP request to HPE 3PAR array. :param str url: URL address. For example: 'system' or 'volumes'. Static part of url is generated automatically. :param str method: HTTP method. Could be 'GET', 'POST', 'DELETE' or 'PUT'. :param float|tuple timeout: (optional) Like :attr:`StoreServ.timeout` but only for one query. :rtype: tuple(int, dict) :return: Dictionary with HTTP status code and json data. For example: dict('status':200, 'data':{'key':'value'}). 
Second value may be None if 3PAR array returns no message body, """ # Set connection and read timeout (if not set by user for current request) timeout = kwargs.pop('timeout', self._timeout) # Add default and auth headers to parameter list kwargs.setdefault('headers', dict()) kwargs['headers'].update(self._headers) # Prepare request path = '%s/%s' % (self._base_url, url.strip('/')) request = requests.Request(method, path, **kwargs) prep = request.prepare() LOG.debug('%s(`%s`)', method, prep.url) LOG.debug('Request body = `%s`', prep.body) # Perform request with runtime measuring with warnings.catch_warnings(): warnings.filterwarnings('ignore', category=InsecureRequestWarning) try: session = requests.Session() resp = session.send(prep, timeout=timeout, verify=self._verify) deltafmt = '%d.%d sec' % (resp.elapsed.seconds, resp.elapsed.microseconds // 1000) except Exception as error: LOG.fatal('Cannot connect to StoreServ device. %s', repr(error)) raise # Check Rest service response if resp.status_code not in [200, 201, 202, 204]: LOG.warning('Return code %s, response delay %s', resp.status_code, deltafmt) LOG.warning('resp.content=%s', resp.content) LOG.warning('resp.reason=%s', resp.reason) else: LOG.debug('StoreServ return status %s, delay %s', resp.status_code, deltafmt) # Check response JSON body is exist try: jdata = resp.json() except ValueError: if resp.content: LOG.warning('Cannot decode JSON. Source string: "%s"', resp.content) return resp.status_code, None # (status, data) # Check wsapi session timeout error if (resp.status_code == 403) and (jdata.get('code', None) == 6): if self._key is not None: LOG.info('Session timeout occurs. Session key is invalid. ' 'Try to get new one.') # Just forget about current (inactive) session self._headers.pop('X-HP3PAR-WSAPI-SessionKey', None) self._key = None # Generate new session and replay last query try: self.open() replay = self._query(url, method, **kwargs) except Exception as error: LOG.fatal('Cannot open new WSAPI session. Exception: %s', repr(error)) raise else: LOG.debug('Request replay success.') return replay return resp.status_code, jdata def open(self): """ Open new Rest API session for HPE 3PAR array. You should call it prior any other requests. Do not forget to call :meth:`StoreServ.close` if you don't plan to use session anymore, because 3PAR array has active sessions limit. If some troubles occurs you should manually check: * 3PAR Web services API are enabled on array (3PAR OS command: 'showwsapi') * Array credentials (username and password) * 3PAR array management address is correct and available * Debug logs generated by python logging module :return: None """ auth = {'user': self._username, 'password': self._password} status, data = self.post('credentials', body=auth) if status == 201: # 201 (created) => Session succefully created self._headers.update({'X-HP3PAR-WSAPI-SessionKey': data['key']}) self._key = data['key'] elif status == 403: # 403 (forbidden) => Wrong user or password raise AuthError('Cannot connect to StoreServ. ' 'Authentification error: %s', data['desc']) def close(self): """ Close Rest API session. :return: None """ # There isnt active session if self._key is None: LOG.debug('There isnt active session - skipping session close.') return # Try to close active session path = 'credentials/' + self._key try: self.delete(path) except Exception as error: LOG.warning('Cannot close StoreServ 3PAR session ' 'gracefully. 
Exception occured: %s', repr(error)) else: self._headers.pop('X-HP3PAR-WSAPI-SessionKey') self._key = None def get(self, url, query=None): """ Perform HTTP GET request to HPE 3PAR array. Method used to get information about objects. :param str url: URL address. Base part of url address is generated automatically and you should not care about it. Example of valid url: 'system' or 'volumes'. All available url's and requests result are described in "HPE 3PAR Web Services API Developer's Guide" :param str query: (optional) Query filter specification (see "WSAPI query syntax" in "HPE 3PAR Web Services API Developer's Guide"). :rtype: tuple(int, dict) :return: Tuple with HTTP status code and dict with request result. For example: (200, {'key':'value'}). """ # Perform get request with query filter if query is not None: return self._query(url, 'GET', params=quote(f'query="{query}"')) # Perform simple get request return self._query(url, 'GET') def post(self, url, body): """ Perform HTTP POST request to HPE 3PAR array. Method used to create new objects. :param str url: URL address. Base part of url address is generated automatically and you should not care about it. Example of valid url: 'system' or 'volumes'. All available url's, request parameters and results are described in "HPE 3PAR Web Services API Developer's Guide" :param dict body: Request parameter, used to create new array object. :rtype: tuple (int, dict) :return: Tuple with HTTP status code and dict with request result. For example: (201, {'key':'value'}). Second value may be None if 3PAR array returns no message body. """ return self._query(url, 'POST', json=body) def delete(self, url): """ Perform HTTP DELETE request to HPE 3PAR array. :param str url: URL address. Base part of url address is generated automatically and you should not care about it. Example of valid url: 'system' or 'volumes'. All available url's, request parameters and results are described in "HPE 3PAR Web Services API Developer's Guide" :return: Tuple with HTTP status code and dict with request result. For example: (200, {'key':'value'}). Second value may be None if 3PAR array returns no message body. """ return self._query(url, 'DELETE') def put(self, url, body): """ Perform HTTP PUT request to HPE 3PAR array. :param str url: URL address. Base part of url address is generated automatically and you should not care about it. Example of valid url: 'system' or 'volumes'. All available url's, request parameters and results are described in "HPE 3PAR Web Services API Developer's Guide" :param dict body: Request parameter, used to modify array object. :rtype: tuple(int, dict) :return: Tuple with HTTP status code and dict with request result. For example: (200, {'key':'value'}). Second value may be None if 3PAR array returns no message body. """ return self._query(url, 'PUT', json=body) def _set_timeout(self, timeout): if isinstance(timeout, (float, int)): self._timeout = (timeout, timeout) elif isinstance(timeout, tuple): self._timeout = timeout def _get_timeout(self): return self._timeout timeout = property(_get_timeout, _set_timeout) """ :var float|tuple timeout: Number of seconds that Rest API client waits for response from HPE StoreServ before timeout exception generation. You can use different timeouts for connection setup and for getting first piece of data. In this case, you should use tuple(float, float) with first value - connection timeout and the second value - read timeout. 
Or if you want to use same values for both type of timeouts, you can use one float value. 'None' value can be used instead to wait forever for a device response. Default value: (1, None) """ @property def _base_url(self): """ Generate static part of URL. :rtype: str :return: Static part of URL """ # URL Protocol proto = 'https' if self._ssl else 'http' # Device port number if self._port is None: port = 8080 if self._ssl else 8008 else: port = self._port return f'{proto}://{self._address}:{port}/api/v1' def __enter__(self): return self def __
elf, exc_type, exc_val, exc_tb): if self._key is not None: self.close() class AuthError(Exception): """ Authentification error """
exit__(s
identifier_name
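The timeout property documented above accepts either a single number, applied to both connection setup and read, or a (connect, read) tuple, with None meaning wait forever. A short sketch, again with placeholder connection details:

from hpestorapi.storeserv import StoreServ

array = StoreServ('3par.example.com', 'browse_user', 'secret')
array.timeout = 10           # 10 s for connection setup and for reading the response
array.timeout = (1, 30)      # 1 s to connect, 30 s to wait for the first data
array.timeout = (1, None)    # the documented default: 1 s to connect, no read timeout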
storeserv.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # (C) Copyright 2017-2020 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ .. module:: hpestorapi.storeserv :synopsis: Module with HPE 3PAR disk array wrapper .. moduleauthor:: Ivan Smirnov <[email protected]>, HPE Pointnext DACH & Russia """ import logging import warnings from urllib.parse import quote import requests from requests.packages.urllib3.exceptions import InsecureRequestWarning if __name__ == "__main__": pass LOG = logging.getLogger('hpestorapi.storeserv') class StoreServ: """ HPE 3PAR array implementation class. """ def __init__(self, address, username, password, port=None, ssl=True, verify=True): """ HPE 3PAR constructor. :param str address: Hostname or IP address of HPE 3PAR array (management address). Web Services API should be enabled for this array (disabled by default). To enable Web Services API you should check 3PAR OS command: showwsapi. :param str username: User name for 3PAR Web Services API. Its recommended to create dedicated user with limited rights. For example, if you dont need to create/modify/delete objects on disk array, you should create new user with "browse" role. Of coarse, your script can work with "3paradm" user ("super" role), but its a bad idea. To create new user, you should check 3PAR OS command: createuser. :param str password: Password for 3PAR Web Services API. :param int port: (optional) Custom port number for 3PAR Web Services API. :param bool ssl: (optional) Use secure https (True) or plain text http (False). :param bool|string verify: (optional) Either a boolean, in which case it controls whether we verify the Rest serverโ€™s TLS certificate, or a string, in which case it must be a path to a CA bundle to use. By default: True. :return: None """ self._address = address self._username = username self._password = password self._port = port self._ssl = ssl self._verify = verify # Session key. None, if there is not active session. self._key = None # Default timeouts: # ConnectionTimeout = 1 second # ReadTimeout = infinity self._timeout = (1, None) # Default request headers self._headers = { 'Accept': 'application/json', 'Content-Type': 'application/json', 'Accept-Language': 'en' } def __del__(self): # Perform session close if self._key is not None: self.close() def _query(self, url, method, **kwargs): """ Perform HTTP request to HPE 3PAR array. :param str url: URL address. For example: 'system' or 'volumes'. Static part of url is generated automatically. :param str method: HTTP method. Could be 'GET', 'POST', 'DELETE' or 'PUT'. :param float|tuple timeout: (optional) Like :attr:`StoreServ.timeout` but only for one query. :rtype: tuple(int, dict) :return: Dictionary with HTTP status code and json data. For example: dict('status':200, 'data':{'key':'value'}). 
Second value may be None if 3PAR array returns no message body, """ # Set connection and read timeout (if not set by user for current request) timeout = kwargs.pop('timeout', self._timeout) # Add default and auth headers to parameter list kwargs.setdefault('headers', dict()) kwargs['headers'].update(self._headers) # Prepare request path = '%s/%s' % (self._base_url, url.strip('/')) request = requests.Request(method, path, **kwargs) prep = request.prepare() LOG.debug('%s(`%s`)', method, prep.url) LOG.debug('Request body = `%s`', prep.body) # Perform request with runtime measuring with warnings.catch_warnings(): warnings.filterwarnings('ignore', category=InsecureRequestWarning) try: session = requests.Session() resp = session.send(prep, timeout=timeout, verify=self._verify) deltafmt = '%d.%d sec' % (resp.elapsed.seconds, resp.elapsed.microseconds // 1000) except Exception as error: LOG.fatal('Cannot connect to StoreServ device. %s', repr(error)) raise # Check Rest service response if resp.status_code not in [200, 201, 202, 204]: LOG.warning('Return code %s, response delay %s', resp.status_code, deltafmt) LOG.warning('resp.content=%s', resp.content) LOG.warning('resp.reason=%s', resp.reason) else: LOG.debug('StoreServ return status %s, delay %s', resp.status_code, deltafmt) # Check response JSON body is exist try: jdata = resp.json() except ValueError: if resp.content: LOG.warning('Cannot decode JSON. Source string: "%s"', resp.content) return resp.status_code, None # (status, data) # Check wsapi session timeout error if (resp.status_code == 403) and (jdata.get('code', None) == 6): if self._key is not None: LOG.info('Session timeout occurs. Session key is invalid. ' 'Try to get new one.') # Just forget about current (inactive) session self._headers.pop('X-HP3PAR-WSAPI-SessionKey', None) self._key = None # Generate new session and replay last query try: self.open() replay = self._query(url, method, **kwargs) except Exception as error: LOG.fatal('Cannot open new WSAPI session. Exception: %s', repr(error)) raise else: LOG.debug('Request replay success.') return replay return resp.status_code, jdata def open(self): """ Open new Rest API session for HPE 3PAR array. You should call it prior any other requests. Do not forget to call :meth:`StoreServ.close` if you don't plan to use session anymore, because 3PAR array has active sessions limit. If some troubles occurs you should manually check: * 3PAR Web services API are enabled on array (3PAR OS command: 'showwsapi') * Array credentials (username and password) * 3PAR array management address is correct and available * Debug logs generated by python logging module :return: None """ auth = {'user': self._username, 'password': self._password} status, data = self.post('credentials', body=auth) if status == 201: # 201 (created) => Session succefully created se
elif status == 403: # 403 (forbidden) => Wrong user or password raise AuthError('Cannot connect to StoreServ. ' 'Authentification error: %s', data['desc']) def close(self): """ Close Rest API session. :return: None """ # There isnt active session if self._key is None: LOG.debug('There isnt active session - skipping session close.') return # Try to close active session path = 'credentials/' + self._key try: self.delete(path) except Exception as error: LOG.warning('Cannot close StoreServ 3PAR session ' 'gracefully. Exception occured: %s', repr(error)) else: self._headers.pop('X-HP3PAR-WSAPI-SessionKey') self._key = None def get(self, url, query=None): """ Perform HTTP GET request to HPE 3PAR array. Method used to get information about objects. :param str url: URL address. Base part of url address is generated automatically and you should not care about it. Example of valid url: 'system' or 'volumes'. All available url's and requests result are described in "HPE 3PAR Web Services API Developer's Guide" :param str query: (optional) Query filter specification (see "WSAPI query syntax" in "HPE 3PAR Web Services API Developer's Guide"). :rtype: tuple(int, dict) :return: Tuple with HTTP status code and dict with request result. For example: (200, {'key':'value'}). """ # Perform get request with query filter if query is not None: return self._query(url, 'GET', params=quote(f'query="{query}"')) # Perform simple get request return self._query(url, 'GET') def post(self, url, body): """ Perform HTTP POST request to HPE 3PAR array. Method used to create new objects. :param str url: URL address. Base part of url address is generated automatically and you should not care about it. Example of valid url: 'system' or 'volumes'. All available url's, request parameters and results are described in "HPE 3PAR Web Services API Developer's Guide" :param dict body: Request parameter, used to create new array object. :rtype: tuple (int, dict) :return: Tuple with HTTP status code and dict with request result. For example: (201, {'key':'value'}). Second value may be None if 3PAR array returns no message body. """ return self._query(url, 'POST', json=body) def delete(self, url): """ Perform HTTP DELETE request to HPE 3PAR array. :param str url: URL address. Base part of url address is generated automatically and you should not care about it. Example of valid url: 'system' or 'volumes'. All available url's, request parameters and results are described in "HPE 3PAR Web Services API Developer's Guide" :return: Tuple with HTTP status code and dict with request result. For example: (200, {'key':'value'}). Second value may be None if 3PAR array returns no message body. """ return self._query(url, 'DELETE') def put(self, url, body): """ Perform HTTP PUT request to HPE 3PAR array. :param str url: URL address. Base part of url address is generated automatically and you should not care about it. Example of valid url: 'system' or 'volumes'. All available url's, request parameters and results are described in "HPE 3PAR Web Services API Developer's Guide" :param dict body: Request parameter, used to modify array object. :rtype: tuple(int, dict) :return: Tuple with HTTP status code and dict with request result. For example: (200, {'key':'value'}). Second value may be None if 3PAR array returns no message body. 
""" return self._query(url, 'PUT', json=body) def _set_timeout(self, timeout): if isinstance(timeout, (float, int)): self._timeout = (timeout, timeout) elif isinstance(timeout, tuple): self._timeout = timeout def _get_timeout(self): return self._timeout timeout = property(_get_timeout, _set_timeout) """ :var float|tuple timeout: Number of seconds that Rest API client waits for response from HPE StoreServ before timeout exception generation. You can use different timeouts for connection setup and for getting first piece of data. In this case, you should use tuple(float, float) with first value - connection timeout and the second value - read timeout. Or if you want to use same values for both type of timeouts, you can use one float value. 'None' value can be used instead to wait forever for a device response. Default value: (1, None) """ @property def _base_url(self): """ Generate static part of URL. :rtype: str :return: Static part of URL """ # URL Protocol proto = 'https' if self._ssl else 'http' # Device port number if self._port is None: port = 8080 if self._ssl else 8008 else: port = self._port return f'{proto}://{self._address}:{port}/api/v1' def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): if self._key is not None: self.close() class AuthError(Exception): """ Authentification error """
lf._headers.update({'X-HP3PAR-WSAPI-SessionKey': data['key']}) self._key = data['key']
conditional_block
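As the conditional above shows, open() stores the session key on a 201 and raises AuthError on a 403 (wrong user or password). A defensive usage sketch with placeholder host and credentials:

from hpestorapi.storeserv import StoreServ, AuthError

array = StoreServ('3par.example.com', 'browse_user', 'secret', verify=False)
try:
    array.open()                          # 201 -> session key added to the request headers
    status, volumes = array.get('volumes')
except AuthError as error:
    print('WSAPI login rejected:', error)
finally:
    array.close()                         # logs and returns if no session was ever created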
storeserv.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # (C) Copyright 2017-2020 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ .. module:: hpestorapi.storeserv :synopsis: Module with HPE 3PAR disk array wrapper .. moduleauthor:: Ivan Smirnov <[email protected]>, HPE Pointnext DACH & Russia """ import logging import warnings from urllib.parse import quote import requests from requests.packages.urllib3.exceptions import InsecureRequestWarning
LOG = logging.getLogger('hpestorapi.storeserv') class StoreServ: """ HPE 3PAR array implementation class. """ def __init__(self, address, username, password, port=None, ssl=True, verify=True): """ HPE 3PAR constructor. :param str address: Hostname or IP address of HPE 3PAR array (management address). Web Services API should be enabled for this array (disabled by default). To enable Web Services API you should check 3PAR OS command: showwsapi. :param str username: User name for 3PAR Web Services API. Its recommended to create dedicated user with limited rights. For example, if you dont need to create/modify/delete objects on disk array, you should create new user with "browse" role. Of coarse, your script can work with "3paradm" user ("super" role), but its a bad idea. To create new user, you should check 3PAR OS command: createuser. :param str password: Password for 3PAR Web Services API. :param int port: (optional) Custom port number for 3PAR Web Services API. :param bool ssl: (optional) Use secure https (True) or plain text http (False). :param bool|string verify: (optional) Either a boolean, in which case it controls whether we verify the Rest serverโ€™s TLS certificate, or a string, in which case it must be a path to a CA bundle to use. By default: True. :return: None """ self._address = address self._username = username self._password = password self._port = port self._ssl = ssl self._verify = verify # Session key. None, if there is not active session. self._key = None # Default timeouts: # ConnectionTimeout = 1 second # ReadTimeout = infinity self._timeout = (1, None) # Default request headers self._headers = { 'Accept': 'application/json', 'Content-Type': 'application/json', 'Accept-Language': 'en' } def __del__(self): # Perform session close if self._key is not None: self.close() def _query(self, url, method, **kwargs): """ Perform HTTP request to HPE 3PAR array. :param str url: URL address. For example: 'system' or 'volumes'. Static part of url is generated automatically. :param str method: HTTP method. Could be 'GET', 'POST', 'DELETE' or 'PUT'. :param float|tuple timeout: (optional) Like :attr:`StoreServ.timeout` but only for one query. :rtype: tuple(int, dict) :return: Dictionary with HTTP status code and json data. For example: dict('status':200, 'data':{'key':'value'}). Second value may be None if 3PAR array returns no message body, """ # Set connection and read timeout (if not set by user for current request) timeout = kwargs.pop('timeout', self._timeout) # Add default and auth headers to parameter list kwargs.setdefault('headers', dict()) kwargs['headers'].update(self._headers) # Prepare request path = '%s/%s' % (self._base_url, url.strip('/')) request = requests.Request(method, path, **kwargs) prep = request.prepare() LOG.debug('%s(`%s`)', method, prep.url) LOG.debug('Request body = `%s`', prep.body) # Perform request with runtime measuring with warnings.catch_warnings(): warnings.filterwarnings('ignore', category=InsecureRequestWarning) try: session = requests.Session() resp = session.send(prep, timeout=timeout, verify=self._verify) deltafmt = '%d.%d sec' % (resp.elapsed.seconds, resp.elapsed.microseconds // 1000) except Exception as error: LOG.fatal('Cannot connect to StoreServ device. 
%s', repr(error)) raise # Check Rest service response if resp.status_code not in [200, 201, 202, 204]: LOG.warning('Return code %s, response delay %s', resp.status_code, deltafmt) LOG.warning('resp.content=%s', resp.content) LOG.warning('resp.reason=%s', resp.reason) else: LOG.debug('StoreServ return status %s, delay %s', resp.status_code, deltafmt) # Check response JSON body is exist try: jdata = resp.json() except ValueError: if resp.content: LOG.warning('Cannot decode JSON. Source string: "%s"', resp.content) return resp.status_code, None # (status, data) # Check wsapi session timeout error if (resp.status_code == 403) and (jdata.get('code', None) == 6): if self._key is not None: LOG.info('Session timeout occurs. Session key is invalid. ' 'Try to get new one.') # Just forget about current (inactive) session self._headers.pop('X-HP3PAR-WSAPI-SessionKey', None) self._key = None # Generate new session and replay last query try: self.open() replay = self._query(url, method, **kwargs) except Exception as error: LOG.fatal('Cannot open new WSAPI session. Exception: %s', repr(error)) raise else: LOG.debug('Request replay success.') return replay return resp.status_code, jdata def open(self): """ Open new Rest API session for HPE 3PAR array. You should call it prior any other requests. Do not forget to call :meth:`StoreServ.close` if you don't plan to use session anymore, because 3PAR array has active sessions limit. If some troubles occurs you should manually check: * 3PAR Web services API are enabled on array (3PAR OS command: 'showwsapi') * Array credentials (username and password) * 3PAR array management address is correct and available * Debug logs generated by python logging module :return: None """ auth = {'user': self._username, 'password': self._password} status, data = self.post('credentials', body=auth) if status == 201: # 201 (created) => Session succefully created self._headers.update({'X-HP3PAR-WSAPI-SessionKey': data['key']}) self._key = data['key'] elif status == 403: # 403 (forbidden) => Wrong user or password raise AuthError('Cannot connect to StoreServ. ' 'Authentification error: %s', data['desc']) def close(self): """ Close Rest API session. :return: None """ # There isnt active session if self._key is None: LOG.debug('There isnt active session - skipping session close.') return # Try to close active session path = 'credentials/' + self._key try: self.delete(path) except Exception as error: LOG.warning('Cannot close StoreServ 3PAR session ' 'gracefully. Exception occured: %s', repr(error)) else: self._headers.pop('X-HP3PAR-WSAPI-SessionKey') self._key = None def get(self, url, query=None): """ Perform HTTP GET request to HPE 3PAR array. Method used to get information about objects. :param str url: URL address. Base part of url address is generated automatically and you should not care about it. Example of valid url: 'system' or 'volumes'. All available url's and requests result are described in "HPE 3PAR Web Services API Developer's Guide" :param str query: (optional) Query filter specification (see "WSAPI query syntax" in "HPE 3PAR Web Services API Developer's Guide"). :rtype: tuple(int, dict) :return: Tuple with HTTP status code and dict with request result. For example: (200, {'key':'value'}). """ # Perform get request with query filter if query is not None: return self._query(url, 'GET', params=quote(f'query="{query}"')) # Perform simple get request return self._query(url, 'GET') def post(self, url, body): """ Perform HTTP POST request to HPE 3PAR array. 
Method used to create new objects. :param str url: URL address. Base part of url address is generated automatically and you should not care about it. Example of valid url: 'system' or 'volumes'. All available url's, request parameters and results are described in "HPE 3PAR Web Services API Developer's Guide" :param dict body: Request parameter, used to create new array object. :rtype: tuple (int, dict) :return: Tuple with HTTP status code and dict with request result. For example: (201, {'key':'value'}). Second value may be None if 3PAR array returns no message body. """ return self._query(url, 'POST', json=body) def delete(self, url): """ Perform HTTP DELETE request to HPE 3PAR array. :param str url: URL address. Base part of url address is generated automatically and you should not care about it. Example of valid url: 'system' or 'volumes'. All available url's, request parameters and results are described in "HPE 3PAR Web Services API Developer's Guide" :return: Tuple with HTTP status code and dict with request result. For example: (200, {'key':'value'}). Second value may be None if 3PAR array returns no message body. """ return self._query(url, 'DELETE') def put(self, url, body): """ Perform HTTP PUT request to HPE 3PAR array. :param str url: URL address. Base part of url address is generated automatically and you should not care about it. Example of valid url: 'system' or 'volumes'. All available url's, request parameters and results are described in "HPE 3PAR Web Services API Developer's Guide" :param dict body: Request parameter, used to modify array object. :rtype: tuple(int, dict) :return: Tuple with HTTP status code and dict with request result. For example: (200, {'key':'value'}). Second value may be None if 3PAR array returns no message body. """ return self._query(url, 'PUT', json=body) def _set_timeout(self, timeout): if isinstance(timeout, (float, int)): self._timeout = (timeout, timeout) elif isinstance(timeout, tuple): self._timeout = timeout def _get_timeout(self): return self._timeout timeout = property(_get_timeout, _set_timeout) """ :var float|tuple timeout: Number of seconds that Rest API client waits for response from HPE StoreServ before timeout exception generation. You can use different timeouts for connection setup and for getting first piece of data. In this case, you should use tuple(float, float) with first value - connection timeout and the second value - read timeout. Or if you want to use same values for both type of timeouts, you can use one float value. 'None' value can be used instead to wait forever for a device response. Default value: (1, None) """ @property def _base_url(self): """ Generate static part of URL. :rtype: str :return: Static part of URL """ # URL Protocol proto = 'https' if self._ssl else 'http' # Device port number if self._port is None: port = 8080 if self._ssl else 8008 else: port = self._port return f'{proto}://{self._address}:{port}/api/v1' def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): if self._key is not None: self.close() class AuthError(Exception): """ Authentification error """
if __name__ == "__main__": pass
random_line_split
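A minimal usage sketch for the StoreServ REST client in the row above. The constructor arguments (address, username, password, ssl) and the module name in the import are assumptions inferred from the private attributes read by open() and _base_url(); the (status, data) return shape, the query filter, and the with-block support come from the code itself.

# Hypothetical usage of the StoreServ client defined above.
# Assumption: StoreServ(address, username, password, ssl=...) mirrors the private
# attributes (_address, _username, _password, _ssl, _port) the class reads internally.
from storeserv_client import StoreServ  # hypothetical module name

array = StoreServ('10.0.0.1', 'apiuser', 'secret', ssl=True)
array.timeout = (1, 30)  # (connection timeout, read timeout) in seconds

# __enter__/__exit__ are implemented, so the session is closed automatically.
with array as session:
    session.open()                              # POST /credentials, stores the session key
    status, data = session.get('system')        # plain GET
    if status == 200:
        print(data)
    status, vols = session.get('volumes', query='name EQ myvol')                       # WSAPI query filter
    status, result = session.post('volumes', body={'name': 'myvol', 'sizeMiB': 1024})  # illustrative body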
repository_repository.py
# -*- coding: utf-8 -*- import re import os from os.path import isfile from os import environ, path from odoo import api, exceptions, fields, models, _, service, tools import subprocess import logging from os.path import isdir as is_dir import shutil from datetime import datetime from odoo import release from odoo.tools import config from os.path import join as path_join, isdir import sys import urllib.parse _logger = logging.getLogger(__name__) import time import sys import subprocess from datetime import datetime, timedelta from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT try: from git import Repo, Git, cmd from git.exc import InvalidGitRepositoryError, GitCommandError, UnmergedEntriesError, CheckoutError except Exception as ex: subprocess.check_call([sys.executable, "-m", "pip", "install", 'GitPython']) nlist_path = [] list_of_addons_paths = tools.config['addons_path'].split(",") for path in list_of_addons_paths: nlist_path.append((path, path)) class RepositoryRepository(models.TransientModel): _name = 'repository.repository' _rec_name = 'source' path = fields.Char('Path', readonly=True, states={'draft': [('readonly', False)]}) source = fields.Char('Source', readonly=True, states={'draft': [('readonly', False)]}) branch = fields.Char('Branch', default=release.series, readonly=True, states={'draft': [('readonly', False)]}) rev_id = fields.Char('Last Revision', readonly=True) rev_date = fields.Datetime('Last Rev. Date', readonly=True) dirty = fields.Boolean('Dirty', readonly=True) module_ids = fields.Many2many('ir.module.module', string='Modules') module_count = fields.Integer('Modules') state = fields.Selection(string="Estado", selection=[('draft', 'Borrador'), ('cloned', 'Clonado'), ('enabled', 'Enabled'), ('disabled', 'Disabled')], default='draft', readonly=True,) addons_paths = fields.Selection(nlist_path, string="Add-ons Paths", help="Please choose one of these directories to put " "your module in", ) password = fields.Char(string='Password', required=False) user = fields.Char(string='User', required=False) log = fields.Char(string='Log', required=False) def log_(self, mensaje): now = datetime.now() self.write({'log': '\n' + str(now.strftime("%m/%d/%Y, %H:%M:%S")) + " " + str(mensaje) + " " + str(self.log)}) requiremet = fields.Char( string='Requiremet', required=False) def _compute_apps(self): module = self.env['ir.module.module'] curr_addons_path = set(config['addons_path'].split(',')) if self.path in curr_addons_path: self.state = 'enabled' if self.state == 'enabled': module_names = find_modules(self.path) self.module_ids = module.search([('name', 'in', module_names)]) self.module_count = len(self.module_ids) else: self.module_ids = False self.module_count = 0 def copy(self, default=None): raise exceptions.Warning(_("The repository cannot be cloned.")) def unlink(self): if self.env.context.get('remove_repository'): for rec in self: if rec.state == 'enabled': raise exceptions.Warning(_('Unable to remove an enabled repository.')) res = Git(self.path) res.load() res.remove() return super(RepositoryRepository, self).unlink() def action_open_modules(self): self.ensure_one() return { 'name': self.source, 'type': 'ir.actions.act_window', 'res_model': 'ir.module.module', 'view_type': 'form', 'view_mode': 'kanban,tree,form', 'target': 'current', 'domain': [('id', 'in', self.module_ids.ids)] } def install_requirements(self): try: requirement_file = self.path + '/requirements.txt' if os.path.exists(requirement_file): subprocess.check_call(["pip3", "install", "-r", requirement_file]) 
except Exception as e: log_("Exception exception occured: {}".format(e)) def action_enabled(self): self.ensure_one() if not self.env.user.has_group('base.group_system'): raise exceptions.AccessDenied addons_path = config['addons_path'].split(',') if config._is_addons_path(self.path) and self.path not in addons_path: addons_path.insert(0, self.path) config['addons_path'] = ','.join(addons_path) config.save() self.state = 'enabled' requirement_file = self.path + '/requiremet.txt' if os.path.exists(requirement_file): f = open(requirement_file, "r") self.requiremet = f.read() self.install_requirements() self._compute_apps() return self.env.ref( 'base.action_view_base_module_update').read()[0] def action_remove(self): self.ensure_one() if not self.env.user.has_group('base.group_system'): raise exceptions.AccessDenied try: self.with_context(remove_repository=True).unlink() except Exception as e: raise exceptions.Warning(_(" '%s':\n%s") % (self.source, e)) return {'type': 'ir.actions.act_window_close'} def restart(self): service.server.restart() def pull_all(): repo_ids = self.env['repository.repository'].search([]) for r in repo_ids: r.update() service.server.restart() def action_update(self): self.ensure_one() self.update() service.server.restart() def update(self): if not self.env.user.has_group('base.group_system'): raise exceptions.AccessDenied try: now_time = datetime.now() + timedelta(seconds=60) cron_obj = self.env['ir.cron'] res = Git(self.path, self.user, self.password) res.load() res.update(self.source) for l in res.log(): self.log_(l) # self.install_requirements() self._compute_apps() model_id = self.env['ir.model'].search( [('model', '=', 'ir.module.module')]) cron_data = { 'name': "Update Modules", 'code': 'model.upgrade_changed_checksum(%s)' % self.id, 'nextcall': now_time, 'numbercall': -1, 'user_id': self._uid, 'model_id': model_id.id, 'state': 'code', } cron = cron_obj.sudo().create(cron_data) except Exception as e: raise exceptions.Warning(_("'%s':\n%s") % (self.source, e)) def action_disable(self): self.ensure_one() self.state = 'disabled' if not self.env.user.has_group('base.group_system'): raise exceptions.AccessDenied addons_path = config['addons_path'].split(',') if self.path in addons_path: if self.module_ids.filtered(lambda r: r.state not in ( 'uninstalled', 'uninstallable')): raise exceptions.Warning( _('Some modules of this repository are installed.')) addons_path.remove(self.path) config['addons_path'] = ','.join(addons_path) config.save() def clone(self): self.state = 'cloned' self.ensure_one() self.path = path_join(self.addons_paths, re.compile(r'[^0-9a-zA-Z]+').sub('_', self.source + self.branch)) shutil.rmtree(self.path) try: res = Git(self.path) res.init(self.source, branch=self.branch, user=self.user, password=self.password) res.load() self.env.cr.commit() service.server.restart() except Exception as e: raise exceptions.Warning(_( "An error has occurred while Clone '%s':\n%s") % (self.source, e)) def _default_repository_ids(self): res = self.env['repository.repository'] for path in config['addons_path'].split(','): git = Git(path) if git.load(): data = git.info() result = res.search([('path', '=', data['path'])]) if not result: data.update({'state': 'enabled'}) result = res.create(data) result._compute_apps() self.env.cr.commit() def remove_finish_import_crons(self): model_id = self.env['ir.model'].search( [('model', '=', 'repository.repository')]) cron_ids = self.env['ir.cron'].search( [('model_id', '=', model_id.id)]) # Remove completed cron cron_ids.unlink() 
def find_modules(path): return [module for module in os.listdir(path) if any(map( lambda f: isfile(path_join(path, module, f)), ( '__manifest__.py', '__openerp__.py')))] class Git(): _source_http = None _source_git = None _repo = None _user = None _pass = None _path = None _output_list = [] def __init__(self, path=None, user=None, password=None): self._path = path self._user = user self._pass = password def remove(self): if self.is_initialized() and not self.is_clean(): raise exceptions.Warning(_("Error, Repository no clean.")) if self._path and is_dir(self._path): shutil.rmtree(self._path) def is_initialized(self): return not not self._repo def init(self, source, branch=None, user=None, password=None): self._user = user self._pass = password if not self.is_initialized(): if not self._user: self._repo = Repo.clone_from(source, self._path, **{ 'branch': branch, 'single-branch': True, 'depth': 1}) self._source_http = source else: source = source.replace('https://', '') source_git = "https://" + self._user + ":" + self._pass + "@" + source self._source_git=source_git self._repo = Repo.clone_from(source_git, self._path, **{ 'branch': branch, 'single-branch': True, 'depth': 1}) self._source_http = source_git def is_clean(self): return self.is_initialized() and not self._repo.is_dirty(untracked_files=True) def load(self, **kwargs): if not self._repo: if self._path and is_dir(path_join(self._path, '.git')):
else: return False def info(self): branch = self._repo.active_branch.name curr_rev = self._repo.rev_parse(branch) git = self.info_base() source = self._repo.remotes.origin.url.split('@') if len(source) > 1: source = "https://" + source[1] else: source = self._repo.remotes.origin.url return dict(git, **{ 'source': source, 'branch': branch, 'rev_id': curr_rev.hexsha, 'path': self._path, 'rev_date': datetime.fromtimestamp(curr_rev.committed_date), }) def info_base(self): return { 'path': self._path, 'source': None, 'branch': None, 'rev_id': None, 'rev_date': None, 'dirty': not self.is_clean(), } def log(self): return self._output_list def update(self,url): msg = '' old_env = {} ret_flag = True self._output_list.append(str(time.ctime()) + ": Checking for updates") if self.is_initialized(): branch = self._repo.active_branch remote_origin = self._repo.remotes.origin if remote_origin.exists(): try: self._repo.remote() git_cmd = cmd.Git(self._path) if not self._source_git: if self._user: source_git = "https://" + self._user + ":" + self._pass + "@" + url.replace('https://', '') self._source_git = source_git # if self._user and self._pass: # project_dir = os.path.dirname(os.path.abspath(__file__)) # # old_env = git_cmd.update_environment(SSH_ASKPASS=os.path.join(project_dir, 'askpass.py'), # # REPO_USERNAME=self._user, GIT_PASSWORD=self._passwd) # old_env = git_cmd.update_environment(SSH_ASKPASS=os.path.join(project_dir, 'askpass.py'), # REPO_USERNAME=self._user, REPO_PASSWORD=self._pass) # fetch_info = git_cmd.fetch(branch.name) git_cmd.reset('--hard') msg = git_cmd.pull(self._source_git,force=True) # branch.set_reference(fetch_info[0].ref.name) # restore the environment back to its previous state after operation. #if old_env: # git_cmd.update_environment(**old_env) # msg is '' or 'Updating ...' or 'Already up-to-date.' if you pulled successfully if msg: # encoding = 'utf-8', msg1 = msg.decode(encoding) to see if use here instead of msg! _logger.info(str(msg)) self._output_list.append(str(msg)) else: ret_flag = False except GitCommandError as exc: # after some tests we can cancel _logger.error of exc.stdout e exc.stdin because with # GIT_PYTHON_TRACE set to "full" the same output is written to logger. ret_flag = False if exc.stderr: self._output_list.append(exc.stderr.lstrip()) _logger.error('GitCommandError exception occured: {}'.format(exc.stderr.lstrip())) elif exc.stdout: self._output_list.append(exc.stdout.lstrip()) _logger.error('GitCommandError exception occured: {}'.format(exc.stdout.lstrip())) except InvalidGitRepositoryError as exc: ret_flag = False _logger.error('Invalid git repository: {}, {} '.format(self._repo_path, exc)) self._output_list.append(str('Invalid git repository: {}, {} '.format(self._repo_path, exc))) except CheckoutError as exc: ret_flag = False _logger.error("CheckoutError exception occured: {}".format(exc)) self._output_list.append("CheckoutError exception occured: {}".format(exc)) except UnmergedEntriesError as exc: ret_flag = False _logger.error("CheckouUnmergedEntriesError exception occured: {}".format(exc)) self._output_list.append("CheckouUnmergedEntriesError exception occured: {}".format(exc)) # except AssertionError as exc: # ret_flag = False # _logger.error("AssertionError exception occured: {}".format(exc)) else: ret_flag = False _logger.info('Remote repository \'origin\' doesn\'t exsist!') self._output_list.append('Remote repository \'origin\' doesn\'t exsist!') return ret_flag
self._repo = Repo(self._path) return True
conditional_block
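The Git wrapper in the row above is a thin layer over GitPython; a standalone sketch of the same clone/load/status calls follows, with the repository URL, branch, and target path as placeholders.

# Standalone sketch of the GitPython pattern used by the Git helper class above.
# URL, branch, and path are placeholders.
from os.path import isdir, join
from git import Repo

source = 'https://github.com/OCA/web.git'
branch = '14.0'
path = '/opt/odoo/custom_addons/oca_web_14_0'

if isdir(join(path, '.git')):
    # Equivalent of Git.load(): reuse an existing checkout.
    repo = Repo(path)
else:
    # Equivalent of Git.init(): shallow, single-branch clone, as in Repo.clone_from above.
    repo = Repo.clone_from(source, path, **{'branch': branch, 'single-branch': True, 'depth': 1})

print('dirty:', repo.is_dirty(untracked_files=True))
print('branch:', repo.active_branch.name)
print('rev:', repo.head.commit.hexsha)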
repository_repository.py
# -*- coding: utf-8 -*- import re import os from os.path import isfile from os import environ, path from odoo import api, exceptions, fields, models, _, service, tools import subprocess import logging from os.path import isdir as is_dir import shutil from datetime import datetime from odoo import release from odoo.tools import config from os.path import join as path_join, isdir import sys import urllib.parse _logger = logging.getLogger(__name__) import time import sys import subprocess from datetime import datetime, timedelta from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT try: from git import Repo, Git, cmd from git.exc import InvalidGitRepositoryError, GitCommandError, UnmergedEntriesError, CheckoutError except Exception as ex: subprocess.check_call([sys.executable, "-m", "pip", "install", 'GitPython']) nlist_path = [] list_of_addons_paths = tools.config['addons_path'].split(",") for path in list_of_addons_paths: nlist_path.append((path, path)) class RepositoryRepository(models.TransientModel): _name = 'repository.repository' _rec_name = 'source' path = fields.Char('Path', readonly=True, states={'draft': [('readonly', False)]}) source = fields.Char('Source', readonly=True, states={'draft': [('readonly', False)]}) branch = fields.Char('Branch', default=release.series, readonly=True, states={'draft': [('readonly', False)]}) rev_id = fields.Char('Last Revision', readonly=True) rev_date = fields.Datetime('Last Rev. Date', readonly=True) dirty = fields.Boolean('Dirty', readonly=True) module_ids = fields.Many2many('ir.module.module', string='Modules') module_count = fields.Integer('Modules') state = fields.Selection(string="Estado", selection=[('draft', 'Borrador'), ('cloned', 'Clonado'), ('enabled', 'Enabled'), ('disabled', 'Disabled')], default='draft', readonly=True,) addons_paths = fields.Selection(nlist_path, string="Add-ons Paths", help="Please choose one of these directories to put " "your module in", ) password = fields.Char(string='Password', required=False) user = fields.Char(string='User', required=False) log = fields.Char(string='Log', required=False) def log_(self, mensaje): now = datetime.now() self.write({'log': '\n' + str(now.strftime("%m/%d/%Y, %H:%M:%S")) + " " + str(mensaje) + " " + str(self.log)}) requiremet = fields.Char( string='Requiremet', required=False) def _compute_apps(self): module = self.env['ir.module.module'] curr_addons_path = set(config['addons_path'].split(',')) if self.path in curr_addons_path: self.state = 'enabled' if self.state == 'enabled': module_names = find_modules(self.path) self.module_ids = module.search([('name', 'in', module_names)]) self.module_count = len(self.module_ids) else: self.module_ids = False self.module_count = 0 def copy(self, default=None): raise exceptions.Warning(_("The repository cannot be cloned.")) def unlink(self): if self.env.context.get('remove_repository'): for rec in self: if rec.state == 'enabled': raise exceptions.Warning(_('Unable to remove an enabled repository.')) res = Git(self.path) res.load() res.remove() return super(RepositoryRepository, self).unlink() def action_open_modules(self): self.ensure_one() return { 'name': self.source, 'type': 'ir.actions.act_window',
'target': 'current', 'domain': [('id', 'in', self.module_ids.ids)] } def install_requirements(self): try: requirement_file = self.path + '/requirements.txt' if os.path.exists(requirement_file): subprocess.check_call(["pip3", "install", "-r", requirement_file]) except Exception as e: log_("Exception exception occured: {}".format(e)) def action_enabled(self): self.ensure_one() if not self.env.user.has_group('base.group_system'): raise exceptions.AccessDenied addons_path = config['addons_path'].split(',') if config._is_addons_path(self.path) and self.path not in addons_path: addons_path.insert(0, self.path) config['addons_path'] = ','.join(addons_path) config.save() self.state = 'enabled' requirement_file = self.path + '/requiremet.txt' if os.path.exists(requirement_file): f = open(requirement_file, "r") self.requiremet = f.read() self.install_requirements() self._compute_apps() return self.env.ref( 'base.action_view_base_module_update').read()[0] def action_remove(self): self.ensure_one() if not self.env.user.has_group('base.group_system'): raise exceptions.AccessDenied try: self.with_context(remove_repository=True).unlink() except Exception as e: raise exceptions.Warning(_(" '%s':\n%s") % (self.source, e)) return {'type': 'ir.actions.act_window_close'} def restart(self): service.server.restart() def pull_all(): repo_ids = self.env['repository.repository'].search([]) for r in repo_ids: r.update() service.server.restart() def action_update(self): self.ensure_one() self.update() service.server.restart() def update(self): if not self.env.user.has_group('base.group_system'): raise exceptions.AccessDenied try: now_time = datetime.now() + timedelta(seconds=60) cron_obj = self.env['ir.cron'] res = Git(self.path, self.user, self.password) res.load() res.update(self.source) for l in res.log(): self.log_(l) # self.install_requirements() self._compute_apps() model_id = self.env['ir.model'].search( [('model', '=', 'ir.module.module')]) cron_data = { 'name': "Update Modules", 'code': 'model.upgrade_changed_checksum(%s)' % self.id, 'nextcall': now_time, 'numbercall': -1, 'user_id': self._uid, 'model_id': model_id.id, 'state': 'code', } cron = cron_obj.sudo().create(cron_data) except Exception as e: raise exceptions.Warning(_("'%s':\n%s") % (self.source, e)) def action_disable(self): self.ensure_one() self.state = 'disabled' if not self.env.user.has_group('base.group_system'): raise exceptions.AccessDenied addons_path = config['addons_path'].split(',') if self.path in addons_path: if self.module_ids.filtered(lambda r: r.state not in ( 'uninstalled', 'uninstallable')): raise exceptions.Warning( _('Some modules of this repository are installed.')) addons_path.remove(self.path) config['addons_path'] = ','.join(addons_path) config.save() def clone(self): self.state = 'cloned' self.ensure_one() self.path = path_join(self.addons_paths, re.compile(r'[^0-9a-zA-Z]+').sub('_', self.source + self.branch)) shutil.rmtree(self.path) try: res = Git(self.path) res.init(self.source, branch=self.branch, user=self.user, password=self.password) res.load() self.env.cr.commit() service.server.restart() except Exception as e: raise exceptions.Warning(_( "An error has occurred while Clone '%s':\n%s") % (self.source, e)) def _default_repository_ids(self): res = self.env['repository.repository'] for path in config['addons_path'].split(','): git = Git(path) if git.load(): data = git.info() result = res.search([('path', '=', data['path'])]) if not result: data.update({'state': 'enabled'}) result = res.create(data) 
result._compute_apps() self.env.cr.commit() def remove_finish_import_crons(self): model_id = self.env['ir.model'].search( [('model', '=', 'repository.repository')]) cron_ids = self.env['ir.cron'].search( [('model_id', '=', model_id.id)]) # Remove completed cron cron_ids.unlink() def find_modules(path): return [module for module in os.listdir(path) if any(map( lambda f: isfile(path_join(path, module, f)), ( '__manifest__.py', '__openerp__.py')))] class Git(): _source_http = None _source_git = None _repo = None _user = None _pass = None _path = None _output_list = [] def __init__(self, path=None, user=None, password=None): self._path = path self._user = user self._pass = password def remove(self): if self.is_initialized() and not self.is_clean(): raise exceptions.Warning(_("Error, Repository no clean.")) if self._path and is_dir(self._path): shutil.rmtree(self._path) def is_initialized(self): return not not self._repo def init(self, source, branch=None, user=None, password=None): self._user = user self._pass = password if not self.is_initialized(): if not self._user: self._repo = Repo.clone_from(source, self._path, **{ 'branch': branch, 'single-branch': True, 'depth': 1}) self._source_http = source else: source = source.replace('https://', '') source_git = "https://" + self._user + ":" + self._pass + "@" + source self._source_git=source_git self._repo = Repo.clone_from(source_git, self._path, **{ 'branch': branch, 'single-branch': True, 'depth': 1}) self._source_http = source_git def is_clean(self): return self.is_initialized() and not self._repo.is_dirty(untracked_files=True) def load(self, **kwargs): if not self._repo: if self._path and is_dir(path_join(self._path, '.git')): self._repo = Repo(self._path) return True else: return False def info(self): branch = self._repo.active_branch.name curr_rev = self._repo.rev_parse(branch) git = self.info_base() source = self._repo.remotes.origin.url.split('@') if len(source) > 1: source = "https://" + source[1] else: source = self._repo.remotes.origin.url return dict(git, **{ 'source': source, 'branch': branch, 'rev_id': curr_rev.hexsha, 'path': self._path, 'rev_date': datetime.fromtimestamp(curr_rev.committed_date), }) def info_base(self): return { 'path': self._path, 'source': None, 'branch': None, 'rev_id': None, 'rev_date': None, 'dirty': not self.is_clean(), } def log(self): return self._output_list def update(self,url): msg = '' old_env = {} ret_flag = True self._output_list.append(str(time.ctime()) + ": Checking for updates") if self.is_initialized(): branch = self._repo.active_branch remote_origin = self._repo.remotes.origin if remote_origin.exists(): try: self._repo.remote() git_cmd = cmd.Git(self._path) if not self._source_git: if self._user: source_git = "https://" + self._user + ":" + self._pass + "@" + url.replace('https://', '') self._source_git = source_git # if self._user and self._pass: # project_dir = os.path.dirname(os.path.abspath(__file__)) # # old_env = git_cmd.update_environment(SSH_ASKPASS=os.path.join(project_dir, 'askpass.py'), # # REPO_USERNAME=self._user, GIT_PASSWORD=self._passwd) # old_env = git_cmd.update_environment(SSH_ASKPASS=os.path.join(project_dir, 'askpass.py'), # REPO_USERNAME=self._user, REPO_PASSWORD=self._pass) # fetch_info = git_cmd.fetch(branch.name) git_cmd.reset('--hard') msg = git_cmd.pull(self._source_git,force=True) # branch.set_reference(fetch_info[0].ref.name) # restore the environment back to its previous state after operation. 
#if old_env: # git_cmd.update_environment(**old_env) # msg is '' or 'Updating ...' or 'Already up-to-date.' if you pulled successfully if msg: # encoding = 'utf-8', msg1 = msg.decode(encoding) to see if use here instead of msg! _logger.info(str(msg)) self._output_list.append(str(msg)) else: ret_flag = False except GitCommandError as exc: # after some tests we can cancel _logger.error of exc.stdout e exc.stdin because with # GIT_PYTHON_TRACE set to "full" the same output is written to logger. ret_flag = False if exc.stderr: self._output_list.append(exc.stderr.lstrip()) _logger.error('GitCommandError exception occured: {}'.format(exc.stderr.lstrip())) elif exc.stdout: self._output_list.append(exc.stdout.lstrip()) _logger.error('GitCommandError exception occured: {}'.format(exc.stdout.lstrip())) except InvalidGitRepositoryError as exc: ret_flag = False _logger.error('Invalid git repository: {}, {} '.format(self._repo_path, exc)) self._output_list.append(str('Invalid git repository: {}, {} '.format(self._repo_path, exc))) except CheckoutError as exc: ret_flag = False _logger.error("CheckoutError exception occured: {}".format(exc)) self._output_list.append("CheckoutError exception occured: {}".format(exc)) except UnmergedEntriesError as exc: ret_flag = False _logger.error("CheckouUnmergedEntriesError exception occured: {}".format(exc)) self._output_list.append("CheckouUnmergedEntriesError exception occured: {}".format(exc)) # except AssertionError as exc: # ret_flag = False # _logger.error("AssertionError exception occured: {}".format(exc)) else: ret_flag = False _logger.info('Remote repository \'origin\' doesn\'t exsist!') self._output_list.append('Remote repository \'origin\' doesn\'t exsist!') return ret_flag
'res_model': 'ir.module.module', 'view_type': 'form', 'view_mode': 'kanban,tree,form',
random_line_split
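action_enabled() and action_disable() in the rows above only add or remove the repository path in the comma-separated addons_path entry of the Odoo config and then save it; the list handling itself is plain Python, sketched here with an ordinary dict standing in for odoo.tools.config.

# Sketch of the addons_path bookkeeping done by action_enabled()/action_disable().
# A plain dict stands in for odoo.tools.config; inside Odoo you would import
# config from odoo.tools and call config.save() after mutating it.
config = {'addons_path': '/usr/lib/python3/dist-packages/odoo/addons,/opt/odoo/addons'}

def enable(path):
    parts = config['addons_path'].split(',')
    if path not in parts:
        parts.insert(0, path)  # new repositories are prepended, as in action_enabled()
        config['addons_path'] = ','.join(parts)

def disable(path):
    parts = config['addons_path'].split(',')
    if path in parts:
        parts.remove(path)
        config['addons_path'] = ','.join(parts)

enable('/opt/odoo/custom_addons/oca_web_14_0')  # placeholder path
print(config['addons_path'])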
repository_repository.py
# -*- coding: utf-8 -*- import re import os from os.path import isfile from os import environ, path from odoo import api, exceptions, fields, models, _, service, tools import subprocess import logging from os.path import isdir as is_dir import shutil from datetime import datetime from odoo import release from odoo.tools import config from os.path import join as path_join, isdir import sys import urllib.parse _logger = logging.getLogger(__name__) import time import sys import subprocess from datetime import datetime, timedelta from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT try: from git import Repo, Git, cmd from git.exc import InvalidGitRepositoryError, GitCommandError, UnmergedEntriesError, CheckoutError except Exception as ex: subprocess.check_call([sys.executable, "-m", "pip", "install", 'GitPython']) nlist_path = [] list_of_addons_paths = tools.config['addons_path'].split(",") for path in list_of_addons_paths: nlist_path.append((path, path)) class RepositoryRepository(models.TransientModel): _name = 'repository.repository' _rec_name = 'source' path = fields.Char('Path', readonly=True, states={'draft': [('readonly', False)]}) source = fields.Char('Source', readonly=True, states={'draft': [('readonly', False)]}) branch = fields.Char('Branch', default=release.series, readonly=True, states={'draft': [('readonly', False)]}) rev_id = fields.Char('Last Revision', readonly=True) rev_date = fields.Datetime('Last Rev. Date', readonly=True) dirty = fields.Boolean('Dirty', readonly=True) module_ids = fields.Many2many('ir.module.module', string='Modules') module_count = fields.Integer('Modules') state = fields.Selection(string="Estado", selection=[('draft', 'Borrador'), ('cloned', 'Clonado'), ('enabled', 'Enabled'), ('disabled', 'Disabled')], default='draft', readonly=True,) addons_paths = fields.Selection(nlist_path, string="Add-ons Paths", help="Please choose one of these directories to put " "your module in", ) password = fields.Char(string='Password', required=False) user = fields.Char(string='User', required=False) log = fields.Char(string='Log', required=False) def log_(self, mensaje): now = datetime.now() self.write({'log': '\n' + str(now.strftime("%m/%d/%Y, %H:%M:%S")) + " " + str(mensaje) + " " + str(self.log)}) requiremet = fields.Char( string='Requiremet', required=False) def _compute_apps(self): module = self.env['ir.module.module'] curr_addons_path = set(config['addons_path'].split(',')) if self.path in curr_addons_path: self.state = 'enabled' if self.state == 'enabled': module_names = find_modules(self.path) self.module_ids = module.search([('name', 'in', module_names)]) self.module_count = len(self.module_ids) else: self.module_ids = False self.module_count = 0 def copy(self, default=None): raise exceptions.Warning(_("The repository cannot be cloned.")) def unlink(self): if self.env.context.get('remove_repository'): for rec in self: if rec.state == 'enabled': raise exceptions.Warning(_('Unable to remove an enabled repository.')) res = Git(self.path) res.load() res.remove() return super(RepositoryRepository, self).unlink() def action_open_modules(self): self.ensure_one() return { 'name': self.source, 'type': 'ir.actions.act_window', 'res_model': 'ir.module.module', 'view_type': 'form', 'view_mode': 'kanban,tree,form', 'target': 'current', 'domain': [('id', 'in', self.module_ids.ids)] } def install_requirements(self): try: requirement_file = self.path + '/requirements.txt' if os.path.exists(requirement_file): subprocess.check_call(["pip3", "install", "-r", requirement_file]) 
except Exception as e: log_("Exception exception occured: {}".format(e)) def action_enabled(self): self.ensure_one() if not self.env.user.has_group('base.group_system'): raise exceptions.AccessDenied addons_path = config['addons_path'].split(',') if config._is_addons_path(self.path) and self.path not in addons_path: addons_path.insert(0, self.path) config['addons_path'] = ','.join(addons_path) config.save() self.state = 'enabled' requirement_file = self.path + '/requiremet.txt' if os.path.exists(requirement_file): f = open(requirement_file, "r") self.requiremet = f.read() self.install_requirements() self._compute_apps() return self.env.ref( 'base.action_view_base_module_update').read()[0] def action_remove(self): self.ensure_one() if not self.env.user.has_group('base.group_system'): raise exceptions.AccessDenied try: self.with_context(remove_repository=True).unlink() except Exception as e: raise exceptions.Warning(_(" '%s':\n%s") % (self.source, e)) return {'type': 'ir.actions.act_window_close'} def restart(self): service.server.restart() def pull_all(): repo_ids = self.env['repository.repository'].search([]) for r in repo_ids: r.update() service.server.restart() def action_update(self): self.ensure_one() self.update() service.server.restart() def update(self): if not self.env.user.has_group('base.group_system'): raise exceptions.AccessDenied try: now_time = datetime.now() + timedelta(seconds=60) cron_obj = self.env['ir.cron'] res = Git(self.path, self.user, self.password) res.load() res.update(self.source) for l in res.log(): self.log_(l) # self.install_requirements() self._compute_apps() model_id = self.env['ir.model'].search( [('model', '=', 'ir.module.module')]) cron_data = { 'name': "Update Modules", 'code': 'model.upgrade_changed_checksum(%s)' % self.id, 'nextcall': now_time, 'numbercall': -1, 'user_id': self._uid, 'model_id': model_id.id, 'state': 'code', } cron = cron_obj.sudo().create(cron_data) except Exception as e: raise exceptions.Warning(_("'%s':\n%s") % (self.source, e)) def action_disable(self): self.ensure_one() self.state = 'disabled' if not self.env.user.has_group('base.group_system'): raise exceptions.AccessDenied addons_path = config['addons_path'].split(',') if self.path in addons_path: if self.module_ids.filtered(lambda r: r.state not in ( 'uninstalled', 'uninstallable')): raise exceptions.Warning( _('Some modules of this repository are installed.')) addons_path.remove(self.path) config['addons_path'] = ','.join(addons_path) config.save() def clone(self): self.state = 'cloned' self.ensure_one() self.path = path_join(self.addons_paths, re.compile(r'[^0-9a-zA-Z]+').sub('_', self.source + self.branch)) shutil.rmtree(self.path) try: res = Git(self.path) res.init(self.source, branch=self.branch, user=self.user, password=self.password) res.load() self.env.cr.commit() service.server.restart() except Exception as e: raise exceptions.Warning(_( "An error has occurred while Clone '%s':\n%s") % (self.source, e)) def _default_repository_ids(self): res = self.env['repository.repository'] for path in config['addons_path'].split(','): git = Git(path) if git.load(): data = git.info() result = res.search([('path', '=', data['path'])]) if not result: data.update({'state': 'enabled'}) result = res.create(data) result._compute_apps() self.env.cr.commit() def remove_finish_import_crons(self): model_id = self.env['ir.model'].search( [('model', '=', 'repository.repository')]) cron_ids = self.env['ir.cron'].search( [('model_id', '=', model_id.id)]) # Remove completed cron cron_ids.unlink() 
def
(path): return [module for module in os.listdir(path) if any(map( lambda f: isfile(path_join(path, module, f)), ( '__manifest__.py', '__openerp__.py')))] class Git(): _source_http = None _source_git = None _repo = None _user = None _pass = None _path = None _output_list = [] def __init__(self, path=None, user=None, password=None): self._path = path self._user = user self._pass = password def remove(self): if self.is_initialized() and not self.is_clean(): raise exceptions.Warning(_("Error, Repository no clean.")) if self._path and is_dir(self._path): shutil.rmtree(self._path) def is_initialized(self): return not not self._repo def init(self, source, branch=None, user=None, password=None): self._user = user self._pass = password if not self.is_initialized(): if not self._user: self._repo = Repo.clone_from(source, self._path, **{ 'branch': branch, 'single-branch': True, 'depth': 1}) self._source_http = source else: source = source.replace('https://', '') source_git = "https://" + self._user + ":" + self._pass + "@" + source self._source_git=source_git self._repo = Repo.clone_from(source_git, self._path, **{ 'branch': branch, 'single-branch': True, 'depth': 1}) self._source_http = source_git def is_clean(self): return self.is_initialized() and not self._repo.is_dirty(untracked_files=True) def load(self, **kwargs): if not self._repo: if self._path and is_dir(path_join(self._path, '.git')): self._repo = Repo(self._path) return True else: return False def info(self): branch = self._repo.active_branch.name curr_rev = self._repo.rev_parse(branch) git = self.info_base() source = self._repo.remotes.origin.url.split('@') if len(source) > 1: source = "https://" + source[1] else: source = self._repo.remotes.origin.url return dict(git, **{ 'source': source, 'branch': branch, 'rev_id': curr_rev.hexsha, 'path': self._path, 'rev_date': datetime.fromtimestamp(curr_rev.committed_date), }) def info_base(self): return { 'path': self._path, 'source': None, 'branch': None, 'rev_id': None, 'rev_date': None, 'dirty': not self.is_clean(), } def log(self): return self._output_list def update(self,url): msg = '' old_env = {} ret_flag = True self._output_list.append(str(time.ctime()) + ": Checking for updates") if self.is_initialized(): branch = self._repo.active_branch remote_origin = self._repo.remotes.origin if remote_origin.exists(): try: self._repo.remote() git_cmd = cmd.Git(self._path) if not self._source_git: if self._user: source_git = "https://" + self._user + ":" + self._pass + "@" + url.replace('https://', '') self._source_git = source_git # if self._user and self._pass: # project_dir = os.path.dirname(os.path.abspath(__file__)) # # old_env = git_cmd.update_environment(SSH_ASKPASS=os.path.join(project_dir, 'askpass.py'), # # REPO_USERNAME=self._user, GIT_PASSWORD=self._passwd) # old_env = git_cmd.update_environment(SSH_ASKPASS=os.path.join(project_dir, 'askpass.py'), # REPO_USERNAME=self._user, REPO_PASSWORD=self._pass) # fetch_info = git_cmd.fetch(branch.name) git_cmd.reset('--hard') msg = git_cmd.pull(self._source_git,force=True) # branch.set_reference(fetch_info[0].ref.name) # restore the environment back to its previous state after operation. #if old_env: # git_cmd.update_environment(**old_env) # msg is '' or 'Updating ...' or 'Already up-to-date.' if you pulled successfully if msg: # encoding = 'utf-8', msg1 = msg.decode(encoding) to see if use here instead of msg! 
_logger.info(str(msg)) self._output_list.append(str(msg)) else: ret_flag = False except GitCommandError as exc: # after some tests we can cancel _logger.error of exc.stdout e exc.stdin because with # GIT_PYTHON_TRACE set to "full" the same output is written to logger. ret_flag = False if exc.stderr: self._output_list.append(exc.stderr.lstrip()) _logger.error('GitCommandError exception occured: {}'.format(exc.stderr.lstrip())) elif exc.stdout: self._output_list.append(exc.stdout.lstrip()) _logger.error('GitCommandError exception occured: {}'.format(exc.stdout.lstrip())) except InvalidGitRepositoryError as exc: ret_flag = False _logger.error('Invalid git repository: {}, {} '.format(self._repo_path, exc)) self._output_list.append(str('Invalid git repository: {}, {} '.format(self._repo_path, exc))) except CheckoutError as exc: ret_flag = False _logger.error("CheckoutError exception occured: {}".format(exc)) self._output_list.append("CheckoutError exception occured: {}".format(exc)) except UnmergedEntriesError as exc: ret_flag = False _logger.error("CheckouUnmergedEntriesError exception occured: {}".format(exc)) self._output_list.append("CheckouUnmergedEntriesError exception occured: {}".format(exc)) # except AssertionError as exc: # ret_flag = False # _logger.error("AssertionError exception occured: {}".format(exc)) else: ret_flag = False _logger.info('Remote repository \'origin\' doesn\'t exsist!') self._output_list.append('Remote repository \'origin\' doesn\'t exsist!') return ret_flag
find_modules
identifier_name
repository_repository.py
# -*- coding: utf-8 -*- import re import os from os.path import isfile from os import environ, path from odoo import api, exceptions, fields, models, _, service, tools import subprocess import logging from os.path import isdir as is_dir import shutil from datetime import datetime from odoo import release from odoo.tools import config from os.path import join as path_join, isdir import sys import urllib.parse _logger = logging.getLogger(__name__) import time import sys import subprocess from datetime import datetime, timedelta from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT try: from git import Repo, Git, cmd from git.exc import InvalidGitRepositoryError, GitCommandError, UnmergedEntriesError, CheckoutError except Exception as ex: subprocess.check_call([sys.executable, "-m", "pip", "install", 'GitPython']) nlist_path = [] list_of_addons_paths = tools.config['addons_path'].split(",") for path in list_of_addons_paths: nlist_path.append((path, path)) class RepositoryRepository(models.TransientModel): _name = 'repository.repository' _rec_name = 'source' path = fields.Char('Path', readonly=True, states={'draft': [('readonly', False)]}) source = fields.Char('Source', readonly=True, states={'draft': [('readonly', False)]}) branch = fields.Char('Branch', default=release.series, readonly=True, states={'draft': [('readonly', False)]}) rev_id = fields.Char('Last Revision', readonly=True) rev_date = fields.Datetime('Last Rev. Date', readonly=True) dirty = fields.Boolean('Dirty', readonly=True) module_ids = fields.Many2many('ir.module.module', string='Modules') module_count = fields.Integer('Modules') state = fields.Selection(string="Estado", selection=[('draft', 'Borrador'), ('cloned', 'Clonado'), ('enabled', 'Enabled'), ('disabled', 'Disabled')], default='draft', readonly=True,) addons_paths = fields.Selection(nlist_path, string="Add-ons Paths", help="Please choose one of these directories to put " "your module in", ) password = fields.Char(string='Password', required=False) user = fields.Char(string='User', required=False) log = fields.Char(string='Log', required=False) def log_(self, mensaje): now = datetime.now() self.write({'log': '\n' + str(now.strftime("%m/%d/%Y, %H:%M:%S")) + " " + str(mensaje) + " " + str(self.log)}) requiremet = fields.Char( string='Requiremet', required=False) def _compute_apps(self): module = self.env['ir.module.module'] curr_addons_path = set(config['addons_path'].split(',')) if self.path in curr_addons_path: self.state = 'enabled' if self.state == 'enabled': module_names = find_modules(self.path) self.module_ids = module.search([('name', 'in', module_names)]) self.module_count = len(self.module_ids) else: self.module_ids = False self.module_count = 0 def copy(self, default=None): raise exceptions.Warning(_("The repository cannot be cloned.")) def unlink(self): if self.env.context.get('remove_repository'): for rec in self: if rec.state == 'enabled': raise exceptions.Warning(_('Unable to remove an enabled repository.')) res = Git(self.path) res.load() res.remove() return super(RepositoryRepository, self).unlink() def action_open_modules(self): self.ensure_one() return { 'name': self.source, 'type': 'ir.actions.act_window', 'res_model': 'ir.module.module', 'view_type': 'form', 'view_mode': 'kanban,tree,form', 'target': 'current', 'domain': [('id', 'in', self.module_ids.ids)] } def install_requirements(self): try: requirement_file = self.path + '/requirements.txt' if os.path.exists(requirement_file): subprocess.check_call(["pip3", "install", "-r", requirement_file]) 
except Exception as e: log_("Exception exception occured: {}".format(e)) def action_enabled(self): self.ensure_one() if not self.env.user.has_group('base.group_system'): raise exceptions.AccessDenied addons_path = config['addons_path'].split(',') if config._is_addons_path(self.path) and self.path not in addons_path: addons_path.insert(0, self.path) config['addons_path'] = ','.join(addons_path) config.save() self.state = 'enabled' requirement_file = self.path + '/requiremet.txt' if os.path.exists(requirement_file): f = open(requirement_file, "r") self.requiremet = f.read() self.install_requirements() self._compute_apps() return self.env.ref( 'base.action_view_base_module_update').read()[0] def action_remove(self): self.ensure_one() if not self.env.user.has_group('base.group_system'): raise exceptions.AccessDenied try: self.with_context(remove_repository=True).unlink() except Exception as e: raise exceptions.Warning(_(" '%s':\n%s") % (self.source, e)) return {'type': 'ir.actions.act_window_close'} def restart(self): service.server.restart() def pull_all(): repo_ids = self.env['repository.repository'].search([]) for r in repo_ids: r.update() service.server.restart() def action_update(self): self.ensure_one() self.update() service.server.restart() def update(self): if not self.env.user.has_group('base.group_system'): raise exceptions.AccessDenied try: now_time = datetime.now() + timedelta(seconds=60) cron_obj = self.env['ir.cron'] res = Git(self.path, self.user, self.password) res.load() res.update(self.source) for l in res.log(): self.log_(l) # self.install_requirements() self._compute_apps() model_id = self.env['ir.model'].search( [('model', '=', 'ir.module.module')]) cron_data = { 'name': "Update Modules", 'code': 'model.upgrade_changed_checksum(%s)' % self.id, 'nextcall': now_time, 'numbercall': -1, 'user_id': self._uid, 'model_id': model_id.id, 'state': 'code', } cron = cron_obj.sudo().create(cron_data) except Exception as e: raise exceptions.Warning(_("'%s':\n%s") % (self.source, e)) def action_disable(self): self.ensure_one() self.state = 'disabled' if not self.env.user.has_group('base.group_system'): raise exceptions.AccessDenied addons_path = config['addons_path'].split(',') if self.path in addons_path: if self.module_ids.filtered(lambda r: r.state not in ( 'uninstalled', 'uninstallable')): raise exceptions.Warning( _('Some modules of this repository are installed.')) addons_path.remove(self.path) config['addons_path'] = ','.join(addons_path) config.save() def clone(self): self.state = 'cloned' self.ensure_one() self.path = path_join(self.addons_paths, re.compile(r'[^0-9a-zA-Z]+').sub('_', self.source + self.branch)) shutil.rmtree(self.path) try: res = Git(self.path) res.init(self.source, branch=self.branch, user=self.user, password=self.password) res.load() self.env.cr.commit() service.server.restart() except Exception as e: raise exceptions.Warning(_( "An error has occurred while Clone '%s':\n%s") % (self.source, e)) def _default_repository_ids(self): res = self.env['repository.repository'] for path in config['addons_path'].split(','): git = Git(path) if git.load(): data = git.info() result = res.search([('path', '=', data['path'])]) if not result: data.update({'state': 'enabled'}) result = res.create(data) result._compute_apps() self.env.cr.commit() def remove_finish_import_crons(self): model_id = self.env['ir.model'].search( [('model', '=', 'repository.repository')]) cron_ids = self.env['ir.cron'].search( [('model_id', '=', model_id.id)]) # Remove completed cron cron_ids.unlink() 
def find_modules(path):
class Git(): _source_http = None _source_git = None _repo = None _user = None _pass = None _path = None _output_list = [] def __init__(self, path=None, user=None, password=None): self._path = path self._user = user self._pass = password def remove(self): if self.is_initialized() and not self.is_clean(): raise exceptions.Warning(_("Error, Repository no clean.")) if self._path and is_dir(self._path): shutil.rmtree(self._path) def is_initialized(self): return not not self._repo def init(self, source, branch=None, user=None, password=None): self._user = user self._pass = password if not self.is_initialized(): if not self._user: self._repo = Repo.clone_from(source, self._path, **{ 'branch': branch, 'single-branch': True, 'depth': 1}) self._source_http = source else: source = source.replace('https://', '') source_git = "https://" + self._user + ":" + self._pass + "@" + source self._source_git=source_git self._repo = Repo.clone_from(source_git, self._path, **{ 'branch': branch, 'single-branch': True, 'depth': 1}) self._source_http = source_git def is_clean(self): return self.is_initialized() and not self._repo.is_dirty(untracked_files=True) def load(self, **kwargs): if not self._repo: if self._path and is_dir(path_join(self._path, '.git')): self._repo = Repo(self._path) return True else: return False def info(self): branch = self._repo.active_branch.name curr_rev = self._repo.rev_parse(branch) git = self.info_base() source = self._repo.remotes.origin.url.split('@') if len(source) > 1: source = "https://" + source[1] else: source = self._repo.remotes.origin.url return dict(git, **{ 'source': source, 'branch': branch, 'rev_id': curr_rev.hexsha, 'path': self._path, 'rev_date': datetime.fromtimestamp(curr_rev.committed_date), }) def info_base(self): return { 'path': self._path, 'source': None, 'branch': None, 'rev_id': None, 'rev_date': None, 'dirty': not self.is_clean(), } def log(self): return self._output_list def update(self,url): msg = '' old_env = {} ret_flag = True self._output_list.append(str(time.ctime()) + ": Checking for updates") if self.is_initialized(): branch = self._repo.active_branch remote_origin = self._repo.remotes.origin if remote_origin.exists(): try: self._repo.remote() git_cmd = cmd.Git(self._path) if not self._source_git: if self._user: source_git = "https://" + self._user + ":" + self._pass + "@" + url.replace('https://', '') self._source_git = source_git # if self._user and self._pass: # project_dir = os.path.dirname(os.path.abspath(__file__)) # # old_env = git_cmd.update_environment(SSH_ASKPASS=os.path.join(project_dir, 'askpass.py'), # # REPO_USERNAME=self._user, GIT_PASSWORD=self._passwd) # old_env = git_cmd.update_environment(SSH_ASKPASS=os.path.join(project_dir, 'askpass.py'), # REPO_USERNAME=self._user, REPO_PASSWORD=self._pass) # fetch_info = git_cmd.fetch(branch.name) git_cmd.reset('--hard') msg = git_cmd.pull(self._source_git,force=True) # branch.set_reference(fetch_info[0].ref.name) # restore the environment back to its previous state after operation. #if old_env: # git_cmd.update_environment(**old_env) # msg is '' or 'Updating ...' or 'Already up-to-date.' if you pulled successfully if msg: # encoding = 'utf-8', msg1 = msg.decode(encoding) to see if use here instead of msg! _logger.info(str(msg)) self._output_list.append(str(msg)) else: ret_flag = False except GitCommandError as exc: # after some tests we can cancel _logger.error of exc.stdout e exc.stdin because with # GIT_PYTHON_TRACE set to "full" the same output is written to logger. 
ret_flag = False if exc.stderr: self._output_list.append(exc.stderr.lstrip()) _logger.error('GitCommandError exception occured: {}'.format(exc.stderr.lstrip())) elif exc.stdout: self._output_list.append(exc.stdout.lstrip()) _logger.error('GitCommandError exception occured: {}'.format(exc.stdout.lstrip())) except InvalidGitRepositoryError as exc: ret_flag = False _logger.error('Invalid git repository: {}, {} '.format(self._repo_path, exc)) self._output_list.append(str('Invalid git repository: {}, {} '.format(self._repo_path, exc))) except CheckoutError as exc: ret_flag = False _logger.error("CheckoutError exception occured: {}".format(exc)) self._output_list.append("CheckoutError exception occured: {}".format(exc)) except UnmergedEntriesError as exc: ret_flag = False _logger.error("CheckouUnmergedEntriesError exception occured: {}".format(exc)) self._output_list.append("CheckouUnmergedEntriesError exception occured: {}".format(exc)) # except AssertionError as exc: # ret_flag = False # _logger.error("AssertionError exception occured: {}".format(exc)) else: ret_flag = False _logger.info('Remote repository \'origin\' doesn\'t exsist!') self._output_list.append('Remote repository \'origin\' doesn\'t exsist!') return ret_flag
return [module for module in os.listdir(path) if any(map( lambda f: isfile(path_join(path, module, f)), ( '__manifest__.py', '__openerp__.py')))]
identifier_body
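find_modules() in the row above treats any direct subdirectory that contains a __manifest__.py or __openerp__.py as an Odoo module; the same check can be run standalone (the addons directory is a placeholder).

# Standalone version of the manifest-detection idea behind find_modules().
import os
from os.path import isfile, join

def find_modules(path):
    return [module for module in os.listdir(path)
            if any(isfile(join(path, module, f))
                   for f in ('__manifest__.py', '__openerp__.py'))]

print(find_modules('/opt/odoo/custom_addons/oca_web_14_0'))  # placeholder path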
mod.rs
use std::{ alloc::Layout, array::TryFromSliceError, borrow::BorrowMut, cell::{Cell, UnsafeCell}, collections::HashMap, ffi::OsStr, fs, io::{self, Read, Write}, mem, ops::{Deref, DerefMut}, path::Path, sync::{Arc, Mutex, MutexGuard},
use anyhow::{anyhow, Result}; use bevy_ecs::{ ComponentId, DynamicFetch, DynamicFetchResult, DynamicQuery, DynamicSystem, EntityBuilder, QueryAccess, StatefulQuery, TypeAccess, TypeInfo, World, }; use bincode::DefaultOptions; use fs::OpenOptions; use io::IoSlice; use mem::ManuallyDrop; use quill::ecs::TypeLayout; use wasmer::{ import_namespace, imports, Array, FromToNativeWasmType, Function, HostEnvInitError, Instance, LazyInit, Memory, Module, NativeFunc, Store, Type, ValueType, WasmPtr, WasmTypeList, WasmerEnv, JIT, LLVM, }; use wasmer_wasi::WasiState; use serde::{de::DeserializeOwned, Deserialize, Serialize}; #[derive(Default)] struct PluginEnv<S> { memory: LazyInit<Memory>, buffer_reserve: LazyInit<NativeFunc<(WasmPtr<RawBuffer>, u32)>>, rpcs: Arc<Mutex<HashMap<String, Box<dyn Fn(&mut Buffer, &PluginEnv<S>) -> Result<()> + Send>>>>, state: Arc<Mutex<S>>, layouts: Arc<Mutex<Layouts>>, } impl<S: Send + Sync + 'static> Clone for PluginEnv<S> { fn clone(&self) -> Self { Self { memory: self.memory.clone(), buffer_reserve: self.buffer_reserve.clone(), rpcs: self.rpcs.clone(), state: self.state.clone(), layouts: Default::default(), } } } impl<S: Send + Sync + 'static> WasmerEnv for PluginEnv<S> { fn init_with_instance(&mut self, instance: &Instance) -> Result<(), HostEnvInitError> { let memory = instance.exports.get_memory("memory")?; self.memory.initialize(memory.clone()); self.buffer_reserve.initialize( instance .exports .get_native_function("__quill_buffer_reserve")?, ); Ok(()) } } impl<S: Send + Sync + 'static> PluginEnv<S> { fn memory(&self) -> &Memory { // TODO: handle errors. self.memory.get_ref().unwrap() } fn buffer_reserve(&self) -> &NativeFunc<(WasmPtr<RawBuffer>, u32)> { self.buffer_reserve.get_ref().unwrap() } fn buffer(&self, raw: WasmPtr<RawBuffer>) -> Buffer { Buffer { memory: self.memory(), reserve: self.buffer_reserve(), raw, } } fn add_rpc< 'a, Args: Serialize + DeserializeOwned + 'static, R: Serialize + DeserializeOwned + 'static, >( &mut self, name: &str, callback: fn(&PluginEnv<S>, Args) -> R, ) -> Result<()> { self.rpcs .lock() .map_err(|_| anyhow!("could not lock rpcs"))? .insert( name.to_owned(), Box::new(move |mut buffer: &mut Buffer, env: &PluginEnv<S>| { let (_, args): (String, Args) = bincode::deserialize(buffer.as_slice()).unwrap(); let result = callback(env, args); buffer.clear(); bincode::serialize_into(buffer, &result).unwrap(); Ok(()) }), ); Ok(()) } fn call<Args: Serialize, R: DeserializeOwned>(&self, name: &str, args: Args) -> Result<R> { // TODO: requires access to buffer. todo!() } } pub struct Plugin { instance: Instance, env: PluginEnv<World>, } impl Plugin { pub fn load<P: AsRef<Path>>(path: P) -> Result<Self> { let mut env = PluginEnv::default(); let store = Store::new(&JIT::new(LLVM::default()).engine()); let module = Module::from_file(&store, &path)?; let mut wasi_env = WasiState::new( path.as_ref() .file_name() .and_then(OsStr::to_str) .unwrap_or("unkown"), ) .finalize()?; let mut import_object = wasi_env.import_object(&module)?; import_object.register( "env", import_namespace!({ "__quill_host_call" => Function::new_native_with_env(&store, env.clone(), __quill_host_call), }), ); // env.add_rpc("players_push", |state, player: String| state.push(player))?; // // TODO: Return reference to state? 
// env.add_rpc("players", |state, ()| state.clone())?; env.add_rpc("world_spawn", |env, entity: quill::ecs::Entity| { let mut world = env.state.lock().unwrap(); let mut layouts = env.layouts.lock().unwrap(); let mut builder = EntityBuilder::new(); for (layout, data) in entity.components { builder.add_dynamic( TypeInfo::of_external( layouts.external_id(&layout), Layout::new::<Vec<u8>>(), |_| (), ), data.as_slice(), ); } world.spawn(builder.build()); })?; env.add_rpc( "world_query", // TODO: world should not be the state but union(world, layouts) |env, access: quill::ecs::QueryAccess| { let world = env.state.lock().unwrap(); let mut layouts = env.layouts.lock().unwrap(); let query = access.query(&mut layouts).unwrap(); let access = Default::default(); let mut query: StatefulQuery<DynamicQuery, DynamicQuery> = StatefulQuery::new(&world, &access, query); for entity in query.iter_mut() { entity.immutable; entity.mutable; } }, )?; let instance = Instance::new(&module, &import_object)?; let start = instance.exports.get_function("_start")?; start.call(&[])?; Ok(Plugin { instance, env }) } } #[derive(Default)] pub struct Layouts { layouts: HashMap<quill::ecs::TypeLayout, u64>, } impl Layouts { pub fn component_id(&mut self, layout: &TypeLayout) -> ComponentId { ComponentId::ExternalId(self.external_id(layout)) } pub fn external_id(&mut self, layout: &TypeLayout) -> u64 { if let Some(component_id) = self.layouts.get(&layout) { *component_id } else { let next = self.layouts.len() as u64; self.layouts.insert(layout.clone(), next); next } } } trait IntoBevyAccess { fn access(&self, layouts: &mut Layouts) -> Result<QueryAccess>; fn component_ids(&self) -> Result<Vec<ComponentId>>; fn query(&self, layouts: &mut Layouts) -> Result<DynamicQuery>; } impl IntoBevyAccess for quill::ecs::QueryAccess { fn access(&self, layouts: &mut Layouts) -> Result<QueryAccess> { use quill::ecs::QueryAccess::*; Ok(match self { None => QueryAccess::None, Read(layout) => QueryAccess::Read(layouts.component_id(layout), "??"), Write(layout) => QueryAccess::Write(layouts.component_id(layout), "??"), Optional(access) => { QueryAccess::optional(IntoBevyAccess::access(access.as_ref(), layouts)?) 
} With(layout, access) => QueryAccess::With( layouts.component_id(layout), Box::new(IntoBevyAccess::access(access.as_ref(), layouts)?), ), Without(layout, access) => QueryAccess::Without( layouts.component_id(layout), Box::new(IntoBevyAccess::access(access.as_ref(), layouts)?), ), Union(accesses) => QueryAccess::Union( accesses .into_iter() .map(|access| IntoBevyAccess::access(access, layouts)) .collect::<Result<Vec<QueryAccess>>>()?, ), }) } fn component_ids(&self) -> Result<Vec<ComponentId>> { todo!() } fn query(&self, layouts: &mut Layouts) -> Result<DynamicQuery> { let mut query = DynamicQuery::default(); query.access = self.access(layouts)?; // TODO: TypeInfo Ok(query) } } struct Buffer<'a> { memory: &'a Memory, // fn reserve(ptr: WasmPtr<u8, Array>, cap: u32, len: u32, additional: u32) reserve: &'a NativeFunc<(WasmPtr<RawBuffer>, u32)>, raw: WasmPtr<RawBuffer>, } #[repr(C)] #[derive(Debug, Clone, Copy)] struct RawBuffer { ptr: WasmPtr<u8, Array>, cap: u32, len: u32, } unsafe impl ValueType for RawBuffer {} impl<'a> Buffer<'a> { fn reserve(&mut self, additional: u32) { let raw = self.raw.deref(self.memory).unwrap().get(); if raw.cap < raw.len + additional { self.reserve.call(self.raw, additional).unwrap(); } } fn clear(&mut self) { let raw_cell = self.raw.deref(self.memory).unwrap(); raw_cell.set(RawBuffer { len: 0, ..raw_cell.get() }) } fn push(&mut self, byte: u8) { self.extend_from_slice(&[byte]); } fn extend_from_slice(&mut self, other: &[u8]) { self.reserve(other.len() as u32); let raw_cell = self.raw.deref(self.memory).unwrap(); let raw = raw_cell.get(); raw.ptr .deref(self.memory, raw.len, raw.cap) .unwrap() .into_iter() .zip(other.iter()) .for_each(|(cell, value)| cell.set(*value)); raw_cell.set(RawBuffer { len: raw.len + other.len() as u32, ..raw }); } fn as_slice(&self) -> &[u8] { self } } impl<'a> Write for Buffer<'a> { #[inline] fn write(&mut self, buf: &[u8]) -> io::Result<usize> { self.extend_from_slice(buf); Ok(buf.len()) } #[inline] fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { let len = bufs.iter().map(|b| b.len() as u32).sum(); self.reserve(len); for buf in bufs { self.extend_from_slice(buf); } Ok(len as usize) } #[inline] fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { self.extend_from_slice(buf); Ok(()) } #[inline] fn flush(&mut self) -> io::Result<()> { Ok(()) } } impl<'a> Deref for Buffer<'a> { type Target = [u8]; fn deref(&self) -> &Self::Target { let raw = self.raw.deref(self.memory).unwrap().get(); unsafe { mem::transmute(raw.ptr.deref(self.memory, 0, raw.len).unwrap()) } } } impl<'a> AsRef<[u8]> for Buffer<'a> { fn as_ref(&self) -> &[u8] { self } } fn __quill_host_call(env: &PluginEnv<World>, buffer_raw: WasmPtr<RawBuffer>) { let mut buffer = env.buffer(buffer_raw); let name: String = bincode::deserialize_from(buffer.as_slice()).unwrap(); let rpcs = env.rpcs.lock().unwrap(); let rpc = rpcs.get(&name).unwrap(); rpc(&mut buffer, env).unwrap(); }
todo, u32, vec, };
random_line_split
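The `Layouts` table in the mod.rs row above interns each plugin-supplied `TypeLayout` as a stable, monotonically increasing `u64`, so the host can refer to external component types by number. A minimal, self-contained sketch of that interning pattern follows; it substitutes a plain `String` key for `quill::ecs::TypeLayout`, which is an assumption made purely for illustration.

use std::collections::HashMap;

// Interns keys to stable, monotonically increasing ids, mirroring
// how `Layouts::external_id` hands out external component ids.
#[derive(Default)]
struct Interner {
    ids: HashMap<String, u64>,
}

impl Interner {
    fn id(&mut self, key: &str) -> u64 {
        if let Some(id) = self.ids.get(key) {
            *id
        } else {
            let next = self.ids.len() as u64;
            self.ids.insert(key.to_owned(), next);
            next
        }
    }
}

fn main() {
    let mut layouts = Interner::default();
    assert_eq!(layouts.id("Position"), 0);
    assert_eq!(layouts.id("Velocity"), 1);
    // Asking for a known key returns the id assigned the first time.
    assert_eq!(layouts.id("Position"), 0);
}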
mod.rs
use std::{ alloc::Layout, array::TryFromSliceError, borrow::BorrowMut, cell::{Cell, UnsafeCell}, collections::HashMap, ffi::OsStr, fs, io::{self, Read, Write}, mem, ops::{Deref, DerefMut}, path::Path, sync::{Arc, Mutex, MutexGuard}, todo, u32, vec, }; use anyhow::{anyhow, Result}; use bevy_ecs::{ ComponentId, DynamicFetch, DynamicFetchResult, DynamicQuery, DynamicSystem, EntityBuilder, QueryAccess, StatefulQuery, TypeAccess, TypeInfo, World, }; use bincode::DefaultOptions; use fs::OpenOptions; use io::IoSlice; use mem::ManuallyDrop; use quill::ecs::TypeLayout; use wasmer::{ import_namespace, imports, Array, FromToNativeWasmType, Function, HostEnvInitError, Instance, LazyInit, Memory, Module, NativeFunc, Store, Type, ValueType, WasmPtr, WasmTypeList, WasmerEnv, JIT, LLVM, }; use wasmer_wasi::WasiState; use serde::{de::DeserializeOwned, Deserialize, Serialize}; #[derive(Default)] struct PluginEnv<S> { memory: LazyInit<Memory>, buffer_reserve: LazyInit<NativeFunc<(WasmPtr<RawBuffer>, u32)>>, rpcs: Arc<Mutex<HashMap<String, Box<dyn Fn(&mut Buffer, &PluginEnv<S>) -> Result<()> + Send>>>>, state: Arc<Mutex<S>>, layouts: Arc<Mutex<Layouts>>, } impl<S: Send + Sync + 'static> Clone for PluginEnv<S> { fn clone(&self) -> Self { Self { memory: self.memory.clone(), buffer_reserve: self.buffer_reserve.clone(), rpcs: self.rpcs.clone(), state: self.state.clone(), layouts: Default::default(), } } } impl<S: Send + Sync + 'static> WasmerEnv for PluginEnv<S> { fn init_with_instance(&mut self, instance: &Instance) -> Result<(), HostEnvInitError> { let memory = instance.exports.get_memory("memory")?; self.memory.initialize(memory.clone()); self.buffer_reserve.initialize( instance .exports .get_native_function("__quill_buffer_reserve")?, ); Ok(()) } } impl<S: Send + Sync + 'static> PluginEnv<S> { fn memory(&self) -> &Memory { // TODO: handle errors. self.memory.get_ref().unwrap() } fn buffer_reserve(&self) -> &NativeFunc<(WasmPtr<RawBuffer>, u32)> { self.buffer_reserve.get_ref().unwrap() } fn buffer(&self, raw: WasmPtr<RawBuffer>) -> Buffer { Buffer { memory: self.memory(), reserve: self.buffer_reserve(), raw, } } fn add_rpc< 'a, Args: Serialize + DeserializeOwned + 'static, R: Serialize + DeserializeOwned + 'static, >( &mut self, name: &str, callback: fn(&PluginEnv<S>, Args) -> R, ) -> Result<()> { self.rpcs .lock() .map_err(|_| anyhow!("could not lock rpcs"))? .insert( name.to_owned(), Box::new(move |mut buffer: &mut Buffer, env: &PluginEnv<S>| { let (_, args): (String, Args) = bincode::deserialize(buffer.as_slice()).unwrap(); let result = callback(env, args); buffer.clear(); bincode::serialize_into(buffer, &result).unwrap(); Ok(()) }), ); Ok(()) } fn call<Args: Serialize, R: DeserializeOwned>(&self, name: &str, args: Args) -> Result<R> { // TODO: requires access to buffer. todo!() } } pub struct Plugin { instance: Instance, env: PluginEnv<World>, } impl Plugin { pub fn load<P: AsRef<Path>>(path: P) -> Result<Self> { let mut env = PluginEnv::default(); let store = Store::new(&JIT::new(LLVM::default()).engine()); let module = Module::from_file(&store, &path)?; let mut wasi_env = WasiState::new( path.as_ref() .file_name() .and_then(OsStr::to_str) .unwrap_or("unkown"), ) .finalize()?; let mut import_object = wasi_env.import_object(&module)?; import_object.register( "env", import_namespace!({ "__quill_host_call" => Function::new_native_with_env(&store, env.clone(), __quill_host_call), }), ); // env.add_rpc("players_push", |state, player: String| state.push(player))?; // // TODO: Return reference to state? 
// env.add_rpc("players", |state, ()| state.clone())?; env.add_rpc("world_spawn", |env, entity: quill::ecs::Entity| { let mut world = env.state.lock().unwrap(); let mut layouts = env.layouts.lock().unwrap(); let mut builder = EntityBuilder::new(); for (layout, data) in entity.components { builder.add_dynamic( TypeInfo::of_external( layouts.external_id(&layout), Layout::new::<Vec<u8>>(), |_| (), ), data.as_slice(), ); } world.spawn(builder.build()); })?; env.add_rpc( "world_query", // TODO: world should not be the state but union(world, layouts) |env, access: quill::ecs::QueryAccess| { let world = env.state.lock().unwrap(); let mut layouts = env.layouts.lock().unwrap(); let query = access.query(&mut layouts).unwrap(); let access = Default::default(); let mut query: StatefulQuery<DynamicQuery, DynamicQuery> = StatefulQuery::new(&world, &access, query); for entity in query.iter_mut() { entity.immutable; entity.mutable; } }, )?; let instance = Instance::new(&module, &import_object)?; let start = instance.exports.get_function("_start")?; start.call(&[])?; Ok(Plugin { instance, env }) } } #[derive(Default)] pub struct Layouts { layouts: HashMap<quill::ecs::TypeLayout, u64>, } impl Layouts { pub fn component_id(&mut self, layout: &TypeLayout) -> ComponentId { ComponentId::ExternalId(self.external_id(layout)) } pub fn external_id(&mut self, layout: &TypeLayout) -> u64 { if let Some(component_id) = self.layouts.get(&layout) { *component_id } else { let next = self.layouts.len() as u64; self.layouts.insert(layout.clone(), next); next } } } trait IntoBevyAccess { fn access(&self, layouts: &mut Layouts) -> Result<QueryAccess>; fn component_ids(&self) -> Result<Vec<ComponentId>>; fn query(&self, layouts: &mut Layouts) -> Result<DynamicQuery>; } impl IntoBevyAccess for quill::ecs::QueryAccess { fn access(&self, layouts: &mut Layouts) -> Result<QueryAccess> { use quill::ecs::QueryAccess::*; Ok(match self { None => QueryAccess::None, Read(layout) => QueryAccess::Read(layouts.component_id(layout), "??"), Write(layout) => QueryAccess::Write(layouts.component_id(layout), "??"), Optional(access) => { QueryAccess::optional(IntoBevyAccess::access(access.as_ref(), layouts)?) 
} With(layout, access) => QueryAccess::With( layouts.component_id(layout), Box::new(IntoBevyAccess::access(access.as_ref(), layouts)?), ), Without(layout, access) => QueryAccess::Without( layouts.component_id(layout), Box::new(IntoBevyAccess::access(access.as_ref(), layouts)?), ), Union(accesses) => QueryAccess::Union( accesses .into_iter() .map(|access| IntoBevyAccess::access(access, layouts)) .collect::<Result<Vec<QueryAccess>>>()?, ), }) } fn component_ids(&self) -> Result<Vec<ComponentId>> { todo!() } fn query(&self, layouts: &mut Layouts) -> Result<DynamicQuery> { let mut query = DynamicQuery::default(); query.access = self.access(layouts)?; // TODO: TypeInfo Ok(query) } } struct Buffer<'a> { memory: &'a Memory, // fn reserve(ptr: WasmPtr<u8, Array>, cap: u32, len: u32, additional: u32) reserve: &'a NativeFunc<(WasmPtr<RawBuffer>, u32)>, raw: WasmPtr<RawBuffer>, } #[repr(C)] #[derive(Debug, Clone, Copy)] struct RawBuffer { ptr: WasmPtr<u8, Array>, cap: u32, len: u32, } unsafe impl ValueType for RawBuffer {} impl<'a> Buffer<'a> { fn reserve(&mut self, additional: u32) { let raw = self.raw.deref(self.memory).unwrap().get(); if raw.cap < raw.len + additional { self.reserve.call(self.raw, additional).unwrap(); } } fn clear(&mut self) { let raw_cell = self.raw.deref(self.memory).unwrap(); raw_cell.set(RawBuffer { len: 0, ..raw_cell.get() }) } fn push(&mut self, byte: u8) { self.extend_from_slice(&[byte]); } fn extend_from_slice(&mut self, other: &[u8]) { self.reserve(other.len() as u32); let raw_cell = self.raw.deref(self.memory).unwrap(); let raw = raw_cell.get(); raw.ptr .deref(self.memory, raw.len, raw.cap) .unwrap() .into_iter() .zip(other.iter()) .for_each(|(cell, value)| cell.set(*value)); raw_cell.set(RawBuffer { len: raw.len + other.len() as u32, ..raw }); } fn as_slice(&self) -> &[u8] { self } } impl<'a> Write for Buffer<'a> { #[inline] fn
(&mut self, buf: &[u8]) -> io::Result<usize> { self.extend_from_slice(buf); Ok(buf.len()) } #[inline] fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { let len = bufs.iter().map(|b| b.len() as u32).sum(); self.reserve(len); for buf in bufs { self.extend_from_slice(buf); } Ok(len as usize) } #[inline] fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { self.extend_from_slice(buf); Ok(()) } #[inline] fn flush(&mut self) -> io::Result<()> { Ok(()) } } impl<'a> Deref for Buffer<'a> { type Target = [u8]; fn deref(&self) -> &Self::Target { let raw = self.raw.deref(self.memory).unwrap().get(); unsafe { mem::transmute(raw.ptr.deref(self.memory, 0, raw.len).unwrap()) } } } impl<'a> AsRef<[u8]> for Buffer<'a> { fn as_ref(&self) -> &[u8] { self } } fn __quill_host_call(env: &PluginEnv<World>, buffer_raw: WasmPtr<RawBuffer>) { let mut buffer = env.buffer(buffer_raw); let name: String = bincode::deserialize_from(buffer.as_slice()).unwrap(); let rpcs = env.rpcs.lock().unwrap(); let rpc = rpcs.get(&name).unwrap(); rpc(&mut buffer, env).unwrap(); }
write
identifier_name
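The RPC plumbing in the same file serializes a call name plus arguments into a shared buffer with bincode, lets the host-side handler run, and then overwrites the buffer with the serialized result. The sketch below replays that round-trip with an in-memory `Vec<u8>` standing in for the wasm-backed `Buffer`; the `Args` and `Reply` types and the "players_push" name are illustrative assumptions.

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct Args {
    player: String,
}

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Reply {
    accepted: bool,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Guest side: encode (rpc name, args) into the shared buffer.
    let mut buffer: Vec<u8> = Vec::new();
    let call = ("players_push".to_string(), Args { player: "alice".into() });
    bincode::serialize_into(&mut buffer, &call)?;

    // Host side: decode the call, run the handler, overwrite the buffer with the result.
    let (name, args): (String, Args) = bincode::deserialize(&buffer)?;
    assert_eq!(name, "players_push");
    let reply = Reply { accepted: !args.player.is_empty() };
    buffer.clear();
    bincode::serialize_into(&mut buffer, &reply)?;

    // Guest side: read the reply back out of the same buffer.
    let echoed: Reply = bincode::deserialize(&buffer)?;
    assert_eq!(echoed, Reply { accepted: true });
    Ok(())
}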
mod.rs
use std::{ alloc::Layout, array::TryFromSliceError, borrow::BorrowMut, cell::{Cell, UnsafeCell}, collections::HashMap, ffi::OsStr, fs, io::{self, Read, Write}, mem, ops::{Deref, DerefMut}, path::Path, sync::{Arc, Mutex, MutexGuard}, todo, u32, vec, }; use anyhow::{anyhow, Result}; use bevy_ecs::{ ComponentId, DynamicFetch, DynamicFetchResult, DynamicQuery, DynamicSystem, EntityBuilder, QueryAccess, StatefulQuery, TypeAccess, TypeInfo, World, }; use bincode::DefaultOptions; use fs::OpenOptions; use io::IoSlice; use mem::ManuallyDrop; use quill::ecs::TypeLayout; use wasmer::{ import_namespace, imports, Array, FromToNativeWasmType, Function, HostEnvInitError, Instance, LazyInit, Memory, Module, NativeFunc, Store, Type, ValueType, WasmPtr, WasmTypeList, WasmerEnv, JIT, LLVM, }; use wasmer_wasi::WasiState; use serde::{de::DeserializeOwned, Deserialize, Serialize}; #[derive(Default)] struct PluginEnv<S> { memory: LazyInit<Memory>, buffer_reserve: LazyInit<NativeFunc<(WasmPtr<RawBuffer>, u32)>>, rpcs: Arc<Mutex<HashMap<String, Box<dyn Fn(&mut Buffer, &PluginEnv<S>) -> Result<()> + Send>>>>, state: Arc<Mutex<S>>, layouts: Arc<Mutex<Layouts>>, } impl<S: Send + Sync + 'static> Clone for PluginEnv<S> { fn clone(&self) -> Self { Self { memory: self.memory.clone(), buffer_reserve: self.buffer_reserve.clone(), rpcs: self.rpcs.clone(), state: self.state.clone(), layouts: Default::default(), } } } impl<S: Send + Sync + 'static> WasmerEnv for PluginEnv<S> { fn init_with_instance(&mut self, instance: &Instance) -> Result<(), HostEnvInitError> { let memory = instance.exports.get_memory("memory")?; self.memory.initialize(memory.clone()); self.buffer_reserve.initialize( instance .exports .get_native_function("__quill_buffer_reserve")?, ); Ok(()) } } impl<S: Send + Sync + 'static> PluginEnv<S> { fn memory(&self) -> &Memory { // TODO: handle errors. self.memory.get_ref().unwrap() } fn buffer_reserve(&self) -> &NativeFunc<(WasmPtr<RawBuffer>, u32)> { self.buffer_reserve.get_ref().unwrap() } fn buffer(&self, raw: WasmPtr<RawBuffer>) -> Buffer { Buffer { memory: self.memory(), reserve: self.buffer_reserve(), raw, } } fn add_rpc< 'a, Args: Serialize + DeserializeOwned + 'static, R: Serialize + DeserializeOwned + 'static, >( &mut self, name: &str, callback: fn(&PluginEnv<S>, Args) -> R, ) -> Result<()> { self.rpcs .lock() .map_err(|_| anyhow!("could not lock rpcs"))? .insert( name.to_owned(), Box::new(move |mut buffer: &mut Buffer, env: &PluginEnv<S>| { let (_, args): (String, Args) = bincode::deserialize(buffer.as_slice()).unwrap(); let result = callback(env, args); buffer.clear(); bincode::serialize_into(buffer, &result).unwrap(); Ok(()) }), ); Ok(()) } fn call<Args: Serialize, R: DeserializeOwned>(&self, name: &str, args: Args) -> Result<R> { // TODO: requires access to buffer. todo!() } } pub struct Plugin { instance: Instance, env: PluginEnv<World>, } impl Plugin { pub fn load<P: AsRef<Path>>(path: P) -> Result<Self> { let mut env = PluginEnv::default(); let store = Store::new(&JIT::new(LLVM::default()).engine()); let module = Module::from_file(&store, &path)?; let mut wasi_env = WasiState::new( path.as_ref() .file_name() .and_then(OsStr::to_str) .unwrap_or("unkown"), ) .finalize()?; let mut import_object = wasi_env.import_object(&module)?; import_object.register( "env", import_namespace!({ "__quill_host_call" => Function::new_native_with_env(&store, env.clone(), __quill_host_call), }), ); // env.add_rpc("players_push", |state, player: String| state.push(player))?; // // TODO: Return reference to state? 
// env.add_rpc("players", |state, ()| state.clone())?; env.add_rpc("world_spawn", |env, entity: quill::ecs::Entity| { let mut world = env.state.lock().unwrap(); let mut layouts = env.layouts.lock().unwrap(); let mut builder = EntityBuilder::new(); for (layout, data) in entity.components { builder.add_dynamic( TypeInfo::of_external( layouts.external_id(&layout), Layout::new::<Vec<u8>>(), |_| (), ), data.as_slice(), ); } world.spawn(builder.build()); })?; env.add_rpc( "world_query", // TODO: world should not be the state but union(world, layouts) |env, access: quill::ecs::QueryAccess| { let world = env.state.lock().unwrap(); let mut layouts = env.layouts.lock().unwrap(); let query = access.query(&mut layouts).unwrap(); let access = Default::default(); let mut query: StatefulQuery<DynamicQuery, DynamicQuery> = StatefulQuery::new(&world, &access, query); for entity in query.iter_mut() { entity.immutable; entity.mutable; } }, )?; let instance = Instance::new(&module, &import_object)?; let start = instance.exports.get_function("_start")?; start.call(&[])?; Ok(Plugin { instance, env }) } } #[derive(Default)] pub struct Layouts { layouts: HashMap<quill::ecs::TypeLayout, u64>, } impl Layouts { pub fn component_id(&mut self, layout: &TypeLayout) -> ComponentId { ComponentId::ExternalId(self.external_id(layout)) } pub fn external_id(&mut self, layout: &TypeLayout) -> u64 { if let Some(component_id) = self.layouts.get(&layout) { *component_id } else { let next = self.layouts.len() as u64; self.layouts.insert(layout.clone(), next); next } } } trait IntoBevyAccess { fn access(&self, layouts: &mut Layouts) -> Result<QueryAccess>; fn component_ids(&self) -> Result<Vec<ComponentId>>; fn query(&self, layouts: &mut Layouts) -> Result<DynamicQuery>; } impl IntoBevyAccess for quill::ecs::QueryAccess { fn access(&self, layouts: &mut Layouts) -> Result<QueryAccess> { use quill::ecs::QueryAccess::*; Ok(match self { None => QueryAccess::None, Read(layout) => QueryAccess::Read(layouts.component_id(layout), "??"), Write(layout) => QueryAccess::Write(layouts.component_id(layout), "??"), Optional(access) => { QueryAccess::optional(IntoBevyAccess::access(access.as_ref(), layouts)?) } With(layout, access) => QueryAccess::With( layouts.component_id(layout), Box::new(IntoBevyAccess::access(access.as_ref(), layouts)?), ), Without(layout, access) => QueryAccess::Without( layouts.component_id(layout), Box::new(IntoBevyAccess::access(access.as_ref(), layouts)?), ), Union(accesses) => QueryAccess::Union( accesses .into_iter() .map(|access| IntoBevyAccess::access(access, layouts)) .collect::<Result<Vec<QueryAccess>>>()?, ), }) } fn component_ids(&self) -> Result<Vec<ComponentId>> { todo!() } fn query(&self, layouts: &mut Layouts) -> Result<DynamicQuery>
} struct Buffer<'a> { memory: &'a Memory, // fn reserve(ptr: WasmPtr<u8, Array>, cap: u32, len: u32, additional: u32) reserve: &'a NativeFunc<(WasmPtr<RawBuffer>, u32)>, raw: WasmPtr<RawBuffer>, } #[repr(C)] #[derive(Debug, Clone, Copy)] struct RawBuffer { ptr: WasmPtr<u8, Array>, cap: u32, len: u32, } unsafe impl ValueType for RawBuffer {} impl<'a> Buffer<'a> { fn reserve(&mut self, additional: u32) { let raw = self.raw.deref(self.memory).unwrap().get(); if raw.cap < raw.len + additional { self.reserve.call(self.raw, additional).unwrap(); } } fn clear(&mut self) { let raw_cell = self.raw.deref(self.memory).unwrap(); raw_cell.set(RawBuffer { len: 0, ..raw_cell.get() }) } fn push(&mut self, byte: u8) { self.extend_from_slice(&[byte]); } fn extend_from_slice(&mut self, other: &[u8]) { self.reserve(other.len() as u32); let raw_cell = self.raw.deref(self.memory).unwrap(); let raw = raw_cell.get(); raw.ptr .deref(self.memory, raw.len, raw.cap) .unwrap() .into_iter() .zip(other.iter()) .for_each(|(cell, value)| cell.set(*value)); raw_cell.set(RawBuffer { len: raw.len + other.len() as u32, ..raw }); } fn as_slice(&self) -> &[u8] { self } } impl<'a> Write for Buffer<'a> { #[inline] fn write(&mut self, buf: &[u8]) -> io::Result<usize> { self.extend_from_slice(buf); Ok(buf.len()) } #[inline] fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> { let len = bufs.iter().map(|b| b.len() as u32).sum(); self.reserve(len); for buf in bufs { self.extend_from_slice(buf); } Ok(len as usize) } #[inline] fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { self.extend_from_slice(buf); Ok(()) } #[inline] fn flush(&mut self) -> io::Result<()> { Ok(()) } } impl<'a> Deref for Buffer<'a> { type Target = [u8]; fn deref(&self) -> &Self::Target { let raw = self.raw.deref(self.memory).unwrap().get(); unsafe { mem::transmute(raw.ptr.deref(self.memory, 0, raw.len).unwrap()) } } } impl<'a> AsRef<[u8]> for Buffer<'a> { fn as_ref(&self) -> &[u8] { self } } fn __quill_host_call(env: &PluginEnv<World>, buffer_raw: WasmPtr<RawBuffer>) { let mut buffer = env.buffer(buffer_raw); let name: String = bincode::deserialize_from(buffer.as_slice()).unwrap(); let rpcs = env.rpcs.lock().unwrap(); let rpc = rpcs.get(&name).unwrap(); rpc(&mut buffer, env).unwrap(); }
{ let mut query = DynamicQuery::default(); query.access = self.access(layouts)?; // TODO: TypeInfo
Ok(query) }
identifier_body
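`IntoBevyAccess::access` above walks a plugin-side query-access tree and rebuilds it with host-side component ids. A stripped-down sketch of that recursive conversion is given below; the `PluginAccess`/`HostAccess` enums are simplified stand-ins (no With/Without variants) rather than the real quill or bevy_ecs types.

use std::collections::HashMap;

enum PluginAccess {
    None,
    Read(String),
    Optional(Box<PluginAccess>),
    Union(Vec<PluginAccess>),
}

#[derive(Debug, PartialEq)]
enum HostAccess {
    None,
    Read(u64),
    Optional(Box<HostAccess>),
    Union(Vec<HostAccess>),
}

// Walks the plugin-side tree and rebuilds it with interned numeric ids,
// the role `Layouts::component_id` plays in the real conversion.
fn convert(access: &PluginAccess, ids: &mut HashMap<String, u64>) -> HostAccess {
    match access {
        PluginAccess::None => HostAccess::None,
        PluginAccess::Read(name) => {
            let next = ids.len() as u64;
            HostAccess::Read(*ids.entry(name.clone()).or_insert(next))
        }
        PluginAccess::Optional(inner) => HostAccess::Optional(Box::new(convert(inner, ids))),
        PluginAccess::Union(members) => {
            HostAccess::Union(members.iter().map(|m| convert(m, ids)).collect())
        }
    }
}

fn main() {
    let mut ids = HashMap::new();
    let access = PluginAccess::Union(vec![
        PluginAccess::Read("Position".into()),
        PluginAccess::Optional(Box::new(PluginAccess::Read("Velocity".into()))),
        PluginAccess::None,
    ]);
    assert_eq!(
        convert(&access, &mut ids),
        HostAccess::Union(vec![
            HostAccess::Read(0),
            HostAccess::Optional(Box::new(HostAccess::Read(1))),
            HostAccess::None,
        ])
    );
}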
scheduler.rs
extern crate clap; extern crate esvm; extern crate fern; extern crate ethereum_newtypes; extern crate rayon; extern crate regex; extern crate reqwest; extern crate hexdecode; extern crate serde_json; #[macro_use] extern crate lazy_static; #[macro_use] extern crate log; extern crate chrono; use std::fs::{self, File}; use std::io::{BufWriter, Read, Write}; use std::sync::{ atomic::{AtomicUsize, Ordering}, Arc, Mutex, }; use std::thread; use std::time::Duration; use esvm::{AttackType, TimeoutAnalysis, AnalysisSuccess}; use serde_json::json; use chrono::prelude::Local; use clap::{App, Arg, ArgMatches}; use ethereum_newtypes::{Address}; use regex::Regex; use reqwest::Client; fn init_logger() -> Result<()> { fern::Dispatch::new() // Perform allocation-free log formatting .format(|out, message, record| { out.finish(format_args!( "{}[{}][{}] {}", chrono::Local::now().format("[%Y-%m-%d][%H:%M:%S]"), record.target(), record.level(), message )) }) // Add blanket level filter - .level(log::LevelFilter::Info) // Output to stdout, files, and other Dispatch configurations .chain(std::io::stdout()) .chain(fern::log_file("log/evmse-scheduler.log")?) // Apply globally .apply()?; Ok(()) } #[derive(Debug)] struct Worker { client: Client, url: String, timeout: Duration, } impl Worker { fn new(url: &str, timeout: usize) -> Result<Worker> { let client = reqwest::Client::builder().timeout(None).build()?; let mut url = format!("{}/analyze_address", url); if timeout > 0 { url.push_str("_timeout"); } let timeout = Duration::from_secs((timeout * 60) as u64); Ok(Worker { client, url: url, timeout, }) } fn analyze(&self, address: Address) -> Result<AnalysisSuccess> { info!("Analyzing {:x}", address.0); let mut res = if self.timeout > Duration::from_secs(0) { self .client .post(&self.url) .json(&TimeoutAnalysis { address, timeout: self.timeout}) .send()? } else { self .client .post(&self.url) .json(&address) .send()? }; Ok(res.json()?) } fn check_alive(&self) -> Result<()> { self.client .get(&format!("{}/alive", &self.url)) .send() .map_err(|e| e.into()) .map(|_| ()) } } struct WorkerHandle<'a> { worker: Option<Worker>, scheduler: &'a Scheduler, kill: bool, } impl<'a> WorkerHandle<'a> { // specifically consume the handle to force readding the worker fn analyze(mut self, addr: Address) -> Result<AnalysisSuccess> { let res = self.worker.as_ref().unwrap().analyze(addr); if let Err(ref error) = res { error!("Error analyzing {:x?}, checking worker!", error); if let Err(_) = self.worker.as_ref().unwrap().check_alive() { error!("Worker died analyzing {:x?}, shuting down worker!", error); self.kill = true; } else { return Err(Error::retry()); } } res } } impl<'a> Drop for WorkerHandle<'a> { fn drop(&mut self) { if !self.kill { let worker = self .worker .take() .expect("Worker replaced before adding back"); self.scheduler.add_worker(worker) } else { self.worker .take() .expect("Worker replaced before adding back"); } } } #[derive(Debug)] struct Scheduler { queue: Arc<Mutex<Vec<Worker>>>, } impl Scheduler { fn new() -> Self { let queue = Arc::new(Mutex::new(Vec::new())); Self { queue } } fn with_worker_count(urls: Vec<String>, timeout: usize) -> Self
fn add_worker(&self, worker: Worker) { self.queue.lock().unwrap().push(worker); } fn get_worker(&self) -> WorkerHandle { let worker; loop { if let Some(w) = self.queue.lock().unwrap().pop() { worker = Some(w); break; } } WorkerHandle { worker, scheduler: self, kill: false, } } } type Result<T> = ::std::result::Result<T, Error>; #[derive(Debug)] struct Error { kind: Kind, } impl Error { fn from_str(s: String) -> Self { Self { kind: Kind::Execution(s), } } fn retry() -> Self { Self { kind: Kind::Retry, } } fn kind(&self) -> &Kind { &self.kind } } macro_rules! impl_error_kind { ( $(#[$struct_attr:meta])* enum Kind { $( $enum_variant_name:ident($error_type:path), )+ ; $( $single_variant_name:ident, )+ } ) => { // meta attributes $(#[$struct_attr])* // enum definition enum Kind { $( $enum_variant_name($error_type), )+ $( $single_variant_name, )+ } // impl error conversion for each type $( impl ::std::convert::From<$error_type> for Error { fn from(error: $error_type) -> Self { Self { kind: Kind::$enum_variant_name(error), } } } )+ }; } impl_error_kind!(#[derive(Debug)] enum Kind { Reqwest(reqwest::Error), SerdeJson(serde_json::Error), Log(log::SetLoggerError), IO(std::io::Error), Execution(String), ; Retry, }); fn parse_args<'a>() -> ArgMatches<'a> { App::new("EthAEG scheduler for analyzing a large list of contracts") .arg( Arg::with_name("INPUT") .help("Set the list of accounts to scan") .required(true) .index(1), ) .arg( Arg::with_name("SERVER_LIST") .help("Set the list of backend servers") .required(true) .index(2), ) .arg(Arg::with_name("timeout").long("timeout").takes_value(true).help("Specify a timeout for the analysis, none used by default")) .arg(Arg::with_name("json").long("json").help("Dump the analysis result in json format.")) .get_matches() } fn parse_account_list(path: &str) -> (Arc<Mutex<Vec<(usize, String)>>>, usize) { let mut acc_list = String::new(); File::open(path) .expect("Could not open account list") .read_to_string(&mut acc_list) .expect("Could not read account list"); let acc_vec: Vec<(usize, String)> = acc_list .lines() .filter_map(|line| match ACC_RE.captures(line) { Some(cap) => { let capture = cap.get(0).unwrap().as_str(); Some((0, capture.to_string())) } None => { warn!("Could not process: {}", line); None } }) .collect(); let len = acc_vec.len(); (Arc::new(Mutex::new(acc_vec)), len) } fn parse_server_list(path: &str) -> Vec<String> { let mut server_list = String::new(); File::open(path) .expect("Could not open server list") .read_to_string(&mut server_list) .expect("Could not read server list"); server_list .lines() .map(|line| { let line = line.trim(); if line.starts_with("http") || line.starts_with("https") { line.to_string() } else { format!("http://{}", line) } }) .collect() } lazy_static! 
{ static ref ACC_RE: Regex = Regex::new(r"0x[A-za-z0-9]{40}").unwrap(); } fn execute( work_stack: Arc<Mutex<Vec<(usize, String)>>>, scheduler: Arc<Scheduler>, counter: Arc<AtomicUsize>, acc_len: usize, root_path: Arc<String>, csv: &Mutex<BufWriter<File>>, json: bool, ) -> Result<()> { loop { let (c, acc) = match work_stack.lock().unwrap().pop() { Some(work) => work, None => { info!("Could not fetch new work, exiting loop!"); return Ok(()); } }; if c >= 5 { info!("Account {} seed {} times, discarding!", acc, c); continue; } let a = Address(hexdecode::decode(&acc.as_bytes()).unwrap().as_slice().into()); let worker = scheduler.get_worker(); let res = worker.analyze(a); match res { Ok(r) => { let file_path = if json { format!("{}/{}.json", root_path, acc) } else { format!("{}/{}", root_path, acc) }; let mut f = match File::create(file_path) { Ok(f) => f, Err(e) => { error!("Could not create file for {}: {:?}", acc, e); return Err(Error::from_str(format!( "Could not create file for {}: {:?}", acc, e ))); } }; if json { if let AnalysisSuccess::Success(ref analysis) = r { let mut res = (false, false, false); if let Some(ref attacks) = analysis.attacks { for attack in attacks { if attack.attack_type == AttackType::StealMoney { res.0 = true; } if attack.attack_type == AttackType::DeleteContract { res.1 = true; } if attack.attack_type == AttackType::HijackControlFlow { res.2 = true; } } } csv.lock().unwrap().write_all(format!("{:x}, {}, {}, {}\n", analysis.address, res.0, res.1, res.2).as_bytes()).expect("Could not write to csv file!"); } let _write_res = f.write_all(json!(r).to_string().as_bytes()); } else { let content = match r { AnalysisSuccess::Success(analysis) => { let mut res = (false, false, false); if let Some(ref attacks) = analysis.attacks { for attack in attacks { if attack.attack_type == AttackType::StealMoney { res.0 = true; } if attack.attack_type == AttackType::DeleteContract { res.1 = true; } if attack.attack_type == AttackType::HijackControlFlow { res.2 = true; } } } csv.lock().unwrap().write_all(format!("{:x}, {}, {}, {}\n", analysis.address, res.0, res.1, res.2).as_bytes()).expect("Could not write to csv file!"); format!("{}", analysis) }, AnalysisSuccess::Failure(s) => { warn!("Failure during analysing {}: {}", acc, s); s }, AnalysisSuccess::Timeout => { warn!("Timeout during analysis: {:?}", acc); format!("Timeout analysing {:?}", acc) }, }; let _write_res = f.write_all(content.as_bytes()); } } Err(e) => { if let Kind::Retry = e.kind() { error!("Error analyzing {}, retrying...", acc); work_stack.lock().unwrap().push((c+1, acc)); } else { error!("Error analyzing {}: {:?} worker died!", acc, e); } } }; info!( "Analyzed {} of {} contracts", counter.fetch_add(1, Ordering::Relaxed), acc_len ); } } fn main() { // init logger init_logger().expect("Could not initialize logger"); // parse args let matches = parse_args(); // create root path let root_path = format!( "analysis/{}/", Local::now().format("%Y-%m-%d-%H:%M:%S").to_string() ); fs::create_dir_all(root_path.clone()).expect("Could not create root folder for analysis"); let root_path = Arc::new(root_path); let acc_path = matches.value_of("INPUT").unwrap(); let server_path = matches.value_of("SERVER_LIST").unwrap(); let (work_stack, acc_len) = parse_account_list(acc_path); let server_list = parse_server_list(server_path); let server_len = server_list.len(); let timeout = if let Some(b) = matches.value_of("timeout") { b.parse().expect("Incorrect timeout supplied!") } else { 0 }; let scheduler = 
Arc::new(Scheduler::with_worker_count(server_list, timeout)); let counter = Arc::new(AtomicUsize::new(1)); let mut f = File::create(format!("{}/analysis.csv", root_path)).expect("Could not create csv file!"); f.write_all("address, steal ether, trigger suicide, hijack control flow\n".as_bytes()).expect("Could not write header to cvs!"); let csv_writer = Arc::new(Mutex::new(BufWriter::new(f))); info!("Starting Analysis"); let mut threads = Vec::new(); for _ in 0..server_len { let work_stack_clone = Arc::clone(&work_stack); let scheduler_clone = Arc::clone(&scheduler); let counter_clone = Arc::clone(&counter); let root_path_clone = Arc::clone(&root_path); let csv_clone = Arc::clone(&csv_writer); let json = matches.is_present("json"); let join_handle = thread::spawn(move || { execute( work_stack_clone, scheduler_clone, counter_clone, acc_len, root_path_clone, &csv_clone, json, ) }); threads.push(join_handle); } csv_writer.lock().unwrap().flush().expect("Could not finally flush writer"); for handle in threads { let _res = handle.join(); } info!("Finished Analysis"); }
{ let s = Scheduler::new(); for url in &urls { s.queue.lock().unwrap().push(Worker::new(url, timeout).unwrap()); // fail early if a worker cannot connect initially
} s }
identifier_body
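scheduler.rs checks workers out of a shared queue and relies on `Drop` on `WorkerHandle` to return them, so a worker is never lost when the analysis code exits early. A minimal sketch of that checkout/return pattern, using `Option` instead of the original busy-wait loop:

use std::sync::{Arc, Mutex};

struct Pool<T> {
    queue: Arc<Mutex<Vec<T>>>,
}

// Holds a checked-out worker; Drop puts it back, so early returns in the
// caller cannot leak a worker (the role WorkerHandle plays above).
struct Checkout<'a, T> {
    item: Option<T>,
    pool: &'a Pool<T>,
}

impl<T> Pool<T> {
    fn new(items: Vec<T>) -> Self {
        Pool { queue: Arc::new(Mutex::new(items)) }
    }

    fn checkout(&self) -> Option<Checkout<'_, T>> {
        let item = self.queue.lock().unwrap().pop()?;
        Some(Checkout { item: Some(item), pool: self })
    }
}

impl<'a, T> Drop for Checkout<'a, T> {
    fn drop(&mut self) {
        // A "dead" worker could be taken out of the Option instead, which is
        // how the scheduler drops workers whose backend stopped answering.
        if let Some(item) = self.item.take() {
            self.pool.queue.lock().unwrap().push(item);
        }
    }
}

fn main() {
    let pool = Pool::new(vec!["worker-a", "worker-b"]);
    {
        let handle = pool.checkout().unwrap();
        assert_eq!(handle.item, Some("worker-b"));
        assert_eq!(pool.queue.lock().unwrap().len(), 1);
    } // handle dropped here, the worker goes back into the queue
    assert_eq!(pool.queue.lock().unwrap().len(), 2);
}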
scheduler.rs
extern crate clap; extern crate esvm; extern crate fern; extern crate ethereum_newtypes; extern crate rayon; extern crate regex; extern crate reqwest; extern crate hexdecode; extern crate serde_json; #[macro_use] extern crate lazy_static; #[macro_use] extern crate log; extern crate chrono; use std::fs::{self, File}; use std::io::{BufWriter, Read, Write}; use std::sync::{ atomic::{AtomicUsize, Ordering}, Arc, Mutex, }; use std::thread; use std::time::Duration; use esvm::{AttackType, TimeoutAnalysis, AnalysisSuccess}; use serde_json::json; use chrono::prelude::Local; use clap::{App, Arg, ArgMatches}; use ethereum_newtypes::{Address}; use regex::Regex; use reqwest::Client; fn init_logger() -> Result<()> { fern::Dispatch::new() // Perform allocation-free log formatting .format(|out, message, record| { out.finish(format_args!( "{}[{}][{}] {}", chrono::Local::now().format("[%Y-%m-%d][%H:%M:%S]"), record.target(), record.level(), message )) }) // Add blanket level filter - .level(log::LevelFilter::Info) // Output to stdout, files, and other Dispatch configurations .chain(std::io::stdout()) .chain(fern::log_file("log/evmse-scheduler.log")?) // Apply globally .apply()?; Ok(()) } #[derive(Debug)] struct Worker { client: Client, url: String, timeout: Duration, } impl Worker { fn new(url: &str, timeout: usize) -> Result<Worker> { let client = reqwest::Client::builder().timeout(None).build()?; let mut url = format!("{}/analyze_address", url); if timeout > 0 { url.push_str("_timeout"); } let timeout = Duration::from_secs((timeout * 60) as u64); Ok(Worker { client, url: url, timeout, }) } fn analyze(&self, address: Address) -> Result<AnalysisSuccess> { info!("Analyzing {:x}", address.0); let mut res = if self.timeout > Duration::from_secs(0) { self .client .post(&self.url) .json(&TimeoutAnalysis { address, timeout: self.timeout}) .send()? } else { self .client .post(&self.url) .json(&address) .send()? }; Ok(res.json()?) 
} fn check_alive(&self) -> Result<()> { self.client .get(&format!("{}/alive", &self.url)) .send() .map_err(|e| e.into()) .map(|_| ()) } } struct WorkerHandle<'a> { worker: Option<Worker>, scheduler: &'a Scheduler, kill: bool, } impl<'a> WorkerHandle<'a> { // specifically consume the handle to force readding the worker fn analyze(mut self, addr: Address) -> Result<AnalysisSuccess> { let res = self.worker.as_ref().unwrap().analyze(addr); if let Err(ref error) = res { error!("Error analyzing {:x?}, checking worker!", error); if let Err(_) = self.worker.as_ref().unwrap().check_alive() { error!("Worker died analyzing {:x?}, shuting down worker!", error); self.kill = true; } else { return Err(Error::retry()); } } res } } impl<'a> Drop for WorkerHandle<'a> { fn drop(&mut self) { if !self.kill { let worker = self .worker .take() .expect("Worker replaced before adding back"); self.scheduler.add_worker(worker) } else { self.worker .take() .expect("Worker replaced before adding back"); } } } #[derive(Debug)] struct Scheduler { queue: Arc<Mutex<Vec<Worker>>>, } impl Scheduler { fn new() -> Self { let queue = Arc::new(Mutex::new(Vec::new())); Self { queue } } fn with_worker_count(urls: Vec<String>, timeout: usize) -> Self { let s = Scheduler::new(); for url in &urls { s.queue.lock().unwrap().push(Worker::new(url, timeout).unwrap()); // if the workers can not connect initially fail } s } fn add_worker(&self, worker: Worker) { self.queue.lock().unwrap().push(worker); } fn get_worker(&self) -> WorkerHandle { let worker; loop { if let Some(w) = self.queue.lock().unwrap().pop() { worker = Some(w); break; } } WorkerHandle { worker, scheduler: self, kill: false, } } } type Result<T> = ::std::result::Result<T, Error>; #[derive(Debug)] struct Error { kind: Kind, } impl Error { fn from_str(s: String) -> Self { Self { kind: Kind::Execution(s), } } fn retry() -> Self { Self { kind: Kind::Retry, } } fn kind(&self) -> &Kind { &self.kind } } macro_rules! impl_error_kind { ( $(#[$struct_attr:meta])* enum Kind { $( $enum_variant_name:ident($error_type:path), )+ ; $( $single_variant_name:ident, )+ } ) => { // meta attributes $(#[$struct_attr])* // enum definition enum Kind { $( $enum_variant_name($error_type), )+ $( $single_variant_name, )+ } // impl error conversion for each type $( impl ::std::convert::From<$error_type> for Error { fn from(error: $error_type) -> Self { Self { kind: Kind::$enum_variant_name(error), } } } )+ }; } impl_error_kind!(#[derive(Debug)] enum Kind { Reqwest(reqwest::Error), SerdeJson(serde_json::Error), Log(log::SetLoggerError), IO(std::io::Error), Execution(String), ; Retry, }); fn parse_args<'a>() -> ArgMatches<'a> { App::new("EthAEG scheduler for analyzing a large list of contracts") .arg( Arg::with_name("INPUT") .help("Set the list of accounts to scan") .required(true) .index(1), ) .arg( Arg::with_name("SERVER_LIST") .help("Set the list of backend servers") .required(true) .index(2), ) .arg(Arg::with_name("timeout").long("timeout").takes_value(true).help("Specify a timeout for the analysis, none used by default")) .arg(Arg::with_name("json").long("json").help("Dump the analysis result in json format.")) .get_matches() } fn
(path: &str) -> (Arc<Mutex<Vec<(usize, String)>>>, usize) { let mut acc_list = String::new(); File::open(path) .expect("Could not open account list") .read_to_string(&mut acc_list) .expect("Could not read account list"); let acc_vec: Vec<(usize, String)> = acc_list .lines() .filter_map(|line| match ACC_RE.captures(line) { Some(cap) => { let capture = cap.get(0).unwrap().as_str(); Some((0, capture.to_string())) } None => { warn!("Could not process: {}", line); None } }) .collect(); let len = acc_vec.len(); (Arc::new(Mutex::new(acc_vec)), len) } fn parse_server_list(path: &str) -> Vec<String> { let mut server_list = String::new(); File::open(path) .expect("Could not open server list") .read_to_string(&mut server_list) .expect("Could not read server list"); server_list .lines() .map(|line| { let line = line.trim(); if line.starts_with("http") || line.starts_with("https") { line.to_string() } else { format!("http://{}", line) } }) .collect() } lazy_static! { static ref ACC_RE: Regex = Regex::new(r"0x[A-za-z0-9]{40}").unwrap(); } fn execute( work_stack: Arc<Mutex<Vec<(usize, String)>>>, scheduler: Arc<Scheduler>, counter: Arc<AtomicUsize>, acc_len: usize, root_path: Arc<String>, csv: &Mutex<BufWriter<File>>, json: bool, ) -> Result<()> { loop { let (c, acc) = match work_stack.lock().unwrap().pop() { Some(work) => work, None => { info!("Could not fetch new work, exiting loop!"); return Ok(()); } }; if c >= 5 { info!("Account {} seed {} times, discarding!", acc, c); continue; } let a = Address(hexdecode::decode(&acc.as_bytes()).unwrap().as_slice().into()); let worker = scheduler.get_worker(); let res = worker.analyze(a); match res { Ok(r) => { let file_path = if json { format!("{}/{}.json", root_path, acc) } else { format!("{}/{}", root_path, acc) }; let mut f = match File::create(file_path) { Ok(f) => f, Err(e) => { error!("Could not create file for {}: {:?}", acc, e); return Err(Error::from_str(format!( "Could not create file for {}: {:?}", acc, e ))); } }; if json { if let AnalysisSuccess::Success(ref analysis) = r { let mut res = (false, false, false); if let Some(ref attacks) = analysis.attacks { for attack in attacks { if attack.attack_type == AttackType::StealMoney { res.0 = true; } if attack.attack_type == AttackType::DeleteContract { res.1 = true; } if attack.attack_type == AttackType::HijackControlFlow { res.2 = true; } } } csv.lock().unwrap().write_all(format!("{:x}, {}, {}, {}\n", analysis.address, res.0, res.1, res.2).as_bytes()).expect("Could not write to csv file!"); } let _write_res = f.write_all(json!(r).to_string().as_bytes()); } else { let content = match r { AnalysisSuccess::Success(analysis) => { let mut res = (false, false, false); if let Some(ref attacks) = analysis.attacks { for attack in attacks { if attack.attack_type == AttackType::StealMoney { res.0 = true; } if attack.attack_type == AttackType::DeleteContract { res.1 = true; } if attack.attack_type == AttackType::HijackControlFlow { res.2 = true; } } } csv.lock().unwrap().write_all(format!("{:x}, {}, {}, {}\n", analysis.address, res.0, res.1, res.2).as_bytes()).expect("Could not write to csv file!"); format!("{}", analysis) }, AnalysisSuccess::Failure(s) => { warn!("Failure during analysing {}: {}", acc, s); s }, AnalysisSuccess::Timeout => { warn!("Timeout during analysis: {:?}", acc); format!("Timeout analysing {:?}", acc) }, }; let _write_res = f.write_all(content.as_bytes()); } } Err(e) => { if let Kind::Retry = e.kind() { error!("Error analyzing {}, retrying...", acc); work_stack.lock().unwrap().push((c+1, acc)); 
} else { error!("Error analyzing {}: {:?} worker died!", acc, e); } } }; info!( "Analyzed {} of {} contracts", counter.fetch_add(1, Ordering::Relaxed), acc_len ); } } fn main() { // init logger init_logger().expect("Could not initialize logger"); // parse args let matches = parse_args(); // create root path let root_path = format!( "analysis/{}/", Local::now().format("%Y-%m-%d-%H:%M:%S").to_string() ); fs::create_dir_all(root_path.clone()).expect("Could not create root folder for analysis"); let root_path = Arc::new(root_path); let acc_path = matches.value_of("INPUT").unwrap(); let server_path = matches.value_of("SERVER_LIST").unwrap(); let (work_stack, acc_len) = parse_account_list(acc_path); let server_list = parse_server_list(server_path); let server_len = server_list.len(); let timeout = if let Some(b) = matches.value_of("timeout") { b.parse().expect("Incorrect timeout supplied!") } else { 0 }; let scheduler = Arc::new(Scheduler::with_worker_count(server_list, timeout)); let counter = Arc::new(AtomicUsize::new(1)); let mut f = File::create(format!("{}/analysis.csv", root_path)).expect("Could not create csv file!"); f.write_all("address, steal ether, trigger suicide, hijack control flow\n".as_bytes()).expect("Could not write header to cvs!"); let csv_writer = Arc::new(Mutex::new(BufWriter::new(f))); info!("Starting Analysis"); let mut threads = Vec::new(); for _ in 0..server_len { let work_stack_clone = Arc::clone(&work_stack); let scheduler_clone = Arc::clone(&scheduler); let counter_clone = Arc::clone(&counter); let root_path_clone = Arc::clone(&root_path); let csv_clone = Arc::clone(&csv_writer); let json = matches.is_present("json"); let join_handle = thread::spawn(move || { execute( work_stack_clone, scheduler_clone, counter_clone, acc_len, root_path_clone, &csv_clone, json, ) }); threads.push(join_handle); } csv_writer.lock().unwrap().flush().expect("Could not finally flush writer"); for handle in threads { let _res = handle.join(); } info!("Finished Analysis"); }
parse_account_list
identifier_name
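`parse_account_list` scans each input line with a regex and keeps the first address-like match, pairing it with a retry count of zero. The sketch below shows the same filtering; note it uses the stricter hex-only class `0x[0-9a-fA-F]{40}` as an assumption, whereas the scheduler's pattern is `0x[A-za-z0-9]{40}`.

use regex::Regex;

fn main() {
    // Stricter hex-only address pattern (assumption, see note above).
    let re = Regex::new(r"0x[0-9a-fA-F]{40}").unwrap();
    let lines = [
        "0x52908400098527886E0F7030069857D2E4169EE7 # known contract",
        "not an address",
        "0xde709f2102306220921060314715629080e2fb77",
    ];

    // Keep the first match per line, paired with a retry count of zero.
    let accounts: Vec<(usize, String)> = lines
        .iter()
        .filter_map(|line| re.find(line).map(|m| (0, m.as_str().to_string())))
        .collect();

    assert_eq!(accounts.len(), 2);
    assert_eq!(accounts[0].1, "0x52908400098527886E0F7030069857D2E4169EE7");
}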
scheduler.rs
extern crate clap; extern crate esvm; extern crate fern; extern crate ethereum_newtypes; extern crate rayon; extern crate regex; extern crate reqwest; extern crate hexdecode; extern crate serde_json; #[macro_use] extern crate lazy_static; #[macro_use] extern crate log; extern crate chrono; use std::fs::{self, File}; use std::io::{BufWriter, Read, Write}; use std::sync::{ atomic::{AtomicUsize, Ordering}, Arc, Mutex, }; use std::thread; use std::time::Duration; use esvm::{AttackType, TimeoutAnalysis, AnalysisSuccess}; use serde_json::json; use chrono::prelude::Local; use clap::{App, Arg, ArgMatches}; use ethereum_newtypes::{Address}; use regex::Regex; use reqwest::Client; fn init_logger() -> Result<()> { fern::Dispatch::new() // Perform allocation-free log formatting .format(|out, message, record| { out.finish(format_args!( "{}[{}][{}] {}", chrono::Local::now().format("[%Y-%m-%d][%H:%M:%S]"), record.target(), record.level(), message )) }) // Add blanket level filter - .level(log::LevelFilter::Info) // Output to stdout, files, and other Dispatch configurations .chain(std::io::stdout()) .chain(fern::log_file("log/evmse-scheduler.log")?) // Apply globally .apply()?; Ok(()) } #[derive(Debug)] struct Worker { client: Client, url: String, timeout: Duration, } impl Worker { fn new(url: &str, timeout: usize) -> Result<Worker> { let client = reqwest::Client::builder().timeout(None).build()?; let mut url = format!("{}/analyze_address", url); if timeout > 0 { url.push_str("_timeout"); } let timeout = Duration::from_secs((timeout * 60) as u64); Ok(Worker { client, url: url, timeout, }) } fn analyze(&self, address: Address) -> Result<AnalysisSuccess> { info!("Analyzing {:x}", address.0); let mut res = if self.timeout > Duration::from_secs(0) { self .client .post(&self.url) .json(&TimeoutAnalysis { address, timeout: self.timeout}) .send()? } else { self .client .post(&self.url) .json(&address) .send()? }; Ok(res.json()?) } fn check_alive(&self) -> Result<()> { self.client .get(&format!("{}/alive", &self.url)) .send() .map_err(|e| e.into()) .map(|_| ()) } } struct WorkerHandle<'a> { worker: Option<Worker>, scheduler: &'a Scheduler, kill: bool, } impl<'a> WorkerHandle<'a> { // specifically consume the handle to force readding the worker fn analyze(mut self, addr: Address) -> Result<AnalysisSuccess> { let res = self.worker.as_ref().unwrap().analyze(addr); if let Err(ref error) = res { error!("Error analyzing {:x?}, checking worker!", error); if let Err(_) = self.worker.as_ref().unwrap().check_alive() { error!("Worker died analyzing {:x?}, shuting down worker!", error); self.kill = true; } else { return Err(Error::retry());
} res } } impl<'a> Drop for WorkerHandle<'a> { fn drop(&mut self) { if !self.kill { let worker = self .worker .take() .expect("Worker replaced before adding back"); self.scheduler.add_worker(worker) } else { self.worker .take() .expect("Worker replaced before adding back"); } } } #[derive(Debug)] struct Scheduler { queue: Arc<Mutex<Vec<Worker>>>, } impl Scheduler { fn new() -> Self { let queue = Arc::new(Mutex::new(Vec::new())); Self { queue } } fn with_worker_count(urls: Vec<String>, timeout: usize) -> Self { let s = Scheduler::new(); for url in &urls { s.queue.lock().unwrap().push(Worker::new(url, timeout).unwrap()); // if the workers can not connect initially fail } s } fn add_worker(&self, worker: Worker) { self.queue.lock().unwrap().push(worker); } fn get_worker(&self) -> WorkerHandle { let worker; loop { if let Some(w) = self.queue.lock().unwrap().pop() { worker = Some(w); break; } } WorkerHandle { worker, scheduler: self, kill: false, } } } type Result<T> = ::std::result::Result<T, Error>; #[derive(Debug)] struct Error { kind: Kind, } impl Error { fn from_str(s: String) -> Self { Self { kind: Kind::Execution(s), } } fn retry() -> Self { Self { kind: Kind::Retry, } } fn kind(&self) -> &Kind { &self.kind } } macro_rules! impl_error_kind { ( $(#[$struct_attr:meta])* enum Kind { $( $enum_variant_name:ident($error_type:path), )+ ; $( $single_variant_name:ident, )+ } ) => { // meta attributes $(#[$struct_attr])* // enum definition enum Kind { $( $enum_variant_name($error_type), )+ $( $single_variant_name, )+ } // impl error conversion for each type $( impl ::std::convert::From<$error_type> for Error { fn from(error: $error_type) -> Self { Self { kind: Kind::$enum_variant_name(error), } } } )+ }; } impl_error_kind!(#[derive(Debug)] enum Kind { Reqwest(reqwest::Error), SerdeJson(serde_json::Error), Log(log::SetLoggerError), IO(std::io::Error), Execution(String), ; Retry, }); fn parse_args<'a>() -> ArgMatches<'a> { App::new("EthAEG scheduler for analyzing a large list of contracts") .arg( Arg::with_name("INPUT") .help("Set the list of accounts to scan") .required(true) .index(1), ) .arg( Arg::with_name("SERVER_LIST") .help("Set the list of backend servers") .required(true) .index(2), ) .arg(Arg::with_name("timeout").long("timeout").takes_value(true).help("Specify a timeout for the analysis, none used by default")) .arg(Arg::with_name("json").long("json").help("Dump the analysis result in json format.")) .get_matches() } fn parse_account_list(path: &str) -> (Arc<Mutex<Vec<(usize, String)>>>, usize) { let mut acc_list = String::new(); File::open(path) .expect("Could not open account list") .read_to_string(&mut acc_list) .expect("Could not read account list"); let acc_vec: Vec<(usize, String)> = acc_list .lines() .filter_map(|line| match ACC_RE.captures(line) { Some(cap) => { let capture = cap.get(0).unwrap().as_str(); Some((0, capture.to_string())) } None => { warn!("Could not process: {}", line); None } }) .collect(); let len = acc_vec.len(); (Arc::new(Mutex::new(acc_vec)), len) } fn parse_server_list(path: &str) -> Vec<String> { let mut server_list = String::new(); File::open(path) .expect("Could not open server list") .read_to_string(&mut server_list) .expect("Could not read server list"); server_list .lines() .map(|line| { let line = line.trim(); if line.starts_with("http") || line.starts_with("https") { line.to_string() } else { format!("http://{}", line) } }) .collect() } lazy_static! 
{ static ref ACC_RE: Regex = Regex::new(r"0x[A-za-z0-9]{40}").unwrap(); } fn execute( work_stack: Arc<Mutex<Vec<(usize, String)>>>, scheduler: Arc<Scheduler>, counter: Arc<AtomicUsize>, acc_len: usize, root_path: Arc<String>, csv: &Mutex<BufWriter<File>>, json: bool, ) -> Result<()> { loop { let (c, acc) = match work_stack.lock().unwrap().pop() { Some(work) => work, None => { info!("Could not fetch new work, exiting loop!"); return Ok(()); } }; if c >= 5 { info!("Account {} seed {} times, discarding!", acc, c); continue; } let a = Address(hexdecode::decode(&acc.as_bytes()).unwrap().as_slice().into()); let worker = scheduler.get_worker(); let res = worker.analyze(a); match res { Ok(r) => { let file_path = if json { format!("{}/{}.json", root_path, acc) } else { format!("{}/{}", root_path, acc) }; let mut f = match File::create(file_path) { Ok(f) => f, Err(e) => { error!("Could not create file for {}: {:?}", acc, e); return Err(Error::from_str(format!( "Could not create file for {}: {:?}", acc, e ))); } }; if json { if let AnalysisSuccess::Success(ref analysis) = r { let mut res = (false, false, false); if let Some(ref attacks) = analysis.attacks { for attack in attacks { if attack.attack_type == AttackType::StealMoney { res.0 = true; } if attack.attack_type == AttackType::DeleteContract { res.1 = true; } if attack.attack_type == AttackType::HijackControlFlow { res.2 = true; } } } csv.lock().unwrap().write_all(format!("{:x}, {}, {}, {}\n", analysis.address, res.0, res.1, res.2).as_bytes()).expect("Could not write to csv file!"); } let _write_res = f.write_all(json!(r).to_string().as_bytes()); } else { let content = match r { AnalysisSuccess::Success(analysis) => { let mut res = (false, false, false); if let Some(ref attacks) = analysis.attacks { for attack in attacks { if attack.attack_type == AttackType::StealMoney { res.0 = true; } if attack.attack_type == AttackType::DeleteContract { res.1 = true; } if attack.attack_type == AttackType::HijackControlFlow { res.2 = true; } } } csv.lock().unwrap().write_all(format!("{:x}, {}, {}, {}\n", analysis.address, res.0, res.1, res.2).as_bytes()).expect("Could not write to csv file!"); format!("{}", analysis) }, AnalysisSuccess::Failure(s) => { warn!("Failure during analysing {}: {}", acc, s); s }, AnalysisSuccess::Timeout => { warn!("Timeout during analysis: {:?}", acc); format!("Timeout analysing {:?}", acc) }, }; let _write_res = f.write_all(content.as_bytes()); } } Err(e) => { if let Kind::Retry = e.kind() { error!("Error analyzing {}, retrying...", acc); work_stack.lock().unwrap().push((c+1, acc)); } else { error!("Error analyzing {}: {:?} worker died!", acc, e); } } }; info!( "Analyzed {} of {} contracts", counter.fetch_add(1, Ordering::Relaxed), acc_len ); } } fn main() { // init logger init_logger().expect("Could not initialize logger"); // parse args let matches = parse_args(); // create root path let root_path = format!( "analysis/{}/", Local::now().format("%Y-%m-%d-%H:%M:%S").to_string() ); fs::create_dir_all(root_path.clone()).expect("Could not create root folder for analysis"); let root_path = Arc::new(root_path); let acc_path = matches.value_of("INPUT").unwrap(); let server_path = matches.value_of("SERVER_LIST").unwrap(); let (work_stack, acc_len) = parse_account_list(acc_path); let server_list = parse_server_list(server_path); let server_len = server_list.len(); let timeout = if let Some(b) = matches.value_of("timeout") { b.parse().expect("Incorrect timeout supplied!") } else { 0 }; let scheduler = 
Arc::new(Scheduler::with_worker_count(server_list, timeout)); let counter = Arc::new(AtomicUsize::new(1)); let mut f = File::create(format!("{}/analysis.csv", root_path)).expect("Could not create csv file!"); f.write_all("address, steal ether, trigger suicide, hijack control flow\n".as_bytes()).expect("Could not write header to cvs!"); let csv_writer = Arc::new(Mutex::new(BufWriter::new(f))); info!("Starting Analysis"); let mut threads = Vec::new(); for _ in 0..server_len { let work_stack_clone = Arc::clone(&work_stack); let scheduler_clone = Arc::clone(&scheduler); let counter_clone = Arc::clone(&counter); let root_path_clone = Arc::clone(&root_path); let csv_clone = Arc::clone(&csv_writer); let json = matches.is_present("json"); let join_handle = thread::spawn(move || { execute( work_stack_clone, scheduler_clone, counter_clone, acc_len, root_path_clone, &csv_clone, json, ) }); threads.push(join_handle); } csv_writer.lock().unwrap().flush().expect("Could not finally flush writer"); for handle in threads { let _res = handle.join(); } info!("Finished Analysis"); }
}
random_line_split
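The `execute` loop treats each work item as `(attempt_count, address)`: failures caused by a dead-looking worker are pushed back with the count bumped, and items seen too many times are discarded. A single-threaded sketch of that bookkeeping (the real stack is shared across threads behind `Arc<Mutex<..>>`, and the always-failing address is an invented stand-in):

fn main() {
    const MAX_ATTEMPTS: usize = 5;
    // A plain Vec is enough to show the retry bookkeeping.
    let mut work: Vec<(usize, String)> = vec![
        (0, "0xaaaa".to_string()),
        (0, "0xbbbb".to_string()),
    ];
    // Stand-in for a backend that keeps failing on one address.
    let always_fails = |addr: &str| addr == "0xaaaa";

    let mut done = Vec::new();
    let mut discarded = Vec::new();
    while let Some((attempts, addr)) = work.pop() {
        if attempts >= MAX_ATTEMPTS {
            discarded.push(addr); // seen too many times, give up on it
            continue;
        }
        if always_fails(addr.as_str()) {
            work.push((attempts + 1, addr)); // requeue with a bumped count
        } else {
            done.push(addr);
        }
    }

    assert_eq!(done, vec!["0xbbbb".to_string()]);
    assert_eq!(discarded, vec!["0xaaaa".to_string()]);
}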
scheduler.rs
extern crate clap; extern crate esvm; extern crate fern; extern crate ethereum_newtypes; extern crate rayon; extern crate regex; extern crate reqwest; extern crate hexdecode; extern crate serde_json; #[macro_use] extern crate lazy_static; #[macro_use] extern crate log; extern crate chrono; use std::fs::{self, File}; use std::io::{BufWriter, Read, Write}; use std::sync::{ atomic::{AtomicUsize, Ordering}, Arc, Mutex, }; use std::thread; use std::time::Duration; use esvm::{AttackType, TimeoutAnalysis, AnalysisSuccess}; use serde_json::json; use chrono::prelude::Local; use clap::{App, Arg, ArgMatches}; use ethereum_newtypes::{Address}; use regex::Regex; use reqwest::Client; fn init_logger() -> Result<()> { fern::Dispatch::new() // Perform allocation-free log formatting .format(|out, message, record| { out.finish(format_args!( "{}[{}][{}] {}", chrono::Local::now().format("[%Y-%m-%d][%H:%M:%S]"), record.target(), record.level(), message )) }) // Add blanket level filter - .level(log::LevelFilter::Info) // Output to stdout, files, and other Dispatch configurations .chain(std::io::stdout()) .chain(fern::log_file("log/evmse-scheduler.log")?) // Apply globally .apply()?; Ok(()) } #[derive(Debug)] struct Worker { client: Client, url: String, timeout: Duration, } impl Worker { fn new(url: &str, timeout: usize) -> Result<Worker> { let client = reqwest::Client::builder().timeout(None).build()?; let mut url = format!("{}/analyze_address", url); if timeout > 0 { url.push_str("_timeout"); } let timeout = Duration::from_secs((timeout * 60) as u64); Ok(Worker { client, url: url, timeout, }) } fn analyze(&self, address: Address) -> Result<AnalysisSuccess> { info!("Analyzing {:x}", address.0); let mut res = if self.timeout > Duration::from_secs(0) { self .client .post(&self.url) .json(&TimeoutAnalysis { address, timeout: self.timeout}) .send()? } else { self .client .post(&self.url) .json(&address) .send()? }; Ok(res.json()?) 
} fn check_alive(&self) -> Result<()> { self.client .get(&format!("{}/alive", &self.url)) .send() .map_err(|e| e.into()) .map(|_| ()) } } struct WorkerHandle<'a> { worker: Option<Worker>, scheduler: &'a Scheduler, kill: bool, } impl<'a> WorkerHandle<'a> { // specifically consume the handle to force readding the worker fn analyze(mut self, addr: Address) -> Result<AnalysisSuccess> { let res = self.worker.as_ref().unwrap().analyze(addr); if let Err(ref error) = res { error!("Error analyzing {:x?}, checking worker!", error); if let Err(_) = self.worker.as_ref().unwrap().check_alive() { error!("Worker died analyzing {:x?}, shuting down worker!", error); self.kill = true; } else { return Err(Error::retry()); } } res } } impl<'a> Drop for WorkerHandle<'a> { fn drop(&mut self) { if !self.kill { let worker = self .worker .take() .expect("Worker replaced before adding back"); self.scheduler.add_worker(worker) } else { self.worker .take() .expect("Worker replaced before adding back"); } } } #[derive(Debug)] struct Scheduler { queue: Arc<Mutex<Vec<Worker>>>, } impl Scheduler { fn new() -> Self { let queue = Arc::new(Mutex::new(Vec::new())); Self { queue } } fn with_worker_count(urls: Vec<String>, timeout: usize) -> Self { let s = Scheduler::new(); for url in &urls { s.queue.lock().unwrap().push(Worker::new(url, timeout).unwrap()); // if the workers can not connect initially fail } s } fn add_worker(&self, worker: Worker) { self.queue.lock().unwrap().push(worker); } fn get_worker(&self) -> WorkerHandle { let worker; loop { if let Some(w) = self.queue.lock().unwrap().pop() { worker = Some(w); break; } } WorkerHandle { worker, scheduler: self, kill: false, } } } type Result<T> = ::std::result::Result<T, Error>; #[derive(Debug)] struct Error { kind: Kind, } impl Error { fn from_str(s: String) -> Self { Self { kind: Kind::Execution(s), } } fn retry() -> Self { Self { kind: Kind::Retry, } } fn kind(&self) -> &Kind { &self.kind } } macro_rules! 
impl_error_kind { ( $(#[$struct_attr:meta])* enum Kind { $( $enum_variant_name:ident($error_type:path), )+ ; $( $single_variant_name:ident, )+ } ) => { // meta attributes $(#[$struct_attr])* // enum definition enum Kind { $( $enum_variant_name($error_type), )+ $( $single_variant_name, )+ } // impl error conversion for each type $( impl ::std::convert::From<$error_type> for Error { fn from(error: $error_type) -> Self { Self { kind: Kind::$enum_variant_name(error), } } } )+ }; } impl_error_kind!(#[derive(Debug)] enum Kind { Reqwest(reqwest::Error), SerdeJson(serde_json::Error), Log(log::SetLoggerError), IO(std::io::Error), Execution(String), ; Retry, }); fn parse_args<'a>() -> ArgMatches<'a> { App::new("EthAEG scheduler for analyzing a large list of contracts") .arg( Arg::with_name("INPUT") .help("Set the list of accounts to scan") .required(true) .index(1), ) .arg( Arg::with_name("SERVER_LIST") .help("Set the list of backend servers") .required(true) .index(2), ) .arg(Arg::with_name("timeout").long("timeout").takes_value(true).help("Specify a timeout for the analysis, none used by default")) .arg(Arg::with_name("json").long("json").help("Dump the analysis result in json format.")) .get_matches() } fn parse_account_list(path: &str) -> (Arc<Mutex<Vec<(usize, String)>>>, usize) { let mut acc_list = String::new(); File::open(path) .expect("Could not open account list") .read_to_string(&mut acc_list) .expect("Could not read account list"); let acc_vec: Vec<(usize, String)> = acc_list .lines() .filter_map(|line| match ACC_RE.captures(line) { Some(cap) => { let capture = cap.get(0).unwrap().as_str(); Some((0, capture.to_string())) } None => { warn!("Could not process: {}", line); None } }) .collect(); let len = acc_vec.len(); (Arc::new(Mutex::new(acc_vec)), len) } fn parse_server_list(path: &str) -> Vec<String> { let mut server_list = String::new(); File::open(path) .expect("Could not open server list") .read_to_string(&mut server_list) .expect("Could not read server list"); server_list .lines() .map(|line| { let line = line.trim(); if line.starts_with("http") || line.starts_with("https") { line.to_string() } else { format!("http://{}", line) } }) .collect() } lazy_static! 
{ static ref ACC_RE: Regex = Regex::new(r"0x[A-za-z0-9]{40}").unwrap(); } fn execute( work_stack: Arc<Mutex<Vec<(usize, String)>>>, scheduler: Arc<Scheduler>, counter: Arc<AtomicUsize>, acc_len: usize, root_path: Arc<String>, csv: &Mutex<BufWriter<File>>, json: bool, ) -> Result<()> { loop { let (c, acc) = match work_stack.lock().unwrap().pop() { Some(work) => work, None => { info!("Could not fetch new work, exiting loop!"); return Ok(()); } }; if c >= 5 { info!("Account {} seed {} times, discarding!", acc, c); continue; } let a = Address(hexdecode::decode(&acc.as_bytes()).unwrap().as_slice().into()); let worker = scheduler.get_worker(); let res = worker.analyze(a); match res { Ok(r) => { let file_path = if json { format!("{}/{}.json", root_path, acc) } else { format!("{}/{}", root_path, acc) }; let mut f = match File::create(file_path) { Ok(f) => f, Err(e) => { error!("Could not create file for {}: {:?}", acc, e); return Err(Error::from_str(format!( "Could not create file for {}: {:?}", acc, e ))); } }; if json { if let AnalysisSuccess::Success(ref analysis) = r { let mut res = (false, false, false); if let Some(ref attacks) = analysis.attacks { for attack in attacks { if attack.attack_type == AttackType::StealMoney { res.0 = true; } if attack.attack_type == AttackType::DeleteContract { res.1 = true; } if attack.attack_type == AttackType::HijackControlFlow { res.2 = true; } } } csv.lock().unwrap().write_all(format!("{:x}, {}, {}, {}\n", analysis.address, res.0, res.1, res.2).as_bytes()).expect("Could not write to csv file!"); } let _write_res = f.write_all(json!(r).to_string().as_bytes()); } else { let content = match r { AnalysisSuccess::Success(analysis) => { let mut res = (false, false, false); if let Some(ref attacks) = analysis.attacks { for attack in attacks { if attack.attack_type == AttackType::StealMoney
if attack.attack_type == AttackType::DeleteContract { res.1 = true; } if attack.attack_type == AttackType::HijackControlFlow { res.2 = true; } } } csv.lock().unwrap().write_all(format!("{:x}, {}, {}, {}\n", analysis.address, res.0, res.1, res.2).as_bytes()).expect("Could not write to csv file!"); format!("{}", analysis) }, AnalysisSuccess::Failure(s) => { warn!("Failure during analysing {}: {}", acc, s); s }, AnalysisSuccess::Timeout => { warn!("Timeout during analysis: {:?}", acc); format!("Timeout analysing {:?}", acc) }, }; let _write_res = f.write_all(content.as_bytes()); } } Err(e) => { if let Kind::Retry = e.kind() { error!("Error analyzing {}, retrying...", acc); work_stack.lock().unwrap().push((c+1, acc)); } else { error!("Error analyzing {}: {:?} worker died!", acc, e); } } }; info!( "Analyzed {} of {} contracts", counter.fetch_add(1, Ordering::Relaxed), acc_len ); } } fn main() { // init logger init_logger().expect("Could not initialize logger"); // parse args let matches = parse_args(); // create root path let root_path = format!( "analysis/{}/", Local::now().format("%Y-%m-%d-%H:%M:%S").to_string() ); fs::create_dir_all(root_path.clone()).expect("Could not create root folder for analysis"); let root_path = Arc::new(root_path); let acc_path = matches.value_of("INPUT").unwrap(); let server_path = matches.value_of("SERVER_LIST").unwrap(); let (work_stack, acc_len) = parse_account_list(acc_path); let server_list = parse_server_list(server_path); let server_len = server_list.len(); let timeout = if let Some(b) = matches.value_of("timeout") { b.parse().expect("Incorrect timeout supplied!") } else { 0 }; let scheduler = Arc::new(Scheduler::with_worker_count(server_list, timeout)); let counter = Arc::new(AtomicUsize::new(1)); let mut f = File::create(format!("{}/analysis.csv", root_path)).expect("Could not create csv file!"); f.write_all("address, steal ether, trigger suicide, hijack control flow\n".as_bytes()).expect("Could not write header to cvs!"); let csv_writer = Arc::new(Mutex::new(BufWriter::new(f))); info!("Starting Analysis"); let mut threads = Vec::new(); for _ in 0..server_len { let work_stack_clone = Arc::clone(&work_stack); let scheduler_clone = Arc::clone(&scheduler); let counter_clone = Arc::clone(&counter); let root_path_clone = Arc::clone(&root_path); let csv_clone = Arc::clone(&csv_writer); let json = matches.is_present("json"); let join_handle = thread::spawn(move || { execute( work_stack_clone, scheduler_clone, counter_clone, acc_len, root_path_clone, &csv_clone, json, ) }); threads.push(join_handle); } csv_writer.lock().unwrap().flush().expect("Could not finally flush writer"); for handle in threads { let _res = handle.join(); } info!("Finished Analysis"); }
{ res.0 = true; }
conditional_block
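The `impl_error_kind!` macro in the scheduler record above expands to one `From<T> for Error` implementation per wrapped variant, which is what lets `execute` and the parsing helpers bubble reqwest, serde_json, logging and IO failures up with `?`. The following is a minimal, self-contained sketch of that same pattern with hand-written impls; the `Io`/`Parse` variants and `read_count` are illustrative names, not part of the scheduler.

#[derive(Debug)]
struct Error {
    kind: Kind,
}

#[derive(Debug)]
enum Kind {
    Io(std::io::Error),
    Parse(std::num::ParseIntError),
    // Mirrors the bare `Retry`-style variant: no wrapped error payload.
    Retry,
}

// Hand-written equivalents of the impls the macro expands to.
impl From<std::io::Error> for Error {
    fn from(error: std::io::Error) -> Self {
        Self { kind: Kind::Io(error) }
    }
}

impl From<std::num::ParseIntError> for Error {
    fn from(error: std::num::ParseIntError) -> Self {
        Self { kind: Kind::Parse(error) }
    }
}

// With the From impls in place, `?` converts either failure into Error,
// which is the convenience the scheduler's worker loop relies on.
fn read_count(path: &str) -> Result<u32, Error> {
    let text = std::fs::read_to_string(path)?; // io::Error -> Error
    let count = text.trim().parse::<u32>()?; // ParseIntError -> Error
    Ok(count)
}

fn main() {
    match read_count("count.txt") {
        Ok(n) => println!("count = {}", n),
        Err(e) => eprintln!("failed: {:?}", e),
    }
}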
lib.rs
// Copyright 2020 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #![deny(missing_docs)] //! This module provides a test HTTP(S) server that can be instantiated simply by a unit test, for //! connecting components to where you need to vary the response(s) from the HTTP(S) server during //! the operation of the test. //! //! It handles the TCP setup, letting the user specify `Handler` implementations which return the //! responses from the server. `Handler` implementations are meant to be composable to provide //! for fault injection and varying behavior in tests. // This is gratuitously borrowed from src/sys/pkg/lib/fuchsia-pkg-testing/src/serve.rs, and then // made generic across all requests by removing the repo-serving aspects of it. use { anyhow::Error, chrono::Utc, fuchsia_async::{self as fasync, Task}, fuchsia_hyper, futures::{future::BoxFuture, prelude::*}, hyper::{ server::{accept::from_stream, Server}, service::{make_service_fn, service_fn}, Body, Request, Response, StatusCode, }, std::{ convert::Infallible, net::{Ipv6Addr, SocketAddr}, pin::Pin, sync::Arc, }, }; // Some provided Handler implementations. pub mod handler; // Some provided Handler implementations for injecting faults into the server's behavior. pub mod fault_injection; /// A "test" HTTP(S) server which is composed of `Handler` implementations, and holding the /// connection state. pub struct TestServer { stop: futures::channel::oneshot::Sender<()>, addr: SocketAddr, use_https: bool, task: Task<()>, } /// Base trait that all Handlers implement. pub trait Handler: 'static + Send + Sync { /// A Handler impl signals that it wishes to handle a request by returning a response for it, /// otherwise it returns None. fn handles(&self, request: &Request<Body>) -> Option<BoxFuture<'_, Response<Body>>>; } impl Handler for Arc<dyn Handler> { fn handles(&self, request: &Request<Body>) -> Option<BoxFuture<'_, Response<Body>>> { (**self).handles(request) } } impl TestServer { /// return the scheme of the TestServer fn scheme(&self) -> &'static str { if self.use_https { "https" } else { "http" } } /// Returns the URL that can be used to connect to this repository from this device. pub fn local_url(&self) -> String { format!("{}://localhost:{}", self.scheme(), self.addr.port()) } /// Returns the URL for the given path that can be used to connect to this repository from this /// device. pub fn local_url_for_path(&self, path: &str) -> String { let path = path.trim_start_matches('/'); format!("{}://localhost:{}/{}", self.scheme(), self.addr.port(), path) } /// Gracefully signal the server to stop and returns a future that resolves when it terminates. pub fn stop(self) -> impl Future<Output = ()> { self.stop.send(()).expect("remote end to still be open"); self.task } /// Internal helper which iterates over all Handlers until it finds one that will respond to the /// request. It then returns that response. If not response is found, it returns 404 NOT_FOUND. async fn handle_request( handlers: Arc<Vec<Arc<dyn Handler>>>, req: Request<Body>, ) -> Response<Body> { let response = handlers.iter().find_map(|h| h.handles(&req)); match response { Some(response) => response.await, None => Response::builder().status(StatusCode::NOT_FOUND).body(Body::empty()).unwrap(), } } /// Create a Builder pub fn builder() -> TestServerBuilder { TestServerBuilder::new() } } /// A builder to construct a `TestServer`. 
#[derive(Default)] pub struct TestServerBuilder { handlers: Vec<Arc<dyn Handler>>, https_certs: Option<(Vec<rustls::Certificate>, rustls::PrivateKey)>, } impl TestServerBuilder { /// Create a new TestServerBuilder pub fn new() -> Self { Self::default() } /// Serve over TLS, using a server certificate rooted the provided certs pub fn use_https(mut self, cert_chain: &[u8], private_key: &[u8]) -> Self { let cert_chain = parse_cert_chain(cert_chain); let private_key = parse_private_key(private_key); self.https_certs = Some((cert_chain, private_key)); self } /// Add a Handler which implements the server's behavior. These are given the ability to /// handle a request in the order in which they are added to the `TestServerBuilder`. pub fn handler(mut self, handler: impl Handler + 'static) -> Self { self.handlers.push(Arc::new(handler)); self } /// Spawn the server on the current executor, returning a handle to manage the server. pub async fn start(self) -> TestServer { let (mut listener, addr) = { let addr = SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 0); let listener = bind_listener(&addr).await; let local_addr = listener.local_addr().unwrap(); (listener, local_addr) }; let (stop, rx_stop) = futures::channel::oneshot::channel(); let (tls_acceptor, use_https) = if let Some((cert_chain, private_key)) = self.https_certs { // build a server configuration using a test CA and cert chain let mut tls_config = rustls::ServerConfig::new(rustls::NoClientAuth::new()); tls_config.set_single_cert(cert_chain, private_key).unwrap(); let tls_acceptor = tokio_rustls::TlsAcceptor::from(Arc::new(tls_config)); (Some(tls_acceptor), true) } else { (None, false) }; let task = fasync::Task::spawn(async move { let listener = accept_stream(&mut listener); let listener = listener .map_err(Error::from) .map_ok(|conn| fuchsia_hyper::TcpStream { stream: conn }); let connections = if let Some(tls_acceptor) = tls_acceptor { // wrap incoming tcp streams listener .and_then(move |conn| { tls_acceptor.accept(conn).map(|res| match res { Ok(conn) => { Ok(Pin::new(Box::new(conn)) as Pin<Box<dyn AsyncReadWrite>>) } Err(e) => Err(Error::from(e)), }) }) .boxed() // connections } else { listener .map_ok(|conn| Pin::new(Box::new(conn)) as Pin<Box<dyn AsyncReadWrite>>) .boxed() // connections }; // This is the root Arc<Vec<Arc<dyn Handler>>>. let handlers = Arc::new(self.handlers); let make_svc = make_service_fn(move |_socket| { // Each connection to the server receives a separate service_fn instance, and so // needs it's own copy of the handlers, this is a factory of sorts. let handlers = Arc::clone(&handlers); async move { Ok::<_, Infallible>(service_fn(move |req| { // Each request made by a connection is serviced by the service_fn created from // this scope, which is why there is another cloning of the Arc of Handlers. 
let method = req.method().to_owned(); let path = req.uri().path().to_owned(); TestServer::handle_request(Arc::clone(&handlers), req) .inspect(move |x| { println!( "{} [test http] {} {} => {}", Utc::now().format("%T.%6f"), method, path, x.status() ) }) .map(Ok::<_, Infallible>) })) } }); Server::builder(from_stream(connections)) .executor(fuchsia_hyper::Executor) .serve(make_svc) .with_graceful_shutdown( rx_stop.map(|res| res.unwrap_or_else(|futures::channel::oneshot::Canceled| ())), ) .unwrap_or_else(|e| panic!("error serving repo over http: {}", e)) .await; }); TestServer { stop, addr, use_https, task } } } #[cfg(target_os = "fuchsia")] async fn bind_listener(addr: &SocketAddr) -> fuchsia_async::net::TcpListener { fuchsia_async::net::TcpListener::bind(addr).unwrap() } #[cfg(not(target_os = "fuchsia"))] async fn bind_listener(&addr: &SocketAddr) -> async_net::TcpListener { async_net::TcpListener::bind(addr).await.unwrap() } #[cfg(target_os = "fuchsia")] fn
<'a>( listener: &'a mut fuchsia_async::net::TcpListener, ) -> impl Stream<Item = std::io::Result<fuchsia_async::net::TcpStream>> + 'a { use std::task::{Context, Poll}; #[pin_project::pin_project] struct AcceptStream<'a> { #[pin] listener: &'a mut fuchsia_async::net::TcpListener, } impl<'a> Stream for AcceptStream<'a> { type Item = std::io::Result<fuchsia_async::net::TcpStream>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { let mut this = self.project(); match this.listener.async_accept(cx) { Poll::Ready(Ok((conn, _addr))) => Poll::Ready(Some(Ok(conn))), Poll::Ready(Err(err)) => Poll::Ready(Some(Err(err))), Poll::Pending => Poll::Pending, } } } AcceptStream { listener } } #[cfg(not(target_os = "fuchsia"))] fn accept_stream<'a>( listener: &'a mut async_net::TcpListener, ) -> impl Stream<Item = std::io::Result<async_net::TcpStream>> + 'a { listener.incoming() } fn parse_cert_chain(mut bytes: &[u8]) -> Vec<rustls::Certificate> { rustls::internal::pemfile::certs(&mut bytes).expect("certs to parse") } fn parse_private_key(mut bytes: &[u8]) -> rustls::PrivateKey { let keys = rustls::internal::pemfile::rsa_private_keys(&mut bytes).expect("private keys to parse"); assert_eq!(keys.len(), 1, "expecting a single private key"); keys.into_iter().next().unwrap() } trait AsyncReadWrite: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send {} impl<T> AsyncReadWrite for T where T: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send {} // These are a set of useful functions when writing tests. /// Create a GET request for a given url, which can be used with any hyper client. pub fn make_get(url: impl AsRef<str>) -> Result<Request<Body>, Error> { Request::get(url.as_ref()).body(Body::empty()).map_err(Error::from) } /// Perform an HTTP GET for the given url, returning the result. pub async fn get(url: impl AsRef<str>) -> Result<Response<Body>, Error> { let request = make_get(url)?; let client = fuchsia_hyper::new_client(); let response = client.request(request).await?; Ok(response) } /// Collect a Response into a single Vec of bytes. pub async fn body_as_bytes(response: Response<Body>) -> Result<Vec<u8>, Error> { let bytes = response .into_body() .try_fold(Vec::new(), |mut vec, b| async move { vec.extend(b); Ok(vec) }) .await?; Ok(bytes) } /// Collect a Response's Body and convert the body to a tring. pub async fn body_as_string(response: Response<Body>) -> Result<String, Error> { let bytes = body_as_bytes(response).await?; let string = String::from_utf8(bytes)?; Ok(string) } /// Get a url and return the body of the response as a string. 
pub async fn get_body_as_string(url: impl AsRef<str>) -> Result<String, Error> { let response = get(url).await?; body_as_string(response).await } #[cfg(test)] mod tests { use super::*; use crate::{fault_injection::*, handler::*}; use anyhow::anyhow; use fasync::TimeoutExt; #[fuchsia_async::run_singlethreaded(test)] async fn test_start_stop() { let server = TestServer::builder().start().await; server.stop().await; } #[fuchsia_async::run_singlethreaded(test)] async fn test_empty_server_404s() { let server = TestServer::builder().start().await; let result = get(server.local_url()).await; assert_eq!(result.unwrap().status(), StatusCode::NOT_FOUND); } #[fuchsia_async::run_singlethreaded(test)] async fn test_shared_handler() { let shared: Arc<dyn Handler> = Arc::new(StaticResponse::ok_body("shared")); let server = TestServer::builder() .handler(ForPath::new("/a", Arc::clone(&shared))) .handler(shared) .start() .await; assert_eq!(get_body_as_string(server.local_url_for_path("/a")).await.unwrap(), "shared"); assert_eq!(get_body_as_string(server.local_url_for_path("/foo")).await.unwrap(), "shared"); } #[fuchsia_async::run_singlethreaded(test)] async fn test_simple_responder() { let server = TestServer::builder().handler(StaticResponse::ok_body("some data")).start().await; assert_eq!( get_body_as_string(server.local_url_for_path("ignored")).await.unwrap(), "some data" ); } #[fuchsia_async::run_singlethreaded(test)] async fn test_simple_path() { let server = TestServer::builder() .handler(ForPath::new("/some/path", StaticResponse::ok_body("some data"))) .start() .await; assert_eq!( get_body_as_string(server.local_url_for_path("/some/path")).await.unwrap(), "some data" ); } #[fuchsia_async::run_singlethreaded(test)] async fn test_simple_path_doesnt_respond_to_wrong_path() { let server = TestServer::builder() .handler(ForPath::new("/some/path", StaticResponse::ok_body("some data"))) .start() .await; // make sure a non-matching path fails let result = get(server.local_url_for_path("/other/path")).await; assert_eq!(result.unwrap().status(), StatusCode::NOT_FOUND); } #[fuchsia_async::run_singlethreaded(test)] async fn test_hang() { let server = TestServer::builder().handler(Hang).start().await; let result = get(server.local_url_for_path("ignored")) .on_timeout(std::time::Duration::from_secs(1), || Err(anyhow!("timed out"))) .await; assert_eq!(result.unwrap_err().to_string(), Error::msg("timed out").to_string()); } #[fuchsia_async::run_singlethreaded(test)] async fn test_hang_body() { let server = TestServer::builder().handler(HangBody::content_length(500)).start().await; let result = get_body_as_string(server.local_url_for_path("ignored")) .on_timeout(std::time::Duration::from_secs(1), || Err(anyhow!("timed out"))) .await; assert_eq!(result.unwrap_err().to_string(), Error::msg("timed out").to_string()); } }
accept_stream
identifier_name
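The `Handler` trait in the lib.rs record above is the single extension point of the test server: a handler inspects the request and either returns a boxed response future or `None`, in which case the next handler registered on the `TestServerBuilder` gets a chance (and the server falls back to 404). A hedged sketch of a custom handler follows, assuming the same hyper/futures dependencies as lib.rs; `EchoPath` and the `/echo/` prefix are illustrative only.

use futures::future::{BoxFuture, FutureExt};
use hyper::{Body, Request, Response};

/// Responds to /echo/<text> with <text>; defers every other path.
struct EchoPath;

impl Handler for EchoPath {
    fn handles(&self, request: &Request<Body>) -> Option<BoxFuture<'_, Response<Body>>> {
        let path = request.uri().path();
        if !path.starts_with("/echo/") {
            // Returning None lets the next registered handler (or the 404 fallback) run.
            return None;
        }
        let body = path.trim_start_matches("/echo/").to_string();
        Some(async move { Response::new(Body::from(body)) }.boxed())
    }
}

Such a handler would be registered like the built-in ones, e.g. `TestServer::builder().handler(EchoPath).start().await`.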
lib.rs
// Copyright 2020 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #![deny(missing_docs)] //! This module provides a test HTTP(S) server that can be instantiated simply by a unit test, for //! connecting components to where you need to vary the response(s) from the HTTP(S) server during //! the operation of the test. //! //! It handles the TCP setup, letting the user specify `Handler` implementations which return the //! responses from the server. `Handler` implementations are meant to be composable to provide //! for fault injection and varying behavior in tests. // This is gratuitously borrowed from src/sys/pkg/lib/fuchsia-pkg-testing/src/serve.rs, and then // made generic across all requests by removing the repo-serving aspects of it. use { anyhow::Error, chrono::Utc, fuchsia_async::{self as fasync, Task}, fuchsia_hyper, futures::{future::BoxFuture, prelude::*}, hyper::{ server::{accept::from_stream, Server}, service::{make_service_fn, service_fn}, Body, Request, Response, StatusCode, }, std::{ convert::Infallible, net::{Ipv6Addr, SocketAddr}, pin::Pin, sync::Arc, }, }; // Some provided Handler implementations. pub mod handler; // Some provided Handler implementations for injecting faults into the server's behavior. pub mod fault_injection; /// A "test" HTTP(S) server which is composed of `Handler` implementations, and holding the /// connection state. pub struct TestServer { stop: futures::channel::oneshot::Sender<()>, addr: SocketAddr, use_https: bool, task: Task<()>, } /// Base trait that all Handlers implement. pub trait Handler: 'static + Send + Sync { /// A Handler impl signals that it wishes to handle a request by returning a response for it, /// otherwise it returns None. fn handles(&self, request: &Request<Body>) -> Option<BoxFuture<'_, Response<Body>>>; } impl Handler for Arc<dyn Handler> { fn handles(&self, request: &Request<Body>) -> Option<BoxFuture<'_, Response<Body>>> { (**self).handles(request) } } impl TestServer { /// return the scheme of the TestServer fn scheme(&self) -> &'static str { if self.use_https { "https" } else { "http" } } /// Returns the URL that can be used to connect to this repository from this device. pub fn local_url(&self) -> String { format!("{}://localhost:{}", self.scheme(), self.addr.port()) } /// Returns the URL for the given path that can be used to connect to this repository from this /// device. pub fn local_url_for_path(&self, path: &str) -> String { let path = path.trim_start_matches('/'); format!("{}://localhost:{}/{}", self.scheme(), self.addr.port(), path) } /// Gracefully signal the server to stop and returns a future that resolves when it terminates. pub fn stop(self) -> impl Future<Output = ()> { self.stop.send(()).expect("remote end to still be open"); self.task } /// Internal helper which iterates over all Handlers until it finds one that will respond to the /// request. It then returns that response. If not response is found, it returns 404 NOT_FOUND. async fn handle_request( handlers: Arc<Vec<Arc<dyn Handler>>>, req: Request<Body>, ) -> Response<Body> { let response = handlers.iter().find_map(|h| h.handles(&req)); match response { Some(response) => response.await, None => Response::builder().status(StatusCode::NOT_FOUND).body(Body::empty()).unwrap(), } } /// Create a Builder pub fn builder() -> TestServerBuilder { TestServerBuilder::new() } } /// A builder to construct a `TestServer`. 
#[derive(Default)] pub struct TestServerBuilder { handlers: Vec<Arc<dyn Handler>>, https_certs: Option<(Vec<rustls::Certificate>, rustls::PrivateKey)>, } impl TestServerBuilder { /// Create a new TestServerBuilder pub fn new() -> Self { Self::default() } /// Serve over TLS, using a server certificate rooted the provided certs pub fn use_https(mut self, cert_chain: &[u8], private_key: &[u8]) -> Self { let cert_chain = parse_cert_chain(cert_chain); let private_key = parse_private_key(private_key); self.https_certs = Some((cert_chain, private_key)); self } /// Add a Handler which implements the server's behavior. These are given the ability to /// handle a request in the order in which they are added to the `TestServerBuilder`. pub fn handler(mut self, handler: impl Handler + 'static) -> Self { self.handlers.push(Arc::new(handler)); self } /// Spawn the server on the current executor, returning a handle to manage the server. pub async fn start(self) -> TestServer { let (mut listener, addr) = { let addr = SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 0); let listener = bind_listener(&addr).await; let local_addr = listener.local_addr().unwrap(); (listener, local_addr) }; let (stop, rx_stop) = futures::channel::oneshot::channel(); let (tls_acceptor, use_https) = if let Some((cert_chain, private_key)) = self.https_certs
else { (None, false) }; let task = fasync::Task::spawn(async move { let listener = accept_stream(&mut listener); let listener = listener .map_err(Error::from) .map_ok(|conn| fuchsia_hyper::TcpStream { stream: conn }); let connections = if let Some(tls_acceptor) = tls_acceptor { // wrap incoming tcp streams listener .and_then(move |conn| { tls_acceptor.accept(conn).map(|res| match res { Ok(conn) => { Ok(Pin::new(Box::new(conn)) as Pin<Box<dyn AsyncReadWrite>>) } Err(e) => Err(Error::from(e)), }) }) .boxed() // connections } else { listener .map_ok(|conn| Pin::new(Box::new(conn)) as Pin<Box<dyn AsyncReadWrite>>) .boxed() // connections }; // This is the root Arc<Vec<Arc<dyn Handler>>>. let handlers = Arc::new(self.handlers); let make_svc = make_service_fn(move |_socket| { // Each connection to the server receives a separate service_fn instance, and so // needs it's own copy of the handlers, this is a factory of sorts. let handlers = Arc::clone(&handlers); async move { Ok::<_, Infallible>(service_fn(move |req| { // Each request made by a connection is serviced by the service_fn created from // this scope, which is why there is another cloning of the Arc of Handlers. let method = req.method().to_owned(); let path = req.uri().path().to_owned(); TestServer::handle_request(Arc::clone(&handlers), req) .inspect(move |x| { println!( "{} [test http] {} {} => {}", Utc::now().format("%T.%6f"), method, path, x.status() ) }) .map(Ok::<_, Infallible>) })) } }); Server::builder(from_stream(connections)) .executor(fuchsia_hyper::Executor) .serve(make_svc) .with_graceful_shutdown( rx_stop.map(|res| res.unwrap_or_else(|futures::channel::oneshot::Canceled| ())), ) .unwrap_or_else(|e| panic!("error serving repo over http: {}", e)) .await; }); TestServer { stop, addr, use_https, task } } } #[cfg(target_os = "fuchsia")] async fn bind_listener(addr: &SocketAddr) -> fuchsia_async::net::TcpListener { fuchsia_async::net::TcpListener::bind(addr).unwrap() } #[cfg(not(target_os = "fuchsia"))] async fn bind_listener(&addr: &SocketAddr) -> async_net::TcpListener { async_net::TcpListener::bind(addr).await.unwrap() } #[cfg(target_os = "fuchsia")] fn accept_stream<'a>( listener: &'a mut fuchsia_async::net::TcpListener, ) -> impl Stream<Item = std::io::Result<fuchsia_async::net::TcpStream>> + 'a { use std::task::{Context, Poll}; #[pin_project::pin_project] struct AcceptStream<'a> { #[pin] listener: &'a mut fuchsia_async::net::TcpListener, } impl<'a> Stream for AcceptStream<'a> { type Item = std::io::Result<fuchsia_async::net::TcpStream>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { let mut this = self.project(); match this.listener.async_accept(cx) { Poll::Ready(Ok((conn, _addr))) => Poll::Ready(Some(Ok(conn))), Poll::Ready(Err(err)) => Poll::Ready(Some(Err(err))), Poll::Pending => Poll::Pending, } } } AcceptStream { listener } } #[cfg(not(target_os = "fuchsia"))] fn accept_stream<'a>( listener: &'a mut async_net::TcpListener, ) -> impl Stream<Item = std::io::Result<async_net::TcpStream>> + 'a { listener.incoming() } fn parse_cert_chain(mut bytes: &[u8]) -> Vec<rustls::Certificate> { rustls::internal::pemfile::certs(&mut bytes).expect("certs to parse") } fn parse_private_key(mut bytes: &[u8]) -> rustls::PrivateKey { let keys = rustls::internal::pemfile::rsa_private_keys(&mut bytes).expect("private keys to parse"); assert_eq!(keys.len(), 1, "expecting a single private key"); keys.into_iter().next().unwrap() } trait AsyncReadWrite: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send {} 
impl<T> AsyncReadWrite for T where T: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send {} // These are a set of useful functions when writing tests. /// Create a GET request for a given url, which can be used with any hyper client. pub fn make_get(url: impl AsRef<str>) -> Result<Request<Body>, Error> { Request::get(url.as_ref()).body(Body::empty()).map_err(Error::from) } /// Perform an HTTP GET for the given url, returning the result. pub async fn get(url: impl AsRef<str>) -> Result<Response<Body>, Error> { let request = make_get(url)?; let client = fuchsia_hyper::new_client(); let response = client.request(request).await?; Ok(response) } /// Collect a Response into a single Vec of bytes. pub async fn body_as_bytes(response: Response<Body>) -> Result<Vec<u8>, Error> { let bytes = response .into_body() .try_fold(Vec::new(), |mut vec, b| async move { vec.extend(b); Ok(vec) }) .await?; Ok(bytes) } /// Collect a Response's Body and convert the body to a tring. pub async fn body_as_string(response: Response<Body>) -> Result<String, Error> { let bytes = body_as_bytes(response).await?; let string = String::from_utf8(bytes)?; Ok(string) } /// Get a url and return the body of the response as a string. pub async fn get_body_as_string(url: impl AsRef<str>) -> Result<String, Error> { let response = get(url).await?; body_as_string(response).await } #[cfg(test)] mod tests { use super::*; use crate::{fault_injection::*, handler::*}; use anyhow::anyhow; use fasync::TimeoutExt; #[fuchsia_async::run_singlethreaded(test)] async fn test_start_stop() { let server = TestServer::builder().start().await; server.stop().await; } #[fuchsia_async::run_singlethreaded(test)] async fn test_empty_server_404s() { let server = TestServer::builder().start().await; let result = get(server.local_url()).await; assert_eq!(result.unwrap().status(), StatusCode::NOT_FOUND); } #[fuchsia_async::run_singlethreaded(test)] async fn test_shared_handler() { let shared: Arc<dyn Handler> = Arc::new(StaticResponse::ok_body("shared")); let server = TestServer::builder() .handler(ForPath::new("/a", Arc::clone(&shared))) .handler(shared) .start() .await; assert_eq!(get_body_as_string(server.local_url_for_path("/a")).await.unwrap(), "shared"); assert_eq!(get_body_as_string(server.local_url_for_path("/foo")).await.unwrap(), "shared"); } #[fuchsia_async::run_singlethreaded(test)] async fn test_simple_responder() { let server = TestServer::builder().handler(StaticResponse::ok_body("some data")).start().await; assert_eq!( get_body_as_string(server.local_url_for_path("ignored")).await.unwrap(), "some data" ); } #[fuchsia_async::run_singlethreaded(test)] async fn test_simple_path() { let server = TestServer::builder() .handler(ForPath::new("/some/path", StaticResponse::ok_body("some data"))) .start() .await; assert_eq!( get_body_as_string(server.local_url_for_path("/some/path")).await.unwrap(), "some data" ); } #[fuchsia_async::run_singlethreaded(test)] async fn test_simple_path_doesnt_respond_to_wrong_path() { let server = TestServer::builder() .handler(ForPath::new("/some/path", StaticResponse::ok_body("some data"))) .start() .await; // make sure a non-matching path fails let result = get(server.local_url_for_path("/other/path")).await; assert_eq!(result.unwrap().status(), StatusCode::NOT_FOUND); } #[fuchsia_async::run_singlethreaded(test)] async fn test_hang() { let server = TestServer::builder().handler(Hang).start().await; let result = get(server.local_url_for_path("ignored")) .on_timeout(std::time::Duration::from_secs(1), || 
Err(anyhow!("timed out"))) .await; assert_eq!(result.unwrap_err().to_string(), Error::msg("timed out").to_string()); } #[fuchsia_async::run_singlethreaded(test)] async fn test_hang_body() { let server = TestServer::builder().handler(HangBody::content_length(500)).start().await; let result = get_body_as_string(server.local_url_for_path("ignored")) .on_timeout(std::time::Duration::from_secs(1), || Err(anyhow!("timed out"))) .await; assert_eq!(result.unwrap_err().to_string(), Error::msg("timed out").to_string()); } }
{ // build a server configuration using a test CA and cert chain let mut tls_config = rustls::ServerConfig::new(rustls::NoClientAuth::new()); tls_config.set_single_cert(cert_chain, private_key).unwrap(); let tls_acceptor = tokio_rustls::TlsAcceptor::from(Arc::new(tls_config)); (Some(tls_acceptor), true) }
conditional_block
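The conditional block extracted as this record's middle is the HTTPS branch: when `use_https` supplied a certificate chain and private key, the builder constructs a rustls `ServerConfig` and wraps each accepted TCP stream in a `tokio_rustls::TlsAcceptor`. Below is a hedged usage sketch from the test side, written in the style of the existing tests; the PEM fixture paths are hypothetical, and the key must be RSA because `parse_private_key` only reads RSA private keys.

#[fuchsia_async::run_singlethreaded(test)]
async fn test_https_server() {
    // Hypothetical PEM fixtures; any valid chain plus the matching RSA key would do.
    let cert_pem: &[u8] = include_bytes!("testdata/server_cert_chain.pem");
    let key_pem: &[u8] = include_bytes!("testdata/server_rsa_key.pem");

    let server = TestServer::builder()
        .use_https(cert_pem, key_pem)
        .handler(StaticResponse::ok_body("over tls"))
        .start()
        .await;

    // The scheme flips to https once the TLS acceptor is in place.
    assert!(server.local_url().starts_with("https://"));
    server.stop().await;
}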
lib.rs
// Copyright 2020 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #![deny(missing_docs)] //! This module provides a test HTTP(S) server that can be instantiated simply by a unit test, for //! connecting components to where you need to vary the response(s) from the HTTP(S) server during //! the operation of the test. //! //! It handles the TCP setup, letting the user specify `Handler` implementations which return the //! responses from the server. `Handler` implementations are meant to be composable to provide //! for fault injection and varying behavior in tests. // This is gratuitously borrowed from src/sys/pkg/lib/fuchsia-pkg-testing/src/serve.rs, and then // made generic across all requests by removing the repo-serving aspects of it. use { anyhow::Error, chrono::Utc, fuchsia_async::{self as fasync, Task}, fuchsia_hyper, futures::{future::BoxFuture, prelude::*}, hyper::{ server::{accept::from_stream, Server}, service::{make_service_fn, service_fn}, Body, Request, Response, StatusCode, }, std::{ convert::Infallible, net::{Ipv6Addr, SocketAddr}, pin::Pin, sync::Arc, }, }; // Some provided Handler implementations. pub mod handler; // Some provided Handler implementations for injecting faults into the server's behavior. pub mod fault_injection; /// A "test" HTTP(S) server which is composed of `Handler` implementations, and holding the /// connection state. pub struct TestServer { stop: futures::channel::oneshot::Sender<()>, addr: SocketAddr, use_https: bool, task: Task<()>, } /// Base trait that all Handlers implement. pub trait Handler: 'static + Send + Sync { /// A Handler impl signals that it wishes to handle a request by returning a response for it, /// otherwise it returns None. fn handles(&self, request: &Request<Body>) -> Option<BoxFuture<'_, Response<Body>>>; } impl Handler for Arc<dyn Handler> { fn handles(&self, request: &Request<Body>) -> Option<BoxFuture<'_, Response<Body>>> { (**self).handles(request) } } impl TestServer { /// return the scheme of the TestServer fn scheme(&self) -> &'static str { if self.use_https { "https" } else { "http" } } /// Returns the URL that can be used to connect to this repository from this device. pub fn local_url(&self) -> String { format!("{}://localhost:{}", self.scheme(), self.addr.port()) } /// Returns the URL for the given path that can be used to connect to this repository from this /// device. pub fn local_url_for_path(&self, path: &str) -> String { let path = path.trim_start_matches('/'); format!("{}://localhost:{}/{}", self.scheme(), self.addr.port(), path) } /// Gracefully signal the server to stop and returns a future that resolves when it terminates. pub fn stop(self) -> impl Future<Output = ()> { self.stop.send(()).expect("remote end to still be open"); self.task } /// Internal helper which iterates over all Handlers until it finds one that will respond to the /// request. It then returns that response. If not response is found, it returns 404 NOT_FOUND. async fn handle_request( handlers: Arc<Vec<Arc<dyn Handler>>>, req: Request<Body>, ) -> Response<Body> { let response = handlers.iter().find_map(|h| h.handles(&req)); match response { Some(response) => response.await, None => Response::builder().status(StatusCode::NOT_FOUND).body(Body::empty()).unwrap(), } } /// Create a Builder pub fn builder() -> TestServerBuilder { TestServerBuilder::new() } } /// A builder to construct a `TestServer`. 
#[derive(Default)] pub struct TestServerBuilder { handlers: Vec<Arc<dyn Handler>>, https_certs: Option<(Vec<rustls::Certificate>, rustls::PrivateKey)>, } impl TestServerBuilder { /// Create a new TestServerBuilder pub fn new() -> Self { Self::default() } /// Serve over TLS, using a server certificate rooted the provided certs pub fn use_https(mut self, cert_chain: &[u8], private_key: &[u8]) -> Self { let cert_chain = parse_cert_chain(cert_chain); let private_key = parse_private_key(private_key); self.https_certs = Some((cert_chain, private_key)); self } /// Add a Handler which implements the server's behavior. These are given the ability to /// handle a request in the order in which they are added to the `TestServerBuilder`. pub fn handler(mut self, handler: impl Handler + 'static) -> Self { self.handlers.push(Arc::new(handler)); self } /// Spawn the server on the current executor, returning a handle to manage the server. pub async fn start(self) -> TestServer { let (mut listener, addr) = { let addr = SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 0); let listener = bind_listener(&addr).await; let local_addr = listener.local_addr().unwrap(); (listener, local_addr) }; let (stop, rx_stop) = futures::channel::oneshot::channel(); let (tls_acceptor, use_https) = if let Some((cert_chain, private_key)) = self.https_certs { // build a server configuration using a test CA and cert chain let mut tls_config = rustls::ServerConfig::new(rustls::NoClientAuth::new()); tls_config.set_single_cert(cert_chain, private_key).unwrap(); let tls_acceptor = tokio_rustls::TlsAcceptor::from(Arc::new(tls_config)); (Some(tls_acceptor), true) } else { (None, false) }; let task = fasync::Task::spawn(async move { let listener = accept_stream(&mut listener); let listener = listener .map_err(Error::from) .map_ok(|conn| fuchsia_hyper::TcpStream { stream: conn }); let connections = if let Some(tls_acceptor) = tls_acceptor { // wrap incoming tcp streams listener .and_then(move |conn| { tls_acceptor.accept(conn).map(|res| match res { Ok(conn) => { Ok(Pin::new(Box::new(conn)) as Pin<Box<dyn AsyncReadWrite>>) } Err(e) => Err(Error::from(e)), }) }) .boxed() // connections } else { listener .map_ok(|conn| Pin::new(Box::new(conn)) as Pin<Box<dyn AsyncReadWrite>>) .boxed() // connections }; // This is the root Arc<Vec<Arc<dyn Handler>>>. let handlers = Arc::new(self.handlers); let make_svc = make_service_fn(move |_socket| { // Each connection to the server receives a separate service_fn instance, and so // needs it's own copy of the handlers, this is a factory of sorts. let handlers = Arc::clone(&handlers); async move { Ok::<_, Infallible>(service_fn(move |req| { // Each request made by a connection is serviced by the service_fn created from // this scope, which is why there is another cloning of the Arc of Handlers. 
let method = req.method().to_owned(); let path = req.uri().path().to_owned(); TestServer::handle_request(Arc::clone(&handlers), req) .inspect(move |x| { println!( "{} [test http] {} {} => {}", Utc::now().format("%T.%6f"), method, path, x.status() ) }) .map(Ok::<_, Infallible>) })) } }); Server::builder(from_stream(connections)) .executor(fuchsia_hyper::Executor) .serve(make_svc) .with_graceful_shutdown( rx_stop.map(|res| res.unwrap_or_else(|futures::channel::oneshot::Canceled| ())), ) .unwrap_or_else(|e| panic!("error serving repo over http: {}", e)) .await; }); TestServer { stop, addr, use_https, task } } } #[cfg(target_os = "fuchsia")] async fn bind_listener(addr: &SocketAddr) -> fuchsia_async::net::TcpListener { fuchsia_async::net::TcpListener::bind(addr).unwrap() } #[cfg(not(target_os = "fuchsia"))] async fn bind_listener(&addr: &SocketAddr) -> async_net::TcpListener { async_net::TcpListener::bind(addr).await.unwrap() } #[cfg(target_os = "fuchsia")] fn accept_stream<'a>( listener: &'a mut fuchsia_async::net::TcpListener, ) -> impl Stream<Item = std::io::Result<fuchsia_async::net::TcpStream>> + 'a { use std::task::{Context, Poll}; #[pin_project::pin_project] struct AcceptStream<'a> { #[pin] listener: &'a mut fuchsia_async::net::TcpListener, } impl<'a> Stream for AcceptStream<'a> { type Item = std::io::Result<fuchsia_async::net::TcpStream>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>>
} AcceptStream { listener } } #[cfg(not(target_os = "fuchsia"))] fn accept_stream<'a>( listener: &'a mut async_net::TcpListener, ) -> impl Stream<Item = std::io::Result<async_net::TcpStream>> + 'a { listener.incoming() } fn parse_cert_chain(mut bytes: &[u8]) -> Vec<rustls::Certificate> { rustls::internal::pemfile::certs(&mut bytes).expect("certs to parse") } fn parse_private_key(mut bytes: &[u8]) -> rustls::PrivateKey { let keys = rustls::internal::pemfile::rsa_private_keys(&mut bytes).expect("private keys to parse"); assert_eq!(keys.len(), 1, "expecting a single private key"); keys.into_iter().next().unwrap() } trait AsyncReadWrite: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send {} impl<T> AsyncReadWrite for T where T: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send {} // These are a set of useful functions when writing tests. /// Create a GET request for a given url, which can be used with any hyper client. pub fn make_get(url: impl AsRef<str>) -> Result<Request<Body>, Error> { Request::get(url.as_ref()).body(Body::empty()).map_err(Error::from) } /// Perform an HTTP GET for the given url, returning the result. pub async fn get(url: impl AsRef<str>) -> Result<Response<Body>, Error> { let request = make_get(url)?; let client = fuchsia_hyper::new_client(); let response = client.request(request).await?; Ok(response) } /// Collect a Response into a single Vec of bytes. pub async fn body_as_bytes(response: Response<Body>) -> Result<Vec<u8>, Error> { let bytes = response .into_body() .try_fold(Vec::new(), |mut vec, b| async move { vec.extend(b); Ok(vec) }) .await?; Ok(bytes) } /// Collect a Response's Body and convert the body to a tring. pub async fn body_as_string(response: Response<Body>) -> Result<String, Error> { let bytes = body_as_bytes(response).await?; let string = String::from_utf8(bytes)?; Ok(string) } /// Get a url and return the body of the response as a string. 
pub async fn get_body_as_string(url: impl AsRef<str>) -> Result<String, Error> { let response = get(url).await?; body_as_string(response).await } #[cfg(test)] mod tests { use super::*; use crate::{fault_injection::*, handler::*}; use anyhow::anyhow; use fasync::TimeoutExt; #[fuchsia_async::run_singlethreaded(test)] async fn test_start_stop() { let server = TestServer::builder().start().await; server.stop().await; } #[fuchsia_async::run_singlethreaded(test)] async fn test_empty_server_404s() { let server = TestServer::builder().start().await; let result = get(server.local_url()).await; assert_eq!(result.unwrap().status(), StatusCode::NOT_FOUND); } #[fuchsia_async::run_singlethreaded(test)] async fn test_shared_handler() { let shared: Arc<dyn Handler> = Arc::new(StaticResponse::ok_body("shared")); let server = TestServer::builder() .handler(ForPath::new("/a", Arc::clone(&shared))) .handler(shared) .start() .await; assert_eq!(get_body_as_string(server.local_url_for_path("/a")).await.unwrap(), "shared"); assert_eq!(get_body_as_string(server.local_url_for_path("/foo")).await.unwrap(), "shared"); } #[fuchsia_async::run_singlethreaded(test)] async fn test_simple_responder() { let server = TestServer::builder().handler(StaticResponse::ok_body("some data")).start().await; assert_eq!( get_body_as_string(server.local_url_for_path("ignored")).await.unwrap(), "some data" ); } #[fuchsia_async::run_singlethreaded(test)] async fn test_simple_path() { let server = TestServer::builder() .handler(ForPath::new("/some/path", StaticResponse::ok_body("some data"))) .start() .await; assert_eq!( get_body_as_string(server.local_url_for_path("/some/path")).await.unwrap(), "some data" ); } #[fuchsia_async::run_singlethreaded(test)] async fn test_simple_path_doesnt_respond_to_wrong_path() { let server = TestServer::builder() .handler(ForPath::new("/some/path", StaticResponse::ok_body("some data"))) .start() .await; // make sure a non-matching path fails let result = get(server.local_url_for_path("/other/path")).await; assert_eq!(result.unwrap().status(), StatusCode::NOT_FOUND); } #[fuchsia_async::run_singlethreaded(test)] async fn test_hang() { let server = TestServer::builder().handler(Hang).start().await; let result = get(server.local_url_for_path("ignored")) .on_timeout(std::time::Duration::from_secs(1), || Err(anyhow!("timed out"))) .await; assert_eq!(result.unwrap_err().to_string(), Error::msg("timed out").to_string()); } #[fuchsia_async::run_singlethreaded(test)] async fn test_hang_body() { let server = TestServer::builder().handler(HangBody::content_length(500)).start().await; let result = get_body_as_string(server.local_url_for_path("ignored")) .on_timeout(std::time::Duration::from_secs(1), || Err(anyhow!("timed out"))) .await; assert_eq!(result.unwrap_err().to_string(), Error::msg("timed out").to_string()); } }
{ let mut this = self.project(); match this.listener.async_accept(cx) { Poll::Ready(Ok((conn, _addr))) => Poll::Ready(Some(Ok(conn))), Poll::Ready(Err(err)) => Poll::Ready(Some(Err(err))), Poll::Pending => Poll::Pending, } }
identifier_body
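The identifier body extracted above is the core of the Fuchsia-side `accept_stream`: `poll_next` simply forwards the listener's `async_accept` poll result, wrapping it in `Some` because an accept loop never terminates on its own. The following self-contained sketch shows the same adapter pattern against a fake poll-based listener; `FakeListener` and the integer connection ids are illustrative, not part of lib.rs.

use std::pin::Pin;
use std::task::{Context, Poll};
use futures::Stream;

struct FakeListener {
    remaining: u32,
}

impl FakeListener {
    // Stand-in for `async_accept`: yields a connection id while any remain.
    fn poll_accept(&mut self, _cx: &mut Context<'_>) -> Poll<std::io::Result<u32>> {
        if self.remaining == 0 {
            return Poll::Pending;
        }
        self.remaining -= 1;
        Poll::Ready(Ok(self.remaining))
    }
}

struct AcceptStream<'a> {
    listener: &'a mut FakeListener,
}

impl<'a> Stream for AcceptStream<'a> {
    type Item = std::io::Result<u32>;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // Same shape as the real impl: forward the poll result, always wrapped in Some.
        match self.listener.poll_accept(cx) {
            Poll::Ready(Ok(conn)) => Poll::Ready(Some(Ok(conn))),
            Poll::Ready(Err(err)) => Poll::Ready(Some(Err(err))),
            Poll::Pending => Poll::Pending,
        }
    }
}

fn main() {
    let mut listener = FakeListener { remaining: 2 };
    let mut stream = AcceptStream { listener: &mut listener };
    let waker = futures::task::noop_waker();
    let mut cx = Context::from_waker(&waker);
    while let Poll::Ready(Some(Ok(id))) = Pin::new(&mut stream).poll_next(&mut cx) {
        println!("accepted connection {}", id);
    }
}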
lib.rs
// Copyright 2020 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #![deny(missing_docs)] //! This module provides a test HTTP(S) server that can be instantiated simply by a unit test, for //! connecting components to where you need to vary the response(s) from the HTTP(S) server during //! the operation of the test. //! //! It handles the TCP setup, letting the user specify `Handler` implementations which return the //! responses from the server. `Handler` implementations are meant to be composable to provide //! for fault injection and varying behavior in tests. // This is gratuitously borrowed from src/sys/pkg/lib/fuchsia-pkg-testing/src/serve.rs, and then // made generic across all requests by removing the repo-serving aspects of it. use { anyhow::Error, chrono::Utc, fuchsia_async::{self as fasync, Task}, fuchsia_hyper, futures::{future::BoxFuture, prelude::*}, hyper::{ server::{accept::from_stream, Server}, service::{make_service_fn, service_fn}, Body, Request, Response, StatusCode, }, std::{ convert::Infallible, net::{Ipv6Addr, SocketAddr}, pin::Pin, sync::Arc, }, }; // Some provided Handler implementations. pub mod handler; // Some provided Handler implementations for injecting faults into the server's behavior. pub mod fault_injection; /// A "test" HTTP(S) server which is composed of `Handler` implementations, and holding the /// connection state. pub struct TestServer { stop: futures::channel::oneshot::Sender<()>, addr: SocketAddr, use_https: bool, task: Task<()>, } /// Base trait that all Handlers implement. pub trait Handler: 'static + Send + Sync { /// A Handler impl signals that it wishes to handle a request by returning a response for it, /// otherwise it returns None. fn handles(&self, request: &Request<Body>) -> Option<BoxFuture<'_, Response<Body>>>; } impl Handler for Arc<dyn Handler> { fn handles(&self, request: &Request<Body>) -> Option<BoxFuture<'_, Response<Body>>> { (**self).handles(request) } } impl TestServer { /// return the scheme of the TestServer fn scheme(&self) -> &'static str { if self.use_https { "https" } else { "http" } } /// Returns the URL that can be used to connect to this repository from this device. pub fn local_url(&self) -> String { format!("{}://localhost:{}", self.scheme(), self.addr.port()) } /// Returns the URL for the given path that can be used to connect to this repository from this /// device. pub fn local_url_for_path(&self, path: &str) -> String { let path = path.trim_start_matches('/'); format!("{}://localhost:{}/{}", self.scheme(), self.addr.port(), path) } /// Gracefully signal the server to stop and returns a future that resolves when it terminates. pub fn stop(self) -> impl Future<Output = ()> { self.stop.send(()).expect("remote end to still be open"); self.task } /// Internal helper which iterates over all Handlers until it finds one that will respond to the /// request. It then returns that response. If not response is found, it returns 404 NOT_FOUND. async fn handle_request( handlers: Arc<Vec<Arc<dyn Handler>>>, req: Request<Body>, ) -> Response<Body> { let response = handlers.iter().find_map(|h| h.handles(&req)); match response { Some(response) => response.await, None => Response::builder().status(StatusCode::NOT_FOUND).body(Body::empty()).unwrap(), } }
/// A builder to construct a `TestServer`. #[derive(Default)] pub struct TestServerBuilder { handlers: Vec<Arc<dyn Handler>>, https_certs: Option<(Vec<rustls::Certificate>, rustls::PrivateKey)>, } impl TestServerBuilder { /// Create a new TestServerBuilder pub fn new() -> Self { Self::default() } /// Serve over TLS, using a server certificate rooted the provided certs pub fn use_https(mut self, cert_chain: &[u8], private_key: &[u8]) -> Self { let cert_chain = parse_cert_chain(cert_chain); let private_key = parse_private_key(private_key); self.https_certs = Some((cert_chain, private_key)); self } /// Add a Handler which implements the server's behavior. These are given the ability to /// handle a request in the order in which they are added to the `TestServerBuilder`. pub fn handler(mut self, handler: impl Handler + 'static) -> Self { self.handlers.push(Arc::new(handler)); self } /// Spawn the server on the current executor, returning a handle to manage the server. pub async fn start(self) -> TestServer { let (mut listener, addr) = { let addr = SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 0); let listener = bind_listener(&addr).await; let local_addr = listener.local_addr().unwrap(); (listener, local_addr) }; let (stop, rx_stop) = futures::channel::oneshot::channel(); let (tls_acceptor, use_https) = if let Some((cert_chain, private_key)) = self.https_certs { // build a server configuration using a test CA and cert chain let mut tls_config = rustls::ServerConfig::new(rustls::NoClientAuth::new()); tls_config.set_single_cert(cert_chain, private_key).unwrap(); let tls_acceptor = tokio_rustls::TlsAcceptor::from(Arc::new(tls_config)); (Some(tls_acceptor), true) } else { (None, false) }; let task = fasync::Task::spawn(async move { let listener = accept_stream(&mut listener); let listener = listener .map_err(Error::from) .map_ok(|conn| fuchsia_hyper::TcpStream { stream: conn }); let connections = if let Some(tls_acceptor) = tls_acceptor { // wrap incoming tcp streams listener .and_then(move |conn| { tls_acceptor.accept(conn).map(|res| match res { Ok(conn) => { Ok(Pin::new(Box::new(conn)) as Pin<Box<dyn AsyncReadWrite>>) } Err(e) => Err(Error::from(e)), }) }) .boxed() // connections } else { listener .map_ok(|conn| Pin::new(Box::new(conn)) as Pin<Box<dyn AsyncReadWrite>>) .boxed() // connections }; // This is the root Arc<Vec<Arc<dyn Handler>>>. let handlers = Arc::new(self.handlers); let make_svc = make_service_fn(move |_socket| { // Each connection to the server receives a separate service_fn instance, and so // needs it's own copy of the handlers, this is a factory of sorts. let handlers = Arc::clone(&handlers); async move { Ok::<_, Infallible>(service_fn(move |req| { // Each request made by a connection is serviced by the service_fn created from // this scope, which is why there is another cloning of the Arc of Handlers. 
let method = req.method().to_owned(); let path = req.uri().path().to_owned(); TestServer::handle_request(Arc::clone(&handlers), req) .inspect(move |x| { println!( "{} [test http] {} {} => {}", Utc::now().format("%T.%6f"), method, path, x.status() ) }) .map(Ok::<_, Infallible>) })) } }); Server::builder(from_stream(connections)) .executor(fuchsia_hyper::Executor) .serve(make_svc) .with_graceful_shutdown( rx_stop.map(|res| res.unwrap_or_else(|futures::channel::oneshot::Canceled| ())), ) .unwrap_or_else(|e| panic!("error serving repo over http: {}", e)) .await; }); TestServer { stop, addr, use_https, task } } } #[cfg(target_os = "fuchsia")] async fn bind_listener(addr: &SocketAddr) -> fuchsia_async::net::TcpListener { fuchsia_async::net::TcpListener::bind(addr).unwrap() } #[cfg(not(target_os = "fuchsia"))] async fn bind_listener(&addr: &SocketAddr) -> async_net::TcpListener { async_net::TcpListener::bind(addr).await.unwrap() } #[cfg(target_os = "fuchsia")] fn accept_stream<'a>( listener: &'a mut fuchsia_async::net::TcpListener, ) -> impl Stream<Item = std::io::Result<fuchsia_async::net::TcpStream>> + 'a { use std::task::{Context, Poll}; #[pin_project::pin_project] struct AcceptStream<'a> { #[pin] listener: &'a mut fuchsia_async::net::TcpListener, } impl<'a> Stream for AcceptStream<'a> { type Item = std::io::Result<fuchsia_async::net::TcpStream>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { let mut this = self.project(); match this.listener.async_accept(cx) { Poll::Ready(Ok((conn, _addr))) => Poll::Ready(Some(Ok(conn))), Poll::Ready(Err(err)) => Poll::Ready(Some(Err(err))), Poll::Pending => Poll::Pending, } } } AcceptStream { listener } } #[cfg(not(target_os = "fuchsia"))] fn accept_stream<'a>( listener: &'a mut async_net::TcpListener, ) -> impl Stream<Item = std::io::Result<async_net::TcpStream>> + 'a { listener.incoming() } fn parse_cert_chain(mut bytes: &[u8]) -> Vec<rustls::Certificate> { rustls::internal::pemfile::certs(&mut bytes).expect("certs to parse") } fn parse_private_key(mut bytes: &[u8]) -> rustls::PrivateKey { let keys = rustls::internal::pemfile::rsa_private_keys(&mut bytes).expect("private keys to parse"); assert_eq!(keys.len(), 1, "expecting a single private key"); keys.into_iter().next().unwrap() } trait AsyncReadWrite: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send {} impl<T> AsyncReadWrite for T where T: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send {} // These are a set of useful functions when writing tests. /// Create a GET request for a given url, which can be used with any hyper client. pub fn make_get(url: impl AsRef<str>) -> Result<Request<Body>, Error> { Request::get(url.as_ref()).body(Body::empty()).map_err(Error::from) } /// Perform an HTTP GET for the given url, returning the result. pub async fn get(url: impl AsRef<str>) -> Result<Response<Body>, Error> { let request = make_get(url)?; let client = fuchsia_hyper::new_client(); let response = client.request(request).await?; Ok(response) } /// Collect a Response into a single Vec of bytes. pub async fn body_as_bytes(response: Response<Body>) -> Result<Vec<u8>, Error> { let bytes = response .into_body() .try_fold(Vec::new(), |mut vec, b| async move { vec.extend(b); Ok(vec) }) .await?; Ok(bytes) } /// Collect a Response's Body and convert the body to a tring. 
pub async fn body_as_string(response: Response<Body>) -> Result<String, Error> { let bytes = body_as_bytes(response).await?; let string = String::from_utf8(bytes)?; Ok(string) } /// Get a url and return the body of the response as a string. pub async fn get_body_as_string(url: impl AsRef<str>) -> Result<String, Error> { let response = get(url).await?; body_as_string(response).await } #[cfg(test)] mod tests { use super::*; use crate::{fault_injection::*, handler::*}; use anyhow::anyhow; use fasync::TimeoutExt; #[fuchsia_async::run_singlethreaded(test)] async fn test_start_stop() { let server = TestServer::builder().start().await; server.stop().await; } #[fuchsia_async::run_singlethreaded(test)] async fn test_empty_server_404s() { let server = TestServer::builder().start().await; let result = get(server.local_url()).await; assert_eq!(result.unwrap().status(), StatusCode::NOT_FOUND); } #[fuchsia_async::run_singlethreaded(test)] async fn test_shared_handler() { let shared: Arc<dyn Handler> = Arc::new(StaticResponse::ok_body("shared")); let server = TestServer::builder() .handler(ForPath::new("/a", Arc::clone(&shared))) .handler(shared) .start() .await; assert_eq!(get_body_as_string(server.local_url_for_path("/a")).await.unwrap(), "shared"); assert_eq!(get_body_as_string(server.local_url_for_path("/foo")).await.unwrap(), "shared"); } #[fuchsia_async::run_singlethreaded(test)] async fn test_simple_responder() { let server = TestServer::builder().handler(StaticResponse::ok_body("some data")).start().await; assert_eq!( get_body_as_string(server.local_url_for_path("ignored")).await.unwrap(), "some data" ); } #[fuchsia_async::run_singlethreaded(test)] async fn test_simple_path() { let server = TestServer::builder() .handler(ForPath::new("/some/path", StaticResponse::ok_body("some data"))) .start() .await; assert_eq!( get_body_as_string(server.local_url_for_path("/some/path")).await.unwrap(), "some data" ); } #[fuchsia_async::run_singlethreaded(test)] async fn test_simple_path_doesnt_respond_to_wrong_path() { let server = TestServer::builder() .handler(ForPath::new("/some/path", StaticResponse::ok_body("some data"))) .start() .await; // make sure a non-matching path fails let result = get(server.local_url_for_path("/other/path")).await; assert_eq!(result.unwrap().status(), StatusCode::NOT_FOUND); } #[fuchsia_async::run_singlethreaded(test)] async fn test_hang() { let server = TestServer::builder().handler(Hang).start().await; let result = get(server.local_url_for_path("ignored")) .on_timeout(std::time::Duration::from_secs(1), || Err(anyhow!("timed out"))) .await; assert_eq!(result.unwrap_err().to_string(), Error::msg("timed out").to_string()); } #[fuchsia_async::run_singlethreaded(test)] async fn test_hang_body() { let server = TestServer::builder().handler(HangBody::content_length(500)).start().await; let result = get_body_as_string(server.local_url_for_path("ignored")) .on_timeout(std::time::Duration::from_secs(1), || Err(anyhow!("timed out"))) .await; assert_eq!(result.unwrap_err().to_string(), Error::msg("timed out").to_string()); } }
/// Create a Builder pub fn builder() -> TestServerBuilder { TestServerBuilder::new() } }
random_line_split
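Because handlers are tried in registration order, the path-scoped and fault-injecting handlers that the tests above exercise individually also compose directly. A hedged sketch of such a combination, written in the style of the existing tests module and using only handlers that already appear there:

#[fuchsia_async::run_singlethreaded(test)]
async fn test_good_path_and_hang_elsewhere() {
    let server = TestServer::builder()
        // First match wins: /good is served normally...
        .handler(ForPath::new("/good", StaticResponse::ok_body("ok")))
        // ...while every other path falls through to the hanging handler.
        .handler(Hang)
        .start()
        .await;

    assert_eq!(get_body_as_string(server.local_url_for_path("/good")).await.unwrap(), "ok");

    let hung = get(server.local_url_for_path("/other"))
        .on_timeout(std::time::Duration::from_secs(1), || Err(anyhow!("timed out")))
        .await;
    assert!(hung.is_err());
}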
concurrent_ntlm_auth_requests.py
#!/usr/bin/env python # -*- coding:utf-8 -*- #allisnone 20200403 #https://github.com/urllib3/urllib3/issues/1434 #https://github.com/dopstar/requests-ntlm2 #https://github.com/requests/requests-ntlm #base on python3 #if you request https website, you need to add ASWG CA to following file: #/root/.pyenv/versions/3.5.5/lib/python3.5/site-packages/certifi/cacert.pem #ulimit โ€“n 2000 #pip install requests_ntlm import argparse import re import os import csv import string,sys,time,datetime import requests from requests_toolbelt.adapters import source #from requests_ntlm import HttpNtlmAuth import random import subprocess #import zthreads def get_random_ip_or_user(start,end,prefix='172.16.90.',type='ip'): if type=='ip' and max(start,end)>255: end = 255 i = random.randint(start,end) return prefix + str(i) def get_random_ips_users(start,end,num,prefix='172.16.90.',type='ip'): if type=='ip' and max(start,end)>255: end = 255 sequences = [] for i in range(start,end+1): se
if num> len(sequences): num = len(sequences) choices = random.sample(sequences,num) return choices def popen_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer'): curl_cmd = 'curl --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'.format( cert,eth,user,proxy,url) subp = subprocess.Popen(curl_cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,close_fds=True)#,encoding="utf-8") try: subp.wait(2) #็ญ‰ๅพ…่ถ…ๆ—ถ except Exception as e: print('curl_request_timeout, error: ',e) return if subp.poll() == 0: print(subp.communicate()[1]) else: print("curl_request-ๅคฑ่ดฅ: ",curl_cmd) return def system_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer',is_http=False,debug=False): """ -I: header request -k: skip ssl --no-keepalive, keepalive=close """ curl_cmd = '' debug = False if is_http: basic_cmd = 'curl -I --no-keepalive --interface {0} --proxy-user {1}:Firewall1 --proxy-ntlm -x {2} {3} &' if debug: pass else: basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &' curl_cmd = basic_cmd.format(eth,user,proxy,url) else: basic_cmd = 'curl -I --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &' if debug: pass else: basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &' curl_cmd = basic_cmd.format(cert,eth,user,proxy,url) try: os_p = os.system(curl_cmd) print('curl_cmd=',curl_cmd) except Exception as e: print('curl_request_timeout: {0}, error: {1}, url={2}, user={3}'.format(curl_cmd,e,url,user)) return def get_urls_from_file(from_file='url16000.txt',url_index=-1,spliter=',',pre_www='www.'): """ ็”จไบŽurlๅˆ†็ฑปๆต‹่ฏ•๏ผŒๆต‹่ฏ•ๆ–‡ไปถไธญๅญ˜ๆ”พๅคง้‡็š„urlๅœฐๅ€ :param from_file: str :return: list๏ผŒ URL_list๏ผˆGenerator๏ผ‰ """ txtfile = open(from_file, 'r',encoding='utf-8') url_list = txtfile.readlines() for i in range(0,len(url_list)): url_list[i] = url_list[i].replace('\n','') # print(url_list[i]) if url_index>=0: url_var = url_list[i].split(spliter)[url_index].replace(' ','') #print('url_var=',url_var) protocol_header = url_var[:9].lower() if pre_www not in url_var and not ("http://" in protocol_header or "https://" in protocol_header or "ftp://" in protocol_header): url_var = pre_www + url_var url_list[i] = url_var protocol_header = url_list[i][:9].lower() #print('protocol_header=',protocol_header) if "http://" in protocol_header or "https://" in protocol_header or "ftp://" in protocol_header: pass else: #ๆ— ๅ่ฎฎๅคด้ƒจ๏ผŒ้ป˜่ฎคๅŠ httpๅ่ฎฎ url_list[i] = "https://" + url_list[i] return url_list def get_eth_user_index(sequence=0,user_start=30,user_num=10,eth_start=0,eth_num=254): """ inet 172.18.1.1/16 brd 172.18.255.255 scope global secondary eth0:0 inet 172.18.1.254/16 brd 172.18.255.255 scope global secondary eth0:253 sequence: start with 0 eth_num: eth sequence start with 0 """ user_index = sequence % user_num + user_start eth_index = sequence % eth_num + eth_start """ user_index = sequence if sequence>user_num: #ๅพช็Žฏ๏ผŒๅค็”จ๏ผŒๅ–ไฝ™ user_index = sequence % user_num + user_start eth_index = sequence if eth_index>eth_num: #ๅพช็Žฏ๏ผŒๅค็”จ๏ผŒๅ–ไฝ™ eth_index = eth_index % eth_num + eth_start """ return user_index,eth_index def callback(): return def urls_resquests(urls, proxy='172.17.33.23:8080',user_start=300,user_num=253,sub_eth_start = 0, eth_num=253, ip_prefix = '172.18.1.', cert='rootCA.cer',is_same_url=False, is_http=False,debug=False): """ one ip/eth<--> one user """ i = 0 #count = max(len(urls),user_num,eth_num) #for url in urls: for i in range(max(user_num,eth_num)): url = '' if is_same_url: 
if is_http: url = 'http://172.16.0.1' #use the same url for request test else: url = 'https://www.baidu.com' user_index = i % user_num + user_start eth_index = i % eth_num + sub_eth_start #ip = get_random_ip_or_user(start=2,end=254) #ip = ip_prefix + str(eth_index + 1) #user = get_random_ip_or_user(start=1,end=99,prefix='df64user',type='user') user = 'userg'+str(user_index) #eth = get_random_ip_or_user(start=2,end=253,prefix='eth0:',type='user') eth = 'eth0:'+str(eth_index) """ For debug print('i={0}: user_index={1}, eth_index={2}'.format(i,user_index,eth_index)) print('ip_{0}={1}'.format(i,ip)) print('eth=',eth) print('user=',user) print("-" * 50) """ #thread_pool.put(system_curl_request, (url,user,eth,), callback) #popen_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer') #system_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer') system_curl_request(url,user,eth,proxy=proxy,cert=cert,is_http=is_http,debug=debug) #i = i + 1 return #""" if __name__ == '__main__': parser = argparse.ArgumentParser(description='่ฏฅPython3่„šๆœฌ็”จไบŽASWGๅšๅนถๅ‘่ฎค่ฏๆต‹่ฏ•ใ€‚\n 1ใ€ไฝฟ็”จๆ–นๆณ•็คบไพ‹:\n python concurrent_ntlm_auth_requests.py -s 17:45:00 -r 2 -t 120 -p 172.17.33.23:8080') parser.add_argument('-r','--round', type=int, default=1,help='่ฎค่ฏๅนถๅ‘ๆต‹่ฏ•็š„ๆต‹่ฏ•ๆฌกๆ•ฐ๏ผŒ้ป˜่ฎค1่ฝฎๆต‹่ฏ•ๅณๅœๆญข') parser.add_argument('-s','--starttime', type=str, default='',help='้ฆ–ๆฌก่ฎค่ฏๅนถๅ‘ๆต‹่ฏ•็š„ๆ—ถ้—ด๏ผŒๅฆ‚ 16:20:60') parser.add_argument('-t','--auth-cache-timeout', type=int, default=600,help='่ฎค่ฏ็ผ“ๅญ˜่ฟ‡ๆœŸๆ—ถ้—ด๏ผŒ้ป˜่ฎค600็ง’') parser.add_argument('-p','--aswg-proxy', type=str, default='172.17.33.23:8080',help='ASWG proxy') parser.add_argument('-i','--ip-prefix', type=str, default='172.18.1.',help='ๅฎขๆˆท็ซฏIPๅ‰็ผ€๏ผŒ้ป˜่ฎคๅชๆ”ฏๆŒCๆฎต๏ผ›ๅ…ถไป–ๆ–นๅผ่‡ช่กŒ้€‚้…') parser.add_argument('-u','--is-same-url', type=bool, default=True,help='ๆ˜ฏๅฆไฝฟ็”จ็›ธๅŒURLๆต‹่ฏ•') parser.add_argument('-u1','--is-http', type=bool, default=True,help='ๅฝ“ๆŒ‡ๅฎšไฝฟ็”จ็›ธๅŒURLๆ—ถ๏ผŒๆŒ‡ๅฎšๆ˜ฏhttp่ฟ˜ๆ˜ฏhttps่ฏทๆฑ‚') parser.add_argument('-f','--url-file', type=str, default='hwurls_top10w.txt',help='urlsๆฅๆบๆ–‡ไปถ') parser.add_argument('-f1','--url-index', type=int, default=0,help='urlsๆฅๆบๆ–‡ไปถไธญๅญ—ๆฎตๅบๅท๏ผŒ้ป˜่ฎคไปŽ0ๅผ€ๅง‹') parser.add_argument('-a0','--start-user-index', type=int, default=0,help='auth ็”จๆˆท็š„ๅบๅท๏ผŒ้ป˜่ฎคไปŽ0ๅผ€ๅง‹') parser.add_argument('-a1','--user-num', type=int, default=1275,help='auth ็”จๆˆทๆ•ฐ้‡') parser.add_argument('-e0','--start-eth0-index', type=int, default=0,help='ๅผ€ๅง‹็š„ๅญ็ฝ‘ๅกๅบๅท๏ผŒ้ป˜่ฎคไปŽ0ๅผ€ๅง‹') parser.add_argument('-e1','--sub-eth0-num', type=int, default=1275,help='ๅญ็ฝ‘ๅกๆŽฅๅฃๆ•ฐ้‡๏ผŒๆฏไธชๆŽฅๅฃไธ€ไธชIPๅœฐๅ€') parser.add_argument('-d','--is-debug', type=bool, default=False,help='ๆ˜ฏๅฆๅผ€ๅฏcurl็š„ๆ‰“ๅฐๆ—ฅๅฟ—') args = parser.parse_args() max_round = args.round first_schedule_time = args.starttime now = datetime.datetime.now() now_str = now.strftime("%H:%M:%S") if first_schedule_time: if len(first_schedule_time)==8 and len(first_schedule_time.split(':'))==3 and first_schedule_time > now_str: pass else: print('-sๆˆ–่€…--starttime ๆ ผๅผไธๅฏน๏ผŒ่ฏท่พ“ๅ…ฅๅคงไบŽๅฝ“ๅ‰ๆ—ถ้—ดๅญ—็ฌฆไธฒ๏ผŒๅฆ‚๏ผš16:20:60 ') sys.exit() else: nexttime = now + datetime.timedelta(seconds=60) first_schedule_time = nexttime.strftime("%H:%M:%S") auth_cache_timeout = args.auth_cache_timeout proxy = args.aswg_proxy ip_prefix = args.ip_prefix is_same_url = args.is_same_url is_same_url = True url_file = args.url_file url_index = 
args.url_index start_user_index = args.start_user_index user_num = args.user_num start_eth0_index = args.start_eth0_index sub_eth0_num = args.sub_eth0_num is_debug = args.is_debug urls = get_urls_from_file(from_file=url_file,url_index=url_index,spliter=',',pre_www='www.') #print('urls=',urls) #url = 'https://www.baidu.com' print('urls_len=',len(urls)) #urls = urls[:300] print('urls_len=',len(urls)) #from zthreads.threadpools.threadpools import Threadpools #thread_pool = Threadpools(5) i = 0 #unique_users = 1275 user_start = start_user_index user_num = user_num sub_eth_start = start_eth0_index eth_num = sub_eth0_num cert = 'rootCA.cer' is_http = True #first_schedule_time = "16:45:00" #auth_cache_timeout = 60 #max_round = 2 print('max_round={0}, first_schedule_time={1}, auth_cache_timeout={2}'.format(max_round,first_schedule_time,auth_cache_timeout)) round_num = 0 while True: #time_now = time.strftime("%H:%M:%S", time.localtime()) now = datetime.datetime.now() time_now = now.strftime("%H:%M:%S") if time_now == first_schedule_time: print('This_schedule_time={0}, round={1}'.format(first_schedule_time,round_num)) start_time = time.time() urls_resquests(urls, proxy=proxy,user_start=user_start,user_num=user_num,sub_eth_start=sub_eth_start, eth_num=eth_num, ip_prefix=ip_prefix, cert=cert,is_same_url=is_same_url, is_http=is_http,debug=is_debug) total_sending_time_seconds = time.time() - start_time print('total_sending_time_seconds={0}. Finished all url requests for round_{1}!!!'.format(total_sending_time_seconds,round_num)) round_num = round_num + 1 if round_num >= max_round: print("-" * 50) print('Finished all test with {0} rounds!!!'.format(max_round)) break else: print("-" * 50) print('Please make sure clear cache before the next schedule time!!!') #now = datetime.datetime.now() #date_str = now.strftime("%Y-%m-%d ") #last_schedule_time_str = date_str + first_schedule_time last_schedule_time = datetime.datetime.strptime(now.strftime("%Y-%m-%d ") + first_schedule_time,'%Y-%m-%d %H:%M:%S') nexttime = last_schedule_time + datetime.timedelta(seconds=auth_cache_timeout+60) # delay 60 seconds first_schedule_time = nexttime.strftime("%H:%M:%S") print('Next_schedule_time={0}...'.format(first_schedule_time)) #time.sleep(sleep_time) else: #print('time_now=',time_now) pass #thread_pool.close() #initial_requests_session(ip=ip,user=ntlm_user)
quences.append(prefix+str(i))
conditional_block
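The entry above exercises the request-dispatch logic of concurrent_ntlm_auth_requests.py, whose core is a modulo mapping from a request sequence number onto a cyclic auth user and sub-interface index. Below is a minimal Python sketch of that mapping; the start/count values are illustrative defaults, not values confirmed by the script.

def map_sequence(sequence, user_start=0, user_num=1275, eth_start=0, eth_num=1275):
    # Cycle the sequence number onto a user index and a sub-interface (eth0:N) index,
    # mirroring get_eth_user_index and the inline arithmetic in urls_resquests.
    user_index = sequence % user_num + user_start
    eth_index = sequence % eth_num + eth_start
    return user_index, eth_index

for i in (0, 1, 1274, 1275):
    print(i, map_sequence(i))  # 1275 wraps back to the first user/interface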
concurrent_ntlm_auth_requests.py
#!/usr/bin/env python # -*- coding:utf-8 -*- #allisnone 20200403 #https://github.com/urllib3/urllib3/issues/1434 #https://github.com/dopstar/requests-ntlm2 #https://github.com/requests/requests-ntlm #base on python3 #if you request https website, you need to add ASWG CA to following file: #/root/.pyenv/versions/3.5.5/lib/python3.5/site-packages/certifi/cacert.pem #ulimit โ€“n 2000 #pip install requests_ntlm import argparse import re import os import csv import string,sys,time,datetime import requests from requests_toolbelt.adapters import source #from requests_ntlm import HttpNtlmAuth import random import subprocess #import zthreads def get_random_ip_or_user(start,end,prefix='172.16.90.',type='ip'): if type=='ip' and max(start,end)>255: end = 255 i = random.randint(start,end) return prefix + str(i) def get_random_ips_users(start,end,num,prefix='172.16.90.',type='ip'): if type=='ip' and max(start,end)>255: end = 255 sequences = [] for i in range(start,end+1): sequences.append(prefix+str(i)) if num> len(sequences): num = len(sequences) choices = random.sample(sequences,num) return choices
subp = subprocess.Popen(curl_cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,close_fds=True)#,encoding="utf-8") try: subp.wait(2) #็ญ‰ๅพ…่ถ…ๆ—ถ except Exception as e: print('curl_request_timeout, error: ',e) return if subp.poll() == 0: print(subp.communicate()[1]) else: print("curl_request-ๅคฑ่ดฅ: ",curl_cmd) return def system_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer',is_http=False,debug=False): """ -I: header request -k: skip ssl --no-keepalive, keepalive=close """ curl_cmd = '' debug = False if is_http: basic_cmd = 'curl -I --no-keepalive --interface {0} --proxy-user {1}:Firewall1 --proxy-ntlm -x {2} {3} &' if debug: pass else: basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &' curl_cmd = basic_cmd.format(eth,user,proxy,url) else: basic_cmd = 'curl -I --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &' if debug: pass else: basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &' curl_cmd = basic_cmd.format(cert,eth,user,proxy,url) try: os_p = os.system(curl_cmd) print('curl_cmd=',curl_cmd) except Exception as e: print('curl_request_timeout: {0}, error: {1}, url={2}, user={3}'.format(curl_cmd,e,url,user)) return def get_urls_from_file(from_file='url16000.txt',url_index=-1,spliter=',',pre_www='www.'): """ ็”จไบŽurlๅˆ†็ฑปๆต‹่ฏ•๏ผŒๆต‹่ฏ•ๆ–‡ไปถไธญๅญ˜ๆ”พๅคง้‡็š„urlๅœฐๅ€ :param from_file: str :return: list๏ผŒ URL_list๏ผˆGenerator๏ผ‰ """ txtfile = open(from_file, 'r',encoding='utf-8') url_list = txtfile.readlines() for i in range(0,len(url_list)): url_list[i] = url_list[i].replace('\n','') # print(url_list[i]) if url_index>=0: url_var = url_list[i].split(spliter)[url_index].replace(' ','') #print('url_var=',url_var) protocol_header = url_var[:9].lower() if pre_www not in url_var and not ("http://" in protocol_header or "https://" in protocol_header or "ftp://" in protocol_header): url_var = pre_www + url_var url_list[i] = url_var protocol_header = url_list[i][:9].lower() #print('protocol_header=',protocol_header) if "http://" in protocol_header or "https://" in protocol_header or "ftp://" in protocol_header: pass else: #ๆ— ๅ่ฎฎๅคด้ƒจ๏ผŒ้ป˜่ฎคๅŠ httpๅ่ฎฎ url_list[i] = "https://" + url_list[i] return url_list def get_eth_user_index(sequence=0,user_start=30,user_num=10,eth_start=0,eth_num=254): """ inet 172.18.1.1/16 brd 172.18.255.255 scope global secondary eth0:0 inet 172.18.1.254/16 brd 172.18.255.255 scope global secondary eth0:253 sequence: start with 0 eth_num: eth sequence start with 0 """ user_index = sequence % user_num + user_start eth_index = sequence % eth_num + eth_start """ user_index = sequence if sequence>user_num: #ๅพช็Žฏ๏ผŒๅค็”จ๏ผŒๅ–ไฝ™ user_index = sequence % user_num + user_start eth_index = sequence if eth_index>eth_num: #ๅพช็Žฏ๏ผŒๅค็”จ๏ผŒๅ–ไฝ™ eth_index = eth_index % eth_num + eth_start """ return user_index,eth_index def callback(): return def urls_resquests(urls, proxy='172.17.33.23:8080',user_start=300,user_num=253,sub_eth_start = 0, eth_num=253, ip_prefix = '172.18.1.', cert='rootCA.cer',is_same_url=False, is_http=False,debug=False): """ one ip/eth<--> one user """ i = 0 #count = max(len(urls),user_num,eth_num) #for url in urls: for i in range(max(user_num,eth_num)): url = '' if is_same_url: if is_http: url = 'http://172.16.0.1' #use the same url for request test else: url = 'https://www.baidu.com' user_index = i % user_num + user_start eth_index = i % eth_num + sub_eth_start #ip = get_random_ip_or_user(start=2,end=254) #ip = ip_prefix + str(eth_index + 1) #user = 
get_random_ip_or_user(start=1,end=99,prefix='df64user',type='user') user = 'userg'+str(user_index) #eth = get_random_ip_or_user(start=2,end=253,prefix='eth0:',type='user') eth = 'eth0:'+str(eth_index) """ For debug print('i={0}: user_index={1}, eth_index={2}'.format(i,user_index,eth_index)) print('ip_{0}={1}'.format(i,ip)) print('eth=',eth) print('user=',user) print("-" * 50) """ #thread_pool.put(system_curl_request, (url,user,eth,), callback) #popen_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer') #system_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer') system_curl_request(url,user,eth,proxy=proxy,cert=cert,is_http=is_http,debug=debug) #i = i + 1 return #""" if __name__ == '__main__': parser = argparse.ArgumentParser(description='่ฏฅPython3่„šๆœฌ็”จไบŽASWGๅšๅนถๅ‘่ฎค่ฏๆต‹่ฏ•ใ€‚\n 1ใ€ไฝฟ็”จๆ–นๆณ•็คบไพ‹:\n python concurrent_ntlm_auth_requests.py -s 17:45:00 -r 2 -t 120 -p 172.17.33.23:8080') parser.add_argument('-r','--round', type=int, default=1,help='่ฎค่ฏๅนถๅ‘ๆต‹่ฏ•็š„ๆต‹่ฏ•ๆฌกๆ•ฐ๏ผŒ้ป˜่ฎค1่ฝฎๆต‹่ฏ•ๅณๅœๆญข') parser.add_argument('-s','--starttime', type=str, default='',help='้ฆ–ๆฌก่ฎค่ฏๅนถๅ‘ๆต‹่ฏ•็š„ๆ—ถ้—ด๏ผŒๅฆ‚ 16:20:60') parser.add_argument('-t','--auth-cache-timeout', type=int, default=600,help='่ฎค่ฏ็ผ“ๅญ˜่ฟ‡ๆœŸๆ—ถ้—ด๏ผŒ้ป˜่ฎค600็ง’') parser.add_argument('-p','--aswg-proxy', type=str, default='172.17.33.23:8080',help='ASWG proxy') parser.add_argument('-i','--ip-prefix', type=str, default='172.18.1.',help='ๅฎขๆˆท็ซฏIPๅ‰็ผ€๏ผŒ้ป˜่ฎคๅชๆ”ฏๆŒCๆฎต๏ผ›ๅ…ถไป–ๆ–นๅผ่‡ช่กŒ้€‚้…') parser.add_argument('-u','--is-same-url', type=bool, default=True,help='ๆ˜ฏๅฆไฝฟ็”จ็›ธๅŒURLๆต‹่ฏ•') parser.add_argument('-u1','--is-http', type=bool, default=True,help='ๅฝ“ๆŒ‡ๅฎšไฝฟ็”จ็›ธๅŒURLๆ—ถ๏ผŒๆŒ‡ๅฎšๆ˜ฏhttp่ฟ˜ๆ˜ฏhttps่ฏทๆฑ‚') parser.add_argument('-f','--url-file', type=str, default='hwurls_top10w.txt',help='urlsๆฅๆบๆ–‡ไปถ') parser.add_argument('-f1','--url-index', type=int, default=0,help='urlsๆฅๆบๆ–‡ไปถไธญๅญ—ๆฎตๅบๅท๏ผŒ้ป˜่ฎคไปŽ0ๅผ€ๅง‹') parser.add_argument('-a0','--start-user-index', type=int, default=0,help='auth ็”จๆˆท็š„ๅบๅท๏ผŒ้ป˜่ฎคไปŽ0ๅผ€ๅง‹') parser.add_argument('-a1','--user-num', type=int, default=1275,help='auth ็”จๆˆทๆ•ฐ้‡') parser.add_argument('-e0','--start-eth0-index', type=int, default=0,help='ๅผ€ๅง‹็š„ๅญ็ฝ‘ๅกๅบๅท๏ผŒ้ป˜่ฎคไปŽ0ๅผ€ๅง‹') parser.add_argument('-e1','--sub-eth0-num', type=int, default=1275,help='ๅญ็ฝ‘ๅกๆŽฅๅฃๆ•ฐ้‡๏ผŒๆฏไธชๆŽฅๅฃไธ€ไธชIPๅœฐๅ€') parser.add_argument('-d','--is-debug', type=bool, default=False,help='ๆ˜ฏๅฆๅผ€ๅฏcurl็š„ๆ‰“ๅฐๆ—ฅๅฟ—') args = parser.parse_args() max_round = args.round first_schedule_time = args.starttime now = datetime.datetime.now() now_str = now.strftime("%H:%M:%S") if first_schedule_time: if len(first_schedule_time)==8 and len(first_schedule_time.split(':'))==3 and first_schedule_time > now_str: pass else: print('-sๆˆ–่€…--starttime ๆ ผๅผไธๅฏน๏ผŒ่ฏท่พ“ๅ…ฅๅคงไบŽๅฝ“ๅ‰ๆ—ถ้—ดๅญ—็ฌฆไธฒ๏ผŒๅฆ‚๏ผš16:20:60 ') sys.exit() else: nexttime = now + datetime.timedelta(seconds=60) first_schedule_time = nexttime.strftime("%H:%M:%S") auth_cache_timeout = args.auth_cache_timeout proxy = args.aswg_proxy ip_prefix = args.ip_prefix is_same_url = args.is_same_url is_same_url = True url_file = args.url_file url_index = args.url_index start_user_index = args.start_user_index user_num = args.user_num start_eth0_index = args.start_eth0_index sub_eth0_num = args.sub_eth0_num is_debug = args.is_debug urls = get_urls_from_file(from_file=url_file,url_index=url_index,spliter=',',pre_www='www.') 
#print('urls=',urls) #url = 'https://www.baidu.com' print('urls_len=',len(urls)) #urls = urls[:300] print('urls_len=',len(urls)) #from zthreads.threadpools.threadpools import Threadpools #thread_pool = Threadpools(5) i = 0 #unique_users = 1275 user_start = start_user_index user_num = user_num sub_eth_start = start_eth0_index eth_num = sub_eth0_num cert = 'rootCA.cer' is_http = True #first_schedule_time = "16:45:00" #auth_cache_timeout = 60 #max_round = 2 print('max_round={0}, first_schedule_time={1}, auth_cache_timeout={2}'.format(max_round,first_schedule_time,auth_cache_timeout)) round_num = 0 while True: #time_now = time.strftime("%H:%M:%S", time.localtime()) now = datetime.datetime.now() time_now = now.strftime("%H:%M:%S") if time_now == first_schedule_time: print('This_schedule_time={0}, round={1}'.format(first_schedule_time,round_num)) start_time = time.time() urls_resquests(urls, proxy=proxy,user_start=user_start,user_num=user_num,sub_eth_start=sub_eth_start, eth_num=eth_num, ip_prefix=ip_prefix, cert=cert,is_same_url=is_same_url, is_http=is_http,debug=is_debug) total_sending_time_seconds = time.time() - start_time print('total_sending_time_seconds={0}. Finished all url requests for round_{1}!!!'.format(total_sending_time_seconds,round_num)) round_num = round_num + 1 if round_num >= max_round: print("-" * 50) print('Finished all test with {0} rounds!!!'.format(max_round)) break else: print("-" * 50) print('Please make sure clear cache before the next schedule time!!!') #now = datetime.datetime.now() #date_str = now.strftime("%Y-%m-%d ") #last_schedule_time_str = date_str + first_schedule_time last_schedule_time = datetime.datetime.strptime(now.strftime("%Y-%m-%d ") + first_schedule_time,'%Y-%m-%d %H:%M:%S') nexttime = last_schedule_time + datetime.timedelta(seconds=auth_cache_timeout+60) # delay 60 seconds first_schedule_time = nexttime.strftime("%H:%M:%S") print('Next_schedule_time={0}...'.format(first_schedule_time)) #time.sleep(sleep_time) else: #print('time_now=',time_now) pass #thread_pool.close() #initial_requests_session(ip=ip,user=ntlm_user)
def popen_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer'): curl_cmd = 'curl --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'.format( cert,eth,user,proxy,url)
random_line_split
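The middle of this entry is popen_curl_request, which shells out to curl with an NTLM proxy login. The sketch below shows, in plain Python, how such a command string can be assembled for the HTTP and HTTPS variants used by system_curl_request; the proxy address, password suffix and certificate name are placeholders copied from the script, and the redirect target is written here as /dev/null (the original spells it '/dev/ull', presumably a typo).

def build_curl_cmd(url, user, eth, proxy='172.17.33.23:8080', cert=None, quiet=True):
    # cert=None -> plain HTTP through the proxy; otherwise pass the proxy CA certificate.
    if cert is None:
        cmd = ('curl -I --no-keepalive --interface {eth} '
               '--proxy-user {user}:Firewall1 --proxy-ntlm -x {proxy} {url}')
    else:
        cmd = ('curl -I --cacert {cert} --interface {eth} '
               '--proxy-user {user}:Firewall1 --proxy-ntlm -x {proxy} {url}')
    cmd = cmd.format(cert=cert, eth=eth, user=user, proxy=proxy, url=url)
    if quiet:
        cmd += ' > /dev/null 2>&1'
    return cmd + ' &'  # '&' backgrounds the request, as in the original script

print(build_curl_cmd('http://172.16.0.1', 'userg0', 'eth0:0'))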
concurrent_ntlm_auth_requests.py
#!/usr/bin/env python # -*- coding:utf-8 -*- #allisnone 20200403 #https://github.com/urllib3/urllib3/issues/1434 #https://github.com/dopstar/requests-ntlm2 #https://github.com/requests/requests-ntlm #base on python3 #if you request https website, you need to add ASWG CA to following file: #/root/.pyenv/versions/3.5.5/lib/python3.5/site-packages/certifi/cacert.pem #ulimit โ€“n 2000 #pip install requests_ntlm import argparse import re import os import csv import string,sys,time,datetime import requests from requests_toolbelt.adapters import source #from requests_ntlm import HttpNtlmAuth import random import subprocess #import zthreads def get_random_ip_or_user(start,end,prefix='172.16.90.',type='ip'): if type=='ip' and max(start,end)>255: end = 255 i = random.randint(start,end) return prefix + str(i) def get_random_ips_users(start,end,num,prefix='172.16.90.',type='ip'): if type=='ip' and max(start,end)>255: end = 255 sequences = [] for i in range(start,end+1): sequences.append(prefix+str(i)) if num> len(sequences): num = len(sequences) choices = random.sample(sequences,num) return choices def popen_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer'): curl_cmd = 'curl --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'.format( cert,eth,user,proxy,url) subp = subprocess.Popen(curl_cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,close_fds=True)#,encoding="utf-8") try: subp.wait(2) #็ญ‰ๅพ…่ถ…ๆ—ถ except Exception as e: print('curl_request_timeout, error: ',e) return if subp.poll() == 0: print(subp.communicate()[1]) else: print("curl_request-ๅคฑ่ดฅ: ",curl_cmd) return def system_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer',is_http=False,debug=False): """ -I: header request -k: skip ssl --no-keepalive, keepalive=close """ curl_cmd = '' debug = False if is_http: basic_cmd = 'curl -I --no-keepalive --interface {0} --proxy-user {1}:Firewall1 --proxy-ntlm -x {2} {3} &' if debug: pass else: basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &' curl_cmd = basic_cmd.format(eth,user,proxy,url) else: basic_cmd = 'curl -I --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &' if debug: pass else: basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &' curl_cmd = basic_cmd.format(cert,eth,user,proxy,url) try: os_p = os.system(curl_cmd) print('curl_cmd=',curl_cmd) except Exception as e: print('curl_request_timeout: {0}, error: {1}, url={2}, user={3}'.format(curl_cmd,e,url,user)) return def get_urls_from_file(from_file='url16000.txt',url_index=-1,spliter=',',pre_www='www.'): """ ็”จไบŽurlๅˆ†็ฑปๆต‹่ฏ•๏ผŒๆต‹่ฏ•ๆ–‡ไปถไธญๅญ˜ๆ”พๅคง้‡็š„urlๅœฐๅ€ :param from_file: str :return: list๏ผŒ URL_list๏ผˆGenerator๏ผ‰ """ txtfile = open(from_file, 'r',encoding='utf-8') url_list = txtfile.readlines() for i in range(0,len(url_list)): url_list[i] = url_list[i].replace('\n','') # print(url_list[i]) if url_index>=0: url_var = url_list[i].split(spliter)[url_index].replace(' ','') #print('url_var=',url_var) protocol_header = url_var[:9].lower() if pre_www not in url_var and not ("http://" in protocol_header or "https://" in protocol_header or "ftp://" in protocol_header): url_var = pre_www + url_var url_list[i] = url_var protocol_header = url_list[i][:9].lower() #print('protocol_header=',protocol_header) if "http://" in protocol_header or "https://" in protocol_header or "ftp://" in protocol_header: pass else: #ๆ— ๅ่ฎฎๅคด้ƒจ๏ผŒ้ป˜่ฎคๅŠ httpๅ่ฎฎ url_list[i] = "https://" + url_list[i] return url_list def 
get_eth_user_index(sequence=0,user_start=30,user_num=10,eth_start=0,eth_num=254): """ inet 172.18.1.1/16 brd 172.18.255.255 scope global secondary eth0:0 inet 172.18.1.254/16 brd 172.18.255.255 scope global secondary eth0:253 sequence: start with 0 eth_num: eth sequence start with 0 """ user_index = sequence % user_num + user_start eth_index = sequence % eth_num + eth_start """ user_index = sequence if sequence>user_num: #ๅพช็Žฏ๏ผŒๅค็”จ๏ผŒๅ–ไฝ™ user_index = sequence % user_num + user_start eth_index = sequence if eth_index>eth_num: #ๅพช็Žฏ๏ผŒๅค็”จ๏ผŒๅ–ไฝ™ eth_index = eth_index % eth_num + eth_start """ return user_index,eth_index def callback(): return def urls_resquests(urls, proxy='172.17.33.23:8080',user_start=300,user_num=253,sub_eth_start = 0, eth_num=253,
172.18.1.', cert='rootCA.cer',is_same_url=False, is_http=False,debug=False): """ one ip/eth<--> one user """ i = 0 #count = max(len(urls),user_num,eth_num) #for url in urls: for i in range(max(user_num,eth_num)): url = '' if is_same_url: if is_http: url = 'http://172.16.0.1' #use the same url for request test else: url = 'https://www.baidu.com' user_index = i % user_num + user_start eth_index = i % eth_num + sub_eth_start #ip = get_random_ip_or_user(start=2,end=254) #ip = ip_prefix + str(eth_index + 1) #user = get_random_ip_or_user(start=1,end=99,prefix='df64user',type='user') user = 'userg'+str(user_index) #eth = get_random_ip_or_user(start=2,end=253,prefix='eth0:',type='user') eth = 'eth0:'+str(eth_index) """ For debug print('i={0}: user_index={1}, eth_index={2}'.format(i,user_index,eth_index)) print('ip_{0}={1}'.format(i,ip)) print('eth=',eth) print('user=',user) print("-" * 50) """ #thread_pool.put(system_curl_request, (url,user,eth,), callback) #popen_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer') #system_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer') system_curl_request(url,user,eth,proxy=proxy,cert=cert,is_http=is_http,debug=debug) #i = i + 1 return #""" if __name__ == '__main__': parser = argparse.ArgumentParser(description='่ฏฅPython3่„šๆœฌ็”จไบŽASWGๅšๅนถๅ‘่ฎค่ฏๆต‹่ฏ•ใ€‚\n 1ใ€ไฝฟ็”จๆ–นๆณ•็คบไพ‹:\n python concurrent_ntlm_auth_requests.py -s 17:45:00 -r 2 -t 120 -p 172.17.33.23:8080') parser.add_argument('-r','--round', type=int, default=1,help='่ฎค่ฏๅนถๅ‘ๆต‹่ฏ•็š„ๆต‹่ฏ•ๆฌกๆ•ฐ๏ผŒ้ป˜่ฎค1่ฝฎๆต‹่ฏ•ๅณๅœๆญข') parser.add_argument('-s','--starttime', type=str, default='',help='้ฆ–ๆฌก่ฎค่ฏๅนถๅ‘ๆต‹่ฏ•็š„ๆ—ถ้—ด๏ผŒๅฆ‚ 16:20:60') parser.add_argument('-t','--auth-cache-timeout', type=int, default=600,help='่ฎค่ฏ็ผ“ๅญ˜่ฟ‡ๆœŸๆ—ถ้—ด๏ผŒ้ป˜่ฎค600็ง’') parser.add_argument('-p','--aswg-proxy', type=str, default='172.17.33.23:8080',help='ASWG proxy') parser.add_argument('-i','--ip-prefix', type=str, default='172.18.1.',help='ๅฎขๆˆท็ซฏIPๅ‰็ผ€๏ผŒ้ป˜่ฎคๅชๆ”ฏๆŒCๆฎต๏ผ›ๅ…ถไป–ๆ–นๅผ่‡ช่กŒ้€‚้…') parser.add_argument('-u','--is-same-url', type=bool, default=True,help='ๆ˜ฏๅฆไฝฟ็”จ็›ธๅŒURLๆต‹่ฏ•') parser.add_argument('-u1','--is-http', type=bool, default=True,help='ๅฝ“ๆŒ‡ๅฎšไฝฟ็”จ็›ธๅŒURLๆ—ถ๏ผŒๆŒ‡ๅฎšๆ˜ฏhttp่ฟ˜ๆ˜ฏhttps่ฏทๆฑ‚') parser.add_argument('-f','--url-file', type=str, default='hwurls_top10w.txt',help='urlsๆฅๆบๆ–‡ไปถ') parser.add_argument('-f1','--url-index', type=int, default=0,help='urlsๆฅๆบๆ–‡ไปถไธญๅญ—ๆฎตๅบๅท๏ผŒ้ป˜่ฎคไปŽ0ๅผ€ๅง‹') parser.add_argument('-a0','--start-user-index', type=int, default=0,help='auth ็”จๆˆท็š„ๅบๅท๏ผŒ้ป˜่ฎคไปŽ0ๅผ€ๅง‹') parser.add_argument('-a1','--user-num', type=int, default=1275,help='auth ็”จๆˆทๆ•ฐ้‡') parser.add_argument('-e0','--start-eth0-index', type=int, default=0,help='ๅผ€ๅง‹็š„ๅญ็ฝ‘ๅกๅบๅท๏ผŒ้ป˜่ฎคไปŽ0ๅผ€ๅง‹') parser.add_argument('-e1','--sub-eth0-num', type=int, default=1275,help='ๅญ็ฝ‘ๅกๆŽฅๅฃๆ•ฐ้‡๏ผŒๆฏไธชๆŽฅๅฃไธ€ไธชIPๅœฐๅ€') parser.add_argument('-d','--is-debug', type=bool, default=False,help='ๆ˜ฏๅฆๅผ€ๅฏcurl็š„ๆ‰“ๅฐๆ—ฅๅฟ—') args = parser.parse_args() max_round = args.round first_schedule_time = args.starttime now = datetime.datetime.now() now_str = now.strftime("%H:%M:%S") if first_schedule_time: if len(first_schedule_time)==8 and len(first_schedule_time.split(':'))==3 and first_schedule_time > now_str: pass else: print('-sๆˆ–่€…--starttime ๆ ผๅผไธๅฏน๏ผŒ่ฏท่พ“ๅ…ฅๅคงไบŽๅฝ“ๅ‰ๆ—ถ้—ดๅญ—็ฌฆไธฒ๏ผŒๅฆ‚๏ผš16:20:60 ') sys.exit() else: nexttime = now + datetime.timedelta(seconds=60) 
first_schedule_time = nexttime.strftime("%H:%M:%S") auth_cache_timeout = args.auth_cache_timeout proxy = args.aswg_proxy ip_prefix = args.ip_prefix is_same_url = args.is_same_url is_same_url = True url_file = args.url_file url_index = args.url_index start_user_index = args.start_user_index user_num = args.user_num start_eth0_index = args.start_eth0_index sub_eth0_num = args.sub_eth0_num is_debug = args.is_debug urls = get_urls_from_file(from_file=url_file,url_index=url_index,spliter=',',pre_www='www.') #print('urls=',urls) #url = 'https://www.baidu.com' print('urls_len=',len(urls)) #urls = urls[:300] print('urls_len=',len(urls)) #from zthreads.threadpools.threadpools import Threadpools #thread_pool = Threadpools(5) i = 0 #unique_users = 1275 user_start = start_user_index user_num = user_num sub_eth_start = start_eth0_index eth_num = sub_eth0_num cert = 'rootCA.cer' is_http = True #first_schedule_time = "16:45:00" #auth_cache_timeout = 60 #max_round = 2 print('max_round={0}, first_schedule_time={1}, auth_cache_timeout={2}'.format(max_round,first_schedule_time,auth_cache_timeout)) round_num = 0 while True: #time_now = time.strftime("%H:%M:%S", time.localtime()) now = datetime.datetime.now() time_now = now.strftime("%H:%M:%S") if time_now == first_schedule_time: print('This_schedule_time={0}, round={1}'.format(first_schedule_time,round_num)) start_time = time.time() urls_resquests(urls, proxy=proxy,user_start=user_start,user_num=user_num,sub_eth_start=sub_eth_start, eth_num=eth_num, ip_prefix=ip_prefix, cert=cert,is_same_url=is_same_url, is_http=is_http,debug=is_debug) total_sending_time_seconds = time.time() - start_time print('total_sending_time_seconds={0}. Finished all url requests for round_{1}!!!'.format(total_sending_time_seconds,round_num)) round_num = round_num + 1 if round_num >= max_round: print("-" * 50) print('Finished all test with {0} rounds!!!'.format(max_round)) break else: print("-" * 50) print('Please make sure clear cache before the next schedule time!!!') #now = datetime.datetime.now() #date_str = now.strftime("%Y-%m-%d ") #last_schedule_time_str = date_str + first_schedule_time last_schedule_time = datetime.datetime.strptime(now.strftime("%Y-%m-%d ") + first_schedule_time,'%Y-%m-%d %H:%M:%S') nexttime = last_schedule_time + datetime.timedelta(seconds=auth_cache_timeout+60) # delay 60 seconds first_schedule_time = nexttime.strftime("%H:%M:%S") print('Next_schedule_time={0}...'.format(first_schedule_time)) #time.sleep(sleep_time) else: #print('time_now=',time_now) pass #thread_pool.close() #initial_requests_session(ip=ip,user=ntlm_user)
ip_prefix = '
identifier_name
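The __main__ block in this entry fires one burst of requests per round and then moves the schedule forward by the auth-cache timeout plus a fixed delay. A small Python sketch of that next-round calculation, with made-up example values:

import datetime

def next_schedule(last_hms, auth_cache_timeout, delay=60):
    # Re-anchor the HH:MM:SS string to today's date, then add timeout + delay seconds.
    today = datetime.datetime.now().strftime('%Y-%m-%d ')
    last = datetime.datetime.strptime(today + last_hms, '%Y-%m-%d %H:%M:%S')
    nxt = last + datetime.timedelta(seconds=auth_cache_timeout + delay)
    return nxt.strftime('%H:%M:%S')

print(next_schedule('16:45:00', 600))  # -> 16:56:00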
concurrent_ntlm_auth_requests.py
#!/usr/bin/env python # -*- coding:utf-8 -*- #allisnone 20200403 #https://github.com/urllib3/urllib3/issues/1434 #https://github.com/dopstar/requests-ntlm2 #https://github.com/requests/requests-ntlm #base on python3 #if you request https website, you need to add ASWG CA to following file: #/root/.pyenv/versions/3.5.5/lib/python3.5/site-packages/certifi/cacert.pem #ulimit โ€“n 2000 #pip install requests_ntlm import argparse import re import os import csv import string,sys,time,datetime import requests from requests_toolbelt.adapters import source #from requests_ntlm import HttpNtlmAuth import random import subprocess #import zthreads def get_random_ip_or_user(start,end,prefix='172.16.90.',type='ip'): if type=='ip' and max(start,end)>255: end = 255 i = random.randint(start,end) return prefix + str(i) def get_random_ips_users(start,end,num,prefix='172.16.90.',type='ip'): if type=='ip' and max(start,end)>255: end = 255 sequences = [] for i in range(start,end+1): sequences.append(prefix+str(i)) if num> len(sequences): num = len(sequences) choices = random.sample(sequences,num) return choices def popen_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer'): curl_cmd = 'curl --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'.format( cert,eth,user,proxy,url) subp = subprocess.Popen(curl_cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,close_fds=True)#,encoding="utf-8") try: subp.wait(2) #็ญ‰ๅพ…่ถ…ๆ—ถ except Exception as e: print('curl_request_timeout, error: ',e) return if subp.poll() == 0: print(subp.communicate()[1]) else: print("curl_request-ๅคฑ่ดฅ: ",curl_cmd) return def system_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer',is_http=False,debug=False): """ -I: header request -k: skip ssl --no-keepalive, keepalive=close """ curl_cmd = '' debug = False if is_http: basic_cmd = 'curl -I --no-keepalive --interface {0} --proxy-user {1}:Firewall1 --proxy-ntlm -x {2} {3} &' if debug: pass else: basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &' curl_cmd = basic_cmd.format(eth,user,proxy,url) else: basic_cmd = 'curl -I --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &' if debug: pass else: basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &' curl_cmd = basic_cmd.format(cert,eth,user,proxy,url) try: os_p = os.system(curl_cmd) print('curl_cmd=',curl_cmd) except Exception as e: print('curl_request_timeout: {0}, error: {1}, url={2}, user={3}'.format(curl_cmd,e,url,user)) return def get_urls_from_file(from_file='url16000.txt',url_index=-1,spliter=',',pre_www='www.'): """ ็”จไบŽurl
th_num=254): """ inet 172.18.1.1/16 brd 172.18.255.255 scope global secondary eth0:0 inet 172.18.1.254/16 brd 172.18.255.255 scope global secondary eth0:253 sequence: start with 0 eth_num: eth sequence start with 0 """ user_index = sequence % user_num + user_start eth_index = sequence % eth_num + eth_start """ user_index = sequence if sequence>user_num: #ๅพช็Žฏ๏ผŒๅค็”จ๏ผŒๅ–ไฝ™ user_index = sequence % user_num + user_start eth_index = sequence if eth_index>eth_num: #ๅพช็Žฏ๏ผŒๅค็”จ๏ผŒๅ–ไฝ™ eth_index = eth_index % eth_num + eth_start """ return user_index,eth_index def callback(): return def urls_resquests(urls, proxy='172.17.33.23:8080',user_start=300,user_num=253,sub_eth_start = 0, eth_num=253, ip_prefix = '172.18.1.', cert='rootCA.cer',is_same_url=False, is_http=False,debug=False): """ one ip/eth<--> one user """ i = 0 #count = max(len(urls),user_num,eth_num) #for url in urls: for i in range(max(user_num,eth_num)): url = '' if is_same_url: if is_http: url = 'http://172.16.0.1' #use the same url for request test else: url = 'https://www.baidu.com' user_index = i % user_num + user_start eth_index = i % eth_num + sub_eth_start #ip = get_random_ip_or_user(start=2,end=254) #ip = ip_prefix + str(eth_index + 1) #user = get_random_ip_or_user(start=1,end=99,prefix='df64user',type='user') user = 'userg'+str(user_index) #eth = get_random_ip_or_user(start=2,end=253,prefix='eth0:',type='user') eth = 'eth0:'+str(eth_index) """ For debug print('i={0}: user_index={1}, eth_index={2}'.format(i,user_index,eth_index)) print('ip_{0}={1}'.format(i,ip)) print('eth=',eth) print('user=',user) print("-" * 50) """ #thread_pool.put(system_curl_request, (url,user,eth,), callback) #popen_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer') #system_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer') system_curl_request(url,user,eth,proxy=proxy,cert=cert,is_http=is_http,debug=debug) #i = i + 1 return #""" if __name__ == '__main__': parser = argparse.ArgumentParser(description='่ฏฅPython3่„šๆœฌ็”จไบŽASWGๅšๅนถๅ‘่ฎค่ฏๆต‹่ฏ•ใ€‚\n 1ใ€ไฝฟ็”จๆ–นๆณ•็คบไพ‹:\n python concurrent_ntlm_auth_requests.py -s 17:45:00 -r 2 -t 120 -p 172.17.33.23:8080') parser.add_argument('-r','--round', type=int, default=1,help='่ฎค่ฏๅนถๅ‘ๆต‹่ฏ•็š„ๆต‹่ฏ•ๆฌกๆ•ฐ๏ผŒ้ป˜่ฎค1่ฝฎๆต‹่ฏ•ๅณๅœๆญข') parser.add_argument('-s','--starttime', type=str, default='',help='้ฆ–ๆฌก่ฎค่ฏๅนถๅ‘ๆต‹่ฏ•็š„ๆ—ถ้—ด๏ผŒๅฆ‚ 16:20:60') parser.add_argument('-t','--auth-cache-timeout', type=int, default=600,help='่ฎค่ฏ็ผ“ๅญ˜่ฟ‡ๆœŸๆ—ถ้—ด๏ผŒ้ป˜่ฎค600็ง’') parser.add_argument('-p','--aswg-proxy', type=str, default='172.17.33.23:8080',help='ASWG proxy') parser.add_argument('-i','--ip-prefix', type=str, default='172.18.1.',help='ๅฎขๆˆท็ซฏIPๅ‰็ผ€๏ผŒ้ป˜่ฎคๅชๆ”ฏๆŒCๆฎต๏ผ›ๅ…ถไป–ๆ–นๅผ่‡ช่กŒ้€‚้…') parser.add_argument('-u','--is-same-url', type=bool, default=True,help='ๆ˜ฏๅฆไฝฟ็”จ็›ธๅŒURLๆต‹่ฏ•') parser.add_argument('-u1','--is-http', type=bool, default=True,help='ๅฝ“ๆŒ‡ๅฎšไฝฟ็”จ็›ธๅŒURLๆ—ถ๏ผŒๆŒ‡ๅฎšๆ˜ฏhttp่ฟ˜ๆ˜ฏhttps่ฏทๆฑ‚') parser.add_argument('-f','--url-file', type=str, default='hwurls_top10w.txt',help='urlsๆฅๆบๆ–‡ไปถ') parser.add_argument('-f1','--url-index', type=int, default=0,help='urlsๆฅๆบๆ–‡ไปถไธญๅญ—ๆฎตๅบๅท๏ผŒ้ป˜่ฎคไปŽ0ๅผ€ๅง‹') parser.add_argument('-a0','--start-user-index', type=int, default=0,help='auth ็”จๆˆท็š„ๅบๅท๏ผŒ้ป˜่ฎคไปŽ0ๅผ€ๅง‹') parser.add_argument('-a1','--user-num', type=int, default=1275,help='auth ็”จๆˆทๆ•ฐ้‡') parser.add_argument('-e0','--start-eth0-index', type=int, 
default=0,help='ๅผ€ๅง‹็š„ๅญ็ฝ‘ๅกๅบๅท๏ผŒ้ป˜่ฎคไปŽ0ๅผ€ๅง‹') parser.add_argument('-e1','--sub-eth0-num', type=int, default=1275,help='ๅญ็ฝ‘ๅกๆŽฅๅฃๆ•ฐ้‡๏ผŒๆฏไธชๆŽฅๅฃไธ€ไธชIPๅœฐๅ€') parser.add_argument('-d','--is-debug', type=bool, default=False,help='ๆ˜ฏๅฆๅผ€ๅฏcurl็š„ๆ‰“ๅฐๆ—ฅๅฟ—') args = parser.parse_args() max_round = args.round first_schedule_time = args.starttime now = datetime.datetime.now() now_str = now.strftime("%H:%M:%S") if first_schedule_time: if len(first_schedule_time)==8 and len(first_schedule_time.split(':'))==3 and first_schedule_time > now_str: pass else: print('-sๆˆ–่€…--starttime ๆ ผๅผไธๅฏน๏ผŒ่ฏท่พ“ๅ…ฅๅคงไบŽๅฝ“ๅ‰ๆ—ถ้—ดๅญ—็ฌฆไธฒ๏ผŒๅฆ‚๏ผš16:20:60 ') sys.exit() else: nexttime = now + datetime.timedelta(seconds=60) first_schedule_time = nexttime.strftime("%H:%M:%S") auth_cache_timeout = args.auth_cache_timeout proxy = args.aswg_proxy ip_prefix = args.ip_prefix is_same_url = args.is_same_url is_same_url = True url_file = args.url_file url_index = args.url_index start_user_index = args.start_user_index user_num = args.user_num start_eth0_index = args.start_eth0_index sub_eth0_num = args.sub_eth0_num is_debug = args.is_debug urls = get_urls_from_file(from_file=url_file,url_index=url_index,spliter=',',pre_www='www.') #print('urls=',urls) #url = 'https://www.baidu.com' print('urls_len=',len(urls)) #urls = urls[:300] print('urls_len=',len(urls)) #from zthreads.threadpools.threadpools import Threadpools #thread_pool = Threadpools(5) i = 0 #unique_users = 1275 user_start = start_user_index user_num = user_num sub_eth_start = start_eth0_index eth_num = sub_eth0_num cert = 'rootCA.cer' is_http = True #first_schedule_time = "16:45:00" #auth_cache_timeout = 60 #max_round = 2 print('max_round={0}, first_schedule_time={1}, auth_cache_timeout={2}'.format(max_round,first_schedule_time,auth_cache_timeout)) round_num = 0 while True: #time_now = time.strftime("%H:%M:%S", time.localtime()) now = datetime.datetime.now() time_now = now.strftime("%H:%M:%S") if time_now == first_schedule_time: print('This_schedule_time={0}, round={1}'.format(first_schedule_time,round_num)) start_time = time.time() urls_resquests(urls, proxy=proxy,user_start=user_start,user_num=user_num,sub_eth_start=sub_eth_start, eth_num=eth_num, ip_prefix=ip_prefix, cert=cert,is_same_url=is_same_url, is_http=is_http,debug=is_debug) total_sending_time_seconds = time.time() - start_time print('total_sending_time_seconds={0}. Finished all url requests for round_{1}!!!'.format(total_sending_time_seconds,round_num)) round_num = round_num + 1 if round_num >= max_round: print("-" * 50) print('Finished all test with {0} rounds!!!'.format(max_round)) break else: print("-" * 50) print('Please make sure clear cache before the next schedule time!!!') #now = datetime.datetime.now() #date_str = now.strftime("%Y-%m-%d ") #last_schedule_time_str = date_str + first_schedule_time last_schedule_time = datetime.datetime.strptime(now.strftime("%Y-%m-%d ") + first_schedule_time,'%Y-%m-%d %H:%M:%S') nexttime = last_schedule_time + datetime.timedelta(seconds=auth_cache_timeout+60) # delay 60 seconds first_schedule_time = nexttime.strftime("%H:%M:%S") print('Next_schedule_time={0}...'.format(first_schedule_time)) #time.sleep(sleep_time) else: #print('time_now=',time_now) pass #thread_pool.close() #initial_requests_session(ip=ip,user=ntlm_user)
ๅˆ†็ฑปๆต‹่ฏ•๏ผŒๆต‹่ฏ•ๆ–‡ไปถไธญๅญ˜ๆ”พๅคง้‡็š„urlๅœฐๅ€ :param from_file: str :return: list๏ผŒ URL_list๏ผˆGenerator๏ผ‰ """ txtfile = open(from_file, 'r',encoding='utf-8') url_list = txtfile.readlines() for i in range(0,len(url_list)): url_list[i] = url_list[i].replace('\n','') # print(url_list[i]) if url_index>=0: url_var = url_list[i].split(spliter)[url_index].replace(' ','') #print('url_var=',url_var) protocol_header = url_var[:9].lower() if pre_www not in url_var and not ("http://" in protocol_header or "https://" in protocol_header or "ftp://" in protocol_header): url_var = pre_www + url_var url_list[i] = url_var protocol_header = url_list[i][:9].lower() #print('protocol_header=',protocol_header) if "http://" in protocol_header or "https://" in protocol_header or "ftp://" in protocol_header: pass else: #ๆ— ๅ่ฎฎๅคด้ƒจ๏ผŒ้ป˜่ฎคๅŠ httpๅ่ฎฎ url_list[i] = "https://" + url_list[i] return url_list def get_eth_user_index(sequence=0,user_start=30,user_num=10,eth_start=0,e
identifier_body
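The identifier_body middle above is the body of get_urls_from_file, whose main job is normalising raw URL lines. A simplified per-line sketch follows (file handling and field splitting omitted; the nine-character scheme check mirrors the original):

def normalise_url(line, pre_www='www.'):
    url = line.strip()
    schemes = ('http://', 'https://', 'ftp://')
    has_scheme = any(s in url[:9].lower() for s in schemes)
    if pre_www not in url and not has_scheme:
        url = pre_www + url            # add the www. prefix when no scheme is present
    if not any(s in url[:9].lower() for s in schemes):
        url = 'https://' + url         # default to https when no protocol header is given
    return url

print(normalise_url('example.com\n'))        # https://www.example.com
print(normalise_url('http://example.com'))   # unchanged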
spline.rs
use ordered_float::NotNan; use crate::hitobject::SliderSplineKind; use crate::math::{Math, Point}; /// Represents a spline, a set of points that represents the actual shape of a slider, generated /// from the control points. #[derive(Clone, Debug)] pub struct Spline { /// The actual points pub spline_points: Vec<P>, /// The cumulative lengths over the points. The indices correspond to the spline_points field pub cumulative_lengths: Vec<NotNan<f64>>, } impl Spline { /// Create a new spline from the control points of a slider. /// /// Pixel length gives the length in osu!pixels that the slider should be. If it's not given, /// the full slider will be rendered. pub fn from_control( kind: SliderSplineKind, control_points: &[Point<i32>], pixel_length: Option<f64>, ) -> Self { // no matter what, if there's 2 control points, it's linear let mut kind = kind; let mut control_points = control_points.to_vec(); if control_points.len() == 2 { kind = SliderSplineKind::Linear; } if control_points.len() == 3 && Math::is_line( control_points[0].to_float::<f64>().unwrap(), control_points[1].to_float::<f64>().unwrap(), control_points[2].to_float::<f64>().unwrap(), ) { kind = SliderSplineKind::Linear; control_points.remove(1); } let points = control_points .iter() .map(|p| Point::new(p.x as f64, p.y as f64)) .collect::<Vec<_>>(); let spline_points = match kind { SliderSplineKind::Linear => { let start = points[0]; let end = if let Some(pixel_length) = pixel_length { Math::point_on_line(points[0], points[1], pixel_length) } else { points[1] }; vec![start, end] } SliderSplineKind::Perfect => { let (p1, p2, p3) = (points[0], points[1], points[2]); let (center, radius) = Math::circumcircle(p1, p2, p3); // find the t-values of the start and end of the slider let t0 = (center.y - p1.y).atan2(p1.x - center.x); let mut mid = (center.y - p2.y).atan2(p2.x - center.x); let mut t1 = (center.y - p3.y).atan2(p3.x - center.x); // make sure t0 is less than t1 while mid < t0 { mid += std::f64::consts::TAU; } while t1 < t0 { t1 += std::f64::consts::TAU; } if mid > t1 { t1 -= std::f64::consts::TAU; } let diff = (t1 - t0).abs(); let pixel_length = pixel_length.unwrap_or(radius * diff); // circumference is 2 * pi * r, slider length over circumference is length/(2 * pi * r) let direction_unit = (t1 - t0) / (t1 - t0).abs(); let new_t1 = t0 + direction_unit * (pixel_length / radius); let mut t = t0; let mut c = Vec::new(); loop { if !((new_t1 >= t0 && t < new_t1) || (new_t1 < t0 && t > new_t1)) { break; } let rel = Point::new(t.cos() * radius, -t.sin() * radius); c.push(center + rel); t += (new_t1 - t0) / pixel_length; } c } SliderSplineKind::Bezier => { let mut output = Vec::new(); let mut last_index = 0; let mut i = 0; while i < points.len() { let multipart_segment = i < points.len() - 2 && (points[i] == points[i + 1]); if multipart_segment || i == points.len() - 1 { let sub = &points[last_index..i + 1]; if sub.len() == 2 { output.push(points[0]); output.push(points[1]); } else { create_singlebezier(&mut output, sub); } if multipart_segment { i += 1; } last_index = i; } i += 1; } output } _ => todo!(), }; let mut cumulative_lengths = Vec::with_capacity(spline_points.len()); let mut curr = 0.0; // using NotNan here because these need to be binary-searched over // and f64 isn't Ord cumulative_lengths.push(NotNan::new(curr).unwrap()); for points in spline_points.windows(2) { let dist = points[0].distance(points[1]); curr += dist; cumulative_lengths.push(NotNan::new(curr).unwrap()); } Spline { spline_points, cumulative_lengths, } } 
/// Truncate the length of the spline irreversibly pub fn truncate(&mut self, to_length: f64) { debug!("truncating to {} pixels", to_length); let mut limit_idx = None; for (i, cumul_length) in self.cumulative_lengths.iter().enumerate() { if cumul_length.into_inner() > to_length { limit_idx = Some(i); break; } } let limit_idx = match limit_idx { Some(v) if v > 0 => v, _ => return, }; let prev_idx = limit_idx - 1; let a = self.spline_points[prev_idx]; let b = self.spline_points[limit_idx]; let a_len = self.cumulative_lengths[prev_idx]; debug!("a={:?} (a_len={}) b={:?}", a, b, a_len); let remain = to_length - a_len.into_inner(); let mid = Math::point_on_line(a, b, remain); debug!("remain={:?} mid={:?}", remain, mid); self.spline_points[limit_idx] = mid; self.cumulative_lengths[limit_idx] = NotNan::new(to_length).unwrap(); debug!("spline_points[{}] = {:?}", limit_idx, mid); debug!("cumulative_lengths[{}] = {:?}", limit_idx, to_length); self.spline_points.truncate(limit_idx + 1); self.cumulative_lengths.truncate(limit_idx + 1); debug!("truncated to len {}", limit_idx + 1); } /// Return the pixel length of this spline pub fn pixel_length(&self) -> f64 { self.cumulative_lengths.last().unwrap().into_inner() } /// Return the endpoint of this spline pub fn end_point(&self) -> P { self.spline_points.last().cloned().unwrap() } /// Calculate the angle at the given length on the slider fn angle_at_length(&self, length: f64) -> P { let _length_notnan = NotNan::new(length).unwrap(); // match self.cumulative_lengths.binary_search(&length_notnan) { // Ok(_) => {} // Err(_) => {} // } todo!() } /// Calculate the point at which the slider ball would be after it has traveled a distance of /// `length` into the slider. pub fn point_at_length(&self, length: f64) -> P { let length_notnan = NotNan::new(length).unwrap(); match self.cumulative_lengths.binary_search(&length_notnan) { Ok(idx) => self.spline_points[idx], Err(idx) => { let n = self.spline_points.len(); if idx == 0 && self.spline_points.len() > 2 { return self.spline_points[0]; } else if idx == n { return self.spline_points[n - 1]; }
self.cumulative_lengths[idx].into_inner(), ); let proportion = (length - len1) / (len2 - len1); let (p1, p2) = (self.spline_points[idx - 1], self.spline_points[idx]); (p2 - p1) * P::new(proportion, proportion) + p1 } } } } type P = Point<f64>; fn subdivide(control_points: &[P], l: &mut [P], r: &mut [P], midpoints_buf: &mut [P]) { let count = control_points.len(); midpoints_buf.copy_from_slice(control_points); for i in 0..count { l[i] = midpoints_buf[0]; r[count - i - 1] = midpoints_buf[count - i - 1]; for j in 0..count - i - 1 { midpoints_buf[j] = (midpoints_buf[j] + midpoints_buf[j + 1]) / P::new(2.0, 2.0); } } } fn approximate( control_points: &[P], output: &mut Vec<P>, l_buf: &mut [P], r_buf: &mut [P], midpoints_buf: &mut [P], ) { let count = control_points.len(); subdivide(&control_points, l_buf, r_buf, midpoints_buf); l_buf[count..(count * 2) - 1].clone_from_slice(&r_buf[1..count]); output.push(control_points[0]); for i in 1..count - 1 { let index = 2 * i; let p = (l_buf[index] * P::new(2.0, 2.0) + l_buf[index - 1] + l_buf[index + 1]) * P::new(0.25, 0.25); output.push(p); } } fn is_flat_enough(control_points: &[P], tolerance_sq: f64) -> bool { for i in 1..control_points.len() - 1 { if (control_points[i - 1] - control_points[i] * P::new(2.0, 2.0) + control_points[i + 1]) .length_squared() > tolerance_sq { return false; } } true } fn create_singlebezier(output: &mut Vec<P>, control_points: &[P]) { let count = control_points.len(); const TOLERANCE: f64 = 0.25; const TOLERANCE_SQ: f64 = TOLERANCE * TOLERANCE; if count == 0 { return; } let mut to_flatten: Vec<Vec<P>> = Vec::new(); let mut free_buffers: Vec<Vec<P>> = Vec::new(); let last_control_point = control_points[count - 1]; to_flatten.push(control_points.to_vec()); let mut left_child = vec![P::new(0.0, 0.0); count * 2 - 1]; let mut l_buf = vec![P::new(0.0, 0.0); count * 2 - 1]; let mut r_buf = vec![P::new(0.0, 0.0); count]; let mut midpoints_buf = vec![P::new(0.0, 0.0); count]; while !to_flatten.is_empty() { let mut parent = to_flatten.pop().unwrap(); if is_flat_enough(&parent, TOLERANCE_SQ) { approximate( &parent, output, &mut l_buf[..count * 2 - 1], &mut r_buf[..count], &mut midpoints_buf[..count], ); free_buffers.push(parent); continue; } let mut right_child = free_buffers .pop() .unwrap_or_else(|| vec![P::new(0.0, 0.0); count]); subdivide( &parent, &mut left_child, &mut right_child, &mut midpoints_buf[..count], ); // We re-use the buffer of the parent for one of the children, so that we save one allocation per iteration. parent[..count].clone_from_slice(&left_child[..count]); to_flatten.push(right_child); to_flatten.push(parent); } output.push(last_control_point); }
let (len1, len2) = ( self.cumulative_lengths[idx - 1].into_inner(),
random_line_split
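This entry comes from spline.rs (Rust). To keep the added sketches in one language, here is a Python rendition of the idea behind Spline::point_at_length shown in the suffix above: binary-search the cumulative arc lengths, then linearly interpolate between the two bracketing spline points. The point and length values in the example are made up.

import bisect

def point_at_length(points, cumulative, length):
    idx = bisect.bisect_left(cumulative, length)
    if idx <= 0:
        return points[0]
    if idx >= len(points):
        return points[-1]
    len1, len2 = cumulative[idx - 1], cumulative[idx]
    t = (length - len1) / (len2 - len1)          # proportion along the bracketing segment
    (x1, y1), (x2, y2) = points[idx - 1], points[idx]
    return (x1 + (x2 - x1) * t, y1 + (y2 - y1) * t)

pts = [(0.0, 0.0), (10.0, 0.0), (10.0, 10.0)]
cum = [0.0, 10.0, 20.0]
print(point_at_length(pts, cum, 15.0))  # (10.0, 5.0): halfway along the second segment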
spline.rs
use ordered_float::NotNan; use crate::hitobject::SliderSplineKind; use crate::math::{Math, Point}; /// Represents a spline, a set of points that represents the actual shape of a slider, generated /// from the control points. #[derive(Clone, Debug)] pub struct Spline { /// The actual points pub spline_points: Vec<P>, /// The cumulative lengths over the points. The indices correspond to the spline_points field pub cumulative_lengths: Vec<NotNan<f64>>, } impl Spline { /// Create a new spline from the control points of a slider. /// /// Pixel length gives the length in osu!pixels that the slider should be. If it's not given, /// the full slider will be rendered. pub fn from_control( kind: SliderSplineKind, control_points: &[Point<i32>], pixel_length: Option<f64>, ) -> Self { // no matter what, if there's 2 control points, it's linear let mut kind = kind; let mut control_points = control_points.to_vec(); if control_points.len() == 2 { kind = SliderSplineKind::Linear; } if control_points.len() == 3 && Math::is_line( control_points[0].to_float::<f64>().unwrap(), control_points[1].to_float::<f64>().unwrap(), control_points[2].to_float::<f64>().unwrap(), ) { kind = SliderSplineKind::Linear; control_points.remove(1); } let points = control_points .iter() .map(|p| Point::new(p.x as f64, p.y as f64)) .collect::<Vec<_>>(); let spline_points = match kind { SliderSplineKind::Linear => { let start = points[0]; let end = if let Some(pixel_length) = pixel_length { Math::point_on_line(points[0], points[1], pixel_length) } else { points[1] }; vec![start, end] } SliderSplineKind::Perfect => { let (p1, p2, p3) = (points[0], points[1], points[2]); let (center, radius) = Math::circumcircle(p1, p2, p3); // find the t-values of the start and end of the slider let t0 = (center.y - p1.y).atan2(p1.x - center.x); let mut mid = (center.y - p2.y).atan2(p2.x - center.x); let mut t1 = (center.y - p3.y).atan2(p3.x - center.x); // make sure t0 is less than t1 while mid < t0 { mid += std::f64::consts::TAU; } while t1 < t0 { t1 += std::f64::consts::TAU; } if mid > t1 { t1 -= std::f64::consts::TAU; } let diff = (t1 - t0).abs(); let pixel_length = pixel_length.unwrap_or(radius * diff); // circumference is 2 * pi * r, slider length over circumference is length/(2 * pi * r) let direction_unit = (t1 - t0) / (t1 - t0).abs(); let new_t1 = t0 + direction_unit * (pixel_length / radius); let mut t = t0; let mut c = Vec::new(); loop { if !((new_t1 >= t0 && t < new_t1) || (new_t1 < t0 && t > new_t1)) { break; } let rel = Point::new(t.cos() * radius, -t.sin() * radius); c.push(center + rel); t += (new_t1 - t0) / pixel_length; } c } SliderSplineKind::Bezier => { let mut output = Vec::new(); let mut last_index = 0; let mut i = 0; while i < points.len() { let multipart_segment = i < points.len() - 2 && (points[i] == points[i + 1]); if multipart_segment || i == points.len() - 1 { let sub = &points[last_index..i + 1]; if sub.len() == 2 { output.push(points[0]); output.push(points[1]); } else { create_singlebezier(&mut output, sub); } if multipart_segment { i += 1; } last_index = i; } i += 1; } output } _ => todo!(), }; let mut cumulative_lengths = Vec::with_capacity(spline_points.len()); let mut curr = 0.0; // using NotNan here because these need to be binary-searched over // and f64 isn't Ord cumulative_lengths.push(NotNan::new(curr).unwrap()); for points in spline_points.windows(2) { let dist = points[0].distance(points[1]); curr += dist; cumulative_lengths.push(NotNan::new(curr).unwrap()); } Spline { spline_points, cumulative_lengths, } } 
/// Truncate the length of the spline irreversibly pub fn truncate(&mut self, to_length: f64) { debug!("truncating to {} pixels", to_length); let mut limit_idx = None; for (i, cumul_length) in self.cumulative_lengths.iter().enumerate() { if cumul_length.into_inner() > to_length { limit_idx = Some(i); break; } } let limit_idx = match limit_idx { Some(v) if v > 0 => v, _ => return, }; let prev_idx = limit_idx - 1; let a = self.spline_points[prev_idx]; let b = self.spline_points[limit_idx]; let a_len = self.cumulative_lengths[prev_idx]; debug!("a={:?} (a_len={}) b={:?}", a, b, a_len); let remain = to_length - a_len.into_inner(); let mid = Math::point_on_line(a, b, remain); debug!("remain={:?} mid={:?}", remain, mid); self.spline_points[limit_idx] = mid; self.cumulative_lengths[limit_idx] = NotNan::new(to_length).unwrap(); debug!("spline_points[{}] = {:?}", limit_idx, mid); debug!("cumulative_lengths[{}] = {:?}", limit_idx, to_length); self.spline_points.truncate(limit_idx + 1); self.cumulative_lengths.truncate(limit_idx + 1); debug!("truncated to len {}", limit_idx + 1); } /// Return the pixel length of this spline pub fn pixel_length(&self) -> f64 { self.cumulative_lengths.last().unwrap().into_inner() } /// Return the endpoint of this spline pub fn end_point(&self) -> P { self.spline_points.last().cloned().unwrap() } /// Calculate the angle at the given length on the slider fn angle_at_length(&self, length: f64) -> P { let _length_notnan = NotNan::new(length).unwrap(); // match self.cumulative_lengths.binary_search(&length_notnan) { // Ok(_) => {} // Err(_) => {} // } todo!() } /// Calculate the point at which the slider ball would be after it has traveled a distance of /// `length` into the slider. pub fn point_at_length(&self, length: f64) -> P { let length_notnan = NotNan::new(length).unwrap(); match self.cumulative_lengths.binary_search(&length_notnan) { Ok(idx) => self.spline_points[idx], Err(idx) => { let n = self.spline_points.len(); if idx == 0 && self.spline_points.len() > 2 { return self.spline_points[0]; } else if idx == n { return self.spline_points[n - 1]; } let (len1, len2) = ( self.cumulative_lengths[idx - 1].into_inner(), self.cumulative_lengths[idx].into_inner(), ); let proportion = (length - len1) / (len2 - len1); let (p1, p2) = (self.spline_points[idx - 1], self.spline_points[idx]); (p2 - p1) * P::new(proportion, proportion) + p1 } } } } type P = Point<f64>; fn
(control_points: &[P], l: &mut [P], r: &mut [P], midpoints_buf: &mut [P]) { let count = control_points.len(); midpoints_buf.copy_from_slice(control_points); for i in 0..count { l[i] = midpoints_buf[0]; r[count - i - 1] = midpoints_buf[count - i - 1]; for j in 0..count - i - 1 { midpoints_buf[j] = (midpoints_buf[j] + midpoints_buf[j + 1]) / P::new(2.0, 2.0); } } } fn approximate( control_points: &[P], output: &mut Vec<P>, l_buf: &mut [P], r_buf: &mut [P], midpoints_buf: &mut [P], ) { let count = control_points.len(); subdivide(&control_points, l_buf, r_buf, midpoints_buf); l_buf[count..(count * 2) - 1].clone_from_slice(&r_buf[1..count]); output.push(control_points[0]); for i in 1..count - 1 { let index = 2 * i; let p = (l_buf[index] * P::new(2.0, 2.0) + l_buf[index - 1] + l_buf[index + 1]) * P::new(0.25, 0.25); output.push(p); } } fn is_flat_enough(control_points: &[P], tolerance_sq: f64) -> bool { for i in 1..control_points.len() - 1 { if (control_points[i - 1] - control_points[i] * P::new(2.0, 2.0) + control_points[i + 1]) .length_squared() > tolerance_sq { return false; } } true } fn create_singlebezier(output: &mut Vec<P>, control_points: &[P]) { let count = control_points.len(); const TOLERANCE: f64 = 0.25; const TOLERANCE_SQ: f64 = TOLERANCE * TOLERANCE; if count == 0 { return; } let mut to_flatten: Vec<Vec<P>> = Vec::new(); let mut free_buffers: Vec<Vec<P>> = Vec::new(); let last_control_point = control_points[count - 1]; to_flatten.push(control_points.to_vec()); let mut left_child = vec![P::new(0.0, 0.0); count * 2 - 1]; let mut l_buf = vec![P::new(0.0, 0.0); count * 2 - 1]; let mut r_buf = vec![P::new(0.0, 0.0); count]; let mut midpoints_buf = vec![P::new(0.0, 0.0); count]; while !to_flatten.is_empty() { let mut parent = to_flatten.pop().unwrap(); if is_flat_enough(&parent, TOLERANCE_SQ) { approximate( &parent, output, &mut l_buf[..count * 2 - 1], &mut r_buf[..count], &mut midpoints_buf[..count], ); free_buffers.push(parent); continue; } let mut right_child = free_buffers .pop() .unwrap_or_else(|| vec![P::new(0.0, 0.0); count]); subdivide( &parent, &mut left_child, &mut right_child, &mut midpoints_buf[..count], ); // We re-use the buffer of the parent for one of the children, so that we save one allocation per iteration. parent[..count].clone_from_slice(&left_child[..count]); to_flatten.push(right_child); to_flatten.push(parent); } output.push(last_control_point); }
subdivide
identifier_name
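The identifier_name middle here is `subdivide`, the de Casteljau split used while flattening Bezier segments. A Python sketch of the same computation: repeatedly average neighbouring control points, collecting the left and right halves of the curve at t = 0.5.

def subdivide(control_points):
    pts = list(control_points)
    left, right = [], []
    while pts:
        left.append(pts[0])    # first point of each level -> left half
        right.append(pts[-1])  # last point of each level -> right half
        pts = [((a[0] + b[0]) / 2.0, (a[1] + b[1]) / 2.0)
               for a, b in zip(pts, pts[1:])]
    return left, list(reversed(right))

l, r = subdivide([(0.0, 0.0), (1.0, 2.0), (2.0, 0.0)])
print(l)  # [(0.0, 0.0), (0.5, 1.0), (1.0, 1.0)]
print(r)  # [(1.0, 1.0), (1.5, 1.0), (2.0, 0.0)]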
spline.rs
use ordered_float::NotNan; use crate::hitobject::SliderSplineKind; use crate::math::{Math, Point}; /// Represents a spline, a set of points that represents the actual shape of a slider, generated /// from the control points. #[derive(Clone, Debug)] pub struct Spline { /// The actual points pub spline_points: Vec<P>, /// The cumulative lengths over the points. The indices correspond to the spline_points field pub cumulative_lengths: Vec<NotNan<f64>>, } impl Spline { /// Create a new spline from the control points of a slider. /// /// Pixel length gives the length in osu!pixels that the slider should be. If it's not given, /// the full slider will be rendered. pub fn from_control( kind: SliderSplineKind, control_points: &[Point<i32>], pixel_length: Option<f64>, ) -> Self { // no matter what, if there's 2 control points, it's linear let mut kind = kind; let mut control_points = control_points.to_vec(); if control_points.len() == 2 { kind = SliderSplineKind::Linear; } if control_points.len() == 3 && Math::is_line( control_points[0].to_float::<f64>().unwrap(), control_points[1].to_float::<f64>().unwrap(), control_points[2].to_float::<f64>().unwrap(), ) { kind = SliderSplineKind::Linear; control_points.remove(1); } let points = control_points .iter() .map(|p| Point::new(p.x as f64, p.y as f64)) .collect::<Vec<_>>(); let spline_points = match kind { SliderSplineKind::Linear => { let start = points[0]; let end = if let Some(pixel_length) = pixel_length { Math::point_on_line(points[0], points[1], pixel_length) } else { points[1] }; vec![start, end] } SliderSplineKind::Perfect => { let (p1, p2, p3) = (points[0], points[1], points[2]); let (center, radius) = Math::circumcircle(p1, p2, p3); // find the t-values of the start and end of the slider let t0 = (center.y - p1.y).atan2(p1.x - center.x); let mut mid = (center.y - p2.y).atan2(p2.x - center.x); let mut t1 = (center.y - p3.y).atan2(p3.x - center.x); // make sure t0 is less than t1 while mid < t0 { mid += std::f64::consts::TAU; } while t1 < t0 { t1 += std::f64::consts::TAU; } if mid > t1 { t1 -= std::f64::consts::TAU; } let diff = (t1 - t0).abs(); let pixel_length = pixel_length.unwrap_or(radius * diff); // circumference is 2 * pi * r, slider length over circumference is length/(2 * pi * r) let direction_unit = (t1 - t0) / (t1 - t0).abs(); let new_t1 = t0 + direction_unit * (pixel_length / radius); let mut t = t0; let mut c = Vec::new(); loop { if !((new_t1 >= t0 && t < new_t1) || (new_t1 < t0 && t > new_t1)) { break; } let rel = Point::new(t.cos() * radius, -t.sin() * radius); c.push(center + rel); t += (new_t1 - t0) / pixel_length; } c } SliderSplineKind::Bezier => { let mut output = Vec::new(); let mut last_index = 0; let mut i = 0; while i < points.len() { let multipart_segment = i < points.len() - 2 && (points[i] == points[i + 1]); if multipart_segment || i == points.len() - 1 { let sub = &points[last_index..i + 1]; if sub.len() == 2 { output.push(points[0]); output.push(points[1]); } else { create_singlebezier(&mut output, sub); } if multipart_segment { i += 1; } last_index = i; } i += 1; } output } _ => todo!(), }; let mut cumulative_lengths = Vec::with_capacity(spline_points.len()); let mut curr = 0.0; // using NotNan here because these need to be binary-searched over // and f64 isn't Ord cumulative_lengths.push(NotNan::new(curr).unwrap()); for points in spline_points.windows(2) { let dist = points[0].distance(points[1]); curr += dist; cumulative_lengths.push(NotNan::new(curr).unwrap()); } Spline { spline_points, cumulative_lengths, } } 
/// Truncate the length of the spline irreversibly pub fn truncate(&mut self, to_length: f64) { debug!("truncating to {} pixels", to_length); let mut limit_idx = None; for (i, cumul_length) in self.cumulative_lengths.iter().enumerate() { if cumul_length.into_inner() > to_length { limit_idx = Some(i); break; } } let limit_idx = match limit_idx { Some(v) if v > 0 => v, _ => return, }; let prev_idx = limit_idx - 1; let a = self.spline_points[prev_idx]; let b = self.spline_points[limit_idx]; let a_len = self.cumulative_lengths[prev_idx]; debug!("a={:?} (a_len={}) b={:?}", a, b, a_len); let remain = to_length - a_len.into_inner(); let mid = Math::point_on_line(a, b, remain); debug!("remain={:?} mid={:?}", remain, mid); self.spline_points[limit_idx] = mid; self.cumulative_lengths[limit_idx] = NotNan::new(to_length).unwrap(); debug!("spline_points[{}] = {:?}", limit_idx, mid); debug!("cumulative_lengths[{}] = {:?}", limit_idx, to_length); self.spline_points.truncate(limit_idx + 1); self.cumulative_lengths.truncate(limit_idx + 1); debug!("truncated to len {}", limit_idx + 1); } /// Return the pixel length of this spline pub fn pixel_length(&self) -> f64 { self.cumulative_lengths.last().unwrap().into_inner() } /// Return the endpoint of this spline pub fn end_point(&self) -> P
/// Calculate the angle at the given length on the slider fn angle_at_length(&self, length: f64) -> P { let _length_notnan = NotNan::new(length).unwrap(); // match self.cumulative_lengths.binary_search(&length_notnan) { // Ok(_) => {} // Err(_) => {} // } todo!() } /// Calculate the point at which the slider ball would be after it has traveled a distance of /// `length` into the slider. pub fn point_at_length(&self, length: f64) -> P { let length_notnan = NotNan::new(length).unwrap(); match self.cumulative_lengths.binary_search(&length_notnan) { Ok(idx) => self.spline_points[idx], Err(idx) => { let n = self.spline_points.len(); if idx == 0 && self.spline_points.len() > 2 { return self.spline_points[0]; } else if idx == n { return self.spline_points[n - 1]; } let (len1, len2) = ( self.cumulative_lengths[idx - 1].into_inner(), self.cumulative_lengths[idx].into_inner(), ); let proportion = (length - len1) / (len2 - len1); let (p1, p2) = (self.spline_points[idx - 1], self.spline_points[idx]); (p2 - p1) * P::new(proportion, proportion) + p1 } } } } type P = Point<f64>; fn subdivide(control_points: &[P], l: &mut [P], r: &mut [P], midpoints_buf: &mut [P]) { let count = control_points.len(); midpoints_buf.copy_from_slice(control_points); for i in 0..count { l[i] = midpoints_buf[0]; r[count - i - 1] = midpoints_buf[count - i - 1]; for j in 0..count - i - 1 { midpoints_buf[j] = (midpoints_buf[j] + midpoints_buf[j + 1]) / P::new(2.0, 2.0); } } } fn approximate( control_points: &[P], output: &mut Vec<P>, l_buf: &mut [P], r_buf: &mut [P], midpoints_buf: &mut [P], ) { let count = control_points.len(); subdivide(&control_points, l_buf, r_buf, midpoints_buf); l_buf[count..(count * 2) - 1].clone_from_slice(&r_buf[1..count]); output.push(control_points[0]); for i in 1..count - 1 { let index = 2 * i; let p = (l_buf[index] * P::new(2.0, 2.0) + l_buf[index - 1] + l_buf[index + 1]) * P::new(0.25, 0.25); output.push(p); } } fn is_flat_enough(control_points: &[P], tolerance_sq: f64) -> bool { for i in 1..control_points.len() - 1 { if (control_points[i - 1] - control_points[i] * P::new(2.0, 2.0) + control_points[i + 1]) .length_squared() > tolerance_sq { return false; } } true } fn create_singlebezier(output: &mut Vec<P>, control_points: &[P]) { let count = control_points.len(); const TOLERANCE: f64 = 0.25; const TOLERANCE_SQ: f64 = TOLERANCE * TOLERANCE; if count == 0 { return; } let mut to_flatten: Vec<Vec<P>> = Vec::new(); let mut free_buffers: Vec<Vec<P>> = Vec::new(); let last_control_point = control_points[count - 1]; to_flatten.push(control_points.to_vec()); let mut left_child = vec![P::new(0.0, 0.0); count * 2 - 1]; let mut l_buf = vec![P::new(0.0, 0.0); count * 2 - 1]; let mut r_buf = vec![P::new(0.0, 0.0); count]; let mut midpoints_buf = vec![P::new(0.0, 0.0); count]; while !to_flatten.is_empty() { let mut parent = to_flatten.pop().unwrap(); if is_flat_enough(&parent, TOLERANCE_SQ) { approximate( &parent, output, &mut l_buf[..count * 2 - 1], &mut r_buf[..count], &mut midpoints_buf[..count], ); free_buffers.push(parent); continue; } let mut right_child = free_buffers .pop() .unwrap_or_else(|| vec![P::new(0.0, 0.0); count]); subdivide( &parent, &mut left_child, &mut right_child, &mut midpoints_buf[..count], ); // We re-use the buffer of the parent for one of the children, so that we save one allocation per iteration. parent[..count].clone_from_slice(&left_child[..count]); to_flatten.push(right_child); to_flatten.push(parent); } output.push(last_control_point); }
{ self.spline_points.last().cloned().unwrap() }
identifier_body
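The spline.rs sample above builds a cumulative arc-length table and later binary-searches it in point_at_length; below is a minimal standalone sketch of that idea, using plain (f64, f64) tuples and partition_point instead of the crate's Point<f64> and NotNan. All names here are hypothetical, not taken from the crate.

// Sketch of the arc-length lookup used by Spline::point_at_length.
fn cumulative_lengths(points: &[(f64, f64)]) -> Vec<f64> {
    let mut lengths = Vec::with_capacity(points.len());
    let mut curr = 0.0;
    lengths.push(curr);
    for w in points.windows(2) {
        let (dx, dy) = (w[1].0 - w[0].0, w[1].1 - w[0].1);
        curr += (dx * dx + dy * dy).sqrt();
        lengths.push(curr);
    }
    lengths
}

/// Point reached after travelling `length` along the polyline.
fn point_at_length(points: &[(f64, f64)], lengths: &[f64], length: f64) -> (f64, f64) {
    // First index whose cumulative length is >= the target.
    let idx = lengths.partition_point(|&l| l < length);
    if idx == 0 {
        return points[0];
    }
    if idx == points.len() {
        return points[points.len() - 1];
    }
    // Linear interpolation between the two surrounding spline points.
    let (len1, len2) = (lengths[idx - 1], lengths[idx]);
    let t = (length - len1) / (len2 - len1);
    let (p1, p2) = (points[idx - 1], points[idx]);
    (p1.0 + (p2.0 - p1.0) * t, p1.1 + (p2.1 - p1.1) * t)
}

fn main() {
    let pts = [(0.0, 0.0), (3.0, 0.0), (3.0, 4.0)];
    let lens = cumulative_lengths(&pts);
    assert_eq!(lens, vec![0.0, 3.0, 7.0]);
    // Halfway along the second segment.
    assert_eq!(point_at_length(&pts, &lens, 5.0), (3.0, 2.0));
    println!("ok");
}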
spline.rs
use ordered_float::NotNan; use crate::hitobject::SliderSplineKind; use crate::math::{Math, Point}; /// Represents a spline, a set of points that represents the actual shape of a slider, generated /// from the control points. #[derive(Clone, Debug)] pub struct Spline { /// The actual points pub spline_points: Vec<P>, /// The cumulative lengths over the points. The indices correspond to the spline_points field pub cumulative_lengths: Vec<NotNan<f64>>, } impl Spline { /// Create a new spline from the control points of a slider. /// /// Pixel length gives the length in osu!pixels that the slider should be. If it's not given, /// the full slider will be rendered. pub fn from_control( kind: SliderSplineKind, control_points: &[Point<i32>], pixel_length: Option<f64>, ) -> Self { // no matter what, if there's 2 control points, it's linear let mut kind = kind; let mut control_points = control_points.to_vec(); if control_points.len() == 2 { kind = SliderSplineKind::Linear; } if control_points.len() == 3 && Math::is_line( control_points[0].to_float::<f64>().unwrap(), control_points[1].to_float::<f64>().unwrap(), control_points[2].to_float::<f64>().unwrap(), ) { kind = SliderSplineKind::Linear; control_points.remove(1); } let points = control_points .iter() .map(|p| Point::new(p.x as f64, p.y as f64)) .collect::<Vec<_>>(); let spline_points = match kind { SliderSplineKind::Linear => { let start = points[0]; let end = if let Some(pixel_length) = pixel_length { Math::point_on_line(points[0], points[1], pixel_length) } else { points[1] }; vec![start, end] } SliderSplineKind::Perfect =>
SliderSplineKind::Bezier => { let mut output = Vec::new(); let mut last_index = 0; let mut i = 0; while i < points.len() { let multipart_segment = i < points.len() - 2 && (points[i] == points[i + 1]); if multipart_segment || i == points.len() - 1 { let sub = &points[last_index..i + 1]; if sub.len() == 2 { output.push(points[0]); output.push(points[1]); } else { create_singlebezier(&mut output, sub); } if multipart_segment { i += 1; } last_index = i; } i += 1; } output } _ => todo!(), }; let mut cumulative_lengths = Vec::with_capacity(spline_points.len()); let mut curr = 0.0; // using NotNan here because these need to be binary-searched over // and f64 isn't Ord cumulative_lengths.push(NotNan::new(curr).unwrap()); for points in spline_points.windows(2) { let dist = points[0].distance(points[1]); curr += dist; cumulative_lengths.push(NotNan::new(curr).unwrap()); } Spline { spline_points, cumulative_lengths, } } /// Truncate the length of the spline irreversibly pub fn truncate(&mut self, to_length: f64) { debug!("truncating to {} pixels", to_length); let mut limit_idx = None; for (i, cumul_length) in self.cumulative_lengths.iter().enumerate() { if cumul_length.into_inner() > to_length { limit_idx = Some(i); break; } } let limit_idx = match limit_idx { Some(v) if v > 0 => v, _ => return, }; let prev_idx = limit_idx - 1; let a = self.spline_points[prev_idx]; let b = self.spline_points[limit_idx]; let a_len = self.cumulative_lengths[prev_idx]; debug!("a={:?} (a_len={}) b={:?}", a, b, a_len); let remain = to_length - a_len.into_inner(); let mid = Math::point_on_line(a, b, remain); debug!("remain={:?} mid={:?}", remain, mid); self.spline_points[limit_idx] = mid; self.cumulative_lengths[limit_idx] = NotNan::new(to_length).unwrap(); debug!("spline_points[{}] = {:?}", limit_idx, mid); debug!("cumulative_lengths[{}] = {:?}", limit_idx, to_length); self.spline_points.truncate(limit_idx + 1); self.cumulative_lengths.truncate(limit_idx + 1); debug!("truncated to len {}", limit_idx + 1); } /// Return the pixel length of this spline pub fn pixel_length(&self) -> f64 { self.cumulative_lengths.last().unwrap().into_inner() } /// Return the endpoint of this spline pub fn end_point(&self) -> P { self.spline_points.last().cloned().unwrap() } /// Calculate the angle at the given length on the slider fn angle_at_length(&self, length: f64) -> P { let _length_notnan = NotNan::new(length).unwrap(); // match self.cumulative_lengths.binary_search(&length_notnan) { // Ok(_) => {} // Err(_) => {} // } todo!() } /// Calculate the point at which the slider ball would be after it has traveled a distance of /// `length` into the slider. 
pub fn point_at_length(&self, length: f64) -> P { let length_notnan = NotNan::new(length).unwrap(); match self.cumulative_lengths.binary_search(&length_notnan) { Ok(idx) => self.spline_points[idx], Err(idx) => { let n = self.spline_points.len(); if idx == 0 && self.spline_points.len() > 2 { return self.spline_points[0]; } else if idx == n { return self.spline_points[n - 1]; } let (len1, len2) = ( self.cumulative_lengths[idx - 1].into_inner(), self.cumulative_lengths[idx].into_inner(), ); let proportion = (length - len1) / (len2 - len1); let (p1, p2) = (self.spline_points[idx - 1], self.spline_points[idx]); (p2 - p1) * P::new(proportion, proportion) + p1 } } } } type P = Point<f64>; fn subdivide(control_points: &[P], l: &mut [P], r: &mut [P], midpoints_buf: &mut [P]) { let count = control_points.len(); midpoints_buf.copy_from_slice(control_points); for i in 0..count { l[i] = midpoints_buf[0]; r[count - i - 1] = midpoints_buf[count - i - 1]; for j in 0..count - i - 1 { midpoints_buf[j] = (midpoints_buf[j] + midpoints_buf[j + 1]) / P::new(2.0, 2.0); } } } fn approximate( control_points: &[P], output: &mut Vec<P>, l_buf: &mut [P], r_buf: &mut [P], midpoints_buf: &mut [P], ) { let count = control_points.len(); subdivide(&control_points, l_buf, r_buf, midpoints_buf); l_buf[count..(count * 2) - 1].clone_from_slice(&r_buf[1..count]); output.push(control_points[0]); for i in 1..count - 1 { let index = 2 * i; let p = (l_buf[index] * P::new(2.0, 2.0) + l_buf[index - 1] + l_buf[index + 1]) * P::new(0.25, 0.25); output.push(p); } } fn is_flat_enough(control_points: &[P], tolerance_sq: f64) -> bool { for i in 1..control_points.len() - 1 { if (control_points[i - 1] - control_points[i] * P::new(2.0, 2.0) + control_points[i + 1]) .length_squared() > tolerance_sq { return false; } } true } fn create_singlebezier(output: &mut Vec<P>, control_points: &[P]) { let count = control_points.len(); const TOLERANCE: f64 = 0.25; const TOLERANCE_SQ: f64 = TOLERANCE * TOLERANCE; if count == 0 { return; } let mut to_flatten: Vec<Vec<P>> = Vec::new(); let mut free_buffers: Vec<Vec<P>> = Vec::new(); let last_control_point = control_points[count - 1]; to_flatten.push(control_points.to_vec()); let mut left_child = vec![P::new(0.0, 0.0); count * 2 - 1]; let mut l_buf = vec![P::new(0.0, 0.0); count * 2 - 1]; let mut r_buf = vec![P::new(0.0, 0.0); count]; let mut midpoints_buf = vec![P::new(0.0, 0.0); count]; while !to_flatten.is_empty() { let mut parent = to_flatten.pop().unwrap(); if is_flat_enough(&parent, TOLERANCE_SQ) { approximate( &parent, output, &mut l_buf[..count * 2 - 1], &mut r_buf[..count], &mut midpoints_buf[..count], ); free_buffers.push(parent); continue; } let mut right_child = free_buffers .pop() .unwrap_or_else(|| vec![P::new(0.0, 0.0); count]); subdivide( &parent, &mut left_child, &mut right_child, &mut midpoints_buf[..count], ); // We re-use the buffer of the parent for one of the children, so that we save one allocation per iteration. parent[..count].clone_from_slice(&left_child[..count]); to_flatten.push(right_child); to_flatten.push(parent); } output.push(last_control_point); }
{ let (p1, p2, p3) = (points[0], points[1], points[2]); let (center, radius) = Math::circumcircle(p1, p2, p3); // find the t-values of the start and end of the slider let t0 = (center.y - p1.y).atan2(p1.x - center.x); let mut mid = (center.y - p2.y).atan2(p2.x - center.x); let mut t1 = (center.y - p3.y).atan2(p3.x - center.x); // make sure t0 is less than t1 while mid < t0 { mid += std::f64::consts::TAU; } while t1 < t0 { t1 += std::f64::consts::TAU; } if mid > t1 { t1 -= std::f64::consts::TAU; } let diff = (t1 - t0).abs(); let pixel_length = pixel_length.unwrap_or(radius * diff); // circumference is 2 * pi * r, slider length over circumference is length/(2 * pi * r) let direction_unit = (t1 - t0) / (t1 - t0).abs(); let new_t1 = t0 + direction_unit * (pixel_length / radius); let mut t = t0; let mut c = Vec::new(); loop { if !((new_t1 >= t0 && t < new_t1) || (new_t1 < t0 && t > new_t1)) { break; } let rel = Point::new(t.cos() * radius, -t.sin() * radius); c.push(center + rel); t += (new_t1 - t0) / pixel_length; } c }
conditional_block
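The conditional_block middle above samples a circular arc for the Perfect slider kind. A reduced sketch of that sampling loop follows, with a fixed step count instead of the crate's one-point-per-pixel stepping; the function and variable names are made up for illustration.

// Sample points along an arc of the given radius, starting at angle t0 and
// covering `arc_length` osu!pixels (arc_length = radius * |t1 - t0|).
fn sample_arc(
    center: (f64, f64),
    radius: f64,
    t0: f64,
    arc_length: f64,
    steps: usize,
) -> Vec<(f64, f64)> {
    let t1 = t0 + arc_length / radius;
    (0..=steps)
        .map(|i| {
            let t = t0 + (t1 - t0) * (i as f64) / (steps as f64);
            // The y axis is flipped, hence -sin, as in the sample above.
            (center.0 + t.cos() * radius, center.1 - t.sin() * radius)
        })
        .collect()
}

fn main() {
    // Quarter circle of radius 2 starting at angle 0: arc length = pi.
    let pts = sample_arc((0.0, 0.0), 2.0, 0.0, std::f64::consts::PI, 8);
    assert_eq!(pts.len(), 9);
    let last = pts.last().unwrap();
    // Ends near angle pi/2, i.e. (0, -2) with the flipped y axis.
    assert!((last.0).abs() < 1e-9 && (last.1 + 2.0).abs() < 1e-9);
    println!("{:?}", last);
}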
pattern.rs
use fmt::Formatter; use log::*; use std::borrow::Cow; use std::fmt::{self, Display}; use std::{convert::TryFrom, str::FromStr}; use thiserror::Error; use crate::*; /// A pattern that can function as either a [`Searcher`] or [`Applier`]. /// /// A [`Pattern`] is essentially a for-all quantified expression with /// [`Var`]s as the variables (in the logical sense). /// /// When creating a [`Rewrite`], the most common thing to use as either /// the left hand side (the [`Searcher`]) or the right hand side /// (the [`Applier`]) is a [`Pattern`]. /// /// As a [`Searcher`], a [`Pattern`] does the intuitive /// thing. /// Here is a somewhat verbose formal-ish statement: /// Searching for a pattern in an egraph yields substitutions /// ([`Subst`]s) _s_ such that, for any _s'_โ€”where instead of /// mapping a variables to an eclass as _s_ does, _s'_ maps /// a variable to an arbitrary expression represented by that /// eclassโ€”_p[s']_ (the pattern under substitution _s'_) is also /// represented by the egraph. /// /// As an [`Applier`], a [`Pattern`] performs the given substitution /// and adds the result to the [`EGraph`]. /// /// Importantly, [`Pattern`] implements [`FromStr`] if the /// [`Language`] does. /// This is probably how you'll create most [`Pattern`]s. /// /// ``` /// use egg::*; /// define_language! { /// enum Math { /// Num(i32), /// "+" = Add([Id; 2]), /// } /// } /// /// let mut egraph = EGraph::<Math, ()>::default(); /// let a11 = egraph.add_expr(&"(+ 1 1)".parse().unwrap()); /// let a22 = egraph.add_expr(&"(+ 2 2)".parse().unwrap()); /// /// // use Var syntax (leading question mark) to get a /// // variable in the Pattern /// let same_add: Pattern<Math> = "(+ ?a ?a)".parse().unwrap(); /// /// // Rebuild before searching /// egraph.rebuild(); /// /// // This is the search method from the Searcher trait /// let matches = same_add.search(&egraph); /// let matched_eclasses: Vec<Id> = matches.iter().map(|m| m.eclass).collect(); /// assert_eq!(matched_eclasses, vec![a11, a22]); /// ``` /// /// [`FromStr`]: std::str::FromStr #[derive(Debug, PartialEq, Clone)] pub struct Pattern<L> { /// The actual pattern as a [`RecExpr`] pub ast: PatternAst<L>, program: machine::Program<L>, } /// A [`RecExpr`] that represents a /// [`Pattern`]. pub type PatternAst<L> = RecExpr<ENodeOrVar<L>>; impl<L: Language> PatternAst<L> { /// Returns a new `PatternAst` with the variables renames canonically pub fn alpha_rename(&self) -> Self { let mut vars = HashMap::<Var, Var>::default(); let mut new = PatternAst::default(); fn mkvar(i: usize) -> Var { let vs = &["?x", "?y", "?z", "?w"]; match vs.get(i) { Some(v) => v.parse().unwrap(), None => format!("?v{}", i - vs.len()).parse().unwrap(), } } for n in self.as_ref() { new.add(match n { ENodeOrVar::ENode(_) => n.clone(), ENodeOrVar::Var(v) => { let i = vars.len(); ENodeOrVar::Var(*vars.entry(*v).or_insert_with(|| mkvar(i))) } }); } new } } impl<L: Language> Pattern<L> { /// Creates a new pattern from the given pattern ast. pub fn new(ast: PatternAst<L>) -> Self { let ast = ast.compact(); let program = machine::Program::compile_from_pat(&ast); Pattern { ast, program } } /// Returns a list of the [`Var`]s in this pattern. 
pub fn vars(&self) -> Vec<Var> { let mut vars = vec![]; for n in self.ast.as_ref() { if let ENodeOrVar::Var(v) = n { if !vars.contains(v) { vars.push(*v) } } } vars } } impl<L: Language + Display> Pattern<L> { /// Pretty print this pattern as a sexp with the given width pub fn pretty(&self, width: usize) -> String { self.ast.pretty(width) } } /// The language of [`Pattern`]s. /// #[derive(Debug, Hash, PartialEq, Eq, Clone, PartialOrd, Ord)] pub enum ENodeOrVar<L> { /// An enode from the underlying [`Language`] ENode(L), /// A pattern variable Var(Var), } impl<L: Language> Language for ENodeOrVar<L> { fn matches(&self, _other: &Self) -> bool { panic!("Should never call this") } fn children(&self) -> &[Id] { match self { ENodeOrVar::ENode(n) => n.children(), ENodeOrVar::Var(_) => &[], } } fn children_mut(&mut self) -> &mut [Id] { match self { ENodeOrVar::ENode(n) => n.children_mut(), ENodeOrVar::Var(_) => &mut [], } } } impl<L: Language + Display> Display for ENodeOrVar<L> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { Self::ENode(node) => Display::fmt(node, f), Self::Var(var) => Display::fmt(var, f), } } } #[derive(Debug, Error)] pub enum ENodeOrVarParseError<E> { #[error(transparent)] BadVar(<Var as FromStr>::Err), #[error("tried to parse pattern variable {0:?} as an operator")] UnexpectedVar(String), #[error(transparent)] BadOp(E), } impl<L: FromOp> FromOp for ENodeOrVar<L> { type Error = ENodeOrVarParseError<L::Error>; fn from_op(op: &str, children: Vec<Id>) -> Result<Self, Self::Error> { use ENodeOrVarParseError::*; if op.starts_with('?') && op.len() > 1 { if children.is_empty() { op.parse().map(Self::Var).map_err(BadVar) } else { Err(UnexpectedVar(op.to_owned())) } } else { L::from_op(op, children).map(Self::ENode).map_err(BadOp) } } } impl<L: FromOp> std::str::FromStr for Pattern<L> { type Err = RecExprParseError<ENodeOrVarParseError<L::Error>>; fn from_str(s: &str) -> Result<Self, Self::Err> { PatternAst::from_str(s).map(Self::from) } } impl<'a, L: Language> From<&'a [L]> for Pattern<L> { fn from(expr: &'a [L]) -> Self { let nodes: Vec<_> = expr.iter().cloned().map(ENodeOrVar::ENode).collect(); let ast = RecExpr::from(nodes); Self::new(ast) } } impl<L: Language> From<PatternAst<L>> for Pattern<L> { fn from(ast: PatternAst<L>) -> Self { Self::new(ast) } } impl<L: Language> TryFrom<Pattern<L>> for RecExpr<L> { type Error = Var; fn try_from(pat: Pattern<L>) -> Result<Self, Self::Error> { let nodes = pat.ast.as_ref().iter().cloned(); let ns: Result<Vec<_>, _> = nodes .map(|n| match n { ENodeOrVar::ENode(n) => Ok(n), ENodeOrVar::Var(v) => Err(v), }) .collect(); ns.map(RecExpr::from) } } impl<L: Language + Display> Display for Pattern<L> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { Display::fmt(&self.ast, f) } } /// The result of searching a [`Searcher`] over one eclass. /// /// Note that one [`SearchMatches`] can contain many found /// substititions. So taking the length of a list of [`SearchMatches`] /// tells you how many eclasses something was matched in, _not_ how /// many matches were found total. /// #[derive(Debug)] pub struct SearchMatches<'a, L: Language> { /// The eclass id that these matches were found in. pub eclass: Id, /// The substitutions for each match. pub substs: Vec<Subst>, /// Optionally, an ast for the matches used in proof production. 
pub ast: Option<Cow<'a, PatternAst<L>>>, } impl<L: Language, A: Analysis<L>> Searcher<L, A> for Pattern<L> { fn get_pattern_ast(&self) -> Option<&PatternAst<L>> { Some(&self.ast) } fn search(&self, egraph: &EGraph<L, A>) -> Vec<SearchMatches<L>> { match self.ast.as_ref().last().unwrap() { ENodeOrVar::ENode(e) => { #[allow(clippy::mem_discriminant_non_enum)] let key = std::mem::discriminant(e); match egraph.classes_by_op.get(&key) { None => vec![], Some(ids) => ids .iter() .filter_map(|&id| self.search_eclass(egraph, id)) .collect(), } } ENodeOrVar::Var(_) => egraph .classes() .filter_map(|e| self.search_eclass(egraph, e.id)) .collect(), } } fn search_eclass(&self, egraph: &EGraph<L, A>, eclass: Id) -> Option<SearchMatches<L>> { let substs = self.program.run(egraph, eclass); if substs.is_empty() { None } else { let ast = Some(Cow::Borrowed(&self.ast)); Some(SearchMatches { eclass, substs, ast, }) } } fn vars
lf) -> Vec<Var> { Pattern::vars(self) } } impl<L, A> Applier<L, A> for Pattern<L> where L: Language, A: Analysis<L>, { fn get_pattern_ast(&self) -> Option<&PatternAst<L>> { Some(&self.ast) } fn apply_matches( &self, egraph: &mut EGraph<L, A>, matches: &[SearchMatches<L>], rule_name: Symbol, ) -> Vec<Id> { let mut added = vec![]; let ast = self.ast.as_ref(); let mut id_buf = vec![0.into(); ast.len()]; for mat in matches { let sast = mat.ast.as_ref().map(|cow| cow.as_ref()); for subst in &mat.substs { let did_something; let id; if egraph.are_explanations_enabled() { let (id_temp, did_something_temp) = egraph.union_instantiations(sast.unwrap(), &self.ast, subst, rule_name); did_something = did_something_temp; id = id_temp; } else { id = apply_pat(&mut id_buf, ast, egraph, subst); did_something = egraph.union(id, mat.eclass); } if did_something { added.push(id) } } } added } fn apply_one( &self, egraph: &mut EGraph<L, A>, eclass: Id, subst: &Subst, searcher_ast: Option<&PatternAst<L>>, rule_name: Symbol, ) -> Vec<Id> { let ast = self.ast.as_ref(); let mut id_buf = vec![0.into(); ast.len()]; let id = apply_pat(&mut id_buf, ast, egraph, subst); if let Some(ast) = searcher_ast { let (from, did_something) = egraph.union_instantiations(ast, &self.ast, subst, rule_name); if did_something { vec![from] } else { vec![] } } else if egraph.union(eclass, id) { vec![eclass] } else { vec![] } } fn vars(&self) -> Vec<Var> { Pattern::vars(self) } } pub(crate) fn apply_pat<L: Language, A: Analysis<L>>( ids: &mut [Id], pat: &[ENodeOrVar<L>], egraph: &mut EGraph<L, A>, subst: &Subst, ) -> Id { debug_assert_eq!(pat.len(), ids.len()); trace!("apply_rec {:2?} {:?}", pat, subst); for (i, pat_node) in pat.iter().enumerate() { let id = match pat_node { ENodeOrVar::Var(w) => subst[*w], ENodeOrVar::ENode(e) => { let n = e.clone().map_children(|child| ids[usize::from(child)]); trace!("adding: {:?}", n); egraph.add(n) } }; ids[i] = id; } *ids.last().unwrap() } #[cfg(test)] mod tests { use crate::{SymbolLang as S, *}; type EGraph = crate::EGraph<S, ()>; #[test] fn simple_match() { crate::init_logger(); let mut egraph = EGraph::default(); let (plus_id, _) = egraph.union_instantiations( &"(+ x y)".parse().unwrap(), &"(+ z w)".parse().unwrap(), &Default::default(), "union_plus".to_string(), ); egraph.rebuild(); let commute_plus = rewrite!( "commute_plus"; "(+ ?a ?b)" => "(+ ?b ?a)" ); let matches = commute_plus.search(&egraph); let n_matches: usize = matches.iter().map(|m| m.substs.len()).sum(); assert_eq!(n_matches, 2, "matches is wrong: {:#?}", matches); let applications = commute_plus.apply(&mut egraph, &matches); egraph.rebuild(); assert_eq!(applications.len(), 2); let actual_substs: Vec<Subst> = matches.iter().flat_map(|m| m.substs.clone()).collect(); println!("Here are the substs!"); for m in &actual_substs { println!("substs: {:?}", m); } egraph.dot().to_dot("target/simple-match.dot").unwrap(); use crate::extract::{AstSize, Extractor}; let ext = Extractor::new(&egraph, AstSize); let (_, best) = ext.find_best(plus_id); eprintln!("Best: {:#?}", best); } #[test] fn nonlinear_patterns() { crate::init_logger(); let mut egraph = EGraph::default(); egraph.add_expr(&"(f a a)".parse().unwrap()); egraph.add_expr(&"(f a (g a))))".parse().unwrap()); egraph.add_expr(&"(f a (g b))))".parse().unwrap()); egraph.add_expr(&"(h (foo a b) 0 1)".parse().unwrap()); egraph.add_expr(&"(h (foo a b) 1 0)".parse().unwrap()); egraph.add_expr(&"(h (foo a b) 0 0)".parse().unwrap()); egraph.rebuild(); let n_matches = |s: &str| 
s.parse::<Pattern<S>>().unwrap().n_matches(&egraph); assert_eq!(n_matches("(f ?x ?y)"), 3); assert_eq!(n_matches("(f ?x ?x)"), 1); assert_eq!(n_matches("(f ?x (g ?y))"), 2); assert_eq!(n_matches("(f ?x (g ?x))"), 1); assert_eq!(n_matches("(h ?x 0 0)"), 1); } }
(&se
identifier_name
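A small usage sketch for the Pattern::vars and n_matches machinery in the pattern.rs sample above, assuming the egg crate and its built-in SymbolLang test language are available as dependencies; the pattern and expression strings are invented.

use egg::{EGraph, Pattern, Searcher, SymbolLang};

fn main() {
    // Each distinct ?var is reported once by vars(), in first-appearance order.
    let pat: Pattern<SymbolLang> = "(+ ?a (* ?b ?a))".parse().unwrap();
    assert_eq!(pat.vars().len(), 2);

    // Searching requires a rebuilt egraph, as the doc example in the sample notes.
    let mut egraph: EGraph<SymbolLang, ()> = Default::default();
    egraph.add_expr(&"(+ x (* y x))".parse().unwrap());
    egraph.rebuild();
    assert_eq!(pat.n_matches(&egraph), 1);
}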
pattern.rs
use fmt::Formatter; use log::*; use std::borrow::Cow; use std::fmt::{self, Display}; use std::{convert::TryFrom, str::FromStr}; use thiserror::Error; use crate::*; /// A pattern that can function as either a [`Searcher`] or [`Applier`]. /// /// A [`Pattern`] is essentially a for-all quantified expression with /// [`Var`]s as the variables (in the logical sense). /// /// When creating a [`Rewrite`], the most common thing to use as either /// the left hand side (the [`Searcher`]) or the right hand side /// (the [`Applier`]) is a [`Pattern`]. /// /// As a [`Searcher`], a [`Pattern`] does the intuitive /// thing. /// Here is a somewhat verbose formal-ish statement: /// Searching for a pattern in an egraph yields substitutions /// ([`Subst`]s) _s_ such that, for any _s'_โ€”where instead of /// mapping a variables to an eclass as _s_ does, _s'_ maps /// a variable to an arbitrary expression represented by that /// eclassโ€”_p[s']_ (the pattern under substitution _s'_) is also /// represented by the egraph. /// /// As an [`Applier`], a [`Pattern`] performs the given substitution /// and adds the result to the [`EGraph`]. /// /// Importantly, [`Pattern`] implements [`FromStr`] if the /// [`Language`] does. /// This is probably how you'll create most [`Pattern`]s. /// /// ``` /// use egg::*; /// define_language! { /// enum Math { /// Num(i32), /// "+" = Add([Id; 2]), /// } /// } /// /// let mut egraph = EGraph::<Math, ()>::default(); /// let a11 = egraph.add_expr(&"(+ 1 1)".parse().unwrap()); /// let a22 = egraph.add_expr(&"(+ 2 2)".parse().unwrap()); /// /// // use Var syntax (leading question mark) to get a /// // variable in the Pattern /// let same_add: Pattern<Math> = "(+ ?a ?a)".parse().unwrap(); /// /// // Rebuild before searching /// egraph.rebuild(); /// /// // This is the search method from the Searcher trait /// let matches = same_add.search(&egraph); /// let matched_eclasses: Vec<Id> = matches.iter().map(|m| m.eclass).collect(); /// assert_eq!(matched_eclasses, vec![a11, a22]); /// ``` /// /// [`FromStr`]: std::str::FromStr #[derive(Debug, PartialEq, Clone)] pub struct Pattern<L> { /// The actual pattern as a [`RecExpr`] pub ast: PatternAst<L>, program: machine::Program<L>, } /// A [`RecExpr`] that represents a /// [`Pattern`]. pub type PatternAst<L> = RecExpr<ENodeOrVar<L>>; impl<L: Language> PatternAst<L> { /// Returns a new `PatternAst` with the variables renames canonically pub fn alpha_rename(&self) -> Self { let mut vars = HashMap::<Var, Var>::default(); let mut new = PatternAst::default(); fn mkvar(i: usize) -> Var { let vs = &["?x", "?y", "?z", "?w"]; match vs.get(i) { Some(v) => v.parse().unwrap(), None => format!("?v{}", i - vs.len()).parse().unwrap(), } } for n in self.as_ref() { new.add(match n { ENodeOrVar::ENode(_) => n.clone(), ENodeOrVar::Var(v) => { let i = vars.len(); ENodeOrVar::Var(*vars.entry(*v).or_insert_with(|| mkvar(i))) } }); } new } } impl<L: Language> Pattern<L> { /// Creates a new pattern from the given pattern ast. pub fn new(ast: PatternAst<L>) -> Self { let ast = ast.compact(); let program = machine::Program::compile_from_pat(&ast); Pattern { ast, program } } /// Returns a list of the [`Var`]s in this pattern. 
pub fn vars(&self) -> Vec<Var> { let mut vars = vec![]; for n in self.ast.as_ref() { if let ENodeOrVar::Var(v) = n { if !vars.contains(v) { vars.push(*v) } } } vars } } impl<L: Language + Display> Pattern<L> { /// Pretty print this pattern as a sexp with the given width pub fn pretty(&self, width: usize) -> String { self.ast.pretty(width) } } /// The language of [`Pattern`]s. /// #[derive(Debug, Hash, PartialEq, Eq, Clone, PartialOrd, Ord)] pub enum ENodeOrVar<L> { /// An enode from the underlying [`Language`] ENode(L), /// A pattern variable Var(Var), } impl<L: Language> Language for ENodeOrVar<L> { fn matches(&self, _other: &Self) -> bool { panic!("Should never call this") } fn children(&self) -> &[Id] { match self { ENodeOrVar::ENode(n) => n.children(), ENodeOrVar::Var(_) => &[], } } fn children_mut(&mut self) -> &mut [Id] { match self { ENodeOrVar::ENode(n) => n.children_mut(), ENodeOrVar::Var(_) => &mut [], } } } impl<L: Language + Display> Display for ENodeOrVar<L> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { Self::ENode(node) => Display::fmt(node, f), Self::Var(var) => Display::fmt(var, f), } } } #[derive(Debug, Error)] pub enum ENodeOrVarParseError<E> { #[error(transparent)] BadVar(<Var as FromStr>::Err), #[error("tried to parse pattern variable {0:?} as an operator")] UnexpectedVar(String), #[error(transparent)] BadOp(E), } impl<L: FromOp> FromOp for ENodeOrVar<L> { type Error = ENodeOrVarParseError<L::Error>; fn from_op(op: &str, children: Vec<Id>) -> Result<Self, Self::Error> { use ENodeOrVarParseError::*; if op.starts_with('?') && op.len() > 1 { if children.is_empty() { op.parse().map(Self::Var).map_err(BadVar) } else { Err(UnexpectedVar(op.to_owned())) } } else { L::from_op(op, children).map(Self::ENode).map_err(BadOp) } } } impl<L: FromOp> std::str::FromStr for Pattern<L> { type Err = RecExprParseError<ENodeOrVarParseError<L::Error>>; fn from_str(s: &str) -> Result<Self, Self::Err> { PatternAst::from_str(s).map(Self::from) } } impl<'a, L: Language> From<&'a [L]> for Pattern<L> { fn from(expr: &'a [L]) -> Self { let nodes: Vec<_> = expr.iter().cloned().map(ENodeOrVar::ENode).collect(); let ast = RecExpr::from(nodes); Self::new(ast) } } impl<L: Language> From<PatternAst<L>> for Pattern<L> { fn from(ast: PatternAst<L>) -> Self { Self::new(ast) } } impl<L: Language> TryFrom<Pattern<L>> for RecExpr<L> { type Error = Var; fn try_from(pat: Pattern<L>) -> Result<Self, Self::Error> { let nodes = pat.ast.as_ref().iter().cloned(); let ns: Result<Vec<_>, _> = nodes .map(|n| match n { ENodeOrVar::ENode(n) => Ok(n), ENodeOrVar::Var(v) => Err(v), }) .collect(); ns.map(RecExpr::from) } } impl<L: Language + Display> Display for Pattern<L> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { Display::fmt(&self.ast, f) } } /// The result of searching a [`Searcher`] over one eclass. /// /// Note that one [`SearchMatches`] can contain many found /// substititions. So taking the length of a list of [`SearchMatches`] /// tells you how many eclasses something was matched in, _not_ how /// many matches were found total. /// #[derive(Debug)] pub struct SearchMatches<'a, L: Language> { /// The eclass id that these matches were found in. pub eclass: Id, /// The substitutions for each match. pub substs: Vec<Subst>, /// Optionally, an ast for the matches used in proof production. 
pub ast: Option<Cow<'a, PatternAst<L>>>, } impl<L: Language, A: Analysis<L>> Searcher<L, A> for Pattern<L> { fn get_pattern_ast(&self) -> Option<&PatternAst<L>> { Some(&self.ast) } fn search(&self, egraph: &EGraph<L, A>) -> Vec<SearchMatches<L>> { match self.ast.as_ref().last().unwrap() { ENodeOrVar::ENode(e) => { #[allow(clippy::mem_discriminant_non_enum)] let key = std::mem::discriminant(e); match egraph.classes_by_op.get(&key) { None => vec![], Some(ids) => ids .iter() .filter_map(|&id| self.search_eclass(egraph, id)) .collect(), } } ENodeOrVar::Var(_) => egraph .classes() .filter_map(|e| self.search_eclass(egraph, e.id)) .collect(), } } fn search_eclass(&self, egraph: &EGraph<L, A>, eclass: Id) -> Option<SearchMatches<L>> { let substs = self.program.run(egraph, eclass); if substs.is_empty() { None } else { let ast = Some(Cow::Borrowed(&self.ast)); Some(SearchMatches { eclass, substs, ast, }) } } fn vars(&self) -> Vec<Var> { Pattern::vars(self) } } impl<L, A> Applier<L, A> for Pattern<L> where L: Language, A: Analysis<L>, { fn get_pattern_ast(&self) -> Option<&PatternAst<L>> { Some(&self.ast) } fn apply_matches( &self, egraph: &mut EGraph<L, A>, matches: &[SearchMatches<L>], rule_name: Symbol, ) -> Vec<Id> { let mut added = vec![]; let ast = self.ast.as_ref(); let mut id_buf = vec![0.into(); ast.len()]; for mat in matches { let sast = mat.ast.as_ref().map(|cow| cow.as_ref()); for subst in &mat.substs { let did_something; let id; if egraph.are_explanations_enabled() { let (id_temp, did_something_temp) = egraph.union_instantiations(sast.unwrap(), &self.ast, subst, rule_name); did_something = did_something_temp; id = id_temp; } else { id = apply_pat(&mut id_buf, ast, egraph, subst); did_something = egraph.union(id, mat.eclass); } if did_something { added.push(id) } } } added } fn apply_one( &self, egraph: &mut EGraph<L, A>, eclass: Id, subst: &Subst, searcher_ast: Option<&PatternAst<L>>, rule_name: Symbol, ) -> Vec<Id> { let ast = self.ast.as_ref(); let mut id_buf = vec![0.into(); ast.len()]; let id = apply_pat(&mut id_buf, ast, egraph, subst); if let Some(ast) = searcher_ast { let (from, did_something) = egraph.union_instantiations(ast, &self.ast, subst, rule_name); if did_something { vec![from] } else { vec![] } } else if egraph.union(eclass, id) { vec![eclass] } else { vec![] } } fn vars(&self) -> Vec<Var> { Pattern::vars(self) } } pub(crate) fn apply_pat<L: Language, A: Analysis<L>>( ids: &mut [Id], pat: &[ENodeOrVar<L>], egraph: &mut EGraph<L, A>, subst: &Subst, ) -> Id { debug_assert_eq!(pat.len(), ids.len()); trace!("apply_rec {:2?} {:?}", pat, subst); for (i, pat_node) in pat.iter().enumerate() { let id = match pat_node { ENodeOrVar::Var(w) => subst[*w], ENodeOrVar::ENode(e) => { let n = e.clone().map_children(|child| ids[usize::from(child)]); trace!("adding: {:?}", n); egraph.add(n) }
} *ids.last().unwrap() } #[cfg(test)] mod tests { use crate::{SymbolLang as S, *}; type EGraph = crate::EGraph<S, ()>; #[test] fn simple_match() { crate::init_logger(); let mut egraph = EGraph::default(); let (plus_id, _) = egraph.union_instantiations( &"(+ x y)".parse().unwrap(), &"(+ z w)".parse().unwrap(), &Default::default(), "union_plus".to_string(), ); egraph.rebuild(); let commute_plus = rewrite!( "commute_plus"; "(+ ?a ?b)" => "(+ ?b ?a)" ); let matches = commute_plus.search(&egraph); let n_matches: usize = matches.iter().map(|m| m.substs.len()).sum(); assert_eq!(n_matches, 2, "matches is wrong: {:#?}", matches); let applications = commute_plus.apply(&mut egraph, &matches); egraph.rebuild(); assert_eq!(applications.len(), 2); let actual_substs: Vec<Subst> = matches.iter().flat_map(|m| m.substs.clone()).collect(); println!("Here are the substs!"); for m in &actual_substs { println!("substs: {:?}", m); } egraph.dot().to_dot("target/simple-match.dot").unwrap(); use crate::extract::{AstSize, Extractor}; let ext = Extractor::new(&egraph, AstSize); let (_, best) = ext.find_best(plus_id); eprintln!("Best: {:#?}", best); } #[test] fn nonlinear_patterns() { crate::init_logger(); let mut egraph = EGraph::default(); egraph.add_expr(&"(f a a)".parse().unwrap()); egraph.add_expr(&"(f a (g a))))".parse().unwrap()); egraph.add_expr(&"(f a (g b))))".parse().unwrap()); egraph.add_expr(&"(h (foo a b) 0 1)".parse().unwrap()); egraph.add_expr(&"(h (foo a b) 1 0)".parse().unwrap()); egraph.add_expr(&"(h (foo a b) 0 0)".parse().unwrap()); egraph.rebuild(); let n_matches = |s: &str| s.parse::<Pattern<S>>().unwrap().n_matches(&egraph); assert_eq!(n_matches("(f ?x ?y)"), 3); assert_eq!(n_matches("(f ?x ?x)"), 1); assert_eq!(n_matches("(f ?x (g ?y))))"), 2); assert_eq!(n_matches("(f ?x (g ?x))))"), 1); assert_eq!(n_matches("(h ?x 0 0)"), 1); } }
}; ids[i] = id;
random_line_split
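apply_pat in the sample above instantiates a flattened pattern bottom-up into a buffer of already-built ids. Below is a toy analogue of that walk using strings instead of e-graph Ids; the Node enum and every name in it are hypothetical.

use std::collections::HashMap;

// Toy stand-in for the flattened PatternAst walked by apply_pat: every node is
// either a variable or an operator whose children are indices of earlier nodes
// (children always precede parents, so one forward pass suffices).
enum Node {
    Var(&'static str),
    Op(&'static str, Vec<usize>),
}

// Instantiate the pattern under a substitution, reusing a buffer of built
// strings the same way apply_pat reuses its `ids` buffer.
fn instantiate(pat: &[Node], subst: &HashMap<&str, String>) -> String {
    let mut built: Vec<String> = Vec::with_capacity(pat.len());
    for node in pat {
        let s = match node {
            Node::Var(v) => subst[v].clone(),
            Node::Op(op, children) => {
                let args: Vec<&str> = children.iter().map(|&c| built[c].as_str()).collect();
                format!("({} {})", op, args.join(" "))
            }
        };
        built.push(s);
    }
    built.pop().unwrap() // the last node is the root
}

fn main() {
    // Pattern (+ ?a (* ?b ?a)) in flattened, children-first order.
    let pat = [
        Node::Var("?a"),
        Node::Var("?b"),
        Node::Op("*", vec![1, 0]),
        Node::Op("+", vec![0, 2]),
    ];
    let subst: HashMap<&str, String> =
        [("?a", "x".to_string()), ("?b", "y".to_string())].into_iter().collect();
    assert_eq!(instantiate(&pat, &subst), "(+ x (* y x))");
    println!("ok");
}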
pattern.rs
use fmt::Formatter; use log::*; use std::borrow::Cow; use std::fmt::{self, Display}; use std::{convert::TryFrom, str::FromStr}; use thiserror::Error; use crate::*; /// A pattern that can function as either a [`Searcher`] or [`Applier`]. /// /// A [`Pattern`] is essentially a for-all quantified expression with /// [`Var`]s as the variables (in the logical sense). /// /// When creating a [`Rewrite`], the most common thing to use as either /// the left hand side (the [`Searcher`]) or the right hand side /// (the [`Applier`]) is a [`Pattern`]. /// /// As a [`Searcher`], a [`Pattern`] does the intuitive /// thing. /// Here is a somewhat verbose formal-ish statement: /// Searching for a pattern in an egraph yields substitutions /// ([`Subst`]s) _s_ such that, for any _s'_โ€”where instead of /// mapping a variables to an eclass as _s_ does, _s'_ maps /// a variable to an arbitrary expression represented by that /// eclassโ€”_p[s']_ (the pattern under substitution _s'_) is also /// represented by the egraph. /// /// As an [`Applier`], a [`Pattern`] performs the given substitution /// and adds the result to the [`EGraph`]. /// /// Importantly, [`Pattern`] implements [`FromStr`] if the /// [`Language`] does. /// This is probably how you'll create most [`Pattern`]s. /// /// ``` /// use egg::*; /// define_language! { /// enum Math { /// Num(i32), /// "+" = Add([Id; 2]), /// } /// } /// /// let mut egraph = EGraph::<Math, ()>::default(); /// let a11 = egraph.add_expr(&"(+ 1 1)".parse().unwrap()); /// let a22 = egraph.add_expr(&"(+ 2 2)".parse().unwrap()); /// /// // use Var syntax (leading question mark) to get a /// // variable in the Pattern /// let same_add: Pattern<Math> = "(+ ?a ?a)".parse().unwrap(); /// /// // Rebuild before searching /// egraph.rebuild(); /// /// // This is the search method from the Searcher trait /// let matches = same_add.search(&egraph); /// let matched_eclasses: Vec<Id> = matches.iter().map(|m| m.eclass).collect(); /// assert_eq!(matched_eclasses, vec![a11, a22]); /// ``` /// /// [`FromStr`]: std::str::FromStr #[derive(Debug, PartialEq, Clone)] pub struct Pattern<L> { /// The actual pattern as a [`RecExpr`] pub ast: PatternAst<L>, program: machine::Program<L>, } /// A [`RecExpr`] that represents a /// [`Pattern`]. pub type PatternAst<L> = RecExpr<ENodeOrVar<L>>; impl<L: Language> PatternAst<L> { /// Returns a new `PatternAst` with the variables renames canonically pub fn alpha_rename(&self) -> Self { let mut vars = HashMap::<Var, Var>::default(); let mut new = PatternAst::default(); fn mkvar(i: usize) -> Var { let vs = &["?x", "?y", "?z", "?w"]; match vs.get(i) { Some(v) => v.parse().unwrap(), None => format!("?v{}", i - vs.len()).parse().unwrap(), } } for n in self.as_ref() { new.add(match n { ENodeOrVar::ENode(_) => n.clone(), ENodeOrVar::Var(v) => { let i = vars.len(); ENodeOrVar::Var(*vars.entry(*v).or_insert_with(|| mkvar(i))) } }); } new } } impl<L: Language> Pattern<L> { /// Creates a new pattern from the given pattern ast. pub fn new(ast: PatternAst<L>) -> Self { let ast = ast.compact(); let program = machine::Program::compile_from_pat(&ast); Pattern { ast, program } } /// Returns a list of the [`Var`]s in this pattern. 
pub fn vars(&self) -> Vec<Var> { let mut vars = vec![]; for n in self.ast.as_ref() { if let ENodeOrVar::Var(v) = n { if !vars.contains(v) { vars.push(*v) } } } vars } } impl<L: Language + Display> Pattern<L> { /// Pretty print this pattern as a sexp with the given width pub fn pretty(&self, width: usize) -> String { self.ast.pretty(width) } } /// The language of [`Pattern`]s. /// #[derive(Debug, Hash, PartialEq, Eq, Clone, PartialOrd, Ord)] pub enum ENodeOrVar<L> { /// An enode from the underlying [`Language`] ENode(L), /// A pattern variable Var(Var), } impl<L: Language> Language for ENodeOrVar<L> { fn matches(&self, _other: &Self) -> bool { panic!("Should never call this") } fn children(&self) -> &[Id] { match self { ENodeOrVar::ENode(n) => n.children(), ENodeOrVar::Var(_) => &[], } } fn children_mut(&mut self) -> &mut [Id] { match self { ENodeOrVar::ENode(n) => n.children_mut(), ENodeOrVar::Var(_) => &mut [], } } } impl<L: Language + Display> Display for ENodeOrVar<L> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { Self::ENode(node) => Display::fmt(node, f), Self::Var(var) => Display::fmt(var, f), } } } #[derive(Debug, Error)] pub enum ENodeOrVarParseError<E> { #[error(transparent)] BadVar(<Var as FromStr>::Err), #[error("tried to parse pattern variable {0:?} as an operator")] UnexpectedVar(String), #[error(transparent)] BadOp(E), } impl<L: FromOp> FromOp for ENodeOrVar<L> { type Error = ENodeOrVarParseError<L::Error>; fn from_op(op: &str, children: Vec<Id>) -> Result<Self, Self::Error> { use ENodeOrVarParseError::*; if op.starts_with('?') && op.len() > 1 { if children.is_empty() { op.parse().map(Self::Var).map_err(BadVar) } else { Err(UnexpectedVar(op.to_owned())) } } else { L::from_op(op, children).map(Self::ENode).map_err(BadOp) } } } impl<L: FromOp> std::str::FromStr for Pattern<L> { type Err = RecExprParseError<ENodeOrVarParseError<L::Error>>; fn from_str(s: &str) -> Result<Self, Self::Err> { PatternAst::from_str(s).map(Self::from) } } impl<'a, L: Language> From<&'a [L]> for Pattern<L> { fn from(expr: &'a [L]) -> Self { let nodes: Vec<_> = expr.iter().cloned().map(ENodeOrVar::ENode).collect(); let ast = RecExpr::from(nodes); Self::new(ast) } } impl<L: Language> From<PatternAst<L>> for Pattern<L> { fn from(ast: PatternAst<L>) -> Self { Self::new(ast) } } impl<L: Language> TryFrom<Pattern<L>> for RecExpr<L> { type Error = Var; fn try_from(pat: Pattern<L>) -> Result<Self, Self::Error> { let nodes = pat.ast.as_ref().iter().cloned(); let ns: Result<Vec<_>, _> = nodes .map(|n| match n { ENodeOrVar::ENode(n) => Ok(n), ENodeOrVar::Var(v) => Err(v), }) .collect(); ns.map(RecExpr::from) } } impl<L: Language + Display> Display for Pattern<L> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { Display::fmt(&self.ast, f) } } /// The result of searching a [`Searcher`] over one eclass. /// /// Note that one [`SearchMatches`] can contain many found /// substititions. So taking the length of a list of [`SearchMatches`] /// tells you how many eclasses something was matched in, _not_ how /// many matches were found total. /// #[derive(Debug)] pub struct SearchMatches<'a, L: Language> { /// The eclass id that these matches were found in. pub eclass: Id, /// The substitutions for each match. pub substs: Vec<Subst>, /// Optionally, an ast for the matches used in proof production. 
pub ast: Option<Cow<'a, PatternAst<L>>>, } impl<L: Language, A: Analysis<L>> Searcher<L, A> for Pattern<L> { fn get_pattern_ast(&self) -> Option<&PatternAst<L>> { Some(&self.ast) } fn search(&self, egraph: &EGraph<L, A>) -> Vec<SearchMatches<L>> { match self.ast.as_ref().last().unwrap() { ENodeOrVar::ENode(e) => { #[allow(clippy::mem_discriminant_non_enum)] let key = std::mem::discriminant(e); match egraph.classes_by_op.get(&key) { None => vec![], Some(ids) => ids .iter() .filter_map(|&id| self.search_eclass(egraph, id)) .collect(), } } ENodeOrVar::Var(_) => egraph .classes() .filter_map(|e| self.search_eclass(egraph, e.id)) .collect(), } } fn search_eclass(&self, egraph: &EGraph<L, A>, eclass: Id) -> Option<SearchMatches<L>> {
fn vars(&self) -> Vec<Var> { Pattern::vars(self) } } impl<L, A> Applier<L, A> for Pattern<L> where L: Language, A: Analysis<L>, { fn get_pattern_ast(&self) -> Option<&PatternAst<L>> { Some(&self.ast) } fn apply_matches( &self, egraph: &mut EGraph<L, A>, matches: &[SearchMatches<L>], rule_name: Symbol, ) -> Vec<Id> { let mut added = vec![]; let ast = self.ast.as_ref(); let mut id_buf = vec![0.into(); ast.len()]; for mat in matches { let sast = mat.ast.as_ref().map(|cow| cow.as_ref()); for subst in &mat.substs { let did_something; let id; if egraph.are_explanations_enabled() { let (id_temp, did_something_temp) = egraph.union_instantiations(sast.unwrap(), &self.ast, subst, rule_name); did_something = did_something_temp; id = id_temp; } else { id = apply_pat(&mut id_buf, ast, egraph, subst); did_something = egraph.union(id, mat.eclass); } if did_something { added.push(id) } } } added } fn apply_one( &self, egraph: &mut EGraph<L, A>, eclass: Id, subst: &Subst, searcher_ast: Option<&PatternAst<L>>, rule_name: Symbol, ) -> Vec<Id> { let ast = self.ast.as_ref(); let mut id_buf = vec![0.into(); ast.len()]; let id = apply_pat(&mut id_buf, ast, egraph, subst); if let Some(ast) = searcher_ast { let (from, did_something) = egraph.union_instantiations(ast, &self.ast, subst, rule_name); if did_something { vec![from] } else { vec![] } } else if egraph.union(eclass, id) { vec![eclass] } else { vec![] } } fn vars(&self) -> Vec<Var> { Pattern::vars(self) } } pub(crate) fn apply_pat<L: Language, A: Analysis<L>>( ids: &mut [Id], pat: &[ENodeOrVar<L>], egraph: &mut EGraph<L, A>, subst: &Subst, ) -> Id { debug_assert_eq!(pat.len(), ids.len()); trace!("apply_rec {:2?} {:?}", pat, subst); for (i, pat_node) in pat.iter().enumerate() { let id = match pat_node { ENodeOrVar::Var(w) => subst[*w], ENodeOrVar::ENode(e) => { let n = e.clone().map_children(|child| ids[usize::from(child)]); trace!("adding: {:?}", n); egraph.add(n) } }; ids[i] = id; } *ids.last().unwrap() } #[cfg(test)] mod tests { use crate::{SymbolLang as S, *}; type EGraph = crate::EGraph<S, ()>; #[test] fn simple_match() { crate::init_logger(); let mut egraph = EGraph::default(); let (plus_id, _) = egraph.union_instantiations( &"(+ x y)".parse().unwrap(), &"(+ z w)".parse().unwrap(), &Default::default(), "union_plus".to_string(), ); egraph.rebuild(); let commute_plus = rewrite!( "commute_plus"; "(+ ?a ?b)" => "(+ ?b ?a)" ); let matches = commute_plus.search(&egraph); let n_matches: usize = matches.iter().map(|m| m.substs.len()).sum(); assert_eq!(n_matches, 2, "matches is wrong: {:#?}", matches); let applications = commute_plus.apply(&mut egraph, &matches); egraph.rebuild(); assert_eq!(applications.len(), 2); let actual_substs: Vec<Subst> = matches.iter().flat_map(|m| m.substs.clone()).collect(); println!("Here are the substs!"); for m in &actual_substs { println!("substs: {:?}", m); } egraph.dot().to_dot("target/simple-match.dot").unwrap(); use crate::extract::{AstSize, Extractor}; let ext = Extractor::new(&egraph, AstSize); let (_, best) = ext.find_best(plus_id); eprintln!("Best: {:#?}", best); } #[test] fn nonlinear_patterns() { crate::init_logger(); let mut egraph = EGraph::default(); egraph.add_expr(&"(f a a)".parse().unwrap()); egraph.add_expr(&"(f a (g a))))".parse().unwrap()); egraph.add_expr(&"(f a (g b))))".parse().unwrap()); egraph.add_expr(&"(h (foo a b) 0 1)".parse().unwrap()); egraph.add_expr(&"(h (foo a b) 1 0)".parse().unwrap()); egraph.add_expr(&"(h (foo a b) 0 0)".parse().unwrap()); egraph.rebuild(); let n_matches = |s: &str| 
s.parse::<Pattern<S>>().unwrap().n_matches(&egraph); assert_eq!(n_matches("(f ?x ?y)"), 3); assert_eq!(n_matches("(f ?x ?x)"), 1); assert_eq!(n_matches("(f ?x (g ?y))"), 2); assert_eq!(n_matches("(f ?x (g ?x))"), 1); assert_eq!(n_matches("(h ?x 0 0)"), 1); } }
let substs = self.program.run(egraph, eclass); if substs.is_empty() { None } else { let ast = Some(Cow::Borrowed(&self.ast)); Some(SearchMatches { eclass, substs, ast, }) } }
identifier_body
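Pattern::search in the sample above keys its candidate eclasses by std::mem::discriminant of the root enode (the classes_by_op map). Here is a self-contained sketch of that bucketing trick with a made-up Expr enum.

use std::collections::HashMap;
use std::mem::{discriminant, Discriminant};

// Bucket enum values by variant so a search can jump straight to candidates
// with the right operator, instead of scanning every node.
#[derive(Debug)]
enum Expr {
    Num(i32),
    Add(u32, u32),
    Mul(u32, u32),
}

fn main() {
    let nodes = vec![Expr::Num(1), Expr::Add(0, 0), Expr::Num(2), Expr::Mul(1, 2)];

    // Map each variant's discriminant to the ids of nodes with that variant.
    let mut by_op: HashMap<Discriminant<Expr>, Vec<usize>> = HashMap::new();
    for (id, node) in nodes.iter().enumerate() {
        by_op.entry(discriminant(node)).or_default().push(id);
    }

    // Only the Num nodes need to be visited when searching for a Num pattern.
    let num_ids = &by_op[&discriminant(&Expr::Num(0))];
    assert_eq!(num_ids, &vec![0, 2]);
    println!("{:?}", num_ids);
}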
pattern.rs
use fmt::Formatter; use log::*; use std::borrow::Cow; use std::fmt::{self, Display}; use std::{convert::TryFrom, str::FromStr}; use thiserror::Error; use crate::*; /// A pattern that can function as either a [`Searcher`] or [`Applier`]. /// /// A [`Pattern`] is essentially a for-all quantified expression with /// [`Var`]s as the variables (in the logical sense). /// /// When creating a [`Rewrite`], the most common thing to use as either /// the left hand side (the [`Searcher`]) or the right hand side /// (the [`Applier`]) is a [`Pattern`]. /// /// As a [`Searcher`], a [`Pattern`] does the intuitive /// thing. /// Here is a somewhat verbose formal-ish statement: /// Searching for a pattern in an egraph yields substitutions /// ([`Subst`]s) _s_ such that, for any _s'_โ€”where instead of /// mapping a variables to an eclass as _s_ does, _s'_ maps /// a variable to an arbitrary expression represented by that /// eclassโ€”_p[s']_ (the pattern under substitution _s'_) is also /// represented by the egraph. /// /// As an [`Applier`], a [`Pattern`] performs the given substitution /// and adds the result to the [`EGraph`]. /// /// Importantly, [`Pattern`] implements [`FromStr`] if the /// [`Language`] does. /// This is probably how you'll create most [`Pattern`]s. /// /// ``` /// use egg::*; /// define_language! { /// enum Math { /// Num(i32), /// "+" = Add([Id; 2]), /// } /// } /// /// let mut egraph = EGraph::<Math, ()>::default(); /// let a11 = egraph.add_expr(&"(+ 1 1)".parse().unwrap()); /// let a22 = egraph.add_expr(&"(+ 2 2)".parse().unwrap()); /// /// // use Var syntax (leading question mark) to get a /// // variable in the Pattern /// let same_add: Pattern<Math> = "(+ ?a ?a)".parse().unwrap(); /// /// // Rebuild before searching /// egraph.rebuild(); /// /// // This is the search method from the Searcher trait /// let matches = same_add.search(&egraph); /// let matched_eclasses: Vec<Id> = matches.iter().map(|m| m.eclass).collect(); /// assert_eq!(matched_eclasses, vec![a11, a22]); /// ``` /// /// [`FromStr`]: std::str::FromStr #[derive(Debug, PartialEq, Clone)] pub struct Pattern<L> { /// The actual pattern as a [`RecExpr`] pub ast: PatternAst<L>, program: machine::Program<L>, } /// A [`RecExpr`] that represents a /// [`Pattern`]. pub type PatternAst<L> = RecExpr<ENodeOrVar<L>>; impl<L: Language> PatternAst<L> { /// Returns a new `PatternAst` with the variables renames canonically pub fn alpha_rename(&self) -> Self { let mut vars = HashMap::<Var, Var>::default(); let mut new = PatternAst::default(); fn mkvar(i: usize) -> Var { let vs = &["?x", "?y", "?z", "?w"]; match vs.get(i) { Some(v) => v.parse().unwrap(), None => format!("?v{}", i - vs.len()).parse().unwrap(), } } for n in self.as_ref() { new.add(match n { ENodeOrVar::ENode(_) => n.clone(), ENodeOrVar::Var(v) => { let i = vars.len(); ENodeOrVar::Var(*vars.entry(*v).or_insert_with(|| mkvar(i))) } }); } new } } impl<L: Language> Pattern<L> { /// Creates a new pattern from the given pattern ast. pub fn new(ast: PatternAst<L>) -> Self { let ast = ast.compact(); let program = machine::Program::compile_from_pat(&ast); Pattern { ast, program } } /// Returns a list of the [`Var`]s in this pattern. 
pub fn vars(&self) -> Vec<Var> { let mut vars = vec![]; for n in self.ast.as_ref() { if let ENodeOrVar::Var(v) = n { if !vars.contains(v) { vars.push(*v) } } } vars } } impl<L: Language + Display> Pattern<L> { /// Pretty print this pattern as a sexp with the given width pub fn pretty(&self, width: usize) -> String { self.ast.pretty(width) } } /// The language of [`Pattern`]s. /// #[derive(Debug, Hash, PartialEq, Eq, Clone, PartialOrd, Ord)] pub enum ENodeOrVar<L> { /// An enode from the underlying [`Language`] ENode(L), /// A pattern variable Var(Var), } impl<L: Language> Language for ENodeOrVar<L> { fn matches(&self, _other: &Self) -> bool { panic!("Should never call this") } fn children(&self) -> &[Id] { match self { ENodeOrVar::ENode(n) => n.children(), ENodeOrVar::Var(_) => &[], } } fn children_mut(&mut self) -> &mut [Id] { match self { ENodeOrVar::ENode(n) => n.children_mut(), ENodeOrVar::Var(_) => &mut [], } } } impl<L: Language + Display> Display for ENodeOrVar<L> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { Self::ENode(node) => Display::fmt(node, f), Self::Var(var) => Display::fmt(var, f), } } } #[derive(Debug, Error)] pub enum ENodeOrVarParseError<E> { #[error(transparent)] BadVar(<Var as FromStr>::Err), #[error("tried to parse pattern variable {0:?} as an operator")] UnexpectedVar(String), #[error(transparent)] BadOp(E), } impl<L: FromOp> FromOp for ENodeOrVar<L> { type Error = ENodeOrVarParseError<L::Error>; fn from_op(op: &str, children: Vec<Id>) -> Result<Self, Self::Error> { use ENodeOrVarParseError::*; if op.starts_with('?') && op.len() > 1 { if children.is_empty() { op.parse().map(Self::Var).map_err(BadVar) } else { Err(UnexpectedVar(op.to_owned())) } } else { L::from_op(op, children).map(Self::ENode).map_err(BadOp) } } } impl<L: FromOp> std::str::FromStr for Pattern<L> { type Err = RecExprParseError<ENodeOrVarParseError<L::Error>>; fn from_str(s: &str) -> Result<Self, Self::Err> { PatternAst::from_str(s).map(Self::from) } } impl<'a, L: Language> From<&'a [L]> for Pattern<L> { fn from(expr: &'a [L]) -> Self { let nodes: Vec<_> = expr.iter().cloned().map(ENodeOrVar::ENode).collect(); let ast = RecExpr::from(nodes); Self::new(ast) } } impl<L: Language> From<PatternAst<L>> for Pattern<L> { fn from(ast: PatternAst<L>) -> Self { Self::new(ast) } } impl<L: Language> TryFrom<Pattern<L>> for RecExpr<L> { type Error = Var; fn try_from(pat: Pattern<L>) -> Result<Self, Self::Error> { let nodes = pat.ast.as_ref().iter().cloned(); let ns: Result<Vec<_>, _> = nodes .map(|n| match n { ENodeOrVar::ENode(n) => Ok(n), ENodeOrVar::Var(v) => Err(v), }) .collect(); ns.map(RecExpr::from) } } impl<L: Language + Display> Display for Pattern<L> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { Display::fmt(&self.ast, f) } } /// The result of searching a [`Searcher`] over one eclass. /// /// Note that one [`SearchMatches`] can contain many found /// substititions. So taking the length of a list of [`SearchMatches`] /// tells you how many eclasses something was matched in, _not_ how /// many matches were found total. /// #[derive(Debug)] pub struct SearchMatches<'a, L: Language> { /// The eclass id that these matches were found in. pub eclass: Id, /// The substitutions for each match. pub substs: Vec<Subst>, /// Optionally, an ast for the matches used in proof production. 
pub ast: Option<Cow<'a, PatternAst<L>>>, } impl<L: Language, A: Analysis<L>> Searcher<L, A> for Pattern<L> { fn get_pattern_ast(&self) -> Option<&PatternAst<L>> { Some(&self.ast) } fn search(&self, egraph: &EGraph<L, A>) -> Vec<SearchMatches<L>> { match self.ast.as_ref().last().unwrap() { ENodeOrVar::ENode(e) => { #[allow(clippy::mem_discriminant_non_enum)] let key = std::mem::discriminant(e); match egraph.classes_by_op.get(&key) { None => vec![], Some(ids) => ids .iter() .filter_map(|&id| self.search_eclass(egraph, id)) .collect(), } } ENodeOrVar::Var(_) => egraph .classes() .filter_map(|e| self.search_eclass(egraph, e.id)) .collect(), } } fn search_eclass(&self, egraph: &EGraph<L, A>, eclass: Id) -> Option<SearchMatches<L>> { let substs = self.program.run(egraph, eclass); if substs.is_empty() { None } else { let ast = Some(Cow::Borrowed(&self.ast)); Some(SearchMatches { eclass, substs, ast, }) } } fn vars(&self) -> Vec<Var> { Pattern::vars(self) } } impl<L, A> Applier<L, A> for Pattern<L> where L: Language, A: Analysis<L>, { fn get_pattern_ast(&self) -> Option<&PatternAst<L>> { Some(&self.ast) } fn apply_matches( &self, egraph: &mut EGraph<L, A>, matches: &[SearchMatches<L>], rule_name: Symbol, ) -> Vec<Id> { let mut added = vec![]; let ast = self.ast.as_ref(); let mut id_buf = vec![0.into(); ast.len()]; for mat in matches { let sast = mat.ast.as_ref().map(|cow| cow.as_ref()); for subst in &mat.substs { let did_something; let id; if egraph.are_explanations_enabled() { let (id_temp, did_something_temp) = egraph.union_instantiations(sast.unwrap(), &self.ast, subst, rule_name); did_something = did_something_temp; id = id_temp; } else { id = apply_pat(&mut id_buf, ast, egraph, subst); did_something = egraph.union(id, mat.eclass); } if did_something { added.push(id) } } } added } fn apply_one( &self, egraph: &mut EGraph<L, A>, eclass: Id, subst: &Subst, searcher_ast: Option<&PatternAst<L>>, rule_name: Symbol, ) -> Vec<Id> { let ast = self.ast.as_ref(); let mut id_buf = vec![0.into(); ast.len()]; let id = apply_pat(&mut id_buf, ast, egraph, subst); if let Some(ast) = searcher_ast { let (from, did_something) = egraph.union_instantiations(ast, &self.ast, subst, rule_name); if did_something { vec![from] } else { vec![] } } else if egraph.union(eclass, id) { vec![eclass] } else { vec![] } } fn vars(&self) -> Vec<Var> { Pattern::vars(self) } } pub(crate) fn apply_pat<L: Language, A: Analysis<L>>( ids: &mut [Id], pat: &[ENodeOrVar<L>], egraph: &mut EGraph<L, A>, subst: &Subst, ) -> Id { debug_assert_eq!(pat.len(), ids.len()); trace!("apply_rec {:2?} {:?}", pat, subst); for (i, pat_node) in pat.iter().enumerate() { let id = match pat_node { ENodeOrVar::Var(w) => subst[*w], ENodeOrVar::ENode(e) => {
}; ids[i] = id; } *ids.last().unwrap() } #[cfg(test)] mod tests { use crate::{SymbolLang as S, *}; type EGraph = crate::EGraph<S, ()>; #[test] fn simple_match() { crate::init_logger(); let mut egraph = EGraph::default(); let (plus_id, _) = egraph.union_instantiations( &"(+ x y)".parse().unwrap(), &"(+ z w)".parse().unwrap(), &Default::default(), "union_plus".to_string(), ); egraph.rebuild(); let commute_plus = rewrite!( "commute_plus"; "(+ ?a ?b)" => "(+ ?b ?a)" ); let matches = commute_plus.search(&egraph); let n_matches: usize = matches.iter().map(|m| m.substs.len()).sum(); assert_eq!(n_matches, 2, "matches is wrong: {:#?}", matches); let applications = commute_plus.apply(&mut egraph, &matches); egraph.rebuild(); assert_eq!(applications.len(), 2); let actual_substs: Vec<Subst> = matches.iter().flat_map(|m| m.substs.clone()).collect(); println!("Here are the substs!"); for m in &actual_substs { println!("substs: {:?}", m); } egraph.dot().to_dot("target/simple-match.dot").unwrap(); use crate::extract::{AstSize, Extractor}; let ext = Extractor::new(&egraph, AstSize); let (_, best) = ext.find_best(plus_id); eprintln!("Best: {:#?}", best); } #[test] fn nonlinear_patterns() { crate::init_logger(); let mut egraph = EGraph::default(); egraph.add_expr(&"(f a a)".parse().unwrap()); egraph.add_expr(&"(f a (g a))))".parse().unwrap()); egraph.add_expr(&"(f a (g b))))".parse().unwrap()); egraph.add_expr(&"(h (foo a b) 0 1)".parse().unwrap()); egraph.add_expr(&"(h (foo a b) 1 0)".parse().unwrap()); egraph.add_expr(&"(h (foo a b) 0 0)".parse().unwrap()); egraph.rebuild(); let n_matches = |s: &str| s.parse::<Pattern<S>>().unwrap().n_matches(&egraph); assert_eq!(n_matches("(f ?x ?y)"), 3); assert_eq!(n_matches("(f ?x ?x)"), 1); assert_eq!(n_matches("(f ?x (g ?y))))"), 2); assert_eq!(n_matches("(f ?x (g ?x))))"), 1); assert_eq!(n_matches("(h ?x 0 0)"), 1); } }
let n = e.clone().map_children(|child| ids[usize::from(child)]); trace!("adding: {:?}", n); egraph.add(n) }
conditional_block
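The pattern.rs rows above implement `Searcher` and `Applier` for `Pattern`, which is what egg's rewrite machinery calls into. As a minimal sketch of how those impls are usually driven end to end, assuming egg's published API (`Runner`, `Extractor`, `AstSize`, the `rewrite!` macro), none of which appears in the excerpt itself:

```rust
// Sketch only, not part of the dataset row above. A rewrite! expands to a
// Rewrite whose searcher and applier are the Pattern impls shown earlier;
// Runner repeatedly searches and applies them until saturation or a limit.
use egg::{rewrite, AstSize, Extractor, RecExpr, Rewrite, Runner, SymbolLang};

fn main() {
    let rules: &[Rewrite<SymbolLang, ()>] =
        &[rewrite!("commute-add"; "(+ ?a ?b)" => "(+ ?b ?a)")];

    let start: RecExpr<SymbolLang> = "(+ x y)".parse().unwrap();
    let runner = Runner::default().with_expr(&start).run(rules);

    // Extract the cheapest representative of the root e-class by AST size.
    let extractor = Extractor::new(&runner.egraph, AstSize);
    let (cost, best) = extractor.find_best(runner.roots[0]);
    println!("best (cost {}): {}", cost, best);
}
```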
main.rs
#![allow(clippy::single_match)] use ::std::collections::HashMap; use ::std::sync::Mutex; use anyhow::Error; use chrono::naive::NaiveDate as Date; use derive_more::Display; use log::*; use serde::{Deserialize, Deserializer, Serialize, Serializer}; mod citation; mod filters; mod github; mod md; lazy_static::lazy_static! { static ref FOOTNOTES: Mutex<Option<HashMap<String, usize>>> = Mutex::new(Some(HashMap::new())); } struct DateRange { start: Date, end: Option<Date>, } impl DateRange { fn to_resume_string(&self) -> String { if let Some(end) = self.end { format!( "{} - {}", self.start.format("%b,&nbsp;%Y"), end.format("%b,&nbsp;%Y") ) } else { format!("{} - Current", self.start.format("%b,&nbsp;%Y")) } } } impl std::fmt::Display for DateRange { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { if let Some(end) = self.end { write!(f, "{}~{}", self.start.format("%Y-%m"), end.format("%Y-%m")) } else { write!(f, "{}~", self.start.format("%Y-%m")) } } } impl std::str::FromStr for DateRange { type Err = anyhow::Error; fn from_str(s: &str) -> Result<Self, Self::Err> { let p: Vec<_> = s.split('~').collect(); if p.len() != 2 { Err(anyhow::anyhow!( "A date range should have 2 and only 2 dates" )) } else { Ok(DateRange { start: Date::parse_from_str(&format!("{}-01", p[0]), "%Y-%m-%d").unwrap(), end: if p[1].is_empty() { None } else { Some(Date::parse_from_str(&format!("{}-01", p[1]), "%Y-%m-%d").unwrap()) }, }) } } } impl Serialize for DateRange { fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> { s.serialize_str(&self.to_string()) } } impl<'a> Deserialize<'a> for DateRange { fn deserialize<D: Deserializer<'a>>(d: D) -> Result<Self, D::Error> { let s = String::deserialize(d)?; s.parse().map_err(serde::de::Error::custom) } } #[derive(Serialize, Deserialize, Debug)] #[serde(untagged)] enum Citation { Raw(String), RawWithYear { text: String, year: Option<u32> }, Url(citation::UrlCitation), Doi(citation::DoiCitation), Bibtex(citation::BibtexCitation), } impl Citation { fn to_raw(&self) -> Option<Citation> { use Citation::*; match self { Raw(s) => Some(Raw(s.clone())), RawWithYear { text, .. } => Some(Raw(text.clone())), Url(url) => url.to_raw(), Doi(doi) => doi.to_raw(), Bibtex(bib) => bib.to_raw(), } } fn set_year(self, year: Option<u32>) -> Citation { use Citation::*; if let Raw(s) = self { RawWithYear { text: s, year } } else { self } } fn to_raw_with_year(&self) -> Option<Citation> { use Citation::*; match self { Raw(s) => Some(RawWithYear { text: s.clone(), year: None, }), RawWithYear { text, year } => Some(RawWithYear { text: text.clone(), year: *year, }), Url(url) => url.to_raw().map(|v| v.set_year(url.year())), Doi(doi) => doi.to_raw().map(|v| v.set_year(doi.year())), Bibtex(bib) => bib.to_raw().map(|v| v.set_year(bib.year())), } } } #[derive(Serialize, Deserialize)] enum Degree { BS, MS, PhD, } impl Degree { fn
(&self) -> String { match self { Self::BS => "Bachelor of Science".into(), Self::MS => "Master of Science".into(), Self::PhD => "PhD".into(), } } } #[derive(Serialize, Deserialize)] struct Education { institution: String, degree: Degree, major: String, duration: DateRange, #[serde(skip_serializing_if = "Option::is_none", default)] location: Option<String>, #[serde(skip_serializing_if = "Option::is_none", default)] gpa: Option<f32>, #[serde(skip_serializing_if = "Option::is_none", default)] courses: Option<Vec<String>>, } #[derive(Serialize, Deserialize)] struct Experience { company: String, position: String, duration: DateRange, description: String, #[serde(skip_serializing_if = "Option::is_none", default)] location: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] tags: Vec<String>, } #[derive(Serialize, Deserialize)] struct Contact { #[serde(rename = "type")] type_: String, value: String, } #[derive(Serialize, Deserialize)] struct Skill { category: String, #[serde(default)] description: Option<String>, } #[derive(Serialize, Deserialize)] struct Person { name: String, #[serde(default)] resume_url: Option<String>, contacts: Vec<Contact>, educations: Vec<Education>, experiences: Vec<Experience>, projects: Vec<ProjectParam>, #[serde(default)] skills: Vec<Skill>, #[serde(default)] references: HashMap<String, Citation>, #[serde(default)] publications: Vec<Citation>, } #[allow(clippy::large_enum_variant)] #[derive(Serialize, Deserialize)] #[serde(untagged)] enum ProjectParam { Import(ProjectImport), Sort { order_by: ProjectSortOrder }, ImportMode { import_mode: ProjectImportMode }, Raw(Project), } #[derive(Serialize, Deserialize, Copy, Clone, PartialEq)] #[serde(rename_all = "snake_case")] enum ProjectImportMode { Whitelist, Combine, } impl Default for ProjectImportMode { fn default() -> Self { Self::Combine } } #[derive(Serialize, Deserialize, Copy, Clone)] #[serde(rename_all = "snake_case")] enum ProjectSortOrder { Stars, Forks, StarsThenForks, ForksThenStars, Manual, } #[derive(Serialize, Deserialize)] #[serde(tag = "from", rename_all = "lowercase")] enum ProjectImport { GitHub { #[serde(default)] ignore_forks: bool, #[serde(default)] repos: Option<Vec<String>>, #[serde(default)] token: Option<String>, }, } #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Display)] #[serde(rename_all = "lowercase")] enum ProjectRole { Owner, Maintainer, Contributor, } /// Single digit precision deciaml real number #[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Ord, Eq)] struct Decimal1(u64); impl ::std::fmt::Display for Decimal1 { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { <f64 as ::std::fmt::Display>::fmt(&(*self).into(), f) } } impl ::std::ops::Add<Decimal1> for Decimal1 { type Output = Self; fn add(self, rhs: Self) -> Self::Output { Self(rhs.0 + self.0) } } impl ::std::ops::AddAssign<Decimal1> for Decimal1 { fn add_assign(&mut self, rhs: Self) { self.0 += rhs.0; } } impl From<f64> for Decimal1 { fn from(f: f64) -> Self { Self((f * 10.0) as u64) } } impl From<Decimal1> for f64 { fn from(f: Decimal1) -> f64 { f.0 as f64 / 10.0 } } impl<'de> ::serde::Deserialize<'de> for Decimal1 { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { struct Visitor; impl<'de> ::serde::de::Visitor<'de> for Visitor { type Value = Decimal1; fn expecting(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { write!(fmt, "a float") } fn visit_f64<E>(self, v: f64) -> Result<Self::Value, E> where E: 
::serde::de::Error, { Ok(v.into()) } } deserializer.deserialize_f64(Visitor) } } impl ::serde::Serialize for Decimal1 { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { serializer.serialize_f64((*self).into()) } } #[derive(Serialize, Deserialize, Debug, Clone)] struct LanguageStat { language: String, percentage: Decimal1, } #[serde_with::serde_as] #[derive(Serialize, Deserialize, Debug, Clone)] struct Project { name: String, #[serde(default, skip_serializing_if = "Option::is_none")] description: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] contributions: Option<String>, #[serde( with = "serde_option_display_fromstr", default, skip_serializing_if = "Option::is_none" )] url: Option<url::Url>, #[serde(default, skip_serializing_if = "Option::is_none")] stars: Option<u64>, #[serde(default, skip_serializing_if = "Option::is_none")] forks: Option<u64>, #[serde(default, skip_serializing_if = "Option::is_none")] active: Option<bool>, #[serde(default, skip_serializing_if = "Option::is_none")] owner: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] commits: Option<u64>, #[serde(default, skip_serializing_if = "Option::is_none")] additions: Option<u64>, #[serde(default, skip_serializing_if = "Option::is_none")] deletions: Option<u64>, #[serde(default, skip_serializing_if = "Vec::is_empty")] languages: Vec<LanguageStat>, #[serde(default, skip_serializing_if = "Vec::is_empty")] tags: Vec<String>, #[serde(default, skip_serializing_if = "Option::is_none")] role: Option<ProjectRole>, } mod serde_option_display_fromstr { pub(crate) fn deserialize<'de, D, T>(deser: D) -> Result<Option<T>, D::Error> where D: serde::Deserializer<'de>, T: ::std::str::FromStr, <T as ::std::str::FromStr>::Err: ::std::fmt::Display, { #[derive(Default)] struct Visitor<T>(::std::marker::PhantomData<T>); impl<'de, T> serde::de::Visitor<'de> for Visitor<T> where T: ::std::str::FromStr, <T as ::std::str::FromStr>::Err: ::std::fmt::Display, { type Value = Option<T>; fn expecting(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { write!(fmt, "a string") } fn visit_str<E: serde::de::Error>(self, v: &str) -> Result<Self::Value, E> { v.parse() .map_err(serde::de::Error::custom) .map(Option::Some) } } deser.deserialize_str(Visitor::<T>(Default::default())) } pub(crate) fn serialize<S, T>(v: &Option<T>, ser: S) -> Result<S::Ok, S::Error> where S: serde::ser::Serializer, T: ::std::fmt::Display, { match v { Some(v) => ser.serialize_str(&v.to_string()), None => ser.serialize_none(), } } } use askama::Template; struct ContactParams { value: String, icon: Option<String>, link: Option<String>, } #[derive(Template)] #[template(path = "resume.html", escape = "none")] struct ResumeParams<'a> { name: &'a str, resume_url: Option<&'a str>, contacts: Vec<ContactParams>, educations: &'a [Education], experiences: &'a [Experience], projects: Vec<Project>, references: Vec<(&'a str, &'a str)>, publications: Vec<(&'a str, Option<u32>)>, skills: &'a [Skill], } async fn fetch(mut person: Person) -> anyhow::Result<Person> { use futures::stream::TryStreamExt; let github_username = person .contacts .iter() .find(|v| v.type_ == "github") .map(|v| v.value.as_str()); let mut project_map = HashMap::new(); let mut sort_order = None; let mut import_mode = ProjectImportMode::Combine; // Process project imports first for pi in person.projects.iter() { match pi { ProjectParam::Import(ProjectImport::GitHub { ignore_forks, repos: None, token, }) => project_map.extend( 
github::get_user_projects_from_github(*ignore_forks, token.clone()) .await? .into_iter() .map(|v| (v.name.clone(), v)), ), ProjectParam::Import(ProjectImport::GitHub { repos: Some(repos), token, .. }) => { project_map.extend( github::get_projects_info_from_github( repos, token.clone(), github_username.map(ToOwned::to_owned), ) .await? .into_iter() .map(|v| (v.name.clone(), v)), ); } ProjectParam::Sort { order_by } => { sort_order = Some(*order_by); } ProjectParam::ImportMode { import_mode: im } => { import_mode = *im; } _ => {} } } if sort_order.is_none() && import_mode == ProjectImportMode::Whitelist { sort_order = Some(ProjectSortOrder::Manual); } // Adding manually project entries for pi in person.projects.iter_mut() { match pi { ProjectParam::Raw(p) => { p.languages .sort_unstable_by_key(|v| ::std::cmp::Reverse(v.percentage)); if let Some(old) = project_map.get_mut(&p.name) { debug!("Merging project entry {}", p.name); if p.url.is_some() { old.url = p.url.clone(); } if p.description.is_some() { old.description = p.description.clone(); } if p.owner.is_some() { old.owner = p.owner.clone(); } if p.contributions.is_some() { old.contributions = p.contributions.clone(); } if !p.tags.is_empty() { old.tags = p.tags.clone(); } if p.role.is_some() { old.role = p.role; } } else { project_map.insert(p.name.clone(), p.clone()); } } _ => {} } } let mut projects: Vec<_>; if let ProjectImportMode::Whitelist = import_mode { let raw_entries: Vec<_> = person .projects .iter() .filter_map(|p| match p { ProjectParam::Raw(p) => Some(&p.name), _ => None, }) .collect(); projects = raw_entries .into_iter() .filter_map(|name| project_map.get(name).map(Clone::clone)) .collect(); } else { projects = project_map.iter().map(|(_, v)| v.clone()).collect(); } if let Some(sort_order) = sort_order { use ::std::cmp::Reverse; match sort_order { ProjectSortOrder::Stars => projects.sort_unstable_by_key(|v| Reverse(v.stars)), ProjectSortOrder::Forks => projects.sort_unstable_by_key(|v| Reverse(v.forks)), ProjectSortOrder::ForksThenStars => { projects.sort_unstable_by_key(|v| Reverse((v.forks, v.stars))) } ProjectSortOrder::StarsThenForks => { projects.sort_unstable_by_key(|v| Reverse((v.stars, v.forks))) } ProjectSortOrder::Manual if import_mode != ProjectImportMode::Whitelist => { debug!("Manual sort"); let raw_entries: HashMap<_, _> = person .projects .iter() .filter_map(|p| match p { ProjectParam::Raw(p) => Some(&p.name), _ => None, }) .enumerate() .map(|(i, v)| (v, i)) .collect(); projects.sort_unstable_by_key(|v| raw_entries.get(&v.name).map(|v| *v)); } _ => {} } } debug!("{}", serde_yaml::to_string(&projects)?); person.projects = projects.into_iter().map(|v| ProjectParam::Raw(v)).collect(); person.projects.push(ProjectParam::Sort { order_by: ProjectSortOrder::Manual, }); // Fetch citations use ::futures::FutureExt; debug!("{:?}", person.references); let fut: futures::stream::FuturesUnordered<_> = person .references .iter_mut() .map(|(_, v)| v) .chain(person.publications.iter_mut()) .map(|v| { async move { Result::<_, Error>::Ok(match v { Citation::Url(url) => url.fetch().await?, Citation::Doi(doi) => doi.fetch().await?, Citation::Bibtex(bib) => bib.fetch().await?, _ => (), }) } .boxed() }) .collect(); let () = fut.try_collect().await?; person.references = person .references .into_iter() .filter_map(|(k, v)| v.to_raw().map(|v| (k, v))) .collect(); person.publications = person .publications .into_iter() .filter_map(|v| v.to_raw_with_year()) .collect(); debug!("{:?}", person.references); Ok(person) } fn build_params<'a>( 
p: &'a Person, footnotes: Option<HashMap<String, usize>>, ) -> Result<ResumeParams<'a>, Error> { let mut c = Vec::new(); let it = p.references.iter().filter_map(|(k, v)| match v { Citation::Raw(s) => Some((k.as_str(), s.as_str())), _ => None, }); let mut references: Vec<_> = if let Some(footnotes) = footnotes.as_ref() { // Remove unused references it.filter(|(k, _)| footnotes.get(*k).is_some()).collect() } else { it.collect() }; // Sort references if let Some(footnotes) = footnotes { references.sort_unstable_by_key(|(k, _)| footnotes.get(*k).unwrap()); } for i in p.contacts.iter() { c.push(ContactParams { link: match i.type_.as_str() { "github" => Some(format!("https://github.com/{}", i.value)), "email" => Some(format!("mailto:{}", i.value)), "blog" => Some(i.value.clone()), _ => None, }, icon: match i.type_.as_str() { "github" => Some("icons/github.svg".into()), "email" => Some("icons/mail.svg".into()), "blog" => Some("icons/blog.svg".into()), _ => None, }, value: i.value.clone(), }); } Ok(ResumeParams { name: &p.name, resume_url: p.resume_url.as_ref().map(String::as_str), contacts: c, educations: p.educations.as_slice(), experiences: p.experiences.as_slice(), projects: p .projects .iter() .filter_map(|v| match v { ProjectParam::Raw(p) => Some(p.clone()), _ => None, }) .collect(), publications: p .publications .iter() .filter_map(|v| match v { Citation::RawWithYear { text, year } => Some((text.as_str(), *year)), _ => None, }) .collect(), references, skills: p.skills.as_slice(), }) } fn main() -> Result<(), Error> { env_logger::init(); let args = clap::Command::new("resume") .arg(clap::Arg::new("input").required(true)) .get_matches(); let input_filename = args.get_one::<String>("input").unwrap(); let cache_filename = format!("{}-cache", input_filename); let cache_info = std::fs::metadata(&cache_filename); let input_info = std::fs::metadata(&input_filename)?; let cache_data = if cache_info.is_ok() { std::fs::read(&cache_filename).ok() } else { None }; let r = if cache_data.is_some() && cache_info?.modified()? >= input_info.modified()? { serde_yaml::from_slice::<Person>(cache_data.unwrap().as_slice())? } else { let f = std::fs::read(input_filename).unwrap(); let r = serde_yaml::from_slice::<Person>(f.as_slice())?; debug!("{}", serde_yaml::to_string(&r)?); let mut runtime = tokio::runtime::Runtime::new()?; let r = runtime.block_on(fetch(r))?; if let Some(mut cache_f) = std::fs::File::create(format!("{}-cache", input_filename)).ok() { use ::std::io::Write; write!(cache_f, "{}", serde_yaml::to_string(&r)?)?; } r }; let resume = build_params(&r, None)?; resume.render()?; let footnotes = FOOTNOTES.lock().unwrap().replace(HashMap::new()).unwrap(); let resume = build_params(&r, Some(footnotes))?; println!("{}", resume.render()?); Ok(()) }
to_resume_string
identifier_name
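`DateRange` above round-trips a compact `YYYY-MM~YYYY-MM` notation, with an empty right-hand side meaning the range is still open. A small hedged check of that behaviour, assuming it sits next to the definitions in the row above (it is not part of the original file):

```rust
// Hypothetical usage of DateRange as defined above: parse the "~" notation,
// then render both the Display form and the resume-facing form.
fn main() -> anyhow::Result<()> {
    let ongoing: DateRange = "2021-09~".parse()?; // no end date yet
    assert_eq!(ongoing.to_string(), "2021-09~");
    assert_eq!(ongoing.to_resume_string(), "Sep,&nbsp;2021 - Current");

    let finished: DateRange = "2019-06~2021-08".parse()?;
    assert_eq!(finished.to_resume_string(), "Jun,&nbsp;2019 - Aug,&nbsp;2021");
    Ok(())
}
```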
main.rs
#![allow(clippy::single_match)] use ::std::collections::HashMap; use ::std::sync::Mutex; use anyhow::Error; use chrono::naive::NaiveDate as Date; use derive_more::Display; use log::*; use serde::{Deserialize, Deserializer, Serialize, Serializer}; mod citation; mod filters; mod github; mod md; lazy_static::lazy_static! { static ref FOOTNOTES: Mutex<Option<HashMap<String, usize>>> = Mutex::new(Some(HashMap::new())); } struct DateRange { start: Date, end: Option<Date>, } impl DateRange { fn to_resume_string(&self) -> String { if let Some(end) = self.end { format!( "{} - {}", self.start.format("%b,&nbsp;%Y"), end.format("%b,&nbsp;%Y") ) } else { format!("{} - Current", self.start.format("%b,&nbsp;%Y")) } } } impl std::fmt::Display for DateRange { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { if let Some(end) = self.end { write!(f, "{}~{}", self.start.format("%Y-%m"), end.format("%Y-%m")) } else { write!(f, "{}~", self.start.format("%Y-%m")) } } } impl std::str::FromStr for DateRange { type Err = anyhow::Error; fn from_str(s: &str) -> Result<Self, Self::Err> { let p: Vec<_> = s.split('~').collect(); if p.len() != 2 { Err(anyhow::anyhow!( "A date range should have 2 and only 2 dates" )) } else { Ok(DateRange { start: Date::parse_from_str(&format!("{}-01", p[0]), "%Y-%m-%d").unwrap(), end: if p[1].is_empty() { None } else { Some(Date::parse_from_str(&format!("{}-01", p[1]), "%Y-%m-%d").unwrap()) }, }) } } } impl Serialize for DateRange { fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> { s.serialize_str(&self.to_string()) }
fn deserialize<D: Deserializer<'a>>(d: D) -> Result<Self, D::Error> { let s = String::deserialize(d)?; s.parse().map_err(serde::de::Error::custom) } } #[derive(Serialize, Deserialize, Debug)] #[serde(untagged)] enum Citation { Raw(String), RawWithYear { text: String, year: Option<u32> }, Url(citation::UrlCitation), Doi(citation::DoiCitation), Bibtex(citation::BibtexCitation), } impl Citation { fn to_raw(&self) -> Option<Citation> { use Citation::*; match self { Raw(s) => Some(Raw(s.clone())), RawWithYear { text, .. } => Some(Raw(text.clone())), Url(url) => url.to_raw(), Doi(doi) => doi.to_raw(), Bibtex(bib) => bib.to_raw(), } } fn set_year(self, year: Option<u32>) -> Citation { use Citation::*; if let Raw(s) = self { RawWithYear { text: s, year } } else { self } } fn to_raw_with_year(&self) -> Option<Citation> { use Citation::*; match self { Raw(s) => Some(RawWithYear { text: s.clone(), year: None, }), RawWithYear { text, year } => Some(RawWithYear { text: text.clone(), year: *year, }), Url(url) => url.to_raw().map(|v| v.set_year(url.year())), Doi(doi) => doi.to_raw().map(|v| v.set_year(doi.year())), Bibtex(bib) => bib.to_raw().map(|v| v.set_year(bib.year())), } } } #[derive(Serialize, Deserialize)] enum Degree { BS, MS, PhD, } impl Degree { fn to_resume_string(&self) -> String { match self { Self::BS => "Bachelor of Science".into(), Self::MS => "Master of Science".into(), Self::PhD => "PhD".into(), } } } #[derive(Serialize, Deserialize)] struct Education { institution: String, degree: Degree, major: String, duration: DateRange, #[serde(skip_serializing_if = "Option::is_none", default)] location: Option<String>, #[serde(skip_serializing_if = "Option::is_none", default)] gpa: Option<f32>, #[serde(skip_serializing_if = "Option::is_none", default)] courses: Option<Vec<String>>, } #[derive(Serialize, Deserialize)] struct Experience { company: String, position: String, duration: DateRange, description: String, #[serde(skip_serializing_if = "Option::is_none", default)] location: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] tags: Vec<String>, } #[derive(Serialize, Deserialize)] struct Contact { #[serde(rename = "type")] type_: String, value: String, } #[derive(Serialize, Deserialize)] struct Skill { category: String, #[serde(default)] description: Option<String>, } #[derive(Serialize, Deserialize)] struct Person { name: String, #[serde(default)] resume_url: Option<String>, contacts: Vec<Contact>, educations: Vec<Education>, experiences: Vec<Experience>, projects: Vec<ProjectParam>, #[serde(default)] skills: Vec<Skill>, #[serde(default)] references: HashMap<String, Citation>, #[serde(default)] publications: Vec<Citation>, } #[allow(clippy::large_enum_variant)] #[derive(Serialize, Deserialize)] #[serde(untagged)] enum ProjectParam { Import(ProjectImport), Sort { order_by: ProjectSortOrder }, ImportMode { import_mode: ProjectImportMode }, Raw(Project), } #[derive(Serialize, Deserialize, Copy, Clone, PartialEq)] #[serde(rename_all = "snake_case")] enum ProjectImportMode { Whitelist, Combine, } impl Default for ProjectImportMode { fn default() -> Self { Self::Combine } } #[derive(Serialize, Deserialize, Copy, Clone)] #[serde(rename_all = "snake_case")] enum ProjectSortOrder { Stars, Forks, StarsThenForks, ForksThenStars, Manual, } #[derive(Serialize, Deserialize)] #[serde(tag = "from", rename_all = "lowercase")] enum ProjectImport { GitHub { #[serde(default)] ignore_forks: bool, #[serde(default)] repos: Option<Vec<String>>, #[serde(default)] token: Option<String>, }, } 
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Display)] #[serde(rename_all = "lowercase")] enum ProjectRole { Owner, Maintainer, Contributor, } /// Single digit precision deciaml real number #[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Ord, Eq)] struct Decimal1(u64); impl ::std::fmt::Display for Decimal1 { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { <f64 as ::std::fmt::Display>::fmt(&(*self).into(), f) } } impl ::std::ops::Add<Decimal1> for Decimal1 { type Output = Self; fn add(self, rhs: Self) -> Self::Output { Self(rhs.0 + self.0) } } impl ::std::ops::AddAssign<Decimal1> for Decimal1 { fn add_assign(&mut self, rhs: Self) { self.0 += rhs.0; } } impl From<f64> for Decimal1 { fn from(f: f64) -> Self { Self((f * 10.0) as u64) } } impl From<Decimal1> for f64 { fn from(f: Decimal1) -> f64 { f.0 as f64 / 10.0 } } impl<'de> ::serde::Deserialize<'de> for Decimal1 { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { struct Visitor; impl<'de> ::serde::de::Visitor<'de> for Visitor { type Value = Decimal1; fn expecting(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { write!(fmt, "a float") } fn visit_f64<E>(self, v: f64) -> Result<Self::Value, E> where E: ::serde::de::Error, { Ok(v.into()) } } deserializer.deserialize_f64(Visitor) } } impl ::serde::Serialize for Decimal1 { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { serializer.serialize_f64((*self).into()) } } #[derive(Serialize, Deserialize, Debug, Clone)] struct LanguageStat { language: String, percentage: Decimal1, } #[serde_with::serde_as] #[derive(Serialize, Deserialize, Debug, Clone)] struct Project { name: String, #[serde(default, skip_serializing_if = "Option::is_none")] description: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] contributions: Option<String>, #[serde( with = "serde_option_display_fromstr", default, skip_serializing_if = "Option::is_none" )] url: Option<url::Url>, #[serde(default, skip_serializing_if = "Option::is_none")] stars: Option<u64>, #[serde(default, skip_serializing_if = "Option::is_none")] forks: Option<u64>, #[serde(default, skip_serializing_if = "Option::is_none")] active: Option<bool>, #[serde(default, skip_serializing_if = "Option::is_none")] owner: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] commits: Option<u64>, #[serde(default, skip_serializing_if = "Option::is_none")] additions: Option<u64>, #[serde(default, skip_serializing_if = "Option::is_none")] deletions: Option<u64>, #[serde(default, skip_serializing_if = "Vec::is_empty")] languages: Vec<LanguageStat>, #[serde(default, skip_serializing_if = "Vec::is_empty")] tags: Vec<String>, #[serde(default, skip_serializing_if = "Option::is_none")] role: Option<ProjectRole>, } mod serde_option_display_fromstr { pub(crate) fn deserialize<'de, D, T>(deser: D) -> Result<Option<T>, D::Error> where D: serde::Deserializer<'de>, T: ::std::str::FromStr, <T as ::std::str::FromStr>::Err: ::std::fmt::Display, { #[derive(Default)] struct Visitor<T>(::std::marker::PhantomData<T>); impl<'de, T> serde::de::Visitor<'de> for Visitor<T> where T: ::std::str::FromStr, <T as ::std::str::FromStr>::Err: ::std::fmt::Display, { type Value = Option<T>; fn expecting(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { write!(fmt, "a string") } fn visit_str<E: serde::de::Error>(self, v: &str) -> Result<Self::Value, E> { v.parse() 
.map_err(serde::de::Error::custom) .map(Option::Some) } } deser.deserialize_str(Visitor::<T>(Default::default())) } pub(crate) fn serialize<S, T>(v: &Option<T>, ser: S) -> Result<S::Ok, S::Error> where S: serde::ser::Serializer, T: ::std::fmt::Display, { match v { Some(v) => ser.serialize_str(&v.to_string()), None => ser.serialize_none(), } } } use askama::Template; struct ContactParams { value: String, icon: Option<String>, link: Option<String>, } #[derive(Template)] #[template(path = "resume.html", escape = "none")] struct ResumeParams<'a> { name: &'a str, resume_url: Option<&'a str>, contacts: Vec<ContactParams>, educations: &'a [Education], experiences: &'a [Experience], projects: Vec<Project>, references: Vec<(&'a str, &'a str)>, publications: Vec<(&'a str, Option<u32>)>, skills: &'a [Skill], } async fn fetch(mut person: Person) -> anyhow::Result<Person> { use futures::stream::TryStreamExt; let github_username = person .contacts .iter() .find(|v| v.type_ == "github") .map(|v| v.value.as_str()); let mut project_map = HashMap::new(); let mut sort_order = None; let mut import_mode = ProjectImportMode::Combine; // Process project imports first for pi in person.projects.iter() { match pi { ProjectParam::Import(ProjectImport::GitHub { ignore_forks, repos: None, token, }) => project_map.extend( github::get_user_projects_from_github(*ignore_forks, token.clone()) .await? .into_iter() .map(|v| (v.name.clone(), v)), ), ProjectParam::Import(ProjectImport::GitHub { repos: Some(repos), token, .. }) => { project_map.extend( github::get_projects_info_from_github( repos, token.clone(), github_username.map(ToOwned::to_owned), ) .await? .into_iter() .map(|v| (v.name.clone(), v)), ); } ProjectParam::Sort { order_by } => { sort_order = Some(*order_by); } ProjectParam::ImportMode { import_mode: im } => { import_mode = *im; } _ => {} } } if sort_order.is_none() && import_mode == ProjectImportMode::Whitelist { sort_order = Some(ProjectSortOrder::Manual); } // Adding manually project entries for pi in person.projects.iter_mut() { match pi { ProjectParam::Raw(p) => { p.languages .sort_unstable_by_key(|v| ::std::cmp::Reverse(v.percentage)); if let Some(old) = project_map.get_mut(&p.name) { debug!("Merging project entry {}", p.name); if p.url.is_some() { old.url = p.url.clone(); } if p.description.is_some() { old.description = p.description.clone(); } if p.owner.is_some() { old.owner = p.owner.clone(); } if p.contributions.is_some() { old.contributions = p.contributions.clone(); } if !p.tags.is_empty() { old.tags = p.tags.clone(); } if p.role.is_some() { old.role = p.role; } } else { project_map.insert(p.name.clone(), p.clone()); } } _ => {} } } let mut projects: Vec<_>; if let ProjectImportMode::Whitelist = import_mode { let raw_entries: Vec<_> = person .projects .iter() .filter_map(|p| match p { ProjectParam::Raw(p) => Some(&p.name), _ => None, }) .collect(); projects = raw_entries .into_iter() .filter_map(|name| project_map.get(name).map(Clone::clone)) .collect(); } else { projects = project_map.iter().map(|(_, v)| v.clone()).collect(); } if let Some(sort_order) = sort_order { use ::std::cmp::Reverse; match sort_order { ProjectSortOrder::Stars => projects.sort_unstable_by_key(|v| Reverse(v.stars)), ProjectSortOrder::Forks => projects.sort_unstable_by_key(|v| Reverse(v.forks)), ProjectSortOrder::ForksThenStars => { projects.sort_unstable_by_key(|v| Reverse((v.forks, v.stars))) } ProjectSortOrder::StarsThenForks => { projects.sort_unstable_by_key(|v| Reverse((v.stars, v.forks))) } ProjectSortOrder::Manual if 
import_mode != ProjectImportMode::Whitelist => { debug!("Manual sort"); let raw_entries: HashMap<_, _> = person .projects .iter() .filter_map(|p| match p { ProjectParam::Raw(p) => Some(&p.name), _ => None, }) .enumerate() .map(|(i, v)| (v, i)) .collect(); projects.sort_unstable_by_key(|v| raw_entries.get(&v.name).map(|v| *v)); } _ => {} } } debug!("{}", serde_yaml::to_string(&projects)?); person.projects = projects.into_iter().map(|v| ProjectParam::Raw(v)).collect(); person.projects.push(ProjectParam::Sort { order_by: ProjectSortOrder::Manual, }); // Fetch citations use ::futures::FutureExt; debug!("{:?}", person.references); let fut: futures::stream::FuturesUnordered<_> = person .references .iter_mut() .map(|(_, v)| v) .chain(person.publications.iter_mut()) .map(|v| { async move { Result::<_, Error>::Ok(match v { Citation::Url(url) => url.fetch().await?, Citation::Doi(doi) => doi.fetch().await?, Citation::Bibtex(bib) => bib.fetch().await?, _ => (), }) } .boxed() }) .collect(); let () = fut.try_collect().await?; person.references = person .references .into_iter() .filter_map(|(k, v)| v.to_raw().map(|v| (k, v))) .collect(); person.publications = person .publications .into_iter() .filter_map(|v| v.to_raw_with_year()) .collect(); debug!("{:?}", person.references); Ok(person) } fn build_params<'a>( p: &'a Person, footnotes: Option<HashMap<String, usize>>, ) -> Result<ResumeParams<'a>, Error> { let mut c = Vec::new(); let it = p.references.iter().filter_map(|(k, v)| match v { Citation::Raw(s) => Some((k.as_str(), s.as_str())), _ => None, }); let mut references: Vec<_> = if let Some(footnotes) = footnotes.as_ref() { // Remove unused references it.filter(|(k, _)| footnotes.get(*k).is_some()).collect() } else { it.collect() }; // Sort references if let Some(footnotes) = footnotes { references.sort_unstable_by_key(|(k, _)| footnotes.get(*k).unwrap()); } for i in p.contacts.iter() { c.push(ContactParams { link: match i.type_.as_str() { "github" => Some(format!("https://github.com/{}", i.value)), "email" => Some(format!("mailto:{}", i.value)), "blog" => Some(i.value.clone()), _ => None, }, icon: match i.type_.as_str() { "github" => Some("icons/github.svg".into()), "email" => Some("icons/mail.svg".into()), "blog" => Some("icons/blog.svg".into()), _ => None, }, value: i.value.clone(), }); } Ok(ResumeParams { name: &p.name, resume_url: p.resume_url.as_ref().map(String::as_str), contacts: c, educations: p.educations.as_slice(), experiences: p.experiences.as_slice(), projects: p .projects .iter() .filter_map(|v| match v { ProjectParam::Raw(p) => Some(p.clone()), _ => None, }) .collect(), publications: p .publications .iter() .filter_map(|v| match v { Citation::RawWithYear { text, year } => Some((text.as_str(), *year)), _ => None, }) .collect(), references, skills: p.skills.as_slice(), }) } fn main() -> Result<(), Error> { env_logger::init(); let args = clap::Command::new("resume") .arg(clap::Arg::new("input").required(true)) .get_matches(); let input_filename = args.get_one::<String>("input").unwrap(); let cache_filename = format!("{}-cache", input_filename); let cache_info = std::fs::metadata(&cache_filename); let input_info = std::fs::metadata(&input_filename)?; let cache_data = if cache_info.is_ok() { std::fs::read(&cache_filename).ok() } else { None }; let r = if cache_data.is_some() && cache_info?.modified()? >= input_info.modified()? { serde_yaml::from_slice::<Person>(cache_data.unwrap().as_slice())? 
} else { let f = std::fs::read(input_filename).unwrap(); let r = serde_yaml::from_slice::<Person>(f.as_slice())?; debug!("{}", serde_yaml::to_string(&r)?); let mut runtime = tokio::runtime::Runtime::new()?; let r = runtime.block_on(fetch(r))?; if let Some(mut cache_f) = std::fs::File::create(format!("{}-cache", input_filename)).ok() { use ::std::io::Write; write!(cache_f, "{}", serde_yaml::to_string(&r)?)?; } r }; let resume = build_params(&r, None)?; resume.render()?; let footnotes = FOOTNOTES.lock().unwrap().replace(HashMap::new()).unwrap(); let resume = build_params(&r, Some(footnotes))?; println!("{}", resume.render()?); Ok(()) }
} impl<'a> Deserialize<'a> for DateRange {
random_line_split
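Because `ProjectParam` is an untagged enum, each item in the `projects` list is matched against the variants in declaration order and its field names decide which one applies: a `from` key selects a GitHub import, `order_by` a sort directive, `import_mode` the import mode, and an item with a bare `name` falls through to a raw `Project`. A hedged sketch of that dispatch; the YAML content is invented for illustration and relies on `serde_yaml`, which the file already uses:

```rust
// Illustration only: field names pick the ProjectParam variant because the
// enum is #[serde(untagged)] and variants are tried in declaration order.
fn main() -> anyhow::Result<()> {
    let yaml = r#"
- from: github
  ignore_forks: true
- order_by: stars
- name: my-tool
  description: a small command line utility
"#;
    let params: Vec<ProjectParam> = serde_yaml::from_str(yaml)?;
    assert!(matches!(params[0], ProjectParam::Import(_)));
    assert!(matches!(params[1], ProjectParam::Sort { .. }));
    assert!(matches!(params[2], ProjectParam::Raw(_)));
    Ok(())
}
```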
readbuf.rs
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License.. use crate::fmt::{self, Debug, Formatter}; use crate::io::{Result, Write}; use crate::mem::{self, MaybeUninit}; use crate::{cmp, ptr};
/// A borrowed byte buffer which is incrementally filled and initialized. /// /// This type is a sort of "double cursor". It tracks three regions in the buffer: a region at the beginning of the /// buffer that has been logically filled with data, a region that has been initialized at some point but not yet /// logically filled, and a region at the end that is fully uninitialized. The filled region is guaranteed to be a /// subset of the initialized region. /// /// In summary, the contents of the buffer can be visualized as: /// ```not_rust /// [ capacity ] /// [ filled | unfilled ] /// [ initialized | uninitialized ] /// ``` /// /// A `BorrowedBuf` is created around some existing data (or capacity for data) via a unique reference /// (`&mut`). The `BorrowedBuf` can be configured (e.g., using `clear` or `set_init`), but cannot be /// directly written. To write into the buffer, use `unfilled` to create a `BorrowedCursor`. The cursor /// has write-only access to the unfilled portion of the buffer (you can think of it as a /// write-only iterator). /// /// The lifetime `'data` is a bound on the lifetime of the underlying data. pub struct BorrowedBuf<'data> { /// The buffer's underlying data. buf: &'data mut [MaybeUninit<u8>], /// The length of `self.buf` which is known to be filled. filled: usize, /// The length of `self.buf` which is known to be initialized. init: usize, } impl Debug for BorrowedBuf<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("BorrowedBuf") .field("init", &self.init) .field("filled", &self.filled) .field("capacity", &self.capacity()) .finish() } } /// Create a new `BorrowedBuf` from a fully initialized slice. impl<'data> From<&'data mut [u8]> for BorrowedBuf<'data> { #[inline] fn from(slice: &'data mut [u8]) -> BorrowedBuf<'data> { let len = slice.len(); BorrowedBuf { // SAFETY: initialized data never becoming uninitialized is an invariant of BorrowedBuf buf: unsafe { (slice as *mut [u8]).as_uninit_slice_mut().unwrap() }, filled: 0, init: len, } } } /// Create a new `BorrowedBuf` from an uninitialized buffer. /// /// Use `set_init` if part of the buffer is known to be already initialized. impl<'data> From<&'data mut [MaybeUninit<u8>]> for BorrowedBuf<'data> { #[inline] fn from(buf: &'data mut [MaybeUninit<u8>]) -> BorrowedBuf<'data> { BorrowedBuf { buf, filled: 0, init: 0 } } } impl<'data> BorrowedBuf<'data> { /// Returns the total capacity of the buffer. #[inline] pub fn capacity(&self) -> usize { self.buf.len() } /// Returns the length of the filled part of the buffer. #[inline] pub fn len(&self) -> usize { self.filled } /// Returns the length of the initialized part of the buffer. #[inline] pub fn init_len(&self) -> usize { self.init } /// Returns a shared reference to the filled portion of the buffer. #[inline] pub fn filled(&self) -> &[u8] { // SAFETY: We only slice the filled part of the buffer, which is always valid unsafe { MaybeUninit::slice_assume_init_ref(&self.buf[0..self.filled]) } } /// Returns a cursor over the unfilled part of the buffer. #[inline] pub fn unfilled<'this>(&'this mut self) -> BorrowedCursor<'this> { BorrowedCursor { start: self.filled, // SAFETY: we never assign into `BorrowedCursor::buf`, so treating its // lifetime covariantly is safe. buf: unsafe { mem::transmute::<&'this mut BorrowedBuf<'data>, &'this mut BorrowedBuf<'this>>(self) }, } } /// Clears the buffer, resetting the filled region to empty. /// /// The number of initialized bytes is not changed, and the contents of the buffer are not modified. 
#[inline] pub fn clear(&mut self) -> &mut Self { self.filled = 0; self } /// Asserts that the first `n` bytes of the buffer are initialized. /// /// `BorrowedBuf` assumes that bytes are never de-initialized, so this method does nothing when called with fewer /// bytes than are already known to be initialized. /// /// # Safety /// /// The caller must ensure that the first `n` unfilled bytes of the buffer have already been initialized. #[inline] pub unsafe fn set_init(&mut self, n: usize) -> &mut Self { self.init = cmp::max(self.init, n); self } } /// A writeable view of the unfilled portion of a [`BorrowedBuf`](BorrowedBuf). /// /// Provides access to the initialized and uninitialized parts of the underlying `BorrowedBuf`. /// Data can be written directly to the cursor by using [`append`](BorrowedCursor::append) or /// indirectly by getting a slice of part or all of the cursor and writing into the slice. In the /// indirect case, the caller must call [`advance`](BorrowedCursor::advance) after writing to inform /// the cursor how many bytes have been written. /// /// Once data is written to the cursor, it becomes part of the filled portion of the underlying /// `BorrowedBuf` and can no longer be accessed or re-written by the cursor. I.e., the cursor tracks /// the unfilled part of the underlying `BorrowedBuf`. /// /// The lifetime `'a` is a bound on the lifetime of the underlying buffer (which means it is a bound /// on the data in that buffer by transitivity). #[derive(Debug)] pub struct BorrowedCursor<'a> { /// The underlying buffer. // Safety invariant: we treat the type of buf as covariant in the lifetime of `BorrowedBuf` when // we create a `BorrowedCursor`. This is only safe if we never replace `buf` by assigning into // it, so don't do that! buf: &'a mut BorrowedBuf<'a>, /// The length of the filled portion of the underlying buffer at the time of the cursor's /// creation. start: usize, } impl<'a> BorrowedCursor<'a> { /// Reborrow this cursor by cloning it with a smaller lifetime. /// /// Since a cursor maintains unique access to its underlying buffer, the borrowed cursor is /// not accessible while the new cursor exists. #[inline] pub fn reborrow<'this>(&'this mut self) -> BorrowedCursor<'this> { BorrowedCursor { // SAFETY: we never assign into `BorrowedCursor::buf`, so treating its // lifetime covariantly is safe. buf: unsafe { mem::transmute::<&'this mut BorrowedBuf<'a>, &'this mut BorrowedBuf<'this>>( self.buf, ) }, start: self.start, } } /// Returns the available space in the cursor. #[inline] pub fn capacity(&self) -> usize { self.buf.capacity() - self.buf.filled } /// Returns the number of bytes written to this cursor since it was created from a `BorrowedBuf`. /// /// Note that if this cursor is a reborrowed clone of another, then the count returned is the /// count written via either cursor, not the count since the cursor was reborrowed. #[inline] pub fn written(&self) -> usize { self.buf.filled - self.start } /// Returns a shared reference to the initialized portion of the cursor. #[inline] pub fn init_ref(&self) -> &[u8] { // SAFETY: We only slice the initialized part of the buffer, which is always valid unsafe { MaybeUninit::slice_assume_init_ref(&self.buf.buf[self.buf.filled..self.buf.init]) } } /// Returns a mutable reference to the initialized portion of the cursor. 
#[inline] pub fn init_mut(&mut self) -> &mut [u8] { // SAFETY: We only slice the initialized part of the buffer, which is always valid unsafe { MaybeUninit::slice_assume_init_mut(&mut self.buf.buf[self.buf.filled..self.buf.init]) } } /// Returns a mutable reference to the uninitialized part of the cursor. /// /// It is safe to uninitialize any of these bytes. #[inline] pub fn uninit_mut(&mut self) -> &mut [MaybeUninit<u8>] { &mut self.buf.buf[self.buf.init..] } /// Returns a mutable reference to the whole cursor. /// /// # Safety /// /// The caller must not uninitialize any bytes in the initialized portion of the cursor. #[inline] pub unsafe fn as_mut(&mut self) -> &mut [MaybeUninit<u8>] { &mut self.buf.buf[self.buf.filled..] } /// Advance the cursor by asserting that `n` bytes have been filled. /// /// After advancing, the `n` bytes are no longer accessible via the cursor and can only be /// accessed via the underlying buffer. I.e., the buffer's filled portion grows by `n` elements /// and its unfilled portion (and the capacity of this cursor) shrinks by `n` elements. /// /// # Safety /// /// The caller must ensure that the first `n` bytes of the cursor have been properly /// initialised. #[inline] pub unsafe fn advance(&mut self, n: usize) -> &mut Self { self.buf.filled += n; self.buf.init = cmp::max(self.buf.init, self.buf.filled); self } /// Initializes all bytes in the cursor. #[inline] pub fn ensure_init(&mut self) -> &mut Self { let uninit = self.uninit_mut(); // SAFETY: 0 is a valid value for MaybeUninit<u8> and the length matches the allocation // since it is comes from a slice reference. unsafe { ptr::write_bytes(uninit.as_mut_ptr(), 0, uninit.len()); } self.buf.init = self.buf.capacity(); self } /// Asserts that the first `n` unfilled bytes of the cursor are initialized. /// /// `BorrowedBuf` assumes that bytes are never de-initialized, so this method does nothing when /// called with fewer bytes than are already known to be initialized. /// /// # Safety /// /// The caller must ensure that the first `n` bytes of the buffer have already been initialized. #[inline] pub unsafe fn set_init(&mut self, n: usize) -> &mut Self { self.buf.init = cmp::max(self.buf.init, self.buf.filled + n); self } /// Appends data to the cursor, advancing position within its buffer. /// /// # Panics /// /// Panics if `self.capacity()` is less than `buf.len()`. #[inline] pub fn append(&mut self, buf: &[u8]) { assert!(self.capacity() >= buf.len()); // SAFETY: we do not de-initialize any of the elements of the slice unsafe { MaybeUninit::write_slice(&mut self.as_mut()[..buf.len()], buf); } // SAFETY: We just added the entire contents of buf to the filled section. unsafe { self.set_init(buf.len()); } self.buf.filled += buf.len(); } } impl<'a> Write for BorrowedCursor<'a> { fn write(&mut self, buf: &[u8]) -> Result<usize> { self.append(buf); Ok(buf.len()) } fn flush(&mut self) -> Result<()> { Ok(()) } }
random_line_split
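The readbuf.rs rows center on the `BorrowedBuf` / `BorrowedCursor` pair described in their doc comments (the filled region is a subset of the initialized region, which is a subset of the capacity). A minimal sketch of the direct write path, assuming the two types are in scope from the module above and using `core::mem::MaybeUninit` in place of the module's `crate::mem` path:

```rust
use core::mem::MaybeUninit;

// Sketch only: stack storage -> BorrowedBuf -> write through a cursor ->
// read back the filled region. Every method used here appears in the module above.
fn main() {
    let mut storage = [MaybeUninit::<u8>::uninit(); 16];
    let mut buf: BorrowedBuf<'_> = storage.as_mut_slice().into();

    {
        let mut cursor = buf.unfilled(); // write-only view of the unfilled part
        cursor.append(b"hello");         // fills (and initializes) 5 bytes
        assert_eq!(cursor.written(), 5);
    }

    assert_eq!(buf.len(), 5);            // filled region
    assert_eq!(buf.filled(), b"hello");
    assert!(buf.init_len() >= 5);        // filled never exceeds initialized
}
```

Dropping the cursor before touching `buf` again matters: the cursor holds a mutable borrow of the buffer, so the inner block scopes that borrow.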
readbuf.rs
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License.. use crate::fmt::{self, Debug, Formatter}; use crate::io::{Result, Write}; use crate::mem::{self, MaybeUninit}; use crate::{cmp, ptr}; /// A borrowed byte buffer which is incrementally filled and initialized. /// /// This type is a sort of "double cursor". It tracks three regions in the buffer: a region at the beginning of the /// buffer that has been logically filled with data, a region that has been initialized at some point but not yet /// logically filled, and a region at the end that is fully uninitialized. The filled region is guaranteed to be a /// subset of the initialized region. /// /// In summary, the contents of the buffer can be visualized as: /// ```not_rust /// [ capacity ] /// [ filled | unfilled ] /// [ initialized | uninitialized ] /// ``` /// /// A `BorrowedBuf` is created around some existing data (or capacity for data) via a unique reference /// (`&mut`). The `BorrowedBuf` can be configured (e.g., using `clear` or `set_init`), but cannot be /// directly written. To write into the buffer, use `unfilled` to create a `BorrowedCursor`. The cursor /// has write-only access to the unfilled portion of the buffer (you can think of it as a /// write-only iterator). /// /// The lifetime `'data` is a bound on the lifetime of the underlying data. pub struct BorrowedBuf<'data> { /// The buffer's underlying data. buf: &'data mut [MaybeUninit<u8>], /// The length of `self.buf` which is known to be filled. filled: usize, /// The length of `self.buf` which is known to be initialized. init: usize, } impl Debug for BorrowedBuf<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("BorrowedBuf") .field("init", &self.init) .field("filled", &self.filled) .field("capacity", &self.capacity()) .finish() } } /// Create a new `BorrowedBuf` from a fully initialized slice. impl<'data> From<&'data mut [u8]> for BorrowedBuf<'data> { #[inline] fn from(slice: &'data mut [u8]) -> BorrowedBuf<'data> { let len = slice.len(); BorrowedBuf { // SAFETY: initialized data never becoming uninitialized is an invariant of BorrowedBuf buf: unsafe { (slice as *mut [u8]).as_uninit_slice_mut().unwrap() }, filled: 0, init: len, } } } /// Create a new `BorrowedBuf` from an uninitialized buffer. /// /// Use `set_init` if part of the buffer is known to be already initialized. impl<'data> From<&'data mut [MaybeUninit<u8>]> for BorrowedBuf<'data> { #[inline] fn from(buf: &'data mut [MaybeUninit<u8>]) -> BorrowedBuf<'data> { BorrowedBuf { buf, filled: 0, init: 0 } } } impl<'data> BorrowedBuf<'data> { /// Returns the total capacity of the buffer. #[inline] pub fn capacity(&self) -> usize { self.buf.len() } /// Returns the length of the filled part of the buffer. 
#[inline] pub fn len(&self) -> usize { self.filled } /// Returns the length of the initialized part of the buffer. #[inline] pub fn init_len(&self) -> usize { self.init } /// Returns a shared reference to the filled portion of the buffer. #[inline] pub fn filled(&self) -> &[u8] { // SAFETY: We only slice the filled part of the buffer, which is always valid unsafe { MaybeUninit::slice_assume_init_ref(&self.buf[0..self.filled]) } } /// Returns a cursor over the unfilled part of the buffer. #[inline] pub fn unfilled<'this>(&'this mut self) -> BorrowedCursor<'this> { BorrowedCursor { start: self.filled, // SAFETY: we never assign into `BorrowedCursor::buf`, so treating its // lifetime covariantly is safe. buf: unsafe { mem::transmute::<&'this mut BorrowedBuf<'data>, &'this mut BorrowedBuf<'this>>(self) }, } } /// Clears the buffer, resetting the filled region to empty. /// /// The number of initialized bytes is not changed, and the contents of the buffer are not modified. #[inline] pub fn clear(&mut self) -> &mut Self { self.filled = 0; self } /// Asserts that the first `n` bytes of the buffer are initialized. /// /// `BorrowedBuf` assumes that bytes are never de-initialized, so this method does nothing when called with fewer /// bytes than are already known to be initialized. /// /// # Safety /// /// The caller must ensure that the first `n` unfilled bytes of the buffer have already been initialized. #[inline] pub unsafe fn set_init(&mut self, n: usize) -> &mut Self { self.init = cmp::max(self.init, n); self } } /// A writeable view of the unfilled portion of a [`BorrowedBuf`](BorrowedBuf). /// /// Provides access to the initialized and uninitialized parts of the underlying `BorrowedBuf`. /// Data can be written directly to the cursor by using [`append`](BorrowedCursor::append) or /// indirectly by getting a slice of part or all of the cursor and writing into the slice. In the /// indirect case, the caller must call [`advance`](BorrowedCursor::advance) after writing to inform /// the cursor how many bytes have been written. /// /// Once data is written to the cursor, it becomes part of the filled portion of the underlying /// `BorrowedBuf` and can no longer be accessed or re-written by the cursor. I.e., the cursor tracks /// the unfilled part of the underlying `BorrowedBuf`. /// /// The lifetime `'a` is a bound on the lifetime of the underlying buffer (which means it is a bound /// on the data in that buffer by transitivity). #[derive(Debug)] pub struct BorrowedCursor<'a> { /// The underlying buffer. // Safety invariant: we treat the type of buf as covariant in the lifetime of `BorrowedBuf` when // we create a `BorrowedCursor`. This is only safe if we never replace `buf` by assigning into // it, so don't do that! buf: &'a mut BorrowedBuf<'a>, /// The length of the filled portion of the underlying buffer at the time of the cursor's /// creation. start: usize, } impl<'a> BorrowedCursor<'a> { /// Reborrow this cursor by cloning it with a smaller lifetime. /// /// Since a cursor maintains unique access to its underlying buffer, the borrowed cursor is /// not accessible while the new cursor exists. #[inline] pub fn reborrow<'this>(&'this mut self) -> BorrowedCursor<'this> { BorrowedCursor { // SAFETY: we never assign into `BorrowedCursor::buf`, so treating its // lifetime covariantly is safe. buf: unsafe { mem::transmute::<&'this mut BorrowedBuf<'a>, &'this mut BorrowedBuf<'this>>( self.buf, ) }, start: self.start, } } /// Returns the available space in the cursor. 
#[inline] pub fn capacity(&self) -> usize { self.buf.capacity() - self.buf.filled } /// Returns the number of bytes written to this cursor since it was created from a `BorrowedBuf`. /// /// Note that if this cursor is a reborrowed clone of another, then the count returned is the /// count written via either cursor, not the count since the cursor was reborrowed. #[inline] pub fn written(&self) -> usize { self.buf.filled - self.start } /// Returns a shared reference to the initialized portion of the cursor. #[inline] pub fn init_ref(&self) -> &[u8]
/// Returns a mutable reference to the initialized portion of the cursor. #[inline] pub fn init_mut(&mut self) -> &mut [u8] { // SAFETY: We only slice the initialized part of the buffer, which is always valid unsafe { MaybeUninit::slice_assume_init_mut(&mut self.buf.buf[self.buf.filled..self.buf.init]) } } /// Returns a mutable reference to the uninitialized part of the cursor. /// /// It is safe to uninitialize any of these bytes. #[inline] pub fn uninit_mut(&mut self) -> &mut [MaybeUninit<u8>] { &mut self.buf.buf[self.buf.init..] } /// Returns a mutable reference to the whole cursor. /// /// # Safety /// /// The caller must not uninitialize any bytes in the initialized portion of the cursor. #[inline] pub unsafe fn as_mut(&mut self) -> &mut [MaybeUninit<u8>] { &mut self.buf.buf[self.buf.filled..] } /// Advance the cursor by asserting that `n` bytes have been filled. /// /// After advancing, the `n` bytes are no longer accessible via the cursor and can only be /// accessed via the underlying buffer. I.e., the buffer's filled portion grows by `n` elements /// and its unfilled portion (and the capacity of this cursor) shrinks by `n` elements. /// /// # Safety /// /// The caller must ensure that the first `n` bytes of the cursor have been properly /// initialised. #[inline] pub unsafe fn advance(&mut self, n: usize) -> &mut Self { self.buf.filled += n; self.buf.init = cmp::max(self.buf.init, self.buf.filled); self } /// Initializes all bytes in the cursor. #[inline] pub fn ensure_init(&mut self) -> &mut Self { let uninit = self.uninit_mut(); // SAFETY: 0 is a valid value for MaybeUninit<u8> and the length matches the allocation // since it is comes from a slice reference. unsafe { ptr::write_bytes(uninit.as_mut_ptr(), 0, uninit.len()); } self.buf.init = self.buf.capacity(); self } /// Asserts that the first `n` unfilled bytes of the cursor are initialized. /// /// `BorrowedBuf` assumes that bytes are never de-initialized, so this method does nothing when /// called with fewer bytes than are already known to be initialized. /// /// # Safety /// /// The caller must ensure that the first `n` bytes of the buffer have already been initialized. #[inline] pub unsafe fn set_init(&mut self, n: usize) -> &mut Self { self.buf.init = cmp::max(self.buf.init, self.buf.filled + n); self } /// Appends data to the cursor, advancing position within its buffer. /// /// # Panics /// /// Panics if `self.capacity()` is less than `buf.len()`. #[inline] pub fn append(&mut self, buf: &[u8]) { assert!(self.capacity() >= buf.len()); // SAFETY: we do not de-initialize any of the elements of the slice unsafe { MaybeUninit::write_slice(&mut self.as_mut()[..buf.len()], buf); } // SAFETY: We just added the entire contents of buf to the filled section. unsafe { self.set_init(buf.len()); } self.buf.filled += buf.len(); } } impl<'a> Write for BorrowedCursor<'a> { fn write(&mut self, buf: &[u8]) -> Result<usize> { self.append(buf); Ok(buf.len()) } fn flush(&mut self) -> Result<()> { Ok(()) } }
{ // SAFETY: We only slice the initialized part of the buffer, which is always valid unsafe { MaybeUninit::slice_assume_init_ref(&self.buf.buf[self.buf.filled..self.buf.init]) } }
identifier_body
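The cursor docs also describe an indirect write path: expose the unfilled bytes, write into them, then call `advance`. A hedged sketch of that pattern; the helper name and the zero-initialization policy are invented for illustration:

```rust
// Hypothetical helper: copy as much of `src` as fits, via the indirect path.
// ensure_init() zero-initializes the unfilled region so init_mut() covers it;
// advance() then marks the written bytes as filled.
fn copy_into(cursor: &mut BorrowedCursor<'_>, src: &[u8]) -> usize {
    cursor.ensure_init();
    let n = src.len().min(cursor.capacity());
    cursor.init_mut()[..n].copy_from_slice(&src[..n]);
    // SAFETY: the first `n` unfilled bytes were initialized just above.
    unsafe { cursor.advance(n) };
    n
}
```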
readbuf.rs
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License.. use crate::fmt::{self, Debug, Formatter}; use crate::io::{Result, Write}; use crate::mem::{self, MaybeUninit}; use crate::{cmp, ptr}; /// A borrowed byte buffer which is incrementally filled and initialized. /// /// This type is a sort of "double cursor". It tracks three regions in the buffer: a region at the beginning of the /// buffer that has been logically filled with data, a region that has been initialized at some point but not yet /// logically filled, and a region at the end that is fully uninitialized. The filled region is guaranteed to be a /// subset of the initialized region. /// /// In summary, the contents of the buffer can be visualized as: /// ```not_rust /// [ capacity ] /// [ filled | unfilled ] /// [ initialized | uninitialized ] /// ``` /// /// A `BorrowedBuf` is created around some existing data (or capacity for data) via a unique reference /// (`&mut`). The `BorrowedBuf` can be configured (e.g., using `clear` or `set_init`), but cannot be /// directly written. To write into the buffer, use `unfilled` to create a `BorrowedCursor`. The cursor /// has write-only access to the unfilled portion of the buffer (you can think of it as a /// write-only iterator). /// /// The lifetime `'data` is a bound on the lifetime of the underlying data. pub struct BorrowedBuf<'data> { /// The buffer's underlying data. buf: &'data mut [MaybeUninit<u8>], /// The length of `self.buf` which is known to be filled. filled: usize, /// The length of `self.buf` which is known to be initialized. init: usize, } impl Debug for BorrowedBuf<'_> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("BorrowedBuf") .field("init", &self.init) .field("filled", &self.filled) .field("capacity", &self.capacity()) .finish() } } /// Create a new `BorrowedBuf` from a fully initialized slice. impl<'data> From<&'data mut [u8]> for BorrowedBuf<'data> { #[inline] fn from(slice: &'data mut [u8]) -> BorrowedBuf<'data> { let len = slice.len(); BorrowedBuf { // SAFETY: initialized data never becoming uninitialized is an invariant of BorrowedBuf buf: unsafe { (slice as *mut [u8]).as_uninit_slice_mut().unwrap() }, filled: 0, init: len, } } } /// Create a new `BorrowedBuf` from an uninitialized buffer. /// /// Use `set_init` if part of the buffer is known to be already initialized. impl<'data> From<&'data mut [MaybeUninit<u8>]> for BorrowedBuf<'data> { #[inline] fn from(buf: &'data mut [MaybeUninit<u8>]) -> BorrowedBuf<'data> { BorrowedBuf { buf, filled: 0, init: 0 } } } impl<'data> BorrowedBuf<'data> { /// Returns the total capacity of the buffer. #[inline] pub fn capacity(&self) -> usize { self.buf.len() } /// Returns the length of the filled part of the buffer. 
#[inline] pub fn len(&self) -> usize { self.filled } /// Returns the length of the initialized part of the buffer. #[inline] pub fn init_len(&self) -> usize { self.init } /// Returns a shared reference to the filled portion of the buffer. #[inline] pub fn filled(&self) -> &[u8] { // SAFETY: We only slice the filled part of the buffer, which is always valid unsafe { MaybeUninit::slice_assume_init_ref(&self.buf[0..self.filled]) } } /// Returns a cursor over the unfilled part of the buffer. #[inline] pub fn unfilled<'this>(&'this mut self) -> BorrowedCursor<'this> { BorrowedCursor { start: self.filled, // SAFETY: we never assign into `BorrowedCursor::buf`, so treating its // lifetime covariantly is safe. buf: unsafe { mem::transmute::<&'this mut BorrowedBuf<'data>, &'this mut BorrowedBuf<'this>>(self) }, } } /// Clears the buffer, resetting the filled region to empty. /// /// The number of initialized bytes is not changed, and the contents of the buffer are not modified. #[inline] pub fn clear(&mut self) -> &mut Self { self.filled = 0; self } /// Asserts that the first `n` bytes of the buffer are initialized. /// /// `BorrowedBuf` assumes that bytes are never de-initialized, so this method does nothing when called with fewer /// bytes than are already known to be initialized. /// /// # Safety /// /// The caller must ensure that the first `n` unfilled bytes of the buffer have already been initialized. #[inline] pub unsafe fn set_init(&mut self, n: usize) -> &mut Self { self.init = cmp::max(self.init, n); self } } /// A writeable view of the unfilled portion of a [`BorrowedBuf`](BorrowedBuf). /// /// Provides access to the initialized and uninitialized parts of the underlying `BorrowedBuf`. /// Data can be written directly to the cursor by using [`append`](BorrowedCursor::append) or /// indirectly by getting a slice of part or all of the cursor and writing into the slice. In the /// indirect case, the caller must call [`advance`](BorrowedCursor::advance) after writing to inform /// the cursor how many bytes have been written. /// /// Once data is written to the cursor, it becomes part of the filled portion of the underlying /// `BorrowedBuf` and can no longer be accessed or re-written by the cursor. I.e., the cursor tracks /// the unfilled part of the underlying `BorrowedBuf`. /// /// The lifetime `'a` is a bound on the lifetime of the underlying buffer (which means it is a bound /// on the data in that buffer by transitivity). #[derive(Debug)] pub struct BorrowedCursor<'a> { /// The underlying buffer. // Safety invariant: we treat the type of buf as covariant in the lifetime of `BorrowedBuf` when // we create a `BorrowedCursor`. This is only safe if we never replace `buf` by assigning into // it, so don't do that! buf: &'a mut BorrowedBuf<'a>, /// The length of the filled portion of the underlying buffer at the time of the cursor's /// creation. start: usize, } impl<'a> BorrowedCursor<'a> { /// Reborrow this cursor by cloning it with a smaller lifetime. /// /// Since a cursor maintains unique access to its underlying buffer, the borrowed cursor is /// not accessible while the new cursor exists. #[inline] pub fn reborrow<'this>(&'this mut self) -> BorrowedCursor<'this> { BorrowedCursor { // SAFETY: we never assign into `BorrowedCursor::buf`, so treating its // lifetime covariantly is safe. buf: unsafe { mem::transmute::<&'this mut BorrowedBuf<'a>, &'this mut BorrowedBuf<'this>>( self.buf, ) }, start: self.start, } } /// Returns the available space in the cursor. 
#[inline] pub fn capacity(&self) -> usize { self.buf.capacity() - self.buf.filled } /// Returns the number of bytes written to this cursor since it was created from a `BorrowedBuf`. /// /// Note that if this cursor is a reborrowed clone of another, then the count returned is the /// count written via either cursor, not the count since the cursor was reborrowed. #[inline] pub fn written(&self) -> usize { self.buf.filled - self.start } /// Returns a shared reference to the initialized portion of the cursor. #[inline] pub fn init_ref(&self) -> &[u8] { // SAFETY: We only slice the initialized part of the buffer, which is always valid unsafe { MaybeUninit::slice_assume_init_ref(&self.buf.buf[self.buf.filled..self.buf.init]) } } /// Returns a mutable reference to the initialized portion of the cursor. #[inline] pub fn init_mut(&mut self) -> &mut [u8] { // SAFETY: We only slice the initialized part of the buffer, which is always valid unsafe { MaybeUninit::slice_assume_init_mut(&mut self.buf.buf[self.buf.filled..self.buf.init]) } } /// Returns a mutable reference to the uninitialized part of the cursor. /// /// It is safe to uninitialize any of these bytes. #[inline] pub fn uninit_mut(&mut self) -> &mut [MaybeUninit<u8>] { &mut self.buf.buf[self.buf.init..] } /// Returns a mutable reference to the whole cursor. /// /// # Safety /// /// The caller must not uninitialize any bytes in the initialized portion of the cursor. #[inline] pub unsafe fn as_mut(&mut self) -> &mut [MaybeUninit<u8>] { &mut self.buf.buf[self.buf.filled..] } /// Advance the cursor by asserting that `n` bytes have been filled. /// /// After advancing, the `n` bytes are no longer accessible via the cursor and can only be /// accessed via the underlying buffer. I.e., the buffer's filled portion grows by `n` elements /// and its unfilled portion (and the capacity of this cursor) shrinks by `n` elements. /// /// # Safety /// /// The caller must ensure that the first `n` bytes of the cursor have been properly /// initialised. #[inline] pub unsafe fn advance(&mut self, n: usize) -> &mut Self { self.buf.filled += n; self.buf.init = cmp::max(self.buf.init, self.buf.filled); self } /// Initializes all bytes in the cursor. #[inline] pub fn ensure_init(&mut self) -> &mut Self { let uninit = self.uninit_mut(); // SAFETY: 0 is a valid value for MaybeUninit<u8> and the length matches the allocation // since it is comes from a slice reference. unsafe { ptr::write_bytes(uninit.as_mut_ptr(), 0, uninit.len()); } self.buf.init = self.buf.capacity(); self } /// Asserts that the first `n` unfilled bytes of the cursor are initialized. /// /// `BorrowedBuf` assumes that bytes are never de-initialized, so this method does nothing when /// called with fewer bytes than are already known to be initialized. /// /// # Safety /// /// The caller must ensure that the first `n` bytes of the buffer have already been initialized. #[inline] pub unsafe fn set_init(&mut self, n: usize) -> &mut Self { self.buf.init = cmp::max(self.buf.init, self.buf.filled + n); self } /// Appends data to the cursor, advancing position within its buffer. /// /// # Panics /// /// Panics if `self.capacity()` is less than `buf.len()`. #[inline] pub fn append(&mut self, buf: &[u8]) { assert!(self.capacity() >= buf.len()); // SAFETY: we do not de-initialize any of the elements of the slice unsafe { MaybeUninit::write_slice(&mut self.as_mut()[..buf.len()], buf); } // SAFETY: We just added the entire contents of buf to the filled section. 
unsafe { self.set_init(buf.len()); } self.buf.filled += buf.len(); } } impl<'a> Write for BorrowedCursor<'a> { fn
(&mut self, buf: &[u8]) -> Result<usize> { self.append(buf); Ok(buf.len()) } fn flush(&mut self) -> Result<()> { Ok(()) } }
write
identifier_name
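A usage sketch for the API defined above. It assumes the module is reachable as `std::io::BorrowedBuf`/`BorrowedCursor` on a nightly toolchain; the feature-gate name is an assumption, but every method used below (`from`, `unfilled`, `append`, `filled`, `len`, `init_len`, `clear`, and the `Write` impl on the cursor) appears in the source shown here:

// Assumes a nightly toolchain; the feature-gate name below is a guess, not confirmed by this file.
#![feature(core_io_borrowed_buf)]
use std::io::{BorrowedBuf, Write};
use std::mem::MaybeUninit;

fn main() {
    // Start from wholly uninitialized storage: capacity 16, filled 0, init 0.
    let mut storage = [MaybeUninit::<u8>::uninit(); 16];
    let mut buf: BorrowedBuf<'_> = BorrowedBuf::from(&mut storage[..]);
    assert_eq!((buf.capacity(), buf.len(), buf.init_len()), (16, 0, 0));

    // Writing goes through a cursor over the unfilled region; `append` both initializes
    // and fills, and the cursor also implements io::Write in terms of `append`.
    let mut cursor = buf.unfilled();
    cursor.append(b"hello");
    cursor.write_all(b", world").unwrap();

    // The filled region now reflects both writes; the initialized region is at least as long.
    assert_eq!(buf.filled(), b"hello, world");
    assert_eq!(buf.len(), 12);
    assert!(buf.init_len() >= buf.len());

    // `clear` resets the filled region but remembers how much has been initialized.
    buf.clear();
    assert_eq!((buf.len(), buf.init_len()), (0, 12));
}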
replicated_session.go
// Copyright (c) 2019 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package client import ( "context" "fmt" "time" "github.com/uber-go/tally" "go.uber.org/zap" "github.com/m3db/m3/src/dbnode/encoding" "github.com/m3db/m3/src/dbnode/generated/thrift/rpc" "github.com/m3db/m3/src/dbnode/namespace" "github.com/m3db/m3/src/dbnode/storage/block" "github.com/m3db/m3/src/dbnode/storage/bootstrap/result" "github.com/m3db/m3/src/dbnode/storage/index" "github.com/m3db/m3/src/dbnode/topology" "github.com/m3db/m3/src/x/ident" m3sync "github.com/m3db/m3/src/x/sync" xtime "github.com/m3db/m3/src/x/time" ) type newSessionFn func(Options) (clientSession, error) // replicatedSession is an implementation of clientSession which replicates // session read/writes to a set of clusters asynchronously. type replicatedSession struct { session clientSession asyncSessions []clientSession newSessionFn newSessionFn identifierPool ident.Pool workerPool m3sync.PooledWorkerPool replicationSemaphore chan struct{} scope tally.Scope log *zap.Logger metrics replicatedSessionMetrics outCh chan error writeTimestampOffset time.Duration } type replicatedSessionMetrics struct { replicateExecuted tally.Counter replicateNotExecuted tally.Counter replicateError tally.Counter replicateSuccess tally.Counter } func newReplicatedSessionMetrics(scope tally.Scope) replicatedSessionMetrics { return replicatedSessionMetrics{ replicateExecuted: scope.Counter("replicate.executed"), replicateNotExecuted: scope.Counter("replicate.not-executed"), replicateError: scope.Counter("replicate.error"), replicateSuccess: scope.Counter("replicate.success"), } } // Ensure replicatedSession implements the clientSession interface. 
var _ clientSession = (*replicatedSession)(nil) type replicatedSessionOption func(*replicatedSession) func withNewSessionFn(fn newSessionFn) replicatedSessionOption { return func(session *replicatedSession) { session.newSessionFn = fn } } func newReplicatedSession( opts Options, asyncOpts []Options, options ...replicatedSessionOption, ) (clientSession, error) { workerPool := opts.AsyncWriteWorkerPool() scope := opts.InstrumentOptions().MetricsScope() session := replicatedSession{ newSessionFn: newSession, identifierPool: opts.IdentifierPool(), workerPool: workerPool, replicationSemaphore: make(chan struct{}, opts.AsyncWriteMaxConcurrency()), scope: scope, log: opts.InstrumentOptions().Logger(), metrics: newReplicatedSessionMetrics(scope), writeTimestampOffset: opts.WriteTimestampOffset(), } // Apply options for _, option := range options { option(&session) } if err := session.setSession(opts); err != nil { return nil, err } if err := session.setAsyncSessions(asyncOpts); err != nil { return nil, err } return &session, nil } func (s *replicatedSession) setSession(opts Options) error { if opts.TopologyInitializer() == nil { return nil } session, err := s.newSessionFn(opts) if err != nil { return err } s.session = session return nil } func (s *replicatedSession) setAsyncSessions(opts []Options) error { sessions := make([]clientSession, 0, len(opts)) for i, oo := range opts { subscope := oo.InstrumentOptions().MetricsScope().SubScope(fmt.Sprintf("async-%d", i)) oo = oo.SetInstrumentOptions(oo.InstrumentOptions().SetMetricsScope(subscope)) session, err := s.newSessionFn(oo) if err != nil { return err } sessions = append(sessions, session) } s.asyncSessions = sessions return nil } type replicatedParams struct { namespace ident.ID id ident.ID t xtime.UnixNano value float64 unit xtime.Unit annotation []byte tags ident.TagIterator useTags bool } // NB(srobb): it would be a nicer to accept a lambda which is the fn to // be performed on all sessions, however this causes an extra allocation. 
func (s replicatedSession) replicate(params replicatedParams) error { for _, asyncSession := range s.asyncSessions { asyncSession := asyncSession // capture var var ( clonedID = s.identifierPool.Clone(params.id) clonedNS = s.identifierPool.Clone(params.namespace) clonedTags ident.TagIterator ) if params.useTags { clonedTags = params.tags.Duplicate() } select { case s.replicationSemaphore <- struct{}{}: s.workerPool.Go(func() { var err error if params.useTags { err = asyncSession.WriteTagged( clonedNS, clonedID, clonedTags, params.t, params.value, params.unit, params.annotation, ) } else { err = asyncSession.Write( clonedNS, clonedID, params.t, params.value, params.unit, params.annotation, ) } if err != nil { s.metrics.replicateError.Inc(1) s.log.Error("could not replicate write", zap.Error(err)) } else { s.metrics.replicateSuccess.Inc(1) } if s.outCh != nil { s.outCh <- err } <-s.replicationSemaphore }) s.metrics.replicateExecuted.Inc(1) default: s.metrics.replicateNotExecuted.Inc(1) } } if params.useTags { return s.session.WriteTagged( params.namespace, params.id, params.tags, params.t, params.value, params.unit, params.annotation, ) } return s.session.Write( params.namespace, params.id, params.t, params.value, params.unit, params.annotation, ) } func (s *replicatedSession) ReadClusterAvailability() (bool, error) { return s.session.ReadClusterAvailability() } func (s *replicatedSession) WriteClusterAvailability() (bool, error) { return s.session.WriteClusterAvailability() } // Write value to the database for an ID. func (s replicatedSession) Write( namespace, id ident.ID, t xtime.UnixNano, value float64, unit xtime.Unit, annotation []byte, ) error { return s.replicate(replicatedParams{ namespace: namespace, id: id, t: t.Add(-s.writeTimestampOffset), value: value, unit: unit, annotation: annotation, }) } // WriteTagged value to the database for an ID and given tags. func (s replicatedSession) WriteTagged( namespace, id ident.ID, tags ident.TagIterator, t xtime.UnixNano, value float64, unit xtime.Unit, annotation []byte, ) error { return s.replicate(replicatedParams{ namespace: namespace, id: id, t: t.Add(-s.writeTimestampOffset), value: value, unit: unit, annotation: annotation, tags: tags, useTags: true, }) } // Fetch values from the database for an ID. func (s replicatedSession) Fetch( namespace, id ident.ID, startInclusive, endExclusive xtime.UnixNano, ) (encoding.SeriesIterator, error) { return s.session.Fetch(namespace, id, startInclusive, endExclusive) } // FetchIDs values from the database for a set of IDs. func (s replicatedSession) FetchIDs( namespace ident.ID, ids ident.Iterator, startInclusive, endExclusive xtime.UnixNano, ) (encoding.SeriesIterators, error) { return s.session.FetchIDs(namespace, ids, startInclusive, endExclusive) } // Aggregate aggregates values from the database for the given set of constraints. func (s replicatedSession) Aggregate( ctx context.Context, ns ident.ID, q index.Query, opts index.AggregationOptions, ) (AggregatedTagsIterator, FetchResponseMetadata, error) { return s.session.Aggregate(ctx, ns, q, opts) } // FetchTagged resolves the provided query to known IDs, and fetches the data for them. func (s replicatedSession) FetchTagged( ctx context.Context, namespace ident.ID, q index.Query, opts index.QueryOptions, ) (encoding.SeriesIterators, FetchResponseMetadata, error) { return s.session.FetchTagged(ctx, namespace, q, opts) } // FetchTaggedIDs resolves the provided query to known IDs. 
func (s replicatedSession) FetchTaggedIDs( ctx context.Context, namespace ident.ID, q index.Query, opts index.QueryOptions, ) (TaggedIDsIterator, FetchResponseMetadata, error) { return s.session.FetchTaggedIDs(ctx, namespace, q, opts) } // ShardID returns the given shard for an ID for callers // to easily discern what shard is failing when operations // for given IDs begin failing. func (s replicatedSession) ShardID(id ident.ID) (uint32, error) { return s.session.ShardID(id) } // IteratorPools exposes the internal iterator pools used by the session to clients. func (s replicatedSession) IteratorPools() (encoding.IteratorPools, error) { return s.session.IteratorPools() } // Close the session. func (s replicatedSession) Close() error { err := s.session.Close() for _, as := range s.asyncSessions { if err := as.Close(); err != nil
} return err } // Origin returns the host that initiated the session. func (s replicatedSession) Origin() topology.Host { return s.session.Origin() } // Replicas returns the replication factor. func (s replicatedSession) Replicas() int { return s.session.Replicas() } // TopologyMap returns the current topology map. Note that the session // has a separate topology watch than the database itself, so the two // values can be out of sync and this method should not be relied upon // if the current view of the topology as seen by the database is required. func (s replicatedSession) TopologyMap() (topology.Map, error) { return s.session.TopologyMap() } // Truncate will truncate the namespace for a given shard. func (s replicatedSession) Truncate(namespace ident.ID) (int64, error) { return s.session.Truncate(namespace) } // FetchBootstrapBlocksFromPeers will fetch the most fulfilled block // for each series using the runtime configurable bootstrap level consistency. func (s replicatedSession) FetchBootstrapBlocksFromPeers( namespace namespace.Metadata, shard uint32, start, end xtime.UnixNano, opts result.Options, ) (result.ShardResult, error) { return s.session.FetchBootstrapBlocksFromPeers(namespace, shard, start, end, opts) } // FetchBootstrapBlocksMetadataFromPeers will fetch the blocks metadata from // available peers using the runtime configurable bootstrap level consistency. func (s replicatedSession) FetchBootstrapBlocksMetadataFromPeers( namespace ident.ID, shard uint32, start, end xtime.UnixNano, result result.Options, ) (PeerBlockMetadataIter, error) { return s.session.FetchBootstrapBlocksMetadataFromPeers(namespace, shard, start, end, result) } // FetchBlocksMetadataFromPeers will fetch the blocks metadata from // available peers. func (s replicatedSession) FetchBlocksMetadataFromPeers( namespace ident.ID, shard uint32, start, end xtime.UnixNano, consistencyLevel topology.ReadConsistencyLevel, result result.Options, ) (PeerBlockMetadataIter, error) { return s.session.FetchBlocksMetadataFromPeers(namespace, shard, start, end, consistencyLevel, result) } // FetchBlocksFromPeers will fetch the required blocks from the // peers specified. func (s replicatedSession) FetchBlocksFromPeers( namespace namespace.Metadata, shard uint32, consistencyLevel topology.ReadConsistencyLevel, metadatas []block.ReplicaMetadata, opts result.Options, ) (PeerBlocksIter, error) { return s.session.FetchBlocksFromPeers(namespace, shard, consistencyLevel, metadatas, opts) } func (s *replicatedSession) BorrowConnections( shardID uint32, fn WithBorrowConnectionFn, opts BorrowConnectionOptions, ) (BorrowConnectionsResult, error) { return s.session.BorrowConnections(shardID, fn, opts) } func (s *replicatedSession) DedicatedConnection( shardID uint32, opts DedicatedConnectionOptions, ) (rpc.TChanNode, Channel, error) { return s.session.DedicatedConnection(shardID, opts) } // Open the client session. func (s replicatedSession) Open() error { if err := s.session.Open(); err != nil { return err } for _, asyncSession := range s.asyncSessions { if err := asyncSession.Open(); err != nil { s.log.Error("could not open session to async cluster: %v", zap.Error(err)) } } return nil }
{ s.log.Error("could not close async session: %v", zap.Error(err)) }
conditional_block
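In `replicate` above, each asynchronous replica write is gated by a buffered channel used as a semaphore: the `select` with a `default` branch drops the write (and bumps `replicateNotExecuted`) instead of blocking when too many replications are already in flight. A rough sketch of that try-acquire-or-skip idea, written in Rust for consistency with the other examples in this document, using an atomic counter in place of the Go buffered channel and plain threads in place of the worker pool; all names below are hypothetical:

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;

fn main() {
    let max_in_flight = 2usize;
    let in_flight = Arc::new(AtomicUsize::new(0));
    let (mut executed, mut skipped) = (0usize, 0usize);
    let mut workers = Vec::new();

    for replica_write in 0..8 {
        // Non-blocking try-acquire, mirroring the Go select with a default branch:
        // only increment the in-flight count if we are below the configured maximum.
        let acquired = in_flight
            .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |n| {
                (n < max_in_flight).then_some(n + 1)
            })
            .is_ok();

        if acquired {
            executed += 1;
            let in_flight = Arc::clone(&in_flight);
            workers.push(thread::spawn(move || {
                // ... perform the asynchronous replica write here ...
                let _ = replica_write;
                // Release the slot once the write has completed (the `<-semaphore` step).
                in_flight.fetch_sub(1, Ordering::SeqCst);
            }));
        } else {
            // Too many replications already in flight: drop this one and count it,
            // like the replicateNotExecuted counter above.
            skipped += 1;
        }
    }

    for w in workers {
        w.join().unwrap();
    }
    println!("executed: {executed}, skipped: {skipped}");
}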
replicated_session.go
// Copyright (c) 2019 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package client import ( "context" "fmt" "time" "github.com/uber-go/tally" "go.uber.org/zap" "github.com/m3db/m3/src/dbnode/encoding" "github.com/m3db/m3/src/dbnode/generated/thrift/rpc" "github.com/m3db/m3/src/dbnode/namespace" "github.com/m3db/m3/src/dbnode/storage/block" "github.com/m3db/m3/src/dbnode/storage/bootstrap/result" "github.com/m3db/m3/src/dbnode/storage/index" "github.com/m3db/m3/src/dbnode/topology" "github.com/m3db/m3/src/x/ident" m3sync "github.com/m3db/m3/src/x/sync" xtime "github.com/m3db/m3/src/x/time" ) type newSessionFn func(Options) (clientSession, error) // replicatedSession is an implementation of clientSession which replicates // session read/writes to a set of clusters asynchronously. type replicatedSession struct { session clientSession asyncSessions []clientSession newSessionFn newSessionFn identifierPool ident.Pool workerPool m3sync.PooledWorkerPool replicationSemaphore chan struct{} scope tally.Scope log *zap.Logger metrics replicatedSessionMetrics outCh chan error writeTimestampOffset time.Duration } type replicatedSessionMetrics struct { replicateExecuted tally.Counter replicateNotExecuted tally.Counter replicateError tally.Counter replicateSuccess tally.Counter } func newReplicatedSessionMetrics(scope tally.Scope) replicatedSessionMetrics { return replicatedSessionMetrics{ replicateExecuted: scope.Counter("replicate.executed"), replicateNotExecuted: scope.Counter("replicate.not-executed"), replicateError: scope.Counter("replicate.error"), replicateSuccess: scope.Counter("replicate.success"), } } // Ensure replicatedSession implements the clientSession interface. 
var _ clientSession = (*replicatedSession)(nil) type replicatedSessionOption func(*replicatedSession) func withNewSessionFn(fn newSessionFn) replicatedSessionOption { return func(session *replicatedSession) { session.newSessionFn = fn } } func newReplicatedSession( opts Options, asyncOpts []Options, options ...replicatedSessionOption, ) (clientSession, error) { workerPool := opts.AsyncWriteWorkerPool() scope := opts.InstrumentOptions().MetricsScope() session := replicatedSession{ newSessionFn: newSession, identifierPool: opts.IdentifierPool(), workerPool: workerPool, replicationSemaphore: make(chan struct{}, opts.AsyncWriteMaxConcurrency()), scope: scope, log: opts.InstrumentOptions().Logger(), metrics: newReplicatedSessionMetrics(scope), writeTimestampOffset: opts.WriteTimestampOffset(), } // Apply options for _, option := range options { option(&session) } if err := session.setSession(opts); err != nil { return nil, err } if err := session.setAsyncSessions(asyncOpts); err != nil { return nil, err } return &session, nil } func (s *replicatedSession) setSession(opts Options) error { if opts.TopologyInitializer() == nil { return nil } session, err := s.newSessionFn(opts) if err != nil { return err } s.session = session return nil } func (s *replicatedSession) setAsyncSessions(opts []Options) error { sessions := make([]clientSession, 0, len(opts)) for i, oo := range opts { subscope := oo.InstrumentOptions().MetricsScope().SubScope(fmt.Sprintf("async-%d", i)) oo = oo.SetInstrumentOptions(oo.InstrumentOptions().SetMetricsScope(subscope)) session, err := s.newSessionFn(oo) if err != nil { return err } sessions = append(sessions, session) } s.asyncSessions = sessions return nil } type replicatedParams struct { namespace ident.ID id ident.ID t xtime.UnixNano value float64 unit xtime.Unit annotation []byte tags ident.TagIterator useTags bool } // NB(srobb): it would be a nicer to accept a lambda which is the fn to // be performed on all sessions, however this causes an extra allocation. 
func (s replicatedSession) replicate(params replicatedParams) error { for _, asyncSession := range s.asyncSessions { asyncSession := asyncSession // capture var var ( clonedID = s.identifierPool.Clone(params.id) clonedNS = s.identifierPool.Clone(params.namespace) clonedTags ident.TagIterator ) if params.useTags { clonedTags = params.tags.Duplicate() } select { case s.replicationSemaphore <- struct{}{}: s.workerPool.Go(func() { var err error if params.useTags { err = asyncSession.WriteTagged( clonedNS, clonedID, clonedTags, params.t, params.value, params.unit, params.annotation, ) } else { err = asyncSession.Write( clonedNS, clonedID, params.t, params.value, params.unit, params.annotation, ) } if err != nil { s.metrics.replicateError.Inc(1) s.log.Error("could not replicate write", zap.Error(err)) } else { s.metrics.replicateSuccess.Inc(1) } if s.outCh != nil { s.outCh <- err } <-s.replicationSemaphore }) s.metrics.replicateExecuted.Inc(1) default: s.metrics.replicateNotExecuted.Inc(1) } } if params.useTags { return s.session.WriteTagged( params.namespace, params.id, params.tags, params.t, params.value, params.unit, params.annotation, ) } return s.session.Write( params.namespace, params.id, params.t, params.value, params.unit, params.annotation, ) } func (s *replicatedSession) ReadClusterAvailability() (bool, error) { return s.session.ReadClusterAvailability() } func (s *replicatedSession) WriteClusterAvailability() (bool, error) { return s.session.WriteClusterAvailability() } // Write value to the database for an ID. func (s replicatedSession) Write( namespace, id ident.ID, t xtime.UnixNano, value float64, unit xtime.Unit, annotation []byte, ) error
// WriteTagged value to the database for an ID and given tags. func (s replicatedSession) WriteTagged( namespace, id ident.ID, tags ident.TagIterator, t xtime.UnixNano, value float64, unit xtime.Unit, annotation []byte, ) error { return s.replicate(replicatedParams{ namespace: namespace, id: id, t: t.Add(-s.writeTimestampOffset), value: value, unit: unit, annotation: annotation, tags: tags, useTags: true, }) } // Fetch values from the database for an ID. func (s replicatedSession) Fetch( namespace, id ident.ID, startInclusive, endExclusive xtime.UnixNano, ) (encoding.SeriesIterator, error) { return s.session.Fetch(namespace, id, startInclusive, endExclusive) } // FetchIDs values from the database for a set of IDs. func (s replicatedSession) FetchIDs( namespace ident.ID, ids ident.Iterator, startInclusive, endExclusive xtime.UnixNano, ) (encoding.SeriesIterators, error) { return s.session.FetchIDs(namespace, ids, startInclusive, endExclusive) } // Aggregate aggregates values from the database for the given set of constraints. func (s replicatedSession) Aggregate( ctx context.Context, ns ident.ID, q index.Query, opts index.AggregationOptions, ) (AggregatedTagsIterator, FetchResponseMetadata, error) { return s.session.Aggregate(ctx, ns, q, opts) } // FetchTagged resolves the provided query to known IDs, and fetches the data for them. func (s replicatedSession) FetchTagged( ctx context.Context, namespace ident.ID, q index.Query, opts index.QueryOptions, ) (encoding.SeriesIterators, FetchResponseMetadata, error) { return s.session.FetchTagged(ctx, namespace, q, opts) } // FetchTaggedIDs resolves the provided query to known IDs. func (s replicatedSession) FetchTaggedIDs( ctx context.Context, namespace ident.ID, q index.Query, opts index.QueryOptions, ) (TaggedIDsIterator, FetchResponseMetadata, error) { return s.session.FetchTaggedIDs(ctx, namespace, q, opts) } // ShardID returns the given shard for an ID for callers // to easily discern what shard is failing when operations // for given IDs begin failing. func (s replicatedSession) ShardID(id ident.ID) (uint32, error) { return s.session.ShardID(id) } // IteratorPools exposes the internal iterator pools used by the session to clients. func (s replicatedSession) IteratorPools() (encoding.IteratorPools, error) { return s.session.IteratorPools() } // Close the session. func (s replicatedSession) Close() error { err := s.session.Close() for _, as := range s.asyncSessions { if err := as.Close(); err != nil { s.log.Error("could not close async session: %v", zap.Error(err)) } } return err } // Origin returns the host that initiated the session. func (s replicatedSession) Origin() topology.Host { return s.session.Origin() } // Replicas returns the replication factor. func (s replicatedSession) Replicas() int { return s.session.Replicas() } // TopologyMap returns the current topology map. Note that the session // has a separate topology watch than the database itself, so the two // values can be out of sync and this method should not be relied upon // if the current view of the topology as seen by the database is required. func (s replicatedSession) TopologyMap() (topology.Map, error) { return s.session.TopologyMap() } // Truncate will truncate the namespace for a given shard. func (s replicatedSession) Truncate(namespace ident.ID) (int64, error) { return s.session.Truncate(namespace) } // FetchBootstrapBlocksFromPeers will fetch the most fulfilled block // for each series using the runtime configurable bootstrap level consistency. 
func (s replicatedSession) FetchBootstrapBlocksFromPeers( namespace namespace.Metadata, shard uint32, start, end xtime.UnixNano, opts result.Options, ) (result.ShardResult, error) { return s.session.FetchBootstrapBlocksFromPeers(namespace, shard, start, end, opts) } // FetchBootstrapBlocksMetadataFromPeers will fetch the blocks metadata from // available peers using the runtime configurable bootstrap level consistency. func (s replicatedSession) FetchBootstrapBlocksMetadataFromPeers( namespace ident.ID, shard uint32, start, end xtime.UnixNano, result result.Options, ) (PeerBlockMetadataIter, error) { return s.session.FetchBootstrapBlocksMetadataFromPeers(namespace, shard, start, end, result) } // FetchBlocksMetadataFromPeers will fetch the blocks metadata from // available peers. func (s replicatedSession) FetchBlocksMetadataFromPeers( namespace ident.ID, shard uint32, start, end xtime.UnixNano, consistencyLevel topology.ReadConsistencyLevel, result result.Options, ) (PeerBlockMetadataIter, error) { return s.session.FetchBlocksMetadataFromPeers(namespace, shard, start, end, consistencyLevel, result) } // FetchBlocksFromPeers will fetch the required blocks from the // peers specified. func (s replicatedSession) FetchBlocksFromPeers( namespace namespace.Metadata, shard uint32, consistencyLevel topology.ReadConsistencyLevel, metadatas []block.ReplicaMetadata, opts result.Options, ) (PeerBlocksIter, error) { return s.session.FetchBlocksFromPeers(namespace, shard, consistencyLevel, metadatas, opts) } func (s *replicatedSession) BorrowConnections( shardID uint32, fn WithBorrowConnectionFn, opts BorrowConnectionOptions, ) (BorrowConnectionsResult, error) { return s.session.BorrowConnections(shardID, fn, opts) } func (s *replicatedSession) DedicatedConnection( shardID uint32, opts DedicatedConnectionOptions, ) (rpc.TChanNode, Channel, error) { return s.session.DedicatedConnection(shardID, opts) } // Open the client session. func (s replicatedSession) Open() error { if err := s.session.Open(); err != nil { return err } for _, asyncSession := range s.asyncSessions { if err := asyncSession.Open(); err != nil { s.log.Error("could not open session to async cluster: %v", zap.Error(err)) } } return nil }
{
	return s.replicate(replicatedParams{
		namespace:  namespace,
		id:         id,
		t:          t.Add(-s.writeTimestampOffset),
		value:      value,
		unit:       unit,
		annotation: annotation,
	})
}
identifier_body
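The prefix of this row also pins the implementation relationship at compile time with `var _ clientSession = (*replicatedSession)(nil)`. A comparable zero-cost check sketched in Rust, with stand-in trait and struct names rather than anything from the Go code:

// Stand-in names; the point is the compile-time "does this type implement the interface" check.
trait ClientSession {
    fn open(&self) -> Result<(), String>;
}

struct ReplicatedSession;

impl ClientSession for ReplicatedSession {
    fn open(&self) -> Result<(), String> {
        Ok(())
    }
}

// This call only type-checks if ReplicatedSession implements ClientSession, so a regression
// is caught at compile time -- the same role the Go `var _` assignment plays.
fn assert_client_session<S: ClientSession>(_s: &S) {}

fn main() {
    let session = ReplicatedSession;
    assert_client_session(&session);
    assert!(session.open().is_ok());
}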
replicated_session.go
// Copyright (c) 2019 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package client import ( "context" "fmt" "time" "github.com/uber-go/tally" "go.uber.org/zap" "github.com/m3db/m3/src/dbnode/encoding" "github.com/m3db/m3/src/dbnode/generated/thrift/rpc" "github.com/m3db/m3/src/dbnode/namespace" "github.com/m3db/m3/src/dbnode/storage/block" "github.com/m3db/m3/src/dbnode/storage/bootstrap/result" "github.com/m3db/m3/src/dbnode/storage/index" "github.com/m3db/m3/src/dbnode/topology" "github.com/m3db/m3/src/x/ident" m3sync "github.com/m3db/m3/src/x/sync" xtime "github.com/m3db/m3/src/x/time" ) type newSessionFn func(Options) (clientSession, error) // replicatedSession is an implementation of clientSession which replicates // session read/writes to a set of clusters asynchronously. type replicatedSession struct { session clientSession asyncSessions []clientSession newSessionFn newSessionFn identifierPool ident.Pool workerPool m3sync.PooledWorkerPool replicationSemaphore chan struct{} scope tally.Scope log *zap.Logger metrics replicatedSessionMetrics outCh chan error writeTimestampOffset time.Duration } type replicatedSessionMetrics struct { replicateExecuted tally.Counter replicateNotExecuted tally.Counter replicateError tally.Counter replicateSuccess tally.Counter } func newReplicatedSessionMetrics(scope tally.Scope) replicatedSessionMetrics { return replicatedSessionMetrics{ replicateExecuted: scope.Counter("replicate.executed"), replicateNotExecuted: scope.Counter("replicate.not-executed"), replicateError: scope.Counter("replicate.error"), replicateSuccess: scope.Counter("replicate.success"), } } // Ensure replicatedSession implements the clientSession interface. 
var _ clientSession = (*replicatedSession)(nil) type replicatedSessionOption func(*replicatedSession) func withNewSessionFn(fn newSessionFn) replicatedSessionOption { return func(session *replicatedSession) { session.newSessionFn = fn } } func newReplicatedSession( opts Options, asyncOpts []Options, options ...replicatedSessionOption, ) (clientSession, error) { workerPool := opts.AsyncWriteWorkerPool() scope := opts.InstrumentOptions().MetricsScope() session := replicatedSession{ newSessionFn: newSession, identifierPool: opts.IdentifierPool(), workerPool: workerPool, replicationSemaphore: make(chan struct{}, opts.AsyncWriteMaxConcurrency()), scope: scope, log: opts.InstrumentOptions().Logger(), metrics: newReplicatedSessionMetrics(scope), writeTimestampOffset: opts.WriteTimestampOffset(), } // Apply options for _, option := range options { option(&session) } if err := session.setSession(opts); err != nil { return nil, err } if err := session.setAsyncSessions(asyncOpts); err != nil { return nil, err } return &session, nil } func (s *replicatedSession) setSession(opts Options) error { if opts.TopologyInitializer() == nil { return nil } session, err := s.newSessionFn(opts) if err != nil { return err } s.session = session return nil } func (s *replicatedSession) setAsyncSessions(opts []Options) error { sessions := make([]clientSession, 0, len(opts)) for i, oo := range opts { subscope := oo.InstrumentOptions().MetricsScope().SubScope(fmt.Sprintf("async-%d", i)) oo = oo.SetInstrumentOptions(oo.InstrumentOptions().SetMetricsScope(subscope)) session, err := s.newSessionFn(oo) if err != nil { return err } sessions = append(sessions, session) } s.asyncSessions = sessions return nil } type replicatedParams struct { namespace ident.ID id ident.ID t xtime.UnixNano value float64 unit xtime.Unit annotation []byte tags ident.TagIterator useTags bool } // NB(srobb): it would be a nicer to accept a lambda which is the fn to // be performed on all sessions, however this causes an extra allocation. 
func (s replicatedSession) replicate(params replicatedParams) error { for _, asyncSession := range s.asyncSessions { asyncSession := asyncSession // capture var var ( clonedID = s.identifierPool.Clone(params.id) clonedNS = s.identifierPool.Clone(params.namespace) clonedTags ident.TagIterator ) if params.useTags { clonedTags = params.tags.Duplicate() } select { case s.replicationSemaphore <- struct{}{}: s.workerPool.Go(func() { var err error if params.useTags { err = asyncSession.WriteTagged( clonedNS, clonedID, clonedTags, params.t, params.value, params.unit, params.annotation, ) } else { err = asyncSession.Write( clonedNS, clonedID, params.t, params.value, params.unit, params.annotation, ) } if err != nil { s.metrics.replicateError.Inc(1) s.log.Error("could not replicate write", zap.Error(err)) } else { s.metrics.replicateSuccess.Inc(1) } if s.outCh != nil { s.outCh <- err } <-s.replicationSemaphore }) s.metrics.replicateExecuted.Inc(1) default: s.metrics.replicateNotExecuted.Inc(1) } } if params.useTags { return s.session.WriteTagged( params.namespace, params.id, params.tags, params.t, params.value, params.unit, params.annotation, ) } return s.session.Write( params.namespace, params.id, params.t, params.value, params.unit, params.annotation, ) } func (s *replicatedSession) ReadClusterAvailability() (bool, error) { return s.session.ReadClusterAvailability() } func (s *replicatedSession) WriteClusterAvailability() (bool, error) { return s.session.WriteClusterAvailability() } // Write value to the database for an ID. func (s replicatedSession) Write( namespace, id ident.ID, t xtime.UnixNano, value float64, unit xtime.Unit, annotation []byte, ) error { return s.replicate(replicatedParams{ namespace: namespace, id: id, t: t.Add(-s.writeTimestampOffset), value: value, unit: unit, annotation: annotation, }) } // WriteTagged value to the database for an ID and given tags. func (s replicatedSession) WriteTagged( namespace, id ident.ID, tags ident.TagIterator, t xtime.UnixNano, value float64, unit xtime.Unit, annotation []byte, ) error { return s.replicate(replicatedParams{ namespace: namespace,
annotation: annotation, tags: tags, useTags: true, }) } // Fetch values from the database for an ID. func (s replicatedSession) Fetch( namespace, id ident.ID, startInclusive, endExclusive xtime.UnixNano, ) (encoding.SeriesIterator, error) { return s.session.Fetch(namespace, id, startInclusive, endExclusive) } // FetchIDs values from the database for a set of IDs. func (s replicatedSession) FetchIDs( namespace ident.ID, ids ident.Iterator, startInclusive, endExclusive xtime.UnixNano, ) (encoding.SeriesIterators, error) { return s.session.FetchIDs(namespace, ids, startInclusive, endExclusive) } // Aggregate aggregates values from the database for the given set of constraints. func (s replicatedSession) Aggregate( ctx context.Context, ns ident.ID, q index.Query, opts index.AggregationOptions, ) (AggregatedTagsIterator, FetchResponseMetadata, error) { return s.session.Aggregate(ctx, ns, q, opts) } // FetchTagged resolves the provided query to known IDs, and fetches the data for them. func (s replicatedSession) FetchTagged( ctx context.Context, namespace ident.ID, q index.Query, opts index.QueryOptions, ) (encoding.SeriesIterators, FetchResponseMetadata, error) { return s.session.FetchTagged(ctx, namespace, q, opts) } // FetchTaggedIDs resolves the provided query to known IDs. func (s replicatedSession) FetchTaggedIDs( ctx context.Context, namespace ident.ID, q index.Query, opts index.QueryOptions, ) (TaggedIDsIterator, FetchResponseMetadata, error) { return s.session.FetchTaggedIDs(ctx, namespace, q, opts) } // ShardID returns the given shard for an ID for callers // to easily discern what shard is failing when operations // for given IDs begin failing. func (s replicatedSession) ShardID(id ident.ID) (uint32, error) { return s.session.ShardID(id) } // IteratorPools exposes the internal iterator pools used by the session to clients. func (s replicatedSession) IteratorPools() (encoding.IteratorPools, error) { return s.session.IteratorPools() } // Close the session. func (s replicatedSession) Close() error { err := s.session.Close() for _, as := range s.asyncSessions { if err := as.Close(); err != nil { s.log.Error("could not close async session: %v", zap.Error(err)) } } return err } // Origin returns the host that initiated the session. func (s replicatedSession) Origin() topology.Host { return s.session.Origin() } // Replicas returns the replication factor. func (s replicatedSession) Replicas() int { return s.session.Replicas() } // TopologyMap returns the current topology map. Note that the session // has a separate topology watch than the database itself, so the two // values can be out of sync and this method should not be relied upon // if the current view of the topology as seen by the database is required. func (s replicatedSession) TopologyMap() (topology.Map, error) { return s.session.TopologyMap() } // Truncate will truncate the namespace for a given shard. func (s replicatedSession) Truncate(namespace ident.ID) (int64, error) { return s.session.Truncate(namespace) } // FetchBootstrapBlocksFromPeers will fetch the most fulfilled block // for each series using the runtime configurable bootstrap level consistency. 
func (s replicatedSession) FetchBootstrapBlocksFromPeers( namespace namespace.Metadata, shard uint32, start, end xtime.UnixNano, opts result.Options, ) (result.ShardResult, error) { return s.session.FetchBootstrapBlocksFromPeers(namespace, shard, start, end, opts) } // FetchBootstrapBlocksMetadataFromPeers will fetch the blocks metadata from // available peers using the runtime configurable bootstrap level consistency. func (s replicatedSession) FetchBootstrapBlocksMetadataFromPeers( namespace ident.ID, shard uint32, start, end xtime.UnixNano, result result.Options, ) (PeerBlockMetadataIter, error) { return s.session.FetchBootstrapBlocksMetadataFromPeers(namespace, shard, start, end, result) } // FetchBlocksMetadataFromPeers will fetch the blocks metadata from // available peers. func (s replicatedSession) FetchBlocksMetadataFromPeers( namespace ident.ID, shard uint32, start, end xtime.UnixNano, consistencyLevel topology.ReadConsistencyLevel, result result.Options, ) (PeerBlockMetadataIter, error) { return s.session.FetchBlocksMetadataFromPeers(namespace, shard, start, end, consistencyLevel, result) } // FetchBlocksFromPeers will fetch the required blocks from the // peers specified. func (s replicatedSession) FetchBlocksFromPeers( namespace namespace.Metadata, shard uint32, consistencyLevel topology.ReadConsistencyLevel, metadatas []block.ReplicaMetadata, opts result.Options, ) (PeerBlocksIter, error) { return s.session.FetchBlocksFromPeers(namespace, shard, consistencyLevel, metadatas, opts) } func (s *replicatedSession) BorrowConnections( shardID uint32, fn WithBorrowConnectionFn, opts BorrowConnectionOptions, ) (BorrowConnectionsResult, error) { return s.session.BorrowConnections(shardID, fn, opts) } func (s *replicatedSession) DedicatedConnection( shardID uint32, opts DedicatedConnectionOptions, ) (rpc.TChanNode, Channel, error) { return s.session.DedicatedConnection(shardID, opts) } // Open the client session. func (s replicatedSession) Open() error { if err := s.session.Open(); err != nil { return err } for _, asyncSession := range s.asyncSessions { if err := asyncSession.Open(); err != nil { s.log.Error("could not open session to async cluster: %v", zap.Error(err)) } } return nil }
id: id, t: t.Add(-s.writeTimestampOffset), value: value, unit: unit,
random_line_split
replicated_session.go
// Copyright (c) 2019 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package client import ( "context" "fmt" "time" "github.com/uber-go/tally" "go.uber.org/zap" "github.com/m3db/m3/src/dbnode/encoding" "github.com/m3db/m3/src/dbnode/generated/thrift/rpc" "github.com/m3db/m3/src/dbnode/namespace" "github.com/m3db/m3/src/dbnode/storage/block" "github.com/m3db/m3/src/dbnode/storage/bootstrap/result" "github.com/m3db/m3/src/dbnode/storage/index" "github.com/m3db/m3/src/dbnode/topology" "github.com/m3db/m3/src/x/ident" m3sync "github.com/m3db/m3/src/x/sync" xtime "github.com/m3db/m3/src/x/time" ) type newSessionFn func(Options) (clientSession, error) // replicatedSession is an implementation of clientSession which replicates // session read/writes to a set of clusters asynchronously. type replicatedSession struct { session clientSession asyncSessions []clientSession newSessionFn newSessionFn identifierPool ident.Pool workerPool m3sync.PooledWorkerPool replicationSemaphore chan struct{} scope tally.Scope log *zap.Logger metrics replicatedSessionMetrics outCh chan error writeTimestampOffset time.Duration } type replicatedSessionMetrics struct { replicateExecuted tally.Counter replicateNotExecuted tally.Counter replicateError tally.Counter replicateSuccess tally.Counter } func newReplicatedSessionMetrics(scope tally.Scope) replicatedSessionMetrics { return replicatedSessionMetrics{ replicateExecuted: scope.Counter("replicate.executed"), replicateNotExecuted: scope.Counter("replicate.not-executed"), replicateError: scope.Counter("replicate.error"), replicateSuccess: scope.Counter("replicate.success"), } } // Ensure replicatedSession implements the clientSession interface. 
var _ clientSession = (*replicatedSession)(nil) type replicatedSessionOption func(*replicatedSession) func withNewSessionFn(fn newSessionFn) replicatedSessionOption { return func(session *replicatedSession) { session.newSessionFn = fn } } func newReplicatedSession( opts Options, asyncOpts []Options, options ...replicatedSessionOption, ) (clientSession, error) { workerPool := opts.AsyncWriteWorkerPool() scope := opts.InstrumentOptions().MetricsScope() session := replicatedSession{ newSessionFn: newSession, identifierPool: opts.IdentifierPool(), workerPool: workerPool, replicationSemaphore: make(chan struct{}, opts.AsyncWriteMaxConcurrency()), scope: scope, log: opts.InstrumentOptions().Logger(), metrics: newReplicatedSessionMetrics(scope), writeTimestampOffset: opts.WriteTimestampOffset(), } // Apply options for _, option := range options { option(&session) } if err := session.setSession(opts); err != nil { return nil, err } if err := session.setAsyncSessions(asyncOpts); err != nil { return nil, err } return &session, nil } func (s *replicatedSession)
(opts Options) error { if opts.TopologyInitializer() == nil { return nil } session, err := s.newSessionFn(opts) if err != nil { return err } s.session = session return nil } func (s *replicatedSession) setAsyncSessions(opts []Options) error { sessions := make([]clientSession, 0, len(opts)) for i, oo := range opts { subscope := oo.InstrumentOptions().MetricsScope().SubScope(fmt.Sprintf("async-%d", i)) oo = oo.SetInstrumentOptions(oo.InstrumentOptions().SetMetricsScope(subscope)) session, err := s.newSessionFn(oo) if err != nil { return err } sessions = append(sessions, session) } s.asyncSessions = sessions return nil } type replicatedParams struct { namespace ident.ID id ident.ID t xtime.UnixNano value float64 unit xtime.Unit annotation []byte tags ident.TagIterator useTags bool } // NB(srobb): it would be a nicer to accept a lambda which is the fn to // be performed on all sessions, however this causes an extra allocation. func (s replicatedSession) replicate(params replicatedParams) error { for _, asyncSession := range s.asyncSessions { asyncSession := asyncSession // capture var var ( clonedID = s.identifierPool.Clone(params.id) clonedNS = s.identifierPool.Clone(params.namespace) clonedTags ident.TagIterator ) if params.useTags { clonedTags = params.tags.Duplicate() } select { case s.replicationSemaphore <- struct{}{}: s.workerPool.Go(func() { var err error if params.useTags { err = asyncSession.WriteTagged( clonedNS, clonedID, clonedTags, params.t, params.value, params.unit, params.annotation, ) } else { err = asyncSession.Write( clonedNS, clonedID, params.t, params.value, params.unit, params.annotation, ) } if err != nil { s.metrics.replicateError.Inc(1) s.log.Error("could not replicate write", zap.Error(err)) } else { s.metrics.replicateSuccess.Inc(1) } if s.outCh != nil { s.outCh <- err } <-s.replicationSemaphore }) s.metrics.replicateExecuted.Inc(1) default: s.metrics.replicateNotExecuted.Inc(1) } } if params.useTags { return s.session.WriteTagged( params.namespace, params.id, params.tags, params.t, params.value, params.unit, params.annotation, ) } return s.session.Write( params.namespace, params.id, params.t, params.value, params.unit, params.annotation, ) } func (s *replicatedSession) ReadClusterAvailability() (bool, error) { return s.session.ReadClusterAvailability() } func (s *replicatedSession) WriteClusterAvailability() (bool, error) { return s.session.WriteClusterAvailability() } // Write value to the database for an ID. func (s replicatedSession) Write( namespace, id ident.ID, t xtime.UnixNano, value float64, unit xtime.Unit, annotation []byte, ) error { return s.replicate(replicatedParams{ namespace: namespace, id: id, t: t.Add(-s.writeTimestampOffset), value: value, unit: unit, annotation: annotation, }) } // WriteTagged value to the database for an ID and given tags. func (s replicatedSession) WriteTagged( namespace, id ident.ID, tags ident.TagIterator, t xtime.UnixNano, value float64, unit xtime.Unit, annotation []byte, ) error { return s.replicate(replicatedParams{ namespace: namespace, id: id, t: t.Add(-s.writeTimestampOffset), value: value, unit: unit, annotation: annotation, tags: tags, useTags: true, }) } // Fetch values from the database for an ID. func (s replicatedSession) Fetch( namespace, id ident.ID, startInclusive, endExclusive xtime.UnixNano, ) (encoding.SeriesIterator, error) { return s.session.Fetch(namespace, id, startInclusive, endExclusive) } // FetchIDs values from the database for a set of IDs. 
func (s replicatedSession) FetchIDs( namespace ident.ID, ids ident.Iterator, startInclusive, endExclusive xtime.UnixNano, ) (encoding.SeriesIterators, error) { return s.session.FetchIDs(namespace, ids, startInclusive, endExclusive) } // Aggregate aggregates values from the database for the given set of constraints. func (s replicatedSession) Aggregate( ctx context.Context, ns ident.ID, q index.Query, opts index.AggregationOptions, ) (AggregatedTagsIterator, FetchResponseMetadata, error) { return s.session.Aggregate(ctx, ns, q, opts) } // FetchTagged resolves the provided query to known IDs, and fetches the data for them. func (s replicatedSession) FetchTagged( ctx context.Context, namespace ident.ID, q index.Query, opts index.QueryOptions, ) (encoding.SeriesIterators, FetchResponseMetadata, error) { return s.session.FetchTagged(ctx, namespace, q, opts) } // FetchTaggedIDs resolves the provided query to known IDs. func (s replicatedSession) FetchTaggedIDs( ctx context.Context, namespace ident.ID, q index.Query, opts index.QueryOptions, ) (TaggedIDsIterator, FetchResponseMetadata, error) { return s.session.FetchTaggedIDs(ctx, namespace, q, opts) } // ShardID returns the given shard for an ID for callers // to easily discern what shard is failing when operations // for given IDs begin failing. func (s replicatedSession) ShardID(id ident.ID) (uint32, error) { return s.session.ShardID(id) } // IteratorPools exposes the internal iterator pools used by the session to clients. func (s replicatedSession) IteratorPools() (encoding.IteratorPools, error) { return s.session.IteratorPools() } // Close the session. func (s replicatedSession) Close() error { err := s.session.Close() for _, as := range s.asyncSessions { if err := as.Close(); err != nil { s.log.Error("could not close async session: %v", zap.Error(err)) } } return err } // Origin returns the host that initiated the session. func (s replicatedSession) Origin() topology.Host { return s.session.Origin() } // Replicas returns the replication factor. func (s replicatedSession) Replicas() int { return s.session.Replicas() } // TopologyMap returns the current topology map. Note that the session // has a separate topology watch than the database itself, so the two // values can be out of sync and this method should not be relied upon // if the current view of the topology as seen by the database is required. func (s replicatedSession) TopologyMap() (topology.Map, error) { return s.session.TopologyMap() } // Truncate will truncate the namespace for a given shard. func (s replicatedSession) Truncate(namespace ident.ID) (int64, error) { return s.session.Truncate(namespace) } // FetchBootstrapBlocksFromPeers will fetch the most fulfilled block // for each series using the runtime configurable bootstrap level consistency. func (s replicatedSession) FetchBootstrapBlocksFromPeers( namespace namespace.Metadata, shard uint32, start, end xtime.UnixNano, opts result.Options, ) (result.ShardResult, error) { return s.session.FetchBootstrapBlocksFromPeers(namespace, shard, start, end, opts) } // FetchBootstrapBlocksMetadataFromPeers will fetch the blocks metadata from // available peers using the runtime configurable bootstrap level consistency. 
func (s replicatedSession) FetchBootstrapBlocksMetadataFromPeers( namespace ident.ID, shard uint32, start, end xtime.UnixNano, result result.Options, ) (PeerBlockMetadataIter, error) { return s.session.FetchBootstrapBlocksMetadataFromPeers(namespace, shard, start, end, result) } // FetchBlocksMetadataFromPeers will fetch the blocks metadata from // available peers. func (s replicatedSession) FetchBlocksMetadataFromPeers( namespace ident.ID, shard uint32, start, end xtime.UnixNano, consistencyLevel topology.ReadConsistencyLevel, result result.Options, ) (PeerBlockMetadataIter, error) { return s.session.FetchBlocksMetadataFromPeers(namespace, shard, start, end, consistencyLevel, result) } // FetchBlocksFromPeers will fetch the required blocks from the // peers specified. func (s replicatedSession) FetchBlocksFromPeers( namespace namespace.Metadata, shard uint32, consistencyLevel topology.ReadConsistencyLevel, metadatas []block.ReplicaMetadata, opts result.Options, ) (PeerBlocksIter, error) { return s.session.FetchBlocksFromPeers(namespace, shard, consistencyLevel, metadatas, opts) } func (s *replicatedSession) BorrowConnections( shardID uint32, fn WithBorrowConnectionFn, opts BorrowConnectionOptions, ) (BorrowConnectionsResult, error) { return s.session.BorrowConnections(shardID, fn, opts) } func (s *replicatedSession) DedicatedConnection( shardID uint32, opts DedicatedConnectionOptions, ) (rpc.TChanNode, Channel, error) { return s.session.DedicatedConnection(shardID, opts) } // Open the client session. func (s replicatedSession) Open() error { if err := s.session.Open(); err != nil { return err } for _, asyncSession := range s.asyncSessions { if err := asyncSession.Open(); err != nil { s.log.Error("could not open session to async cluster: %v", zap.Error(err)) } } return nil }
setSession
identifier_name
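`newReplicatedSession` above accepts variadic option closures, and `withNewSessionFn` lets tests swap the session factory without changing the constructor's signature. The same shape sketched in Rust; every name here is a hypothetical stand-in for the Go identifiers:

// Default factory used in production; a test can inject a different one via an option.
fn default_new_session(endpoint: &str) -> Result<String, String> {
    Ok(format!("session->{endpoint}"))
}

type NewSessionFn = fn(&str) -> Result<String, String>;
type BuilderOption = Box<dyn FnOnce(&mut ReplicatedSession)>;

struct ReplicatedSession {
    new_session_fn: NewSessionFn,
    sessions: Vec<String>,
}

// Mirrors withNewSessionFn: an option that replaces the factory used to build sessions.
fn with_new_session_fn(f: NewSessionFn) -> BuilderOption {
    Box::new(move |s: &mut ReplicatedSession| s.new_session_fn = f)
}

// Mirrors newReplicatedSession: apply options first, then build one session per endpoint.
fn new_replicated_session(
    endpoints: &[&str],
    options: Vec<BuilderOption>,
) -> Result<ReplicatedSession, String> {
    let mut session = ReplicatedSession {
        new_session_fn: default_new_session as NewSessionFn,
        sessions: Vec::new(),
    };
    for option in options {
        option(&mut session);
    }
    for &endpoint in endpoints {
        let s = (session.new_session_fn)(endpoint)?;
        session.sessions.push(s);
    }
    Ok(session)
}

fn main() {
    // Production path: the default factory.
    let prod = new_replicated_session(&["db-a", "db-b"], Vec::new()).unwrap();
    assert_eq!(prod.sessions, vec!["session->db-a", "session->db-b"]);

    // Test path: inject a fake factory, as withNewSessionFn allows in the Go code.
    fn fake(endpoint: &str) -> Result<String, String> {
        Ok(format!("fake->{endpoint}"))
    }
    let test = new_replicated_session(&["db-a"], vec![with_new_session_fn(fake)]).unwrap();
    assert_eq!(test.sessions, vec!["fake->db-a"]);
}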
shadow_logger.rs
use std::cell::RefCell; use std::sync::mpsc::{Receiver, Sender}; use std::sync::Arc; use std::sync::{Mutex, RwLock}; use std::time::Duration; use crossbeam::queue::ArrayQueue; use log::{Level, LevelFilter, Log, Metadata, Record, SetLoggerError}; use logger as c_log; use once_cell::sync::{Lazy, OnceCell}; use shadow_shim_helper_rs::emulated_time::EmulatedTime; use shadow_shim_helper_rs::util::time::TimeParts; use crate::core::worker::Worker; use crate::host::host::HostInfo; /// Trigger an asynchronous flush when this many lines are queued. const ASYNC_FLUSH_QD_LINES_THRESHOLD: usize = 100_000; /// Performs a *synchronous* flush when this many lines are queued. i.e. if /// after reaching the `ASYNC_FLUSH_QD_LINES_THRESHOLD`, log lines are still /// coming in faster than they can actually be flushed, when we reach this limit /// we'll pause and let it finish flushing rather than letting the queue /// continue growing. const SYNC_FLUSH_QD_LINES_THRESHOLD: usize = 10 * ASYNC_FLUSH_QD_LINES_THRESHOLD; /// Logging thread flushes at least this often. const MIN_FLUSH_FREQUENCY: Duration = Duration::from_secs(10); static SHADOW_LOGGER: Lazy<ShadowLogger> = Lazy::new(ShadowLogger::new); /// Initialize the Shadow logger. pub fn init(max_log_level: LevelFilter, log_errors_to_stderr: bool) -> Result<(), SetLoggerError> { SHADOW_LOGGER.set_max_level(max_log_level); SHADOW_LOGGER.set_log_errors_to_stderr(log_errors_to_stderr); log::set_logger(&*SHADOW_LOGGER)?; // Shadow's logger has its own logic for deciding the max level (see `ShadowLogger::enabled`), // so the log crate should give us all log messages and we can decide whether to show it or not. log::set_max_level(log::LevelFilter::Trace); // Start the thread that will receive log records and flush them to output. std::thread::Builder::new() .name("shadow-logger".to_string()) .spawn(move || SHADOW_LOGGER.logger_thread_fn()) .unwrap(); // Arrange to flush the logger on panic. let default_panic_handler = std::panic::take_hook(); std::panic::set_hook(Box::new(move |panic_info| { // Attempt to flush the logger. We want to avoid a recursive panic, so // we flush the queue on the current thread instead of trying to send // a command to the logger thread (because our thread-local sender // may have already been destructed, and because the logger thread // itself may be in a bad state), and ignore errors. SHADOW_LOGGER.flush_records(None).ok(); default_panic_handler(panic_info); })); Ok(()) } /// A logger specialized for Shadow. It attaches simulation context to log /// entries (e.g. sim time, running process, etc.). It's also designed for /// high performance to accomodate heavy logging from multiple threads. pub struct ShadowLogger { // Channel used to send commands to the logger's thread. // // The Sender half of a channel isn't Sync, so we must protect it with a // Mutex to make ShadowLogger be Sync. This is only accessed once per // thread, though, to clone into the thread-local SENDER. command_sender: Mutex<Sender<LoggerCommand>>, // Like the sender, needs a Mutex for ShadowLogger to be Sync. // The Mutex is only locked once though by the logger thread, which keeps // it locked for as long as it's running. command_receiver: Mutex<Receiver<LoggerCommand>>, // A lock-free queue for individual log records. We don't put the records // themselves in the `command_sender`, because `Sender` doesn't support // getting the queue length. Conversely we don't put commands in this queue // because it doesn't support blocking operations. 
// // The size is roughly SYNC_FLUSH_QD_LINES_THRESHOLD * // size_of<ShadowLogRecord>; we might want to consider SegQueue (which grows // and shrinks dynamically) instead if we ever make SYNC_FLUSH_QD_LINES_THRESHOLD very // large. records: ArrayQueue<ShadowLogRecord>, // When false, sends a (still-asynchronous) flush command to the logger // thread every time a record is pushed into `records`. buffering_enabled: RwLock<bool>, // The maximum log level, unless overridden by a host-specific log level. max_log_level: OnceCell<LevelFilter>, // Whether to log errors to stderr in addition to stdout. log_errors_to_stderr: OnceCell<bool>, } thread_local!(static SENDER: RefCell<Option<Sender<LoggerCommand>>> = RefCell::new(None)); thread_local!(static THREAD_NAME: Lazy<String> = Lazy::new(|| { get_thread_name() })); thread_local!(static THREAD_ID: Lazy<nix::unistd::Pid> = Lazy::new(|| { nix::unistd::gettid() })); fn get_thread_name() -> String { let mut thread_name = Vec::<i8>::with_capacity(16); let res = unsafe { thread_name.set_len(thread_name.capacity()); // ~infallible when host_name is at least 16 bytes. libc::pthread_getname_np( libc::pthread_self(), thread_name.as_mut_ptr(), thread_name.len(), ) }; // The most likely cause of failure is a bug in the caller. debug_assert_eq!(res, 0, "pthread_getname_np: {}", nix::errno::from_i32(res)); if res == 0 { // SAFETY: We just initialized the input buffer `thread_name`, and // `thread_name_cstr` won't outlive it. let thread_name_cstr = unsafe { std::ffi::CStr::from_ptr(thread_name.as_ptr()) }; return thread_name_cstr.to_owned().to_string_lossy().to_string(); } // Another potential reason for failure is if it couldn't open // /proc/self/task/[tid]/comm. We're probably in a bad state anyway if that // happens, but try to recover anyway. // Empty string String::new() } impl ShadowLogger { fn new() -> ShadowLogger { let (sender, receiver) = std::sync::mpsc::channel(); ShadowLogger { records: ArrayQueue::new(SYNC_FLUSH_QD_LINES_THRESHOLD), command_sender: Mutex::new(sender), command_receiver: Mutex::new(receiver), buffering_enabled: RwLock::new(false), max_log_level: OnceCell::new(), log_errors_to_stderr: OnceCell::new(), } } // Function executed by the logger's helper thread, onto which we offload as // much work as we can. fn logger_thread_fn(&self) { let command_receiver = self.command_receiver.lock().unwrap(); loop { use std::sync::mpsc::RecvTimeoutError; match command_receiver.recv_timeout(MIN_FLUSH_FREQUENCY) { Ok(LoggerCommand::Flush(done_sender)) => self.flush_records(done_sender).unwrap(), Err(RecvTimeoutError::Timeout) => { // Flush self.flush_records(None).unwrap(); } Err(e) => panic!("Unexpected error {}", e), } } } // Function called by the logger's helper thread to flush the contents of // self.records. If `done_sender` is provided, it's notified after the flush // has completed. fn flush_records(&self, done_sender: Option<Sender<()>>) -> std::io::Result<()> { use std::io::Write; // Only flush records that are already in the queue, not ones that // arrive while we're flushing. Otherwise callers who perform a // synchronous flush (whether this flush operation or another one that // arrives while we're flushing) will be left waiting longer than // necessary. Also keeps us from holding the stdout lock indefinitely. 
let mut toflush = self.records.len(); let stdout_unlocked = std::io::stdout(); let stdout_locked = stdout_unlocked.lock(); let mut stdout = std::io::BufWriter::new(stdout_locked); while toflush > 0 { let record = match self.records.pop() { Some(r) => r, None => { // This can happen if another thread panics while the // logging thread is flushing. In that case both threads // will be consuming from the queue. break; } }; toflush -= 1; if record.level <= Level::Error && *self.log_errors_to_stderr.get().unwrap() { // Send to both stdout and stderr. let stderr_unlocked = std::io::stderr(); let stderr_locked = stderr_unlocked.lock(); let mut stderr = std::io::BufWriter::new(stderr_locked); let line = format!("{record}"); write!(stdout, "{line}")?; write!(stderr, "{line}")?; } else { write!(stdout, "{record}")?; } } if let Some(done_sender) = done_sender { // We can't log from this thread without risking deadlock, so in the // unlikely case that the calling thread has gone away, just print // directly. done_sender.send(()).unwrap_or_else(|e| { println!( "WARNING: Logger couldn't notify calling thread: {:?}", e ) }); } Ok(()) } /// When disabled, the logger thread is notified to write each record as /// soon as it's created. The calling thread still isn't blocked on the /// record actually being written, though. pub fn set_buffering_enabled(&self, buffering_enabled: bool) { let mut writer = self.buffering_enabled.write().unwrap(); *writer = buffering_enabled; } /// If the maximum log level has not yet been set, returns `LevelFilter::Trace`. pub fn max_level(&self) -> LevelFilter { self.max_log_level .get() .copied() .unwrap_or(LevelFilter::Trace) } /// Set the default maximum log level, but this can be overridden per-host. Is only intended to /// be called from `init()`. Will panic if called more than once. fn set_max_level(&self, level: LevelFilter) { self.max_log_level.set(level).unwrap() } /// Set whether to log errors to stderr in addition to stdout. /// /// Is only intended to be called from `init()`. Will panic if called more /// than once. fn set_log_errors_to_stderr(&self, val: bool) { self.log_errors_to_stderr.set(val).unwrap() } // Send a flush command to the logger thread. fn flush_impl(&self, notify_done: Option<Sender<()>>) { self.send_command(LoggerCommand::Flush(notify_done)) } // Send a flush command to the logger thread and block until it's completed. fn flush_sync(&self) { let (done_sender, done_receiver) = std::sync::mpsc::channel(); self.flush_impl(Some(done_sender)); done_receiver.recv().unwrap(); } // Send a flush command to the logger thread. fn flush_async(&self) { self.flush_impl(None); } // Send a command to the logger thread. 
fn send_command(&self, cmd: LoggerCommand) { SENDER .try_with(|thread_sender| { if thread_sender.borrow().is_none() { let lock = self.command_sender.lock().unwrap(); *thread_sender.borrow_mut() = Some(lock.clone()); } thread_sender .borrow() .as_ref() .unwrap() .send(cmd) .unwrap_or_else(|e| { println!("WARNING: Couldn't send command to logger thread: {:?}", e); }); }) .unwrap_or_else(|e| { println!( "WARNING: Couldn't get sender channel to logger thread: {:?}", e ); }); } } impl Log for ShadowLogger { fn enabled(&self, metadata: &Metadata) -> bool { let filter = match Worker::with_active_host(|host| host.info().log_level) { Some(Some(level)) => level, _ => self.max_level(), }; metadata.level() <= filter } fn log(&self, record: &Record) { if !self.enabled(record.metadata()) { return; } let message = std::fmt::format(*record.args()); let host_info = Worker::with_active_host(|host| host.info().clone()); let mut shadowrecord = ShadowLogRecord { level: record.level(), file: record.file_static(), module_path: record.module_path_static(), line: record.line(), message, wall_time: Duration::from_micros(unsafe { u64::try_from(c_log::logger_elapsed_micros()).unwrap() }), emu_time: Worker::current_time(), thread_name: THREAD_NAME .try_with(|name| (*name).clone()) .unwrap_or_else(|_| get_thread_name()), thread_id: THREAD_ID .try_with(|id| **id) .unwrap_or_else(|_| nix::unistd::gettid()), host_info, }; loop { match self.records.push(shadowrecord) { Ok(()) => break, Err(r) => { // Queue is full. Flush it and try again. shadowrecord = r; self.flush_sync(); } } } if record.level() == Level::Error { // Unlike in Shadow's C code, we don't abort the program on Error // logs. In Rust the same purpose is filled with `panic` and // `unwrap`. C callers will still exit or abort via the lib/logger wrapper. // // Flush *synchronously*, since we're likely about to crash one way or another. self.flush_sync(); } else if self.records.len() > ASYNC_FLUSH_QD_LINES_THRESHOLD || !*self.buffering_enabled.read().unwrap() { self.flush_async(); } } fn flush(&self) { self.flush_sync(); } } struct ShadowLogRecord { level: Level, file: Option<&'static str>, module_path: Option<&'static str>, line: Option<u32>, message: String, wall_time: Duration, emu_time: Option<EmulatedTime>, thread_name: String, thread_id: nix::unistd::Pid, host_info: Option<Arc<HostInfo>>, } impl std::fmt::Display for ShadowLogRecord { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { { let parts = TimeParts::from_nanos(self.wall_time.as_nanos()); write!( f, "{:02}:{:02}:{:02}.{:06}", parts.hours, parts.mins, parts.secs, parts.nanos / 1000 )?; } write!(f, " [{}:{}]", self.thread_id, self.thread_name)?; if let Some(emu_time) = self.emu_time { let sim_time = emu_time.duration_since(&EmulatedTime::SIMULATION_START); let parts = TimeParts::from_nanos(sim_time.as_nanos()); write!( f, " {:02}:{:02}:{:02}.{:09}", parts.hours, parts.mins, parts.secs, parts.nanos )?; } else { write!(f, " n/a")?; } write!(f, " [{level}]", level = self.level)?; if let Some(host) = &self.host_info { write!( f, " [{hostname}:{ip}]", hostname = host.name, ip = host.default_ip, )?; } else { write!(f, " [n/a]",)?; } write!( f, " [{file}:", file = self .file .map(|f| if let Some(sep_pos) = f.rfind('/') { &f[(sep_pos + 1)..] 
} else { f }) .unwrap_or("n/a"), )?; if let Some(line) = self.line { write!(f, "{line}", line = line)?; } else { write!(f, "n/a")?; } writeln!( f, "] [{module}] {msg}", module = self.module_path.unwrap_or("n/a"), msg = self.message )?; Ok(()) } } enum LoggerCommand { // Flush; takes an optional one-shot channel to notify that the flush has completed. Flush(Option<Sender<()>>), } pub fn set_buffering_enabled(buffering_enabled: bool) { SHADOW_LOGGER.set_buffering_enabled(buffering_enabled); } mod export { use super::*; /// When disabled, the logger thread is notified to write each record as /// soon as it's created. The calling thread still isn't blocked on the /// record actually being written, though. #[no_mangle] pub unsafe extern "C" fn shadow_logger_setEnableBuffering(buffering_enabled: i32)
}
{ set_buffering_enabled(buffering_enabled != 0) }
identifier_body
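The ShadowLogger entries above queue records in a bounded ArrayQueue and apply two thresholds: an asynchronous flush request once ASYNC_FLUSH_QD_LINES_THRESHOLD lines are queued, and a blocking flush at ten times that, so producers cannot outrun the writer indefinitely. A minimal sketch of that policy follows; request_flush is a placeholder for handing work to the logger thread, not the crate's API.

from collections import deque

ASYNC_FLUSH_THRESHOLD = 100_000
SYNC_FLUSH_THRESHOLD = 10 * ASYNC_FLUSH_THRESHOLD

queue = deque()

def request_flush(wait: bool) -> None:
    # Placeholder for the writer thread; wait=True would block the caller
    # until everything currently queued has been written out.
    drained = len(queue)
    queue.clear()
    print(f"flushed {drained} records (wait={wait})")

def log_record(record: str, buffering_enabled: bool = True) -> None:
    queue.append(record)
    if len(queue) >= SYNC_FLUSH_THRESHOLD:
        request_flush(wait=True)       # producer pauses until the backlog drains
    elif len(queue) >= ASYNC_FLUSH_THRESHOLD or not buffering_enabled:
        request_flush(wait=False)      # fire-and-forget flush request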
shadow_logger.rs
use std::cell::RefCell; use std::sync::mpsc::{Receiver, Sender}; use std::sync::Arc; use std::sync::{Mutex, RwLock}; use std::time::Duration; use crossbeam::queue::ArrayQueue; use log::{Level, LevelFilter, Log, Metadata, Record, SetLoggerError}; use logger as c_log; use once_cell::sync::{Lazy, OnceCell}; use shadow_shim_helper_rs::emulated_time::EmulatedTime; use shadow_shim_helper_rs::util::time::TimeParts; use crate::core::worker::Worker; use crate::host::host::HostInfo; /// Trigger an asynchronous flush when this many lines are queued. const ASYNC_FLUSH_QD_LINES_THRESHOLD: usize = 100_000; /// Performs a *synchronous* flush when this many lines are queued. i.e. if /// after reaching the `ASYNC_FLUSH_QD_LINES_THRESHOLD`, log lines are still /// coming in faster than they can actually be flushed, when we reach this limit /// we'll pause and let it finish flushing rather than letting the queue /// continue growing. const SYNC_FLUSH_QD_LINES_THRESHOLD: usize = 10 * ASYNC_FLUSH_QD_LINES_THRESHOLD; /// Logging thread flushes at least this often. const MIN_FLUSH_FREQUENCY: Duration = Duration::from_secs(10); static SHADOW_LOGGER: Lazy<ShadowLogger> = Lazy::new(ShadowLogger::new); /// Initialize the Shadow logger. pub fn init(max_log_level: LevelFilter, log_errors_to_stderr: bool) -> Result<(), SetLoggerError> { SHADOW_LOGGER.set_max_level(max_log_level); SHADOW_LOGGER.set_log_errors_to_stderr(log_errors_to_stderr); log::set_logger(&*SHADOW_LOGGER)?; // Shadow's logger has its own logic for deciding the max level (see `ShadowLogger::enabled`), // so the log crate should give us all log messages and we can decide whether to show it or not. log::set_max_level(log::LevelFilter::Trace); // Start the thread that will receive log records and flush them to output. std::thread::Builder::new() .name("shadow-logger".to_string()) .spawn(move || SHADOW_LOGGER.logger_thread_fn()) .unwrap(); // Arrange to flush the logger on panic. let default_panic_handler = std::panic::take_hook(); std::panic::set_hook(Box::new(move |panic_info| { // Attempt to flush the logger. We want to avoid a recursive panic, so // we flush the queue on the current thread instead of trying to send // a command to the logger thread (because our thread-local sender // may have already been destructed, and because the logger thread // itself may be in a bad state), and ignore errors. SHADOW_LOGGER.flush_records(None).ok(); default_panic_handler(panic_info); })); Ok(()) } /// A logger specialized for Shadow. It attaches simulation context to log /// entries (e.g. sim time, running process, etc.). It's also designed for /// high performance to accomodate heavy logging from multiple threads. pub struct ShadowLogger { // Channel used to send commands to the logger's thread. // // The Sender half of a channel isn't Sync, so we must protect it with a // Mutex to make ShadowLogger be Sync. This is only accessed once per // thread, though, to clone into the thread-local SENDER. command_sender: Mutex<Sender<LoggerCommand>>, // Like the sender, needs a Mutex for ShadowLogger to be Sync. // The Mutex is only locked once though by the logger thread, which keeps // it locked for as long as it's running. command_receiver: Mutex<Receiver<LoggerCommand>>, // A lock-free queue for individual log records. We don't put the records // themselves in the `command_sender`, because `Sender` doesn't support // getting the queue length. Conversely we don't put commands in this queue // because it doesn't support blocking operations. 
// // The size is roughly SYNC_FLUSH_QD_LINES_THRESHOLD * // size_of<ShadowLogRecord>; we might want to consider SegQueue (which grows // and shrinks dynamically) instead if we ever make SYNC_FLUSH_QD_LINES_THRESHOLD very // large. records: ArrayQueue<ShadowLogRecord>, // When false, sends a (still-asynchronous) flush command to the logger // thread every time a record is pushed into `records`. buffering_enabled: RwLock<bool>, // The maximum log level, unless overridden by a host-specific log level. max_log_level: OnceCell<LevelFilter>, // Whether to log errors to stderr in addition to stdout. log_errors_to_stderr: OnceCell<bool>, } thread_local!(static SENDER: RefCell<Option<Sender<LoggerCommand>>> = RefCell::new(None)); thread_local!(static THREAD_NAME: Lazy<String> = Lazy::new(|| { get_thread_name() })); thread_local!(static THREAD_ID: Lazy<nix::unistd::Pid> = Lazy::new(|| { nix::unistd::gettid() })); fn get_thread_name() -> String { let mut thread_name = Vec::<i8>::with_capacity(16); let res = unsafe { thread_name.set_len(thread_name.capacity()); // ~infallible when host_name is at least 16 bytes. libc::pthread_getname_np( libc::pthread_self(), thread_name.as_mut_ptr(), thread_name.len(), ) }; // The most likely cause of failure is a bug in the caller. debug_assert_eq!(res, 0, "pthread_getname_np: {}", nix::errno::from_i32(res)); if res == 0 { // SAFETY: We just initialized the input buffer `thread_name`, and // `thread_name_cstr` won't outlive it. let thread_name_cstr = unsafe { std::ffi::CStr::from_ptr(thread_name.as_ptr()) }; return thread_name_cstr.to_owned().to_string_lossy().to_string(); } // Another potential reason for failure is if it couldn't open // /proc/self/task/[tid]/comm. We're probably in a bad state anyway if that // happens, but try to recover anyway. // Empty string String::new() } impl ShadowLogger { fn new() -> ShadowLogger { let (sender, receiver) = std::sync::mpsc::channel(); ShadowLogger { records: ArrayQueue::new(SYNC_FLUSH_QD_LINES_THRESHOLD), command_sender: Mutex::new(sender), command_receiver: Mutex::new(receiver), buffering_enabled: RwLock::new(false), max_log_level: OnceCell::new(), log_errors_to_stderr: OnceCell::new(), } } // Function executed by the logger's helper thread, onto which we offload as // much work as we can. fn logger_thread_fn(&self) { let command_receiver = self.command_receiver.lock().unwrap(); loop { use std::sync::mpsc::RecvTimeoutError; match command_receiver.recv_timeout(MIN_FLUSH_FREQUENCY) { Ok(LoggerCommand::Flush(done_sender)) => self.flush_records(done_sender).unwrap(), Err(RecvTimeoutError::Timeout) => { // Flush self.flush_records(None).unwrap(); } Err(e) => panic!("Unexpected error {}", e), } } } // Function called by the logger's helper thread to flush the contents of // self.records. If `done_sender` is provided, it's notified after the flush // has completed. fn flush_records(&self, done_sender: Option<Sender<()>>) -> std::io::Result<()> { use std::io::Write; // Only flush records that are already in the queue, not ones that // arrive while we're flushing. Otherwise callers who perform a // synchronous flush (whether this flush operation or another one that // arrives while we're flushing) will be left waiting longer than // necessary. Also keeps us from holding the stdout lock indefinitely. 
let mut toflush = self.records.len(); let stdout_unlocked = std::io::stdout(); let stdout_locked = stdout_unlocked.lock(); let mut stdout = std::io::BufWriter::new(stdout_locked); while toflush > 0 { let record = match self.records.pop() { Some(r) => r, None => { // This can happen if another thread panics while the // logging thread is flushing. In that case both threads // will be consuming from the queue. break; } }; toflush -= 1; if record.level <= Level::Error && *self.log_errors_to_stderr.get().unwrap() { // Send to both stdout and stderr. let stderr_unlocked = std::io::stderr(); let stderr_locked = stderr_unlocked.lock(); let mut stderr = std::io::BufWriter::new(stderr_locked); let line = format!("{record}"); write!(stdout, "{line}")?; write!(stderr, "{line}")?; } else { write!(stdout, "{record}")?; } } if let Some(done_sender) = done_sender { // We can't log from this thread without risking deadlock, so in the // unlikely case that the calling thread has gone away, just print // directly. done_sender.send(()).unwrap_or_else(|e| { println!( "WARNING: Logger couldn't notify calling thread: {:?}", e ) }); } Ok(()) } /// When disabled, the logger thread is notified to write each record as /// soon as it's created. The calling thread still isn't blocked on the /// record actually being written, though. pub fn set_buffering_enabled(&self, buffering_enabled: bool) { let mut writer = self.buffering_enabled.write().unwrap(); *writer = buffering_enabled; } /// If the maximum log level has not yet been set, returns `LevelFilter::Trace`. pub fn max_level(&self) -> LevelFilter { self.max_log_level .get() .copied() .unwrap_or(LevelFilter::Trace) } /// Set the default maximum log level, but this can be overridden per-host. Is only intended to /// be called from `init()`. Will panic if called more than once. fn set_max_level(&self, level: LevelFilter) { self.max_log_level.set(level).unwrap() } /// Set whether to log errors to stderr in addition to stdout. /// /// Is only intended to be called from `init()`. Will panic if called more /// than once. fn set_log_errors_to_stderr(&self, val: bool) { self.log_errors_to_stderr.set(val).unwrap() } // Send a flush command to the logger thread. fn flush_impl(&self, notify_done: Option<Sender<()>>) { self.send_command(LoggerCommand::Flush(notify_done)) } // Send a flush command to the logger thread and block until it's completed. fn flush_sync(&self) { let (done_sender, done_receiver) = std::sync::mpsc::channel(); self.flush_impl(Some(done_sender)); done_receiver.recv().unwrap(); } // Send a flush command to the logger thread. fn flush_async(&self) { self.flush_impl(None); } // Send a command to the logger thread. fn send_command(&self, cmd: LoggerCommand) { SENDER .try_with(|thread_sender| { if thread_sender.borrow().is_none() { let lock = self.command_sender.lock().unwrap(); *thread_sender.borrow_mut() = Some(lock.clone()); } thread_sender .borrow() .as_ref() .unwrap() .send(cmd) .unwrap_or_else(|e| { println!("WARNING: Couldn't send command to logger thread: {:?}", e); }); }) .unwrap_or_else(|e| { println!( "WARNING: Couldn't get sender channel to logger thread: {:?}", e ); }); } } impl Log for ShadowLogger { fn
(&self, metadata: &Metadata) -> bool { let filter = match Worker::with_active_host(|host| host.info().log_level) { Some(Some(level)) => level, _ => self.max_level(), }; metadata.level() <= filter } fn log(&self, record: &Record) { if !self.enabled(record.metadata()) { return; } let message = std::fmt::format(*record.args()); let host_info = Worker::with_active_host(|host| host.info().clone()); let mut shadowrecord = ShadowLogRecord { level: record.level(), file: record.file_static(), module_path: record.module_path_static(), line: record.line(), message, wall_time: Duration::from_micros(unsafe { u64::try_from(c_log::logger_elapsed_micros()).unwrap() }), emu_time: Worker::current_time(), thread_name: THREAD_NAME .try_with(|name| (*name).clone()) .unwrap_or_else(|_| get_thread_name()), thread_id: THREAD_ID .try_with(|id| **id) .unwrap_or_else(|_| nix::unistd::gettid()), host_info, }; loop { match self.records.push(shadowrecord) { Ok(()) => break, Err(r) => { // Queue is full. Flush it and try again. shadowrecord = r; self.flush_sync(); } } } if record.level() == Level::Error { // Unlike in Shadow's C code, we don't abort the program on Error // logs. In Rust the same purpose is filled with `panic` and // `unwrap`. C callers will still exit or abort via the lib/logger wrapper. // // Flush *synchronously*, since we're likely about to crash one way or another. self.flush_sync(); } else if self.records.len() > ASYNC_FLUSH_QD_LINES_THRESHOLD || !*self.buffering_enabled.read().unwrap() { self.flush_async(); } } fn flush(&self) { self.flush_sync(); } } struct ShadowLogRecord { level: Level, file: Option<&'static str>, module_path: Option<&'static str>, line: Option<u32>, message: String, wall_time: Duration, emu_time: Option<EmulatedTime>, thread_name: String, thread_id: nix::unistd::Pid, host_info: Option<Arc<HostInfo>>, } impl std::fmt::Display for ShadowLogRecord { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { { let parts = TimeParts::from_nanos(self.wall_time.as_nanos()); write!( f, "{:02}:{:02}:{:02}.{:06}", parts.hours, parts.mins, parts.secs, parts.nanos / 1000 )?; } write!(f, " [{}:{}]", self.thread_id, self.thread_name)?; if let Some(emu_time) = self.emu_time { let sim_time = emu_time.duration_since(&EmulatedTime::SIMULATION_START); let parts = TimeParts::from_nanos(sim_time.as_nanos()); write!( f, " {:02}:{:02}:{:02}.{:09}", parts.hours, parts.mins, parts.secs, parts.nanos )?; } else { write!(f, " n/a")?; } write!(f, " [{level}]", level = self.level)?; if let Some(host) = &self.host_info { write!( f, " [{hostname}:{ip}]", hostname = host.name, ip = host.default_ip, )?; } else { write!(f, " [n/a]",)?; } write!( f, " [{file}:", file = self .file .map(|f| if let Some(sep_pos) = f.rfind('/') { &f[(sep_pos + 1)..] } else { f }) .unwrap_or("n/a"), )?; if let Some(line) = self.line { write!(f, "{line}", line = line)?; } else { write!(f, "n/a")?; } writeln!( f, "] [{module}] {msg}", module = self.module_path.unwrap_or("n/a"), msg = self.message )?; Ok(()) } } enum LoggerCommand { // Flush; takes an optional one-shot channel to notify that the flush has completed. Flush(Option<Sender<()>>), } pub fn set_buffering_enabled(buffering_enabled: bool) { SHADOW_LOGGER.set_buffering_enabled(buffering_enabled); } mod export { use super::*; /// When disabled, the logger thread is notified to write each record as /// soon as it's created. The calling thread still isn't blocked on the /// record actually being written, though. 
#[no_mangle] pub unsafe extern "C" fn shadow_logger_setEnableBuffering(buffering_enabled: i32) { set_buffering_enabled(buffering_enabled != 0) } }
enabled
identifier_name
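The middle filled in for this entry is the `enabled` check, which prefers a per-host log level when the active host defines one and otherwise falls back to the logger's global maximum. A simplified sketch of the same decision, using Python's numeric logging levels (so the comparison direction flips relative to the `log` crate, where more verbose levels compare greater); the host lookup is an assumed placeholder.

import logging
from typing import Optional

GLOBAL_MAX_LEVEL = logging.INFO

def active_host_level() -> Optional[int]:
    # Placeholder for Worker::with_active_host(|host| host.info().log_level);
    # returns None when no host is active or the host has no override.
    return None

def enabled(record_level: int) -> bool:
    threshold = active_host_level()
    if threshold is None:
        threshold = GLOBAL_MAX_LEVEL
    # Keep records at or above the effective threshold.
    return record_level >= threshold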
shadow_logger.rs
use std::cell::RefCell; use std::sync::mpsc::{Receiver, Sender}; use std::sync::Arc; use std::sync::{Mutex, RwLock}; use std::time::Duration; use crossbeam::queue::ArrayQueue; use log::{Level, LevelFilter, Log, Metadata, Record, SetLoggerError}; use logger as c_log; use once_cell::sync::{Lazy, OnceCell}; use shadow_shim_helper_rs::emulated_time::EmulatedTime; use shadow_shim_helper_rs::util::time::TimeParts; use crate::core::worker::Worker; use crate::host::host::HostInfo; /// Trigger an asynchronous flush when this many lines are queued. const ASYNC_FLUSH_QD_LINES_THRESHOLD: usize = 100_000; /// Performs a *synchronous* flush when this many lines are queued. i.e. if /// after reaching the `ASYNC_FLUSH_QD_LINES_THRESHOLD`, log lines are still /// coming in faster than they can actually be flushed, when we reach this limit /// we'll pause and let it finish flushing rather than letting the queue /// continue growing. const SYNC_FLUSH_QD_LINES_THRESHOLD: usize = 10 * ASYNC_FLUSH_QD_LINES_THRESHOLD; /// Logging thread flushes at least this often. const MIN_FLUSH_FREQUENCY: Duration = Duration::from_secs(10); static SHADOW_LOGGER: Lazy<ShadowLogger> = Lazy::new(ShadowLogger::new); /// Initialize the Shadow logger. pub fn init(max_log_level: LevelFilter, log_errors_to_stderr: bool) -> Result<(), SetLoggerError> { SHADOW_LOGGER.set_max_level(max_log_level); SHADOW_LOGGER.set_log_errors_to_stderr(log_errors_to_stderr); log::set_logger(&*SHADOW_LOGGER)?; // Shadow's logger has its own logic for deciding the max level (see `ShadowLogger::enabled`), // so the log crate should give us all log messages and we can decide whether to show it or not. log::set_max_level(log::LevelFilter::Trace); // Start the thread that will receive log records and flush them to output. std::thread::Builder::new() .name("shadow-logger".to_string()) .spawn(move || SHADOW_LOGGER.logger_thread_fn()) .unwrap(); // Arrange to flush the logger on panic. let default_panic_handler = std::panic::take_hook(); std::panic::set_hook(Box::new(move |panic_info| { // Attempt to flush the logger. We want to avoid a recursive panic, so // we flush the queue on the current thread instead of trying to send // a command to the logger thread (because our thread-local sender // may have already been destructed, and because the logger thread // itself may be in a bad state), and ignore errors. SHADOW_LOGGER.flush_records(None).ok(); default_panic_handler(panic_info); })); Ok(()) } /// A logger specialized for Shadow. It attaches simulation context to log /// entries (e.g. sim time, running process, etc.). It's also designed for /// high performance to accomodate heavy logging from multiple threads. pub struct ShadowLogger { // Channel used to send commands to the logger's thread. // // The Sender half of a channel isn't Sync, so we must protect it with a // Mutex to make ShadowLogger be Sync. This is only accessed once per // thread, though, to clone into the thread-local SENDER. command_sender: Mutex<Sender<LoggerCommand>>, // Like the sender, needs a Mutex for ShadowLogger to be Sync. // The Mutex is only locked once though by the logger thread, which keeps // it locked for as long as it's running. command_receiver: Mutex<Receiver<LoggerCommand>>, // A lock-free queue for individual log records. We don't put the records // themselves in the `command_sender`, because `Sender` doesn't support // getting the queue length. Conversely we don't put commands in this queue // because it doesn't support blocking operations. 
// // The size is roughly SYNC_FLUSH_QD_LINES_THRESHOLD * // size_of<ShadowLogRecord>; we might want to consider SegQueue (which grows // and shrinks dynamically) instead if we ever make SYNC_FLUSH_QD_LINES_THRESHOLD very // large. records: ArrayQueue<ShadowLogRecord>, // When false, sends a (still-asynchronous) flush command to the logger // thread every time a record is pushed into `records`. buffering_enabled: RwLock<bool>, // The maximum log level, unless overridden by a host-specific log level. max_log_level: OnceCell<LevelFilter>, // Whether to log errors to stderr in addition to stdout. log_errors_to_stderr: OnceCell<bool>, } thread_local!(static SENDER: RefCell<Option<Sender<LoggerCommand>>> = RefCell::new(None)); thread_local!(static THREAD_NAME: Lazy<String> = Lazy::new(|| { get_thread_name() })); thread_local!(static THREAD_ID: Lazy<nix::unistd::Pid> = Lazy::new(|| { nix::unistd::gettid() })); fn get_thread_name() -> String { let mut thread_name = Vec::<i8>::with_capacity(16); let res = unsafe { thread_name.set_len(thread_name.capacity()); // ~infallible when host_name is at least 16 bytes. libc::pthread_getname_np( libc::pthread_self(), thread_name.as_mut_ptr(), thread_name.len(), ) }; // The most likely cause of failure is a bug in the caller. debug_assert_eq!(res, 0, "pthread_getname_np: {}", nix::errno::from_i32(res)); if res == 0 { // SAFETY: We just initialized the input buffer `thread_name`, and // `thread_name_cstr` won't outlive it. let thread_name_cstr = unsafe { std::ffi::CStr::from_ptr(thread_name.as_ptr()) }; return thread_name_cstr.to_owned().to_string_lossy().to_string(); } // Another potential reason for failure is if it couldn't open // /proc/self/task/[tid]/comm. We're probably in a bad state anyway if that // happens, but try to recover anyway. // Empty string String::new() } impl ShadowLogger { fn new() -> ShadowLogger { let (sender, receiver) = std::sync::mpsc::channel(); ShadowLogger { records: ArrayQueue::new(SYNC_FLUSH_QD_LINES_THRESHOLD), command_sender: Mutex::new(sender), command_receiver: Mutex::new(receiver), buffering_enabled: RwLock::new(false), max_log_level: OnceCell::new(), log_errors_to_stderr: OnceCell::new(), } } // Function executed by the logger's helper thread, onto which we offload as // much work as we can. fn logger_thread_fn(&self) { let command_receiver = self.command_receiver.lock().unwrap(); loop { use std::sync::mpsc::RecvTimeoutError; match command_receiver.recv_timeout(MIN_FLUSH_FREQUENCY) { Ok(LoggerCommand::Flush(done_sender)) => self.flush_records(done_sender).unwrap(), Err(RecvTimeoutError::Timeout) => { // Flush self.flush_records(None).unwrap(); } Err(e) => panic!("Unexpected error {}", e), } } } // Function called by the logger's helper thread to flush the contents of // self.records. If `done_sender` is provided, it's notified after the flush // has completed. fn flush_records(&self, done_sender: Option<Sender<()>>) -> std::io::Result<()> { use std::io::Write; // Only flush records that are already in the queue, not ones that // arrive while we're flushing. Otherwise callers who perform a // synchronous flush (whether this flush operation or another one that // arrives while we're flushing) will be left waiting longer than // necessary. Also keeps us from holding the stdout lock indefinitely. 
let mut toflush = self.records.len(); let stdout_unlocked = std::io::stdout(); let stdout_locked = stdout_unlocked.lock(); let mut stdout = std::io::BufWriter::new(stdout_locked); while toflush > 0 { let record = match self.records.pop() { Some(r) => r, None => { // This can happen if another thread panics while the // logging thread is flushing. In that case both threads // will be consuming from the queue. break; } }; toflush -= 1; if record.level <= Level::Error && *self.log_errors_to_stderr.get().unwrap() { // Send to both stdout and stderr. let stderr_unlocked = std::io::stderr(); let stderr_locked = stderr_unlocked.lock(); let mut stderr = std::io::BufWriter::new(stderr_locked); let line = format!("{record}"); write!(stdout, "{line}")?; write!(stderr, "{line}")?; } else { write!(stdout, "{record}")?; } } if let Some(done_sender) = done_sender { // We can't log from this thread without risking deadlock, so in the // unlikely case that the calling thread has gone away, just print // directly. done_sender.send(()).unwrap_or_else(|e| { println!( "WARNING: Logger couldn't notify calling thread: {:?}", e ) }); } Ok(()) } /// When disabled, the logger thread is notified to write each record as /// soon as it's created. The calling thread still isn't blocked on the /// record actually being written, though. pub fn set_buffering_enabled(&self, buffering_enabled: bool) { let mut writer = self.buffering_enabled.write().unwrap(); *writer = buffering_enabled; } /// If the maximum log level has not yet been set, returns `LevelFilter::Trace`. pub fn max_level(&self) -> LevelFilter { self.max_log_level .get() .copied() .unwrap_or(LevelFilter::Trace) } /// Set the default maximum log level, but this can be overridden per-host. Is only intended to /// be called from `init()`. Will panic if called more than once. fn set_max_level(&self, level: LevelFilter) { self.max_log_level.set(level).unwrap() } /// Set whether to log errors to stderr in addition to stdout. /// /// Is only intended to be called from `init()`. Will panic if called more /// than once. fn set_log_errors_to_stderr(&self, val: bool) { self.log_errors_to_stderr.set(val).unwrap() } // Send a flush command to the logger thread. fn flush_impl(&self, notify_done: Option<Sender<()>>) { self.send_command(LoggerCommand::Flush(notify_done)) } // Send a flush command to the logger thread and block until it's completed. fn flush_sync(&self) { let (done_sender, done_receiver) = std::sync::mpsc::channel(); self.flush_impl(Some(done_sender)); done_receiver.recv().unwrap(); } // Send a flush command to the logger thread. fn flush_async(&self) { self.flush_impl(None); } // Send a command to the logger thread. fn send_command(&self, cmd: LoggerCommand) { SENDER .try_with(|thread_sender| { if thread_sender.borrow().is_none() { let lock = self.command_sender.lock().unwrap(); *thread_sender.borrow_mut() = Some(lock.clone()); } thread_sender .borrow() .as_ref() .unwrap() .send(cmd) .unwrap_or_else(|e| { println!("WARNING: Couldn't send command to logger thread: {:?}", e); }); }) .unwrap_or_else(|e| { println!( "WARNING: Couldn't get sender channel to logger thread: {:?}", e ); }); } } impl Log for ShadowLogger { fn enabled(&self, metadata: &Metadata) -> bool { let filter = match Worker::with_active_host(|host| host.info().log_level) { Some(Some(level)) => level, _ => self.max_level(), }; metadata.level() <= filter
fn log(&self, record: &Record) { if !self.enabled(record.metadata()) { return; } let message = std::fmt::format(*record.args()); let host_info = Worker::with_active_host(|host| host.info().clone()); let mut shadowrecord = ShadowLogRecord { level: record.level(), file: record.file_static(), module_path: record.module_path_static(), line: record.line(), message, wall_time: Duration::from_micros(unsafe { u64::try_from(c_log::logger_elapsed_micros()).unwrap() }), emu_time: Worker::current_time(), thread_name: THREAD_NAME .try_with(|name| (*name).clone()) .unwrap_or_else(|_| get_thread_name()), thread_id: THREAD_ID .try_with(|id| **id) .unwrap_or_else(|_| nix::unistd::gettid()), host_info, }; loop { match self.records.push(shadowrecord) { Ok(()) => break, Err(r) => { // Queue is full. Flush it and try again. shadowrecord = r; self.flush_sync(); } } } if record.level() == Level::Error { // Unlike in Shadow's C code, we don't abort the program on Error // logs. In Rust the same purpose is filled with `panic` and // `unwrap`. C callers will still exit or abort via the lib/logger wrapper. // // Flush *synchronously*, since we're likely about to crash one way or another. self.flush_sync(); } else if self.records.len() > ASYNC_FLUSH_QD_LINES_THRESHOLD || !*self.buffering_enabled.read().unwrap() { self.flush_async(); } } fn flush(&self) { self.flush_sync(); } } struct ShadowLogRecord { level: Level, file: Option<&'static str>, module_path: Option<&'static str>, line: Option<u32>, message: String, wall_time: Duration, emu_time: Option<EmulatedTime>, thread_name: String, thread_id: nix::unistd::Pid, host_info: Option<Arc<HostInfo>>, } impl std::fmt::Display for ShadowLogRecord { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { { let parts = TimeParts::from_nanos(self.wall_time.as_nanos()); write!( f, "{:02}:{:02}:{:02}.{:06}", parts.hours, parts.mins, parts.secs, parts.nanos / 1000 )?; } write!(f, " [{}:{}]", self.thread_id, self.thread_name)?; if let Some(emu_time) = self.emu_time { let sim_time = emu_time.duration_since(&EmulatedTime::SIMULATION_START); let parts = TimeParts::from_nanos(sim_time.as_nanos()); write!( f, " {:02}:{:02}:{:02}.{:09}", parts.hours, parts.mins, parts.secs, parts.nanos )?; } else { write!(f, " n/a")?; } write!(f, " [{level}]", level = self.level)?; if let Some(host) = &self.host_info { write!( f, " [{hostname}:{ip}]", hostname = host.name, ip = host.default_ip, )?; } else { write!(f, " [n/a]",)?; } write!( f, " [{file}:", file = self .file .map(|f| if let Some(sep_pos) = f.rfind('/') { &f[(sep_pos + 1)..] } else { f }) .unwrap_or("n/a"), )?; if let Some(line) = self.line { write!(f, "{line}", line = line)?; } else { write!(f, "n/a")?; } writeln!( f, "] [{module}] {msg}", module = self.module_path.unwrap_or("n/a"), msg = self.message )?; Ok(()) } } enum LoggerCommand { // Flush; takes an optional one-shot channel to notify that the flush has completed. Flush(Option<Sender<()>>), } pub fn set_buffering_enabled(buffering_enabled: bool) { SHADOW_LOGGER.set_buffering_enabled(buffering_enabled); } mod export { use super::*; /// When disabled, the logger thread is notified to write each record as /// soon as it's created. The calling thread still isn't blocked on the /// record actually being written, though. #[no_mangle] pub unsafe extern "C" fn shadow_logger_setEnableBuffering(buffering_enabled: i32) { set_buffering_enabled(buffering_enabled != 0) } }
}
random_line_split
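The Display impl in these entries prints a wall-clock timestamp with microsecond precision followed by the simulation time with nanosecond precision, or "n/a" when no simulation time is attached. A small sketch of that header formatting; the function and argument names are illustrative, not the Rust struct's fields.

def format_elapsed(nanos: int, frac_digits: int) -> str:
    secs, nanos = divmod(nanos, 1_000_000_000)
    mins, secs = divmod(secs, 60)
    hours, mins = divmod(mins, 60)
    frac = nanos // (10 ** (9 - frac_digits))
    return f"{hours:02}:{mins:02}:{secs:02}.{frac:0{frac_digits}}"

def format_header(wall_nanos: int, sim_nanos=None) -> str:
    wall = format_elapsed(wall_nanos, 6)                              # microsecond precision
    sim = format_elapsed(sim_nanos, 9) if sim_nanos is not None else "n/a"
    return f"{wall} {sim}"

# format_header(3_723_000_456_789, 1_500_000_000)
# -> "01:02:03.000456 00:00:01.500000000"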
raw_data_process.py
import logging import os import random import re from collections import defaultdict, OrderedDict import jieba from utils.json_io import write_json, read_json from utils.logger import setup_logging import subprocess ZH_SYMBOLS = set('๏ผŒใ€‚๏ผ๏ผŸใ€ใ€‘๏ผˆ๏ผ‰ใ€Šใ€‹โ€˜โ€™โ€œโ€ใ€๏ผ›๏ผš') def preprocess(sentence: str) -> str: """ ๅฏนๅฅๅญ่ฟ›่กŒ้ข„ๅค„็†๏ผŒๅŽปๆމ็ฉบๆ ผๅ’Œ่‹ฑๆ–‡็ฌฆๅท่ฝฌไธญๆ–‡ :param sentence: :return: ๅค„็†ไปฅๅŽ็š„ๆ–ฐๅฅๅญ๏ผˆstrๆ˜ฏไธๅฏๅ˜็š„๏ผŒๅช่ƒฝ่ฟ”ๅ›žๆ–ฐๅฏน่ฑก๏ผ‰ """ # ่‹ฑๆ–‡็ฌฆๅท่ฝฌไธญๆ–‡ en_symbols = ',.!?[]()<>\'\'\"\"' zh_symbols = '๏ผŒใ€‚๏ผ๏ผŸใ€ใ€‘๏ผˆ๏ผ‰ใ€Šใ€‹โ€˜โ€™โ€œโ€' trans = str.maketrans(en_symbols, zh_symbols) sentence = sentence.translate(trans) # ไฝฟ็”จๆญฃๅˆ™ๅŽปๆމๅœๆญข็ฌฆๅท return re.sub(r'[\s]', "", sentence) def get_stopwords(file_path) -> set: """ ไปŽๅœ็”จ่ฏๆ–‡ไปถไธญ่ฏปๅ–ๅœ็”จ่ฏ :param file_path: ๅœ็”จ่ฏๆ–‡ไปถ่ทฏๅพ„ :return: ๅœ็”จ่ฏๅˆ—่กจ """ stopwords = set() with open(file_path, 'r', encoding='utf8') as f: for line in f: stopwords.add(line.strip()) return stopwords class ProcessedData: def __init__(self, question, question_label, background, background_label): self.question = question self.question_label = question_label self.background = background self.background_label = background_label class Word: def __init__(self, text: str, start: int, end: int): self.text = text self.start = start self.end = end # ๅฎž็Žฐ__hash__ and __eq__ ๆ–นไพฟไน‹ๅŽ็š„setๅŽป้‡ def __hash__(self): return hash(self.text) def __eq__(self, other): return type(self) == type(other) and self.text == other.text def __str__(self): return str(self.to_dict()) def to_dict(self): return {'text': self.text, 'start': self.start, 'end': self.end} def strip(self, s): if self.text.startswith(s): self.text = self.text[len(s):] self.start += len(s) if self.text.endswith(s): self.text = self.text[:-len(s)] self.end -= len(s) class RawProcessor: def __init__(self, file_paths: [str], store_path: str, least_word_len: int = 2, use_cut: bool = False, redundant: bool = True): """ ๅค„็†ๅŽŸๅง‹ๆ•ฐๆฎ :param file_paths: ่ฆๅค„็†็š„ๆ–‡ไปถ้›†ๅˆ็š„่ทฏๅพ„ :param store_path: ๅค„็†ๅฎŒๆˆ็š„ๆ•ฐๆฎๅญ˜ๆ”พ่ทฏๅพ„ :param least_word_len: ๆŠฝๅ–็š„ๆœ€ๅฐ่ฏ้•ฟ :param use_cut: ๆ˜ฏๅฆไฝฟ็”จๅˆ†่ฏ :param redundant: ๆ˜ฏๅฆไฟ็•™ๆฏไธชๅฅๅญไธญ็š„้‡ๅค่ฏ """ self.file_paths = file_paths self.store_path = store_path self.use_cut = use_cut self.least_word_len = least_word_len self.redundant = redundant # ๅค„็†่ฟ‡็จ‹ไธญ็š„่ฏ้ข‘็ปŸ่ฎก {word : freq} self.words_counter = defaultdict(int) # ็ปŸ่ฎกๆ€ป้•ฟๅบฆ๏ผŒ่ฎพ็ฝฎๅˆ้€‚max_len {len : freq} self.length_counter = defaultdict(int) # ๅค„็†ๅฎŒ็š„word้›†ๅˆ self.processed_data: [ProcessedData] = [] def _get_same_word(self, source: str, target: str): """ ้ๅކๅŽŸๅฅๅญ๏ผŒๅœจๅŽŸๅฅๅญไธญๆๅ–ไธŽ็›ฎๆ ‡ๅฅๅญ็›ธๅŒ้ƒจๅˆ†๏ผŒๆŒ‰็…งๆœ€้•ฟๅŒน้…ๅŽŸๅˆ™ :param source: ๅŽŸๅฅๅญ :param target: ็›ฎๆ ‡ๅฅๅญ :return: word, word_index """ source_len = len(source) res_words: [Word] = [] # ่ฟ”ๅ›ž็ป“ๆžœ index = 0 while index < source_len: # ไธๆ˜ฏๆ ‡็‚น็ฌฆๅท๏ผŒไธ”ๅœจ่งฃๆžไธญ if source[index] not in ZH_SYMBOLS and source[index] in target: word_len = 1 # ๅ‘ๅŽๅปถ็”ณ while index + word_len < source_len and source[index:index + word_len + 1] in target: if source[index + word_len] not in ZH_SYMBOLS: word_len += 1 else: break word = source[index:index + word_len] if len(word) >= self.least_word_len and word not in STOPWORDS: # ๅŠ ๅ…ฅ่ฏฅ่ฏ res_words.append(Word(text=word, start=index, end=index + word_len)) index += word_len else: index += 1 return res_words def 
_get_same_words_with_cut(self, source: str, target: str): """ ไฝฟ็”จ็ป“ๅทดๅˆ†่ฏๆฅๆŠฝๅ–็›ธๅŒ่ฏ """ res_words: [Word] = [] source_cut = [Word(*word) for word in jieba.tokenize(source)] target_cut = [Word(*word) for word in jieba.tokenize(target)] for word in source_cut: if word in target_cut and word.text not in STOPWORDS and len(word.text) >= self.least_word_len: res_words.append(word) return res_words def _filter_words(self, words: [Word]): """ ๆ นๆฎไธ€ๅฎšๆกไปถๅŽป้™คๆŠฝๅ–้‡ๅˆ่ฏไธญ็š„ไธ€ไบ›่ฏ """ _words = words if self.redundant else list(set(words)) # counter_path = os.path.join(self.store_path, 'word_count.json') # if not os.path.exists(counter_path): # return _words # else: # counter_dict = read_json(counter_path) # ๅ€’ๅบ้ๅކ๏ผŒๅœจ้ๅކๆ—ถๅˆ ้™ค for i in range(len(_words) - 1, -1, -1): word = _words[i] word.strip('็š„') word.strip('ๅœจ') word.strip('ไธŽ')
trip('ไบŽ') # ้•ฟๅบฆไธ็ฌฆๅˆๆˆ–ๅชๅŒ…ๅซๆ•ฐๅญ—ๅ’Œๅญ—ๆฏ if len(word.text) < self.least_word_len or word.text.isnumeric(): _words.remove(word) return _words @staticmethod def generate_tags(sequence_len: int, words: [Word]): """ ๆ นๆฎ้•ฟๅบฆๅ’Œไฝ็ฝฎ็ดขๅผ•ไบง็”ŸBIOๆ ‡็ญพ :param sequence_len: ้•ฟๅบฆ :param words: ๆŠฝๅ–่ฏ้›†ๅˆ :return: ๆ ‡็ญพ """ tags = ['O'] * sequence_len for word in words: start = word.start tags[start] = 'B' while start < word.end - 1: start += 1 tags[start] = 'I' return tags def process_raw_data(self): """ ๅค„็†ๅŽŸๅง‹ๆ•ฐๆฎ """ # ๅพช็Žฏ่ฏปๅ–ๆ–‡ไปถ if self.use_cut: jieba.load_userdict("data/geo_words_no_normal.txt") for file_path in self.file_paths: # ่ฏปๅ–raw_data raw_data = read_json(file_path) logger.info(f"Processing {file_path} - Question count: {len(raw_data)}") background_key = 'scenario_text' if "websoft" in file_path else 'background' # ๅค„็†raw_data for idx, total_question in enumerate(raw_data): # ๆๅ–่ƒŒๆ™ฏๅ’Œ่งฃๆž background = total_question[background_key] explain = total_question['explanation'] question = total_question['question'] # ่ทณ่ฟ‡ๆฒกๆœ‰่ƒŒๆ™ฏไฟกๆฏๆˆ–่งฃๆž็š„้ข˜็›ฎ if not background or not explain or len(explain) < 50: continue # ้ข„ๅค„็† background = preprocess(background) explain = preprocess(explain) question = preprocess(question) # ๅฏปๆ‰พ้‡ๅˆ่ฏ if self.use_cut: words_background = self._get_same_words_with_cut(background, explain) words_question = self._get_same_words_with_cut(question, explain) else: words_background = self._get_same_word(background, explain) words_question = self._get_same_word(question, explain) # ่ฟ‡ๆปคๅ™ชๅฃฐ words_question = self._filter_words(words_question) words_background = self._filter_words(words_background) # ็ปŸ่ฎก่ฏ้ข‘ไฟกๆฏ for word in words_question + words_background: self.words_counter[word.text] += 1 # ็”Ÿๆˆๆ ‡็ญพ tags_back = self.generate_tags(len(background), words_background) tags_question = self.generate_tags(len(question), words_question) # ็ปŸ่ฎก้•ฟๅบฆ self.length_counter[len(question) + len(background)] += 1 # ๆทปๅŠ ๅค„็†ๅฅฝ็š„ๆ•ฐๆฎ/ self.processed_data.append(ProcessedData(question=question, question_label=tags_question, background=background, background_label=tags_back)) if idx < 5: logger.info(f"\t example_{idx + 1} - total len: {len(question) + len(background)}") logger.info(f"question_len: {len(question)}, question: {question}") logger.info("words_question: " + ' | '.join([word.text for word in words_question])) logger.info("tags_question: " + ' '.join(tags_question)) logger.info(f"background_len: {len(background)}, background: {background}") logger.info("tags_back: " + ' '.join(tags_back)) logger.info("words_back: " + ' | '.join([word.text for word in words_background])) logger.info("explain: " + explain) def write_processed_data(self, data_type: str, processed_data: [ProcessedData]): """ ๅฐ†ๅค„็†ๅฅฝ็š„ๆ•ฐๆฎๅ†™ๅ…ฅๆ–‡ไปถ :param data_type: ๅ†™ๅ…ฅๆ–‡ไปถ็š„็ง็ฑป๏ผštrain dev test :param processed_data: ๅค„็†ๅฅฝ็š„ๆ•ฐๆฎ """ with open(os.path.join(self.store_path, f'{data_type}.txt'), 'w', encoding='utf8') as f: for data in processed_data: f.write('-DOC_START-\n') for i in range(len(data.question)): f.write(data.question[i] + ' ' + data.question_label[i] + '\n') f.write('\n') for i in range(len(data.background)): f.write(data.background[i] + ' ' + data.background_label[i] + '\n') def prepare_data(self, data_split_dict): """ ๆŒ‰็…งไธ€ๅฎš็š„ๆฏ”ไพ‹ๅฐ†ๅค„็†ๅฅฝ็š„ๆ•ฐๆฎๅ†™ๅ…ฅๆŒ‡ๅฎšๆ–‡ไปถ :param data_split_dict: """ random.shuffle(self.processed_data) total_size = 
len(self.processed_data) train_size = int(data_split_dict['train'] * total_size) dev_size = int(data_split_dict['dev'] * total_size) # [a,b)ๅทฆๅผ€ๅณ้—ญๅŒบ้—ด self.write_processed_data('train', self.processed_data[:train_size]) self.write_processed_data('dev', self.processed_data[train_size:dev_size + train_size]) self.write_processed_data('test', self.processed_data[dev_size + train_size:]) logger.info(f"Prepared: total size = {total_size} | train size = {train_size} | dev size = {dev_size}") def write_counter(counter: dict, path, key=None, reverse=False): """ ๅฐ†่ฏ้ข‘็ปŸ่ฎกๆŽ’ๅบ็„ถๅŽๅ†™ๅ…ฅjsonๆ–‡ไปถ """ ordered_words_counter = OrderedDict( sorted(counter.items(), key=key, reverse=reverse)) write_json(path, ordered_words_counter) def start(): # ๆ•ฐๆฎๅญ˜ๅ‚จ่ทฏๅพ„ data_process_types = ['data_all'] cuts = ['cut', 'no_cut'] redundants = ['no_redundant'] for data_process_type in data_process_types: data_path = os.path.join('./data/raw', data_process_type) # ๅพ…ๅค„็†ๆ–‡ไปถ้›†ๅˆ files = ['53_data.json', 'spider_data.json', 'websoft_data.json'] file_paths = [] for file in files: file_paths.append(os.path.join(data_path, file)) # ๆ˜ฏๅฆๅˆ†่ฏ for cut in cuts: use_cut = (cut == 'cut') # ๆ˜ฏๅฆๅ…่ฎธ้‡ๅค for redundant in redundants: use_redundant = (redundant == 'redundant') store_data_path = os.path.join('./data/processed', data_process_type, cut, redundant) if not os.path.exists(store_data_path): os.makedirs(store_data_path) processor = RawProcessor(file_paths=file_paths, store_path=store_data_path, use_cut=use_cut, redundant=use_redundant) # ๅค„็†ๆ•ฐๆฎ processor.process_raw_data() # ๅ†™ๅ…ฅๆ•ฐๆฎ processor.prepare_data(data_split_dict={'train': 0.7, 'dev': 0.2, 'test': 0.1}) # ๅ†™ๅ…ฅ็ปŸ่ฎก่ฏ้ข‘ write_counter(processor.words_counter, os.path.join(store_data_path, 'word_count.json'), key=lambda kv: (kv[1], kv[0]), reverse=True) write_counter(processor.length_counter, os.path.join(store_data_path, 'length_count.json')) if __name__ == '__main__': STOPWORDS = get_stopwords('./data/stopwords.txt') if os.path.exists('./logs/data_info.log'): os.remove('./logs/data_info.log') setup_logging(default_path='./utils/logger_config.yaml') logger = logging.getLogger("data_logger") start() # ๆธ…็†ๅทฒไฟๅญ˜็š„ๆ•ฐๆฎ subprocess.call(r'find ./data/processed/data_all -name *_data -type f -print -exec rm {} \;', shell=True)
word.s
identifier_name
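generate_tags in the raw_data_process.py entries converts extracted spans into character-level BIO labels: every position defaults to 'O', a span's first character becomes 'B', and the rest of the span becomes 'I'. A small worked example of that scheme (spans use an exclusive end, matching Word.end):

def generate_tags(sequence_len, spans):
    tags = ['O'] * sequence_len
    for start, end in spans:          # end is exclusive
        tags[start] = 'B'
        for i in range(start + 1, end):
            tags[i] = 'I'
    return tags

# A 10-character sentence with one extracted span covering positions 0-3:
print(generate_tags(10, [(0, 4)]))
# ['B', 'I', 'I', 'I', 'O', 'O', 'O', 'O', 'O', 'O']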
raw_data_process.py
import logging import os import random import re from collections import defaultdict, OrderedDict import jieba from utils.json_io import write_json, read_json from utils.logger import setup_logging import subprocess ZH_SYMBOLS = set('๏ผŒใ€‚๏ผ๏ผŸใ€ใ€‘๏ผˆ๏ผ‰ใ€Šใ€‹โ€˜โ€™โ€œโ€ใ€๏ผ›๏ผš') def preprocess(sentence: str) -> str: """ ๅฏนๅฅๅญ่ฟ›่กŒ้ข„ๅค„็†๏ผŒๅŽปๆމ็ฉบๆ ผๅ’Œ่‹ฑๆ–‡็ฌฆๅท่ฝฌไธญๆ–‡ :param sentence: :return: ๅค„็†ไปฅๅŽ็š„ๆ–ฐๅฅๅญ๏ผˆstrๆ˜ฏไธๅฏๅ˜็š„๏ผŒๅช่ƒฝ่ฟ”ๅ›žๆ–ฐๅฏน่ฑก๏ผ‰ """ # ่‹ฑๆ–‡็ฌฆๅท่ฝฌไธญๆ–‡ en_symbols = ',.!?[]()<>\'\'\"\"' zh_symbols = '๏ผŒใ€‚๏ผ๏ผŸใ€ใ€‘๏ผˆ๏ผ‰ใ€Šใ€‹โ€˜โ€™โ€œโ€' trans = str.maketrans(en_symbols, zh_symbols) sentence = sentence.translate(trans) # ไฝฟ็”จๆญฃๅˆ™ๅŽปๆމๅœๆญข็ฌฆๅท return re.sub(r'[\s]', "", sentence) def get_stopwords(file_path) -> set: """ ไปŽๅœ็”จ่ฏๆ–‡ไปถไธญ่ฏปๅ–ๅœ็”จ่ฏ :param file_path: ๅœ็”จ่ฏๆ–‡ไปถ่ทฏๅพ„ :return: ๅœ็”จ่ฏๅˆ—่กจ """ stopwords = set() with open(file_path, 'r', encoding='utf8') as f: for line in f: stopwords.add(line.strip()) return stopwords class ProcessedData: def __init__(self, question, question_label, background, background_label): self.question = question self.question_label = question_label self.background = background self.background_label = background_label class Word: def __init__(self, text: str, start: int, end: int): self.text = text self.start = start self.end = end # ๅฎž็Žฐ__hash__ and __eq__ ๆ–นไพฟไน‹ๅŽ็š„setๅŽป้‡ def __hash__(self): return hash(self.text) def __eq__(self, other): return type(self) == type(other) and self.text == other.text def __str__(self): return str(self.to_dict()) def to_dict(self): return {'text': self.text, 'start': self.start, 'end': self.end} def strip(self, s): if self.text.startswith(s): self.text = self.text[len(s):] self.start += len(s) if self.text.endswith(s): self.text = self.text[:-len(s)] self.end -= len(s) class RawProcessor: def __init__(self, file_paths: [str], store_path: str, least_word_len: int = 2, use_cut: bool = False, redundant: bool = True): """ ๅค„็†ๅŽŸๅง‹ๆ•ฐๆฎ :param file_paths: ่ฆๅค„็†็š„ๆ–‡ไปถ้›†ๅˆ็š„่ทฏๅพ„ :param store_path: ๅค„็†ๅฎŒๆˆ็š„ๆ•ฐๆฎๅญ˜ๆ”พ่ทฏๅพ„ :param least_word_len: ๆŠฝๅ–็š„ๆœ€ๅฐ่ฏ้•ฟ :param use_cut: ๆ˜ฏๅฆไฝฟ็”จๅˆ†่ฏ :param redundant: ๆ˜ฏๅฆไฟ็•™ๆฏไธชๅฅๅญไธญ็š„้‡ๅค่ฏ """ self.file_paths = file_paths self.store_path = store_path self.use_cut = use_cut self.least_word_len = least_word_len self.redundant = redundant # ๅค„็†่ฟ‡็จ‹ไธญ็š„่ฏ้ข‘็ปŸ่ฎก {word : freq} self.words_counter = defaultdict(int) # ็ปŸ่ฎกๆ€ป้•ฟๅบฆ๏ผŒ่ฎพ็ฝฎๅˆ้€‚max_len {len : freq} self.length_counter = defaultdict(int) # ๅค„็†ๅฎŒ็š„word้›†ๅˆ self.processed_data: [ProcessedData] = [] def _get_same_word(self, source: str, target: str): """ ้ๅކๅŽŸๅฅๅญ๏ผŒๅœจๅŽŸๅฅๅญไธญๆๅ–ไธŽ็›ฎๆ ‡ๅฅๅญ็›ธๅŒ้ƒจๅˆ†๏ผŒๆŒ‰็…งๆœ€้•ฟๅŒน้…ๅŽŸๅˆ™ :param source: ๅŽŸๅฅๅญ :param target: ็›ฎๆ ‡ๅฅๅญ :return: word, word_index """ source_len = len(source) res_words: [Word] = [] # ่ฟ”ๅ›ž็ป“ๆžœ index = 0 while index < source_len: # ไธๆ˜ฏๆ ‡็‚น็ฌฆๅท๏ผŒไธ”ๅœจ่งฃๆžไธญ if source[index] not in ZH_SYMBOLS and source[index] in target: word_len = 1 # ๅ‘ๅŽๅปถ็”ณ while index + word_len < source_len and source[index:index + word_len + 1] in target: if source[index + word_len] not in ZH_SYMBOLS: word_len += 1 else: break word = source[index:index + word_len] if len(word) >= self.least_word_len and word not in STOPWORDS: # ๅŠ ๅ…ฅ่ฏฅ่ฏ res_words.append(Word(text=word, start=index, end=index + word_len)) index += word_len else: index += 1 return res_words def 
_get_same_words_with_cut(self, source: str, target: str): """ ไฝฟ็”จ็ป“ๅทดๅˆ†่ฏๆฅๆŠฝๅ–็›ธๅŒ่ฏ """ res_words: [Word] = [] source_cut = [Word(*word) for word in jieba.tokenize(source)] target_cut = [Word(*word) for word in jieba.tokenize(target)] for word in source_cut: if word in target_cut and word.text not in STOPWORDS and len(word.text) >= self.least_word_len: res_words.append(word) return res_words def _filter_words(self, words: [Word]): """ ๆ นๆฎไธ€ๅฎšๆกไปถๅŽป้™คๆŠฝๅ–้‡ๅˆ่ฏไธญ็š„ไธ€ไบ›่ฏ """ _words = words if self.redundant else list(set(words)) # counter_path = os.path.join(self.store_path, 'word_count.json') # if not os.path.exists(counter_path): # return _words # else: # counter_dict = read_json(counter_path) # ๅ€’ๅบ้ๅކ๏ผŒๅœจ้ๅކๆ—ถๅˆ ้™ค for i in range(len(_words) - 1, -1, -1): word = _words[i] word.strip('็š„') word.strip('ๅœจ') word.strip('ไธŽ') word.strip('ไบŽ') # ้•ฟๅบฆไธ็ฌฆๅˆๆˆ–ๅชๅŒ…ๅซๆ•ฐๅญ—ๅ’Œๅญ—ๆฏ if len(word.text) < self.least_word_len or word.text.isnumeric(): _words.remove(word) return _words @staticmethod def generate_tags(sequence_len: int, words: [Word]): """ ๆ นๆฎ้•ฟๅบฆๅ’Œไฝ็ฝฎ็ดขๅผ•ไบง็”ŸBIOๆ ‡็ญพ :param sequence_len: ้•ฟๅบฆ :param words: ๆŠฝๅ–่ฏ้›†ๅˆ :return: ๆ ‡็ญพ """ tags = ['O'] * sequence_len for word in words: start = word.start tags[start] = 'B' while start < word.end - 1: start += 1 tags[start] = 'I' return tags def process_raw_data(self): """ ๅค„็†ๅŽŸๅง‹ๆ•ฐๆฎ """ # ๅพช็Žฏ่ฏปๅ–ๆ–‡ไปถ if self.use_cut: jieba.load_userdict("data/geo_words_no_normal.txt") for file_path in self.file_paths: # ่ฏปๅ–raw_data raw_data = read_json(file_path) logger.info(f"Processing {file_path} - Question count: {len(raw_data)}") background_key = 'scenario_text' if "websoft" in file_path else 'background'
question = total_question['question'] # ่ทณ่ฟ‡ๆฒกๆœ‰่ƒŒๆ™ฏไฟกๆฏๆˆ–่งฃๆž็š„้ข˜็›ฎ if not background or not explain or len(explain) < 50: continue # ้ข„ๅค„็† background = preprocess(background) explain = preprocess(explain) question = preprocess(question) # ๅฏปๆ‰พ้‡ๅˆ่ฏ if self.use_cut: words_background = self._get_same_words_with_cut(background, explain) words_question = self._get_same_words_with_cut(question, explain) else: words_background = self._get_same_word(background, explain) words_question = self._get_same_word(question, explain) # ่ฟ‡ๆปคๅ™ชๅฃฐ words_question = self._filter_words(words_question) words_background = self._filter_words(words_background) # ็ปŸ่ฎก่ฏ้ข‘ไฟกๆฏ for word in words_question + words_background: self.words_counter[word.text] += 1 # ็”Ÿๆˆๆ ‡็ญพ tags_back = self.generate_tags(len(background), words_background) tags_question = self.generate_tags(len(question), words_question) # ็ปŸ่ฎก้•ฟๅบฆ self.length_counter[len(question) + len(background)] += 1 # ๆทปๅŠ ๅค„็†ๅฅฝ็š„ๆ•ฐๆฎ/ self.processed_data.append(ProcessedData(question=question, question_label=tags_question, background=background, background_label=tags_back)) if idx < 5: logger.info(f"\t example_{idx + 1} - total len: {len(question) + len(background)}") logger.info(f"question_len: {len(question)}, question: {question}") logger.info("words_question: " + ' | '.join([word.text for word in words_question])) logger.info("tags_question: " + ' '.join(tags_question)) logger.info(f"background_len: {len(background)}, background: {background}") logger.info("tags_back: " + ' '.join(tags_back)) logger.info("words_back: " + ' | '.join([word.text for word in words_background])) logger.info("explain: " + explain) def write_processed_data(self, data_type: str, processed_data: [ProcessedData]): """ ๅฐ†ๅค„็†ๅฅฝ็š„ๆ•ฐๆฎๅ†™ๅ…ฅๆ–‡ไปถ :param data_type: ๅ†™ๅ…ฅๆ–‡ไปถ็š„็ง็ฑป๏ผštrain dev test :param processed_data: ๅค„็†ๅฅฝ็š„ๆ•ฐๆฎ """ with open(os.path.join(self.store_path, f'{data_type}.txt'), 'w', encoding='utf8') as f: for data in processed_data: f.write('-DOC_START-\n') for i in range(len(data.question)): f.write(data.question[i] + ' ' + data.question_label[i] + '\n') f.write('\n') for i in range(len(data.background)): f.write(data.background[i] + ' ' + data.background_label[i] + '\n') def prepare_data(self, data_split_dict): """ ๆŒ‰็…งไธ€ๅฎš็š„ๆฏ”ไพ‹ๅฐ†ๅค„็†ๅฅฝ็š„ๆ•ฐๆฎๅ†™ๅ…ฅๆŒ‡ๅฎšๆ–‡ไปถ :param data_split_dict: """ random.shuffle(self.processed_data) total_size = len(self.processed_data) train_size = int(data_split_dict['train'] * total_size) dev_size = int(data_split_dict['dev'] * total_size) # [a,b)ๅทฆๅผ€ๅณ้—ญๅŒบ้—ด self.write_processed_data('train', self.processed_data[:train_size]) self.write_processed_data('dev', self.processed_data[train_size:dev_size + train_size]) self.write_processed_data('test', self.processed_data[dev_size + train_size:]) logger.info(f"Prepared: total size = {total_size} | train size = {train_size} | dev size = {dev_size}") def write_counter(counter: dict, path, key=None, reverse=False): """ ๅฐ†่ฏ้ข‘็ปŸ่ฎกๆŽ’ๅบ็„ถๅŽๅ†™ๅ…ฅjsonๆ–‡ไปถ """ ordered_words_counter = OrderedDict( sorted(counter.items(), key=key, reverse=reverse)) write_json(path, ordered_words_counter) def start(): # ๆ•ฐๆฎๅญ˜ๅ‚จ่ทฏๅพ„ data_process_types = ['data_all'] cuts = ['cut', 'no_cut'] redundants = ['no_redundant'] for data_process_type in data_process_types: data_path = os.path.join('./data/raw', data_process_type) # ๅพ…ๅค„็†ๆ–‡ไปถ้›†ๅˆ files = ['53_data.json', 'spider_data.json', 'websoft_data.json'] file_paths = 
[] for file in files: file_paths.append(os.path.join(data_path, file)) # ๆ˜ฏๅฆๅˆ†่ฏ for cut in cuts: use_cut = (cut == 'cut') # ๆ˜ฏๅฆๅ…่ฎธ้‡ๅค for redundant in redundants: use_redundant = (redundant == 'redundant') store_data_path = os.path.join('./data/processed', data_process_type, cut, redundant) if not os.path.exists(store_data_path): os.makedirs(store_data_path) processor = RawProcessor(file_paths=file_paths, store_path=store_data_path, use_cut=use_cut, redundant=use_redundant) # ๅค„็†ๆ•ฐๆฎ processor.process_raw_data() # ๅ†™ๅ…ฅๆ•ฐๆฎ processor.prepare_data(data_split_dict={'train': 0.7, 'dev': 0.2, 'test': 0.1}) # ๅ†™ๅ…ฅ็ปŸ่ฎก่ฏ้ข‘ write_counter(processor.words_counter, os.path.join(store_data_path, 'word_count.json'), key=lambda kv: (kv[1], kv[0]), reverse=True) write_counter(processor.length_counter, os.path.join(store_data_path, 'length_count.json')) if __name__ == '__main__': STOPWORDS = get_stopwords('./data/stopwords.txt') if os.path.exists('./logs/data_info.log'): os.remove('./logs/data_info.log') setup_logging(default_path='./utils/logger_config.yaml') logger = logging.getLogger("data_logger") start() # ๆธ…็†ๅทฒไฟๅญ˜็š„ๆ•ฐๆฎ subprocess.call(r'find ./data/processed/data_all -name *_data -type f -print -exec rm {} \;', shell=True)
# ๅค„็†raw_data for idx, total_question in enumerate(raw_data): # ๆๅ–่ƒŒๆ™ฏๅ’Œ่งฃๆž background = total_question[background_key] explain = total_question['explanation']
random_line_split
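The raw_data_process.py rows above turn overlapping (start, end) word spans into character-level BIO labels and write one character-label pair per line. A minimal standalone sketch of that labeling step, using invented example text and spans rather than real extracted words:

def bio_tags(sequence_len, spans):
    # spans: list of (start, end) character offsets, end exclusive, like the Word objects above
    tags = ['O'] * sequence_len
    for start, end in spans:
        tags[start] = 'B'
        for i in range(start + 1, end):
            tags[i] = 'I'
    return tags

sentence = "ๅคงๆฐ”็Žฏๆตๅฝฑๅ“ๆฐ”ๅ€™"          # invented example text
spans = [(0, 4)]                      # pretend "ๅคงๆฐ”็Žฏๆต" also occurs in the explanation
for char, tag in zip(sentence, bio_tags(len(sentence), spans)):
    print(char, tag)                  # one "char label" line, like the train/dev/test files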
raw_data_process.py
import logging import os import random import re from collections import defaultdict, OrderedDict import jieba from utils.json_io import write_json, read_json from utils.logger import setup_logging import subprocess ZH_SYMBOLS = set('๏ผŒใ€‚๏ผ๏ผŸใ€ใ€‘๏ผˆ๏ผ‰ใ€Šใ€‹โ€˜โ€™โ€œโ€ใ€๏ผ›๏ผš') def preprocess(sentence: str) -> str: """ ๅฏนๅฅๅญ่ฟ›่กŒ้ข„ๅค„็†๏ผŒๅŽปๆމ็ฉบๆ ผๅ’Œ่‹ฑๆ–‡็ฌฆๅท่ฝฌไธญๆ–‡ :param sentence: :return: ๅค„็†ไปฅๅŽ็š„ๆ–ฐๅฅๅญ๏ผˆstrๆ˜ฏไธๅฏๅ˜็š„๏ผŒๅช่ƒฝ่ฟ”ๅ›žๆ–ฐๅฏน่ฑก๏ผ‰ """ # ่‹ฑๆ–‡็ฌฆๅท่ฝฌไธญๆ–‡ en_symbols = ',.!?[]()<>\'\'\"\"' zh_symbols = '๏ผŒใ€‚๏ผ๏ผŸใ€ใ€‘๏ผˆ๏ผ‰ใ€Šใ€‹โ€˜โ€™โ€œโ€' trans = str.maketrans(en_symbols, zh_symbols) sentence = sentence.translate(trans) # ไฝฟ็”จๆญฃๅˆ™ๅŽปๆމๅœๆญข็ฌฆๅท return re.sub(r'[\s]', "", sentence) def get_stopwords(file_path) -> set: """ ไปŽๅœ็”จ่ฏๆ–‡ไปถไธญ่ฏปๅ–ๅœ็”จ่ฏ :param file_path: ๅœ็”จ่ฏๆ–‡ไปถ่ทฏๅพ„ :return: ๅœ็”จ่ฏๅˆ—่กจ """ stopwords = set() with open(file_path, 'r', encoding='utf8') as f: for line in f: stopwords.add(line.strip()) return stopwords class ProcessedData: def __init__(self, question, question_label, background, background_label): self.question = question self.question_label = question_label self.background = background self.background_label = background_label class Word: def __init__(self, text: str, start: int, end: int): self.text = text self.start = start self.end = end # ๅฎž็Žฐ__hash__ and __eq__ ๆ–นไพฟไน‹ๅŽ็š„setๅŽป้‡ def __hash__(self): return hash(self.text) def __eq__(self, other): return type(self) == type(other) and self.text == other.text def __str__(self): return str(self.to_dict()) def to_dict(self): return {'text': self.text, 'start': self.start, 'end': self.end} def strip(self, s): if self.text.startswith(s): self.text = self.text[len(s):] self.start += len(s) if self.text.endswith(s): self.text = self.text[:-len(s)] self.end -= len(s) class RawProcessor: def __init__(self, file_paths: [str], store_path: str, least_word_len: int = 2, use_cut: bool = False, redundant: bool = True): """ ๅค„็†ๅŽŸๅง‹ๆ•ฐๆฎ :param file_paths: ่ฆๅค„็†็š„ๆ–‡ไปถ้›†ๅˆ็š„่ทฏๅพ„ :param store_path: ๅค„็†ๅฎŒๆˆ็š„ๆ•ฐๆฎๅญ˜ๆ”พ่ทฏๅพ„ :param least_word_len: ๆŠฝๅ–็š„ๆœ€ๅฐ่ฏ้•ฟ :param use_cut: ๆ˜ฏๅฆไฝฟ็”จๅˆ†่ฏ :param redundant: ๆ˜ฏๅฆไฟ็•™ๆฏไธชๅฅๅญไธญ็š„้‡ๅค่ฏ """ self.file_paths = file_paths self.store_path = store_path self.use_cut = use_cut self.least_word_len = least_word_len self.redundant = redundant # ๅค„็†่ฟ‡็จ‹ไธญ็š„่ฏ้ข‘็ปŸ่ฎก {word : freq} self.words_counter = defaultdict(int) # ็ปŸ่ฎกๆ€ป้•ฟๅบฆ๏ผŒ่ฎพ็ฝฎๅˆ้€‚max_len {len : freq} self.length_counter = defaultdict(int) # ๅค„็†ๅฎŒ็š„word้›†ๅˆ self.processed_data: [ProcessedData] = [] def _get_same_word(self, source: str, target: str): """ ้ๅކๅŽŸๅฅๅญ๏ผŒๅœจๅŽŸๅฅๅญไธญๆๅ–ไธŽ็›ฎๆ ‡ๅฅๅญ็›ธๅŒ้ƒจๅˆ†๏ผŒๆŒ‰็…งๆœ€้•ฟๅŒน้…ๅŽŸๅˆ™ :param source: ๅŽŸๅฅๅญ :param target: ็›ฎๆ ‡ๅฅๅญ :return: word, word_index """ source_len = len(source) res_words: [Word] = [] # ่ฟ”ๅ›ž็ป“ๆžœ index = 0 while index < source_len: # ไธๆ˜ฏๆ ‡็‚น็ฌฆๅท๏ผŒไธ”ๅœจ่งฃๆžไธญ if source[index] not in ZH_SYMBOLS and source[index] in target: word_len = 1 # ๅ‘ๅŽๅปถ็”ณ while index + word_len < source_len and source[index:index + word_len + 1] in target: if source[index + word_len] not in ZH_SYMBOLS: word_len += 1 else: break word = source[index:index + word_len] if len(word) >= self.least_word_len and word not in STOPWORDS: # ๅŠ ๅ…ฅ่ฏฅ่ฏ res_words.append(Word(text=word, start=index, end=index + word_len)) index += word_len else: index += 1 return res_words def 
_get_same_words_with_cut(self, source: str, target: str): """ ไฝฟ็”จ็ป“ๅทดๅˆ†่ฏๆฅๆŠฝๅ–็›ธๅŒ่ฏ """ res_words: [Word] = [] source_cut = [Word(*word) for word in jieba.tokenize(source)] target_cut = [Word(*word) for word in jieba.tokenize(target)] for word in source_cut: if word in target_cut and word.text not in STOPWORDS and len(word.text) >= self.least_word_len: res_words.append(word) return res_words def _filter_words(self, words: [Word]): """ ๆ นๆฎไธ€ๅฎšๆกไปถๅŽป้™คๆŠฝๅ–้‡ๅˆ่ฏไธญ็š„ไธ€ไบ›่ฏ """ _words = words if self.redundant else list(set(words)) # counter_path = os.path.join(self.store_path, 'word_count.json') # if not os.path.exists(counter_path): # return _words # else: # counter_dict = read_json(counter_path) # ๅ€’ๅบ้ๅކ๏ผŒๅœจ้ๅކๆ—ถๅˆ ้™ค for i in range(len(_words) - 1, -1, -1): word = _words[i] word.strip('็š„') word.strip('ๅœจ') word.strip('ไธŽ') word.strip('ไบŽ') # ้•ฟๅบฆไธ็ฌฆๅˆๆˆ–ๅชๅŒ…ๅซๆ•ฐๅญ—ๅ’Œๅญ—ๆฏ if len(word.text) < self.least_word_len or word.text.isnumeric(): _words.remove(word) return _words @staticmethod def generate_tags(sequence_len: int, words: [Word]): """ ๆ นๆฎ้•ฟๅบฆๅ’Œไฝ็ฝฎ็ดขๅผ•ไบง็”ŸBIOๆ ‡็ญพ :param sequence_len: ้•ฟๅบฆ :param words: ๆŠฝๅ–่ฏ้›†ๅˆ :return: ๆ ‡็ญพ """ tags = ['O'] * sequence_len for word in words: start = word.start tags[start] = 'B' while start < word.end - 1: start += 1 tags[start] = 'I' return tags def process_raw_data(self): """ ๅค„็†ๅŽŸๅง‹ๆ•ฐๆฎ """ # ๅพช็Žฏ่ฏปๅ–ๆ–‡ไปถ if self.use_cut: jieba.load_userdict("data/geo_words_no_normal.txt") for file_path in self.file_paths: # ่ฏปๅ–raw_data raw_data = read_json(file_path) logger.info(f"Processing {file_path} - Question count: {len(raw_data)}") background_key = 'scenario_text' if "websoft" in file_path else 'background' # ๅค„็†raw_data for idx, total_question in enumerate(raw_data): # ๆๅ–่ƒŒๆ™ฏๅ’Œ่งฃๆž background = total_question[background_key] explain = total_question['explanation'] question = total_question['question'] # ่ทณ่ฟ‡ๆฒกๆœ‰่ƒŒๆ™ฏไฟกๆฏๆˆ–่งฃๆž็š„้ข˜็›ฎ if not background or not explain or len(explain) < 50: continue # ้ข„ๅค„็† background = preprocess(background) explain = preprocess(explain) question = preprocess(question) # ๅฏปๆ‰พ้‡ๅˆ่ฏ if self.use_cut: words_background = self._get_same_words_with_cut(background, explain) words_question = self._get_same_words_with_cut(question, explain) else: words_background = self._get_same_word(background, explain) words_question = self._get_same_word(question, explain) # ่ฟ‡ๆปคๅ™ชๅฃฐ words_question = self._filter_words(words_question) words_background = self._filter_words(words_background) # ็ปŸ่ฎก่ฏ้ข‘ไฟกๆฏ for word in words_question + words_background: self.words_counter[word.text] += 1 # ็”Ÿๆˆๆ ‡็ญพ tags_back = self.generate_tags(len(background), words_background) tags_question = self.generate_tags(len(question), words_question) # ็ปŸ่ฎก้•ฟๅบฆ self.length_counter[len(question) + len(background)] += 1 # ๆทปๅŠ ๅค„็†ๅฅฝ็š„ๆ•ฐๆฎ/ self.processed_data.append(ProcessedData(question=question, question_label=tags_question, background=background, background_label=tags_back)) if idx < 5: logger.info(f"\t example_{idx + 1} - total len: {len(question) + len(background)}") logger.info(f"question_len: {len(question)}, question: {question}") logger.info("words_question: " + ' | '.join([word.text for word in words_question])) logger.info("tags_question: " + ' '.join(tags_question)) logger.info(f"background_len: {len(background)}, background: {background}") logger.info("tags_back: " + ' '.join(tags_back)) 
logger.info("words_back: " + ' | '.join([word.text for word in words_background])) logger.info("explain: " + explain) def write_processed_data(self, data_type: str, processed_data: [ProcessedData]): """ ๅฐ†ๅค„็†ๅฅฝ็š„ๆ•ฐๆฎๅ†™ๅ…ฅๆ–‡ไปถ :param data_type: ๅ†™ๅ…ฅๆ–‡ไปถ็š„็ง็ฑป๏ผštrain dev test :param processed_data: ๅค„็†ๅฅฝ็š„ๆ•ฐๆฎ """ with open(os.path.join(self.store_path, f'{data_type}.txt'), 'w', encoding='utf8') as f: for data in processed_data: f.write('-DOC_START-\n') for i in range(len(data.question)): f.write(data.question[i] + ' ' + data.question_label[i] + '\n') f.write('\n') for i in range(len(data.background)): f.write(data.background[i] + ' ' + data.background_label[i] + '\n') def prepare_data(self, data_split_dict): """ ๆŒ‰็…งไธ€ๅฎš็š„ๆฏ”ไพ‹ๅฐ†ๅค„็†ๅฅฝ็š„ๆ•ฐๆฎๅ†™ๅ…ฅๆŒ‡ๅฎšๆ–‡ไปถ :param data_split_dict: """ random.shuffle(self.processed_data) total_size = len(self.processed_data) train_size = int(data_split_dict['train'] * total_size) dev_size = int(data_split_dict['dev'] * total_size) # [a,b)ๅทฆๅผ€ๅณ้—ญๅŒบ้—ด self.write_processed_data('train', self.processed_data[:train_size]) self.write_processed_data('dev', self.processed_data[train_size:dev_size + train_size]) self.write_processed_data('test', self.processed_data[dev_size + train_size:]) logger.info(f"Prepared: total size = {total_size} | train size = {train_size} | dev size = {dev_size}") def write_counter(counter: dict, path, key=None, reverse=False): """ ๅฐ†่ฏ้ข‘็ปŸ่ฎกๆŽ’ๅบ็„ถๅŽๅ†™ๅ…ฅjsonๆ–‡ไปถ """ ordered_words_counter = OrderedDict( sorted(counter.items(), key=key, reverse=reverse)) write_json(path, ordered_words_counter) def start(): # ๆ•ฐๆฎๅญ˜ๅ‚จ่ทฏๅพ„ data_process_types = ['data_all'] cuts = ['cut', 'no_cut'] redundants = ['no_redundant'] for data_process_type in data_process_types: data_path = os.path.join('./data/raw', data_process_type) # ๅพ…ๅค„็†ๆ–‡ไปถ้›†ๅˆ files = ['53_data.json', 'spider_data.json', 'websoft_data.json'] file_paths = [] for file in files: file_paths.append(os.path.join(data_path, file)) # ๆ˜ฏๅฆๅˆ†่ฏ for cut in cuts: use_cut = (cut == 'cut') # ๆ˜ฏๅฆๅ…่ฎธ้‡ๅค for redundant in redundants: use_redundant = (redundant == 'redundant') store_data_path = os.path.join('./data/processed', data_process_type, cut, redundant)
redundant=use_redundant) # ๅค„็†ๆ•ฐๆฎ processor.process_raw_data() # ๅ†™ๅ…ฅๆ•ฐๆฎ processor.prepare_data(data_split_dict={'train': 0.7, 'dev': 0.2, 'test': 0.1}) # ๅ†™ๅ…ฅ็ปŸ่ฎก่ฏ้ข‘ write_counter(processor.words_counter, os.path.join(store_data_path, 'word_count.json'), key=lambda kv: (kv[1], kv[0]), reverse=True) write_counter(processor.length_counter, os.path.join(store_data_path, 'length_count.json')) if __name__ == '__main__': STOPWORDS = get_stopwords('./data/stopwords.txt') if os.path.exists('./logs/data_info.log'): os.remove('./logs/data_info.log') setup_logging(default_path='./utils/logger_config.yaml') logger = logging.getLogger("data_logger") start() # ๆธ…็†ๅทฒไฟๅญ˜็š„ๆ•ฐๆฎ subprocess.call(r'find ./data/processed/data_all -name *_data -type f -print -exec rm {} \;', shell=True)
if not os.path.exists(store_data_path): os.makedirs(store_data_path) processor = RawProcessor(file_paths=file_paths, store_path=store_data_path, use_cut=use_cut,
identifier_body
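prepare_data in the rows above shuffles the processed examples and slices them into train/dev/test by ratio. A small sketch of the same shuffle-and-slice split on a plain list, assuming the 0.7/0.2/0.1 ratios passed in start():

import random

def split_data(items, train_ratio=0.7, dev_ratio=0.2, seed=42):
    # shuffle-and-slice split mirroring prepare_data above; the remainder becomes test
    items = list(items)
    random.Random(seed).shuffle(items)
    train_size = int(train_ratio * len(items))
    dev_size = int(dev_ratio * len(items))
    return (items[:train_size],
            items[train_size:train_size + dev_size],
            items[train_size + dev_size:])

train, dev, test = split_data(range(100))
print(len(train), len(dev), len(test))  # 70 20 10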
raw_data_process.py
import logging import os import random import re from collections import defaultdict, OrderedDict import jieba from utils.json_io import write_json, read_json from utils.logger import setup_logging import subprocess ZH_SYMBOLS = set('๏ผŒใ€‚๏ผ๏ผŸใ€ใ€‘๏ผˆ๏ผ‰ใ€Šใ€‹โ€˜โ€™โ€œโ€ใ€๏ผ›๏ผš') def preprocess(sentence: str) -> str: """ ๅฏนๅฅๅญ่ฟ›่กŒ้ข„ๅค„็†๏ผŒๅŽปๆމ็ฉบๆ ผๅ’Œ่‹ฑๆ–‡็ฌฆๅท่ฝฌไธญๆ–‡ :param sentence: :return: ๅค„็†ไปฅๅŽ็š„ๆ–ฐๅฅๅญ๏ผˆstrๆ˜ฏไธๅฏๅ˜็š„๏ผŒๅช่ƒฝ่ฟ”ๅ›žๆ–ฐๅฏน่ฑก๏ผ‰ """ # ่‹ฑๆ–‡็ฌฆๅท่ฝฌไธญๆ–‡ en_symbols = ',.!?[]()<>\'\'\"\"' zh_symbols = '๏ผŒใ€‚๏ผ๏ผŸใ€ใ€‘๏ผˆ๏ผ‰ใ€Šใ€‹โ€˜โ€™โ€œโ€' trans = str.maketrans(en_symbols, zh_symbols) sentence = sentence.translate(trans) # ไฝฟ็”จๆญฃๅˆ™ๅŽปๆމๅœๆญข็ฌฆๅท return re.sub(r'[\s]', "", sentence) def get_stopwords(file_path) -> set: """ ไปŽๅœ็”จ่ฏๆ–‡ไปถไธญ่ฏปๅ–ๅœ็”จ่ฏ :param file_path: ๅœ็”จ่ฏๆ–‡ไปถ่ทฏๅพ„ :return: ๅœ็”จ่ฏๅˆ—่กจ """ stopwords = set() with open(file_path, 'r', encoding='utf8') as f: for line in f: stopwords.add(line.strip()) return stopwords class ProcessedData: def __init__(self, question, question_label, background, background_label): self.question = question self.question_label = question_label self.background = background self.background_label = background_label class Word: def __init__(self, text: str, start: int, end: int): self.text = text self.start = start self.end = end # ๅฎž็Žฐ__hash__ and __eq__ ๆ–นไพฟไน‹ๅŽ็š„setๅŽป้‡ def __hash__(self): return hash(self.text) def __eq__(self, other): return type(self) == type(other) and self.text == other.text def __str__(self): return str(self.to_dict()) def to_dict(self): return {'text': self.text, 'start': self.start, 'end': self.end} def strip(self, s): if self.text.startswith(s): self.text = self.text[len(s):] self.start += len(s) if self.text.endswith(s): self.text = self.text[:-len(s)] self.end -= len(s) class RawProcessor: def __init__(self, file_paths: [str], store_path: str, least_word_len: int = 2, use_cut: bool = False, redundant: bool = True): """ ๅค„็†ๅŽŸๅง‹ๆ•ฐๆฎ :param file_paths: ่ฆๅค„็†็š„ๆ–‡ไปถ้›†ๅˆ็š„่ทฏๅพ„ :param store_path: ๅค„็†ๅฎŒๆˆ็š„ๆ•ฐๆฎๅญ˜ๆ”พ่ทฏๅพ„ :param least_word_len: ๆŠฝๅ–็š„ๆœ€ๅฐ่ฏ้•ฟ :param use_cut: ๆ˜ฏๅฆไฝฟ็”จๅˆ†่ฏ :param redundant: ๆ˜ฏๅฆไฟ็•™ๆฏไธชๅฅๅญไธญ็š„้‡ๅค่ฏ """ self.file_paths = file_paths self.store_path = store_path self.use_cut = use_cut self.least_word_len = least_word_len self.redundant = redundant # ๅค„็†่ฟ‡็จ‹ไธญ็š„่ฏ้ข‘็ปŸ่ฎก {word : freq} self.words_counter = defaultdict(int) # ็ปŸ่ฎกๆ€ป้•ฟๅบฆ๏ผŒ่ฎพ็ฝฎๅˆ้€‚max_len {len : freq} self.length_counter = defaultdict(int) # ๅค„็†ๅฎŒ็š„word้›†ๅˆ self.processed_data: [ProcessedData] = [] def _get_same_word(self, source: str, target: str): """ ้ๅކๅŽŸๅฅๅญ๏ผŒๅœจๅŽŸๅฅๅญไธญๆๅ–ไธŽ็›ฎๆ ‡ๅฅๅญ็›ธๅŒ้ƒจๅˆ†๏ผŒๆŒ‰็…งๆœ€้•ฟๅŒน้…ๅŽŸๅˆ™ :param source: ๅŽŸๅฅๅญ :param target: ็›ฎๆ ‡ๅฅๅญ :return: word, word_index """ source_len = len(source) res_words: [Word] = [] # ่ฟ”ๅ›ž็ป“ๆžœ index = 0 while index < source_len: # ไธๆ˜ฏๆ ‡็‚น็ฌฆๅท๏ผŒไธ”ๅœจ่งฃๆžไธญ if source[index] not in ZH_SYMBOLS and source[index] in target: word_len = 1 # ๅ‘ๅŽๅปถ็”ณ while index + word_len < source_len and source[index:index + word_len + 1] in target: if source[index + word_len] not in ZH_SYMBOLS: word_len += 1 else: break word = source[index:index + word_len] if len(word) >= self.least_word_len and word not in STOPWORDS: # ๅŠ ๅ…ฅ่ฏฅ่ฏ res_words.append(Word(text=word, start=index, end=index + word_len)) index += word_len else: index += 1 return res_words def 
_get_same_words_with_c
word) for word in jieba.tokenize(source)] target_cut = [Word(*word) for word in jieba.tokenize(target)] for word in source_cut: if word in target_cut and word.text not in STOPWORDS and len(word.text) >= self.least_word_len: res_words.append(word) return res_words def _filter_words(self, words: [Word]): """ ๆ นๆฎไธ€ๅฎšๆกไปถๅŽป้™คๆŠฝๅ–้‡ๅˆ่ฏไธญ็š„ไธ€ไบ›่ฏ """ _words = words if self.redundant else list(set(words)) # counter_path = os.path.join(self.store_path, 'word_count.json') # if not os.path.exists(counter_path): # return _words # else: # counter_dict = read_json(counter_path) # ๅ€’ๅบ้ๅކ๏ผŒๅœจ้ๅކๆ—ถๅˆ ้™ค for i in range(len(_words) - 1, -1, -1): word = _words[i] word.strip('็š„') word.strip('ๅœจ') word.strip('ไธŽ') word.strip('ไบŽ') # ้•ฟๅบฆไธ็ฌฆๅˆๆˆ–ๅชๅŒ…ๅซๆ•ฐๅญ—ๅ’Œๅญ—ๆฏ if len(word.text) < self.least_word_len or word.text.isnumeric(): _words.remove(word) return _words @staticmethod def generate_tags(sequence_len: int, words: [Word]): """ ๆ นๆฎ้•ฟๅบฆๅ’Œไฝ็ฝฎ็ดขๅผ•ไบง็”ŸBIOๆ ‡็ญพ :param sequence_len: ้•ฟๅบฆ :param words: ๆŠฝๅ–่ฏ้›†ๅˆ :return: ๆ ‡็ญพ """ tags = ['O'] * sequence_len for word in words: start = word.start tags[start] = 'B' while start < word.end - 1: start += 1 tags[start] = 'I' return tags def process_raw_data(self): """ ๅค„็†ๅŽŸๅง‹ๆ•ฐๆฎ """ # ๅพช็Žฏ่ฏปๅ–ๆ–‡ไปถ if self.use_cut: jieba.load_userdict("data/geo_words_no_normal.txt") for file_path in self.file_paths: # ่ฏปๅ–raw_data raw_data = read_json(file_path) logger.info(f"Processing {file_path} - Question count: {len(raw_data)}") background_key = 'scenario_text' if "websoft" in file_path else 'background' # ๅค„็†raw_data for idx, total_question in enumerate(raw_data): # ๆๅ–่ƒŒๆ™ฏๅ’Œ่งฃๆž background = total_question[background_key] explain = total_question['explanation'] question = total_question['question'] # ่ทณ่ฟ‡ๆฒกๆœ‰่ƒŒๆ™ฏไฟกๆฏๆˆ–่งฃๆž็š„้ข˜็›ฎ if not background or not explain or len(explain) < 50: continue # ้ข„ๅค„็† background = preprocess(background) explain = preprocess(explain) question = preprocess(question) # ๅฏปๆ‰พ้‡ๅˆ่ฏ if self.use_cut: words_background = self._get_same_words_with_cut(background, explain) words_question = self._get_same_words_with_cut(question, explain) else: words_background = self._get_same_word(background, explain) words_question = self._get_same_word(question, explain) # ่ฟ‡ๆปคๅ™ชๅฃฐ words_question = self._filter_words(words_question) words_background = self._filter_words(words_background) # ็ปŸ่ฎก่ฏ้ข‘ไฟกๆฏ for word in words_question + words_background: self.words_counter[word.text] += 1 # ็”Ÿๆˆๆ ‡็ญพ tags_back = self.generate_tags(len(background), words_background) tags_question = self.generate_tags(len(question), words_question) # ็ปŸ่ฎก้•ฟๅบฆ self.length_counter[len(question) + len(background)] += 1 # ๆทปๅŠ ๅค„็†ๅฅฝ็š„ๆ•ฐๆฎ/ self.processed_data.append(ProcessedData(question=question, question_label=tags_question, background=background, background_label=tags_back)) if idx < 5: logger.info(f"\t example_{idx + 1} - total len: {len(question) + len(background)}") logger.info(f"question_len: {len(question)}, question: {question}") logger.info("words_question: " + ' | '.join([word.text for word in words_question])) logger.info("tags_question: " + ' '.join(tags_question)) logger.info(f"background_len: {len(background)}, background: {background}") logger.info("tags_back: " + ' '.join(tags_back)) logger.info("words_back: " + ' | '.join([word.text for word in words_background])) logger.info("explain: " + explain) def write_processed_data(self, data_type: 
str, processed_data: [ProcessedData]): """ ๅฐ†ๅค„็†ๅฅฝ็š„ๆ•ฐๆฎๅ†™ๅ…ฅๆ–‡ไปถ :param data_type: ๅ†™ๅ…ฅๆ–‡ไปถ็š„็ง็ฑป๏ผštrain dev test :param processed_data: ๅค„็†ๅฅฝ็š„ๆ•ฐๆฎ """ with open(os.path.join(self.store_path, f'{data_type}.txt'), 'w', encoding='utf8') as f: for data in processed_data: f.write('-DOC_START-\n') for i in range(len(data.question)): f.write(data.question[i] + ' ' + data.question_label[i] + '\n') f.write('\n') for i in range(len(data.background)): f.write(data.background[i] + ' ' + data.background_label[i] + '\n') def prepare_data(self, data_split_dict): """ ๆŒ‰็…งไธ€ๅฎš็š„ๆฏ”ไพ‹ๅฐ†ๅค„็†ๅฅฝ็š„ๆ•ฐๆฎๅ†™ๅ…ฅๆŒ‡ๅฎšๆ–‡ไปถ :param data_split_dict: """ random.shuffle(self.processed_data) total_size = len(self.processed_data) train_size = int(data_split_dict['train'] * total_size) dev_size = int(data_split_dict['dev'] * total_size) # [a,b)ๅทฆๅผ€ๅณ้—ญๅŒบ้—ด self.write_processed_data('train', self.processed_data[:train_size]) self.write_processed_data('dev', self.processed_data[train_size:dev_size + train_size]) self.write_processed_data('test', self.processed_data[dev_size + train_size:]) logger.info(f"Prepared: total size = {total_size} | train size = {train_size} | dev size = {dev_size}") def write_counter(counter: dict, path, key=None, reverse=False): """ ๅฐ†่ฏ้ข‘็ปŸ่ฎกๆŽ’ๅบ็„ถๅŽๅ†™ๅ…ฅjsonๆ–‡ไปถ """ ordered_words_counter = OrderedDict( sorted(counter.items(), key=key, reverse=reverse)) write_json(path, ordered_words_counter) def start(): # ๆ•ฐๆฎๅญ˜ๅ‚จ่ทฏๅพ„ data_process_types = ['data_all'] cuts = ['cut', 'no_cut'] redundants = ['no_redundant'] for data_process_type in data_process_types: data_path = os.path.join('./data/raw', data_process_type) # ๅพ…ๅค„็†ๆ–‡ไปถ้›†ๅˆ files = ['53_data.json', 'spider_data.json', 'websoft_data.json'] file_paths = [] for file in files: file_paths.append(os.path.join(data_path, file)) # ๆ˜ฏๅฆๅˆ†่ฏ for cut in cuts: use_cut = (cut == 'cut') # ๆ˜ฏๅฆๅ…่ฎธ้‡ๅค for redundant in redundants: use_redundant = (redundant == 'redundant') store_data_path = os.path.join('./data/processed', data_process_type, cut, redundant) if not os.path.exists(store_data_path): os.makedirs(store_data_path) processor = RawProcessor(file_paths=file_paths, store_path=store_data_path, use_cut=use_cut, redundant=use_redundant) # ๅค„็†ๆ•ฐๆฎ processor.process_raw_data() # ๅ†™ๅ…ฅๆ•ฐๆฎ processor.prepare_data(data_split_dict={'train': 0.7, 'dev': 0.2, 'test': 0.1}) # ๅ†™ๅ…ฅ็ปŸ่ฎก่ฏ้ข‘ write_counter(processor.words_counter, os.path.join(store_data_path, 'word_count.json'), key=lambda kv: (kv[1], kv[0]), reverse=True) write_counter(processor.length_counter, os.path.join(store_data_path, 'length_count.json')) if __name__ == '__main__': STOPWORDS = get_stopwords('./data/stopwords.txt') if os.path.exists('./logs/data_info.log'): os.remove('./logs/data_info.log') setup_logging(default_path='./utils/logger_config.yaml') logger = logging.getLogger("data_logger") start() # ๆธ…็†ๅทฒไฟๅญ˜็š„ๆ•ฐๆฎ subprocess.call(r'find ./data/processed/data_all -name *_data -type f -print -exec rm {} \;', shell=True)
ut(self, source: str, target: str): """ Use jieba segmentation to extract overlapping words """ res_words: [Word] = [] source_cut = [Word(*
conditional_block
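_get_same_words_with_cut above relies on jieba.tokenize, which yields (word, start, end) tuples. A hedged sketch of the same overlap extraction, assuming jieba is installed; the example sentences are invented and the stopword filtering from _filter_words is omitted:

import jieba

def shared_words(source: str, target: str, min_len: int = 2):
    # words produced by jieba.tokenize(source) that also occur in target, with their offsets
    target_words = {word for word, _, _ in jieba.tokenize(target)}
    return [(word, start, end)
            for word, start, end in jieba.tokenize(source)
            if word in target_words and len(word) >= min_len]

print(shared_words("ๅญฃ้ฃŽๅฝฑๅ“้™ๆฐด็š„ๅญฃ่Š‚ๅˆ†้…", "่ฏฅๅœฐ้™ๆฐด้›†ไธญๅœจๅค็ง‹ๅญฃ"))  # expect ("้™ๆฐด", ...) in the result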
exchange_endpoint0.py
from flask import Flask, request, g from flask_restful import Resource, Api from sqlalchemy import create_engine from flask import jsonify import json import eth_account import algosdk from sqlalchemy.orm import sessionmaker from sqlalchemy.orm import scoped_session from sqlalchemy.orm import load_only from datetime import datetime import sys from models import Base, Order, Log engine = create_engine('sqlite:///orders.db') Base.metadata.bind = engine DBSession = sessionmaker(bind=engine) app = Flask(__name__) # These decorators allow you to use g.session to access the database inside the request code # g is an "application global" https://flask.palletsprojects.com/en/1.1.x/api/#application-globals @app.before_request def create_session(): g.session = scoped_session(DBSession) @app.teardown_appcontext # def shutdown_session(response_or_exc): def shutdown_session(exception=None): sys.stdout.flush() g.session.commit() g.session.remove() """ Suggested helper methods """ # check whether โ€œsigโ€ is a valid signature of json.dumps(payload), # using the signature algorithm specified by the platform field. # Be sure to verify the payload using the sender_pk. def check_sig(payload,sig): pk = payload['sender_pk'] platform = payload['platform'] payload_json = json.dumps(payload) result = False if platform == "Algorand": print("Algorand") if algosdk.util.verify_bytes(payload_json.encode('utf-8'), sig, pk): print("Algo sig verifies!") result = True elif platform == "Ethereum": print("Ethereum") eth_encoded_msg = eth_account.messages.encode_defunct(text=payload_json) if eth_account.Account.recover_message(eth_encoded_msg, signature=sig) == pk: print("Eth sig verifies!") result = True return result, payload_json # def fill_order(order,txes=[]): # pass # the inner recursive function def fill_order(): # get the order you just inserted from the DB current_order = g.session.query(Order).order_by(Order.id.desc()).first() # print("_order_id") # print(current_order.id) # Check if there are any existing orders that match and add them into a list order_list = [] orders = g.session.query(Order).filter(Order.filled == None).all() for existing_order in orders: # if ((existing_order.buy_amount != 0) and (current_order.sell_amount != 0)): if ((existing_order.buy_currency == current_order.sell_currency) and (existing_order.sell_currency == current_order.buy_currency) and (existing_order.sell_amount / existing_order.buy_amount >= current_order.buy_amount / current_order.sell_amount) and (existing_order.counterparty_id == None)): order_list.append(existing_order) # If a match is found between order and existing_order if (len(order_list) > 0): # print(" order_list_length") # print(len(order_list)) # pick the first one in the list match_order = order_list[0] # Set the filled field to be the current timestamp on both orders # Set counterparty_id to be the id of the other order match_order.filled = datetime.now() current_order.filled = datetime.now() match_order.counterparty_id = current_order.id current_order.counterparty_id = match_order.id g.session.commit() # if both orders can completely fill each other # no child order needs to be generated # If match_order is not completely filled if (current_order.sell_amount < match_order.buy_amount): # print("_match_order is not completely filled") diff = match_order.buy_amount - current_order.sell_amount exchange_rate_match = match_order.sell_amount / match_order.buy_amount sell_amount_new_match = diff * exchange_rate_match # print(match_order.id) # print(diff) # 
print(sell_amount_new_match) new_order = Order(sender_pk=match_order.sender_pk, receiver_pk=match_order.receiver_pk, buy_currency=match_order.buy_currency, sell_currency=match_order.sell_currency, buy_amount=diff, sell_amount=sell_amount_new_match, creator_id=match_order.id) g.session.add(new_order) g.session.commit() print("M") fill_order() # If current_order is not completely filled if (current_order.buy_amount > match_order.sell_amount): # print("_current_order is not completely filled") diff = current_order.buy_amount - match_order.sell_amount exchange_rate_current = current_order.buy_amount / current_order.sell_amount sell_amount_new_current = diff / exchange_rate_current # print(current_order.id) # print(diff) # print(sell_amount_new_current) new_order = Order(sender_pk=current_order.sender_pk, receiver_pk=current_order.receiver_pk, buy_currency=current_order.buy_currency, sell_currency=current_order.sell_currency, buy_amount=diff, sell_amount=sell_amount_new_current, creator_id=current_order.id) g.session.add(new_order) g.session.commit() print("C") fill_order() # Takes input dictionary d and writes it to the Log table # Hint: use json.dumps or str() to get it in a nice string form def log_message(d): create_session() order_obj = Log(message=d) g.session.add(order_obj) shutdown_session() # convert a row in DB into a dict def row2dict(row): retu
print a dictionary nicely def print_dict(d): for key, value in d.items(): print(key, ' : ', value) """ End of helper methods """ @app.route('/trade', methods=['POST']) def trade(): print("In trade endpoint") if request.method == "POST": print("--------- trade ---------") content = request.get_json(silent=True) print( f"content = {json.dumps(content)}" ) columns = [ "sender_pk", "receiver_pk", "buy_currency", "sell_currency", "buy_amount", "sell_amount", "platform" ] fields = [ "sig", "payload" ] # check whether the input contains both "sig" and "payload" for field in fields: if not field in content.keys(): print( f"{field} not received by Trade" ) print( json.dumps(content) ) log_message(content) return jsonify( False ) # check whether the input contains all 7 fields of payload for column in columns: if not column in content['payload'].keys(): print( f"{column} not received by Trade" ) print( json.dumps(content) ) log_message(content) return jsonify( False ) #Your code here #Note that you can access the database session using g.session # TODO 1: Check the signature # extract contents from json sig = content['sig'] payload = content['payload'] platform = payload['platform'] # The platform must be either โ€œAlgorandโ€ or "Ethereum". platforms = ["Algorand", "Ethereum"] if not platform in platforms: print("input platform is not Algorand or Ethereum") return jsonify(False) # check signature check_result = check_sig(payload,sig) result = check_result[0] payload_json = check_result[1] # TODO 2: Add the order to the database # TODO 4: Be sure to return jsonify(True) or jsonify(False) depending on if the method was successful # If the signature does not verify, do not insert the order into the โ€œOrderโ€ table. # Instead, insert a record into the โ€œLogโ€ table, with the message field set to be json.dumps(payload). if result is False: print("signature does NOT verify") log_message(payload_json) return jsonify(result) # If the signature verifies, store the signature, # as well as all of the fields under the โ€˜payloadโ€™ in the โ€œOrderโ€ table EXCEPT for 'platformโ€™. if result is True: print("signature verifies") create_session() order_obj = Order(sender_pk=payload['sender_pk'], receiver_pk=payload['receiver_pk'], buy_currency=payload['buy_currency'], sell_currency=payload['sell_currency'], buy_amount=payload['buy_amount'], sell_amount=payload['sell_amount'], signature=sig) g.session.add(order_obj) # TODO 3: Fill the order fill_order() shutdown_session() return jsonify(result) @app.route('/order_book') def order_book(): #Your code here #Note that you can access the database session using g.session # The โ€œ/order_bookโ€ endpoint should return a list of all orders in the database. # The response should contain a single key โ€œdataโ€ that refers to a list of orders formatted as JSON. # Each order should be a dict with (at least) the following fields # ("sender_pk", "receiver_pk", "buy_currency", "sell_currency", "buy_amount", "sell_amount", โ€œsignatureโ€). print("--------- order_book ---------") create_session() # get orders from DB into a list order_dict_list = [ row2dict(order) for order in g.session.query(Order).all() ] # add the list into a dict result = { 'data': order_dict_list } print("order book length: ") print(len(order_dict_list)) # print_dict(order_dict_list[-2]) # print_dict(order_dict_list[-1]) shutdown_session() return jsonify(result) if __name__ == '__main__': app.run(port='5002')
rn { c.name: getattr(row, c.name) for c in row.__table__.columns } #
identifier_body
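check_sig in exchange_endpoint0.py verifies a signature over json.dumps(payload) with algosdk for Algorand or eth_account for Ethereum. A self-contained sketch of the Ethereum branch, assuming eth_account is installed and using a freshly generated key instead of real exchange data:

import json
import eth_account
from eth_account.messages import encode_defunct

acct = eth_account.Account.create()                      # stand-in keypair, not a real trader
payload = {'sender_pk': acct.address, 'platform': 'Ethereum', 'buy_amount': 100}
payload_json = json.dumps(payload)

msg = encode_defunct(text=payload_json)                  # same message encoding as check_sig above
sig = eth_account.Account.sign_message(msg, private_key=acct.key).signature

recovered = eth_account.Account.recover_message(msg, signature=sig)
print(recovered == payload['sender_pk'])                 # True when the signature verifies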
exchange_endpoint0.py
from flask import Flask, request, g from flask_restful import Resource, Api from sqlalchemy import create_engine from flask import jsonify import json import eth_account import algosdk from sqlalchemy.orm import sessionmaker from sqlalchemy.orm import scoped_session from sqlalchemy.orm import load_only from datetime import datetime import sys from models import Base, Order, Log engine = create_engine('sqlite:///orders.db') Base.metadata.bind = engine DBSession = sessionmaker(bind=engine) app = Flask(__name__) # These decorators allow you to use g.session to access the database inside the request code # g is an "application global" https://flask.palletsprojects.com/en/1.1.x/api/#application-globals @app.before_request def create_session(): g.session = scoped_session(DBSession) @app.teardown_appcontext # def shutdown_session(response_or_exc): def shutdown_session(exception=None): sys.stdout.flush() g.session.commit() g.session.remove() """ Suggested helper methods """ # check whether โ€œsigโ€ is a valid signature of json.dumps(payload), # using the signature algorithm specified by the platform field. # Be sure to verify the payload using the sender_pk. def check_sig(payload,sig): pk = payload['sender_pk'] platform = payload['platform'] payload_json = json.dumps(payload) result = False if platform == "Algorand": print("Algorand") if algosdk.util.verify_bytes(payload_json.encode('utf-8'), sig, pk): print("Algo sig verifies!") result = True elif platform == "Ethereum": print("Ethereum") eth_encoded_msg = eth_account.messages.encode_defunct(text=payload_json) if eth_account.Account.recover_message(eth_encoded_msg, signature=sig) == pk: print("Eth sig verifies!") result = True return result, payload_json # def fill_order(order,txes=[]): # pass # the inner recursive function def fill_order(): # get the order you just inserted from the DB current_order = g.session.query(Order).order_by(Order.id.desc()).first() # print("_order_id") # print(current_order.id) # Check if there are any existing orders that match and add them into a list order_list = [] orders = g.session.query(Order).filter(Order.filled == None).all() for existing_order in orders: # if ((existing_order.buy_amount != 0) and (current_order.sell_amount != 0)): if ((existing_order.buy_currency == current_order.sell_currency) and (existing_order.sell_currency == current_order.buy_currency) and (existing_order.sell_amount / existing_order.buy_amount >= current_order.buy_amount / current_order.sell_amount) and (existing_order.counterparty_id == None)): order_list.append(existing_order)
# print(len(order_list)) # pick the first one in the list match_order = order_list[0] # Set the filled field to be the current timestamp on both orders # Set counterparty_id to be the id of the other order match_order.filled = datetime.now() current_order.filled = datetime.now() match_order.counterparty_id = current_order.id current_order.counterparty_id = match_order.id g.session.commit() # if both orders can completely fill each other # no child order needs to be generated # If match_order is not completely filled if (current_order.sell_amount < match_order.buy_amount): # print("_match_order is not completely filled") diff = match_order.buy_amount - current_order.sell_amount exchange_rate_match = match_order.sell_amount / match_order.buy_amount sell_amount_new_match = diff * exchange_rate_match # print(match_order.id) # print(diff) # print(sell_amount_new_match) new_order = Order(sender_pk=match_order.sender_pk, receiver_pk=match_order.receiver_pk, buy_currency=match_order.buy_currency, sell_currency=match_order.sell_currency, buy_amount=diff, sell_amount=sell_amount_new_match, creator_id=match_order.id) g.session.add(new_order) g.session.commit() print("M") fill_order() # If current_order is not completely filled if (current_order.buy_amount > match_order.sell_amount): # print("_current_order is not completely filled") diff = current_order.buy_amount - match_order.sell_amount exchange_rate_current = current_order.buy_amount / current_order.sell_amount sell_amount_new_current = diff / exchange_rate_current # print(current_order.id) # print(diff) # print(sell_amount_new_current) new_order = Order(sender_pk=current_order.sender_pk, receiver_pk=current_order.receiver_pk, buy_currency=current_order.buy_currency, sell_currency=current_order.sell_currency, buy_amount=diff, sell_amount=sell_amount_new_current, creator_id=current_order.id) g.session.add(new_order) g.session.commit() print("C") fill_order() # Takes input dictionary d and writes it to the Log table # Hint: use json.dumps or str() to get it in a nice string form def log_message(d): create_session() order_obj = Log(message=d) g.session.add(order_obj) shutdown_session() # convert a row in DB into a dict def row2dict(row): return { c.name: getattr(row, c.name) for c in row.__table__.columns } # print a dictionary nicely def print_dict(d): for key, value in d.items(): print(key, ' : ', value) """ End of helper methods """ @app.route('/trade', methods=['POST']) def trade(): print("In trade endpoint") if request.method == "POST": print("--------- trade ---------") content = request.get_json(silent=True) print( f"content = {json.dumps(content)}" ) columns = [ "sender_pk", "receiver_pk", "buy_currency", "sell_currency", "buy_amount", "sell_amount", "platform" ] fields = [ "sig", "payload" ] # check whether the input contains both "sig" and "payload" for field in fields: if not field in content.keys(): print( f"{field} not received by Trade" ) print( json.dumps(content) ) log_message(content) return jsonify( False ) # check whether the input contains all 7 fields of payload for column in columns: if not column in content['payload'].keys(): print( f"{column} not received by Trade" ) print( json.dumps(content) ) log_message(content) return jsonify( False ) #Your code here #Note that you can access the database session using g.session # TODO 1: Check the signature # extract contents from json sig = content['sig'] payload = content['payload'] platform = payload['platform'] # The platform must be either โ€œAlgorandโ€ or "Ethereum". 
platforms = ["Algorand", "Ethereum"] if not platform in platforms: print("input platform is not Algorand or Ethereum") return jsonify(False) # check signature check_result = check_sig(payload,sig) result = check_result[0] payload_json = check_result[1] # TODO 2: Add the order to the database # TODO 4: Be sure to return jsonify(True) or jsonify(False) depending on if the method was successful # If the signature does not verify, do not insert the order into the โ€œOrderโ€ table. # Instead, insert a record into the โ€œLogโ€ table, with the message field set to be json.dumps(payload). if result is False: print("signature does NOT verify") log_message(payload_json) return jsonify(result) # If the signature verifies, store the signature, # as well as all of the fields under the โ€˜payloadโ€™ in the โ€œOrderโ€ table EXCEPT for 'platformโ€™. if result is True: print("signature verifies") create_session() order_obj = Order(sender_pk=payload['sender_pk'], receiver_pk=payload['receiver_pk'], buy_currency=payload['buy_currency'], sell_currency=payload['sell_currency'], buy_amount=payload['buy_amount'], sell_amount=payload['sell_amount'], signature=sig) g.session.add(order_obj) # TODO 3: Fill the order fill_order() shutdown_session() return jsonify(result) @app.route('/order_book') def order_book(): #Your code here #Note that you can access the database session using g.session # The โ€œ/order_bookโ€ endpoint should return a list of all orders in the database. # The response should contain a single key โ€œdataโ€ that refers to a list of orders formatted as JSON. # Each order should be a dict with (at least) the following fields # ("sender_pk", "receiver_pk", "buy_currency", "sell_currency", "buy_amount", "sell_amount", โ€œsignatureโ€). print("--------- order_book ---------") create_session() # get orders from DB into a list order_dict_list = [ row2dict(order) for order in g.session.query(Order).all() ] # add the list into a dict result = { 'data': order_dict_list } print("order book length: ") print(len(order_dict_list)) # print_dict(order_dict_list[-2]) # print_dict(order_dict_list[-1]) shutdown_session() return jsonify(result) if __name__ == '__main__': app.run(port='5002')
# If a match is found between order and existing_order if (len(order_list) > 0): # print(" order_list_length")
random_line_split
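fill_order above matches two orders when the existing order's implied price (sell_amount / buy_amount) is at least the rate the incoming order asks for (buy_amount / sell_amount), then creates a child order for whatever remains unfilled. A small worked sketch of that arithmetic with invented amounts:

# existing offers currency B in exchange for A; current offers A in exchange for B
existing = {'buy_amount': 100, 'sell_amount': 500}   # wants 100 A, gives 500 B
current  = {'buy_amount': 400, 'sell_amount': 80}    # wants 400 B, gives 80 A

# match condition from fill_order: the existing order's price covers the requested rate
assert existing['sell_amount'] / existing['buy_amount'] >= current['buy_amount'] / current['sell_amount']

# existing is only partially filled (current supplies 80 of the 100 A it wants),
# so a child order is derived for the remainder at existing's implied exchange rate
diff = existing['buy_amount'] - current['sell_amount']        # 20 A still wanted
rate = existing['sell_amount'] / existing['buy_amount']       # 5 B offered per A
child = {'buy_amount': diff, 'sell_amount': diff * rate}
print(child)                                                  # {'buy_amount': 20, 'sell_amount': 100.0}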
exchange_endpoint0.py
from flask import Flask, request, g from flask_restful import Resource, Api from sqlalchemy import create_engine from flask import jsonify import json import eth_account import algosdk from sqlalchemy.orm import sessionmaker from sqlalchemy.orm import scoped_session from sqlalchemy.orm import load_only from datetime import datetime import sys from models import Base, Order, Log engine = create_engine('sqlite:///orders.db') Base.metadata.bind = engine DBSession = sessionmaker(bind=engine) app = Flask(__name__) # These decorators allow you to use g.session to access the database inside the request code # g is an "application global" https://flask.palletsprojects.com/en/1.1.x/api/#application-globals @app.before_request def create_session(): g.session = scoped_session(DBSession) @app.teardown_appcontext # def shutdown_session(response_or_exc): def shutdown_session(exception=None): sys.stdout.flush() g.session.commit() g.session.remove() """ Suggested helper methods """ # check whether โ€œsigโ€ is a valid signature of json.dumps(payload), # using the signature algorithm specified by the platform field. # Be sure to verify the payload using the sender_pk. def check_sig(payload,sig): pk = payload['sender_pk'] platform = payload['platform'] payload_json = json.dumps(payload) result = False if platform == "Algorand": print("Algorand") if algosdk.util.verify_bytes(payload_json.encode('utf-8'), sig, pk): print("Algo sig verifies!") result = True elif platform == "Ethereum": print("Ethereum") eth_encoded_msg = eth_account.messages.encode_defunct(text=payload_json) if eth_account.Account.recover_message(eth_encoded_msg, signature=sig) == pk: print("Eth sig verifies!") result = True return result, payload_json # def fill_order(order,txes=[]): # pass # the inner recursive function def fill_order(): # get the order you just inserted from the DB current_order = g.session.query(Order).order_by(Order.id.desc()).first() # print("_order_id") # print(current_order.id) # Check if there are any existing orders that match and add them into a list order_list = [] orders = g.session.query(Order).filter(Order.filled == None).all() for existing_order in orders: # if ((existing_order.buy_amount != 0) and (current_order.sell_amount != 0)): if ((existing_order.buy_currency == current_order.sell_currency) and (existing_order.sell_currency == current_order.buy_currency) and (existing_order.sell_amount / existing_order.buy_amount >= current_order.buy_amount / current_order.sell_amount) and (existing_order.counterparty_id == None)): order_list.append(existing_order) # If a match is found between order and existing_order if (len(order_list) > 0): # print(" order_list_length") # print(len(order_list)) # pick the first one in the list match_order = order_list[0] # Set the filled field to be the current timestamp on both orders # Set counterparty_id to be the id of the other order match_order.filled = datetime.now() current_order.filled = datetime.now() match_order.counterparty_id = current_order.id current_order.counterparty_id = match_order.id g.session.commit() # if both orders can completely fill each other # no child order needs to be generated # If match_order is not completely filled if (current_order.sell_amount < match_order.buy_amount): # print("_match_order is not completely filled") diff = match_order.buy_amount - current_order.sell_amount exchange_rate_match = match_order.sell_amount / match_order.buy_amount sell_amount_new_match = diff * exchange_rate_match # print(match_order.id) # print(diff) # 
print(sell_amount_new_match) new_order = Order(sender_pk=match_order.sender_pk, receiver_pk=match_order.receiver_pk, buy_currency=match_order.buy_currency, sell_currency=match_order.sell_currency, buy_amount=diff, sell_amount=sell_amount_new_match, creator_id=match_order.id) g.session.add(new_order) g.session.commit() print("M") fill_order() # If current_order is not completely filled if (current_order.buy_amount > match_order.sell_amount): # print("_current_order is not completely filled") diff = current_order.buy_amount - match_order.sell_amount exchange_rate_current = current_order.buy_amount / current_order.sell_amount sell_amount_new_current = diff / exchange_rate_current # print(current_order.id) # print(diff) # print(sell_amount_new_current) new_order = Order(sender_pk=current_order.sender_pk, receiver_pk=current_order.receiver_pk, buy_currency=current_order.buy_currency, sell_currency=current_order.sell_currency, buy_amount=diff, sell_amount=sell_amount_new_current, creator_id=current_order.id) g.session.add(new_order) g.session.commit() print("C") fill_order() # Takes input dictionary d and writes it to the Log table # Hint: use json.dumps or str() to get it in a nice string form def log_message(d): create_session() order_obj = Log(message=d) g.session.add(order_obj) shutdown_session() # convert a row in DB into a dict def row2dict(row): return { c.name: getattr(row, c.name) for c in row.__table__.columns } # print a dictionary nicely def print_dict(d): for key, value in d.items(): print(key, ' : ', value) """ End of helper methods """ @app.route('/trade', methods=['POST']) def trade(): print("In trade endpoint") if request.method == "POST": print("--------- trade ---------") content = request.get_json(silent=True) print( f"content = {json.dumps(content)}" ) columns = [ "sender_pk", "receiver_pk", "buy_currency", "sell_currency", "buy_amount", "sell_amount", "platform" ] fields = [ "sig", "payload" ] # check whether the input contains both "sig" and "payload" for field in fields: if not field in content.keys(): prin
# check whether the input contains all 7 fields of payload for column in columns: if not column in content['payload'].keys(): print( f"{column} not received by Trade" ) print( json.dumps(content) ) log_message(content) return jsonify( False ) #Your code here #Note that you can access the database session using g.session # TODO 1: Check the signature # extract contents from json sig = content['sig'] payload = content['payload'] platform = payload['platform'] # The platform must be either โ€œAlgorandโ€ or "Ethereum". platforms = ["Algorand", "Ethereum"] if not platform in platforms: print("input platform is not Algorand or Ethereum") return jsonify(False) # check signature check_result = check_sig(payload,sig) result = check_result[0] payload_json = check_result[1] # TODO 2: Add the order to the database # TODO 4: Be sure to return jsonify(True) or jsonify(False) depending on if the method was successful # If the signature does not verify, do not insert the order into the โ€œOrderโ€ table. # Instead, insert a record into the โ€œLogโ€ table, with the message field set to be json.dumps(payload). if result is False: print("signature does NOT verify") log_message(payload_json) return jsonify(result) # If the signature verifies, store the signature, # as well as all of the fields under the โ€˜payloadโ€™ in the โ€œOrderโ€ table EXCEPT for 'platformโ€™. if result is True: print("signature verifies") create_session() order_obj = Order(sender_pk=payload['sender_pk'], receiver_pk=payload['receiver_pk'], buy_currency=payload['buy_currency'], sell_currency=payload['sell_currency'], buy_amount=payload['buy_amount'], sell_amount=payload['sell_amount'], signature=sig) g.session.add(order_obj) # TODO 3: Fill the order fill_order() shutdown_session() return jsonify(result) @app.route('/order_book') def order_book(): #Your code here #Note that you can access the database session using g.session # The โ€œ/order_bookโ€ endpoint should return a list of all orders in the database. # The response should contain a single key โ€œdataโ€ that refers to a list of orders formatted as JSON. # Each order should be a dict with (at least) the following fields # ("sender_pk", "receiver_pk", "buy_currency", "sell_currency", "buy_amount", "sell_amount", โ€œsignatureโ€). print("--------- order_book ---------") create_session() # get orders from DB into a list order_dict_list = [ row2dict(order) for order in g.session.query(Order).all() ] # add the list into a dict result = { 'data': order_dict_list } print("order book length: ") print(len(order_dict_list)) # print_dict(order_dict_list[-2]) # print_dict(order_dict_list[-1]) shutdown_session() return jsonify(result) if __name__ == '__main__': app.run(port='5002')
t( f"{field} not received by Trade" ) print( json.dumps(content) ) log_message(content) return jsonify( False )
conditional_block
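row2dict above serializes an ORM row by iterating row.__table__.columns so /order_book can jsonify the result. A minimal sketch of the same pattern against an in-memory SQLite database and a cut-down Order model (the column set here is invented, not the real models.py):

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class Order(Base):                       # simplified stand-in for the real models.Order
    __tablename__ = 'orders'
    id = Column(Integer, primary_key=True)
    buy_currency = Column(String)
    sell_currency = Column(String)
    buy_amount = Column(Integer)

def row2dict(row):
    return {c.name: getattr(row, c.name) for c in row.__table__.columns}

engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(Order(buy_currency='Algorand', sell_currency='Ethereum', buy_amount=100))
session.commit()
print([row2dict(order) for order in session.query(Order).all()])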
exchange_endpoint0.py
from flask import Flask, request, g from flask_restful import Resource, Api from sqlalchemy import create_engine from flask import jsonify import json import eth_account import algosdk from sqlalchemy.orm import sessionmaker from sqlalchemy.orm import scoped_session from sqlalchemy.orm import load_only from datetime import datetime import sys from models import Base, Order, Log engine = create_engine('sqlite:///orders.db') Base.metadata.bind = engine DBSession = sessionmaker(bind=engine) app = Flask(__name__) # These decorators allow you to use g.session to access the database inside the request code # g is an "application global" https://flask.palletsprojects.com/en/1.1.x/api/#application-globals @app.before_request def create_session(): g.session = scoped_session(DBSession) @app.teardown_appcontext # def shutdown_session(response_or_exc): def shutdown_session(exception=None): sys.stdout.flush() g.session.commit() g.session.remove() """ Suggested helper methods """ # check whether โ€œsigโ€ is a valid signature of json.dumps(payload), # using the signature algorithm specified by the platform field. # Be sure to verify the payload using the sender_pk. def check_sig(payload,sig): pk = payload['sender_pk'] platform = payload['platform'] payload_json = json.dumps(payload) result = False if platform == "Algorand": print("Algorand") if algosdk.util.verify_bytes(payload_json.encode('utf-8'), sig, pk): print("Algo sig verifies!") result = True elif platform == "Ethereum": print("Ethereum") eth_encoded_msg = eth_account.messages.encode_defunct(text=payload_json) if eth_account.Account.recover_message(eth_encoded_msg, signature=sig) == pk: print("Eth sig verifies!") result = True return result, payload_json # def fill_order(order,txes=[]): # pass # the inner recursive function def fill_order(): # get the order you just inserted from the DB current_order = g.session.query(Order).order_by(Order.id.desc()).first() # print("_order_id") # print(current_order.id) # Check if there are any existing orders that match and add them into a list order_list = [] orders = g.session.query(Order).filter(Order.filled == None).all() for existing_order in orders: # if ((existing_order.buy_amount != 0) and (current_order.sell_amount != 0)): if ((existing_order.buy_currency == current_order.sell_currency) and (existing_order.sell_currency == current_order.buy_currency) and (existing_order.sell_amount / existing_order.buy_amount >= current_order.buy_amount / current_order.sell_amount) and (existing_order.counterparty_id == None)): order_list.append(existing_order) # If a match is found between order and existing_order if (len(order_list) > 0): # print(" order_list_length") # print(len(order_list)) # pick the first one in the list match_order = order_list[0] # Set the filled field to be the current timestamp on both orders # Set counterparty_id to be the id of the other order match_order.filled = datetime.now() current_order.filled = datetime.now() match_order.counterparty_id = current_order.id current_order.counterparty_id = match_order.id g.session.commit() # if both orders can completely fill each other # no child order needs to be generated # If match_order is not completely filled if (current_order.sell_amount < match_order.buy_amount): # print("_match_order is not completely filled") diff = match_order.buy_amount - current_order.sell_amount exchange_rate_match = match_order.sell_amount / match_order.buy_amount sell_amount_new_match = diff * exchange_rate_match # print(match_order.id) # print(diff) # 
print(sell_amount_new_match) new_order = Order(sender_pk=match_order.sender_pk, receiver_pk=match_order.receiver_pk, buy_currency=match_order.buy_currency, sell_currency=match_order.sell_currency, buy_amount=diff, sell_amount=sell_amount_new_match, creator_id=match_order.id) g.session.add(new_order) g.session.commit() print("M") fill_order() # If current_order is not completely filled if (current_order.buy_amount > match_order.sell_amount): # print("_current_order is not completely filled") diff = current_order.buy_amount - match_order.sell_amount exchange_rate_current = current_order.buy_amount / current_order.sell_amount sell_amount_new_current = diff / exchange_rate_current # print(current_order.id) # print(diff) # print(sell_amount_new_current) new_order = Order(sender_pk=current_order.sender_pk, receiver_pk=current_order.receiver_pk, buy_currency=current_order.buy_currency, sell_currency=current_order.sell_currency, buy_amount=diff, sell_amount=sell_amount_new_current, creator_id=current_order.id) g.session.add(new_order) g.session.commit() print("C") fill_order() # Takes input dictionary d and writes it to the Log table # Hint: use json.dumps or str() to get it in a nice string form def log_
create_session() order_obj = Log(message=d) g.session.add(order_obj) shutdown_session() # convert a row in DB into a dict def row2dict(row): return { c.name: getattr(row, c.name) for c in row.__table__.columns } # print a dictionary nicely def print_dict(d): for key, value in d.items(): print(key, ' : ', value) """ End of helper methods """ @app.route('/trade', methods=['POST']) def trade(): print("In trade endpoint") if request.method == "POST": print("--------- trade ---------") content = request.get_json(silent=True) print( f"content = {json.dumps(content)}" ) columns = [ "sender_pk", "receiver_pk", "buy_currency", "sell_currency", "buy_amount", "sell_amount", "platform" ] fields = [ "sig", "payload" ] # check whether the input contains both "sig" and "payload" for field in fields: if not field in content.keys(): print( f"{field} not received by Trade" ) print( json.dumps(content) ) log_message(content) return jsonify( False ) # check whether the input contains all 7 fields of payload for column in columns: if not column in content['payload'].keys(): print( f"{column} not received by Trade" ) print( json.dumps(content) ) log_message(content) return jsonify( False ) #Your code here #Note that you can access the database session using g.session # TODO 1: Check the signature # extract contents from json sig = content['sig'] payload = content['payload'] platform = payload['platform'] # The platform must be either โ€œAlgorandโ€ or "Ethereum". platforms = ["Algorand", "Ethereum"] if not platform in platforms: print("input platform is not Algorand or Ethereum") return jsonify(False) # check signature check_result = check_sig(payload,sig) result = check_result[0] payload_json = check_result[1] # TODO 2: Add the order to the database # TODO 4: Be sure to return jsonify(True) or jsonify(False) depending on if the method was successful # If the signature does not verify, do not insert the order into the โ€œOrderโ€ table. # Instead, insert a record into the โ€œLogโ€ table, with the message field set to be json.dumps(payload). if result is False: print("signature does NOT verify") log_message(payload_json) return jsonify(result) # If the signature verifies, store the signature, # as well as all of the fields under the โ€˜payloadโ€™ in the โ€œOrderโ€ table EXCEPT for 'platformโ€™. if result is True: print("signature verifies") create_session() order_obj = Order(sender_pk=payload['sender_pk'], receiver_pk=payload['receiver_pk'], buy_currency=payload['buy_currency'], sell_currency=payload['sell_currency'], buy_amount=payload['buy_amount'], sell_amount=payload['sell_amount'], signature=sig) g.session.add(order_obj) # TODO 3: Fill the order fill_order() shutdown_session() return jsonify(result) @app.route('/order_book') def order_book(): #Your code here #Note that you can access the database session using g.session # The โ€œ/order_bookโ€ endpoint should return a list of all orders in the database. # The response should contain a single key โ€œdataโ€ that refers to a list of orders formatted as JSON. # Each order should be a dict with (at least) the following fields # ("sender_pk", "receiver_pk", "buy_currency", "sell_currency", "buy_amount", "sell_amount", โ€œsignatureโ€). 
print("--------- order_book ---------") create_session() # get orders from DB into a list order_dict_list = [ row2dict(order) for order in g.session.query(Order).all() ] # add the list into a dict result = { 'data': order_dict_list } print("order book length: ") print(len(order_dict_list)) # print_dict(order_dict_list[-2]) # print_dict(order_dict_list[-1]) shutdown_session() return jsonify(result) if __name__ == '__main__': app.run(port='5002')
message(d):
identifier_name
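Before touching the database, the /trade endpoint above checks that the request JSON carries both "sig" and "payload", that the payload has all seven expected fields, and that platform is Algorand or Ethereum. A plain-function sketch of that validation step:

REQUIRED_FIELDS = ["sig", "payload"]
REQUIRED_COLUMNS = ["sender_pk", "receiver_pk", "buy_currency", "sell_currency",
                    "buy_amount", "sell_amount", "platform"]

def validate_trade_content(content: dict) -> bool:
    # mirrors the field checks at the top of /trade: reject anything missing or off-platform
    if not all(field in content for field in REQUIRED_FIELDS):
        return False
    payload = content["payload"]
    if not all(column in payload for column in REQUIRED_COLUMNS):
        return False
    return payload["platform"] in ("Algorand", "Ethereum")

good_payload = dict.fromkeys(REQUIRED_COLUMNS, 1)
good_payload["platform"] = "Ethereum"
print(validate_trade_content({"sig": "0xabc", "payload": good_payload}))  # True
print(validate_trade_content({"sig": "0xabc"}))                           # False, payload missing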
wxpayv3.go
package mcommon import ( "bytes" "crypto" "crypto/aes" "crypto/cipher" "crypto/md5" "crypto/rand" "crypto/rsa" "crypto/sha256" "crypto/x509" "encoding/base64" "encoding/pem" "encoding/xml" "fmt" "io/ioutil" "net/url" "strconv" "time" "github.com/gin-gonic/gin"
// StWxPayRawResp ๅ›žๅค type StWxPayRawResp struct { ID string `json:"id"` CreateTime time.Time `json:"create_time"` ResourceType string `json:"resource_type"` EventType string `json:"event_type"` Summary string `json:"summary"` Resource struct { OriginalType string `json:"original_type"` Algorithm string `json:"algorithm"` Ciphertext string `json:"ciphertext"` AssociatedData string `json:"associated_data"` Nonce string `json:"nonce"` } `json:"resource"` } // StWxPayResp ๅ›žๅค type StWxPayResp struct { Mchid string `json:"mchid"` Appid string `json:"appid"` OutTradeNo string `json:"out_trade_no"` TransactionID string `json:"transaction_id"` TradeType string `json:"trade_type"` TradeState string `json:"trade_state"` TradeStateDesc string `json:"trade_state_desc"` BankType string `json:"bank_type"` Attach string `json:"attach"` SuccessTime time.Time `json:"success_time"` Payer struct { Openid string `json:"openid"` } `json:"payer"` Amount struct { Total int `json:"total"` PayerTotal int `json:"payer_total"` Currency string `json:"currency"` PayerCurrency string `json:"payer_currency"` } `json:"amount"` } // StWxRefundCb ๅ›ž่ฐƒ type StWxRefundCb struct { XMLName xml.Name `xml:"root"` Text string `xml:",chardata"` OutRefundNo string `xml:"out_refund_no"` OutTradeNo string `xml:"out_trade_no"` RefundAccount string `xml:"refund_account"` RefundFee string `xml:"refund_fee"` RefundID string `xml:"refund_id"` RefundRecvAccout string `xml:"refund_recv_accout"` RefundRequestSource string `xml:"refund_request_source"` RefundStatus string `xml:"refund_status"` SettlementRefundFee string `xml:"settlement_refund_fee"` SettlementTotalFee string `xml:"settlement_total_fee"` SuccessTime string `xml:"success_time"` TotalFee string `xml:"total_fee"` TransactionID string `xml:"transaction_id"` } type StWxV3RefundResp struct { Amount struct { Currency string `json:"currency"` DiscountRefund int `json:"discount_refund"` PayerRefund int `json:"payer_refund"` PayerTotal int `json:"payer_total"` Refund int `json:"refund"` SettlementRefund int `json:"settlement_refund"` SettlementTotal int `json:"settlement_total"` Total int `json:"total"` } `json:"amount"` Channel string `json:"channel"` CreateTime time.Time `json:"create_time"` FundsAccount string `json:"funds_account"` OutRefundNo string `json:"out_refund_no"` OutTradeNo string `json:"out_trade_no"` PromotionDetail []interface{} `json:"promotion_detail"` RefundID string `json:"refund_id"` Status string `json:"status"` TransactionID string `json:"transaction_id"` UserReceivedAccount string `json:"user_received_account"` Code string `json:"code"` Message string `json:"message"` } type StWxV3RefundCb struct { ID string `json:"id"` CreateTime time.Time `json:"create_time"` ResourceType string `json:"resource_type"` EventType string `json:"event_type"` Summary string `json:"summary"` Resource struct { OriginalType string `json:"original_type"` Algorithm string `json:"algorithm"` Ciphertext string `json:"ciphertext"` AssociatedData string `json:"associated_data"` Nonce string `json:"nonce"` } `json:"resource"` } type StWxV3RefundCbContent struct { Mchid string `json:"mchid"` OutTradeNo string `json:"out_trade_no"` TransactionID string `json:"transaction_id"` OutRefundNo string `json:"out_refund_no"` RefundID string `json:"refund_id"` RefundStatus string `json:"refund_status"` SuccessTime time.Time `json:"success_time"` Amount struct { Total int `json:"total"` Refund int `json:"refund"` PayerTotal int `json:"payer_total"` PayerRefund int `json:"payer_refund"` } 
`json:"amount"` UserReceivedAccount string `json:"user_received_account"` } // RsaSign ็ญพๅ func RsaSign(signContent string, privateKey *rsa.PrivateKey, hash crypto.Hash) (string, error) { shaNew := hash.New() shaNew.Write([]byte(signContent)) hashed := shaNew.Sum(nil) signature, err := rsa.SignPKCS1v15(rand.Reader, privateKey, hash, hashed) if err != nil { return "", err } return base64.StdEncoding.EncodeToString(signature), nil } // WxPayV3SignStr ่Žทๅ–็ญพๅ็ป“ๆžœ func WxPayV3SignStr(key *rsa.PrivateKey, cols []string) (string, error) { var buf bytes.Buffer for _, col := range cols { buf.WriteString(col) buf.WriteString("\n") } sign, err := RsaSign(buf.String(), key, crypto.SHA256) if err != nil { return "", err } return sign, nil } // WxPayV3Sign v3็ญพๅ func WxPayV3Sign(mchid, keySerial string, key *rsa.PrivateKey, req *gorequest.SuperAgent) (*gorequest.SuperAgent, error) { timestamp := time.Now().Unix() nonce := GetUUIDStr() uri, err := url.Parse(req.Url) if err != nil { return nil, err } var bodyBytes []byte if req.Method == "POST" { request, err := req.MakeRequest() if err != nil { return nil, err } bodyReader, err := request.GetBody() if err != nil { return nil, err } bodyBytes, err = ioutil.ReadAll(bodyReader) if err != nil { return nil, err } } sign, err := WxPayV3SignStr(key, []string{ req.Method, uri.Path, strconv.FormatInt(timestamp, 10), nonce, string(bodyBytes), }) if err != nil { return nil, err } auth := fmt.Sprintf( `WECHATPAY2-SHA256-RSA2048 mchid="%s",nonce_str="%s",signature="%s",timestamp="%d",serial_no="%s"`, mchid, nonce, sign, timestamp, keySerial, ) req = req. Set("Authorization", auth). Set("Accept", "application/json"). Set("User-Agent", "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50") return req, nil } // WxPayV3Decrype ่งฃๅฏ† func WxPayV3Decrype(key string, cipherStr, nonce, associatedData string) (string, error) { keyBytes := []byte(key) nonceBytes := []byte(nonce) associatedDataBytes := []byte(associatedData) ciphertext, err := base64.StdEncoding.DecodeString(cipherStr) if err != nil { return "", err } block, err := aes.NewCipher(keyBytes) if err != nil { return "", err } aesgcm, err := cipher.NewGCM(block) if err != nil { return "", err } plaintext, err := aesgcm.Open(nil, nonceBytes, ciphertext, associatedDataBytes) if err != nil { return "", err } return string(plaintext), nil } // WxPayV3CheckSign v3็ญพๅ้ชŒ่ฏ func WxPayV3CheckSign(header map[string][]string, body []byte, cerStr string) error { if len(cerStr) == 0 { return fmt.Errorf("no cer") } timestamp, err := WxPayV3GetHeaderByKey(header, "Wechatpay-Timestamp") if err != nil { return err } nonce, err := WxPayV3GetHeaderByKey(header, "Wechatpay-Nonce") if err != nil { return err } signature, err := WxPayV3GetHeaderByKey(header, "Wechatpay-Signature") if err != nil { return err } checkStr := timestamp + "\n" + nonce + "\n" + string(body) + "\n" block, _ := pem.Decode([]byte(cerStr)) var cert *x509.Certificate cert, err = x509.ParseCertificate(block.Bytes) if err != nil { return err } rsaPublicKey := cert.PublicKey.(*rsa.PublicKey) oldSign, err := base64.StdEncoding.DecodeString(signature) if err != nil { return err } hashed := sha256.Sum256([]byte(checkStr)) err = rsa.VerifyPKCS1v15(rsaPublicKey, crypto.SHA256, hashed[:], oldSign) return err } // WxPayV3GetHeaderByKey ่Žทๅ–ๅคด func WxPayV3GetHeaderByKey(header map[string][]string, key string) (string, error) { v, ok := header[key] if !ok { return "", fmt.Errorf("no key 
%s", key) } if len(v) == 0 { return "", fmt.Errorf("key empty %s", key) } return v[0], nil } // WxPayV3GetPrepay ่Žทๅ–้ข„ๆ”ฏไป˜ไฟกๆฏ func WxPayV3GetPrepay(keySerial string, key *rsa.PrivateKey, appID, mchID, openID, payBody, outTradeNo, cbURL string, totalFee int64, expireAt time.Time) (gin.H, string, error) { req := gorequest.New(). Post("https://api.mch.weixin.qq.com/v3/pay/transactions/jsapi"). Send( H{ "appid": appID, "mchid": mchID, "description": payBody, "out_trade_no": outTradeNo, "time_expire": expireAt.Format(time.RFC3339), "notify_url": cbURL, "amount": H{ "total": totalFee, }, "payer": H{ "openid": openID, }, }, ) req, err := WxPayV3Sign( mchID, keySerial, key, req, ) if err != nil { return nil, "", err } _, body, errs := req.EndBytes() if errs != nil { return nil, "", errs[0] } var prepayResp struct { PrepayID string `json:"prepay_id"` } err = jsoniter.Unmarshal(body, &prepayResp) if err != nil { return nil, "", err } if len(prepayResp.PrepayID) == 0 { return nil, "", fmt.Errorf("get prepay id err: %s", body) } v, err := WxPayV3SignPrepayid(key, appID, prepayResp.PrepayID) if err != nil { return nil, "", err } return v, prepayResp.PrepayID, nil } // WxPayV3SignPrepayid ็ญพๅprepayid func WxPayV3SignPrepayid(key *rsa.PrivateKey, appID, prepayid string) (gin.H, error) { objTimestamp := strconv.FormatInt(time.Now().Unix(), 10) objNonce := GetUUIDStr() objCol := fmt.Sprintf("prepay_id=%s", prepayid) objSign, err := WxPayV3SignStr( key, []string{ appID, objTimestamp, objNonce, objCol, }, ) if err != nil { return nil, err } v := gin.H{ "timeStamp": objTimestamp, "nonceStr": objNonce, "package": objCol, "signType": "RSA", "paySign": objSign, } return v, nil } // WxPayV3DecodePayResp ่งฃๆžๆ”ฏไป˜ๅ›ž่ฐƒ func WxPayV3DecodePayResp(v3Key string, body []byte, mchid, appid string) (*StWxPayResp, error) { var rawResp StWxPayRawResp err := jsoniter.Unmarshal(body, &rawResp) if err != nil { return nil, err } if rawResp.EventType != "TRANSACTION.SUCCESS" { return nil, fmt.Errorf("error event_type: %s", rawResp.EventType) } if rawResp.ResourceType != "encrypt-resource" { return nil, fmt.Errorf("error resource_type: %s", rawResp.ResourceType) } originalType := rawResp.Resource.OriginalType if originalType != "transaction" { return nil, fmt.Errorf("error original_type: %s", originalType) } algorithm := rawResp.Resource.Algorithm if algorithm != "AEAD_AES_256_GCM" { return nil, fmt.Errorf("error algorithm: %s", algorithm) } ciphertext := rawResp.Resource.Ciphertext associatedData := rawResp.Resource.AssociatedData nonce := rawResp.Resource.Nonce plain, err := WxPayV3Decrype( v3Key, ciphertext, nonce, associatedData, ) if err != nil { return nil, err } var finalResp StWxPayResp err = jsoniter.Unmarshal([]byte(plain), &finalResp) if err != nil { return nil, err } if finalResp.Mchid != mchid { return nil, fmt.Errorf("mchid error") } if finalResp.Appid != appid { return nil, fmt.Errorf("appid error") } if finalResp.TradeState != "SUCCESS" { return nil, fmt.Errorf("error trade_state: %s", finalResp.TradeState) } return &finalResp, nil } // WxPayCheckRefundCb ้ชŒ่ฏๅ›ž่ฐƒ func WxPayCheckRefundCb(mchKey string, body []byte) (*StWxRefundCb, error) { mchKeyMd5 := fmt.Sprintf("%x", md5.Sum([]byte(mchKey))) bodyMap, err := XMLWalk(body) if err != nil { // ่ฟ”ๅ›žๆ•ฐๆฎ return nil, err } reqInfo, ok := bodyMap["req_info"] if !ok { return nil, fmt.Errorf("no key req_info %s", body) } reqInfoStr, ok := reqInfo.(string) if !ok { return nil, fmt.Errorf("error format req_info: %s", body) } reqInfoBytes, err := 
base64.StdEncoding.DecodeString(reqInfoStr) if err != nil { return nil, err } reqInfoFull, err := DecryptAesEcb(reqInfoBytes, []byte(mchKeyMd5)) if err != nil { return nil, err } var bodyXML StWxRefundCb err = xml.Unmarshal(reqInfoFull, &bodyXML) if err != nil { return nil, err } return &bodyXML, nil } // WxPayV3Refunds ้€€ๆฌพ func WxPayV3Refunds(keySerial string, key *rsa.PrivateKey, mchID, transactionID, outRefundNo, cbURL string, totalFee, refundFee int64) (*StWxV3RefundResp, error) { req := gorequest.New(). Post("https://api.mch.weixin.qq.com/v3/refund/domestic/refunds"). Send( H{ "transaction_id": transactionID, "out_refund_no": outRefundNo, "notify_url": cbURL, "amount": H{ "refund": refundFee, "total": totalFee, "currency": "CNY", }, }, ) req, err := WxPayV3Sign( mchID, keySerial, key, req, ) if err != nil { return nil, err } _, body, errs := req.EndBytes() if errs != nil { return nil, errs[0] } Log.Debugf("body: %s", body) var resp StWxV3RefundResp err = jsoniter.Unmarshal(body, &resp) if err != nil { return nil, err } if resp.Code != "" { return nil, fmt.Errorf("refund err: %s", body) } return &resp, nil } // WxPayV3DecodeRefundsCb ่งฃๆž้€€ๆฌพๅ›ž่ฐƒ func WxPayV3DecodeRefundsCb(v3Key string, body []byte) (*StWxV3RefundCbContent, error) { var rawResp StWxV3RefundCb err := jsoniter.Unmarshal(body, &rawResp) if err != nil { return nil, err } if rawResp.EventType != "REFUND.SUCCESS" { return nil, fmt.Errorf("error event_type: %s", rawResp.EventType) } if rawResp.ResourceType != "encrypt-resource" { return nil, fmt.Errorf("error resource_type: %s", rawResp.ResourceType) } originalType := rawResp.Resource.OriginalType if originalType != "refund" { return nil, fmt.Errorf("error original_type: %s", originalType) } algorithm := rawResp.Resource.Algorithm if algorithm != "AEAD_AES_256_GCM" { return nil, fmt.Errorf("error algorithm: %s", algorithm) } ciphertext := rawResp.Resource.Ciphertext associatedData := rawResp.Resource.AssociatedData nonce := rawResp.Resource.Nonce plain, err := WxPayV3Decrype( v3Key, ciphertext, nonce, associatedData, ) if err != nil { return nil, err } Log.Debugf("plain: %s", plain) var content StWxV3RefundCbContent err = jsoniter.Unmarshal([]byte(plain), &content) if err != nil { return nil, err } return &content, nil }
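The file above relies on a few package-level utilities (H, GetUUIDStr, Log, XMLWalk, DecryptAesEcb) that are presumably defined elsewhere in mcommon. The following is a minimal, hypothetical usage sketch rather than part of the original code: the import path, key material, merchant/app IDs and URLs are placeholders. It shows how a JSAPI prepay order might be created with WxPayV3GetPrepay and, in comments, how a payment callback would typically be verified and decoded.

package main

import (
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"time"

	"example.com/yourproject/mcommon" // hypothetical import path for the package above
)

// loadPrivateKey parses a merchant API private key from PEM (PKCS#8 assumed).
func loadPrivateKey(pemStr string) (*rsa.PrivateKey, error) {
	block, _ := pem.Decode([]byte(pemStr))
	if block == nil {
		return nil, fmt.Errorf("invalid PEM")
	}
	key, err := x509.ParsePKCS8PrivateKey(block.Bytes)
	if err != nil {
		return nil, err
	}
	rsaKey, ok := key.(*rsa.PrivateKey)
	if !ok {
		return nil, fmt.Errorf("not an RSA key")
	}
	return rsaKey, nil
}

func main() {
	key, err := loadPrivateKey("-----BEGIN PRIVATE KEY-----\n...placeholder...\n-----END PRIVATE KEY-----")
	if err != nil {
		panic(err)
	}

	// Create a JSAPI prepay order; jsParams can be handed to the front end as-is.
	jsParams, prepayID, err := mcommon.WxPayV3GetPrepay(
		"KEY_SERIAL_NO", // merchant certificate serial number (placeholder)
		key,
		"wx_app_id", "mch_id", "user_openid",
		"test order", "out_trade_no_001",
		"https://example.com/pay/notify", // callback URL (placeholder)
		100,                              // amount in cents
		time.Now().Add(30*time.Minute),   // order expiry
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(prepayID, jsParams)

	// In the notify handler one would typically verify the signature against the
	// platform certificate, then decrypt and validate the callback body:
	//   err := mcommon.WxPayV3CheckSign(request.Header, body, platformCertPEM)
	//   resp, err := mcommon.WxPayV3DecodePayResp(apiV3Key, body, "mch_id", "wx_app_id")
}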
jsoniter "github.com/json-iterator/go" "github.com/parnurzeal/gorequest" )
random_line_split
wxpayv3.go
package mcommon import ( "bytes" "crypto" "crypto/aes" "crypto/cipher" "crypto/md5" "crypto/rand" "crypto/rsa" "crypto/sha256" "crypto/x509" "encoding/base64" "encoding/pem" "encoding/xml" "fmt" "io/ioutil" "net/url" "strconv" "time" "github.com/gin-gonic/gin" jsoniter "github.com/json-iterator/go" "github.com/parnurzeal/gorequest" ) // StWxPayRawResp ๅ›žๅค type StWxPayRawResp struct { ID string `json:"id"` CreateTime time.Time `json:"create_time"` ResourceType string `json:"resource_type"` EventType string `json:"event_type"` Summary string `json:"summary"` Resource struct { OriginalType string `json:"original_type"` Algorithm string `json:"algorithm"` Ciphertext string `json:"ciphertext"` AssociatedData string `json:"associated_data"` Nonce string `json:"nonce"` } `json:"resource"` } // StWxPayResp ๅ›žๅค type StWxPayResp struct { Mchid string `json:"mchid"` Appid string `json:"appid"` OutTradeNo string `json:"out_trade_no"` TransactionID string `json:"transaction_id"` TradeType string `json:"trade_type"` TradeState string `json:"trade_state"` TradeStateDesc string `json:"trade_state_desc"` BankType string `json:"bank_type"` Attach string `json:"attach"` SuccessTime time.Time `json:"success_time"` Payer struct { Openid string `json:"openid"` } `json:"payer"` Amount struct { Total int `json:"total"` PayerTotal int `json:"payer_total"` Currency string `json:"currency"` PayerCurrency string `json:"payer_currency"` } `json:"amount"` } // StWxRefundCb ๅ›ž่ฐƒ type StWxRefundCb struct { XMLName xml.Name `xml:"root"` Text string `xml:",chardata"` OutRefundNo string `xml:"out_refund_no"` OutTradeNo string `xml:"out_trade_no"` RefundAccount string `xml:"refund_account"` RefundFee string `xml:"refund_fee"` RefundID string `xml:"refund_id"` RefundRecvAccout string `xml:"refund_recv_accout"` RefundRequestSource string `xml:"refund_request_source"` RefundStatus string `xml:"refund_status"` SettlementRefundFee string `xml:"settlement_refund_fee"` SettlementTotalFee string `xml:"settlement_total_fee"` SuccessTime string `xml:"success_time"` TotalFee string `xml:"total_fee"` TransactionID string `xml:"transaction_id"` } type StWxV3RefundResp struct { Amount struct { Currency string `json:"currency"` DiscountRefund int `json:"discount_refund"` PayerRefund int `json:"payer_refund"` PayerTotal int `json:"payer_total"` Refund int `json:"refund"` SettlementRefund int `json:"settlement_refund"` SettlementTotal int `json:"settlement_total"` Total int `json:"total"` } `json:"amount"` Channel string `json:"channel"` CreateTime time.Time `json:"create_time"` FundsAccount string `json:"funds_account"` OutRefundNo string `json:"out_refund_no"` OutTradeNo string `json:"out_trade_no"` PromotionDetail []interface{} `json:"promotion_detail"` RefundID string `json:"refund_id"` Status string `json:"status"` TransactionID string `json:"transaction_id"` UserReceivedAccount string `json:"user_received_account"` Code string `json:"code"` Message string `json:"message"` } type StWxV3RefundCb struct { ID string `json:"id"` CreateTime time.Time `json:"create_time"` ResourceType string `json:"resource_type"` EventType string `json:"event_type"` Summary string `json:"summary"` Resource struct { OriginalType string `json:"original_type"` Algorithm string `json:"algorithm"` Ciphertext string `json:"ciphertext"` AssociatedData string `json:"associated_data"` Nonce string `json:"nonce"` } `json:"resource"` } type StWxV3RefundCbContent struct { Mchid string `json:"mchid"` OutTradeNo string `json:"out_trade_no"` TransactionID string 
`json:"transaction_id"` OutRefundNo string `json:"out_refund_no"` RefundID string `json:"refund_id"` RefundStatus string `json:"refund_status"` SuccessTime time.Time `json:"success_time"` Amount struct { Total int `json:"total"` Refund int `json:"refund"` PayerTotal int `json:"payer_total"` PayerRefund int `json:"payer_refund"` } `json:"amount"` UserReceivedAccount string `json:"user_received_account"` } // RsaSign ็ญพๅ func RsaSign(signContent string, privateKey *rsa.PrivateKey, hash crypto.Hash) (string, error) { shaNew := hash.New() shaNew.Write([]byte(signContent)) hashed := shaNew.Sum(nil) signature, err := rsa.SignPKCS1v15(rand.Reader, privateKey, hash, hashed) if err != nil { return "", err } return base64.StdEncoding.EncodeToString(signature), nil } // WxPayV3SignStr ่Žทๅ–็ญพๅ็ป“ๆžœ func WxPayV3SignStr(key *rsa.Priv
]string) (string, error) { var buf bytes.Buffer for _, col := range cols { buf.WriteString(col) buf.WriteString("\n") } sign, err := RsaSign(buf.String(), key, crypto.SHA256) if err != nil { return "", err } return sign, nil } // WxPayV3Sign v3็ญพๅ func WxPayV3Sign(mchid, keySerial string, key *rsa.PrivateKey, req *gorequest.SuperAgent) (*gorequest.SuperAgent, error) { timestamp := time.Now().Unix() nonce := GetUUIDStr() uri, err := url.Parse(req.Url) if err != nil { return nil, err } var bodyBytes []byte if req.Method == "POST" { request, err := req.MakeRequest() if err != nil { return nil, err } bodyReader, err := request.GetBody() if err != nil { return nil, err } bodyBytes, err = ioutil.ReadAll(bodyReader) if err != nil { return nil, err } } sign, err := WxPayV3SignStr(key, []string{ req.Method, uri.Path, strconv.FormatInt(timestamp, 10), nonce, string(bodyBytes), }) if err != nil { return nil, err } auth := fmt.Sprintf( `WECHATPAY2-SHA256-RSA2048 mchid="%s",nonce_str="%s",signature="%s",timestamp="%d",serial_no="%s"`, mchid, nonce, sign, timestamp, keySerial, ) req = req. Set("Authorization", auth). Set("Accept", "application/json"). Set("User-Agent", "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50") return req, nil } // WxPayV3Decrype ่งฃๅฏ† func WxPayV3Decrype(key string, cipherStr, nonce, associatedData string) (string, error) { keyBytes := []byte(key) nonceBytes := []byte(nonce) associatedDataBytes := []byte(associatedData) ciphertext, err := base64.StdEncoding.DecodeString(cipherStr) if err != nil { return "", err } block, err := aes.NewCipher(keyBytes) if err != nil { return "", err } aesgcm, err := cipher.NewGCM(block) if err != nil { return "", err } plaintext, err := aesgcm.Open(nil, nonceBytes, ciphertext, associatedDataBytes) if err != nil { return "", err } return string(plaintext), nil } // WxPayV3CheckSign v3็ญพๅ้ชŒ่ฏ func WxPayV3CheckSign(header map[string][]string, body []byte, cerStr string) error { if len(cerStr) == 0 { return fmt.Errorf("no cer") } timestamp, err := WxPayV3GetHeaderByKey(header, "Wechatpay-Timestamp") if err != nil { return err } nonce, err := WxPayV3GetHeaderByKey(header, "Wechatpay-Nonce") if err != nil { return err } signature, err := WxPayV3GetHeaderByKey(header, "Wechatpay-Signature") if err != nil { return err } checkStr := timestamp + "\n" + nonce + "\n" + string(body) + "\n" block, _ := pem.Decode([]byte(cerStr)) var cert *x509.Certificate cert, err = x509.ParseCertificate(block.Bytes) if err != nil { return err } rsaPublicKey := cert.PublicKey.(*rsa.PublicKey) oldSign, err := base64.StdEncoding.DecodeString(signature) if err != nil { return err } hashed := sha256.Sum256([]byte(checkStr)) err = rsa.VerifyPKCS1v15(rsaPublicKey, crypto.SHA256, hashed[:], oldSign) return err } // WxPayV3GetHeaderByKey ่Žทๅ–ๅคด func WxPayV3GetHeaderByKey(header map[string][]string, key string) (string, error) { v, ok := header[key] if !ok { return "", fmt.Errorf("no key %s", key) } if len(v) == 0 { return "", fmt.Errorf("key empty %s", key) } return v[0], nil } // WxPayV3GetPrepay ่Žทๅ–้ข„ๆ”ฏไป˜ไฟกๆฏ func WxPayV3GetPrepay(keySerial string, key *rsa.PrivateKey, appID, mchID, openID, payBody, outTradeNo, cbURL string, totalFee int64, expireAt time.Time) (gin.H, string, error) { req := gorequest.New(). Post("https://api.mch.weixin.qq.com/v3/pay/transactions/jsapi"). 
Send( H{ "appid": appID, "mchid": mchID, "description": payBody, "out_trade_no": outTradeNo, "time_expire": expireAt.Format(time.RFC3339), "notify_url": cbURL, "amount": H{ "total": totalFee, }, "payer": H{ "openid": openID, }, }, ) req, err := WxPayV3Sign( mchID, keySerial, key, req, ) if err != nil { return nil, "", err } _, body, errs := req.EndBytes() if errs != nil { return nil, "", errs[0] } var prepayResp struct { PrepayID string `json:"prepay_id"` } err = jsoniter.Unmarshal(body, &prepayResp) if err != nil { return nil, "", err } if len(prepayResp.PrepayID) == 0 { return nil, "", fmt.Errorf("get prepay id err: %s", body) } v, err := WxPayV3SignPrepayid(key, appID, prepayResp.PrepayID) if err != nil { return nil, "", err } return v, prepayResp.PrepayID, nil } // WxPayV3SignPrepayid ็ญพๅprepayid func WxPayV3SignPrepayid(key *rsa.PrivateKey, appID, prepayid string) (gin.H, error) { objTimestamp := strconv.FormatInt(time.Now().Unix(), 10) objNonce := GetUUIDStr() objCol := fmt.Sprintf("prepay_id=%s", prepayid) objSign, err := WxPayV3SignStr( key, []string{ appID, objTimestamp, objNonce, objCol, }, ) if err != nil { return nil, err } v := gin.H{ "timeStamp": objTimestamp, "nonceStr": objNonce, "package": objCol, "signType": "RSA", "paySign": objSign, } return v, nil } // WxPayV3DecodePayResp ่งฃๆžๆ”ฏไป˜ๅ›ž่ฐƒ func WxPayV3DecodePayResp(v3Key string, body []byte, mchid, appid string) (*StWxPayResp, error) { var rawResp StWxPayRawResp err := jsoniter.Unmarshal(body, &rawResp) if err != nil { return nil, err } if rawResp.EventType != "TRANSACTION.SUCCESS" { return nil, fmt.Errorf("error event_type: %s", rawResp.EventType) } if rawResp.ResourceType != "encrypt-resource" { return nil, fmt.Errorf("error resource_type: %s", rawResp.ResourceType) } originalType := rawResp.Resource.OriginalType if originalType != "transaction" { return nil, fmt.Errorf("error original_type: %s", originalType) } algorithm := rawResp.Resource.Algorithm if algorithm != "AEAD_AES_256_GCM" { return nil, fmt.Errorf("error algorithm: %s", algorithm) } ciphertext := rawResp.Resource.Ciphertext associatedData := rawResp.Resource.AssociatedData nonce := rawResp.Resource.Nonce plain, err := WxPayV3Decrype( v3Key, ciphertext, nonce, associatedData, ) if err != nil { return nil, err } var finalResp StWxPayResp err = jsoniter.Unmarshal([]byte(plain), &finalResp) if err != nil { return nil, err } if finalResp.Mchid != mchid { return nil, fmt.Errorf("mchid error") } if finalResp.Appid != appid { return nil, fmt.Errorf("appid error") } if finalResp.TradeState != "SUCCESS" { return nil, fmt.Errorf("error trade_state: %s", finalResp.TradeState) } return &finalResp, nil } // WxPayCheckRefundCb ้ชŒ่ฏๅ›ž่ฐƒ func WxPayCheckRefundCb(mchKey string, body []byte) (*StWxRefundCb, error) { mchKeyMd5 := fmt.Sprintf("%x", md5.Sum([]byte(mchKey))) bodyMap, err := XMLWalk(body) if err != nil { // ่ฟ”ๅ›žๆ•ฐๆฎ return nil, err } reqInfo, ok := bodyMap["req_info"] if !ok { return nil, fmt.Errorf("no key req_info %s", body) } reqInfoStr, ok := reqInfo.(string) if !ok { return nil, fmt.Errorf("error format req_info: %s", body) } reqInfoBytes, err := base64.StdEncoding.DecodeString(reqInfoStr) if err != nil { return nil, err } reqInfoFull, err := DecryptAesEcb(reqInfoBytes, []byte(mchKeyMd5)) if err != nil { return nil, err } var bodyXML StWxRefundCb err = xml.Unmarshal(reqInfoFull, &bodyXML) if err != nil { return nil, err } return &bodyXML, nil } // WxPayV3Refunds ้€€ๆฌพ func WxPayV3Refunds(keySerial string, key *rsa.PrivateKey, mchID, 
transactionID, outRefundNo, cbURL string, totalFee, refundFee int64) (*StWxV3RefundResp, error) { req := gorequest.New(). Post("https://api.mch.weixin.qq.com/v3/refund/domestic/refunds"). Send( H{ "transaction_id": transactionID, "out_refund_no": outRefundNo, "notify_url": cbURL, "amount": H{ "refund": refundFee, "total": totalFee, "currency": "CNY", }, }, ) req, err := WxPayV3Sign( mchID, keySerial, key, req, ) if err != nil { return nil, err } _, body, errs := req.EndBytes() if errs != nil { return nil, errs[0] } Log.Debugf("body: %s", body) var resp StWxV3RefundResp err = jsoniter.Unmarshal(body, &resp) if err != nil { return nil, err } if resp.Code != "" { return nil, fmt.Errorf("refund err: %s", body) } return &resp, nil } // WxPayV3DecodeRefundsCb ่งฃๆž้€€ๆฌพๅ›ž่ฐƒ func WxPayV3DecodeRefundsCb(v3Key string, body []byte) (*StWxV3RefundCbContent, error) { var rawResp StWxV3RefundCb err := jsoniter.Unmarshal(body, &rawResp) if err != nil { return nil, err } if rawResp.EventType != "REFUND.SUCCESS" { return nil, fmt.Errorf("error event_type: %s", rawResp.EventType) } if rawResp.ResourceType != "encrypt-resource" { return nil, fmt.Errorf("error resource_type: %s", rawResp.ResourceType) } originalType := rawResp.Resource.OriginalType if originalType != "refund" { return nil, fmt.Errorf("error original_type: %s", originalType) } algorithm := rawResp.Resource.Algorithm if algorithm != "AEAD_AES_256_GCM" { return nil, fmt.Errorf("error algorithm: %s", algorithm) } ciphertext := rawResp.Resource.Ciphertext associatedData := rawResp.Resource.AssociatedData nonce := rawResp.Resource.Nonce plain, err := WxPayV3Decrype( v3Key, ciphertext, nonce, associatedData, ) if err != nil { return nil, err } Log.Debugf("plain: %s", plain) var content StWxV3RefundCbContent err = jsoniter.Unmarshal([]byte(plain), &content) if err != nil { return nil, err } return &content, nil }
ateKey, cols [
identifier_name
wxpayv3.go
package mcommon import ( "bytes" "crypto" "crypto/aes" "crypto/cipher" "crypto/md5" "crypto/rand" "crypto/rsa" "crypto/sha256" "crypto/x509" "encoding/base64" "encoding/pem" "encoding/xml" "fmt" "io/ioutil" "net/url" "strconv" "time" "github.com/gin-gonic/gin" jsoniter "github.com/json-iterator/go" "github.com/parnurzeal/gorequest" ) // StWxPayRawResp ๅ›žๅค type StWxPayRawResp struct { ID string `json:"id"` CreateTime time.Time `json:"create_time"` ResourceType string `json:"resource_type"` EventType string `json:"event_type"` Summary string `json:"summary"` Resource struct { OriginalType string `json:"original_type"` Algorithm string `json:"algorithm"` Ciphertext string `json:"ciphertext"` AssociatedData string `json:"associated_data"` Nonce string `json:"nonce"` } `json:"resource"` } // StWxPayResp ๅ›žๅค type StWxPayResp struct { Mchid string `json:"mchid"` Appid string `json:"appid"` OutTradeNo string `json:"out_trade_no"` TransactionID string `json:"transaction_id"` TradeType string `json:"trade_type"` TradeState string `json:"trade_state"` TradeStateDesc string `json:"trade_state_desc"` BankType string `json:"bank_type"` Attach string `json:"attach"` SuccessTime time.Time `json:"success_time"` Payer struct { Openid string `json:"openid"` } `json:"payer"` Amount struct { Total int `json:"total"` PayerTotal int `json:"payer_total"` Currency string `json:"currency"` PayerCurrency string `json:"payer_currency"` } `json:"amount"` } // StWxRefundCb ๅ›ž่ฐƒ type StWxRefundCb struct { XMLName xml.Name `xml:"root"` Text string `xml:",chardata"` OutRefundNo string `xml:"out_refund_no"` OutTradeNo string `xml:"out_trade_no"` RefundAccount string `xml:"refund_account"` RefundFee string `xml:"refund_fee"` RefundID string `xml:"refund_id"` RefundRecvAccout string `xml:"refund_recv_accout"` RefundRequestSource string `xml:"refund_request_source"` RefundStatus string `xml:"refund_status"` SettlementRefundFee string `xml:"settlement_refund_fee"` SettlementTotalFee string `xml:"settlement_total_fee"` SuccessTime string `xml:"success_time"` TotalFee string `xml:"total_fee"` TransactionID string `xml:"transaction_id"` } type StWxV3RefundResp struct { Amount struct { Currency string `json:"currency"` DiscountRefund int `json:"discount_refund"` PayerRefund int `json:"payer_refund"` PayerTotal int `json:"payer_total"` Refund int `json:"refund"` SettlementRefund int `json:"settlement_refund"` SettlementTotal int `json:"settlement_total"` Total int `json:"total"` } `json:"amount"` Channel string `json:"channel"` CreateTime time.Time `json:"create_time"` FundsAccount string `json:"funds_account"` OutRefundNo string `json:"out_refund_no"` OutTradeNo string `json:"out_trade_no"` PromotionDetail []interface{} `json:"promotion_detail"` RefundID string `json:"refund_id"` Status string `json:"status"` TransactionID string `json:"transaction_id"` UserReceivedAccount string `json:"user_received_account"` Code string `json:"code"` Message string `json:"message"` } type StWxV3RefundCb struct { ID string `json:"id"` CreateTime time.Time `json:"create_time"` ResourceType string `json:"resource_type"` EventType string `json:"event_type"` Summary string `json:"summary"` Resource struct { OriginalType string `json:"original_type"` Algorithm string `json:"algorithm"` Ciphertext string `json:"ciphertext"` AssociatedData string `json:"associated_data"` Nonce string `json:"nonce"` } `json:"resource"` } type StWxV3RefundCbContent struct { Mchid string `json:"mchid"` OutTradeNo string `json:"out_trade_no"` TransactionID string 
`json:"transaction_id"` OutRefundNo string `json:"out_refund_no"` RefundID string `json:"refund_id"` RefundStatus string `json:"refund_status"` SuccessTime time.Time `json:"success_time"` Amount struct { Total int `json:"total"` Refund int `json:"refund"` PayerTotal int `json:"payer_total"` PayerRefund int `json:"payer_refund"` } `json:"amount"` UserReceivedAccount string `json:"user_received_account"` } // RsaSign ็ญพๅ func RsaSign(signContent string, privateKey *rsa.PrivateKey, hash crypto.Hash) (string, error) { shaNew := hash.New() shaNew.Write([]byte(signContent)) hashed := shaNew.Sum(nil) signature, err := rsa.SignPKCS1v15(rand.Reader, privateKey, hash, hashed) if err != nil { return "", err } return base64.StdEncoding.EncodeToString(signature), nil } // WxPayV3SignStr ่Žทๅ–็ญพๅ็ป“ๆžœ func WxPayV3SignStr(key *rsa.PrivateKey, cols []string) (string, error) { var buf bytes.Buffer for _, col := range cols { buf.WriteString(col) buf.WriteString("\n") } sign, err := RsaSign(buf.String(), key, crypto.SHA256) if err != nil { return "", err } return sign, nil } // WxPayV3Sign v3็ญพๅ func WxPayV3Sign(mchid, keySerial string, key *rsa.PrivateKey, req *gorequest.SuperAgent) (*gorequest.SuperAgent, error) { timestamp := time.Now().Unix() nonce := GetUUIDStr() uri, err := url.Parse(req.Url) if err != nil { return nil, err } var bodyBytes []byte if req.Method == "POST" { request, err := req.MakeRequest() if err != nil { return nil, err } bodyReader, err := request.GetBody() if err != nil { return nil, err } bodyBytes, err = ioutil.ReadAll(bodyReader) if err != nil { return nil, err } } sign, err := WxPayV3SignStr(key, []string{ req.Method, uri.Path, strconv.FormatInt(timestamp, 10), nonce, string(bodyBytes), }) if err != nil { return nil, err } auth := fmt.Sprintf( `WECHATPAY2-SHA256-RSA2048 mchid="%s",nonce_str="%s",signature="%s",timestamp="%d",serial_no="%s"`, mchid, nonce, sign, timestamp, keySerial, ) req = req. Set("Authorization", auth). Set("Accept", "application/json"). 
Set("User-Agent", "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50") return req, nil } // WxPayV3Decrype ่งฃๅฏ† func WxPayV3Decrype(key string, cipherStr, nonce, associatedData string) (string, error) { keyBytes := []byte(key) nonceBytes := []byte(nonce) associatedDataBytes := []byte(associatedData) ciphertext, err := base64.StdEncoding.DecodeString(cipherStr) if err != nil { return "", err } block, err := aes.NewCipher(keyBytes) if err != nil { return "", err } aesgcm, err := cipher.NewGCM(block) if err != nil { return "", err } plaintext, err := aesgcm.Open(nil, nonceBytes, ciphertext, associatedDataBytes) if err != nil { return "", err } return string(plaintext), nil } // WxPayV3CheckSign v3็ญพๅ้ชŒ่ฏ func WxPayV3CheckSign(header map[string][]string, body []byte, cerStr string) error { if len(cerStr) == 0 { return fmt.Errorf("no cer") } timestamp, err := WxPayV3GetHeaderByKey(header, "Wechatpay-Timestamp") if err != nil { return err } nonce, err := WxPayV3GetHeaderByKey(header, "Wechatpay-Nonce") if err != nil { return err } signature, err := WxPayV3GetHeaderByKey(header, "Wechatpay-Signature") if err != nil { return err } checkStr := timestamp + "\n" + nonce + "\n" + string(body) + "\n" block, _ := pem.Decode([]byte(cerStr)) var cert *x509.Certificate cert, err = x509.ParseCertificate(block.Bytes) if err != nil { return err } rsaPublicKey := cert.PublicKey.(*rsa.PublicKey) oldSign, err := base64.StdEncoding.DecodeString(signature) if err != nil { return err } hashed := sha256.Sum256([]byte(checkStr)) err = rsa.VerifyPKCS1v15(rsaPublicKey, crypto.SHA256, hashed[:], oldSign) return err } // WxPayV3GetHeaderByKey ่Žทๅ–ๅคด func WxPayV3GetHeaderByKey(header map[string][]string, key string) (string, error) { v, ok := header[key] if !ok { return "", fmt.Errorf("no key %s", key) } if len(v) == 0 { return "", fmt.Errorf("key empty %s", key) } return v[0], nil } // WxPayV3GetPrepay ่Žทๅ–้ข„ๆ”ฏไป˜ไฟกๆฏ func WxPayV3GetPrepay(keySerial string, key *rsa.PrivateKey, appID, mchID, openID, payBody, outTradeNo, cbURL string, totalFee int64, expireAt time.Time) (gin.H, string, error) { req := gorequest.New(). Post("https://api.mch.weixin.qq.com/v3/pay/transactions/jsapi"). 
Send( H{ "appid": appID, "mchid": mchID, "description": payBody, "out_trade_no": outTradeNo, "time_expire": expireAt.Format(time.RFC3339), "notify_url": cbURL, "amount": H{ "total": totalFee, }, "payer": H{ "openid": openID, }, }, ) req, err := WxPayV3Sign( mchID, keySerial, key, req, ) if err != nil { return nil, "", err } _, body, errs := req.EndBytes() if errs != nil { return nil, "", errs[0] } var prepayResp struct { PrepayID string `json:"prepay_id"` } err = jsoniter.Unmarshal(body, &prepayResp) if err != nil { return nil, "", err } if len(prepayResp.PrepayID) == 0 { return nil, "", fmt.Errorf("get prepay id err: %s", body) } v, err := WxPayV3SignPrepayid(key, appID, prepayResp.PrepayID) if err != nil { return nil, "", err } return v, prepayResp.PrepayID, nil } // WxPayV3SignPrepayid ็ญพๅprepayid func WxPayV3SignPrepayid(key *rsa.PrivateKey, appID, prepayid string) (gin.H, error) { objTimestamp := strconv.FormatInt(time.Now().Unix(), 10) objNonce := GetUUIDStr() objCol := fmt.Sprintf("prepay_id=%s", prepayid) objSign, err := WxPayV3SignStr( key, []string{ appID, objTimestamp, objNonce, objCol, }, ) if err != nil { return nil, err } v := gin.H{ "timeStamp": objTimestamp, "nonceStr": objNonce, "package": objCol, "signType": "RSA", "paySign": objSign, } return v, nil } // WxPayV3DecodePayResp ่งฃๆžๆ”ฏไป˜ๅ›ž่ฐƒ func WxPayV3DecodePayResp(v3Key string, body []byte, mchid, appid string) (*StWxPayResp, error) { var rawResp StWxPayRawResp err := jsoniter.Unmarshal(body, &rawResp) if err != nil { return nil, err } if rawResp.EventType != "TRANSACTION.SUCCESS" { return nil, fmt.Errorf("error event_type: %s", rawResp.EventType) } if rawResp.ResourceType != "encrypt-resource" { return nil, fmt.Errorf("error resource_type: %s", rawResp.ResourceType) } originalType := rawResp.Resource.OriginalType if originalType != "transaction" { return nil, fmt.Errorf("error original_type: %s", originalType) } algorithm := rawResp.Resource.Algorithm if algorithm != "AEAD_AES_256_GCM" { return nil, fmt.Errorf("error algorithm: %s", algorithm) } ciphertext := rawResp.Resource.Ciphertext associatedData := rawResp.Resource.AssociatedData nonce := rawResp.Resource.Nonce plain, err := WxPayV3Decrype( v3Key, ciphertext, nonce, associatedData, ) if err != nil { return nil, err } var finalResp StWxPayResp err = jsoniter.Unmarshal([]byte(plain), &finalResp) if err != nil { return nil, err } if finalResp.Mchid != mchid { return nil, fmt.Errorf("mchid error") } if finalResp.Appid != appid { return nil, fmt.Errorf("appid error") } if finalResp.TradeState != "SUCCESS" { return nil, fmt.Errorf("error trade_state: %s", finalResp.TradeState) } return &finalResp, nil } // WxPayCheckRefundCb ้ชŒ่ฏๅ›ž่ฐƒ func WxPayCheckRefundCb(mchKey string, body []byte) (*StWxRefundCb, error) { mchKeyMd5 := fmt.Sprintf("%x", md5.Sum([]byte(mchKey))) bodyMap, err := XMLWalk(body) if err != nil { // ่ฟ”ๅ›žๆ•ฐๆฎ return nil, err } reqInfo, ok := bodyMap["req_info"] if !ok { return nil, fmt.Errorf("no key req_info %s", body) } reqInfoStr, ok := reqInfo.(string) if !ok { return nil, fmt.Errorf("error format req_info: %s", body) } reqInfoBytes, err := base64.StdEncoding.DecodeString(reqInfoStr) if err != nil { return nil, err } reqInfoFull, err := DecryptAesEcb(reqInfoBytes, []byte(mchKeyMd5)) if err != nil { return nil, err } var bodyXML StWxRefundCb err = xml.Unmarshal(reqInfoFull, &bodyXML) if err != nil { return nil, err } return &bodyXML, nil } // WxPayV3Refunds ้€€ๆฌพ func WxPayV3Refunds(keySerial string, key *rsa.PrivateKey, mchID, 
transactionID, outRefundNo, cbURL string, totalFee, refundFee int64) (*StWxV3RefundResp, error) { req := gorequest.New(). Post("https://api.mch.weixin.qq.com/v3/refund/domestic/refunds"). Send( H{ "transaction_id": transactionID, "out_refund_no": outRefundNo, "notify_url": cbURL, "amount": H{ "refund": refundFee, "total": totalFee, "currency": "CNY", }, }, ) req, err := WxPayV3Sign( mchID, keySerial, key, req, ) if err != nil { return nil, err } _, body, errs := req.EndBytes() if errs != nil { return nil, errs[0] } Log.Debugf("body: %s", body) var resp StWxV3RefundResp err = jsoniter.Unmarshal(body, &resp) if err != nil { return nil, err } if resp.Code != "" { return nil, fmt.Errorf("refund err: %s", body) } return &resp, nil } // WxPayV3DecodeRefundsCb ่งฃๆž้€€ๆฌพๅ›ž่ฐƒ func WxPayV3DecodeRefundsCb(v3Key string, body []byte) (*StWxV3RefundCbContent, error) { var rawResp StWxV3RefundCb err := jsoniter.Unmarshal(body, &rawResp) if err != nil { return nil, err }
if rawResp.EventType != "REFUND.SUCCESS" { return nil, fmt.Errorf("error event_type: %s", rawResp.EventType) } if rawResp.ResourceType != "encrypt-resource" { return nil, fmt.Errorf("error resource_type: %s", rawResp.ResourceType) } originalType := rawResp.Resource.OriginalType if originalType != "refund" { return nil, fmt.Errorf("error original_type: %s", originalType) } algorithm := rawResp.Resource.Algorithm if algorithm != "AEAD_AES_256_GCM" { return nil, fmt.Errorf("error algorithm: %s", algorithm) } ciphertext := rawResp.Resource.Ciphertext associatedData := rawResp.Resource.AssociatedData nonce := rawResp.Resource.Nonce plain, err := WxPayV3Decrype( v3Key, ciphertext, nonce, associatedData, ) if err != nil { return nil, err } Log.Debugf("plain: %s", plain) var content StWxV3RefundCbContent err = jsoniter.Unmarshal([]byte(plain), &content) if err != nil { return nil, err } return &content, nil }
identifier_body
wxpayv3.go
package mcommon import ( "bytes" "crypto" "crypto/aes" "crypto/cipher" "crypto/md5" "crypto/rand" "crypto/rsa" "crypto/sha256" "crypto/x509" "encoding/base64" "encoding/pem" "encoding/xml" "fmt" "io/ioutil" "net/url" "strconv" "time" "github.com/gin-gonic/gin" jsoniter "github.com/json-iterator/go" "github.com/parnurzeal/gorequest" ) // StWxPayRawResp ๅ›žๅค type StWxPayRawResp struct { ID string `json:"id"` CreateTime time.Time `json:"create_time"` ResourceType string `json:"resource_type"` EventType string `json:"event_type"` Summary string `json:"summary"` Resource struct { OriginalType string `json:"original_type"` Algorithm string `json:"algorithm"` Ciphertext string `json:"ciphertext"` AssociatedData string `json:"associated_data"` Nonce string `json:"nonce"` } `json:"resource"` } // StWxPayResp ๅ›žๅค type StWxPayResp struct { Mchid string `json:"mchid"` Appid string `json:"appid"` OutTradeNo string `json:"out_trade_no"` TransactionID string `json:"transaction_id"` TradeType string `json:"trade_type"` TradeState string `json:"trade_state"` TradeStateDesc string `json:"trade_state_desc"` BankType string `json:"bank_type"` Attach string `json:"attach"` SuccessTime time.Time `json:"success_time"` Payer struct { Openid string `json:"openid"` } `json:"payer"` Amount struct { Total int `json:"total"` PayerTotal int `json:"payer_total"` Currency string `json:"currency"` PayerCurrency string `json:"payer_currency"` } `json:"amount"` } // StWxRefundCb ๅ›ž่ฐƒ type StWxRefundCb struct { XMLName xml.Name `xml:"root"` Text string `xml:",chardata"` OutRefundNo string `xml:"out_refund_no"` OutTradeNo string `xml:"out_trade_no"` RefundAccount string `xml:"refund_account"` RefundFee string `xml:"refund_fee"` RefundID string `xml:"refund_id"` RefundRecvAccout string `xml:"refund_recv_accout"` RefundRequestSource string `xml:"refund_request_source"` RefundStatus string `xml:"refund_status"` SettlementRefundFee string `xml:"settlement_refund_fee"` SettlementTotalFee string `xml:"settlement_total_fee"` SuccessTime string `xml:"success_time"` TotalFee string `xml:"total_fee"` TransactionID string `xml:"transaction_id"` } type StWxV3RefundResp struct { Amount struct { Currency string `json:"currency"` DiscountRefund int `json:"discount_refund"` PayerRefund int `json:"payer_refund"` PayerTotal int `json:"payer_total"` Refund int `json:"refund"` SettlementRefund int `json:"settlement_refund"` SettlementTotal int `json:"settlement_total"` Total int `json:"total"` } `json:"amount"` Channel string `json:"channel"` CreateTime time.Time `json:"create_time"` FundsAccount string `json:"funds_account"` OutRefundNo string `json:"out_refund_no"` OutTradeNo string `json:"out_trade_no"` PromotionDetail []interface{} `json:"promotion_detail"` RefundID string `json:"refund_id"` Status string `json:"status"` TransactionID string `json:"transaction_id"` UserReceivedAccount string `json:"user_received_account"` Code string `json:"code"` Message string `json:"message"` } type StWxV3RefundCb struct { ID string `json:"id"` CreateTime time.Time `json:"create_time"` ResourceType string `json:"resource_type"` EventType string `json:"event_type"` Summary string `json:"summary"` Resource struct { OriginalType string `json:"original_type"` Algorithm string `json:"algorithm"` Ciphertext string `json:"ciphertext"` AssociatedData string `json:"associated_data"` Nonce string `json:"nonce"` } `json:"resource"` } type StWxV3RefundCbContent struct { Mchid string `json:"mchid"` OutTradeNo string `json:"out_trade_no"` TransactionID string 
`json:"transaction_id"` OutRefundNo string `json:"out_refund_no"` RefundID string `json:"refund_id"` RefundStatus string `json:"refund_status"` SuccessTime time.Time `json:"success_time"` Amount struct { Total int `json:"total"` Refund int `json:"refund"` PayerTotal int `json:"payer_total"` PayerRefund int `json:"payer_refund"` } `json:"amount"` UserReceivedAccount string `json:"user_received_account"` } // RsaSign ็ญพๅ func RsaSign(signContent string, privateKey *rsa.PrivateKey, hash crypto.Hash) (string, error) { shaNew := hash.New() shaNew.Write([]byte(signContent)) hashed := shaNew.Sum(nil) signature, err := rsa.SignPKCS1v15(rand.Reader, privateKey, hash, hashed) if err != nil { return "", err } return base64.StdEncoding.EncodeToString(signature), nil } // WxPayV3SignStr ่Žทๅ–็ญพๅ็ป“ๆžœ func WxPayV3SignStr(key *rsa.PrivateKey, cols []string) (string, error) { var buf bytes.Buffer for _, col := range cols { buf.WriteString(col) buf.WriteString("\n") } sign, err := RsaSign(buf.String(), key, crypto.SHA256) if err != nil { return "", err } return sign, nil } // WxPayV3Sign v3็ญพๅ func WxPayV3Sign(mchid, keySerial string, key *rsa.PrivateKey, req *gorequest.SuperAgent) (*gorequest.SuperAgent, error) { timestamp := time.Now().Unix() nonce := GetUUIDStr() uri, err := url.Parse(req.Url) if err != nil { return nil, err } var bodyBytes []byte if req.Method == "POST" { request, err := req.MakeRequest() if err != nil { return nil, err } bodyReader, err := request.GetBody() if err != nil { return nil, err } bodyBytes, err = ioutil.ReadAll(bodyReader) if err != nil { return nil, err } } sign, err := WxPayV3SignStr(key, []string{ req.Method, uri.Path, strconv.FormatInt(timestamp, 10), nonce, string(bodyBytes), }) if err != nil { return nil, err } auth := fmt.Sprintf( `WECHATPAY2-SHA256-RSA2048 mchid="%s",nonce_str="%s",signature="%s",timestamp="%d",serial_no="%s"`, mchid, nonce, sign, timestamp, keySerial, ) req = req. Set("Authorization", auth). Set("Accept", "application/json"). 
Set("User-Agent", "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50") return req, nil } // WxPayV3Decrype ่งฃๅฏ† func WxPayV3Decrype(key string, cipherStr, nonce, associatedData string) (string, error) { keyBytes := []byte(key) nonceBytes := []byte(nonce) associatedDataBytes := []byte(associatedData) ciphertext, err := base64.StdEncoding.DecodeString(cipherStr) if err != nil { return "", err } block, err := aes.NewCipher(keyBytes) if err != nil { return "", err } aesgcm, err := cipher.NewGCM(block) if err != nil { return "", err } plaintext, err := aesgcm.Open(nil, nonceBytes, ciphertext, associatedDataBytes) if err != nil { return "", err } return string(plaintext), nil } // WxPayV3CheckSign v3็ญพๅ้ชŒ่ฏ func WxPayV3CheckSign(header map[string][]string, body []byte, cerStr string) error { if len(cerStr) == 0 { return fmt.Errorf("no cer") } timestamp, err := WxPayV3GetHeaderByKey(header, "Wechatpay-Timestamp") if err != nil { return err } nonce, err := WxPayV3GetHeaderByKey(header, "Wechatpay-Nonce") if err != nil { return err } signature, err := WxPayV3GetHeaderByKey(header, "Wechatpay-Signature") if err != nil { return err } checkStr := timestamp + "\n" + nonce + "\n" + string(body) + "\n" block, _ := pem.Decode([]byte(cerStr)) var cert *x509.Certificate cert, err = x509.ParseCertificate(block.Bytes) if err != nil { return err } rsaPublicKey := cert.PublicKey.(*rsa.PublicKey) oldSign, err := base64.StdEncoding.DecodeString(signature) if err != nil { return err } hashed := sha256.Sum256([]byte(checkStr)) err = rsa.VerifyPKCS1v15(rsaPublicKey, crypto.SHA256, hashed[:], oldSign) return err } // WxPayV3GetHeaderByKey ่Žทๅ–ๅคด func WxPayV3GetHeaderByKey(header map[string][]string, key string) (string, error) { v, ok := header[key] if !ok { return "", fmt.Errorf("no key %s", key) } if len(v) == 0 { return "", fmt.Errorf("key empty %s", key) } return v[0], nil } // WxPayV3GetPrepay ่Žทๅ–้ข„ๆ”ฏไป˜ไฟกๆฏ func WxPayV3GetPrepay(keySerial string, key *rsa.PrivateKey, appID, mchID, openID, payBody, outTradeNo, cbURL string, totalFee int64, expireAt time.Time) (gin.H, string, error) { req := gorequest.New(). Post("https://api.mch.weixin.qq.com/v3/pay/transactions/jsapi"). 
Send( H{ "appid": appID, "mchid": mchID, "description": payBody, "out_trade_no": outTradeNo, "time_expire": expireAt.Format(time.RFC3339), "notify_url": cbURL, "amount": H{ "total": totalFee, }, "payer": H{ "openid": openID, }, }, ) req, err := WxPayV3Sign( mchID, keySerial, key, req, ) if err != nil { return nil, "", err } _, body, errs := req.EndBytes() if errs != nil { return nil, "", errs[0] } var prepayResp struct { PrepayID string `json:"prepay_id"` } err = jsoniter.Unmarshal(body, &prepayResp) if err != nil { return nil, "", err } if len(prepayResp.PrepayID) == 0 { return nil, "", fmt.Errorf("get prepay id err: %s", body) } v, err := WxPayV3SignPrepayid(key, appID, prepayResp.PrepayID) if err != nil { return nil, "", err } return v, prepayResp.PrepayID, nil } // WxPayV3SignPrepayid ็ญพๅprepayid func WxPayV3SignPrepayid(key *rsa.PrivateKey, appID, prepayid string) (gin.H, error) { objTimestamp := strconv.FormatInt(time.Now().Unix(), 10) objNonce := GetUUIDStr() objCol := fmt.Sprintf("prepay_id=%s", prepayid) objSign, err := WxPayV3SignStr( key, []string{ appID, objTimestamp, objNonce, objCol, }, ) if err != nil { return nil, err } v := gin.H{ "timeStamp": objTimestamp, "nonceStr": objNonce, "package": objCol, "signType": "RSA", "paySign": objSign, } return v, nil } // WxPayV3DecodePayResp ่งฃๆžๆ”ฏไป˜ๅ›ž่ฐƒ func WxPayV3DecodePayResp(v3Key string, body []byte, mchid, appid string) (*StWxPayResp, error) { var rawResp StWxPayRawResp err := jsoniter.Unmarshal(body, &rawResp) if err != nil { return nil, err } if rawResp.EventType != "TRANSACTION.SUCCESS" { return nil, fmt.Errorf("error event_type: %s", rawResp.EventType) } if rawResp.ResourceType != "encrypt-resource" { return nil, fmt.Errorf("error resource_type: %s", rawResp.ResourceType) }
" { return nil, fmt.Errorf("error original_type: %s", originalType) } algorithm := rawResp.Resource.Algorithm if algorithm != "AEAD_AES_256_GCM" { return nil, fmt.Errorf("error algorithm: %s", algorithm) } ciphertext := rawResp.Resource.Ciphertext associatedData := rawResp.Resource.AssociatedData nonce := rawResp.Resource.Nonce plain, err := WxPayV3Decrype( v3Key, ciphertext, nonce, associatedData, ) if err != nil { return nil, err } var finalResp StWxPayResp err = jsoniter.Unmarshal([]byte(plain), &finalResp) if err != nil { return nil, err } if finalResp.Mchid != mchid { return nil, fmt.Errorf("mchid error") } if finalResp.Appid != appid { return nil, fmt.Errorf("appid error") } if finalResp.TradeState != "SUCCESS" { return nil, fmt.Errorf("error trade_state: %s", finalResp.TradeState) } return &finalResp, nil } // WxPayCheckRefundCb ้ชŒ่ฏๅ›ž่ฐƒ func WxPayCheckRefundCb(mchKey string, body []byte) (*StWxRefundCb, error) { mchKeyMd5 := fmt.Sprintf("%x", md5.Sum([]byte(mchKey))) bodyMap, err := XMLWalk(body) if err != nil { // ่ฟ”ๅ›žๆ•ฐๆฎ return nil, err } reqInfo, ok := bodyMap["req_info"] if !ok { return nil, fmt.Errorf("no key req_info %s", body) } reqInfoStr, ok := reqInfo.(string) if !ok { return nil, fmt.Errorf("error format req_info: %s", body) } reqInfoBytes, err := base64.StdEncoding.DecodeString(reqInfoStr) if err != nil { return nil, err } reqInfoFull, err := DecryptAesEcb(reqInfoBytes, []byte(mchKeyMd5)) if err != nil { return nil, err } var bodyXML StWxRefundCb err = xml.Unmarshal(reqInfoFull, &bodyXML) if err != nil { return nil, err } return &bodyXML, nil } // WxPayV3Refunds ้€€ๆฌพ func WxPayV3Refunds(keySerial string, key *rsa.PrivateKey, mchID, transactionID, outRefundNo, cbURL string, totalFee, refundFee int64) (*StWxV3RefundResp, error) { req := gorequest.New(). Post("https://api.mch.weixin.qq.com/v3/refund/domestic/refunds"). 
Send( H{ "transaction_id": transactionID, "out_refund_no": outRefundNo, "notify_url": cbURL, "amount": H{ "refund": refundFee, "total": totalFee, "currency": "CNY", }, }, ) req, err := WxPayV3Sign( mchID, keySerial, key, req, ) if err != nil { return nil, err } _, body, errs := req.EndBytes() if errs != nil { return nil, errs[0] } Log.Debugf("body: %s", body) var resp StWxV3RefundResp err = jsoniter.Unmarshal(body, &resp) if err != nil { return nil, err } if resp.Code != "" { return nil, fmt.Errorf("refund err: %s", body) } return &resp, nil } // WxPayV3DecodeRefundsCb ่งฃๆž้€€ๆฌพๅ›ž่ฐƒ func WxPayV3DecodeRefundsCb(v3Key string, body []byte) (*StWxV3RefundCbContent, error) { var rawResp StWxV3RefundCb err := jsoniter.Unmarshal(body, &rawResp) if err != nil { return nil, err } if rawResp.EventType != "REFUND.SUCCESS" { return nil, fmt.Errorf("error event_type: %s", rawResp.EventType) } if rawResp.ResourceType != "encrypt-resource" { return nil, fmt.Errorf("error resource_type: %s", rawResp.ResourceType) } originalType := rawResp.Resource.OriginalType if originalType != "refund" { return nil, fmt.Errorf("error original_type: %s", originalType) } algorithm := rawResp.Resource.Algorithm if algorithm != "AEAD_AES_256_GCM" { return nil, fmt.Errorf("error algorithm: %s", algorithm) } ciphertext := rawResp.Resource.Ciphertext associatedData := rawResp.Resource.AssociatedData nonce := rawResp.Resource.Nonce plain, err := WxPayV3Decrype( v3Key, ciphertext, nonce, associatedData, ) if err != nil { return nil, err } Log.Debugf("plain: %s", plain) var content StWxV3RefundCbContent err = jsoniter.Unmarshal([]byte(plain), &content) if err != nil { return nil, err } return &content, nil }
originalType := rawResp.Resource.OriginalType if originalType != "transaction
conditional_block
functions.py
import os import sys import pygame from settings import * import random import sqlite3 sc = pygame.display.set_mode((WIDTH, HEIGHT)) # current level all_logs = [] def load_image(name, colorkey=None): fullname = os.path.join('data', name) # if the file does not exist, exit if not os.path.isfile(fullname): print(f"ะคะฐะนะป ั ะธะทะพะฑั€ะฐะถะตะฝะธะตะผ '{fullname}' ะฝะต ะฝะฐะนะดะตะฝ") sys.exit() image = pygame.image.load(fullname) return image def add_to_log(text): all_logs.append(text) if len(all_logs) > 20: del all_logs[0] def draw_white_rect(x, y): pygame.draw.rect(sc, (255, 255, 25
5), (x, y, 50, 50), width=1) def print_log(): font = pygame.font.Font(None, 25) text_coord = 25 for line in all_logs: string_rendered = font.render(line, 1, pygame.Color('yellow')) intro_rect = string_rendered.get_rect() text_coord += 10 intro_rect.top = text_coord intro_rect.x = 725 text_coord += intro_rect.height sc.blit(string_rendered, intro_rect) def update_wall_color(cur_level): if cur_level == 1: texture_wall = load_image("image/Brick_Wall_009.jpg") texture_wall = pygame.transform.scale(texture_wall, (cell_size * 2, cell_size * 2)) elif cur_level == 2: texture_wall = load_image("image/stone_wall2.png") texture_wall = pygame.transform.scale(texture_wall, (cell_size * 2, cell_size * 2)) else: texture_wall = load_image("image/wall3.jpg") texture_wall = pygame.transform.scale(texture_wall, (cell_size * 2, cell_size * 2)) return texture_wall texture_floor = load_image("image/dark-brick-wall-texture_1048-7626.jpg") texture_floor = pygame.transform.scale(texture_floor, (cell_size, cell_size)) hp_bar = load_image("image/hud/frame.png") hp_bar = pygame.transform.scale(hp_bar, (275, 20)) door = load_image("image/castledoors.png") door = pygame.transform.scale(door, (cell_size, cell_size)) frame = load_image("image/hud/button_1(frame).png") frame = pygame.transform.scale(frame, (cell_size, cell_size)) blood_screen = load_image("image/BloodOverlay.png") blood_screen = pygame.transform.scale(blood_screen, (WIDTH, HEIGHT)) # ัะพะทะดะฐะดะธะผ ะณั€ัƒะฟะฟัƒ, ัะพะดะตั€ะถะฐั‰ัƒัŽ ะฒัะต ัะฟั€ะฐะนั‚ั‹ all_sprites = pygame.sprite.Group() equipment_sprites = pygame.sprite.Group() inventory_sprites = pygame.sprite.Group() character_sprites = pygame.sprite.Group() def terminate(): pygame.quit() sys.exit() def start_screen(): intro_text = ["ะ”ะžะ‘ะ ะž ะŸะžะ–ะะ›ะžะ’ะะขะฌ ะ’ PYDUNGEON", "ะ”ะปั ั‚ะพะณะพ, ั‡ั‚ะพะฑั‹ ะฒั‹ะฑั€ะฐั‚ัŒัั ะพั‚ ััŽะดะฐ,", "ะ’ะฐะผ ะฟะพะฝะฐะดะพะฑะธั‚ัั ะฟั€ะพะนั‚ะธ ั‚ั€ะธ ัƒั€ะพะฒะฝั ะบะฐั‚ะฐะบะพะผะฑ ะธ ะฟะพะฑะพั€ะพั‚ัŒ ะฝะตะฒะธะดะฐะฝะฝั‹ั… ั‡ัƒะดะธั‰", "ะ•ัะปะธ ะ’ั‹ ะฟัะธั…, ะฝะฐะถะผะธั‚ะต ะปัŽะฑัƒัŽ ะบะฝะพะฟะบัƒ"] fon = pygame.transform.scale(load_image('image/dungeon_intro.jpeg'), (WIDTH, HEIGHT)) sc.blit(fon, (0, 0)) font = pygame.font.Font(None, 30) text_coord = 50 for line in intro_text: string_rendered = font.render(line, 1, pygame.Color('yellow')) intro_rect = string_rendered.get_rect() text_coord += 10 intro_rect.top = text_coord intro_rect.x = 10 text_coord += intro_rect.height sc.blit(string_rendered, intro_rect) while True: for event in pygame.event.get(): if event.type == pygame.QUIT: terminate() elif event.type == pygame.KEYDOWN or \ event.type == pygame.MOUSEBUTTONDOWN: return pygame.display.flip() def level2_screen(): intro_text = ["ะ’ะซ ะกะœะžะ“ะ›ะ˜ ะŸะ ะžะ™ะขะ˜ ะŸะ•ะ ะ’ะซะ™ ะฃะ ะžะ’ะ•ะะฌ ะŸะžะ”ะ—ะ•ะœะ•ะ›ะฌะฏ", "ะฝะพ ัั‚ะพ ะตั‰ะต ะฝะต ะบะพะฝะตั†...", "ะ’ะฟะตั€ะตะดะธ ะ’ะฐั ะถะดัƒั‚ ะตั‰ะต ะฑะพะปะตะต ัะธะปัŒะฝั‹ะต ะฟั€ะพั‚ะธะฒะฝะธะบะธ", "ะธ ะณะพั€ะฐะทะดะพ ะผะตะฝัŒัˆะต ัˆะฐะฝัะพะฒ ะฝะฐ ะฒั‹ะถะธะฒะฐะฝะธะต"] fon = pygame.transform.scale(load_image('image/level2_dungeon.jpg'), (WIDTH, HEIGHT)) sc.blit(fon, (0, 0)) font = pygame.font.Font(None, 30) text_coord = 50 for line in intro_text: string_rendered = font.render(line, 1, pygame.Color('yellow')) intro_rect = string_rendered.get_rect() text_coord += 10 intro_rect.top = text_coord intro_rect.x = 10 text_coord += intro_rect.height sc.blit(string_rendered, intro_rect) while True: for event in pygame.event.get(): if event.type == pygame.QUIT: terminate() elif event.type == pygame.KEYDOWN or \ event.type == 
pygame.MOUSEBUTTONDOWN: return pygame.display.flip() def level3_screen(): intro_text = ["ะะ•ะœะะžะ“ะ˜ะ• ะ—ะะฅะžะ”ะ˜ะ›ะ˜ ะขะะš ะ”ะะ›ะ•ะšะž", "ะ˜ ะตั‰ะต ะฝะธะบั‚ะพ ะฝะต ะฒะพะทะฒั€ะฐั‰ะฐะปัั", "ะก ะฟะพัะปะตะดะฝะตะณะพ, ัะฐะผะพะณะพ ะผั€ะฐั‡ะฝะพะณะพ ะธ ะณะปัƒะฑะพะบะพะณะพ ัƒั€ะพะฒะฝั", "ะกะผะตะปัŒั‡ะฐะบ ะปะธ ั‚ั‹ ะธะปะธ ะฑะตะทัƒะผะตั†?"] fon = pygame.transform.scale(load_image('image/level3_dungeon.jpg'), (WIDTH, HEIGHT)) sc.blit(fon, (0, 0)) font = pygame.font.Font(None, 30) text_coord = 50 for line in intro_text: string_rendered = font.render(line, 1, pygame.Color('yellow')) intro_rect = string_rendered.get_rect() text_coord += 10 intro_rect.top = text_coord intro_rect.x = 10 text_coord += intro_rect.height sc.blit(string_rendered, intro_rect) while True: for event in pygame.event.get(): if event.type == pygame.QUIT: terminate() elif event.type == pygame.KEYDOWN or \ event.type == pygame.MOUSEBUTTONDOWN: return pygame.display.flip() def gameover_screen(): intro_text = ["ะ’ ัะปะตะดัƒัŽั‰ะธะน ั€ะฐะท ะฟะพะฒะตะทะตั‚", "ะฒะพะทะผะพะถะฝะพ..."] fon = pygame.transform.scale(load_image('image/gameover.jpg'), (WIDTH, HEIGHT)) sc.blit(fon, (0, 0)) font = pygame.font.Font(None, 30) text_coord = 700 for line in intro_text: string_rendered = font.render(line, 1, pygame.Color('yellow')) intro_rect = string_rendered.get_rect() text_coord += 10 intro_rect.top = text_coord intro_rect.x = 10 text_coord += intro_rect.height sc.blit(string_rendered, intro_rect) while True: for event in pygame.event.get(): if event.type == pygame.QUIT: terminate() elif event.type == pygame.KEYDOWN or \ event.type == pygame.MOUSEBUTTONDOWN: return pygame.display.flip() def victory_screen(): intro_text = ["ะญั‚ะพ ะฟะพะฑะตะดะฐ!", "ะ’ั‹ ัะผะพะณะปะธ ะฒั‹ะฑั€ะฐั‚ัŒัั ะธะท ะฟะพะดะทะตะผะตะปัŒั,", "ั‡ั‚ะพ ะพะฑะตัะฟะตั‡ะธั‚ ะ’ะฐะผ ะฑะพะณะฐั‚ัั‚ะฒะพ ะธ ัะปะฐะฒัƒ ะฝะฐ ะฒััŽ ะถะธะทะฝัŒ"] fon = pygame.transform.scale(load_image('image/victory.jpg'), (WIDTH, HEIGHT)) sc.blit(fon, (0, 0)) font = pygame.font.Font(None, 30) text_coord = 700 for line in intro_text: string_rendered = font.render(line, 1, pygame.Color('yellow')) intro_rect = string_rendered.get_rect() text_coord += 10 intro_rect.top = text_coord intro_rect.x = 10 text_coord += intro_rect.height sc.blit(string_rendered, intro_rect) while True: for event in pygame.event.get(): if event.type == pygame.QUIT: terminate() elif event.type == pygame.KEYDOWN or \ event.type == pygame.MOUSEBUTTONDOWN: return pygame.display.flip() def exit_screen(): button_exit = Button(575, 500, 45, 30, "ะžะบ") name = '' fon = pygame.transform.scale(load_image('image\exit.jpg'), (WIDTH, HEIGHT)) sc.blit(fon, (0, 0)) font = pygame.font.Font(None, 30) title = font.render('ะะฐะฟะธัˆะธ ัะฒะพะต ะธะผั', True, (255, 255, 255)) while True: sc.blit(fon, (0, 0)) for event in pygame.event.get(): if event.type == pygame.QUIT: terminate() if event.type == pygame.KEYDOWN: if event.unicode.isalpha(): name += event.unicode elif event.key == K_BACKSPACE: name = name[:-1] elif event.key == K_RETURN: name = "" if event.type == pygame.MOUSEBUTTONDOWN: if button_exit.push_button(event.pos): gameover(name, killed_monsters) text = font.render(name, True, (255, 255, 255)) rect = text.get_rect() rect.center = (600, 400) button_exit.draw() sc.blit(text, rect) sc.blit(title, (500, 250)) pygame.display.flip() def exchange_equipment_inventory(inventory, hero): obj = inventory.get_selected_cell() if obj.get_type() == "weapon": old_w = hero.replace_weapon(obj) 
inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w if obj.get_name() == "Helmet1" or obj.get_name() == 'Helmet2': old_w = hero.replace_helmet(obj) inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w if obj.get_name() == "Cuiras1" or obj.get_name() == 'Cuiras2': old_w = hero.replace_armor(obj) inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w if obj.get_name() == "Leg_armor1" or obj.get_name() == 'Leg_armor2': old_w = hero.replace_leg(obj) inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w if obj.get_name() == "Arm_armor1" or obj.get_name() == 'Arm_armor2': old_w = hero.replace_bracers(obj) inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w if obj.get_type() == "potion": if obj.get_name() == "Small_health": if hero.health + 5 <= hero.max_health: hero.health += 5 else: hero.health = hero.max_health elif obj.get_name() == "Small_strength": hero.max_health += 5 inventory.clear_cell() return inventory, hero class Button: def __init__(self, x, y, height, width, text): self.x = x self.y = y self.height = height self.width = width self.text = text def draw(self): pygame.draw.rect(sc, (255, 255, 255), (self.x, self.y, self.height, self.width), width=1) font = pygame.font.Font(None, self.width) text = font.render(self.text, True, (255, 255, 255)) sc.blit(text, (self.x + 5, self.y + 5)) def push_button(self, pos): if pos[0] > self.x and pos[0] < self.x + self.height and pos[1] > self.y and pos[1] < self.y + self.width: return True class Particle(pygame.sprite.Sprite): # ัะณะตะฝะตั€ะธั€ัƒะตะผ ั‡ะฐัั‚ะธั†ั‹ ั€ะฐะทะฝะพะณะพ ั€ะฐะทะผะตั€ะฐ fire = [load_image("image/bloodsplats.png")] fire[0] = pygame.transform.scale(fire[0], (int(cell_size * 0.2), int(cell_size * 0.2))) for scale in (5, 10, 20): fire.append(pygame.transform.scale(fire[0], (scale, scale))) def __init__(self, pos, dx, dy, screen_rect): super().__init__(all_sprites) self.image = random.choice(self.fire) self.rect = self.image.get_rect() self.screen_rect = screen_rect # ัƒ ะบะฐะถะดะพะน ั‡ะฐัั‚ะธั†ั‹ ัะฒะพั ัะบะพั€ะพัั‚ัŒ โ€” ัั‚ะพ ะฒะตะบั‚ะพั€ self.velocity = [0, 0] self.velocity[0] = random.randrange(-1, 1) # ะธ ัะฒะพะธ ะบะพะพั€ะดะธะฝะฐั‚ั‹ self.rect.x, self.rect.y = pos # ะณั€ะฐะฒะธั‚ะฐั†ะธั ะฑัƒะดะตั‚ ะพะดะธะฝะฐะบะพะฒะพะน (ะทะฝะฐั‡ะตะฝะธะต ะบะพะฝัั‚ะฐะฝั‚ั‹) self.gravity = GRAVITY def update(self): # ะฟั€ะธะผะตะฝัะตะผ ะณั€ะฐะฒะธั‚ะฐั†ะธะพะฝะฝั‹ะน ัั„ั„ะตะบั‚: # ะดะฒะธะถะตะฝะธะต ั ัƒัะบะพั€ะตะฝะธะตะผ ะฟะพะด ะดะตะนัั‚ะฒะธะตะผ ะณั€ะฐะฒะธั‚ะฐั†ะธะธ self.velocity[1] += self.gravity # ะฟะตั€ะตะผะตั‰ะฐะตะผ ั‡ะฐัั‚ะธั†ัƒ self.rect.x += self.velocity[0] self.rect.y += self.velocity[1] # ัƒะฑะธะฒะฐะตะผ, ะตัะปะธ ั‡ะฐัั‚ะธั†ะฐ ัƒัˆะปะฐ ะทะฐ ัะบั€ะฐะฝ if not self.rect.colliderect(self.screen_rect): self.kill() GRAVITY = 0.5 def create_particles(position, screen_rect): # ะบะพะปะธั‡ะตัั‚ะฒะพ ัะพะทะดะฐะฒะฐะตะผั‹ั… ั‡ะฐัั‚ะธั† particle_count = 20 # ะฒะพะทะผะพะถะฝั‹ะต ัะบะพั€ะพัั‚ะธ numbers = range(-5, 6) for _ in range(particle_count): Particle(position, random.choice(numbers), random.choice(numbers), screen_rect) def gameover(name, score): con = sqlite3.connect("Data_Base.db") cur = con.cursor() if name == '': name = 'ะฐะฝะพะฝะธะผ' cur.execute(f"""INSERT INTO result_table(name, murders) VALUES('{name}', '{score}')""") con.commit() con.close() terminate()
s[0] def draw_white_rect(x, y): pygame.draw.rect(sc, (255, 255, 25
identifier_body
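The gameover helper in the record above builds its SQL INSERT with an f-string, so the player's name is pasted straight into the query text. sqlite3 accepts "?" placeholders, which sidesteps both quoting bugs and injection; below is a minimal sketch of the same insert with placeholders. The table and column names come from the record, while the function name and db_path default are illustrative.

import sqlite3

def save_result(name: str, score: int, db_path: str = "Data_Base.db") -> None:
    # Fall back to an anonymous name, mirroring the original behaviour.
    if not name:
        name = "anonym"
    con = sqlite3.connect(db_path)
    try:
        # "?" placeholders let sqlite3 escape the values itself.
        con.execute(
            "INSERT INTO result_table(name, murders) VALUES (?, ?)",
            (name, score),
        )
        con.commit()
    finally:
        con.close()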
functions.py
import os import sys import pygame from settings import * import random import sqlite3 sc = pygame.display.set_mode((WIDTH, HEIGHT)) # ั‚ะตะบัƒั‰ะธะน ัƒั€ะพะฒะตะฝัŒ all_logs = [] def load_image(name, colorkey=None): fullname = os.path.join('data', name) # ะตัะปะธ ั„ะฐะนะป ะฝะต ััƒั‰ะตัั‚ะฒัƒะตั‚, ั‚ะพ ะฒั‹ั…ะพะดะธะผ if not os.path.isfile(fullname): print(f"ะคะฐะนะป ั ะธะทะพะฑั€ะฐะถะตะฝะธะตะผ '{fullname}' ะฝะต ะฝะฐะนะดะตะฝ") sys.exit() image = pygame.image.load(fullname) return image def add_to_log(text): all_logs.append(text) if len(all_logs) > 20: del all_logs[0] def draw_white_rect(x, y): pygame.draw.rect(sc, (255, 255, 255), (x, y, 50, 50), width=1) def print_log(): font = pygame.font.Font(None, 25) text_coord = 25 for line in all_logs: string_rendered = font.render(line, 1, pygame.Color('yellow')) intro_rect = string_rendered.get_rect() text_coord += 10 intro_rect.top = text_coord intro_rect.x = 725 text_coord += intro_rect.height sc.blit(string_rendered, intro_rect) def update_wall_color(cur_level): if cur_level == 1: texture_wall = load_image("image/Brick_Wall_009.jpg") texture_wall = pygame.transform.scale(texture_wall, (cell_size * 2, cell_size * 2)) elif cur_level == 2: texture_wall = load_image("image/stone_wall2.png") texture_wall = pygame.transform.scale(texture_wall, (cell_size * 2, cell_size * 2)) else: texture_wall = load_image("image/wall3.jpg") texture_wall = pygame.transform.scale(texture_wall, (cell_size * 2, cell_size * 2)) return texture_wall texture_floor = load_image("image/dark-brick-wall-texture_1048-7626.jpg") texture_floor = pygame.transform.scale(texture_floor, (cell_size, cell_size)) hp_bar = load_image("image/hud/frame.png") hp_bar = pygame.transform.scale(hp_bar, (275, 20)) door = load_image("image/castledoors.png") door = pygame.transform.scale(door, (cell_size, cell_size)) frame = load_image("image/hud/button_1(frame).png") frame = pygame.transform.scale(frame, (cell_size, cell_size)) blood_screen = load_image("image/BloodOverlay.png") blood_screen = pygame.transform.scale(blood_screen, (WIDTH, HEIGHT)) # ัะพะทะดะฐะดะธะผ ะณั€ัƒะฟะฟัƒ, ัะพะดะตั€ะถะฐั‰ัƒัŽ ะฒัะต ัะฟั€ะฐะนั‚ั‹ all_sprites = pygame.sprite.Group() equipment_sprites = pygame.sprite.Group() inventory_sprites = pygame.sprite.Group() character_sprites = pygame.sprite.Group() def terminate(): pygame.quit() sys.exit() def start_screen(): intro_text = ["ะ”ะžะ‘ะ ะž ะŸะžะ–ะะ›ะžะ’ะะขะฌ ะ’ PYDUNGEON", "ะ”ะปั ั‚ะพะณะพ, ั‡ั‚ะพะฑั‹ ะฒั‹ะฑั€ะฐั‚ัŒัั ะพั‚ ััŽะดะฐ,", "ะ’ะฐะผ ะฟะพะฝะฐะดะพะฑะธั‚ัั ะฟั€ะพะนั‚ะธ ั‚ั€ะธ ัƒั€ะพะฒะฝั ะบะฐั‚ะฐะบะพะผะฑ ะธ ะฟะพะฑะพั€ะพั‚ัŒ ะฝะตะฒะธะดะฐะฝะฝั‹ั… ั‡ัƒะดะธั‰", "ะ•ัะปะธ ะ’ั‹ ะฟัะธั…, ะฝะฐะถะผะธั‚ะต ะปัŽะฑัƒัŽ ะบะฝะพะฟะบัƒ"] fon = pygame.transform.scale(load_image('image/dungeon_intro.jpeg'), (WIDTH, HEIGHT)) sc.blit(fon, (0, 0)) font = pygame.font.Font(None, 30) text_coord = 50 for line in intro_text: string_rendered = font.render(line, 1, pygame.Color('yellow')) intro_rect = string_rendered.get_rect() text_coord += 10 intro_rect.top = text_coord intro_rect.x = 10 text_coord += intro_rect.height sc.blit(string_rendered, intro_rect) while True: for event in pygame.event.get(): if event.type == pygame.QUIT: terminate() elif event.type == pygame.KEYDOWN or \ event.type == pygame.MOUSEBUTTONDOWN: return pygame.display.flip() def level2_screen(): intro_text = ["ะ’ะซ ะกะœะžะ“ะ›ะ˜ ะŸะ ะžะ™ะขะ˜ ะŸะ•ะ ะ’ะซะ™ ะฃะ ะžะ’ะ•ะะฌ ะŸะžะ”ะ—ะ•ะœะ•ะ›ะฌะฏ", "ะฝะพ ัั‚ะพ ะตั‰ะต ะฝะต ะบะพะฝะตั†...", "ะ’ะฟะตั€ะตะดะธ ะ’ะฐั ะถะดัƒั‚ ะตั‰ะต ะฑะพะปะตะต ัะธะปัŒะฝั‹ะต 
ะฟั€ะพั‚ะธะฒะฝะธะบะธ", "ะธ ะณะพั€ะฐะทะดะพ ะผะตะฝัŒัˆะต ัˆะฐะฝัะพะฒ ะฝะฐ ะฒั‹ะถะธะฒะฐะฝะธะต"] fon = pygame.transform.scale(load_image('image/level2_dungeon.jpg'), (WIDTH, HEIGHT)) sc.blit(fon, (0, 0)) font = pygame.font.Font(None, 30) text_coord = 50 for line in intro_text: string_rendered = font.render(line, 1, pygame.Color('yellow')) intro_rect = string_rendered.get_rect() text_coord += 10 intro_rect.top = text_coord intro_rect.x = 10 text_coord += intro_rect.height sc.blit(string_rendered, intro_rect) while True: for event in pygame.event.get(): if event.type == pygame.QUIT: terminate() elif event.type == pygame.KEYDOWN or \ event.type == pygame.MOUSEBUTTONDOWN: return pygame.display.flip() def level3_screen(): intro_text = ["ะะ•ะœะะžะ“ะ˜ะ• ะ—ะะฅะžะ”ะ˜ะ›ะ˜ ะขะะš ะ”ะะ›ะ•ะšะž", "ะ˜ ะตั‰ะต ะฝะธะบั‚ะพ ะฝะต ะฒะพะทะฒั€ะฐั‰ะฐะปัั", "ะก ะฟะพัะปะตะดะฝะตะณะพ, ัะฐะผะพะณะพ ะผั€ะฐั‡ะฝะพะณะพ ะธ ะณะปัƒะฑะพะบะพะณะพ ัƒั€ะพะฒะฝั", "ะกะผะตะปัŒั‡ะฐะบ ะปะธ ั‚ั‹ ะธะปะธ ะฑะตะทัƒะผะตั†?"] fon = pygame.transform.scale(load_image('image/level3_dungeon.jpg'), (WIDTH, HEIGHT)) sc.blit(fon, (0, 0)) font = pygame.font.Font(None, 30) text_coord = 50 for line in intro_text: string_rendered = font.render(line, 1, pygame.Color('yellow')) intro_rect = string_rendered.get_rect() text_coord += 10 intro_rect.top = text_coord intro_rect.x = 10 text_coord += intro_rect.height sc.blit(string_rendered, intro_rect) while True: for event in pygame.event.get(): if event.type == pygame.QUIT: terminate() elif event.type == pygame.KEYDOWN or \ event.type == pygame.MOUSEBUTTONDOWN: return pygame.display.flip() def gameover_screen(): intro_text = ["ะ’ ัะปะตะดัƒัŽั‰ะธะน ั€ะฐะท ะฟะพะฒะตะทะตั‚", "ะฒะพะทะผะพะถะฝะพ..."] fon = pygame.transform.scale(load_image('image/gameover.jpg'), (WIDTH, HEIGHT)) sc.blit(fon, (0, 0)) font = pygame.font.Font(None, 30) text_coord = 700 for line in intro_text: string_rendered = font.render(line, 1, pygame.Color('yellow')) intro_rect = string_rendered.get_rect() text_coord += 10 intro_rect.top = text_coord intro_rect.x = 10 text_coord += intro_rect.height sc.blit(string_rendered, intro_rect) while True: for event in pygame.event.get(): if event.type == pygame.QUIT: terminate() elif event.type == pygame.KEYDOWN or \ event.type == pygame.MOUSEBUTTONDOWN: return pygame.display.flip() def victory_screen(): intro_text = ["ะญั‚ะพ ะฟะพะฑะตะดะฐ!", "ะ’ั‹ ัะผะพะณะปะธ ะฒั‹ะฑั€ะฐั‚ัŒัั ะธะท ะฟะพะดะทะตะผะตะปัŒั,", "ั‡ั‚ะพ ะพะฑะตัะฟะตั‡ะธั‚ ะ’ะฐะผ ะฑะพะณะฐั‚ัั‚ะฒะพ ะธ ัะปะฐะฒัƒ ะฝะฐ ะฒััŽ ะถะธะทะฝัŒ"] fon = pygame.transform.scale(load_image('image/victory.jpg'), (WIDTH, HEIGHT)) sc.blit(fon, (0, 0)) font = pygame.font.Font(None, 30) text_coord = 700 for line in intro_text: string_rendered = font.render(line, 1, pygame.Color('yellow')) intro_rect = string_rendered.get_rect() text_coord += 10 intro_rect.top = text_coord intro_rect.x = 10 text_coord += intro_rect.height sc.blit(string_rendered, intro_rect) while True: for event in pygame.event.get(): if event.type == pygame.QUIT: terminate() elif event.type == pygame.KEYDOWN or \ event.type == pygame.MOUSEBUTTONDOWN: return pygame.display.flip() def exit_screen(): button_exit = Button(575, 500, 45, 30, "ะžะบ") name = '' fon = pygame.transform.scale(load_image('image\exit.jpg'), (WIDTH, HEIGHT)) sc.blit(fon, (0, 0)) font = pygame.font.Font(None, 30) title = font.render('ะะฐะฟะธัˆะธ ัะฒะพะต ะธะผั', True, (255, 255, 255)) while True: sc.blit(fon, (0, 0)) for event in pygame.event.get(): if event.type == pygame.QUIT: terminate() if event.type == 
pygame.KEYDOWN:
name = "" if event.type == pygame.MOUSEBUTTONDOWN: if button_exit.push_button(event.pos): gameover(name, killed_monsters) text = font.render(name, True, (255, 255, 255)) rect = text.get_rect() rect.center = (600, 400) button_exit.draw() sc.blit(text, rect) sc.blit(title, (500, 250)) pygame.display.flip() def exchange_equipment_inventory(inventory, hero): obj = inventory.get_selected_cell() if obj.get_type() == "weapon": old_w = hero.replace_weapon(obj) inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w if obj.get_name() == "Helmet1" or obj.get_name() == 'Helmet2': old_w = hero.replace_helmet(obj) inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w if obj.get_name() == "Cuiras1" or obj.get_name() == 'Cuiras2': old_w = hero.replace_armor(obj) inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w if obj.get_name() == "Leg_armor1" or obj.get_name() == 'Leg_armor2': old_w = hero.replace_leg(obj) inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w if obj.get_name() == "Arm_armor1" or obj.get_name() == 'Arm_armor2': old_w = hero.replace_bracers(obj) inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w if obj.get_type() == "potion": if obj.get_name() == "Small_health": if hero.health + 5 <= hero.max_health: hero.health += 5 else: hero.health = hero.max_health elif obj.get_name() == "Small_strength": hero.max_health += 5 inventory.clear_cell() return inventory, hero class Button: def __init__(self, x, y, height, width, text): self.x = x self.y = y self.height = height self.width = width self.text = text def draw(self): pygame.draw.rect(sc, (255, 255, 255), (self.x, self.y, self.height, self.width), width=1) font = pygame.font.Font(None, self.width) text = font.render(self.text, True, (255, 255, 255)) sc.blit(text, (self.x + 5, self.y + 5)) def push_button(self, pos): if pos[0] > self.x and pos[0] < self.x + self.height and pos[1] > self.y and pos[1] < self.y + self.width: return True class Particle(pygame.sprite.Sprite): # ัะณะตะฝะตั€ะธั€ัƒะตะผ ั‡ะฐัั‚ะธั†ั‹ ั€ะฐะทะฝะพะณะพ ั€ะฐะทะผะตั€ะฐ fire = [load_image("image/bloodsplats.png")] fire[0] = pygame.transform.scale(fire[0], (int(cell_size * 0.2), int(cell_size * 0.2))) for scale in (5, 10, 20): fire.append(pygame.transform.scale(fire[0], (scale, scale))) def __init__(self, pos, dx, dy, screen_rect): super().__init__(all_sprites) self.image = random.choice(self.fire) self.rect = self.image.get_rect() self.screen_rect = screen_rect # ัƒ ะบะฐะถะดะพะน ั‡ะฐัั‚ะธั†ั‹ ัะฒะพั ัะบะพั€ะพัั‚ัŒ โ€” ัั‚ะพ ะฒะตะบั‚ะพั€ self.velocity = [0, 0] self.velocity[0] = random.randrange(-1, 1) # ะธ ัะฒะพะธ ะบะพะพั€ะดะธะฝะฐั‚ั‹ self.rect.x, self.rect.y = pos # ะณั€ะฐะฒะธั‚ะฐั†ะธั ะฑัƒะดะตั‚ ะพะดะธะฝะฐะบะพะฒะพะน (ะทะฝะฐั‡ะตะฝะธะต ะบะพะฝัั‚ะฐะฝั‚ั‹) self.gravity = GRAVITY def update(self): # ะฟั€ะธะผะตะฝัะตะผ ะณั€ะฐะฒะธั‚ะฐั†ะธะพะฝะฝั‹ะน ัั„ั„ะตะบั‚: # ะดะฒะธะถะตะฝะธะต ั ัƒัะบะพั€ะตะฝะธะตะผ ะฟะพะด ะดะตะนัั‚ะฒะธะตะผ ะณั€ะฐะฒะธั‚ะฐั†ะธะธ self.velocity[1] += self.gravity # ะฟะตั€ะตะผะตั‰ะฐะตะผ ั‡ะฐัั‚ะธั†ัƒ self.rect.x += self.velocity[0] self.rect.y += self.velocity[1] # ัƒะฑะธะฒะฐะตะผ, ะตัะปะธ ั‡ะฐัั‚ะธั†ะฐ ัƒัˆะปะฐ ะทะฐ ัะบั€ะฐะฝ if not self.rect.colliderect(self.screen_rect): self.kill() GRAVITY = 0.5 def create_particles(position, screen_rect): # ะบะพะปะธั‡ะตัั‚ะฒะพ ัะพะทะดะฐะฒะฐะตะผั‹ั… ั‡ะฐัั‚ะธั† particle_count = 20 # ะฒะพะทะผะพะถะฝั‹ะต ัะบะพั€ะพัั‚ะธ numbers = range(-5, 6) for _ in range(particle_count): 
Particle(position, random.choice(numbers), random.choice(numbers), screen_rect) def gameover(name, score): con = sqlite3.connect("Data_Base.db") cur = con.cursor() if name == '': name = 'ะฐะฝะพะฝะธะผ' cur.execute(f"""INSERT INTO result_table(name, murders) VALUES('{name}', '{score}')""") con.commit() con.close() terminate()
if event.unicode.isalpha(): name += event.unicode elif event.key == K_BACKSPACE: name = name[:-1] elif event.key == K_RETURN:
random_line_split
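Every screen function in these functions.py records repeats the same loop: render a line, bump a vertical offset by 10, position the text, then add the line height. A small helper like the sketch below could replace the duplicated blocks; the function name and defaults are illustrative, only the offset arithmetic mirrors the original loops, and pygame.init() is assumed to have been called already.

import pygame

def blit_text_lines(surface, lines, x=10, start_y=50, size=30, color="yellow"):
    # Render each string and stack them vertically, as the screen functions do.
    font = pygame.font.Font(None, size)
    y = start_y
    for line in lines:
        rendered = font.render(line, True, pygame.Color(color))
        rect = rendered.get_rect()
        y += 10
        rect.top = y
        rect.x = x
        y += rect.height
        surface.blit(rendered, rect)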
functions.py
import os import sys import pygame from settings import * import random import sqlite3 sc = pygame.display.set_mode((WIDTH, HEIGHT)) # ั‚ะตะบัƒั‰ะธะน ัƒั€ะพะฒะตะฝัŒ all_logs = [] def load_image(name, colorkey=None): fullname = os.path.join('data', name) # ะตัะปะธ ั„ะฐะนะป ะฝะต ััƒั‰ะตัั‚ะฒัƒะตั‚, ั‚ะพ ะฒั‹ั…ะพะดะธะผ if not os.path.isfile(fullname): print(f"ะคะฐะนะป ั ะธะทะพะฑั€ะฐะถะตะฝะธะตะผ '{fullname}' ะฝะต ะฝะฐะนะดะตะฝ") sys.exit() image = pygame.image.load(fullname) return image def add_to_log(text): all_logs.append(text) if len(all_logs) > 20: del all_logs[0] def draw_white_rect(x, y): pygame.draw.rect(sc, (255, 255, 255), (x, y, 50, 50), width=1) def print_log(): font = pygame.font.Font(None, 25) text_coord = 25 for line in all_logs: string_rendered = font.render(line, 1, pygame.Color('yellow')) intro_rect = string_rendered.get_rect() text_coord += 10 intro_rect.top = text_coord intro_rect.x = 725 text_coord += intro_rect.height sc.blit(string_rendered, intro_rect) def update_wall_color(cur_level): if cur_level == 1: texture_wall = load_image("image/Brick_Wall_009.jpg") texture_wall = pygame.transform.scale(texture_wall, (cell_size * 2, cell_size * 2)) elif cur_level == 2: texture_wall = load_image("image/stone_wall2.png") texture_wall = pygame.transform.scale(texture_wall, (cell_size * 2, cell_size * 2)) else: texture_wall = load_image("image/wall3.jpg") texture_wall = pygame.transform.scale(texture_wall, (cell_size * 2, cell_size * 2)) return texture_wall texture_floor = load_image("image/dark-brick-wall-texture_1048-7626.jpg") texture_floor = pygame.transform.scale(texture_floor, (cell_size, cell_size)) hp_bar = load_image("image/hud/frame.png") hp_bar = pygame.transform.scale(hp_bar, (275, 20)) door = load_image("image/castledoors.png") door = pygame.transform.scale(door, (cell_size, cell_size)) frame = load_image("image/hud/button_1(frame).png") frame = pygame.transform.scale(frame, (cell_size, cell_size)) blood_screen = load_image("image/BloodOverlay.png") blood_screen = pygame.transform.scale(blood_screen, (WIDTH, HEIGHT)) # ัะพะทะดะฐะดะธะผ ะณั€ัƒะฟะฟัƒ, ัะพะดะตั€ะถะฐั‰ัƒัŽ ะฒัะต ัะฟั€ะฐะนั‚ั‹ all_sprites = pygame.sprite.Group() equipment_sprites = pygame.sprite.Group() inventory_sprites = pygame.sprite.Group() character_sprites = pygame.sprite.Group() def terminate(): pygame.quit() sys.exit() def start_screen(): intro_text = ["ะ”ะžะ‘ะ ะž ะŸะžะ–ะะ›ะžะ’ะะขะฌ ะ’ PYDUNGEON", "ะ”ะปั ั‚ะพะณะพ, ั‡ั‚ะพะฑั‹ ะฒั‹ะฑั€ะฐั‚ัŒัั ะพั‚ ััŽะดะฐ,", "ะ’ะฐะผ ะฟะพะฝะฐะดะพะฑะธั‚ัั ะฟั€ะพะนั‚ะธ ั‚ั€ะธ ัƒั€ะพะฒะฝั ะบะฐั‚ะฐะบะพะผะฑ ะธ ะฟะพะฑะพั€ะพั‚ัŒ ะฝะตะฒะธะดะฐะฝะฝั‹ั… ั‡ัƒะดะธั‰", "ะ•ัะปะธ ะ’ั‹ ะฟัะธั…, ะฝะฐะถะผะธั‚ะต ะปัŽะฑัƒัŽ ะบะฝะพะฟะบัƒ"] fon = pygame.transform.scale(load_image('image/dungeon_intro.jpeg'), (WIDTH, HEIGHT)) sc.blit(fon, (0, 0)) font = pygame.font.Font(None, 30) text_coord = 50 for line in intro_text: string_rendered = font.render(line, 1, pygame.Color('yellow')) intro_rect = string_rendered.get_rect() text_coord += 10 intro_rect.top = text_coord intro_rect.x = 10 text_coord += intro_rect.height sc.blit(string_rendered, intro_rect) while True: for event in pygame.event.get(): if event.type == pygame.QUIT: terminate() elif event.type == pygame.KEYDOWN or \ event.type == pygame.MOUSEBUTTONDOWN: return pygame.display.flip() def level2_screen(): intro_text = ["ะ’ะซ ะกะœะžะ“ะ›ะ˜ ะŸะ ะžะ™ะขะ˜ ะŸะ•ะ ะ’ะซะ™ ะฃะ ะžะ’ะ•ะะฌ ะŸะžะ”ะ—ะ•ะœะ•ะ›ะฌะฏ", "ะฝะพ ัั‚ะพ ะตั‰ะต ะฝะต ะบะพะฝะตั†...", "ะ’ะฟะตั€ะตะดะธ ะ’ะฐั ะถะดัƒั‚ ะตั‰ะต ะฑะพะปะตะต ัะธะปัŒะฝั‹ะต 
ะฟั€ะพั‚ะธะฒะฝะธะบะธ", "ะธ ะณะพั€ะฐะทะดะพ ะผะตะฝัŒัˆะต ัˆะฐะฝัะพะฒ ะฝะฐ ะฒั‹ะถะธะฒะฐะฝะธะต"] fon = pygame.transform.scale(load_image('image/level2_dungeon.jpg'), (WIDTH, HEIGHT)) sc.blit(fon, (0, 0)) font = pygame.font.Font(None, 30) text_coord = 50 for line in intro_text: string_rendered = font.render(line, 1, pygame.Color('yellow')) intro_rect = string_rendered.get_rect() text_coord += 10 intro_rect.top = text_coord intro_rect.x = 10 text_coord += intro_rect.height sc.blit(string_rendered, intro_rect) while True: for event in pygame.event.get(): if event.type == pygame.QUIT: terminate() elif event.type == pygame.KEYDOWN or \ event.type == pygame.MOUSEBUTTONDOWN: return pygame.display.flip() def level3_screen(): intro_text = ["ะะ•ะœะะžะ“ะ˜ะ• ะ—ะะฅะžะ”ะ˜ะ›ะ˜ ะขะะš ะ”ะะ›ะ•ะšะž", "ะ˜ ะตั‰ะต ะฝะธะบั‚ะพ ะฝะต ะฒะพะทะฒั€ะฐั‰ะฐะปัั", "ะก ะฟะพัะปะตะดะฝะตะณะพ, ัะฐะผะพะณะพ ะผั€ะฐั‡ะฝะพะณะพ ะธ ะณะปัƒะฑะพะบะพะณะพ ัƒั€ะพะฒะฝั", "ะกะผะตะปัŒั‡ะฐะบ ะปะธ ั‚ั‹ ะธะปะธ ะฑะตะทัƒะผะตั†?"] fon = pygame.transform.scale(load_image('image/level3_dungeon.jpg'), (WIDTH, HEIGHT)) sc.blit(fon, (0, 0)) font = pygame.font.Font(None, 30) text_coord = 50 for line in intro_text: string_rendered = font.render(line, 1, pygame.Color('yellow')) intro_rect = string_rendered.get_rect() text_coord += 10 intro_rect.top = text_coord intro_rect.x = 10 text_coord += intro_rect.height sc.blit(string_rendered, intro_rect) while True: for event in pygame.event.get(): if event.type == pygame.QUIT: terminate() elif event.type == pygame.KEYDOWN or \ event.type == pygame.MOUSEBUTTONDOWN: return pygame.display.flip() def gameover_screen(): intro_text = ["ะ’ ัะปะตะดัƒัŽั‰ะธะน ั€ะฐะท ะฟะพะฒะตะทะตั‚", "ะฒะพะทะผะพะถะฝะพ..."] fon = pygame.transform.scale(load_image('image/gameover.jpg'), (WIDTH, HEIGHT)) sc.blit(fon, (0, 0)) font = pygame.font.Font(None, 30) text_coord = 700 for line in intro_text: string_rendered = font.render(line, 1, pygame.Color('yellow')) intro_rect = string_rendered.get_rect() text_coord += 10 intro_rect.top = text_coord intro_rect.x = 10 text_coord += intro_rect.height sc.blit(string_rendered, intro_rect) while True: for event in pygame.event.get(): if event.type == pygame.QUIT: terminate() elif event.type == pygame.KEYDOWN or \ event.type == pygame.MOUSEBUTTONDOWN: return pygame.display.flip() def victory_screen(): intro_text = ["ะญั‚ะพ ะฟะพะฑะตะดะฐ!", "ะ’ั‹ ัะผะพะณะปะธ ะฒั‹ะฑั€ะฐั‚ัŒัั ะธะท ะฟะพะดะทะตะผะตะปัŒั,", "ั‡ั‚ะพ ะพะฑะตัะฟะตั‡ะธั‚ ะ’ะฐะผ ะฑะพะณะฐั‚ัั‚ะฒะพ ะธ ัะปะฐะฒัƒ ะฝะฐ ะฒััŽ ะถะธะทะฝัŒ"] fon = pygame.transform.scale(load_image('image/victory.jpg'), (WIDTH, HEIGHT)) sc.blit(fon, (0, 0)) font = pygame.font.Font(None, 30) text_coord = 700 for line in intro_text: string_rendered = font.render(line, 1, pygame.Color('yellow')) intro_rect = string_rendered.get_rect() text_coord += 10 intro_rect.top = text_coord intro_rect.x = 10 text_coord += intro_rect.height sc.blit(string_rendered, intro_rect) while True: for event in pygame.event.get(): if event.type == pygame.QUIT: terminate() elif event.type == pygame.KEYDOWN or \ event.type == pygame.MOUSEBUTTONDOWN: return pygame.display.flip() def exit_screen(): button_exit = Button(575, 500, 45, 30, "ะžะบ") name = '' fon = pygame.transform.scale(load_image('image\exit.jpg'), (WIDTH, HEIGHT)) sc.blit(fon, (0, 0)) font = pygame.font.Font(None, 30) title = font.render('ะะฐะฟะธัˆะธ ัะฒะพะต ะธะผั', True, (255, 255, 255)) while True: sc.blit(fon, (0, 0)) for event in pygame.event.get(): if event.type == pygame.QUIT: terminate() if event.type == 
pygame.KEYDOWN: if event.unicode.isalpha(): name += event.unicode elif event.key == K_BACKSPACE: name = name[:-1] elif event.key == K_RETURN: name = "" if event.type == pygame.MOUSEBUTTONDOWN: if button_exit.push_button(event.pos): gameover(name, killed_monsters) text = font.render(name, True, (255, 255, 255)) rect = text.get_rect() rect.center = (600, 40
button_exit.draw() sc.blit(text, rect) sc.blit(title, (500, 250)) pygame.display.flip() def exchange_equipment_inventory(inventory, hero): obj = inventory.get_selected_cell() if obj.get_type() == "weapon": old_w = hero.replace_weapon(obj) inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w if obj.get_name() == "Helmet1" or obj.get_name() == 'Helmet2': old_w = hero.replace_helmet(obj) inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w if obj.get_name() == "Cuiras1" or obj.get_name() == 'Cuiras2': old_w = hero.replace_armor(obj) inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w if obj.get_name() == "Leg_armor1" or obj.get_name() == 'Leg_armor2': old_w = hero.replace_leg(obj) inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w if obj.get_name() == "Arm_armor1" or obj.get_name() == 'Arm_armor2': old_w = hero.replace_bracers(obj) inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w if obj.get_type() == "potion": if obj.get_name() == "Small_health": if hero.health + 5 <= hero.max_health: hero.health += 5 else: hero.health = hero.max_health elif obj.get_name() == "Small_strength": hero.max_health += 5 inventory.clear_cell() return inventory, hero class Button: def __init__(self, x, y, height, width, text): self.x = x self.y = y self.height = height self.width = width self.text = text def draw(self): pygame.draw.rect(sc, (255, 255, 255), (self.x, self.y, self.height, self.width), width=1) font = pygame.font.Font(None, self.width) text = font.render(self.text, True, (255, 255, 255)) sc.blit(text, (self.x + 5, self.y + 5)) def push_button(self, pos): if pos[0] > self.x and pos[0] < self.x + self.height and pos[1] > self.y and pos[1] < self.y + self.width: return True class Particle(pygame.sprite.Sprite): # ัะณะตะฝะตั€ะธั€ัƒะตะผ ั‡ะฐัั‚ะธั†ั‹ ั€ะฐะทะฝะพะณะพ ั€ะฐะทะผะตั€ะฐ fire = [load_image("image/bloodsplats.png")] fire[0] = pygame.transform.scale(fire[0], (int(cell_size * 0.2), int(cell_size * 0.2))) for scale in (5, 10, 20): fire.append(pygame.transform.scale(fire[0], (scale, scale))) def __init__(self, pos, dx, dy, screen_rect): super().__init__(all_sprites) self.image = random.choice(self.fire) self.rect = self.image.get_rect() self.screen_rect = screen_rect # ัƒ ะบะฐะถะดะพะน ั‡ะฐัั‚ะธั†ั‹ ัะฒะพั ัะบะพั€ะพัั‚ัŒ โ€” ัั‚ะพ ะฒะตะบั‚ะพั€ self.velocity = [0, 0] self.velocity[0] = random.randrange(-1, 1) # ะธ ัะฒะพะธ ะบะพะพั€ะดะธะฝะฐั‚ั‹ self.rect.x, self.rect.y = pos # ะณั€ะฐะฒะธั‚ะฐั†ะธั ะฑัƒะดะตั‚ ะพะดะธะฝะฐะบะพะฒะพะน (ะทะฝะฐั‡ะตะฝะธะต ะบะพะฝัั‚ะฐะฝั‚ั‹) self.gravity = GRAVITY def update(self): # ะฟั€ะธะผะตะฝัะตะผ ะณั€ะฐะฒะธั‚ะฐั†ะธะพะฝะฝั‹ะน ัั„ั„ะตะบั‚: # ะดะฒะธะถะตะฝะธะต ั ัƒัะบะพั€ะตะฝะธะตะผ ะฟะพะด ะดะตะนัั‚ะฒะธะตะผ ะณั€ะฐะฒะธั‚ะฐั†ะธะธ self.velocity[1] += self.gravity # ะฟะตั€ะตะผะตั‰ะฐะตะผ ั‡ะฐัั‚ะธั†ัƒ self.rect.x += self.velocity[0] self.rect.y += self.velocity[1] # ัƒะฑะธะฒะฐะตะผ, ะตัะปะธ ั‡ะฐัั‚ะธั†ะฐ ัƒัˆะปะฐ ะทะฐ ัะบั€ะฐะฝ if not self.rect.colliderect(self.screen_rect): self.kill() GRAVITY = 0.5 def create_particles(position, screen_rect): # ะบะพะปะธั‡ะตัั‚ะฒะพ ัะพะทะดะฐะฒะฐะตะผั‹ั… ั‡ะฐัั‚ะธั† particle_count = 20 # ะฒะพะทะผะพะถะฝั‹ะต ัะบะพั€ะพัั‚ะธ numbers = range(-5, 6) for _ in range(particle_count): Particle(position, random.choice(numbers), random.choice(numbers), screen_rect) def gameover(name, score): con = sqlite3.connect("Data_Base.db") cur = con.cursor() if name == '': name = 'ะฐะฝะพะฝะธะผ' cur.execute(f"""INSERT 
INTO result_table(name, murders) VALUES('{name}', '{score}')""") con.commit() con.close() terminate()
0)
conditional_block
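In the Button class above, push_button compares the click position against x + height and y + width, which only works because draw also passes (height, width) as the rectangle's width and height; the attribute names are effectively swapped. Keeping a pygame.Rect on the button makes the hit test a single collidepoint call and removes that ambiguity. This is an editorial sketch, not the project's actual class; the RectButton name is invented.

import pygame

class RectButton:
    def __init__(self, x, y, w, h, text):
        self.rect = pygame.Rect(x, y, w, h)
        self.text = text

    def draw(self, surface):
        # Outline and caption, as in the original draw method.
        pygame.draw.rect(surface, (255, 255, 255), self.rect, width=1)
        font = pygame.font.Font(None, self.rect.height)
        text = font.render(self.text, True, (255, 255, 255))
        surface.blit(text, (self.rect.x + 5, self.rect.y + 5))

    def push_button(self, pos):
        # collidepoint covers both axis comparisons in one call.
        return self.rect.collidepoint(pos)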
functions.py
import os import sys import pygame from settings import * import random import sqlite3 sc = pygame.display.set_mode((WIDTH, HEIGHT)) # ั‚ะตะบัƒั‰ะธะน ัƒั€ะพะฒะตะฝัŒ all_logs = [] def load_image(name, colorkey=None): fullname = os.path.join('data', name) # ะตัะปะธ ั„ะฐะนะป ะฝะต ััƒั‰ะตัั‚ะฒัƒะตั‚, ั‚ะพ ะฒั‹ั…ะพะดะธะผ if not os.path.isfile(fullname): print(f"ะคะฐะนะป ั ะธะทะพะฑั€ะฐะถะตะฝะธะตะผ '{fullname}' ะฝะต ะฝะฐะนะดะตะฝ") sys.exit() image = pygame.image.load(fullname) return image def add_to_log(text): all_logs.append(text) if len(all_logs) > 20: del all_logs[0] def draw_white_rect(x, y): pygame.draw.rect(sc, (255, 255, 255), (x, y, 50, 50), width=1) def print_log(): font = pygame.font.Font(None, 25) text_coord = 25 for line in all_logs: string_rendered = font.render(line, 1, pygame.Color('yellow')) intro_rect = string_rendered.get_rect() text_coord += 10 intro_rect.top = text_coord intro_rect.x = 725 text_coord += intro_rect.height sc.blit(string_rendered, intro_rect) def update_wall_color(cur_level): if cur_level == 1: texture_wall = load_image("image/Brick_Wall_009.jpg") texture_wall = pygame.transform.scale(texture_wall, (cell_size * 2, cell_size * 2)) elif cur_level == 2: texture_wall = load_image("image/stone_wall2.png") texture_wall = pygame.transform.scale(texture_wall, (cell_size * 2, cell_size * 2)) else: texture_wall = load_image("image/wall3.jpg") texture_wall = pygame.transform.scale(texture_wall, (cell_size * 2, cell_size * 2)) return texture_wall texture_floor = load_image("image/dark-brick-wall-texture_1048-7626.jpg") texture_floor = pygame.transform.scale(texture_floor, (cell_size, cell_size)) hp_bar = load_image("image/hud/frame.png") hp_bar = pygame.transform.scale(hp_bar, (275, 20)) door = load_image("image/castledoors.png") door = pygame.transform.scale(door, (cell_size, cell_size)) frame = load_image("image/hud/button_1(frame).png") frame = pygame.transform.scale(frame, (cell_size, cell_size)) blood_screen = load_image("image/BloodOverlay.png") blood_screen = pygame.transform.scale(blood_screen, (WIDTH, HEIGHT)) # ัะพะทะดะฐะดะธะผ ะณั€ัƒะฟะฟัƒ, ัะพะดะตั€ะถะฐั‰ัƒัŽ ะฒัะต ัะฟั€ะฐะนั‚ั‹ all_sprites = pygame.sprite.Group() equipment_sprites = pygame.sprite.Group() inventory_sprites = pygame.sprite.Group() character_sprites = pygame.sprite.Group() def terminate(): pygame.quit() sys.exit() def start_screen(): intro_text = ["ะ”ะžะ‘ะ ะž ะŸะžะ–ะะ›ะžะ’ะะขะฌ ะ’ PYDUNGEON", "ะ”ะปั ั‚ะพะณะพ, ั‡ั‚ะพะฑั‹ ะฒั‹ะฑั€ะฐั‚ัŒัั ะพั‚ ััŽะดะฐ,", "ะ’ะฐะผ ะฟะพะฝะฐะดะพะฑะธั‚ัั ะฟั€ะพะนั‚ะธ ั‚ั€ะธ ัƒั€ะพะฒะฝั ะบะฐั‚ะฐะบะพะผะฑ ะธ ะฟะพะฑะพั€ะพั‚ัŒ ะฝะตะฒะธะดะฐะฝะฝั‹ั… ั‡ัƒะดะธั‰", "ะ•ัะปะธ ะ’ั‹ ะฟัะธั…, ะฝะฐะถะผะธั‚ะต ะปัŽะฑัƒัŽ ะบะฝะพะฟะบัƒ"] fon = pygame.transform.scale(load_image('image/dungeon_intro.jpeg'), (WIDTH, HEIGHT)) sc.blit(fon, (0, 0)) font = pygame.font.Font(None, 30) text_coord = 50 for line in intro_text: string_rendered = font.render(line, 1, pygame.Color('yellow')) intro_rect = string_rendered.get_rect() text_coord += 10 intro_rect.top = text_coord intro_rect.x = 10 text_coord += intro_rect.height sc.blit(string_rendered, intro_rect) while True: for event in pygame.event.get(): if event.type == pygame.QUIT: terminate() elif event.type == pygame.KEYDOWN or \ event.type == pygame.MOUSEBUTTONDOWN: return pygame.display.flip() def level2_screen(): intro_text = ["ะ’ะซ ะกะœะžะ“ะ›ะ˜ ะŸะ ะžะ™ะขะ˜ ะŸะ•ะ ะ’ะซะ™ ะฃะ ะžะ’ะ•ะะฌ ะŸะžะ”ะ—ะ•ะœะ•ะ›ะฌะฏ", "ะฝะพ ัั‚ะพ ะตั‰ะต ะฝะต ะบะพะฝะตั†...", "ะ’ะฟะตั€ะตะดะธ ะ’ะฐั ะถะดัƒั‚ ะตั‰ะต ะฑะพะปะตะต ัะธะปัŒะฝั‹ะต 
ะฟั€ะพั‚ะธะฒะฝะธะบะธ", "ะธ ะณะพั€ะฐะทะดะพ ะผะตะฝัŒัˆะต ัˆะฐะฝัะพะฒ ะฝะฐ ะฒั‹ะถะธะฒะฐะฝะธะต"] fon = pygame.transform.scale(load_image('image/level2_dungeon.jpg'), (WIDTH, HEIGHT)) sc.blit(fon, (0, 0)) font = pygame.font.Font(None, 30) text_coord = 50 for line in intro_text: string_rendered = font.render(line, 1, pygame.Color('yellow')) intro_rect = string_rendered.get_rect() text_coord += 10 intro_rect.top = text_coord intro_rect.x = 10 text_coord += intro_rect.height sc.blit(string_rendered, intro_rect) while True: for event in pygame.event.get(): if event.type == pygame.QUIT: terminate() elif event.type == pygame.KEYDOWN or \ event.type == pygame.MOUSEBUTTONDOWN: return pygame.display.flip() def level3_screen(): intro_text = ["ะะ•ะœะะžะ“ะ˜ะ• ะ—ะะฅะžะ”ะ˜ะ›ะ˜ ะขะะš ะ”ะะ›ะ•ะšะž", "ะ˜ ะตั‰ะต ะฝะธะบั‚ะพ ะฝะต ะฒะพะทะฒั€ะฐั‰ะฐะปัั", "ะก ะฟะพัะปะตะดะฝะตะณะพ, ัะฐะผะพะณะพ ะผั€ะฐั‡ะฝะพะณะพ ะธ ะณะปัƒะฑะพะบะพะณะพ ัƒั€ะพะฒะฝั", "ะกะผะตะปัŒั‡ะฐะบ ะปะธ ั‚ั‹ ะธะปะธ ะฑะตะทัƒะผะตั†?"] fon = pygame.transform.scale(load_image('image/level3_dungeon.jpg'), (WIDTH, HEIGHT)) sc.blit(fon, (0, 0)) font = pygame.font.Font(None, 30) text_coord = 50 for line in intro_text: string_rendered = font.render(line, 1, pygame.Color('yellow')) intro_rect = string_rendered.get_rect() text_coord += 10 intro_rect.top = text_coord intro_rect.x = 10 text_coord += intro_rect.height sc.blit(string_rendered, intro_rect) while True: for event in pygame.event.get(): if event.type == pygame.QUIT: terminate() elif event.type == pygame.KEYDOWN or \ event.type == pygame.MOUSEBUTTONDOWN: return pygame.display.flip() def gameover_screen(): intro_text = ["ะ’ ัะปะตะดัƒัŽั‰ะธะน ั€ะฐะท ะฟะพะฒะตะทะตั‚", "ะฒะพะทะผะพะถะฝะพ..."] fon = pygame.transform.scale(load_image('image/gameover.jpg'), (WIDTH, HEIGHT)) sc.blit(fon, (0, 0)) font = pygame.font.Font(None, 30) text_coord = 700 for line in intro_text: string_rendered = font.render(line, 1, pygame.Color('yellow')) intro_rect = string_rendered.get_rect() text_coord += 10 intro_rect.top = text_coord intro_rect.x = 10 text_coord += intro_rect.height sc.blit(string_rendered, intro_rect) while True: for event in pygame.event.get(): if event.type == pygame.QUIT: terminate() elif event.type == pygame.KEYDOWN or \ event.type == pygame.MOUSEBUTTONDOWN: return pygame.display.flip() def victory_screen(): intro_text = ["ะญั‚ะพ ะฟะพะฑะตะดะฐ!", "ะ’ั‹ ัะผะพะณะปะธ ะฒั‹ะฑั€ะฐั‚ัŒัั ะธะท ะฟะพะดะทะตะผะตะปัŒั,", "ั‡ั‚ะพ ะพะฑะตัะฟะตั‡ะธั‚ ะ’ะฐะผ ะฑะพะณะฐั‚ัั‚ะฒะพ ะธ ัะปะฐะฒัƒ ะฝะฐ ะฒััŽ ะถะธะทะฝัŒ"] fon = pygame.transform.scale(load_image('image/victory.jpg'), (WIDTH, HEIGHT)) sc.blit(fon, (0, 0)) font = pygame.font.Font(None, 30) text_coord = 700 for line in intro_text: string_rendered = font.render(line, 1, pygame.Color('yellow')) intro_rect = string_rendered.get_rect() text_coord += 10 intro_rect.top = text_coord intro_rect.x = 10 text_coord += intro_rect.height sc.blit(string_rendered, intro_rect) while True: for event in pygame.event.get(): if event.type == pygame.QUIT: terminate() elif event.type == pygame.KEYDOWN or \ event.type == pygame.MOUSEBUTTONDOWN: return pygame.display.flip() def exit_screen(): button_exit = Button(575, 500, 45, 30, "ะžะบ") name = '' fon = pygame.transform.scale(load_image('image\exit.jpg'), (WIDTH, HEIGHT)) sc.blit(fon, (0, 0)) font = pygame.font.Font(None, 30) title = font.render('ะะฐะฟะธัˆะธ ัะฒะพะต ะธะผั', True, (255, 255, 255)) while True: sc.blit(fon, (0, 0)) for event in pygame.event.get(): if event.type == pygame.QUIT: terminate() if event.type == 
pygame.KEYDOWN: if event.unicode.isalpha(): name += event.unicode elif event.key == K_BACKSPACE: name = name[:-1] elif event.key == K_RETURN: name = "" if event.type == pygame.MOUSEBUTTONDOWN: if button_exit.push_button(event.pos): gameover(name, killed_monsters) text = font.render(name, True, (255, 255, 255)) rect = text.get_rect() rect.center = (600, 400) button_exit.draw() sc.blit(text, rect) sc.blit(title, (500, 250)) pygame.display.flip() def exchange_equipment_inventory(inventory, hero): obj = inventory.get_selected_cell() if obj.get_type() == "weapon": old_w = hero.replace_weapon(obj) inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w if obj.get_name() == "Helmet1" or obj.get_name() == 'Helmet2': old_w = hero.replace_helmet(obj) inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w if obj.get_name() == "Cuiras1" or obj.get_name() == 'Cuiras2': old_w = hero.replace_armor(obj) inv
ted_cell[0]][inventory.selected_cell[1]] = old_w if obj.get_name() == "Leg_armor1" or obj.get_name() == 'Leg_armor2': old_w = hero.replace_leg(obj) inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w if obj.get_name() == "Arm_armor1" or obj.get_name() == 'Arm_armor2': old_w = hero.replace_bracers(obj) inventory.board[inventory.selected_cell[0]][inventory.selected_cell[1]] = old_w if obj.get_type() == "potion": if obj.get_name() == "Small_health": if hero.health + 5 <= hero.max_health: hero.health += 5 else: hero.health = hero.max_health elif obj.get_name() == "Small_strength": hero.max_health += 5 inventory.clear_cell() return inventory, hero class Button: def __init__(self, x, y, height, width, text): self.x = x self.y = y self.height = height self.width = width self.text = text def draw(self): pygame.draw.rect(sc, (255, 255, 255), (self.x, self.y, self.height, self.width), width=1) font = pygame.font.Font(None, self.width) text = font.render(self.text, True, (255, 255, 255)) sc.blit(text, (self.x + 5, self.y + 5)) def push_button(self, pos): if pos[0] > self.x and pos[0] < self.x + self.height and pos[1] > self.y and pos[1] < self.y + self.width: return True class Particle(pygame.sprite.Sprite): # ัะณะตะฝะตั€ะธั€ัƒะตะผ ั‡ะฐัั‚ะธั†ั‹ ั€ะฐะทะฝะพะณะพ ั€ะฐะทะผะตั€ะฐ fire = [load_image("image/bloodsplats.png")] fire[0] = pygame.transform.scale(fire[0], (int(cell_size * 0.2), int(cell_size * 0.2))) for scale in (5, 10, 20): fire.append(pygame.transform.scale(fire[0], (scale, scale))) def __init__(self, pos, dx, dy, screen_rect): super().__init__(all_sprites) self.image = random.choice(self.fire) self.rect = self.image.get_rect() self.screen_rect = screen_rect # ัƒ ะบะฐะถะดะพะน ั‡ะฐัั‚ะธั†ั‹ ัะฒะพั ัะบะพั€ะพัั‚ัŒ โ€” ัั‚ะพ ะฒะตะบั‚ะพั€ self.velocity = [0, 0] self.velocity[0] = random.randrange(-1, 1) # ะธ ัะฒะพะธ ะบะพะพั€ะดะธะฝะฐั‚ั‹ self.rect.x, self.rect.y = pos # ะณั€ะฐะฒะธั‚ะฐั†ะธั ะฑัƒะดะตั‚ ะพะดะธะฝะฐะบะพะฒะพะน (ะทะฝะฐั‡ะตะฝะธะต ะบะพะฝัั‚ะฐะฝั‚ั‹) self.gravity = GRAVITY def update(self): # ะฟั€ะธะผะตะฝัะตะผ ะณั€ะฐะฒะธั‚ะฐั†ะธะพะฝะฝั‹ะน ัั„ั„ะตะบั‚: # ะดะฒะธะถะตะฝะธะต ั ัƒัะบะพั€ะตะฝะธะตะผ ะฟะพะด ะดะตะนัั‚ะฒะธะตะผ ะณั€ะฐะฒะธั‚ะฐั†ะธะธ self.velocity[1] += self.gravity # ะฟะตั€ะตะผะตั‰ะฐะตะผ ั‡ะฐัั‚ะธั†ัƒ self.rect.x += self.velocity[0] self.rect.y += self.velocity[1] # ัƒะฑะธะฒะฐะตะผ, ะตัะปะธ ั‡ะฐัั‚ะธั†ะฐ ัƒัˆะปะฐ ะทะฐ ัะบั€ะฐะฝ if not self.rect.colliderect(self.screen_rect): self.kill() GRAVITY = 0.5 def create_particles(position, screen_rect): # ะบะพะปะธั‡ะตัั‚ะฒะพ ัะพะทะดะฐะฒะฐะตะผั‹ั… ั‡ะฐัั‚ะธั† particle_count = 20 # ะฒะพะทะผะพะถะฝั‹ะต ัะบะพั€ะพัั‚ะธ numbers = range(-5, 6) for _ in range(particle_count): Particle(position, random.choice(numbers), random.choice(numbers), screen_rect) def gameover(name, score): con = sqlite3.connect("Data_Base.db") cur = con.cursor() if name == '': name = 'ะฐะฝะพะฝะธะผ' cur.execute(f"""INSERT INTO result_table(name, murders) VALUES('{name}', '{score}')""") con.commit() con.close() terminate()
entory.board[inventory.selec
identifier_name
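create_particles in the record above hands each Particle a random dx and dy, but the constructor ignores both and sets only the horizontal speed with random.randrange(-1, 1), which can return only -1 or 0, so particles drift left or fall straight down. If the intent was for particles to use the supplied velocities, the constructor could read as the drop-in sketch below; this is a guess at the intent, not a confirmed fix, and it assumes the surrounding Particle class (fire, all_sprites, GRAVITY) from the record.

    def __init__(self, pos, dx, dy, screen_rect):
        super().__init__(all_sprites)
        self.image = random.choice(self.fire)
        self.rect = self.image.get_rect()
        self.screen_rect = screen_rect
        # Use the horizontal and vertical speeds passed in by create_particles.
        self.velocity = [dx, dy]
        self.rect.x, self.rect.y = pos
        self.gravity = GRAVITY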
machine.go
/* Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package kubevirt import ( gocontext "context" "fmt" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" kubedrain "k8s.io/kubectl/pkg/drain" kubevirtv1 "kubevirt.io/api/core/v1" "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/workloadcluster" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/noderefutil" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "time" infrav1 "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1" "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/context" "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/ssh" ) const ( vmiDeleteGraceTimeoutDurationSeconds = 600 // 10 minutes ) // Machine implement a service for managing the KubeVirt VM hosting a kubernetes node. type Machine struct { client client.Client namespace string machineContext *context.MachineContext vmiInstance *kubevirtv1.VirtualMachineInstance vmInstance *kubevirtv1.VirtualMachine sshKeys *ssh.ClusterNodeSshKeys getCommandExecutor func(string, *ssh.ClusterNodeSshKeys) ssh.VMCommandExecutor } // NewMachine returns a new Machine service for the given context. func NewMachine(ctx *context.MachineContext, client client.Client, namespace string, sshKeys *ssh.ClusterNodeSshKeys) (*Machine, error) { machine := &Machine{ client: client, namespace: namespace, machineContext: ctx, vmiInstance: nil, vmInstance: nil, sshKeys: sshKeys, getCommandExecutor: ssh.NewVMCommandExecutor, } namespacedName := types.NamespacedName{Namespace: namespace, Name: ctx.KubevirtMachine.Name} vm := &kubevirtv1.VirtualMachine{} vmi := &kubevirtv1.VirtualMachineInstance{} // Get the active running VMI if it exists err := client.Get(ctx.Context, namespacedName, vmi) if err != nil { if !apierrors.IsNotFound(err) { return nil, err } } else { machine.vmiInstance = vmi } // Get the top level VM object if it exists err = client.Get(ctx.Context, namespacedName, vm) if err != nil { if !apierrors.IsNotFound(err) { return nil, err } } else { machine.vmInstance = vm } return machine, nil } // IsTerminal Reports back if the VM is either being requested to terminate or is terminate // in a way that it will never recover from. func (m *Machine) IsTerminal() (bool, string, error) { if m.vmInstance == nil || m.vmiInstance == nil { // vm/vmi hasn't been created yet return false, "", nil } // VMI is being asked to terminate gracefully due to node drain if !m.vmiInstance.IsFinal() && !m.vmiInstance.IsMigratable() && m.vmiInstance.Status.EvacuationNodeName != "" { // VM's infra node is being drained and VM is not live migratable. // We need to report a FailureReason so the MachineHealthCheck and // MachineSet controllers will gracefully take the VM down. 
return true, "The Machine's VM pod is marked for eviction due to infra node drain.", nil } // The infrav1.KubevirtVMTerminalLabel is a way users or automation to mark // a VM as being in a terminal state that requires remediation. This is used // by the functional test suite to test remediation and can also be triggered // by users as a way to manually trigger remediation. terminalReason, ok := m.vmInstance.Labels[infrav1.KubevirtMachineVMTerminalLabel] if ok { return true, fmt.Sprintf("VM's %s label has the vm marked as being terminal with reason [%s]", infrav1.KubevirtMachineVMTerminalLabel, terminalReason), nil } // Also check the VMI for this label terminalReason, ok = m.vmiInstance.Labels[infrav1.KubevirtMachineVMTerminalLabel] if ok { return true, fmt.Sprintf("VMI's %s label has the vm marked as being terminal with reason [%s]", infrav1.KubevirtMachineVMTerminalLabel, terminalReason), nil } runStrategy, err := m.vmInstance.RunStrategy() if err != nil { return false, "", err } switch runStrategy { case kubevirtv1.RunStrategyAlways: // VM should recover if it is down. return false, "", nil case kubevirtv1.RunStrategyManual: // If VM is manually controlled, we stay out of the loop return false, "", nil case kubevirtv1.RunStrategyHalted, kubevirtv1.RunStrategyOnce: if m.vmiInstance.IsFinal() { return true, "VMI has reached a permanent finalized state", nil } return false, "", nil case kubevirtv1.RunStrategyRerunOnFailure: // only recovers when vmi is failed if m.vmiInstance.Status.Phase == kubevirtv1.Succeeded { return true, "VMI has reached a permanent finalized state", nil } return false, "", nil } return false, "", nil } // Exists checks if the VM has been provisioned already. func (m *Machine) Exists() bool { return m.vmInstance != nil } // Create creates a new VM for this machine. func (m *Machine) Create(ctx gocontext.Context) error { m.machineContext.Logger.Info(fmt.Sprintf("Creating VM with role '%s'...", nodeRole(m.machineContext))) virtualMachine := newVirtualMachineFromKubevirtMachine(m.machineContext, m.namespace) mutateFn := func() (err error) { if virtualMachine.Labels == nil { virtualMachine.Labels = map[string]string{} } if virtualMachine.Spec.Template.ObjectMeta.Labels == nil { virtualMachine.Spec.Template.ObjectMeta.Labels = map[string]string{} } virtualMachine.Labels[clusterv1.ClusterLabelName] = m.machineContext.Cluster.Name virtualMachine.Labels[infrav1.KubevirtMachineNameLabel] = m.machineContext.KubevirtMachine.Name virtualMachine.Labels[infrav1.KubevirtMachineNamespaceLabel] = m.machineContext.KubevirtMachine.Namespace virtualMachine.Spec.Template.ObjectMeta.Labels[infrav1.KubevirtMachineNameLabel] = m.machineContext.KubevirtMachine.Name virtualMachine.Spec.Template.ObjectMeta.Labels[infrav1.KubevirtMachineNamespaceLabel] = m.machineContext.KubevirtMachine.Namespace return nil } if _, err := controllerutil.CreateOrUpdate(ctx, m.client, virtualMachine, mutateFn); err != nil { return err } return nil } // Returns if VMI has ready condition or not. func (m *Machine) hasReadyCondition() bool { if m.vmiInstance == nil { return false } for _, cond := range m.vmiInstance.Status.Conditions { if cond.Type == kubevirtv1.VirtualMachineInstanceReady && cond.Status == corev1.ConditionTrue { return true } } return false } // Address returns the IP address of the VM. 
func (m *Machine) Address() string { if m.vmiInstance != nil && len(m.vmiInstance.Status.Interfaces) > 0 { return m.vmiInstance.Status.Interfaces[0].IP } return "" } // IsReady checks if the VM is ready func (m *Machine) IsReady() bool { return m.hasReadyCondition() } // SupportsCheckingIsBootstrapped checks if we have a method of checking // that this bootstrapper has completed. func (m *Machine) SupportsCheckingIsBootstrapped() bool { // Right now, we can only check if bootstrapping has // completed if we are using a bootstrapper that allows // for us to inject ssh keys into the guest. if m.sshKeys != nil { return m.machineContext.HasInjectedCapkSSHKeys(m.sshKeys.PublicKey) } return false } // IsBootstrapped checks if the VM is bootstrapped with Kubernetes. func (m *Machine) IsBootstrapped() bool { // CheckStrategy value is already sanitized by apiserver switch m.machineContext.KubevirtMachine.Spec.BootstrapCheckSpec.CheckStrategy { case "none": // skip bootstrap check and always returns positively return true case "": fallthrough // ssh is default check strategy, fallthrough case "ssh": return m.IsBootstrappedWithSSH() default: // Since CRD CheckStrategy field is validated by an enum, this case should never be hit return false } } // IsBootstrappedWithSSH checks if the VM is bootstrapped with Kubernetes using SSH strategy. func (m *Machine) IsBootstrappedWithSSH() bool { if !m.IsReady() || m.sshKeys == nil { return false } executor := m.getCommandExecutor(m.Address(), m.sshKeys) output, err := executor.ExecuteCommand("cat /run/cluster-api/bootstrap-success.complete") if err != nil || output != "success" { return false } return true } // GenerateProviderID generates the KubeVirt provider ID to be used for the NodeRef func (m *Machine) GenerateProviderID() (string, error) { if m.vmiInstance == nil { return "", errors.New("Underlying Kubevirt VM is NOT running") } providerID := fmt.Sprintf("kubevirt://%s", m.machineContext.KubevirtMachine.Name) return providerID, nil } // Delete deletes VM for this machine. func (m *Machine) Delete() error { namespacedName := types.NamespacedName{Namespace: m.namespace, Name: m.machineContext.KubevirtMachine.Name} vm := &kubevirtv1.VirtualMachine{} if err := m.client.Get(m.machineContext.Context, namespacedName, vm); err != nil { if apierrors.IsNotFound(err) { m.machineContext.Logger.Info("VM does not exist, nothing to do.") return nil } return errors.Wrapf(err, "failed to retrieve VM to delete") } if err := m.client.Delete(gocontext.Background(), vm); err != nil { return errors.Wrapf(err, "failed to delete VM") } return nil } func (m *Machine) DrainNodeIfNeeded(wrkldClstr workloadcluster.WorkloadCluster) (time.Duration, error) { if m.vmiInstance == nil || !m.shouldGracefulDeleteVMI() { if _, anntExists := m.machineContext.KubevirtMachine.Annotations[infrav1.VmiDeletionGraceTime]; anntExists { if err := m.removeGracePeriodAnnotation(); err != nil { return 100 * time.Millisecond, err } } return 0, nil } exceeded, err := m.drainGracePeriodExceeded() if err != nil { return 0, err } if !exceeded {
return 0, err } if retryDuration > 0 { return retryDuration, nil } } // now, when the node is drained (or vmiDeleteGraceTimeoutDurationSeconds has passed), we can delete the VMI propagationPolicy := metav1.DeletePropagationForeground err = m.client.Delete(m.machineContext, m.vmiInstance, &client.DeleteOptions{PropagationPolicy: &propagationPolicy}) if err != nil { m.machineContext.Logger.Error(err, "failed to delete VirtualMachineInstance") return 0, err } if err = m.removeGracePeriodAnnotation(); err != nil { return 100 * time.Millisecond, err } // requeue to force reading the VMI again return time.Second * 10, nil } const removeGracePeriodAnnotationPatch = `[{"op": "remove", "path": "/metadata/annotations/` + infrav1.VmiDeletionGraceTimeEscape + `"}]` func (m *Machine) removeGracePeriodAnnotation() error { patch := client.RawPatch(types.JSONPatchType, []byte(removeGracePeriodAnnotationPatch)) if err := m.client.Patch(m.machineContext, m.machineContext.KubevirtMachine, patch); err != nil { return fmt.Errorf("failed to remove the %s annotation to the KubeVirtMachine %s; %w", infrav1.VmiDeletionGraceTime, m.machineContext.KubevirtMachine.Name, err) } return nil } func (m *Machine) shouldGracefulDeleteVMI() bool { if m.vmiInstance.DeletionTimestamp != nil { m.machineContext.Logger.V(4).Info("DrainNode: the virtualMachineInstance is already in deletion process. Nothing to do here") return false } if m.vmiInstance.Spec.EvictionStrategy == nil || *m.vmiInstance.Spec.EvictionStrategy != kubevirtv1.EvictionStrategyExternal { m.machineContext.Logger.V(4).Info("DrainNode: graceful deletion is not supported for virtualMachineInstance. Nothing to do here") return false } // KubeVirt will set the EvacuationNodeName field in case of guest node eviction. If the field is not set, there is // nothing to do. if len(m.vmiInstance.Status.EvacuationNodeName) == 0 { m.machineContext.Logger.V(4).Info("DrainNode: the virtualMachineInstance is not marked for deletion. Nothing to do here") return false } return true } // wait vmiDeleteGraceTimeoutDurationSeconds to the node to be drained. If this time had passed, don't wait anymore. func (m *Machine) drainGracePeriodExceeded() (bool, error) { if graceTime, found := m.machineContext.KubevirtMachine.Annotations[infrav1.VmiDeletionGraceTime]; found { deletionGraceTime, err := time.Parse(time.RFC3339, graceTime) if err != nil { // wrong format - rewrite if err = m.setVmiDeletionGraceTime(); err != nil { return false, err } } else { return time.Now().UTC().After(deletionGraceTime), nil } } else { if err := m.setVmiDeletionGraceTime(); err != nil { return false, err } } return false, nil } func (m *Machine) setVmiDeletionGraceTime() error { m.machineContext.Logger.Info(fmt.Sprintf("setting the %s annotation", infrav1.VmiDeletionGraceTime)) graceTime := time.Now().Add(vmiDeleteGraceTimeoutDurationSeconds * time.Second).UTC().Format(time.RFC3339) patch := fmt.Sprintf(`{"metadata":{"annotations":{"%s": "%s"}}}`, infrav1.VmiDeletionGraceTime, graceTime) patchRequest := client.RawPatch(types.MergePatchType, []byte(patch)) if err := m.client.Patch(m.machineContext, m.machineContext.KubevirtMachine, patchRequest); err != nil { return fmt.Errorf("failed to add the %s annotation to the KubeVirtMachine %s; %w", infrav1.VmiDeletionGraceTime, m.machineContext.KubevirtMachine.Name, err) } return nil } // This functions drains a node from a tenant cluster. 
// The function returns 3 values: // * drain done - boolean // * retry time, or 0 if not needed // * error - to be returned if we want to retry func (m *Machine) drainNode(wrkldClstr workloadcluster.WorkloadCluster) (time.Duration, error) { kubeClient, err := wrkldClstr.GenerateWorkloadClusterK8sClient(m.machineContext) if err != nil { m.machineContext.Logger.Error(err, "Error creating a remote client while deleting Machine, won't retry") return 0, fmt.Errorf("failed to get client to remote cluster; %w", err) } nodeName := m.vmiInstance.Status.EvacuationNodeName node, err := kubeClient.CoreV1().Nodes().Get(m.machineContext, nodeName, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { // If an admin deletes the node directly, we'll end up here. m.machineContext.Logger.Error(err, "Could not find node from noderef, it may have already been deleted") return 0, nil } return 0, fmt.Errorf("unable to get node %q: %w", nodeName, err) } drainer := &kubedrain.Helper{ Client: kubeClient, Ctx: m.machineContext, Force: true, IgnoreAllDaemonSets: true, DeleteEmptyDirData: true, GracePeriodSeconds: -1, // If a pod is not evicted in 20 seconds, retry the eviction next time the // machine gets reconciled again (to allow other machines to be reconciled). Timeout: 20 * time.Second, OnPodDeletedOrEvicted: func(pod *corev1.Pod, usingEviction bool) { verbStr := "Deleted" if usingEviction { verbStr = "Evicted" } m.machineContext.Logger.Info(fmt.Sprintf("%s pod from Node", verbStr), "pod", fmt.Sprintf("%s/%s", pod.Name, pod.Namespace)) }, Out: writer{m.machineContext.Logger.Info}, ErrOut: writer{func(msg string, keysAndValues ...interface{}) { m.machineContext.Logger.Error(nil, msg, keysAndValues...) }}, } if noderefutil.IsNodeUnreachable(node) { // When the node is unreachable and some pods are not evicted for as long as this timeout, we ignore them. drainer.SkipWaitForDeleteTimeoutSeconds = 60 * 5 // 5 minutes } if err = kubedrain.RunCordonOrUncordon(drainer, node, true); err != nil { // Machine will be re-reconciled after a cordon failure. m.machineContext.Logger.Error(err, "Cordon failed") return 0, errors.Errorf("unable to cordon node %s: %v", nodeName, err) } if err = kubedrain.RunNodeDrain(drainer, node.Name); err != nil { // Machine will be re-reconciled after a drain failure. m.machineContext.Logger.Error(err, "Drain failed, retry in a second", "node name", nodeName) return time.Second, nil } m.machineContext.Logger.Info("Drain successful", "node name", nodeName) return 0, nil } // writer implements io.Writer interface as a pass-through for klog. type writer struct { logFunc func(msg string, keysAndValues ...interface{}) } // Write passes string(p) into writer's logFunc and always returns len(p). func (w writer) Write(p []byte) (n int, err error) { w.logFunc(string(p)) return len(p), nil }
retryDuration, err := m.drainNode(wrkldClstr) if err != nil {
random_line_split
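The machine.go record above stores a VMI deletion grace deadline as an RFC3339 annotation (setVmiDeletionGraceTime) and only force-deletes the VMI once drainGracePeriodExceeded reports that the deadline has passed. Since the other records in this file are Python, here is the same timing logic sketched in Python for comparison; the 600-second constant comes from the record, while the function names and format handling are illustrative only.

from datetime import datetime, timedelta, timezone

VMI_DELETE_GRACE_SECONDS = 600  # 10 minutes, matching vmiDeleteGraceTimeoutDurationSeconds

def grace_deadline_annotation() -> str:
    # What setVmiDeletionGraceTime stores: now + grace period, UTC, RFC3339.
    deadline = datetime.now(timezone.utc) + timedelta(seconds=VMI_DELETE_GRACE_SECONDS)
    return deadline.strftime("%Y-%m-%dT%H:%M:%SZ")

def grace_period_exceeded(annotation: str) -> bool:
    # What drainGracePeriodExceeded checks: has the stored deadline passed?
    deadline = datetime.strptime(annotation, "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=timezone.utc)
    return datetime.now(timezone.utc) > deadline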
machine.go
/* Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package kubevirt import ( gocontext "context" "fmt" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" kubedrain "k8s.io/kubectl/pkg/drain" kubevirtv1 "kubevirt.io/api/core/v1" "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/workloadcluster" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/noderefutil" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "time" infrav1 "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1" "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/context" "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/ssh" ) const ( vmiDeleteGraceTimeoutDurationSeconds = 600 // 10 minutes ) // Machine implement a service for managing the KubeVirt VM hosting a kubernetes node. type Machine struct { client client.Client namespace string machineContext *context.MachineContext vmiInstance *kubevirtv1.VirtualMachineInstance vmInstance *kubevirtv1.VirtualMachine sshKeys *ssh.ClusterNodeSshKeys getCommandExecutor func(string, *ssh.ClusterNodeSshKeys) ssh.VMCommandExecutor } // NewMachine returns a new Machine service for the given context. func NewMachine(ctx *context.MachineContext, client client.Client, namespace string, sshKeys *ssh.ClusterNodeSshKeys) (*Machine, error) { machine := &Machine{ client: client, namespace: namespace, machineContext: ctx, vmiInstance: nil, vmInstance: nil, sshKeys: sshKeys, getCommandExecutor: ssh.NewVMCommandExecutor, } namespacedName := types.NamespacedName{Namespace: namespace, Name: ctx.KubevirtMachine.Name} vm := &kubevirtv1.VirtualMachine{} vmi := &kubevirtv1.VirtualMachineInstance{} // Get the active running VMI if it exists err := client.Get(ctx.Context, namespacedName, vmi) if err != nil { if !apierrors.IsNotFound(err) { return nil, err } } else { machine.vmiInstance = vmi } // Get the top level VM object if it exists err = client.Get(ctx.Context, namespacedName, vm) if err != nil { if !apierrors.IsNotFound(err) { return nil, err } } else { machine.vmInstance = vm } return machine, nil } // IsTerminal Reports back if the VM is either being requested to terminate or is terminate // in a way that it will never recover from. func (m *Machine) IsTerminal() (bool, string, error) { if m.vmInstance == nil || m.vmiInstance == nil { // vm/vmi hasn't been created yet return false, "", nil } // VMI is being asked to terminate gracefully due to node drain if !m.vmiInstance.IsFinal() && !m.vmiInstance.IsMigratable() && m.vmiInstance.Status.EvacuationNodeName != "" { // VM's infra node is being drained and VM is not live migratable. // We need to report a FailureReason so the MachineHealthCheck and // MachineSet controllers will gracefully take the VM down. 
return true, "The Machine's VM pod is marked for eviction due to infra node drain.", nil } // The infrav1.KubevirtVMTerminalLabel is a way users or automation to mark // a VM as being in a terminal state that requires remediation. This is used // by the functional test suite to test remediation and can also be triggered // by users as a way to manually trigger remediation. terminalReason, ok := m.vmInstance.Labels[infrav1.KubevirtMachineVMTerminalLabel] if ok { return true, fmt.Sprintf("VM's %s label has the vm marked as being terminal with reason [%s]", infrav1.KubevirtMachineVMTerminalLabel, terminalReason), nil } // Also check the VMI for this label terminalReason, ok = m.vmiInstance.Labels[infrav1.KubevirtMachineVMTerminalLabel] if ok { return true, fmt.Sprintf("VMI's %s label has the vm marked as being terminal with reason [%s]", infrav1.KubevirtMachineVMTerminalLabel, terminalReason), nil } runStrategy, err := m.vmInstance.RunStrategy() if err != nil { return false, "", err } switch runStrategy { case kubevirtv1.RunStrategyAlways: // VM should recover if it is down. return false, "", nil case kubevirtv1.RunStrategyManual: // If VM is manually controlled, we stay out of the loop return false, "", nil case kubevirtv1.RunStrategyHalted, kubevirtv1.RunStrategyOnce: if m.vmiInstance.IsFinal() { return true, "VMI has reached a permanent finalized state", nil } return false, "", nil case kubevirtv1.RunStrategyRerunOnFailure: // only recovers when vmi is failed if m.vmiInstance.Status.Phase == kubevirtv1.Succeeded { return true, "VMI has reached a permanent finalized state", nil } return false, "", nil } return false, "", nil } // Exists checks if the VM has been provisioned already. func (m *Machine) Exists() bool { return m.vmInstance != nil } // Create creates a new VM for this machine. func (m *Machine) Create(ctx gocontext.Context) error { m.machineContext.Logger.Info(fmt.Sprintf("Creating VM with role '%s'...", nodeRole(m.machineContext))) virtualMachine := newVirtualMachineFromKubevirtMachine(m.machineContext, m.namespace) mutateFn := func() (err error) { if virtualMachine.Labels == nil { virtualMachine.Labels = map[string]string{} } if virtualMachine.Spec.Template.ObjectMeta.Labels == nil { virtualMachine.Spec.Template.ObjectMeta.Labels = map[string]string{} } virtualMachine.Labels[clusterv1.ClusterLabelName] = m.machineContext.Cluster.Name virtualMachine.Labels[infrav1.KubevirtMachineNameLabel] = m.machineContext.KubevirtMachine.Name virtualMachine.Labels[infrav1.KubevirtMachineNamespaceLabel] = m.machineContext.KubevirtMachine.Namespace virtualMachine.Spec.Template.ObjectMeta.Labels[infrav1.KubevirtMachineNameLabel] = m.machineContext.KubevirtMachine.Name virtualMachine.Spec.Template.ObjectMeta.Labels[infrav1.KubevirtMachineNamespaceLabel] = m.machineContext.KubevirtMachine.Namespace return nil } if _, err := controllerutil.CreateOrUpdate(ctx, m.client, virtualMachine, mutateFn); err != nil { return err } return nil } // Returns if VMI has ready condition or not. func (m *Machine) hasReadyCondition() bool { if m.vmiInstance == nil { return false } for _, cond := range m.vmiInstance.Status.Conditions { if cond.Type == kubevirtv1.VirtualMachineInstanceReady && cond.Status == corev1.ConditionTrue { return true } } return false } // Address returns the IP address of the VM. 
func (m *Machine) Address() string { if m.vmiInstance != nil && len(m.vmiInstance.Status.Interfaces) > 0 { return m.vmiInstance.Status.Interfaces[0].IP } return "" } // IsReady checks if the VM is ready func (m *Machine) IsReady() bool { return m.hasReadyCondition() } // SupportsCheckingIsBootstrapped checks if we have a method of checking // that this bootstrapper has completed. func (m *Machine) SupportsCheckingIsBootstrapped() bool { // Right now, we can only check if bootstrapping has // completed if we are using a bootstrapper that allows // for us to inject ssh keys into the guest. if m.sshKeys != nil { return m.machineContext.HasInjectedCapkSSHKeys(m.sshKeys.PublicKey) } return false } // IsBootstrapped checks if the VM is bootstrapped with Kubernetes. func (m *Machine) IsBootstrapped() bool { // CheckStrategy value is already sanitized by apiserver switch m.machineContext.KubevirtMachine.Spec.BootstrapCheckSpec.CheckStrategy { case "none": // skip bootstrap check and always returns positively return true case "": fallthrough // ssh is default check strategy, fallthrough case "ssh": return m.IsBootstrappedWithSSH() default: // Since CRD CheckStrategy field is validated by an enum, this case should never be hit return false } } // IsBootstrappedWithSSH checks if the VM is bootstrapped with Kubernetes using SSH strategy. func (m *Machine) IsBootstrappedWithSSH() bool { if !m.IsReady() || m.sshKeys == nil { return false } executor := m.getCommandExecutor(m.Address(), m.sshKeys) output, err := executor.ExecuteCommand("cat /run/cluster-api/bootstrap-success.complete") if err != nil || output != "success" { return false } return true } // GenerateProviderID generates the KubeVirt provider ID to be used for the NodeRef func (m *Machine) GenerateProviderID() (string, error) { if m.vmiInstance == nil { return "", errors.New("Underlying Kubevirt VM is NOT running") } providerID := fmt.Sprintf("kubevirt://%s", m.machineContext.KubevirtMachine.Name) return providerID, nil } // Delete deletes VM for this machine. 
func (m *Machine) Delete() error { namespacedName := types.NamespacedName{Namespace: m.namespace, Name: m.machineContext.KubevirtMachine.Name} vm := &kubevirtv1.VirtualMachine{} if err := m.client.Get(m.machineContext.Context, namespacedName, vm); err != nil { if apierrors.IsNotFound(err) { m.machineContext.Logger.Info("VM does not exist, nothing to do.") return nil } return errors.Wrapf(err, "failed to retrieve VM to delete") } if err := m.client.Delete(gocontext.Background(), vm); err != nil { return errors.Wrapf(err, "failed to delete VM") } return nil } func (m *Machine) DrainNodeIfNeeded(wrkldClstr workloadcluster.WorkloadCluster) (time.Duration, error) { if m.vmiInstance == nil || !m.shouldGracefulDeleteVMI() { if _, anntExists := m.machineContext.KubevirtMachine.Annotations[infrav1.VmiDeletionGraceTime]; anntExists { if err := m.removeGracePeriodAnnotation(); err != nil { return 100 * time.Millisecond, err } } return 0, nil } exceeded, err := m.drainGracePeriodExceeded() if err != nil { return 0, err } if !exceeded { retryDuration, err := m.drainNode(wrkldClstr) if err != nil { return 0, err } if retryDuration > 0 { return retryDuration, nil } } // now, when the node is drained (or vmiDeleteGraceTimeoutDurationSeconds has passed), we can delete the VMI propagationPolicy := metav1.DeletePropagationForeground err = m.client.Delete(m.machineContext, m.vmiInstance, &client.DeleteOptions{PropagationPolicy: &propagationPolicy}) if err != nil { m.machineContext.Logger.Error(err, "failed to delete VirtualMachineInstance") return 0, err } if err = m.removeGracePeriodAnnotation(); err != nil { return 100 * time.Millisecond, err } // requeue to force reading the VMI again return time.Second * 10, nil } const removeGracePeriodAnnotationPatch = `[{"op": "remove", "path": "/metadata/annotations/` + infrav1.VmiDeletionGraceTimeEscape + `"}]` func (m *Machine) removeGracePeriodAnnotation() error { patch := client.RawPatch(types.JSONPatchType, []byte(removeGracePeriodAnnotationPatch)) if err := m.client.Patch(m.machineContext, m.machineContext.KubevirtMachine, patch); err != nil { return fmt.Errorf("failed to remove the %s annotation to the KubeVirtMachine %s; %w", infrav1.VmiDeletionGraceTime, m.machineContext.KubevirtMachine.Name, err) } return nil } func (m *Machine) shouldGracefulDeleteVMI() bool { if m.vmiInstance.DeletionTimestamp != nil { m.machineContext.Logger.V(4).Info("DrainNode: the virtualMachineInstance is already in deletion process. Nothing to do here") return false } if m.vmiInstance.Spec.EvictionStrategy == nil || *m.vmiInstance.Spec.EvictionStrategy != kubevirtv1.EvictionStrategyExternal { m.machineContext.Logger.V(4).Info("DrainNode: graceful deletion is not supported for virtualMachineInstance. Nothing to do here") return false } // KubeVirt will set the EvacuationNodeName field in case of guest node eviction. If the field is not set, there is // nothing to do. if len(m.vmiInstance.Status.EvacuationNodeName) == 0 { m.machineContext.Logger.V(4).Info("DrainNode: the virtualMachineInstance is not marked for deletion. Nothing to do here") return false } return true } // wait vmiDeleteGraceTimeoutDurationSeconds to the node to be drained. If this time had passed, don't wait anymore. 
func (m *Machine) drainGracePeriodExceeded() (bool, error) { if graceTime, found := m.machineContext.KubevirtMachine.Annotations[infrav1.VmiDeletionGraceTime]; found { deletionGraceTime, err := time.Parse(time.RFC3339, graceTime) if err != nil { // wrong format - rewrite if err = m.setVmiDeletionGraceTime(); err != nil { return false, err } } else { return time.Now().UTC().After(deletionGraceTime), nil } } else { if err := m.setVmiDeletionGraceTime(); err != nil { return false, err } } return false, nil } func (m *Machine)
() error { m.machineContext.Logger.Info(fmt.Sprintf("setting the %s annotation", infrav1.VmiDeletionGraceTime)) graceTime := time.Now().Add(vmiDeleteGraceTimeoutDurationSeconds * time.Second).UTC().Format(time.RFC3339) patch := fmt.Sprintf(`{"metadata":{"annotations":{"%s": "%s"}}}`, infrav1.VmiDeletionGraceTime, graceTime) patchRequest := client.RawPatch(types.MergePatchType, []byte(patch)) if err := m.client.Patch(m.machineContext, m.machineContext.KubevirtMachine, patchRequest); err != nil { return fmt.Errorf("failed to add the %s annotation to the KubeVirtMachine %s; %w", infrav1.VmiDeletionGraceTime, m.machineContext.KubevirtMachine.Name, err) } return nil } // This functions drains a node from a tenant cluster. // The function returns 3 values: // * drain done - boolean // * retry time, or 0 if not needed // * error - to be returned if we want to retry func (m *Machine) drainNode(wrkldClstr workloadcluster.WorkloadCluster) (time.Duration, error) { kubeClient, err := wrkldClstr.GenerateWorkloadClusterK8sClient(m.machineContext) if err != nil { m.machineContext.Logger.Error(err, "Error creating a remote client while deleting Machine, won't retry") return 0, fmt.Errorf("failed to get client to remote cluster; %w", err) } nodeName := m.vmiInstance.Status.EvacuationNodeName node, err := kubeClient.CoreV1().Nodes().Get(m.machineContext, nodeName, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { // If an admin deletes the node directly, we'll end up here. m.machineContext.Logger.Error(err, "Could not find node from noderef, it may have already been deleted") return 0, nil } return 0, fmt.Errorf("unable to get node %q: %w", nodeName, err) } drainer := &kubedrain.Helper{ Client: kubeClient, Ctx: m.machineContext, Force: true, IgnoreAllDaemonSets: true, DeleteEmptyDirData: true, GracePeriodSeconds: -1, // If a pod is not evicted in 20 seconds, retry the eviction next time the // machine gets reconciled again (to allow other machines to be reconciled). Timeout: 20 * time.Second, OnPodDeletedOrEvicted: func(pod *corev1.Pod, usingEviction bool) { verbStr := "Deleted" if usingEviction { verbStr = "Evicted" } m.machineContext.Logger.Info(fmt.Sprintf("%s pod from Node", verbStr), "pod", fmt.Sprintf("%s/%s", pod.Name, pod.Namespace)) }, Out: writer{m.machineContext.Logger.Info}, ErrOut: writer{func(msg string, keysAndValues ...interface{}) { m.machineContext.Logger.Error(nil, msg, keysAndValues...) }}, } if noderefutil.IsNodeUnreachable(node) { // When the node is unreachable and some pods are not evicted for as long as this timeout, we ignore them. drainer.SkipWaitForDeleteTimeoutSeconds = 60 * 5 // 5 minutes } if err = kubedrain.RunCordonOrUncordon(drainer, node, true); err != nil { // Machine will be re-reconciled after a cordon failure. m.machineContext.Logger.Error(err, "Cordon failed") return 0, errors.Errorf("unable to cordon node %s: %v", nodeName, err) } if err = kubedrain.RunNodeDrain(drainer, node.Name); err != nil { // Machine will be re-reconciled after a drain failure. m.machineContext.Logger.Error(err, "Drain failed, retry in a second", "node name", nodeName) return time.Second, nil } m.machineContext.Logger.Info("Drain successful", "node name", nodeName) return 0, nil } // writer implements io.Writer interface as a pass-through for klog. type writer struct { logFunc func(msg string, keysAndValues ...interface{}) } // Write passes string(p) into writer's logFunc and always returns len(p). 
func (w writer) Write(p []byte) (n int, err error) { w.logFunc(string(p)) return len(p), nil }
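// A minimal standalone sketch (separate from machine.go) of the adapter pattern used by the
// writer type above: any func(msg string, keysAndValues ...interface{}) log function can be
// wrapped to satisfy io.Writer, which is what the kubectl drain helper expects for its Out and
// ErrOut fields. The fmt-based log function below is a stand-in, not the controller's real logger.
package main

import (
	"fmt"
	"io"
)

// logWriter forwards every Write call to a keysAndValues-style log function.
type logWriter struct {
	logFunc func(msg string, keysAndValues ...interface{})
}

// Write implements io.Writer; it always reports the full length as written.
func (w logWriter) Write(p []byte) (n int, err error) {
	w.logFunc(string(p))
	return len(p), nil
}

func main() {
	var out io.Writer = logWriter{logFunc: func(msg string, kv ...interface{}) {
		fmt.Printf("INFO: %s %v\n", msg, kv)
	}}
	// Anything written to out is routed through the log function.
	fmt.Fprintln(out, "evicting pods from node worker-0")
}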
setVmiDeletionGraceTime
identifier_name
machine.go
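// The removeGracePeriodAnnotationPatch above embeds a pre-escaped annotation key
// (infrav1.VmiDeletionGraceTimeEscape) in a JSON Patch path. Assuming that constant is the
// RFC 6901 escaped form of the annotation key, a standalone sketch of the escaping rule: inside
// a JSON Pointer segment '~' becomes '~0' and '/' becomes '~1'. The key string below is
// hypothetical, not the real constant.
package main

import (
	"fmt"
	"strings"
)

// escapeJSONPointer applies RFC 6901 escaping so a key containing '/' or '~'
// can be embedded as a single path segment of a JSON Patch "path".
func escapeJSONPointer(s string) string {
	return strings.NewReplacer("~", "~0", "/", "~1").Replace(s)
}

func main() {
	// Hypothetical annotation key; the real value lives in the infrav1 package.
	key := "infrastructure.cluster.x-k8s.io/vmi-deletion-grace-time"
	patch := fmt.Sprintf(`[{"op": "remove", "path": "/metadata/annotations/%s"}]`, escapeJSONPointer(key))
	fmt.Println(patch)
	// The '/' inside the key is encoded as '~1', so the pointer still has exactly three segments.
}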
/* Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package kubevirt import ( gocontext "context" "fmt" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" kubedrain "k8s.io/kubectl/pkg/drain" kubevirtv1 "kubevirt.io/api/core/v1" "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/workloadcluster" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/noderefutil" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "time" infrav1 "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1" "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/context" "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/ssh" ) const ( vmiDeleteGraceTimeoutDurationSeconds = 600 // 10 minutes ) // Machine implement a service for managing the KubeVirt VM hosting a kubernetes node. type Machine struct { client client.Client namespace string machineContext *context.MachineContext vmiInstance *kubevirtv1.VirtualMachineInstance vmInstance *kubevirtv1.VirtualMachine sshKeys *ssh.ClusterNodeSshKeys getCommandExecutor func(string, *ssh.ClusterNodeSshKeys) ssh.VMCommandExecutor } // NewMachine returns a new Machine service for the given context. func NewMachine(ctx *context.MachineContext, client client.Client, namespace string, sshKeys *ssh.ClusterNodeSshKeys) (*Machine, error) { machine := &Machine{ client: client, namespace: namespace, machineContext: ctx, vmiInstance: nil, vmInstance: nil, sshKeys: sshKeys, getCommandExecutor: ssh.NewVMCommandExecutor, } namespacedName := types.NamespacedName{Namespace: namespace, Name: ctx.KubevirtMachine.Name} vm := &kubevirtv1.VirtualMachine{} vmi := &kubevirtv1.VirtualMachineInstance{} // Get the active running VMI if it exists err := client.Get(ctx.Context, namespacedName, vmi) if err != nil { if !apierrors.IsNotFound(err) { return nil, err } } else { machine.vmiInstance = vmi } // Get the top level VM object if it exists err = client.Get(ctx.Context, namespacedName, vm) if err != nil { if !apierrors.IsNotFound(err) { return nil, err } } else { machine.vmInstance = vm } return machine, nil } // IsTerminal Reports back if the VM is either being requested to terminate or is terminate // in a way that it will never recover from. func (m *Machine) IsTerminal() (bool, string, error) { if m.vmInstance == nil || m.vmiInstance == nil { // vm/vmi hasn't been created yet return false, "", nil } // VMI is being asked to terminate gracefully due to node drain if !m.vmiInstance.IsFinal() && !m.vmiInstance.IsMigratable() && m.vmiInstance.Status.EvacuationNodeName != "" { // VM's infra node is being drained and VM is not live migratable. // We need to report a FailureReason so the MachineHealthCheck and // MachineSet controllers will gracefully take the VM down. 
return true, "The Machine's VM pod is marked for eviction due to infra node drain.", nil } // The infrav1.KubevirtVMTerminalLabel is a way users or automation to mark // a VM as being in a terminal state that requires remediation. This is used // by the functional test suite to test remediation and can also be triggered // by users as a way to manually trigger remediation. terminalReason, ok := m.vmInstance.Labels[infrav1.KubevirtMachineVMTerminalLabel] if ok { return true, fmt.Sprintf("VM's %s label has the vm marked as being terminal with reason [%s]", infrav1.KubevirtMachineVMTerminalLabel, terminalReason), nil } // Also check the VMI for this label terminalReason, ok = m.vmiInstance.Labels[infrav1.KubevirtMachineVMTerminalLabel] if ok { return true, fmt.Sprintf("VMI's %s label has the vm marked as being terminal with reason [%s]", infrav1.KubevirtMachineVMTerminalLabel, terminalReason), nil } runStrategy, err := m.vmInstance.RunStrategy() if err != nil
switch runStrategy { case kubevirtv1.RunStrategyAlways: // VM should recover if it is down. return false, "", nil case kubevirtv1.RunStrategyManual: // If VM is manually controlled, we stay out of the loop return false, "", nil case kubevirtv1.RunStrategyHalted, kubevirtv1.RunStrategyOnce: if m.vmiInstance.IsFinal() { return true, "VMI has reached a permanent finalized state", nil } return false, "", nil case kubevirtv1.RunStrategyRerunOnFailure: // only recovers when vmi is failed if m.vmiInstance.Status.Phase == kubevirtv1.Succeeded { return true, "VMI has reached a permanent finalized state", nil } return false, "", nil } return false, "", nil } // Exists checks if the VM has been provisioned already. func (m *Machine) Exists() bool { return m.vmInstance != nil } // Create creates a new VM for this machine. func (m *Machine) Create(ctx gocontext.Context) error { m.machineContext.Logger.Info(fmt.Sprintf("Creating VM with role '%s'...", nodeRole(m.machineContext))) virtualMachine := newVirtualMachineFromKubevirtMachine(m.machineContext, m.namespace) mutateFn := func() (err error) { if virtualMachine.Labels == nil { virtualMachine.Labels = map[string]string{} } if virtualMachine.Spec.Template.ObjectMeta.Labels == nil { virtualMachine.Spec.Template.ObjectMeta.Labels = map[string]string{} } virtualMachine.Labels[clusterv1.ClusterLabelName] = m.machineContext.Cluster.Name virtualMachine.Labels[infrav1.KubevirtMachineNameLabel] = m.machineContext.KubevirtMachine.Name virtualMachine.Labels[infrav1.KubevirtMachineNamespaceLabel] = m.machineContext.KubevirtMachine.Namespace virtualMachine.Spec.Template.ObjectMeta.Labels[infrav1.KubevirtMachineNameLabel] = m.machineContext.KubevirtMachine.Name virtualMachine.Spec.Template.ObjectMeta.Labels[infrav1.KubevirtMachineNamespaceLabel] = m.machineContext.KubevirtMachine.Namespace return nil } if _, err := controllerutil.CreateOrUpdate(ctx, m.client, virtualMachine, mutateFn); err != nil { return err } return nil } // Returns if VMI has ready condition or not. func (m *Machine) hasReadyCondition() bool { if m.vmiInstance == nil { return false } for _, cond := range m.vmiInstance.Status.Conditions { if cond.Type == kubevirtv1.VirtualMachineInstanceReady && cond.Status == corev1.ConditionTrue { return true } } return false } // Address returns the IP address of the VM. func (m *Machine) Address() string { if m.vmiInstance != nil && len(m.vmiInstance.Status.Interfaces) > 0 { return m.vmiInstance.Status.Interfaces[0].IP } return "" } // IsReady checks if the VM is ready func (m *Machine) IsReady() bool { return m.hasReadyCondition() } // SupportsCheckingIsBootstrapped checks if we have a method of checking // that this bootstrapper has completed. func (m *Machine) SupportsCheckingIsBootstrapped() bool { // Right now, we can only check if bootstrapping has // completed if we are using a bootstrapper that allows // for us to inject ssh keys into the guest. if m.sshKeys != nil { return m.machineContext.HasInjectedCapkSSHKeys(m.sshKeys.PublicKey) } return false } // IsBootstrapped checks if the VM is bootstrapped with Kubernetes. 
func (m *Machine) IsBootstrapped() bool { // CheckStrategy value is already sanitized by apiserver switch m.machineContext.KubevirtMachine.Spec.BootstrapCheckSpec.CheckStrategy { case "none": // skip bootstrap check and always returns positively return true case "": fallthrough // ssh is default check strategy, fallthrough case "ssh": return m.IsBootstrappedWithSSH() default: // Since CRD CheckStrategy field is validated by an enum, this case should never be hit return false } } // IsBootstrappedWithSSH checks if the VM is bootstrapped with Kubernetes using SSH strategy. func (m *Machine) IsBootstrappedWithSSH() bool { if !m.IsReady() || m.sshKeys == nil { return false } executor := m.getCommandExecutor(m.Address(), m.sshKeys) output, err := executor.ExecuteCommand("cat /run/cluster-api/bootstrap-success.complete") if err != nil || output != "success" { return false } return true } // GenerateProviderID generates the KubeVirt provider ID to be used for the NodeRef func (m *Machine) GenerateProviderID() (string, error) { if m.vmiInstance == nil { return "", errors.New("Underlying Kubevirt VM is NOT running") } providerID := fmt.Sprintf("kubevirt://%s", m.machineContext.KubevirtMachine.Name) return providerID, nil } // Delete deletes VM for this machine. func (m *Machine) Delete() error { namespacedName := types.NamespacedName{Namespace: m.namespace, Name: m.machineContext.KubevirtMachine.Name} vm := &kubevirtv1.VirtualMachine{} if err := m.client.Get(m.machineContext.Context, namespacedName, vm); err != nil { if apierrors.IsNotFound(err) { m.machineContext.Logger.Info("VM does not exist, nothing to do.") return nil } return errors.Wrapf(err, "failed to retrieve VM to delete") } if err := m.client.Delete(gocontext.Background(), vm); err != nil { return errors.Wrapf(err, "failed to delete VM") } return nil } func (m *Machine) DrainNodeIfNeeded(wrkldClstr workloadcluster.WorkloadCluster) (time.Duration, error) { if m.vmiInstance == nil || !m.shouldGracefulDeleteVMI() { if _, anntExists := m.machineContext.KubevirtMachine.Annotations[infrav1.VmiDeletionGraceTime]; anntExists { if err := m.removeGracePeriodAnnotation(); err != nil { return 100 * time.Millisecond, err } } return 0, nil } exceeded, err := m.drainGracePeriodExceeded() if err != nil { return 0, err } if !exceeded { retryDuration, err := m.drainNode(wrkldClstr) if err != nil { return 0, err } if retryDuration > 0 { return retryDuration, nil } } // now, when the node is drained (or vmiDeleteGraceTimeoutDurationSeconds has passed), we can delete the VMI propagationPolicy := metav1.DeletePropagationForeground err = m.client.Delete(m.machineContext, m.vmiInstance, &client.DeleteOptions{PropagationPolicy: &propagationPolicy}) if err != nil { m.machineContext.Logger.Error(err, "failed to delete VirtualMachineInstance") return 0, err } if err = m.removeGracePeriodAnnotation(); err != nil { return 100 * time.Millisecond, err } // requeue to force reading the VMI again return time.Second * 10, nil } const removeGracePeriodAnnotationPatch = `[{"op": "remove", "path": "/metadata/annotations/` + infrav1.VmiDeletionGraceTimeEscape + `"}]` func (m *Machine) removeGracePeriodAnnotation() error { patch := client.RawPatch(types.JSONPatchType, []byte(removeGracePeriodAnnotationPatch)) if err := m.client.Patch(m.machineContext, m.machineContext.KubevirtMachine, patch); err != nil { return fmt.Errorf("failed to remove the %s annotation to the KubeVirtMachine %s; %w", infrav1.VmiDeletionGraceTime, m.machineContext.KubevirtMachine.Name, err) } return nil 
} func (m *Machine) shouldGracefulDeleteVMI() bool { if m.vmiInstance.DeletionTimestamp != nil { m.machineContext.Logger.V(4).Info("DrainNode: the virtualMachineInstance is already in deletion process. Nothing to do here") return false } if m.vmiInstance.Spec.EvictionStrategy == nil || *m.vmiInstance.Spec.EvictionStrategy != kubevirtv1.EvictionStrategyExternal { m.machineContext.Logger.V(4).Info("DrainNode: graceful deletion is not supported for virtualMachineInstance. Nothing to do here") return false } // KubeVirt will set the EvacuationNodeName field in case of guest node eviction. If the field is not set, there is // nothing to do. if len(m.vmiInstance.Status.EvacuationNodeName) == 0 { m.machineContext.Logger.V(4).Info("DrainNode: the virtualMachineInstance is not marked for deletion. Nothing to do here") return false } return true } // wait vmiDeleteGraceTimeoutDurationSeconds to the node to be drained. If this time had passed, don't wait anymore. func (m *Machine) drainGracePeriodExceeded() (bool, error) { if graceTime, found := m.machineContext.KubevirtMachine.Annotations[infrav1.VmiDeletionGraceTime]; found { deletionGraceTime, err := time.Parse(time.RFC3339, graceTime) if err != nil { // wrong format - rewrite if err = m.setVmiDeletionGraceTime(); err != nil { return false, err } } else { return time.Now().UTC().After(deletionGraceTime), nil } } else { if err := m.setVmiDeletionGraceTime(); err != nil { return false, err } } return false, nil } func (m *Machine) setVmiDeletionGraceTime() error { m.machineContext.Logger.Info(fmt.Sprintf("setting the %s annotation", infrav1.VmiDeletionGraceTime)) graceTime := time.Now().Add(vmiDeleteGraceTimeoutDurationSeconds * time.Second).UTC().Format(time.RFC3339) patch := fmt.Sprintf(`{"metadata":{"annotations":{"%s": "%s"}}}`, infrav1.VmiDeletionGraceTime, graceTime) patchRequest := client.RawPatch(types.MergePatchType, []byte(patch)) if err := m.client.Patch(m.machineContext, m.machineContext.KubevirtMachine, patchRequest); err != nil { return fmt.Errorf("failed to add the %s annotation to the KubeVirtMachine %s; %w", infrav1.VmiDeletionGraceTime, m.machineContext.KubevirtMachine.Name, err) } return nil } // This functions drains a node from a tenant cluster. // The function returns 3 values: // * drain done - boolean // * retry time, or 0 if not needed // * error - to be returned if we want to retry func (m *Machine) drainNode(wrkldClstr workloadcluster.WorkloadCluster) (time.Duration, error) { kubeClient, err := wrkldClstr.GenerateWorkloadClusterK8sClient(m.machineContext) if err != nil { m.machineContext.Logger.Error(err, "Error creating a remote client while deleting Machine, won't retry") return 0, fmt.Errorf("failed to get client to remote cluster; %w", err) } nodeName := m.vmiInstance.Status.EvacuationNodeName node, err := kubeClient.CoreV1().Nodes().Get(m.machineContext, nodeName, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { // If an admin deletes the node directly, we'll end up here. m.machineContext.Logger.Error(err, "Could not find node from noderef, it may have already been deleted") return 0, nil } return 0, fmt.Errorf("unable to get node %q: %w", nodeName, err) } drainer := &kubedrain.Helper{ Client: kubeClient, Ctx: m.machineContext, Force: true, IgnoreAllDaemonSets: true, DeleteEmptyDirData: true, GracePeriodSeconds: -1, // If a pod is not evicted in 20 seconds, retry the eviction next time the // machine gets reconciled again (to allow other machines to be reconciled). 
Timeout: 20 * time.Second, OnPodDeletedOrEvicted: func(pod *corev1.Pod, usingEviction bool) { verbStr := "Deleted" if usingEviction { verbStr = "Evicted" } m.machineContext.Logger.Info(fmt.Sprintf("%s pod from Node", verbStr), "pod", fmt.Sprintf("%s/%s", pod.Name, pod.Namespace)) }, Out: writer{m.machineContext.Logger.Info}, ErrOut: writer{func(msg string, keysAndValues ...interface{}) { m.machineContext.Logger.Error(nil, msg, keysAndValues...) }}, } if noderefutil.IsNodeUnreachable(node) { // When the node is unreachable and some pods are not evicted for as long as this timeout, we ignore them. drainer.SkipWaitForDeleteTimeoutSeconds = 60 * 5 // 5 minutes } if err = kubedrain.RunCordonOrUncordon(drainer, node, true); err != nil { // Machine will be re-reconciled after a cordon failure. m.machineContext.Logger.Error(err, "Cordon failed") return 0, errors.Errorf("unable to cordon node %s: %v", nodeName, err) } if err = kubedrain.RunNodeDrain(drainer, node.Name); err != nil { // Machine will be re-reconciled after a drain failure. m.machineContext.Logger.Error(err, "Drain failed, retry in a second", "node name", nodeName) return time.Second, nil } m.machineContext.Logger.Info("Drain successful", "node name", nodeName) return 0, nil } // writer implements io.Writer interface as a pass-through for klog. type writer struct { logFunc func(msg string, keysAndValues ...interface{}) } // Write passes string(p) into writer's logFunc and always returns len(p). func (w writer) Write(p []byte) (n int, err error) { w.logFunc(string(p)) return len(p), nil }
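// GenerateProviderID above formats the node's provider ID as "kubevirt://<machine-name>".
// A small standalone sketch of building and splitting that format; the machine name used
// here is an example value.
package main

import (
	"fmt"
	"strings"
)

const providerIDPrefix = "kubevirt://"

// buildProviderID mirrors the format produced by GenerateProviderID above.
func buildProviderID(machineName string) string {
	return providerIDPrefix + machineName
}

// machineNameFromProviderID is the reverse mapping; it reports false when the
// string does not carry the expected prefix.
func machineNameFromProviderID(providerID string) (string, bool) {
	if !strings.HasPrefix(providerID, providerIDPrefix) {
		return "", false
	}
	return strings.TrimPrefix(providerID, providerIDPrefix), true
}

func main() {
	id := buildProviderID("example-md-0-abcde") // example machine name
	fmt.Println(id)                             // kubevirt://example-md-0-abcde
	if name, ok := machineNameFromProviderID(id); ok {
		fmt.Println(name) // example-md-0-abcde
	}
}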
{ return false, "", err }
conditional_block
machine.go
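// The run-strategy switch inside IsTerminal above maps a VM's run strategy plus the VMI's
// finalized state onto a "needs remediation" decision. A self-contained sketch of that decision
// table, with plain strings standing in for the kubevirtv1 run-strategy constants.
package main

import "fmt"

// isTerminalFor reproduces the shape of the run-strategy switch above using plain strings
// in place of the kubevirtv1 constants (Always, Manual, Halted, Once, RerunOnFailure).
func isTerminalFor(runStrategy string, vmiFinal bool, vmiSucceeded bool) bool {
	switch runStrategy {
	case "Always", "Manual":
		// Always: the VM should come back on its own. Manual: stay out of the loop.
		return false
	case "Halted", "Once":
		// A finalized VMI will never be restarted under these strategies.
		return vmiFinal
	case "RerunOnFailure":
		// Only a successful (non-failed) final state is permanent; failures are rerun.
		return vmiSucceeded
	default:
		return false
	}
}

func main() {
	cases := []struct {
		strategy         string
		final, succeeded bool
	}{
		{"Always", true, false},
		{"Once", true, false},
		{"RerunOnFailure", true, true},
	}
	for _, c := range cases {
		fmt.Printf("%s final=%v succeeded=%v -> terminal=%v\n",
			c.strategy, c.final, c.succeeded, isTerminalFor(c.strategy, c.final, c.succeeded))
	}
}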
/* Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package kubevirt import ( gocontext "context" "fmt" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" kubedrain "k8s.io/kubectl/pkg/drain" kubevirtv1 "kubevirt.io/api/core/v1" "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/workloadcluster" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/noderefutil" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "time" infrav1 "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1" "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/context" "sigs.k8s.io/cluster-api-provider-kubevirt/pkg/ssh" ) const ( vmiDeleteGraceTimeoutDurationSeconds = 600 // 10 minutes ) // Machine implement a service for managing the KubeVirt VM hosting a kubernetes node. type Machine struct { client client.Client namespace string machineContext *context.MachineContext vmiInstance *kubevirtv1.VirtualMachineInstance vmInstance *kubevirtv1.VirtualMachine sshKeys *ssh.ClusterNodeSshKeys getCommandExecutor func(string, *ssh.ClusterNodeSshKeys) ssh.VMCommandExecutor } // NewMachine returns a new Machine service for the given context. func NewMachine(ctx *context.MachineContext, client client.Client, namespace string, sshKeys *ssh.ClusterNodeSshKeys) (*Machine, error) { machine := &Machine{ client: client, namespace: namespace, machineContext: ctx, vmiInstance: nil, vmInstance: nil, sshKeys: sshKeys, getCommandExecutor: ssh.NewVMCommandExecutor, } namespacedName := types.NamespacedName{Namespace: namespace, Name: ctx.KubevirtMachine.Name} vm := &kubevirtv1.VirtualMachine{} vmi := &kubevirtv1.VirtualMachineInstance{} // Get the active running VMI if it exists err := client.Get(ctx.Context, namespacedName, vmi) if err != nil { if !apierrors.IsNotFound(err) { return nil, err } } else { machine.vmiInstance = vmi } // Get the top level VM object if it exists err = client.Get(ctx.Context, namespacedName, vm) if err != nil { if !apierrors.IsNotFound(err) { return nil, err } } else { machine.vmInstance = vm } return machine, nil } // IsTerminal Reports back if the VM is either being requested to terminate or is terminate // in a way that it will never recover from. func (m *Machine) IsTerminal() (bool, string, error) { if m.vmInstance == nil || m.vmiInstance == nil { // vm/vmi hasn't been created yet return false, "", nil } // VMI is being asked to terminate gracefully due to node drain if !m.vmiInstance.IsFinal() && !m.vmiInstance.IsMigratable() && m.vmiInstance.Status.EvacuationNodeName != "" { // VM's infra node is being drained and VM is not live migratable. // We need to report a FailureReason so the MachineHealthCheck and // MachineSet controllers will gracefully take the VM down. 
return true, "The Machine's VM pod is marked for eviction due to infra node drain.", nil } // The infrav1.KubevirtVMTerminalLabel is a way users or automation to mark // a VM as being in a terminal state that requires remediation. This is used // by the functional test suite to test remediation and can also be triggered // by users as a way to manually trigger remediation. terminalReason, ok := m.vmInstance.Labels[infrav1.KubevirtMachineVMTerminalLabel] if ok { return true, fmt.Sprintf("VM's %s label has the vm marked as being terminal with reason [%s]", infrav1.KubevirtMachineVMTerminalLabel, terminalReason), nil } // Also check the VMI for this label terminalReason, ok = m.vmiInstance.Labels[infrav1.KubevirtMachineVMTerminalLabel] if ok { return true, fmt.Sprintf("VMI's %s label has the vm marked as being terminal with reason [%s]", infrav1.KubevirtMachineVMTerminalLabel, terminalReason), nil } runStrategy, err := m.vmInstance.RunStrategy() if err != nil { return false, "", err } switch runStrategy { case kubevirtv1.RunStrategyAlways: // VM should recover if it is down. return false, "", nil case kubevirtv1.RunStrategyManual: // If VM is manually controlled, we stay out of the loop return false, "", nil case kubevirtv1.RunStrategyHalted, kubevirtv1.RunStrategyOnce: if m.vmiInstance.IsFinal() { return true, "VMI has reached a permanent finalized state", nil } return false, "", nil case kubevirtv1.RunStrategyRerunOnFailure: // only recovers when vmi is failed if m.vmiInstance.Status.Phase == kubevirtv1.Succeeded { return true, "VMI has reached a permanent finalized state", nil } return false, "", nil } return false, "", nil } // Exists checks if the VM has been provisioned already. func (m *Machine) Exists() bool { return m.vmInstance != nil } // Create creates a new VM for this machine. func (m *Machine) Create(ctx gocontext.Context) error { m.machineContext.Logger.Info(fmt.Sprintf("Creating VM with role '%s'...", nodeRole(m.machineContext))) virtualMachine := newVirtualMachineFromKubevirtMachine(m.machineContext, m.namespace) mutateFn := func() (err error) { if virtualMachine.Labels == nil { virtualMachine.Labels = map[string]string{} } if virtualMachine.Spec.Template.ObjectMeta.Labels == nil { virtualMachine.Spec.Template.ObjectMeta.Labels = map[string]string{} } virtualMachine.Labels[clusterv1.ClusterLabelName] = m.machineContext.Cluster.Name virtualMachine.Labels[infrav1.KubevirtMachineNameLabel] = m.machineContext.KubevirtMachine.Name virtualMachine.Labels[infrav1.KubevirtMachineNamespaceLabel] = m.machineContext.KubevirtMachine.Namespace virtualMachine.Spec.Template.ObjectMeta.Labels[infrav1.KubevirtMachineNameLabel] = m.machineContext.KubevirtMachine.Name virtualMachine.Spec.Template.ObjectMeta.Labels[infrav1.KubevirtMachineNamespaceLabel] = m.machineContext.KubevirtMachine.Namespace return nil } if _, err := controllerutil.CreateOrUpdate(ctx, m.client, virtualMachine, mutateFn); err != nil { return err } return nil } // Returns if VMI has ready condition or not. func (m *Machine) hasReadyCondition() bool { if m.vmiInstance == nil { return false } for _, cond := range m.vmiInstance.Status.Conditions { if cond.Type == kubevirtv1.VirtualMachineInstanceReady && cond.Status == corev1.ConditionTrue { return true } } return false } // Address returns the IP address of the VM. 
func (m *Machine) Address() string { if m.vmiInstance != nil && len(m.vmiInstance.Status.Interfaces) > 0 { return m.vmiInstance.Status.Interfaces[0].IP } return "" } // IsReady checks if the VM is ready func (m *Machine) IsReady() bool { return m.hasReadyCondition() } // SupportsCheckingIsBootstrapped checks if we have a method of checking // that this bootstrapper has completed. func (m *Machine) SupportsCheckingIsBootstrapped() bool { // Right now, we can only check if bootstrapping has // completed if we are using a bootstrapper that allows // for us to inject ssh keys into the guest. if m.sshKeys != nil { return m.machineContext.HasInjectedCapkSSHKeys(m.sshKeys.PublicKey) } return false } // IsBootstrapped checks if the VM is bootstrapped with Kubernetes. func (m *Machine) IsBootstrapped() bool { // CheckStrategy value is already sanitized by apiserver switch m.machineContext.KubevirtMachine.Spec.BootstrapCheckSpec.CheckStrategy { case "none": // skip bootstrap check and always returns positively return true case "": fallthrough // ssh is default check strategy, fallthrough case "ssh": return m.IsBootstrappedWithSSH() default: // Since CRD CheckStrategy field is validated by an enum, this case should never be hit return false } } // IsBootstrappedWithSSH checks if the VM is bootstrapped with Kubernetes using SSH strategy. func (m *Machine) IsBootstrappedWithSSH() bool { if !m.IsReady() || m.sshKeys == nil { return false } executor := m.getCommandExecutor(m.Address(), m.sshKeys) output, err := executor.ExecuteCommand("cat /run/cluster-api/bootstrap-success.complete") if err != nil || output != "success" { return false } return true } // GenerateProviderID generates the KubeVirt provider ID to be used for the NodeRef func (m *Machine) GenerateProviderID() (string, error) { if m.vmiInstance == nil { return "", errors.New("Underlying Kubevirt VM is NOT running") } providerID := fmt.Sprintf("kubevirt://%s", m.machineContext.KubevirtMachine.Name) return providerID, nil } // Delete deletes VM for this machine. 
func (m *Machine) Delete() error { namespacedName := types.NamespacedName{Namespace: m.namespace, Name: m.machineContext.KubevirtMachine.Name} vm := &kubevirtv1.VirtualMachine{} if err := m.client.Get(m.machineContext.Context, namespacedName, vm); err != nil { if apierrors.IsNotFound(err) { m.machineContext.Logger.Info("VM does not exist, nothing to do.") return nil } return errors.Wrapf(err, "failed to retrieve VM to delete") } if err := m.client.Delete(gocontext.Background(), vm); err != nil { return errors.Wrapf(err, "failed to delete VM") } return nil } func (m *Machine) DrainNodeIfNeeded(wrkldClstr workloadcluster.WorkloadCluster) (time.Duration, error) { if m.vmiInstance == nil || !m.shouldGracefulDeleteVMI() { if _, anntExists := m.machineContext.KubevirtMachine.Annotations[infrav1.VmiDeletionGraceTime]; anntExists { if err := m.removeGracePeriodAnnotation(); err != nil { return 100 * time.Millisecond, err } } return 0, nil } exceeded, err := m.drainGracePeriodExceeded() if err != nil { return 0, err } if !exceeded { retryDuration, err := m.drainNode(wrkldClstr) if err != nil { return 0, err } if retryDuration > 0 { return retryDuration, nil } } // now, when the node is drained (or vmiDeleteGraceTimeoutDurationSeconds has passed), we can delete the VMI propagationPolicy := metav1.DeletePropagationForeground err = m.client.Delete(m.machineContext, m.vmiInstance, &client.DeleteOptions{PropagationPolicy: &propagationPolicy}) if err != nil { m.machineContext.Logger.Error(err, "failed to delete VirtualMachineInstance") return 0, err } if err = m.removeGracePeriodAnnotation(); err != nil { return 100 * time.Millisecond, err } // requeue to force reading the VMI again return time.Second * 10, nil } const removeGracePeriodAnnotationPatch = `[{"op": "remove", "path": "/metadata/annotations/` + infrav1.VmiDeletionGraceTimeEscape + `"}]` func (m *Machine) removeGracePeriodAnnotation() error { patch := client.RawPatch(types.JSONPatchType, []byte(removeGracePeriodAnnotationPatch)) if err := m.client.Patch(m.machineContext, m.machineContext.KubevirtMachine, patch); err != nil { return fmt.Errorf("failed to remove the %s annotation to the KubeVirtMachine %s; %w", infrav1.VmiDeletionGraceTime, m.machineContext.KubevirtMachine.Name, err) } return nil } func (m *Machine) shouldGracefulDeleteVMI() bool { if m.vmiInstance.DeletionTimestamp != nil { m.machineContext.Logger.V(4).Info("DrainNode: the virtualMachineInstance is already in deletion process. Nothing to do here") return false } if m.vmiInstance.Spec.EvictionStrategy == nil || *m.vmiInstance.Spec.EvictionStrategy != kubevirtv1.EvictionStrategyExternal { m.machineContext.Logger.V(4).Info("DrainNode: graceful deletion is not supported for virtualMachineInstance. Nothing to do here") return false } // KubeVirt will set the EvacuationNodeName field in case of guest node eviction. If the field is not set, there is // nothing to do. if len(m.vmiInstance.Status.EvacuationNodeName) == 0 { m.machineContext.Logger.V(4).Info("DrainNode: the virtualMachineInstance is not marked for deletion. Nothing to do here") return false } return true } // wait vmiDeleteGraceTimeoutDurationSeconds to the node to be drained. If this time had passed, don't wait anymore. 
func (m *Machine) drainGracePeriodExceeded() (bool, error) { if graceTime, found := m.machineContext.KubevirtMachine.Annotations[infrav1.VmiDeletionGraceTime]; found { deletionGraceTime, err := time.Parse(time.RFC3339, graceTime) if err != nil { // wrong format - rewrite if err = m.setVmiDeletionGraceTime(); err != nil { return false, err } } else { return time.Now().UTC().After(deletionGraceTime), nil } } else { if err := m.setVmiDeletionGraceTime(); err != nil { return false, err } } return false, nil } func (m *Machine) setVmiDeletionGraceTime() error
// This function drains a node from a tenant cluster.
// The function returns 2 values:
// * retry time, or 0 if no retry is needed
// * error - to be returned if we want to retry
func (m *Machine) drainNode(wrkldClstr workloadcluster.WorkloadCluster) (time.Duration, error) {
	kubeClient, err := wrkldClstr.GenerateWorkloadClusterK8sClient(m.machineContext)
	if err != nil {
		m.machineContext.Logger.Error(err, "Error creating a remote client while deleting Machine, won't retry")
		return 0, fmt.Errorf("failed to get client to remote cluster; %w", err)
	}

	nodeName := m.vmiInstance.Status.EvacuationNodeName
	node, err := kubeClient.CoreV1().Nodes().Get(m.machineContext, nodeName, metav1.GetOptions{})
	if err != nil {
		if apierrors.IsNotFound(err) {
			// If an admin deletes the node directly, we'll end up here.
			m.machineContext.Logger.Error(err, "Could not find node from noderef, it may have already been deleted")
			return 0, nil
		}
		return 0, fmt.Errorf("unable to get node %q: %w", nodeName, err)
	}

	drainer := &kubedrain.Helper{
		Client:              kubeClient,
		Ctx:                 m.machineContext,
		Force:               true,
		IgnoreAllDaemonSets: true,
		DeleteEmptyDirData:  true,
		GracePeriodSeconds:  -1,
		// If a pod is not evicted in 20 seconds, retry the eviction next time the
		// machine gets reconciled again (to allow other machines to be reconciled).
		Timeout: 20 * time.Second,
		OnPodDeletedOrEvicted: func(pod *corev1.Pod, usingEviction bool) {
			verbStr := "Deleted"
			if usingEviction {
				verbStr = "Evicted"
			}
			m.machineContext.Logger.Info(fmt.Sprintf("%s pod from Node", verbStr),
				"pod", fmt.Sprintf("%s/%s", pod.Name, pod.Namespace))
		},
		Out: writer{m.machineContext.Logger.Info},
		ErrOut: writer{func(msg string, keysAndValues ...interface{}) {
			m.machineContext.Logger.Error(nil, msg, keysAndValues...)
		}},
	}

	if noderefutil.IsNodeUnreachable(node) {
		// When the node is unreachable and some pods are not evicted for as long as this timeout, we ignore them.
		drainer.SkipWaitForDeleteTimeoutSeconds = 60 * 5 // 5 minutes
	}

	if err = kubedrain.RunCordonOrUncordon(drainer, node, true); err != nil {
		// Machine will be re-reconciled after a cordon failure.
		m.machineContext.Logger.Error(err, "Cordon failed")
		return 0, errors.Errorf("unable to cordon node %s: %v", nodeName, err)
	}

	if err = kubedrain.RunNodeDrain(drainer, node.Name); err != nil {
		// Machine will be re-reconciled after a drain failure.
		m.machineContext.Logger.Error(err, "Drain failed, retry in a second", "node name", nodeName)
		return time.Second, nil
	}

	m.machineContext.Logger.Info("Drain successful", "node name", nodeName)
	return 0, nil
}

// writer implements the io.Writer interface as a pass-through for klog.
type writer struct {
	logFunc func(msg string, keysAndValues ...interface{})
}

// Write passes string(p) into writer's logFunc and always returns len(p).
func (w writer) Write(p []byte) (n int, err error) {
	w.logFunc(string(p))
	return len(p), nil
}
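// drainGracePeriodExceeded and setVmiDeletionGraceTime above cooperate through a single RFC 3339
// timestamp stored in an annotation: the deadline is written once as now plus the grace period,
// and later reconciles only parse and compare it. A standalone sketch of that round trip; the
// 600-second value mirrors vmiDeleteGraceTimeoutDurationSeconds.
package main

import (
	"fmt"
	"time"
)

const graceSeconds = 600 // mirrors vmiDeleteGraceTimeoutDurationSeconds (10 minutes)

// newDeadline formats "now + grace" the same way setVmiDeletionGraceTime stores it.
func newDeadline(now time.Time) string {
	return now.Add(graceSeconds * time.Second).UTC().Format(time.RFC3339)
}

// exceeded parses the stored deadline and reports whether it has passed,
// mirroring the happy path of drainGracePeriodExceeded.
func exceeded(stored string, now time.Time) (bool, error) {
	deadline, err := time.Parse(time.RFC3339, stored)
	if err != nil {
		return false, err // the controller rewrites the annotation in this case
	}
	return now.UTC().After(deadline), nil
}

func main() {
	start := time.Now()
	stored := newDeadline(start)
	early, _ := exceeded(stored, start.Add(1*time.Minute))
	late, _ := exceeded(stored, start.Add(11*time.Minute))
	fmt.Println(stored, early, late) // <timestamp> false true
}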
{ m.machineContext.Logger.Info(fmt.Sprintf("setting the %s annotation", infrav1.VmiDeletionGraceTime)) graceTime := time.Now().Add(vmiDeleteGraceTimeoutDurationSeconds * time.Second).UTC().Format(time.RFC3339) patch := fmt.Sprintf(`{"metadata":{"annotations":{"%s": "%s"}}}`, infrav1.VmiDeletionGraceTime, graceTime) patchRequest := client.RawPatch(types.MergePatchType, []byte(patch)) if err := m.client.Patch(m.machineContext, m.machineContext.KubevirtMachine, patchRequest); err != nil { return fmt.Errorf("failed to add the %s annotation to the KubeVirtMachine %s; %w", infrav1.VmiDeletionGraceTime, m.machineContext.KubevirtMachine.Name, err) } return nil }
identifier_body
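// The grace-time annotation itself is added with a JSON merge patch (types.MergePatchType), so
// the raw annotation key is used as an object key and needs no JSON Pointer escaping, unlike the
// remove patch. A standalone sketch that builds the same patch body with encoding/json instead
// of fmt.Sprintf, which also keeps the key and value properly quoted; the key shown is hypothetical.
package main

import (
	"encoding/json"
	"fmt"
)

// annotationMergePatch builds a body of the shape {"metadata":{"annotations":{"<key>":"<value>"}}}.
func annotationMergePatch(key, value string) ([]byte, error) {
	body := map[string]interface{}{
		"metadata": map[string]interface{}{
			"annotations": map[string]string{key: value},
		},
	}
	return json.Marshal(body)
}

func main() {
	// Hypothetical key; the real one is infrav1.VmiDeletionGraceTime.
	patch, err := annotationMergePatch("infrastructure.cluster.x-k8s.io/vmi-deletion-grace-time", "2021-07-11T10:00:00Z")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch))
}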
header.go
package member_role import ( "net/http" "fmt" "text/template" config "../../config" "strconv" "encoding/json" datatables "../../datatables" login "../../login" "strings" ) type Profile struct { Message string // error status output or validated data Status bool //pass or fail ID string } func HListHandler(w http.ResponseWriter, r *http.Request) { login.Session_renew(w,r) //fmt.Fprint(w,"mars test here") //fmt.Fprint(w, r.URL.Query().Get("rights")) rights :=r.URL.Query().Get("rights") //rights :="static here" last_start :=r.URL.Query().Get("last_start") last_length :=r.URL.Query().Get("last_length") last_search :=r.URL.Query().Get("last_search") dec_rights := config.Decrypt(config.Encrypt_key,rights) //dec_rights :="static here" tconf := make(map[string]string) ln := r.URL.Query().Get("length") tconf["Panel_name"] ="Member Role Record" tconf["Add_new_button"] ="true" tconf["Rights"] = rights tconf["Add_back_button"] ="false" tconf["Add_new_button_details"] ="false" tconf["upload_button"] ="false" tconf["Add_new_button_url"] ="/administrator/member_role/HaddHandler?rights="+rights tconf["Action_baseURL"] ="/administrator/member_role?rights="+rights tconf["ActionCol_param"] ="ID" tconf["hdr_id"] = "0" tconf["Interpret_button"] ="false" tconf["Add_post_button"] ="false" tconf["Add_post_button_url"] ="" tconf["Add_cancel_button"] ="false" tconf["Add_cancel_button_url"] ="" tconf["level"] ="1" tconf["pageLength"] =last_length tconf["displayStart"] =last_start tconf["last_search"] = last_search if last_length==""{ tconf["pageLength"] ="10" tconf["displayStart"] ="0" } var header = []string{"Action", "ID", "Role Name", "Member Name"} //column header if ln=="" { // content access here if( strings.Contains(dec_rights, "HAdd") ){ tconf["Add_new_button"] ="true" //fmt.Println("run herer") } if( strings.Contains(dec_rights, "Dadd ") ){ } //end of content accesss var ajaxURLdata = "/administrator/member_role?rights="+rights type TblConf struct{ Headercol []string AjaxUrl string Tempconf map[string]string } tmpl := template.New("table.html") var err error //if tmpl, err = tmpl.ParseFiles("hris/assignments/assignment_list.html"); err != nil { if tmpl, err = tmpl.ParseFiles("admin/member_role/table.html"); err != nil { fmt.Println(err) } err1 := tmpl.Execute(w,&TblConf{header,ajaxURLdata,tconf}) if err1 != nil { http.Error(w, err1.Error(), http.StatusInternalServerError) } }else{ dr := r.URL.Query().Get("draw") sr := r.URL.Query().Get("search[value]") fmt.Println(sr) srt1,_ :=strconv.Atoi(r.URL.Query().Get("start")) ln1,_ :=strconv.Atoi(r.URL.Query().Get("length")) srt1 = (srt1 / ln1) + 1 str := strconv.Itoa(srt1) sortcol := r.URL.Query().Get("order[0][column]") if sortcol=="0"{ sortcol="1" } srtcolindexInt,_ :=strconv.Atoi(sortcol) var sortColName string for key , value := range header{ if ( srtcolindexInt==key){ sortColName = value } } fmt.Println(sortColName) tconf["ActionCol"] ="true" // config for action column tconf["ActionCol_param"] ="ID" // config for parameter of action tconf["ActionCol_edit"] ="true" // config for edit click tconf["ActionCol_edit_is_modal"] ="false" // config for edit click //tconf["ActionCol_edit_url"] ="/timekeeping/overtime_logs/OvertimeLogsHeaderEdit?rights="+rights+"&h_id=" // config for edit click tconf["ActionCol_edit_url"] ="/administrator/member_role/HEditHandler?rights="+rights+"&h_id=" // config for edit click tconf["ActionCol_delete"] ="true" // config for delete click tconf["ActionCol_delete_url"] ="/administrator/member_role/HDeleteHandler?h_id=" 
tconf["ActionCol_detail"] ="false" // config for details click tconf["ActionCol_add_child"] ="false" // config for addchild click tconf["style_size"] ="12" tconf["style_font"] ="" tconf["format_number_col"] ="Amount" tconf["dr"]=dr //draw //geting total record etc // rights for tables if( strings.Contains(dec_rights, "HEdit") ){ tconf["ActionCol_edit"] ="true" } if( strings.Contains(dec_rights, "HDelete") ){ tconf["ActionCol_delete"] ="true" // config for delete click } if( strings.Contains(dec_rights, "HDetails") ){ tconf["ActionCol_detail"] ="true" // config for delete click } //end rights for tables //_,session_user_id := login.Get_account_info(r) session_user_id := 1 //static here session_user_id_str := strconv.Itoa(session_user_id) fmt.Println(session_user_id_str) /* fmt.Println("exec LBR_LogHdr_List 1, 2, 1, 1, 1, '"+sortColName+"', '', '"+sr+"' " ) fmt.Println(sortColName,str,sr)*/ //fmt.Println(`exec LBR_OTHdr_List 1, 2, `+session_user_id_str+` , `+str+`, `+ln+`, '`+sortColName+`', '', '`+sr+`'`) //tconf["sql_total"] = `exec LBR_OTHdr_List 1, 2, `+session_user_id_str+` , `+str+`, `+ln+`, '`+sortColName+`', '', '`+sr+`'` //tconf["sql_data"] = `exec LBR_OTHdr_List 0, 2, `+session_user_id_str+` , `+str+`, `+ln+`, '`+sortColName+`', '', '`+sr+`'` //tconf["sql_total"] = `exec dailysumhdr_list 1, 0, 0 , `+str+`, `+ln+`, '`+sortcol+`','','`+sr+`'` //tconf["sql_data"] = `exec dailysumhdr_list 0, 0, 0 , `+str+`, `+ln+`, '`+sortcol+`','','`+sr+`'` Org_id :=login.Get_session_org_id(r) str_OrgID :=strconv.Itoa(Org_id) tconf["sql_total"] = `OrgMemRol_List 1, `+str_OrgID+`, `+str+`, `+ln+`, '`+sr+`'` tconf["sql_data"] = `OrgMemRol_List 0, `+str_OrgID+`, `+str+`, `+ln+`, '`+sr+`'` datatables.DatatableretArray(w,tconf) } } type RetMessage struct { RetNo int RetMsg string } func DoAdd(branch string, date string, remarks string , username string) (bool ,int) { //rowret ,err, _,_ := config.Ap_sql(`exec LBR_OTHdr_Save 'Add', '`+username+`', 0, '`+trandate+`', `+lbr_assign+`, '`+remarks+`'`,1) rowret ,err, _,_ := config.Ap_sql(`exec DailySumHdr_Save 'Add', 'Mars', 0, 1, `+branch+`, '`+date+`', '`+remarks+`'`,1) if err != nil { panic(err.Error()) } var r RetMessage if err != nil { fmt.Println(err.Error) panic(err.Error()) } for rowret.Next() { err = rowret.Scan(&r.RetNo,&r.RetMsg) if err != nil { panic(err.Error()) } r = RetMessage{r.RetNo,r.RetMsg} } if( strings.Contains(r.RetMsg, "Success") ){ return true ,r.RetNo }else{ return false ,r.RetNo } } var local_FuncMap = template.FuncMap{ "Sql_list": func(s_qry string , org_id string, tag_id string) [][]string{ fmt.Println(`sis_itemtags_get 1, `+org_id+`, 0 ,`+tag_id) retdata := datatables.DataList(`sis_itemtags_get 1, `+org_id+`, 0 ,`+tag_id) return retdata }, } func HAddHandler(w http.ResponseWriter, r *http.Request) { login.Session_renew(w,r) username, ses_uID := login.Get_account_info(r) //username := "static" //ses_uID := 1 str_ses_uID :=strconv.Itoa(ses_uID) rights :=r.URL.Query().Get("rights") fmt.Println(str_ses_uID) //fmt.Fprint(w,rights) if r.Method =="GET" { Org_id :=login.Get_session_org_id(r) str_OrgID :=strconv.Itoa(Org_id) tconf := make(map[string]string) //tconf["parentID"] =r.URL.Query().Get("parentID") tconf["username"] =username tconf["org_id"] = str_OrgID
tconf["test_js"] = `alert("from webserver")` arr_sysrole := datatables.DataList(`sysrole_get 2`) type Data struct { Rights string Conf map[string]string Arr_Sysrole [][]string } tmpl := template.New("Hadd.html").Funcs(local_FuncMap) var err error if tmpl, err = tmpl.ParseFiles("admin/member_role/Hadd.html"); err != nil { fmt.Println(err) } err1 := tmpl.Execute(w,&Data{rights , tconf ,arr_sysrole}) if err1 != nil { http.Error(w, err1.Error(), http.StatusInternalServerError) } }else { r.ParseForm() add_status, lastinsertedID := DoAdd(r.Form["branch"][0],r.Form["date"][0],r.Form["remarks"][0],username) if(add_status){ str_lastinsertedID :=strconv.Itoa(lastinsertedID) profile := Profile{"Data Successfully added ",true ,str_lastinsertedID} js, err := json.Marshal(profile) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } fmt.Println("Data Successfully added") w.Header().Set("Content-Type", "application/json") w.Write(js) }else{ profile := Profile{"Oppsss something went wrong",false,""} js, err := json.Marshal(profile) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } fmt.Println("Oppsss something went wrong") w.Header().Set("Content-Type", "application/json") w.Write(js) } } } func HAddTagHandler(w http.ResponseWriter, r *http.Request) { if r.Method =="POST" { r.ParseForm() item_id := r.Form["item_id"][0] username, _ := login.Get_account_info(r) Org_id :=login.Get_session_org_id(r) str_OrgID :=strconv.Itoa(Org_id) var returnData[] string for key ,_ := range r.Form["tag"] { tag := r.Form["tag"][key] value_input := r.Form["value_input"][key] remarks := r.Form["remarks"][key] if(value_input!=""){ arr_data_itemtag := datatables.Data_row(`SIS_itemtags_save 'Update', '`+username+`',`+str_OrgID+`,`+item_id+`, `+tag+`, '`+value_input+`', '`+remarks+`'` ) fmt.Println(arr_data_itemtag) returnData = append(returnData, arr_data_itemtag[0]) returnData = append(returnData, arr_data_itemtag[1]) if(strings.Contains(arr_data_itemtag[1] , `Error`)){ returnData = append(returnData, `HasError`) } } } js, err := json.Marshal(returnData) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } w.Header().Set("Content-Type", "application/json") w.Write(js) } } //edit here type LBR_OTHdr struct{ ID int Status string Trandate interface{} Lbr_assign int Remarks interface{} } type Dailysumhdr_get struct{ ID int Branch interface{} Docdate interface{} Remarks interface{} } func LBR_OTHdr_Get_id( Hdr_id string ) Dailysumhdr_get { //db_raw ,err, _,_ := config.Ap_sql(`LBR_OTHdr_Get 1 ,`+Hdr_id,1) db_raw ,err, _,_ := config.Ap_sql(`dailysumhdr_get 1,`+Hdr_id,1) if err != nil { panic(err.Error()) } var r Dailysumhdr_get for db_raw.Next() { err = db_raw.Scan(&r.ID, &r.Branch,&r.Docdate,&r.Remarks) if err != nil { panic(err.Error()) } } return r } func HEditHandler(w http.ResponseWriter, r *http.Request) { login.Session_renew(w,r) rights :=r.URL.Query().Get("rights") Org_id :=login.Get_session_org_id(r) str_OrgID :=strconv.Itoa(Org_id) //rights :="rights" if r.Method =="GET" { username, _ := login.Get_account_info(r) tconf := make(map[string]string) tconf["h_id"] =r.URL.Query().Get("h_id") tconf["rights"]=rights tconf["username"] = username tconf["org_id"] = str_OrgID //tconf["data"] = datatables.DataList(sql) //tconf["Assign_ID"] = strconv.Itoa( LBR_LogHdr_Get_id(tconf["h_id"]).Lbr_assign ) //_,session_user_id := login.Get_account_info(r) session_user_id := 1 //static here session_user_id_str := strconv.Itoa(session_user_id) 
fmt.Println(session_user_id_str) type Data struct { Rights string Conf map[string]string } //arr_data := datatables.Data_row(`exec branch_get 1, 1,`+ tconf["h_id"] ) arr_data := datatables.Data_row(`select orgMem.id orgmemID ,orgMem.member MemberID , orgMem.status, orgMem.remarks , lastname, firstname,middlename,Username,txdate from member inner join orgMem on orgMem.member=member.id where orgMem.id= `+tconf["h_id"] ) fmt.Println(arr_data) tconf["orgmemID"] = arr_data[0] tconf["memberID"] = arr_data[1] tconf["status"] = arr_data[2] tconf["remarks"] = arr_data[3] tconf["lastname"] = arr_data[4] tconf["firstname"] = arr_data[5] tconf["middlename"] = arr_data[6] tconf["username"] = arr_data[7] tconf["txdate"] = arr_data[8] //arr_data_itemclass := datatables.DataList(`select id,dbo.SIS_Itemclass_Name(1, 3, id) [itemclass_name] from sis_itemclass` ) //arr_data_supplier := datatables.DataList(`select id,dbo.SIS_Supplier_Name(1, 3, id) [supplier_name] from SIS_Supplier` ) tmpl := template.New("Hedit.html").Funcs(local_FuncMap) var err error if tmpl, err = tmpl.ParseFiles("admin/member_role/Hedit.html"); err != nil { fmt.Println(err) } err1 := tmpl.Execute( w,&Data{rights,tconf} ) if err1 != nil { http.Error(w, err1.Error(), http.StatusInternalServerError) } }else{ //session_username := `static` r.ParseForm() //_ ,err, ex_stat,_ := config.Ap_sql(`exec LBR_OTHdr_Save 'Edit', '`+session_username+`', `+r.Form["h_id"][0] +`, '`+r.Form["trandate"][0]+`', `+r.Form["lbr_assign"][0] +`, '`+r.Form["remarks"][0]+`'`,3) //_ ,err, ex_stat,_ := config.Ap_sql(`exec LBR_OTHdr_Save 'Edit', '`+session_username+`', `+r.Form["h_id"][0] +`, '`+r.Form["trandate"][0]+`', `+r.Form["lbr_assign"][0] +`, '`+r.Form["remarks"][0]+`'`,3) _ ,err, ex_stat,_ := config.Ap_sql(`exec DailySumHdr_Save 'Add', 'Mars', `+r.Form["h_id"][0] +` , 1, `+r.Form["branch"][0]+`, '`+r.Form["date"][0]+`', '`+r.Form["remarks"][0]+`'`,3) //exec LBR_LogHdr_Save 'Edit', 'Ian', 3, '11 Jul 2016', 1, 'logfile abc', 'device abc', 'remarks abc' if err != nil { fmt.Println(err.Error) panic(err.Error()) } //lastinsertedID, _ := res.LastInsertId() if ex_stat == true{ profile := Profile{"Data Successfully Update ",true ,""} js, err := json.Marshal(profile) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } fmt.Println("Data Successfully Edited") w.Header().Set("Content-Type", "application/json") w.Write(js) }else{ profile := Profile{"Oppsss something went wrong",false,""} js, err := json.Marshal(profile) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } fmt.Println("Oppsss something went wrong") w.Header().Set("Content-Type", "application/json") w.Write(js) } } } func HDeleteHandler(w http.ResponseWriter, r *http.Request) { login.Session_renew(w,r) if r.Method =="GET" { tconf := make(map[string]string) tconf["h_id"] = r.URL.Query().Get("h_id") tconf["delete_url"] = "/administrator/member_role/HDeleteHandler" tmpl := template.New("modal_delete_loghdr.html") var err error if tmpl, err = tmpl.ParseFiles("admin/member_role/modal_delete_loghdr.html"); err != nil { fmt.Println(err) } err = tmpl.Execute(w,tconf) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } }else{ //session_username,_ := login.Get_account_info(r) r.ParseForm() username, _ := login.Get_account_info(r) //_ ,err, _,_ := config.Ap_sql(`exec LBR_OTHdr_Save 'Delete', '`+session_username+`', `+r.Form["h_id"][0],3) //_ ,err, _,_ := config.Ap_sql(`Supplier_Save 'Delete','programmer',1, ` +r.Form["h_id"][0] ,3) 
//_ ,err, _,_ := config.Ap_sql(`SIS_ItemClass_Save 'Delete','programmer',` +r.Form["h_id"][0] ,3) arr_data := datatables.Data_row(`OrgMemRol_Save 'Delete','`+username+`',` +r.Form["h_id"][0] ) /* fmt.Println(arr_data) tconf["item_id"] = arr_data[0] tconf["item_org"] = arr_data[1] tconf["item_name"] = arr_data[2] tconf["item_parent"] = arr_data[3] tconf["item_parent_name"] = arr_data[4] tconf["item_remarks"] = arr_data[5] */ js, err := json.Marshal(arr_data) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } w.Header().Set("Content-Type", "application/json") w.Write(js) /*if err != nil { fmt.Println(err.Error) http.Error(w, err.Error(), http.StatusInternalServerError) panic(err.Error()) }*/ } }
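// HListHandler above turns DataTables' zero-based start offset and page length into the 1-based
// page number that the list stored procedures expect, via (start / length) + 1. A standalone
// sketch of that conversion, with a small guard against a zero length.
package main

import "fmt"

// pageNumber converts DataTables paging parameters (zero-based start offset and page length)
// into the 1-based page number used by the list stored procedures. A non-positive length
// falls back to page 1 instead of dividing by zero.
func pageNumber(start, length int) int {
	if length <= 0 {
		return 1
	}
	return (start / length) + 1
}

func main() {
	fmt.Println(pageNumber(0, 10))  // 1 (first page)
	fmt.Println(pageNumber(20, 10)) // 3 (rows 20-29)
	fmt.Println(pageNumber(25, 25)) // 2
}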
random_line_split
header.go
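// local_FuncMap in header.go registers a custom Sql_list helper that templates call while
// rendering to pull extra rows. A standalone sketch of the same mechanism with text/template;
// the helper below returns hard-coded rows instead of querying the database, and its signature
// is simplified for the example.
package main

import (
	"os"
	"text/template"
)

func main() {
	// Stand-in for Sql_list: it returns rows as [][]string, like datatables.DataList does.
	funcs := template.FuncMap{
		"Sql_list": func(query string) [][]string {
			return [][]string{{"1", "Admin"}, {"2", "Member"}}
		},
	}

	// Funcs must be attached before Parse so the template can resolve the helper name.
	tmpl := template.Must(template.New("roles").Funcs(funcs).Parse(
		`{{range Sql_list "sysrole_get 2"}}<option value="{{index . 0}}">{{index . 1}}</option>
{{end}}`))

	if err := tmpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}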
package member_role import ( "net/http" "fmt" "text/template" config "../../config" "strconv" "encoding/json" datatables "../../datatables" login "../../login" "strings" ) type Profile struct { Message string // error status output or validated data Status bool //pass or fail ID string } func HListHandler(w http.ResponseWriter, r *http.Request) { login.Session_renew(w,r) //fmt.Fprint(w,"mars test here") //fmt.Fprint(w, r.URL.Query().Get("rights")) rights :=r.URL.Query().Get("rights") //rights :="static here" last_start :=r.URL.Query().Get("last_start") last_length :=r.URL.Query().Get("last_length") last_search :=r.URL.Query().Get("last_search") dec_rights := config.Decrypt(config.Encrypt_key,rights) //dec_rights :="static here" tconf := make(map[string]string) ln := r.URL.Query().Get("length") tconf["Panel_name"] ="Member Role Record" tconf["Add_new_button"] ="true" tconf["Rights"] = rights tconf["Add_back_button"] ="false" tconf["Add_new_button_details"] ="false" tconf["upload_button"] ="false" tconf["Add_new_button_url"] ="/administrator/member_role/HaddHandler?rights="+rights tconf["Action_baseURL"] ="/administrator/member_role?rights="+rights tconf["ActionCol_param"] ="ID" tconf["hdr_id"] = "0" tconf["Interpret_button"] ="false" tconf["Add_post_button"] ="false" tconf["Add_post_button_url"] ="" tconf["Add_cancel_button"] ="false" tconf["Add_cancel_button_url"] ="" tconf["level"] ="1" tconf["pageLength"] =last_length tconf["displayStart"] =last_start tconf["last_search"] = last_search if last_length==""{ tconf["pageLength"] ="10" tconf["displayStart"] ="0" } var header = []string{"Action", "ID", "Role Name", "Member Name"} //column header if ln=="" { // content access here if( strings.Contains(dec_rights, "HAdd") ){ tconf["Add_new_button"] ="true" //fmt.Println("run herer") } if( strings.Contains(dec_rights, "Dadd ") ){ } //end of content accesss var ajaxURLdata = "/administrator/member_role?rights="+rights type TblConf struct{ Headercol []string AjaxUrl string Tempconf map[string]string } tmpl := template.New("table.html") var err error //if tmpl, err = tmpl.ParseFiles("hris/assignments/assignment_list.html"); err != nil { if tmpl, err = tmpl.ParseFiles("admin/member_role/table.html"); err != nil { fmt.Println(err) } err1 := tmpl.Execute(w,&TblConf{header,ajaxURLdata,tconf}) if err1 != nil { http.Error(w, err1.Error(), http.StatusInternalServerError) } }else{ dr := r.URL.Query().Get("draw") sr := r.URL.Query().Get("search[value]") fmt.Println(sr) srt1,_ :=strconv.Atoi(r.URL.Query().Get("start")) ln1,_ :=strconv.Atoi(r.URL.Query().Get("length")) srt1 = (srt1 / ln1) + 1 str := strconv.Itoa(srt1) sortcol := r.URL.Query().Get("order[0][column]") if sortcol=="0"{ sortcol="1" } srtcolindexInt,_ :=strconv.Atoi(sortcol) var sortColName string for key , value := range header{ if ( srtcolindexInt==key){ sortColName = value } } fmt.Println(sortColName) tconf["ActionCol"] ="true" // config for action column tconf["ActionCol_param"] ="ID" // config for parameter of action tconf["ActionCol_edit"] ="true" // config for edit click tconf["ActionCol_edit_is_modal"] ="false" // config for edit click //tconf["ActionCol_edit_url"] ="/timekeeping/overtime_logs/OvertimeLogsHeaderEdit?rights="+rights+"&h_id=" // config for edit click tconf["ActionCol_edit_url"] ="/administrator/member_role/HEditHandler?rights="+rights+"&h_id=" // config for edit click tconf["ActionCol_delete"] ="true" // config for delete click tconf["ActionCol_delete_url"] ="/administrator/member_role/HDeleteHandler?h_id=" 
tconf["ActionCol_detail"] ="false" // config for details click tconf["ActionCol_add_child"] ="false" // config for addchild click tconf["style_size"] ="12" tconf["style_font"] ="" tconf["format_number_col"] ="Amount" tconf["dr"]=dr //draw //geting total record etc // rights for tables if( strings.Contains(dec_rights, "HEdit") ){ tconf["ActionCol_edit"] ="true" } if( strings.Contains(dec_rights, "HDelete") ){ tconf["ActionCol_delete"] ="true" // config for delete click } if( strings.Contains(dec_rights, "HDetails") ){ tconf["ActionCol_detail"] ="true" // config for delete click } //end rights for tables //_,session_user_id := login.Get_account_info(r) session_user_id := 1 //static here session_user_id_str := strconv.Itoa(session_user_id) fmt.Println(session_user_id_str) /* fmt.Println("exec LBR_LogHdr_List 1, 2, 1, 1, 1, '"+sortColName+"', '', '"+sr+"' " ) fmt.Println(sortColName,str,sr)*/ //fmt.Println(`exec LBR_OTHdr_List 1, 2, `+session_user_id_str+` , `+str+`, `+ln+`, '`+sortColName+`', '', '`+sr+`'`) //tconf["sql_total"] = `exec LBR_OTHdr_List 1, 2, `+session_user_id_str+` , `+str+`, `+ln+`, '`+sortColName+`', '', '`+sr+`'` //tconf["sql_data"] = `exec LBR_OTHdr_List 0, 2, `+session_user_id_str+` , `+str+`, `+ln+`, '`+sortColName+`', '', '`+sr+`'` //tconf["sql_total"] = `exec dailysumhdr_list 1, 0, 0 , `+str+`, `+ln+`, '`+sortcol+`','','`+sr+`'` //tconf["sql_data"] = `exec dailysumhdr_list 0, 0, 0 , `+str+`, `+ln+`, '`+sortcol+`','','`+sr+`'` Org_id :=login.Get_session_org_id(r) str_OrgID :=strconv.Itoa(Org_id) tconf["sql_total"] = `OrgMemRol_List 1, `+str_OrgID+`, `+str+`, `+ln+`, '`+sr+`'` tconf["sql_data"] = `OrgMemRol_List 0, `+str_OrgID+`, `+str+`, `+ln+`, '`+sr+`'` datatables.DatatableretArray(w,tconf) } } type RetMessage struct { RetNo int RetMsg string } func DoAdd(branch string, date string, remarks string , username string) (bool ,int) { //rowret ,err, _,_ := config.Ap_sql(`exec LBR_OTHdr_Save 'Add', '`+username+`', 0, '`+trandate+`', `+lbr_assign+`, '`+remarks+`'`,1) rowret ,err, _,_ := config.Ap_sql(`exec DailySumHdr_Save 'Add', 'Mars', 0, 1, `+branch+`, '`+date+`', '`+remarks+`'`,1) if err != nil { panic(err.Error()) } var r RetMessage if err != nil { fmt.Println(err.Error) panic(err.Error()) } for rowret.Next() { err = rowret.Scan(&r.RetNo,&r.RetMsg) if err != nil { panic(err.Error()) } r = RetMessage{r.RetNo,r.RetMsg} } if( strings.Contains(r.RetMsg, "Success") ){ return true ,r.RetNo }else{ return false ,r.RetNo } } var local_FuncMap = template.FuncMap{ "Sql_list": func(s_qry string , org_id string, tag_id string) [][]string{ fmt.Println(`sis_itemtags_get 1, `+org_id+`, 0 ,`+tag_id) retdata := datatables.DataList(`sis_itemtags_get 1, `+org_id+`, 0 ,`+tag_id) return retdata }, } func HAddHandler(w http.ResponseWriter, r *http.Request) { login.Session_renew(w,r) username, ses_uID := login.Get_account_info(r) //username := "static" //ses_uID := 1 str_ses_uID :=strconv.Itoa(ses_uID) rights :=r.URL.Query().Get("rights") fmt.Println(str_ses_uID) //fmt.Fprint(w,rights) if r.Method =="GET" { Org_id :=login.Get_session_org_id(r) str_OrgID :=strconv.Itoa(Org_id) tconf := make(map[string]string) //tconf["parentID"] =r.URL.Query().Get("parentID") tconf["username"] =username tconf["org_id"] = str_OrgID tconf["test_js"] = `alert("from webserver")` arr_sysrole := datatables.DataList(`sysrole_get 2`) type Data struct { Rights string Conf map[string]string Arr_Sysrole [][]string } tmpl := template.New("Hadd.html").Funcs(local_FuncMap) var err error if tmpl, err = 
tmpl.ParseFiles("admin/member_role/Hadd.html"); err != nil { fmt.Println(err) } err1 := tmpl.Execute(w,&Data{rights , tconf ,arr_sysrole}) if err1 != nil { http.Error(w, err1.Error(), http.StatusInternalServerError) } }else { r.ParseForm() add_status, lastinsertedID := DoAdd(r.Form["branch"][0],r.Form["date"][0],r.Form["remarks"][0],username) if(add_status){ str_lastinsertedID :=strconv.Itoa(lastinsertedID) profile := Profile{"Data Successfully added ",true ,str_lastinsertedID} js, err := json.Marshal(profile) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } fmt.Println("Data Successfully added") w.Header().Set("Content-Type", "application/json") w.Write(js) }else{ profile := Profile{"Oppsss something went wrong",false,""} js, err := json.Marshal(profile) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } fmt.Println("Oppsss something went wrong") w.Header().Set("Content-Type", "application/json") w.Write(js) } } } func HAddTagHandler(w http.ResponseWriter, r *http.Request) { if r.Method =="POST" { r.ParseForm() item_id := r.Form["item_id"][0] username, _ := login.Get_account_info(r) Org_id :=login.Get_session_org_id(r) str_OrgID :=strconv.Itoa(Org_id) var returnData[] string for key ,_ := range r.Form["tag"] { tag := r.Form["tag"][key] value_input := r.Form["value_input"][key] remarks := r.Form["remarks"][key] if(value_input!=""){ arr_data_itemtag := datatables.Data_row(`SIS_itemtags_save 'Update', '`+username+`',`+str_OrgID+`,`+item_id+`, `+tag+`, '`+value_input+`', '`+remarks+`'` ) fmt.Println(arr_data_itemtag) returnData = append(returnData, arr_data_itemtag[0]) returnData = append(returnData, arr_data_itemtag[1]) if(strings.Contains(arr_data_itemtag[1] , `Error`)){ returnData = append(returnData, `HasError`) } } } js, err := json.Marshal(returnData) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } w.Header().Set("Content-Type", "application/json") w.Write(js) } } //edit here type LBR_OTHdr struct{ ID int Status string Trandate interface{} Lbr_assign int Remarks interface{} } type Dailysumhdr_get struct{ ID int Branch interface{} Docdate interface{} Remarks interface{} } func LBR_OTHdr_Get_id( Hdr_id string ) Dailysumhdr_get { //db_raw ,err, _,_ := config.Ap_sql(`LBR_OTHdr_Get 1 ,`+Hdr_id,1) db_raw ,err, _,_ := config.Ap_sql(`dailysumhdr_get 1,`+Hdr_id,1) if err != nil { panic(err.Error()) } var r Dailysumhdr_get for db_raw.Next() { err = db_raw.Scan(&r.ID, &r.Branch,&r.Docdate,&r.Remarks) if err != nil { panic(err.Error()) } } return r } func HEditHandler(w http.ResponseWriter, r *http.Request) { login.Session_renew(w,r) rights :=r.URL.Query().Get("rights") Org_id :=login.Get_session_org_id(r) str_OrgID :=strconv.Itoa(Org_id) //rights :="rights" if r.Method =="GET" { username, _ := login.Get_account_info(r) tconf := make(map[string]string) tconf["h_id"] =r.URL.Query().Get("h_id") tconf["rights"]=rights tconf["username"] = username tconf["org_id"] = str_OrgID //tconf["data"] = datatables.DataList(sql) //tconf["Assign_ID"] = strconv.Itoa( LBR_LogHdr_Get_id(tconf["h_id"]).Lbr_assign ) //_,session_user_id := login.Get_account_info(r) session_user_id := 1 //static here session_user_id_str := strconv.Itoa(session_user_id) fmt.Println(session_user_id_str) type Data struct { Rights string Conf map[string]string } //arr_data := datatables.Data_row(`exec branch_get 1, 1,`+ tconf["h_id"] ) arr_data := datatables.Data_row(`select orgMem.id orgmemID ,orgMem.member MemberID , orgMem.status, 
orgMem.remarks , lastname, firstname,middlename,Username,txdate from member inner join orgMem on orgMem.member=member.id where orgMem.id= `+tconf["h_id"] ) fmt.Println(arr_data) tconf["orgmemID"] = arr_data[0] tconf["memberID"] = arr_data[1] tconf["status"] = arr_data[2] tconf["remarks"] = arr_data[3] tconf["lastname"] = arr_data[4] tconf["firstname"] = arr_data[5] tconf["middlename"] = arr_data[6] tconf["username"] = arr_data[7] tconf["txdate"] = arr_data[8] //arr_data_itemclass := datatables.DataList(`select id,dbo.SIS_Itemclass_Name(1, 3, id) [itemclass_name] from sis_itemclass` ) //arr_data_supplier := datatables.DataList(`select id,dbo.SIS_Supplier_Name(1, 3, id) [supplier_name] from SIS_Supplier` ) tmpl := template.New("Hedit.html").Funcs(local_FuncMap) var err error if tmpl, err = tmpl.ParseFiles("admin/member_role/Hedit.html"); err != nil { fmt.Println(err) } err1 := tmpl.Execute( w,&Data{rights,tconf} ) if err1 != nil { http.Error(w, err1.Error(), http.StatusInternalServerError) } }else{ //session_username := `static` r.ParseForm() //_ ,err, ex_stat,_ := config.Ap_sql(`exec LBR_OTHdr_Save 'Edit', '`+session_username+`', `+r.Form["h_id"][0] +`, '`+r.Form["trandate"][0]+`', `+r.Form["lbr_assign"][0] +`, '`+r.Form["remarks"][0]+`'`,3) //_ ,err, ex_stat,_ := config.Ap_sql(`exec LBR_OTHdr_Save 'Edit', '`+session_username+`', `+r.Form["h_id"][0] +`, '`+r.Form["trandate"][0]+`', `+r.Form["lbr_assign"][0] +`, '`+r.Form["remarks"][0]+`'`,3) _ ,err, ex_stat,_ := config.Ap_sql(`exec DailySumHdr_Save 'Add', 'Mars', `+r.Form["h_id"][0] +` , 1, `+r.Form["branch"][0]+`, '`+r.Form["date"][0]+`', '`+r.Form["remarks"][0]+`'`,3) //exec LBR_LogHdr_Save 'Edit', 'Ian', 3, '11 Jul 2016', 1, 'logfile abc', 'device abc', 'remarks abc' if err != nil { fmt.Println(err.Error) panic(err.Error()) } //lastinsertedID, _ := res.LastInsertId() if ex_stat == true{ profile := Profile{"Data Successfully Update ",true ,""} js, err := json.Marshal(profile) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } fmt.Println("Data Successfully Edited") w.Header().Set("Content-Type", "application/json") w.Write(js) }else{ profile := Profile{"Oppsss something went wrong",false,""} js, err := json.Marshal(profile) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } fmt.Println("Oppsss something went wrong") w.Header().Set("Content-Type", "application/json") w.Write(js) } } } func
(w http.ResponseWriter, r *http.Request) { login.Session_renew(w,r) if r.Method =="GET" { tconf := make(map[string]string) tconf["h_id"] = r.URL.Query().Get("h_id") tconf["delete_url"] = "/administrator/member_role/HDeleteHandler" tmpl := template.New("modal_delete_loghdr.html") var err error if tmpl, err = tmpl.ParseFiles("admin/member_role/modal_delete_loghdr.html"); err != nil { fmt.Println(err) } err = tmpl.Execute(w,tconf) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } }else{ //session_username,_ := login.Get_account_info(r) r.ParseForm() username, _ := login.Get_account_info(r) //_ ,err, _,_ := config.Ap_sql(`exec LBR_OTHdr_Save 'Delete', '`+session_username+`', `+r.Form["h_id"][0],3) //_ ,err, _,_ := config.Ap_sql(`Supplier_Save 'Delete','programmer',1, ` +r.Form["h_id"][0] ,3) //_ ,err, _,_ := config.Ap_sql(`SIS_ItemClass_Save 'Delete','programmer',` +r.Form["h_id"][0] ,3) arr_data := datatables.Data_row(`OrgMemRol_Save 'Delete','`+username+`',` +r.Form["h_id"][0] ) /* fmt.Println(arr_data) tconf["item_id"] = arr_data[0] tconf["item_org"] = arr_data[1] tconf["item_name"] = arr_data[2] tconf["item_parent"] = arr_data[3] tconf["item_parent_name"] = arr_data[4] tconf["item_remarks"] = arr_data[5] */ js, err := json.Marshal(arr_data) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } w.Header().Set("Content-Type", "application/json") w.Write(js) /*if err != nil { fmt.Println(err.Error) http.Error(w, err.Error(), http.StatusInternalServerError) panic(err.Error()) }*/ } }
HDeleteHandler
identifier_name
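The routing for these handlers is not part of header.go. Going by the URLs the handlers build for themselves (Action_baseURL, Add_new_button_url, ActionCol_edit_url, ActionCol_delete_url), a minimal registration sketch could look like the following; the import path and listen port are assumptions, and the lowercase "HaddHandler" path mirrors the URL the list handler emits.

package main

import (
	"log"
	"net/http"

	member_role "example.com/app/administrator/member_role" // hypothetical import path for this package
)

func main() {
	mux := http.NewServeMux()
	// List page and server-side DataTables endpoint share one path:
	// HListHandler serves the table page when "length" is absent, JSON otherwise.
	mux.HandleFunc("/administrator/member_role", member_role.HListHandler)
	// Add form (GET) and add submission (POST); path spelling follows Add_new_button_url.
	mux.HandleFunc("/administrator/member_role/HaddHandler", member_role.HAddHandler)
	// Edit form (GET) and edit submission (POST).
	mux.HandleFunc("/administrator/member_role/HEditHandler", member_role.HEditHandler)
	// Delete confirmation modal (GET) and delete submission (POST).
	mux.HandleFunc("/administrator/member_role/HDeleteHandler", member_role.HDeleteHandler)
	log.Fatal(http.ListenAndServe(":8080", mux)) // port is an assumption
}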
header.go
package member_role import ( "net/http" "fmt" "text/template" config "../../config" "strconv" "encoding/json" datatables "../../datatables" login "../../login" "strings" ) type Profile struct { Message string // error status output or validated data Status bool //pass or fail ID string } func HListHandler(w http.ResponseWriter, r *http.Request) { login.Session_renew(w,r) //fmt.Fprint(w,"mars test here") //fmt.Fprint(w, r.URL.Query().Get("rights")) rights :=r.URL.Query().Get("rights") //rights :="static here" last_start :=r.URL.Query().Get("last_start") last_length :=r.URL.Query().Get("last_length") last_search :=r.URL.Query().Get("last_search") dec_rights := config.Decrypt(config.Encrypt_key,rights) //dec_rights :="static here" tconf := make(map[string]string) ln := r.URL.Query().Get("length") tconf["Panel_name"] ="Member Role Record" tconf["Add_new_button"] ="true" tconf["Rights"] = rights tconf["Add_back_button"] ="false" tconf["Add_new_button_details"] ="false" tconf["upload_button"] ="false" tconf["Add_new_button_url"] ="/administrator/member_role/HaddHandler?rights="+rights tconf["Action_baseURL"] ="/administrator/member_role?rights="+rights tconf["ActionCol_param"] ="ID" tconf["hdr_id"] = "0" tconf["Interpret_button"] ="false" tconf["Add_post_button"] ="false" tconf["Add_post_button_url"] ="" tconf["Add_cancel_button"] ="false" tconf["Add_cancel_button_url"] ="" tconf["level"] ="1" tconf["pageLength"] =last_length tconf["displayStart"] =last_start tconf["last_search"] = last_search if last_length==""{ tconf["pageLength"] ="10" tconf["displayStart"] ="0" } var header = []string{"Action", "ID", "Role Name", "Member Name"} //column header if ln=="" { // content access here if( strings.Contains(dec_rights, "HAdd") ){ tconf["Add_new_button"] ="true" //fmt.Println("run herer") } if( strings.Contains(dec_rights, "Dadd ") ){ } //end of content accesss var ajaxURLdata = "/administrator/member_role?rights="+rights type TblConf struct{ Headercol []string AjaxUrl string Tempconf map[string]string } tmpl := template.New("table.html") var err error //if tmpl, err = tmpl.ParseFiles("hris/assignments/assignment_list.html"); err != nil { if tmpl, err = tmpl.ParseFiles("admin/member_role/table.html"); err != nil { fmt.Println(err) } err1 := tmpl.Execute(w,&TblConf{header,ajaxURLdata,tconf}) if err1 != nil { http.Error(w, err1.Error(), http.StatusInternalServerError) } }else{ dr := r.URL.Query().Get("draw") sr := r.URL.Query().Get("search[value]") fmt.Println(sr) srt1,_ :=strconv.Atoi(r.URL.Query().Get("start")) ln1,_ :=strconv.Atoi(r.URL.Query().Get("length")) srt1 = (srt1 / ln1) + 1 str := strconv.Itoa(srt1) sortcol := r.URL.Query().Get("order[0][column]") if sortcol=="0"{ sortcol="1" } srtcolindexInt,_ :=strconv.Atoi(sortcol) var sortColName string for key , value := range header{ if ( srtcolindexInt==key){ sortColName = value } } fmt.Println(sortColName) tconf["ActionCol"] ="true" // config for action column tconf["ActionCol_param"] ="ID" // config for parameter of action tconf["ActionCol_edit"] ="true" // config for edit click tconf["ActionCol_edit_is_modal"] ="false" // config for edit click //tconf["ActionCol_edit_url"] ="/timekeeping/overtime_logs/OvertimeLogsHeaderEdit?rights="+rights+"&h_id=" // config for edit click tconf["ActionCol_edit_url"] ="/administrator/member_role/HEditHandler?rights="+rights+"&h_id=" // config for edit click tconf["ActionCol_delete"] ="true" // config for delete click tconf["ActionCol_delete_url"] ="/administrator/member_role/HDeleteHandler?h_id=" 
tconf["ActionCol_detail"] ="false" // config for details click tconf["ActionCol_add_child"] ="false" // config for addchild click tconf["style_size"] ="12" tconf["style_font"] ="" tconf["format_number_col"] ="Amount" tconf["dr"]=dr //draw //geting total record etc // rights for tables if( strings.Contains(dec_rights, "HEdit") ){ tconf["ActionCol_edit"] ="true" } if( strings.Contains(dec_rights, "HDelete") ){ tconf["ActionCol_delete"] ="true" // config for delete click } if( strings.Contains(dec_rights, "HDetails") ){ tconf["ActionCol_detail"] ="true" // config for delete click } //end rights for tables //_,session_user_id := login.Get_account_info(r) session_user_id := 1 //static here session_user_id_str := strconv.Itoa(session_user_id) fmt.Println(session_user_id_str) /* fmt.Println("exec LBR_LogHdr_List 1, 2, 1, 1, 1, '"+sortColName+"', '', '"+sr+"' " ) fmt.Println(sortColName,str,sr)*/ //fmt.Println(`exec LBR_OTHdr_List 1, 2, `+session_user_id_str+` , `+str+`, `+ln+`, '`+sortColName+`', '', '`+sr+`'`) //tconf["sql_total"] = `exec LBR_OTHdr_List 1, 2, `+session_user_id_str+` , `+str+`, `+ln+`, '`+sortColName+`', '', '`+sr+`'` //tconf["sql_data"] = `exec LBR_OTHdr_List 0, 2, `+session_user_id_str+` , `+str+`, `+ln+`, '`+sortColName+`', '', '`+sr+`'` //tconf["sql_total"] = `exec dailysumhdr_list 1, 0, 0 , `+str+`, `+ln+`, '`+sortcol+`','','`+sr+`'` //tconf["sql_data"] = `exec dailysumhdr_list 0, 0, 0 , `+str+`, `+ln+`, '`+sortcol+`','','`+sr+`'` Org_id :=login.Get_session_org_id(r) str_OrgID :=strconv.Itoa(Org_id) tconf["sql_total"] = `OrgMemRol_List 1, `+str_OrgID+`, `+str+`, `+ln+`, '`+sr+`'` tconf["sql_data"] = `OrgMemRol_List 0, `+str_OrgID+`, `+str+`, `+ln+`, '`+sr+`'` datatables.DatatableretArray(w,tconf) } } type RetMessage struct { RetNo int RetMsg string } func DoAdd(branch string, date string, remarks string , username string) (bool ,int) { //rowret ,err, _,_ := config.Ap_sql(`exec LBR_OTHdr_Save 'Add', '`+username+`', 0, '`+trandate+`', `+lbr_assign+`, '`+remarks+`'`,1) rowret ,err, _,_ := config.Ap_sql(`exec DailySumHdr_Save 'Add', 'Mars', 0, 1, `+branch+`, '`+date+`', '`+remarks+`'`,1) if err != nil { panic(err.Error()) } var r RetMessage if err != nil { fmt.Println(err.Error) panic(err.Error()) } for rowret.Next() { err = rowret.Scan(&r.RetNo,&r.RetMsg) if err != nil { panic(err.Error()) } r = RetMessage{r.RetNo,r.RetMsg} } if( strings.Contains(r.RetMsg, "Success") ){ return true ,r.RetNo }else{ return false ,r.RetNo } } var local_FuncMap = template.FuncMap{ "Sql_list": func(s_qry string , org_id string, tag_id string) [][]string{ fmt.Println(`sis_itemtags_get 1, `+org_id+`, 0 ,`+tag_id) retdata := datatables.DataList(`sis_itemtags_get 1, `+org_id+`, 0 ,`+tag_id) return retdata }, } func HAddHandler(w http.ResponseWriter, r *http.Request) { login.Session_renew(w,r) username, ses_uID := login.Get_account_info(r) //username := "static" //ses_uID := 1 str_ses_uID :=strconv.Itoa(ses_uID) rights :=r.URL.Query().Get("rights") fmt.Println(str_ses_uID) //fmt.Fprint(w,rights) if r.Method =="GET" { Org_id :=login.Get_session_org_id(r) str_OrgID :=strconv.Itoa(Org_id) tconf := make(map[string]string) //tconf["parentID"] =r.URL.Query().Get("parentID") tconf["username"] =username tconf["org_id"] = str_OrgID tconf["test_js"] = `alert("from webserver")` arr_sysrole := datatables.DataList(`sysrole_get 2`) type Data struct { Rights string Conf map[string]string Arr_Sysrole [][]string } tmpl := template.New("Hadd.html").Funcs(local_FuncMap) var err error if tmpl, err = 
tmpl.ParseFiles("admin/member_role/Hadd.html"); err != nil { fmt.Println(err) } err1 := tmpl.Execute(w,&Data{rights , tconf ,arr_sysrole}) if err1 != nil { http.Error(w, err1.Error(), http.StatusInternalServerError) } }else { r.ParseForm() add_status, lastinsertedID := DoAdd(r.Form["branch"][0],r.Form["date"][0],r.Form["remarks"][0],username) if(add_status){ str_lastinsertedID :=strconv.Itoa(lastinsertedID) profile := Profile{"Data Successfully added ",true ,str_lastinsertedID} js, err := json.Marshal(profile) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } fmt.Println("Data Successfully added") w.Header().Set("Content-Type", "application/json") w.Write(js) }else{ profile := Profile{"Oppsss something went wrong",false,""} js, err := json.Marshal(profile) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } fmt.Println("Oppsss something went wrong") w.Header().Set("Content-Type", "application/json") w.Write(js) } } } func HAddTagHandler(w http.ResponseWriter, r *http.Request) { if r.Method =="POST" { r.ParseForm() item_id := r.Form["item_id"][0] username, _ := login.Get_account_info(r) Org_id :=login.Get_session_org_id(r) str_OrgID :=strconv.Itoa(Org_id) var returnData[] string for key ,_ := range r.Form["tag"] { tag := r.Form["tag"][key] value_input := r.Form["value_input"][key] remarks := r.Form["remarks"][key] if(value_input!=""){ arr_data_itemtag := datatables.Data_row(`SIS_itemtags_save 'Update', '`+username+`',`+str_OrgID+`,`+item_id+`, `+tag+`, '`+value_input+`', '`+remarks+`'` ) fmt.Println(arr_data_itemtag) returnData = append(returnData, arr_data_itemtag[0]) returnData = append(returnData, arr_data_itemtag[1]) if(strings.Contains(arr_data_itemtag[1] , `Error`)){ returnData = append(returnData, `HasError`) } } } js, err := json.Marshal(returnData) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } w.Header().Set("Content-Type", "application/json") w.Write(js) } } //edit here type LBR_OTHdr struct{ ID int Status string Trandate interface{} Lbr_assign int Remarks interface{} } type Dailysumhdr_get struct{ ID int Branch interface{} Docdate interface{} Remarks interface{} } func LBR_OTHdr_Get_id( Hdr_id string ) Dailysumhdr_get
func HEditHandler(w http.ResponseWriter, r *http.Request) { login.Session_renew(w,r) rights :=r.URL.Query().Get("rights") Org_id :=login.Get_session_org_id(r) str_OrgID :=strconv.Itoa(Org_id) //rights :="rights" if r.Method =="GET" { username, _ := login.Get_account_info(r) tconf := make(map[string]string) tconf["h_id"] =r.URL.Query().Get("h_id") tconf["rights"]=rights tconf["username"] = username tconf["org_id"] = str_OrgID //tconf["data"] = datatables.DataList(sql) //tconf["Assign_ID"] = strconv.Itoa( LBR_LogHdr_Get_id(tconf["h_id"]).Lbr_assign ) //_,session_user_id := login.Get_account_info(r) session_user_id := 1 //static here session_user_id_str := strconv.Itoa(session_user_id) fmt.Println(session_user_id_str) type Data struct { Rights string Conf map[string]string } //arr_data := datatables.Data_row(`exec branch_get 1, 1,`+ tconf["h_id"] ) arr_data := datatables.Data_row(`select orgMem.id orgmemID ,orgMem.member MemberID , orgMem.status, orgMem.remarks , lastname, firstname,middlename,Username,txdate from member inner join orgMem on orgMem.member=member.id where orgMem.id= `+tconf["h_id"] ) fmt.Println(arr_data) tconf["orgmemID"] = arr_data[0] tconf["memberID"] = arr_data[1] tconf["status"] = arr_data[2] tconf["remarks"] = arr_data[3] tconf["lastname"] = arr_data[4] tconf["firstname"] = arr_data[5] tconf["middlename"] = arr_data[6] tconf["username"] = arr_data[7] tconf["txdate"] = arr_data[8] //arr_data_itemclass := datatables.DataList(`select id,dbo.SIS_Itemclass_Name(1, 3, id) [itemclass_name] from sis_itemclass` ) //arr_data_supplier := datatables.DataList(`select id,dbo.SIS_Supplier_Name(1, 3, id) [supplier_name] from SIS_Supplier` ) tmpl := template.New("Hedit.html").Funcs(local_FuncMap) var err error if tmpl, err = tmpl.ParseFiles("admin/member_role/Hedit.html"); err != nil { fmt.Println(err) } err1 := tmpl.Execute( w,&Data{rights,tconf} ) if err1 != nil { http.Error(w, err1.Error(), http.StatusInternalServerError) } }else{ //session_username := `static` r.ParseForm() //_ ,err, ex_stat,_ := config.Ap_sql(`exec LBR_OTHdr_Save 'Edit', '`+session_username+`', `+r.Form["h_id"][0] +`, '`+r.Form["trandate"][0]+`', `+r.Form["lbr_assign"][0] +`, '`+r.Form["remarks"][0]+`'`,3) //_ ,err, ex_stat,_ := config.Ap_sql(`exec LBR_OTHdr_Save 'Edit', '`+session_username+`', `+r.Form["h_id"][0] +`, '`+r.Form["trandate"][0]+`', `+r.Form["lbr_assign"][0] +`, '`+r.Form["remarks"][0]+`'`,3) _ ,err, ex_stat,_ := config.Ap_sql(`exec DailySumHdr_Save 'Add', 'Mars', `+r.Form["h_id"][0] +` , 1, `+r.Form["branch"][0]+`, '`+r.Form["date"][0]+`', '`+r.Form["remarks"][0]+`'`,3) //exec LBR_LogHdr_Save 'Edit', 'Ian', 3, '11 Jul 2016', 1, 'logfile abc', 'device abc', 'remarks abc' if err != nil { fmt.Println(err.Error) panic(err.Error()) } //lastinsertedID, _ := res.LastInsertId() if ex_stat == true{ profile := Profile{"Data Successfully Update ",true ,""} js, err := json.Marshal(profile) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } fmt.Println("Data Successfully Edited") w.Header().Set("Content-Type", "application/json") w.Write(js) }else{ profile := Profile{"Oppsss something went wrong",false,""} js, err := json.Marshal(profile) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } fmt.Println("Oppsss something went wrong") w.Header().Set("Content-Type", "application/json") w.Write(js) } } } func HDeleteHandler(w http.ResponseWriter, r *http.Request) { login.Session_renew(w,r) if r.Method =="GET" { tconf := make(map[string]string) 
tconf["h_id"] = r.URL.Query().Get("h_id") tconf["delete_url"] = "/administrator/member_role/HDeleteHandler" tmpl := template.New("modal_delete_loghdr.html") var err error if tmpl, err = tmpl.ParseFiles("admin/member_role/modal_delete_loghdr.html"); err != nil { fmt.Println(err) } err = tmpl.Execute(w,tconf) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } }else{ //session_username,_ := login.Get_account_info(r) r.ParseForm() username, _ := login.Get_account_info(r) //_ ,err, _,_ := config.Ap_sql(`exec LBR_OTHdr_Save 'Delete', '`+session_username+`', `+r.Form["h_id"][0],3) //_ ,err, _,_ := config.Ap_sql(`Supplier_Save 'Delete','programmer',1, ` +r.Form["h_id"][0] ,3) //_ ,err, _,_ := config.Ap_sql(`SIS_ItemClass_Save 'Delete','programmer',` +r.Form["h_id"][0] ,3) arr_data := datatables.Data_row(`OrgMemRol_Save 'Delete','`+username+`',` +r.Form["h_id"][0] ) /* fmt.Println(arr_data) tconf["item_id"] = arr_data[0] tconf["item_org"] = arr_data[1] tconf["item_name"] = arr_data[2] tconf["item_parent"] = arr_data[3] tconf["item_parent_name"] = arr_data[4] tconf["item_remarks"] = arr_data[5] */ js, err := json.Marshal(arr_data) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } w.Header().Set("Content-Type", "application/json") w.Write(js) /*if err != nil { fmt.Println(err.Error) http.Error(w, err.Error(), http.StatusInternalServerError) panic(err.Error()) }*/ } }
{
	// Despite the LBR_OTHdr_Get_id name, this body now reads a daily-summary
	// header row via the dailysumhdr_get procedure (the old call is kept commented out).
	//db_raw ,err, _,_ := config.Ap_sql(`LBR_OTHdr_Get 1 ,`+Hdr_id,1)
	db_raw, err, _, _ := config.Ap_sql(`dailysumhdr_get 1,`+Hdr_id, 1)
	if err != nil {
		panic(err.Error())
	}
	var r Dailysumhdr_get
	for db_raw.Next() {
		err = db_raw.Scan(&r.ID, &r.Branch, &r.Docdate, &r.Remarks)
		if err != nil {
			panic(err.Error())
		}
	}
	return r
}
identifier_body
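DoAdd, HEditHandler and HDeleteHandler all splice request values (h_id, form fields, the DataTables search text) into the stored-procedure strings passed to config.Ap_sql and datatables.Data_row. As a design note, the same delete call could instead be issued with bound parameters. The sketch below uses the standard database/sql package and a hypothetical orgMemRolDelete helper, since Ap_sql's internals are not shown in this file; the placeholder style (?, @p1, $1) depends on whichever driver is registered.

package main

import (
	"database/sql"
	"fmt"
)

// orgMemRolDelete is a hypothetical helper that runs the same OrgMemRol_Save
// 'Delete' call as HDeleteHandler, but with driver-bound parameters instead of
// string concatenation. Replace the ? placeholders with the form your driver expects.
func orgMemRolDelete(db *sql.DB, username, hID string) (retNo int, retMsg string, err error) {
	row := db.QueryRow(`exec OrgMemRol_Save 'Delete', ?, ?`, username, hID)
	if err = row.Scan(&retNo, &retMsg); err != nil {
		return 0, "", fmt.Errorf("OrgMemRol_Save failed: %w", err)
	}
	return retNo, retMsg, nil
}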
header.go
package member_role import ( "net/http" "fmt" "text/template" config "../../config" "strconv" "encoding/json" datatables "../../datatables" login "../../login" "strings" ) type Profile struct { Message string // error status output or validated data Status bool //pass or fail ID string } func HListHandler(w http.ResponseWriter, r *http.Request) { login.Session_renew(w,r) //fmt.Fprint(w,"mars test here") //fmt.Fprint(w, r.URL.Query().Get("rights")) rights :=r.URL.Query().Get("rights") //rights :="static here" last_start :=r.URL.Query().Get("last_start") last_length :=r.URL.Query().Get("last_length") last_search :=r.URL.Query().Get("last_search") dec_rights := config.Decrypt(config.Encrypt_key,rights) //dec_rights :="static here" tconf := make(map[string]string) ln := r.URL.Query().Get("length") tconf["Panel_name"] ="Member Role Record" tconf["Add_new_button"] ="true" tconf["Rights"] = rights tconf["Add_back_button"] ="false" tconf["Add_new_button_details"] ="false" tconf["upload_button"] ="false" tconf["Add_new_button_url"] ="/administrator/member_role/HaddHandler?rights="+rights tconf["Action_baseURL"] ="/administrator/member_role?rights="+rights tconf["ActionCol_param"] ="ID" tconf["hdr_id"] = "0" tconf["Interpret_button"] ="false" tconf["Add_post_button"] ="false" tconf["Add_post_button_url"] ="" tconf["Add_cancel_button"] ="false" tconf["Add_cancel_button_url"] ="" tconf["level"] ="1" tconf["pageLength"] =last_length tconf["displayStart"] =last_start tconf["last_search"] = last_search if last_length==""{ tconf["pageLength"] ="10" tconf["displayStart"] ="0" } var header = []string{"Action", "ID", "Role Name", "Member Name"} //column header if ln=="" { // content access here if( strings.Contains(dec_rights, "HAdd") ){ tconf["Add_new_button"] ="true" //fmt.Println("run herer") } if( strings.Contains(dec_rights, "Dadd ") ){ } //end of content accesss var ajaxURLdata = "/administrator/member_role?rights="+rights type TblConf struct{ Headercol []string AjaxUrl string Tempconf map[string]string } tmpl := template.New("table.html") var err error //if tmpl, err = tmpl.ParseFiles("hris/assignments/assignment_list.html"); err != nil { if tmpl, err = tmpl.ParseFiles("admin/member_role/table.html"); err != nil { fmt.Println(err) } err1 := tmpl.Execute(w,&TblConf{header,ajaxURLdata,tconf}) if err1 != nil { http.Error(w, err1.Error(), http.StatusInternalServerError) } }else{ dr := r.URL.Query().Get("draw") sr := r.URL.Query().Get("search[value]") fmt.Println(sr) srt1,_ :=strconv.Atoi(r.URL.Query().Get("start")) ln1,_ :=strconv.Atoi(r.URL.Query().Get("length")) srt1 = (srt1 / ln1) + 1 str := strconv.Itoa(srt1) sortcol := r.URL.Query().Get("order[0][column]") if sortcol=="0"{ sortcol="1" } srtcolindexInt,_ :=strconv.Atoi(sortcol) var sortColName string for key , value := range header{ if ( srtcolindexInt==key){ sortColName = value } } fmt.Println(sortColName) tconf["ActionCol"] ="true" // config for action column tconf["ActionCol_param"] ="ID" // config for parameter of action tconf["ActionCol_edit"] ="true" // config for edit click tconf["ActionCol_edit_is_modal"] ="false" // config for edit click //tconf["ActionCol_edit_url"] ="/timekeeping/overtime_logs/OvertimeLogsHeaderEdit?rights="+rights+"&h_id=" // config for edit click tconf["ActionCol_edit_url"] ="/administrator/member_role/HEditHandler?rights="+rights+"&h_id=" // config for edit click tconf["ActionCol_delete"] ="true" // config for delete click tconf["ActionCol_delete_url"] ="/administrator/member_role/HDeleteHandler?h_id=" 
tconf["ActionCol_detail"] ="false" // config for details click tconf["ActionCol_add_child"] ="false" // config for addchild click tconf["style_size"] ="12" tconf["style_font"] ="" tconf["format_number_col"] ="Amount" tconf["dr"]=dr //draw //geting total record etc // rights for tables if( strings.Contains(dec_rights, "HEdit") ){ tconf["ActionCol_edit"] ="true" } if( strings.Contains(dec_rights, "HDelete") ){ tconf["ActionCol_delete"] ="true" // config for delete click } if( strings.Contains(dec_rights, "HDetails") ){ tconf["ActionCol_detail"] ="true" // config for delete click } //end rights for tables //_,session_user_id := login.Get_account_info(r) session_user_id := 1 //static here session_user_id_str := strconv.Itoa(session_user_id) fmt.Println(session_user_id_str) /* fmt.Println("exec LBR_LogHdr_List 1, 2, 1, 1, 1, '"+sortColName+"', '', '"+sr+"' " ) fmt.Println(sortColName,str,sr)*/ //fmt.Println(`exec LBR_OTHdr_List 1, 2, `+session_user_id_str+` , `+str+`, `+ln+`, '`+sortColName+`', '', '`+sr+`'`) //tconf["sql_total"] = `exec LBR_OTHdr_List 1, 2, `+session_user_id_str+` , `+str+`, `+ln+`, '`+sortColName+`', '', '`+sr+`'` //tconf["sql_data"] = `exec LBR_OTHdr_List 0, 2, `+session_user_id_str+` , `+str+`, `+ln+`, '`+sortColName+`', '', '`+sr+`'` //tconf["sql_total"] = `exec dailysumhdr_list 1, 0, 0 , `+str+`, `+ln+`, '`+sortcol+`','','`+sr+`'` //tconf["sql_data"] = `exec dailysumhdr_list 0, 0, 0 , `+str+`, `+ln+`, '`+sortcol+`','','`+sr+`'` Org_id :=login.Get_session_org_id(r) str_OrgID :=strconv.Itoa(Org_id) tconf["sql_total"] = `OrgMemRol_List 1, `+str_OrgID+`, `+str+`, `+ln+`, '`+sr+`'` tconf["sql_data"] = `OrgMemRol_List 0, `+str_OrgID+`, `+str+`, `+ln+`, '`+sr+`'` datatables.DatatableretArray(w,tconf) } } type RetMessage struct { RetNo int RetMsg string } func DoAdd(branch string, date string, remarks string , username string) (bool ,int) { //rowret ,err, _,_ := config.Ap_sql(`exec LBR_OTHdr_Save 'Add', '`+username+`', 0, '`+trandate+`', `+lbr_assign+`, '`+remarks+`'`,1) rowret ,err, _,_ := config.Ap_sql(`exec DailySumHdr_Save 'Add', 'Mars', 0, 1, `+branch+`, '`+date+`', '`+remarks+`'`,1) if err != nil { panic(err.Error()) } var r RetMessage if err != nil { fmt.Println(err.Error) panic(err.Error()) } for rowret.Next() { err = rowret.Scan(&r.RetNo,&r.RetMsg) if err != nil { panic(err.Error()) } r = RetMessage{r.RetNo,r.RetMsg} } if( strings.Contains(r.RetMsg, "Success") ){ return true ,r.RetNo }else{ return false ,r.RetNo } } var local_FuncMap = template.FuncMap{ "Sql_list": func(s_qry string , org_id string, tag_id string) [][]string{ fmt.Println(`sis_itemtags_get 1, `+org_id+`, 0 ,`+tag_id) retdata := datatables.DataList(`sis_itemtags_get 1, `+org_id+`, 0 ,`+tag_id) return retdata }, } func HAddHandler(w http.ResponseWriter, r *http.Request) { login.Session_renew(w,r) username, ses_uID := login.Get_account_info(r) //username := "static" //ses_uID := 1 str_ses_uID :=strconv.Itoa(ses_uID) rights :=r.URL.Query().Get("rights") fmt.Println(str_ses_uID) //fmt.Fprint(w,rights) if r.Method =="GET" { Org_id :=login.Get_session_org_id(r) str_OrgID :=strconv.Itoa(Org_id) tconf := make(map[string]string) //tconf["parentID"] =r.URL.Query().Get("parentID") tconf["username"] =username tconf["org_id"] = str_OrgID tconf["test_js"] = `alert("from webserver")` arr_sysrole := datatables.DataList(`sysrole_get 2`) type Data struct { Rights string Conf map[string]string Arr_Sysrole [][]string } tmpl := template.New("Hadd.html").Funcs(local_FuncMap) var err error if tmpl, err = 
tmpl.ParseFiles("admin/member_role/Hadd.html"); err != nil { fmt.Println(err) } err1 := tmpl.Execute(w,&Data{rights , tconf ,arr_sysrole}) if err1 != nil { http.Error(w, err1.Error(), http.StatusInternalServerError) } }else { r.ParseForm() add_status, lastinsertedID := DoAdd(r.Form["branch"][0],r.Form["date"][0],r.Form["remarks"][0],username) if(add_status){ str_lastinsertedID :=strconv.Itoa(lastinsertedID) profile := Profile{"Data Successfully added ",true ,str_lastinsertedID} js, err := json.Marshal(profile) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } fmt.Println("Data Successfully added") w.Header().Set("Content-Type", "application/json") w.Write(js) }else{ profile := Profile{"Oppsss something went wrong",false,""} js, err := json.Marshal(profile) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } fmt.Println("Oppsss something went wrong") w.Header().Set("Content-Type", "application/json") w.Write(js) } } } func HAddTagHandler(w http.ResponseWriter, r *http.Request) { if r.Method =="POST"
} //edit here type LBR_OTHdr struct{ ID int Status string Trandate interface{} Lbr_assign int Remarks interface{} } type Dailysumhdr_get struct{ ID int Branch interface{} Docdate interface{} Remarks interface{} } func LBR_OTHdr_Get_id( Hdr_id string ) Dailysumhdr_get { //db_raw ,err, _,_ := config.Ap_sql(`LBR_OTHdr_Get 1 ,`+Hdr_id,1) db_raw ,err, _,_ := config.Ap_sql(`dailysumhdr_get 1,`+Hdr_id,1) if err != nil { panic(err.Error()) } var r Dailysumhdr_get for db_raw.Next() { err = db_raw.Scan(&r.ID, &r.Branch,&r.Docdate,&r.Remarks) if err != nil { panic(err.Error()) } } return r } func HEditHandler(w http.ResponseWriter, r *http.Request) { login.Session_renew(w,r) rights :=r.URL.Query().Get("rights") Org_id :=login.Get_session_org_id(r) str_OrgID :=strconv.Itoa(Org_id) //rights :="rights" if r.Method =="GET" { username, _ := login.Get_account_info(r) tconf := make(map[string]string) tconf["h_id"] =r.URL.Query().Get("h_id") tconf["rights"]=rights tconf["username"] = username tconf["org_id"] = str_OrgID //tconf["data"] = datatables.DataList(sql) //tconf["Assign_ID"] = strconv.Itoa( LBR_LogHdr_Get_id(tconf["h_id"]).Lbr_assign ) //_,session_user_id := login.Get_account_info(r) session_user_id := 1 //static here session_user_id_str := strconv.Itoa(session_user_id) fmt.Println(session_user_id_str) type Data struct { Rights string Conf map[string]string } //arr_data := datatables.Data_row(`exec branch_get 1, 1,`+ tconf["h_id"] ) arr_data := datatables.Data_row(`select orgMem.id orgmemID ,orgMem.member MemberID , orgMem.status, orgMem.remarks , lastname, firstname,middlename,Username,txdate from member inner join orgMem on orgMem.member=member.id where orgMem.id= `+tconf["h_id"] ) fmt.Println(arr_data) tconf["orgmemID"] = arr_data[0] tconf["memberID"] = arr_data[1] tconf["status"] = arr_data[2] tconf["remarks"] = arr_data[3] tconf["lastname"] = arr_data[4] tconf["firstname"] = arr_data[5] tconf["middlename"] = arr_data[6] tconf["username"] = arr_data[7] tconf["txdate"] = arr_data[8] //arr_data_itemclass := datatables.DataList(`select id,dbo.SIS_Itemclass_Name(1, 3, id) [itemclass_name] from sis_itemclass` ) //arr_data_supplier := datatables.DataList(`select id,dbo.SIS_Supplier_Name(1, 3, id) [supplier_name] from SIS_Supplier` ) tmpl := template.New("Hedit.html").Funcs(local_FuncMap) var err error if tmpl, err = tmpl.ParseFiles("admin/member_role/Hedit.html"); err != nil { fmt.Println(err) } err1 := tmpl.Execute( w,&Data{rights,tconf} ) if err1 != nil { http.Error(w, err1.Error(), http.StatusInternalServerError) } }else{ //session_username := `static` r.ParseForm() //_ ,err, ex_stat,_ := config.Ap_sql(`exec LBR_OTHdr_Save 'Edit', '`+session_username+`', `+r.Form["h_id"][0] +`, '`+r.Form["trandate"][0]+`', `+r.Form["lbr_assign"][0] +`, '`+r.Form["remarks"][0]+`'`,3) //_ ,err, ex_stat,_ := config.Ap_sql(`exec LBR_OTHdr_Save 'Edit', '`+session_username+`', `+r.Form["h_id"][0] +`, '`+r.Form["trandate"][0]+`', `+r.Form["lbr_assign"][0] +`, '`+r.Form["remarks"][0]+`'`,3) _ ,err, ex_stat,_ := config.Ap_sql(`exec DailySumHdr_Save 'Add', 'Mars', `+r.Form["h_id"][0] +` , 1, `+r.Form["branch"][0]+`, '`+r.Form["date"][0]+`', '`+r.Form["remarks"][0]+`'`,3) //exec LBR_LogHdr_Save 'Edit', 'Ian', 3, '11 Jul 2016', 1, 'logfile abc', 'device abc', 'remarks abc' if err != nil { fmt.Println(err.Error) panic(err.Error()) } //lastinsertedID, _ := res.LastInsertId() if ex_stat == true{ profile := Profile{"Data Successfully Update ",true ,""} js, err := json.Marshal(profile) if err != nil { http.Error(w, err.Error(), 
http.StatusInternalServerError) return } fmt.Println("Data Successfully Edited") w.Header().Set("Content-Type", "application/json") w.Write(js) }else{ profile := Profile{"Oppsss something went wrong",false,""} js, err := json.Marshal(profile) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } fmt.Println("Oppsss something went wrong") w.Header().Set("Content-Type", "application/json") w.Write(js) } } } func HDeleteHandler(w http.ResponseWriter, r *http.Request) { login.Session_renew(w,r) if r.Method =="GET" { tconf := make(map[string]string) tconf["h_id"] = r.URL.Query().Get("h_id") tconf["delete_url"] = "/administrator/member_role/HDeleteHandler" tmpl := template.New("modal_delete_loghdr.html") var err error if tmpl, err = tmpl.ParseFiles("admin/member_role/modal_delete_loghdr.html"); err != nil { fmt.Println(err) } err = tmpl.Execute(w,tconf) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } }else{ //session_username,_ := login.Get_account_info(r) r.ParseForm() username, _ := login.Get_account_info(r) //_ ,err, _,_ := config.Ap_sql(`exec LBR_OTHdr_Save 'Delete', '`+session_username+`', `+r.Form["h_id"][0],3) //_ ,err, _,_ := config.Ap_sql(`Supplier_Save 'Delete','programmer',1, ` +r.Form["h_id"][0] ,3) //_ ,err, _,_ := config.Ap_sql(`SIS_ItemClass_Save 'Delete','programmer',` +r.Form["h_id"][0] ,3) arr_data := datatables.Data_row(`OrgMemRol_Save 'Delete','`+username+`',` +r.Form["h_id"][0] ) /* fmt.Println(arr_data) tconf["item_id"] = arr_data[0] tconf["item_org"] = arr_data[1] tconf["item_name"] = arr_data[2] tconf["item_parent"] = arr_data[3] tconf["item_parent_name"] = arr_data[4] tconf["item_remarks"] = arr_data[5] */ js, err := json.Marshal(arr_data) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } w.Header().Set("Content-Type", "application/json") w.Write(js) /*if err != nil { fmt.Println(err.Error) http.Error(w, err.Error(), http.StatusInternalServerError) panic(err.Error()) }*/ } }
{
		r.ParseForm()
		item_id := r.Form["item_id"][0]
		username, _ := login.Get_account_info(r)
		Org_id := login.Get_session_org_id(r)
		str_OrgID := strconv.Itoa(Org_id)
		var returnData []string
		// One tag / value_input / remarks triple is expected per index of the posted arrays.
		for key := range r.Form["tag"] {
			tag := r.Form["tag"][key]
			value_input := r.Form["value_input"][key]
			remarks := r.Form["remarks"][key]
			if value_input != "" {
				arr_data_itemtag := datatables.Data_row(`SIS_itemtags_save 'Update', '`+username+`',`+str_OrgID+`,`+item_id+`, `+tag+`, '`+value_input+`', '`+remarks+`'`)
				fmt.Println(arr_data_itemtag)
				returnData = append(returnData, arr_data_itemtag[0])
				returnData = append(returnData, arr_data_itemtag[1])
				if strings.Contains(arr_data_itemtag[1], `Error`) {
					returnData = append(returnData, `HasError`)
				}
			}
		}
		js, err := json.Marshal(returnData)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		w.Write(js)
	}
conditional_block
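HListHandler reads the server-side DataTables parameters straight from the query string (draw, start, length, search[value], order[0][column]) and converts start into a 1-based page with (start / length) + 1. A small hypothetical helper that gathers those values in one place is sketched below; unlike the handler, it guards against a zero length before dividing.

package main

import (
	"net/http"
	"strconv"
)

// dtRequest collects the DataTables server-side parameters used by the list handler.
type dtRequest struct {
	Draw     string // echoed back to DataTables with the response
	Page     int    // 1-based page number derived from start/length
	Length   int    // rows per page
	Search   string // global search box value (search[value])
	OrderCol int    // index of the column being sorted (order[0][column])
}

func parseDataTablesRequest(r *http.Request) dtRequest {
	q := r.URL.Query()
	start, _ := strconv.Atoi(q.Get("start"))
	length, _ := strconv.Atoi(q.Get("length"))
	page := 1
	if length > 0 {
		page = (start / length) + 1
	}
	orderCol, _ := strconv.Atoi(q.Get("order[0][column]"))
	return dtRequest{
		Draw:     q.Get("draw"),
		Page:     page,
		Length:   length,
		Search:   q.Get("search[value]"),
		OrderCol: orderCol,
	}
}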
main.rs
extern crate byteorder; ///Used to get screen resolution. extern crate screenshot; extern crate inputbot; extern crate ron; extern crate bincode; #[macro_use] extern crate serde_derive; extern crate serde; use prelude::*; use std::{ net::{TcpStream}, io::{self,BufRead,BufReader}, fs::{File}, cmp::{Ordering}, env, ops::{self,RangeInclusive}, path::{Path}, }; use byteorder::{NetworkEndian,ByteOrder,ReadBytesExt}; use inputbot::{MouseCursor}; use rect::*; use network::{Remote}; use absm::{AbsmSession,ServerInfo}; mod prelude { pub use std::error::Error as ErrorTrait; pub type Error = Box<ErrorTrait>; pub type Result<T> = ::std::result::Result<T,Error>; pub use std::{ io::{Write,Read}, fmt,mem, }; pub use serde::{Serialize,Deserialize}; pub use network::{Connection,LocalBuffer,NetBuffer}; pub enum Never {} impl Never { fn as_never(&self)->! {unsafe{::std::hint::unreachable_unchecked()}} } impl fmt::Display for Never { fn fmt(&self,_: &mut fmt::Formatter)->fmt::Result {self.as_never()} } impl fmt::Debug for Never { fn fmt(&self,_: &mut fmt::Formatter)->fmt::Result {self.as_never()} } impl ErrorTrait for Never {} } #[macro_use] mod rect; mod network; mod absm; pub struct Setup { ///Map from input device coordinates to output client coordinates. pub mapping: Mapping, ///Specify a minimum and a maximum on the final client coordinates. pub clip: Rect<i32>, ///Specify a range of pressures. ///Events with a pressure outside this range are ignored. pub pressure: [f32; 2], ///Specify a range of sizes, similarly to `pressure`. pub size: [f32; 2], } impl Setup { fn new(info: &ServerInfo,config: &Config)->Setup { //Target area is set immutably by the config let target=config.target; //Start off with source area as the entire device screen //Source area is more mutable than target area let mut source=Rect{min: Pair([0.0; 2]),max: info.server_screen_res}; println!("device screen area: {}",source); //Correct any device rotations if config.correct_device_orientation { if source.aspect()!=target.aspect() { //Source screen should be rotated 90ยฐ counterclockwise to correct orientation source.rotate_negative(); println!("rotated 90ยฐ counterclockwise to correct device orientation"); }else{ println!("device orientation is aligned with client orientation"); } }else{ println!("device orientation correction is disabled"); } //Apply config device source area proportions let mut source=Rect{ min: source.map(|int| int as f32).denormalizer().apply(config.source.min), max: source.map(|int| int as f32).denormalizer().apply(config.source.max), }; //Correct orientation if source and target don't have matching aspects if config.correct_orientation { if source.aspect()!=target.aspect() { source.rotate_negative(); println!("rotated 90ยฐ counterclockwise to correct orientation mismatch"); }else{ println!("final orientation matches target orientation"); } }else{ println!("final orientation correction is disabled"); } //Shrink a source axis to match target aspect ratio if config.keep_aspect_ratio { let shrink=|source: &mut Rect<f32>,shrink_axis: Axis| { let fixed_axis=shrink_axis.swap(); //Get the target size of the shrink axis let target=target.virtual_size(shrink_axis) as f32*source.virtual_size(fixed_axis) / target.virtual_size(fixed_axis) as f32; source.resize_virtual_axis(shrink_axis,target); }; match target.map(|int| int as f32).aspect_ratio().partial_cmp(&source.aspect_ratio()).unwrap() { Ordering::Greater=>{ //Shrink vertically to match aspect ratio let old=source.virtual_size(Axis::Y); shrink(&mut source,Axis::Y); 
println!( "shrank source area vertically from {} to {} to match target aspect ratio", old,source.virtual_size(Axis::Y) ); }, Ordering::Less=>{ //Shrink horizontally to match aspect ratio let old=source.virtual_size(Axis::X); shrink(&mut source,Axis::X); println!( "shrank source area horizontally from {} to {} to match target aspect ratio", old,source.virtual_size(Axis::X) ); }, Ordering::Equal=>{ println!("source aspect ratio matches target aspect ratio"); }, } }else{ println!("aspect ratio correction is disabled"); } println!("mapping source area {} to target area {}",source,target); let pressure=[ config.pressure_range[0].unwrap_or(-std::f32::INFINITY), config.pressure_range[1].unwrap_or(std::f32::INFINITY), ]; let size=[ config.size_range[0].unwrap_or(-std::f32::INFINITY), config.size_range[1].unwrap_or(std::f32::INFINITY), ]; println!("clipping target to {}",config.clip); println!("only allowing touches with pressures inside {:?} and sizes inside {:?}",pressure,size); Setup{ mapping: source.normalizer().chain(&target.map(|int| int as f32).denormalizer()), clip: config.clip, pressure,size, } } fn consume(&mut self,ev: MouseMove) { if ev.pressure<self.pressure[0] || ev.pressure>self.pressure[1] {return} if ev.size<self.size[0] || ev.size>self.size[1] {return} let pos=self.mapping.apply(ev.pos); let adjusted=pair!(i=> (pos[i] as i32).max(self.clip.min[i]).min(self.clip.max[i])); MouseCursor.move_abs(adjusted[Axis::X],adjusted[Axis::Y]); } } #[derive(Deserialize,Serialize)] #[serde(default)] pub struct Config { ///The target area to be mapped, in screen pixels. pub target: Rect<i32>, ///The source area to be mapped, in normalized coordinates from `0.0` to `1.0`. pub source: Rect<f32>, ///After all transformations, clip mouse positions to this rectangle. pub clip: Rect<i32>, ///If the device screen is rotated, rotate it back to compensate. pub correct_device_orientation: bool, ///If after all transformations the source area is rotated, rotate it back to match target ///orientation (landscape or portrait). pub correct_orientation: bool, ///If the source area does not have the same aspect ratio as the target area, shrink it a bit ///in a single axis to fit. pub keep_aspect_ratio: bool, ///Only allow touches within this pressure range to go through. pub pressure_range: [Option<f32>; 2], ///Only allow touches within this size range to go through. pub size_range: [Option<f32>; 2], ///Connect to this remote. pub remote: Remote, ///When ADB port forwarding, map this port on the device. pub android_usb_port: u16,
impl Default for Config { fn default()->Config { let screen_res=get_screen_resolution(); Config{ target: Rect{min: pair!(_=>0),max: screen_res}, source: Rect{min: pair!(_=>0.05),max: pair!(_=>0.95)}, clip: Rect{min: pair!(_=>0),max: screen_res}, correct_device_orientation: true, correct_orientation: true, keep_aspect_ratio: true, pressure_range: [None; 2], size_range: [None; 2], remote: Remote::Tcp("localhost".into(),8517), android_usb_port: 8517, android_attempt_usb_connection: true, } } } impl Config { fn load_path(cfg_path: &str)->Config { println!("loading config file at '{}'",cfg_path); match File::open(&cfg_path) { Err(err)=>{ println!("failed to open config at '{}', using defaults:\n {}",cfg_path,err); let config=Config::default(); match File::create(&cfg_path) { Err(err)=>{ println!("failed to create config file on '{}':\n {}",cfg_path,err); }, Ok(mut file)=>{ let cfg=ron::ser::to_string_pretty(&config,Default::default()).expect("error serializing default config"); file.write_all(cfg.as_bytes()).expect("failed to write config file"); println!("created default config file on '{}'",cfg_path); }, } config }, Ok(file)=>{ let config=ron::de::from_reader(file).expect("malformed configuration file"); println!("loaded config file '{}'",cfg_path); config }, } } } #[derive(Deserialize)] struct MouseMove { pos: Pair<f32>, pressure: f32, size: f32, } fn get_screen_resolution()->Pair<i32> { let screenshot=screenshot::get_screenshot(0).expect("failed to get screen dimensions"); Pair([screenshot.width() as i32,screenshot.height() as i32]) } fn try_adb_forward<P: AsRef<Path>>(path: P,config: &Config)->Result<()> { use std::process::{Command}; let local_port=match config.remote { Remote::Tcp(_,port)=>port, _ => { println!("not connecting through tcp, skipping adb port forwarding"); return Ok(()) }, }; println!("attempting to adb port forward using executable on '{}'",path.as_ref().display()); let out=Command::new(path.as_ref()) .arg("forward") .arg(format!("tcp:{}",local_port)) .arg(format!("tcp:{}",config.android_usb_port)) .output(); match out { Ok(out)=>{ if out.status.success() { println!(" adb exit code indicates success"); Ok(()) }else{ println!(" adb exited with error exit code {:?}",out.status.code()); let lines=|out| for line in String::from_utf8_lossy(out).trim().lines() { println!(" {}",line.trim()); }; println!(" adb output:"); lines(&out.stdout); println!(" adb error output:"); lines(&out.stderr); println!(" device might be disconnected or usb debugging disabled"); Err("error exit code".into()) } }, Err(err)=>{ println!( " failed to run command: {}", err ); Err("failed to run command".into()) }, } } fn main() { //Parse arguments let exec_path; let cfg_path; { let mut args=env::args(); exec_path=args.next().expect("first argument should always be executable path!"); cfg_path=args.next().unwrap_or_else(|| String::from("config.txt")); } //Load configuration let config=Config::load_path(&cfg_path); //Try port forwarding using adb if config.android_attempt_usb_connection { let ok=try_adb_forward(&Path::new(&exec_path).with_file_name("adb"),&config) .or_else(|_err| try_adb_forward("adb",&config)); match ok { Ok(())=>println!( "opened communication tunnel to android device" ), Err(_err)=>println!( "failed to open communication to android device, is USB Debugging enabled?" 
), } }else{ println!("usb android device connection is disabled"); } let mut session=AbsmSession::new(config); loop { session.wait_for_event(); } /* //Create tcp stream to device //Tcp is used instead of udp because adb can only forward tcp ports println!("connecting to device at {}:{}...",config.host,config.port); let mut conn=TcpStream::connect((&*config.host,config.port)).expect("failed to connect to server"); conn.set_nodelay(true).expect("failed to enable nodelay"); conn.set_read_timeout(None).expect("failed to set timeout"); conn.set_nonblocking(false).expect("failed to set nonblocking"); println!("connected"); let mut bincode_cfg=bincode::config(); bincode_cfg.big_endian(); loop { let mut msg_type=[0; 4]; conn.read_exact(&mut msg_type).expect("failed to receive message"); match &msg_type { b""=>{ //Mousemove message let mut data=[0; 16]; conn.read_exact(&mut data).expect("failed to read message data"); let mousemove=bincode_cfg.deserialize(&data).expect("malformed mousemove message"); if let Some(ref mut setup) = setup { setup.consume(mousemove); }else{ println!("failed to process mousemove, not setup yet!"); } }, [0xAD]=>{ //Setup message let mut data=[0; 8]; conn.read_exact(&mut data).expect("failed to read setup data"); let info=bincode_cfg.deserialize(&data).expect("malformed setup message"); setup=Some(Setup::new(info,&config)); }, [ty]=>println!("invalid message type {:x}",ty), } } */ }
///Whether to attempt to do ADB port forwarding automatically. ///The android device needs to have `USB Debugging` enabled. pub android_attempt_usb_connection: bool, }
random_line_split
main.rs
extern crate byteorder; ///Used to get screen resolution. extern crate screenshot; extern crate inputbot; extern crate ron; extern crate bincode; #[macro_use] extern crate serde_derive; extern crate serde; use prelude::*; use std::{ net::{TcpStream}, io::{self,BufRead,BufReader}, fs::{File}, cmp::{Ordering}, env, ops::{self,RangeInclusive}, path::{Path}, }; use byteorder::{NetworkEndian,ByteOrder,ReadBytesExt}; use inputbot::{MouseCursor}; use rect::*; use network::{Remote}; use absm::{AbsmSession,ServerInfo}; mod prelude { pub use std::error::Error as ErrorTrait; pub type Error = Box<ErrorTrait>; pub type Result<T> = ::std::result::Result<T,Error>; pub use std::{ io::{Write,Read}, fmt,mem, }; pub use serde::{Serialize,Deserialize}; pub use network::{Connection,LocalBuffer,NetBuffer}; pub enum Never {} impl Never { fn as_never(&self)->! {unsafe{::std::hint::unreachable_unchecked()}} } impl fmt::Display for Never { fn fmt(&self,_: &mut fmt::Formatter)->fmt::Result {self.as_never()} } impl fmt::Debug for Never { fn fmt(&self,_: &mut fmt::Formatter)->fmt::Result {self.as_never()} } impl ErrorTrait for Never {} } #[macro_use] mod rect; mod network; mod absm; pub struct Setup { ///Map from input device coordinates to output client coordinates. pub mapping: Mapping, ///Specify a minimum and a maximum on the final client coordinates. pub clip: Rect<i32>, ///Specify a range of pressures. ///Events with a pressure outside this range are ignored. pub pressure: [f32; 2], ///Specify a range of sizes, similarly to `pressure`. pub size: [f32; 2], } impl Setup { fn new(info: &ServerInfo,config: &Config)->Setup { //Target area is set immutably by the config let target=config.target; //Start off with source area as the entire device screen //Source area is more mutable than target area let mut source=Rect{min: Pair([0.0; 2]),max: info.server_screen_res}; println!("device screen area: {}",source); //Correct any device rotations if config.correct_device_orientation { if source.aspect()!=target.aspect() { //Source screen should be rotated 90ยฐ counterclockwise to correct orientation source.rotate_negative(); println!("rotated 90ยฐ counterclockwise to correct device orientation"); }else{ println!("device orientation is aligned with client orientation"); } }else{ println!("device orientation correction is disabled"); } //Apply config device source area proportions let mut source=Rect{ min: source.map(|int| int as f32).denormalizer().apply(config.source.min), max: source.map(|int| int as f32).denormalizer().apply(config.source.max), }; //Correct orientation if source and target don't have matching aspects if config.correct_orientation { if source.aspect()!=target.aspect() { source.rotate_negative(); println!("rotated 90ยฐ counterclockwise to correct orientation mismatch"); }else{ println!("final orientation matches target orientation"); } }else{ println!("final orientation correction is disabled"); } //Shrink a source axis to match target aspect ratio if config.keep_aspect_ratio { let shrink=|source: &mut Rect<f32>,shrink_axis: Axis| { let fixed_axis=shrink_axis.swap(); //Get the target size of the shrink axis let target=target.virtual_size(shrink_axis) as f32*source.virtual_size(fixed_axis) / target.virtual_size(fixed_axis) as f32; source.resize_virtual_axis(shrink_axis,target); }; match target.map(|int| int as f32).aspect_ratio().partial_cmp(&source.aspect_ratio()).unwrap() { Ordering::Greater=>{ //Shrink vertically to match aspect ratio let old=source.virtual_size(Axis::Y); shrink(&mut source,Axis::Y); 
println!( "shrank source area vertically from {} to {} to match target aspect ratio", old,source.virtual_size(Axis::Y) ); }, Ordering::Less=>{ //Shrink horizontally to match aspect ratio let old=source.virtual_size(Axis::X); shrink(&mut source,Axis::X); println!( "shrank source area horizontally from {} to {} to match target aspect ratio", old,source.virtual_size(Axis::X) ); }, Ordering::Equal=>{ println!("source aspect ratio matches target aspect ratio"); }, } }else{ println!("aspect ratio correction is disabled"); } println!("mapping source area {} to target area {}",source,target); let pressure=[ config.pressure_range[0].unwrap_or(-std::f32::INFINITY), config.pressure_range[1].unwrap_or(std::f32::INFINITY), ]; let size=[ config.size_range[0].unwrap_or(-std::f32::INFINITY), config.size_range[1].unwrap_or(std::f32::INFINITY), ]; println!("clipping target to {}",config.clip); println!("only allowing touches with pressures inside {:?} and sizes inside {:?}",pressure,size); Setup{ mapping: source.normalizer().chain(&target.map(|int| int as f32).denormalizer()), clip: config.clip, pressure,size, } } fn consume(&mut self,ev: MouseMove) { if ev.pressure<self.pressure[0] || ev.pressure>self.pressure[1] {return} if ev.size<self.size[0] || ev.size>self.size[1] {return} let pos=self.mapping.apply(ev.pos); let adjusted=pair!(i=> (pos[i] as i32).max(self.clip.min[i]).min(self.clip.max[i])); MouseCursor.move_abs(adjusted[Axis::X],adjusted[Axis::Y]); } } #[derive(Deserialize,Serialize)] #[serde(default)] pub struct Config { ///The target area to be mapped, in screen pixels. pub target: Rect<i32>, ///The source area to be mapped, in normalized coordinates from `0.0` to `1.0`. pub source: Rect<f32>, ///After all transformations, clip mouse positions to this rectangle. pub clip: Rect<i32>, ///If the device screen is rotated, rotate it back to compensate. pub correct_device_orientation: bool, ///If after all transformations the source area is rotated, rotate it back to match target ///orientation (landscape or portrait). pub correct_orientation: bool, ///If the source area does not have the same aspect ratio as the target area, shrink it a bit ///in a single axis to fit. pub keep_aspect_ratio: bool, ///Only allow touches within this pressure range to go through. pub pressure_range: [Option<f32>; 2], ///Only allow touches within this size range to go through. pub size_range: [Option<f32>; 2], ///Connect to this remote. pub remote: Remote, ///When ADB port forwarding, map this port on the device. pub android_usb_port: u16, ///Whether to attempt to do ADB port forwarding automatically. ///The android device needs to have `USB Debugging` enabled. 
pub android_attempt_usb_connection: bool, } impl Default for Config { fn default()->Config { let screen_res=get_screen_resolution(); Config{ target: Rect{min: pair!(_=>0),max: screen_res}, source: Rect{min: pair!(_=>0.05),max: pair!(_=>0.95)}, clip: Rect{min: pair!(_=>0),max: screen_res}, correct_device_orientation: true, correct_orientation: true, keep_aspect_ratio: true, pressure_range: [None; 2], size_range: [None; 2], remote: Remote::Tcp("localhost".into(),8517), android_usb_port: 8517, android_attempt_usb_connection: true, } } } impl Config { fn load_path(cfg_path: &str)->Config { println!("loading config file at '{}'",cfg_path); match File::open(&cfg_path) { Err(err)=>{ println!("failed to open config at '{}', using defaults:\n {}",cfg_path,err); let config=Config::default(); match File::create(&cfg_path) { Err(err)=>{ println!("failed to create config file on '{}':\n {}",cfg_path,err); }, Ok(mut file)=>{ let cfg=ron::ser::to_string_pretty(&config,Default::default()).expect("error serializing default config"); file.write_all(cfg.as_bytes()).expect("failed to write config file"); println!("created default config file on '{}'",cfg_path); }, } config }, Ok(file)=>{ let config=ron::de::from_reader(file).expect("malformed configuration file"); println!("loaded config file '{}'",cfg_path); config }, } } } #[derive(Deserialize)] struct MouseMove { pos: Pair<f32>, pressure: f32, size: f32, } fn get_screen_resolution()->Pair<i32> { let screenshot=screenshot::get_screenshot(0).expect("failed to get screen dimensions"); Pair([screenshot.width() as i32,screenshot.height() as i32]) } fn try_adb_forward<P: AsRef<Path>>(path: P,config: &Config)->Result<()> { use std::process::{Command}; let local_port=match config.remote { Remote::Tcp(_,port)=>port, _ => { println!("not connecting through tcp, skipping adb port forwarding"); return Ok(()) }, }; println!("attempting to adb port forward using executable on '{}'",path.as_ref().display()); let out=Command::new(path.as_ref()) .arg("forward") .arg(format!("tcp:{}",local_port)) .arg(format!("tcp:{}",config.android_usb_port)) .output(); match out { Ok(out)=>{ if out.status.success() { println!(" adb exit code indicates success"); Ok(()) }else{ println!(" adb exited with error exit code {:?}",out.status.code()); let lines=|out| for line in String::from_utf8_lossy(out).trim().lines() { println!(" {}",line.trim()); }; println!(" adb output:"); lines(&out.stdout); println!(" adb error output:"); lines(&out.stderr); println!(" device might be disconnected or usb debugging disabled"); Err("error exit code".into()) } }, Err(err)=>{ println!( " failed to run command: {}", err ); Err("failed to run command".into()) }, } } fn main() {
//Parse arguments let exec_path; let cfg_path; { let mut args=env::args(); exec_path=args.next().expect("first argument should always be executable path!"); cfg_path=args.next().unwrap_or_else(|| String::from("config.txt")); } //Load configuration let config=Config::load_path(&cfg_path); //Try port forwarding using adb if config.android_attempt_usb_connection { let ok=try_adb_forward(&Path::new(&exec_path).with_file_name("adb"),&config) .or_else(|_err| try_adb_forward("adb",&config)); match ok { Ok(())=>println!( "opened communication tunnel to android device" ), Err(_err)=>println!( "failed to open communication to android device, is USB Debugging enabled?" ), } }else{ println!("usb android device connection is disabled"); } let mut session=AbsmSession::new(config); loop { session.wait_for_event(); } /* //Create tcp stream to device //Tcp is used instead of udp because adb can only forward tcp ports println!("connecting to device at {}:{}...",config.host,config.port); let mut conn=TcpStream::connect((&*config.host,config.port)).expect("failed to connect to server"); conn.set_nodelay(true).expect("failed to enable nodelay"); conn.set_read_timeout(None).expect("failed to set timeout"); conn.set_nonblocking(false).expect("failed to set nonblocking"); println!("connected"); let mut bincode_cfg=bincode::config(); bincode_cfg.big_endian(); loop { let mut msg_type=[0; 4]; conn.read_exact(&mut msg_type).expect("failed to receive message"); match &msg_type { b""=>{ //Mousemove message let mut data=[0; 16]; conn.read_exact(&mut data).expect("failed to read message data"); let mousemove=bincode_cfg.deserialize(&data).expect("malformed mousemove message"); if let Some(ref mut setup) = setup { setup.consume(mousemove); }else{ println!("failed to process mousemove, not setup yet!"); } }, [0xAD]=>{ //Setup message let mut data=[0; 8]; conn.read_exact(&mut data).expect("failed to read setup data"); let info=bincode_cfg.deserialize(&data).expect("malformed setup message"); setup=Some(Setup::new(info,&config)); }, [ty]=>println!("invalid message type {:x}",ty), } } */ }
identifier_body
main.rs
extern crate byteorder; ///Used to get screen resolution. extern crate screenshot; extern crate inputbot; extern crate ron; extern crate bincode; #[macro_use] extern crate serde_derive; extern crate serde; use prelude::*; use std::{ net::{TcpStream}, io::{self,BufRead,BufReader}, fs::{File}, cmp::{Ordering}, env, ops::{self,RangeInclusive}, path::{Path}, }; use byteorder::{NetworkEndian,ByteOrder,ReadBytesExt}; use inputbot::{MouseCursor}; use rect::*; use network::{Remote}; use absm::{AbsmSession,ServerInfo}; mod prelude { pub use std::error::Error as ErrorTrait; pub type Error = Box<ErrorTrait>; pub type Result<T> = ::std::result::Result<T,Error>; pub use std::{ io::{Write,Read}, fmt,mem, }; pub use serde::{Serialize,Deserialize}; pub use network::{Connection,LocalBuffer,NetBuffer}; pub enum Never {} impl Never { fn as_never(&self)->! {unsafe{::std::hint::unreachable_unchecked()}} } impl fmt::Display for Never { fn fmt(&self,_: &mut fmt::Formatter)->fmt::Result {self.as_never()} } impl fmt::Debug for Never { fn fmt(&self,_: &mut fmt::Formatter)->fmt::Result {self.as_never()} } impl ErrorTrait for Never {} } #[macro_use] mod rect; mod network; mod absm; pub struct Setup { ///Map from input device coordinates to output client coordinates. pub mapping: Mapping, ///Specify a minimum and a maximum on the final client coordinates. pub clip: Rect<i32>, ///Specify a range of pressures. ///Events with a pressure outside this range are ignored. pub pressure: [f32; 2], ///Specify a range of sizes, similarly to `pressure`. pub size: [f32; 2], } impl Setup { fn new(info: &ServerInfo,config: &Config)->Setup { //Target area is set immutably by the config let target=config.target; //Start off with source area as the entire device screen //Source area is more mutable than target area let mut source=Rect{min: Pair([0.0; 2]),max: info.server_screen_res}; println!("device screen area: {}",source); //Correct any device rotations if config.correct_device_orientation { if source.aspect()!=target.aspect() { //Source screen should be rotated 90° counterclockwise to correct orientation source.rotate_negative(); println!("rotated 90° counterclockwise to correct device orientation"); }else{ println!("device orientation is aligned with client orientation"); } }else{ println!("device orientation correction is disabled"); } //Apply config device source area proportions let mut source=Rect{ min: source.map(|int| int as f32).denormalizer().apply(config.source.min), max: source.map(|int| int as f32).denormalizer().apply(config.source.max), }; //Correct orientation if source and target don't have matching aspects if config.correct_orientation { if source.aspect()!=target.aspect() { source.rotate_negative(); println!("rotated 90° counterclockwise to correct orientation mismatch"); }else{ println!("final orientation matches target orientation"); } }else{ println!("final orientation correction is disabled"); } //Shrink a source axis to match target aspect ratio if config.keep_aspect_ratio { let shrink=|source: &mut Rect<f32>,shrink_axis: Axis| { let fixed_axis=shrink_axis.swap(); //Get the target size of the shrink axis let target=target.virtual_size(shrink_axis) as f32*source.virtual_size(fixed_axis) / target.virtual_size(fixed_axis) as f32; source.resize_virtual_axis(shrink_axis,target); }; match target.map(|int| int as f32).aspect_ratio().partial_cmp(&source.aspect_ratio()).unwrap() { Ordering::Greater=>{ //Shrink vertically to match aspect ratio let old=source.virtual_size(Axis::Y); shrink(&mut source,Axis::Y);
println!( "shrank source area vertically from {} to {} to match target aspect ratio", old,source.virtual_size(Axis::Y) ); }, Ordering::Less=>{ //Shrink horizontally to match aspect ratio let old=source.virtual_size(Axis::X); shrink(&mut source,Axis::X); println!( "shrank source area horizontally from {} to {} to match target aspect ratio", old,source.virtual_size(Axis::X) ); }, Ordering::Equal=>{ println!("source aspect ratio matches target aspect ratio"); }, } }else{ println!("aspect ratio correction is disabled"); } println!("mapping source area {} to target area {}",source,target); let pressure=[ config.pressure_range[0].unwrap_or(-std::f32::INFINITY), config.pressure_range[1].unwrap_or(std::f32::INFINITY), ]; let size=[ config.size_range[0].unwrap_or(-std::f32::INFINITY), config.size_range[1].unwrap_or(std::f32::INFINITY), ]; println!("clipping target to {}",config.clip); println!("only allowing touches with pressures inside {:?} and sizes inside {:?}",pressure,size); Setup{ mapping: source.normalizer().chain(&target.map(|int| int as f32).denormalizer()), clip: config.clip, pressure,size, } } fn consume(&mut self,ev: MouseMove) { if ev.pressure<self.pressure[0] || ev.pressure>self.pressure[1] {return} if ev.size<self.size[0] || ev.size>self.size[1] {return} let pos=self.mapping.apply(ev.pos); let adjusted=pair!(i=> (pos[i] as i32).max(self.clip.min[i]).min(self.clip.max[i])); MouseCursor.move_abs(adjusted[Axis::X],adjusted[Axis::Y]); } } #[derive(Deserialize,Serialize)] #[serde(default)] pub struct Config { ///The target area to be mapped, in screen pixels. pub target: Rect<i32>, ///The source area to be mapped, in normalized coordinates from `0.0` to `1.0`. pub source: Rect<f32>, ///After all transformations, clip mouse positions to this rectangle. pub clip: Rect<i32>, ///If the device screen is rotated, rotate it back to compensate. pub correct_device_orientation: bool, ///If after all transformations the source area is rotated, rotate it back to match target ///orientation (landscape or portrait). pub correct_orientation: bool, ///If the source area does not have the same aspect ratio as the target area, shrink it a bit ///in a single axis to fit. pub keep_aspect_ratio: bool, ///Only allow touches within this pressure range to go through. pub pressure_range: [Option<f32>; 2], ///Only allow touches within this size range to go through. pub size_range: [Option<f32>; 2], ///Connect to this remote. pub remote: Remote, ///When ADB port forwarding, map this port on the device. pub android_usb_port: u16, ///Whether to attempt to do ADB port forwarding automatically. ///The android device needs to have `USB Debugging` enabled. 
pub android_attempt_usb_connection: bool, } impl Default for Config { fn default()->Config { let screen_res=get_screen_resolution(); Config{ target: Rect{min: pair!(_=>0),max: screen_res}, source: Rect{min: pair!(_=>0.05),max: pair!(_=>0.95)}, clip: Rect{min: pair!(_=>0),max: screen_res}, correct_device_orientation: true, correct_orientation: true, keep_aspect_ratio: true, pressure_range: [None; 2], size_range: [None; 2], remote: Remote::Tcp("localhost".into(),8517), android_usb_port: 8517, android_attempt_usb_connection: true, } } } impl Config { fn load_path(cfg_path: &str)->Config { println!("loading config file at '{}'",cfg_path); match File::open(&cfg_path) { Err(err)=>{ println!("failed to open config at '{}', using defaults:\n {}",cfg_path,err); let config=Config::default(); match File::create(&cfg_path) { Err(err)=>{ println!("failed to create config file on '{}':\n {}",cfg_path,err); }, Ok(mut file)=>{ let cfg=ron::ser::to_string_pretty(&config,Default::default()).expect("error serializing default config"); file.write_all(cfg.as_bytes()).expect("failed to write config file"); println!("created default config file on '{}'",cfg_path); }, } config }, Ok(file)=>{ let config=ron::de::from_reader(file).expect("malformed configuration file"); println!("loaded config file '{}'",cfg_path); config }, } } } #[derive(Deserialize)] struct MouseMove { pos: Pair<f32>, pressure: f32, size: f32, } fn get_screen_resolution()->Pair<i32> { let screenshot=screenshot::get_screenshot(0).expect("failed to get screen dimensions"); Pair([screenshot.width() as i32,screenshot.height() as i32]) } fn try
AsRef<Path>>(path: P,config: &Config)->Result<()> { use std::process::{Command}; let local_port=match config.remote { Remote::Tcp(_,port)=>port, _ => { println!("not connecting through tcp, skipping adb port forwarding"); return Ok(()) }, }; println!("attempting to adb port forward using executable on '{}'",path.as_ref().display()); let out=Command::new(path.as_ref()) .arg("forward") .arg(format!("tcp:{}",local_port)) .arg(format!("tcp:{}",config.android_usb_port)) .output(); match out { Ok(out)=>{ if out.status.success() { println!(" adb exit code indicates success"); Ok(()) }else{ println!(" adb exited with error exit code {:?}",out.status.code()); let lines=|out| for line in String::from_utf8_lossy(out).trim().lines() { println!(" {}",line.trim()); }; println!(" adb output:"); lines(&out.stdout); println!(" adb error output:"); lines(&out.stderr); println!(" device might be disconnected or usb debugging disabled"); Err("error exit code".into()) } }, Err(err)=>{ println!( " failed to run command: {}", err ); Err("failed to run command".into()) }, } } fn main() { //Parse arguments let exec_path; let cfg_path; { let mut args=env::args(); exec_path=args.next().expect("first argument should always be executable path!"); cfg_path=args.next().unwrap_or_else(|| String::from("config.txt")); } //Load configuration let config=Config::load_path(&cfg_path); //Try port forwarding using adb if config.android_attempt_usb_connection { let ok=try_adb_forward(&Path::new(&exec_path).with_file_name("adb"),&config) .or_else(|_err| try_adb_forward("adb",&config)); match ok { Ok(())=>println!( "opened communication tunnel to android device" ), Err(_err)=>println!( "failed to open communication to android device, is USB Debugging enabled?" ), } }else{ println!("usb android device connection is disabled"); } let mut session=AbsmSession::new(config); loop { session.wait_for_event(); } /* //Create tcp stream to device //Tcp is used instead of udp because adb can only forward tcp ports println!("connecting to device at {}:{}...",config.host,config.port); let mut conn=TcpStream::connect((&*config.host,config.port)).expect("failed to connect to server"); conn.set_nodelay(true).expect("failed to enable nodelay"); conn.set_read_timeout(None).expect("failed to set timeout"); conn.set_nonblocking(false).expect("failed to set nonblocking"); println!("connected"); let mut bincode_cfg=bincode::config(); bincode_cfg.big_endian(); loop { let mut msg_type=[0; 4]; conn.read_exact(&mut msg_type).expect("failed to receive message"); match &msg_type { b""=>{ //Mousemove message let mut data=[0; 16]; conn.read_exact(&mut data).expect("failed to read message data"); let mousemove=bincode_cfg.deserialize(&data).expect("malformed mousemove message"); if let Some(ref mut setup) = setup { setup.consume(mousemove); }else{ println!("failed to process mousemove, not setup yet!"); } }, [0xAD]=>{ //Setup message let mut data=[0; 8]; conn.read_exact(&mut data).expect("failed to read setup data"); let info=bincode_cfg.deserialize(&data).expect("malformed setup message"); setup=Some(Setup::new(info,&config)); }, [ty]=>println!("invalid message type {:x}",ty), } } */ }
_adb_forward<P:
identifier_name
offset-monitor.rs
extern crate kafka; extern crate getopts; extern crate env_logger; extern crate time; #[macro_use] extern crate error_chain; use std::ascii::AsciiExt; use std::cmp; use std::env; use std::io::{self, stdout, stderr, BufWriter, Write}; use std::process; use std::thread; use std::time as stdtime; use kafka::client::{KafkaClient, FetchOffset, GroupOffsetStorage}; /// A very simple offset monitor for a particular topic able to show /// the lag for a particular consumer group. Dumps the offset/lag of /// the monitored topic/group to stdout every few seconds. fn main() { env_logger::init().unwrap(); macro_rules! abort { ($e:expr) => {{ let mut out = stderr(); let _ = write!(out, "error: {}\n", $e); let _ = out.flush(); process::exit(1); }} }; let cfg = match Config::from_cmdline() { Ok(cfg) => cfg, Err(e) => abort!(e), }; if let Err(e) = run(cfg) { abort!(e); } } fn run(cfg: Config) -> Result<()> { let mut client = KafkaClient::new(cfg.brokers.clone()); client.set_group_offset_storage(cfg.offset_storage); try!(client.load_metadata_all()); // ~ if no topic specified, print all available and be done. if cfg.topic.is_empty() { let ts = client.topics(); let num_topics = ts.len(); if num_topics == 0 { bail!("no topics available"); } let mut names: Vec<&str> = Vec::with_capacity(ts.len()); names.extend(ts.names()); names.sort(); let mut buf = BufWriter::with_capacity(1024, stdout()); for name in names { let _ = write!(buf, "topic: {}\n", name); } bail!("choose a topic"); } // ~ otherwise let's loop over the topic partition offsets let num_partitions = match client.topics().partitions(&cfg.topic) { None => bail!(format!("no such topic: {}", &cfg.topic)), Some(partitions) => partitions.len(), }; let mut state = State::new(num_partitions, cfg.commited_not_consumed); let mut printer = Printer::new(stdout(), &cfg); try!(printer.print_head(num_partitions)); // ~ initialize the state let mut first_time = true; loop { let t = time::now(); try!(state.update_partitions(&mut client, &cfg.topic, &cfg.group)); if first_time { state.curr_to_prev(); first_time = false; } try!(printer.print_offsets(&t, &state.offsets)); thread::sleep(cfg.period); } } #[derive(Copy, Clone)] struct Partition { prev_latest: i64, curr_latest: i64, curr_lag: i64, } impl Default for Partition { fn default() -> Self { Partition { prev_latest: -1, curr_latest: -1, curr_lag: -1, } } } struct State { offsets: Vec<Partition>, lag_decr: i64, } impl State { fn new(num_partitions: usize, commited_not_consumed: bool) -> State { State { offsets: vec![Default::default(); num_partitions], lag_decr: if commited_not_consumed { 0 } else { 1 }, } } fn update_partitions( &mut self, client: &mut KafkaClient, topic: &str, group: &str, ) -> Result<()> { // ~ get the latest topic offsets let latests = try!(client.fetch_topic_offsets(topic, FetchOffset::Latest)); for l in latests { let off = self.offsets.get_mut(l.partition as usize).expect( "[topic offset] non-existent partition", ); off.prev_latest = off.curr_latest; off.curr_latest = l.offset; } if !group.is_empty() { // ~ get the current group offsets let groups = try!(client.fetch_group_topic_offsets(group, topic)); for g in groups { let off = self.offsets.get_mut(g.partition as usize).expect( "[group offset] non-existent partition", ); // ~ it's quite likely that we fetched group offsets // which are a bit ahead of the topic's latest offset // since we issued the fetch-latest-offset request // earlier than the request for the group offsets off.curr_lag = cmp::max(0, off.curr_latest - g.offset - 
self.lag_decr); } } Ok(()) } fn curr_to_prev(&mut self) { for o in &mut self.offsets { o.prev_latest = o.curr_latest; } } } struct Printer<W> { out: W, timefmt: String, fmt_buf: String, out_buf: String, time_width: usize, offset_width: usize, diff_width: usize, lag_width: usize, print_diff: bool, print_lag: bool, print_summary: bool, } impl<W: Write> Printer<W> { fn new(out: W, cfg: &Config) -> Printer<W> { Printer { out: out, timefmt: "%H:%M:%S".into(), fmt_buf: String::with_capacity(30), out_buf: String::with_capacity(160), time_width: 10, offset_width: 11, diff_width: 8, lag_width: 6, print_diff: cfg.diff, print_lag: !cfg.group.is_empty(), print_summary: cfg.summary, } } fn print_head(&mut self, num_partitions: usize) -> Result<()> { self.out_buf.clear(); { // ~ format use std::fmt::Write; let _ = write!(self.out_buf, "{1:<0$}", self.time_width, "time"); if self.print_summary { let _ = write!(self.out_buf, " {1:>0$}", self.offset_width, "topic"); if self.print_diff { let _ = write!(self.out_buf, " [{1:>0$}]", self.diff_width, "growth"); } if self.print_lag { let _ = write!(self.out_buf, " {1:0$}", self.lag_width, "(lag)"); } } else { for i in 0..num_partitions { self.fmt_buf.clear(); let _ = write!(self.fmt_buf, "p-{}", i); let _ = write!(self.out_buf, " {1:>0$}", self.offset_width, self.fmt_buf); if self.print_diff { let _ = write!(self.out_buf, " [{1:>0$}]", self.diff_width, "growth"); } if self.print_lag { let _ = write!(self.out_buf, " {1:0$}", self.lag_width, "(lag)"); } } } self.out_buf.push('\n'); } { // ~ print try!(self.out.write_all(self.out_buf.as_bytes())); Ok(()) } } fn print_offsets(&mut self, time: &time::Tm, partitions: &[Partition]) -> Result<()> { self.out_buf.clear(); { // ~ format use std::fmt::Write; self.fmt_buf.clear(); let _ = write!(self.fmt_buf, "{}", time.strftime(&self.timefmt).expect("invalid timefmt")); let _ = write!(self.out_buf, "{1:<0$}", self.time_width, self.fmt_buf); if self.print_summary { let mut prev_latest = 0; let mut curr_latest = 0; let mut curr_lag = 0; for p in partitions { macro_rules! cond_add { ($v:ident) => { if $v != -1 { if p.$v < 0 { $v = -1; } else { $v += p.$v; } } } }; cond_add!(prev_latest); cond_add!(curr_latest); cond_add!(curr_lag); } let _ = write!(self.out_buf, " {1:>0$}", self.offset_width, curr_latest); if self.print_diff { self.fmt_buf.clear(); let _ = write!(self.fmt_buf, "{:+}", curr_latest - prev_latest); let _ = write!(self.out_buf, " [{1:>0$}]", self.diff_width, self.fmt_buf); } if self.print_lag {
let _ = write!(self.fmt_buf, "({})", curr_lag); let _ = write!(self.out_buf, " {1:<0$}", self.lag_width, self.fmt_buf); } } else { for p in partitions { let _ = write!(self.out_buf, " {1:>0$}", self.offset_width, p.curr_latest); if self.print_diff { self.fmt_buf.clear(); let _ = write!(self.fmt_buf, "{:+}", p.curr_latest - p.prev_latest); let _ = write!(self.out_buf, " [{1:>0$}]", self.diff_width, self.fmt_buf); } if self.print_lag { self.fmt_buf.clear(); let _ = write!(self.fmt_buf, "({})", p.curr_lag); let _ = write!(self.out_buf, " {1:<0$}", self.lag_width, self.fmt_buf); } } } } self.out_buf.push('\n'); try!(self.out.write_all(self.out_buf.as_bytes())); Ok(()) } } // -------------------------------------------------------------------- struct Config { brokers: Vec<String>, topic: String, group: String, offset_storage: GroupOffsetStorage, period: stdtime::Duration, commited_not_consumed: bool, summary: bool, diff: bool, } impl Config { fn from_cmdline() -> Result<Config> { let args: Vec<String> = env::args().collect(); let mut opts = getopts::Options::new(); opts.optflag("h", "help", "Print this help screen"); opts.optopt("", "brokers", "Specify kafka bootstrap brokers (comma separated)", "HOSTS"); opts.optopt("", "topic", "Specify the topic to monitor", "TOPIC"); opts.optopt("", "group", "Specify the group to monitor", "GROUP"); opts.optopt("", "storage", "Specify offset store [zookeeper, kafka]", "STORE"); opts.optopt("", "sleep", "Specify the sleep time", "SECS"); opts.optflag("", "partitions", "Print each partition instead of the summary"); opts.optflag("", "no-growth", "Don't print offset growth"); opts.optflag( "", "committed-not-yet-consumed", "Assume commited group offsets specify \ messages the group will start consuming \ (including those at these offsets)", ); let m = match opts.parse(&args[1..]) { Ok(m) => m, Err(e) => bail!(e), }; if m.opt_present("help") { let brief = format!("{} [options]", args[0]); bail!(opts.usage(&brief)); } let mut offset_storage = GroupOffsetStorage::Zookeeper; if let Some(s) = m.opt_str("storage") { if s.eq_ignore_ascii_case("zookeeper") { offset_storage = GroupOffsetStorage::Zookeeper; } else if s.eq_ignore_ascii_case("kafka") { offset_storage = GroupOffsetStorage::Kafka; } else { bail!(format!("unknown offset store: {}", s)); } } let mut period = stdtime::Duration::from_secs(5); if let Some(s) = m.opt_str("sleep") { match s.parse::<u64>() { Ok(n) if n != 0 => period = stdtime::Duration::from_secs(n), _ => bail!(format!("not a number greater than zero: {}", s)), } } Ok(Config { brokers: m.opt_str("brokers") .unwrap_or_else(|| "localhost:9092".to_owned()) .split(',') .map(|s| s.trim().to_owned()) .collect(), topic: m.opt_str("topic").unwrap_or_else(|| String::new()), group: m.opt_str("group").unwrap_or_else(|| String::new()), offset_storage: offset_storage, period: period, commited_not_consumed: m.opt_present("committed-not-yet-consumed"), summary: !m.opt_present("partitions"), diff: !m.opt_present("no-growth"), }) } } // -------------------------------------------------------------------- error_chain! { foreign_links { Kafka(kafka::error::Error); Io(io::Error); Opt(getopts::Fail); } }
self.fmt_buf.clear();
random_line_split
offset-monitor.rs
extern crate kafka; extern crate getopts; extern crate env_logger; extern crate time; #[macro_use] extern crate error_chain; use std::ascii::AsciiExt; use std::cmp; use std::env; use std::io::{self, stdout, stderr, BufWriter, Write}; use std::process; use std::thread; use std::time as stdtime; use kafka::client::{KafkaClient, FetchOffset, GroupOffsetStorage}; /// A very simple offset monitor for a particular topic able to show /// the lag for a particular consumer group. Dumps the offset/lag of /// the monitored topic/group to stdout every few seconds. fn main() { env_logger::init().unwrap(); macro_rules! abort { ($e:expr) => {{ let mut out = stderr(); let _ = write!(out, "error: {}\n", $e); let _ = out.flush(); process::exit(1); }} }; let cfg = match Config::from_cmdline() { Ok(cfg) => cfg, Err(e) => abort!(e), }; if let Err(e) = run(cfg) { abort!(e); } } fn run(cfg: Config) -> Result<()> { let mut client = KafkaClient::new(cfg.brokers.clone()); client.set_group_offset_storage(cfg.offset_storage); try!(client.load_metadata_all()); // ~ if no topic specified, print all available and be done. if cfg.topic.is_empty() { let ts = client.topics(); let num_topics = ts.len(); if num_topics == 0 { bail!("no topics available"); } let mut names: Vec<&str> = Vec::with_capacity(ts.len()); names.extend(ts.names()); names.sort(); let mut buf = BufWriter::with_capacity(1024, stdout()); for name in names { let _ = write!(buf, "topic: {}\n", name); } bail!("choose a topic"); } // ~ otherwise let's loop over the topic partition offsets let num_partitions = match client.topics().partitions(&cfg.topic) { None => bail!(format!("no such topic: {}", &cfg.topic)), Some(partitions) => partitions.len(), }; let mut state = State::new(num_partitions, cfg.commited_not_consumed); let mut printer = Printer::new(stdout(), &cfg); try!(printer.print_head(num_partitions)); // ~ initialize the state let mut first_time = true; loop { let t = time::now(); try!(state.update_partitions(&mut client, &cfg.topic, &cfg.group)); if first_time { state.curr_to_prev(); first_time = false; } try!(printer.print_offsets(&t, &state.offsets)); thread::sleep(cfg.period); } } #[derive(Copy, Clone)] struct
{ prev_latest: i64, curr_latest: i64, curr_lag: i64, } impl Default for Partition { fn default() -> Self { Partition { prev_latest: -1, curr_latest: -1, curr_lag: -1, } } } struct State { offsets: Vec<Partition>, lag_decr: i64, } impl State { fn new(num_partitions: usize, commited_not_consumed: bool) -> State { State { offsets: vec![Default::default(); num_partitions], lag_decr: if commited_not_consumed { 0 } else { 1 }, } } fn update_partitions( &mut self, client: &mut KafkaClient, topic: &str, group: &str, ) -> Result<()> { // ~ get the latest topic offsets let latests = try!(client.fetch_topic_offsets(topic, FetchOffset::Latest)); for l in latests { let off = self.offsets.get_mut(l.partition as usize).expect( "[topic offset] non-existent partition", ); off.prev_latest = off.curr_latest; off.curr_latest = l.offset; } if !group.is_empty() { // ~ get the current group offsets let groups = try!(client.fetch_group_topic_offsets(group, topic)); for g in groups { let off = self.offsets.get_mut(g.partition as usize).expect( "[group offset] non-existent partition", ); // ~ it's quite likely that we fetched group offsets // which are a bit ahead of the topic's latest offset // since we issued the fetch-latest-offset request // earlier than the request for the group offsets off.curr_lag = cmp::max(0, off.curr_latest - g.offset - self.lag_decr); } } Ok(()) } fn curr_to_prev(&mut self) { for o in &mut self.offsets { o.prev_latest = o.curr_latest; } } } struct Printer<W> { out: W, timefmt: String, fmt_buf: String, out_buf: String, time_width: usize, offset_width: usize, diff_width: usize, lag_width: usize, print_diff: bool, print_lag: bool, print_summary: bool, } impl<W: Write> Printer<W> { fn new(out: W, cfg: &Config) -> Printer<W> { Printer { out: out, timefmt: "%H:%M:%S".into(), fmt_buf: String::with_capacity(30), out_buf: String::with_capacity(160), time_width: 10, offset_width: 11, diff_width: 8, lag_width: 6, print_diff: cfg.diff, print_lag: !cfg.group.is_empty(), print_summary: cfg.summary, } } fn print_head(&mut self, num_partitions: usize) -> Result<()> { self.out_buf.clear(); { // ~ format use std::fmt::Write; let _ = write!(self.out_buf, "{1:<0$}", self.time_width, "time"); if self.print_summary { let _ = write!(self.out_buf, " {1:>0$}", self.offset_width, "topic"); if self.print_diff { let _ = write!(self.out_buf, " [{1:>0$}]", self.diff_width, "growth"); } if self.print_lag { let _ = write!(self.out_buf, " {1:0$}", self.lag_width, "(lag)"); } } else { for i in 0..num_partitions { self.fmt_buf.clear(); let _ = write!(self.fmt_buf, "p-{}", i); let _ = write!(self.out_buf, " {1:>0$}", self.offset_width, self.fmt_buf); if self.print_diff { let _ = write!(self.out_buf, " [{1:>0$}]", self.diff_width, "growth"); } if self.print_lag { let _ = write!(self.out_buf, " {1:0$}", self.lag_width, "(lag)"); } } } self.out_buf.push('\n'); } { // ~ print try!(self.out.write_all(self.out_buf.as_bytes())); Ok(()) } } fn print_offsets(&mut self, time: &time::Tm, partitions: &[Partition]) -> Result<()> { self.out_buf.clear(); { // ~ format use std::fmt::Write; self.fmt_buf.clear(); let _ = write!(self.fmt_buf, "{}", time.strftime(&self.timefmt).expect("invalid timefmt")); let _ = write!(self.out_buf, "{1:<0$}", self.time_width, self.fmt_buf); if self.print_summary { let mut prev_latest = 0; let mut curr_latest = 0; let mut curr_lag = 0; for p in partitions { macro_rules! 
cond_add { ($v:ident) => { if $v != -1 { if p.$v < 0 { $v = -1; } else { $v += p.$v; } } } }; cond_add!(prev_latest); cond_add!(curr_latest); cond_add!(curr_lag); } let _ = write!(self.out_buf, " {1:>0$}", self.offset_width, curr_latest); if self.print_diff { self.fmt_buf.clear(); let _ = write!(self.fmt_buf, "{:+}", curr_latest - prev_latest); let _ = write!(self.out_buf, " [{1:>0$}]", self.diff_width, self.fmt_buf); } if self.print_lag { self.fmt_buf.clear(); let _ = write!(self.fmt_buf, "({})", curr_lag); let _ = write!(self.out_buf, " {1:<0$}", self.lag_width, self.fmt_buf); } } else { for p in partitions { let _ = write!(self.out_buf, " {1:>0$}", self.offset_width, p.curr_latest); if self.print_diff { self.fmt_buf.clear(); let _ = write!(self.fmt_buf, "{:+}", p.curr_latest - p.prev_latest); let _ = write!(self.out_buf, " [{1:>0$}]", self.diff_width, self.fmt_buf); } if self.print_lag { self.fmt_buf.clear(); let _ = write!(self.fmt_buf, "({})", p.curr_lag); let _ = write!(self.out_buf, " {1:<0$}", self.lag_width, self.fmt_buf); } } } } self.out_buf.push('\n'); try!(self.out.write_all(self.out_buf.as_bytes())); Ok(()) } } // -------------------------------------------------------------------- struct Config { brokers: Vec<String>, topic: String, group: String, offset_storage: GroupOffsetStorage, period: stdtime::Duration, commited_not_consumed: bool, summary: bool, diff: bool, } impl Config { fn from_cmdline() -> Result<Config> { let args: Vec<String> = env::args().collect(); let mut opts = getopts::Options::new(); opts.optflag("h", "help", "Print this help screen"); opts.optopt("", "brokers", "Specify kafka bootstrap brokers (comma separated)", "HOSTS"); opts.optopt("", "topic", "Specify the topic to monitor", "TOPIC"); opts.optopt("", "group", "Specify the group to monitor", "GROUP"); opts.optopt("", "storage", "Specify offset store [zookeeper, kafka]", "STORE"); opts.optopt("", "sleep", "Specify the sleep time", "SECS"); opts.optflag("", "partitions", "Print each partition instead of the summary"); opts.optflag("", "no-growth", "Don't print offset growth"); opts.optflag( "", "committed-not-yet-consumed", "Assume commited group offsets specify \ messages the group will start consuming \ (including those at these offsets)", ); let m = match opts.parse(&args[1..]) { Ok(m) => m, Err(e) => bail!(e), }; if m.opt_present("help") { let brief = format!("{} [options]", args[0]); bail!(opts.usage(&brief)); } let mut offset_storage = GroupOffsetStorage::Zookeeper; if let Some(s) = m.opt_str("storage") { if s.eq_ignore_ascii_case("zookeeper") { offset_storage = GroupOffsetStorage::Zookeeper; } else if s.eq_ignore_ascii_case("kafka") { offset_storage = GroupOffsetStorage::Kafka; } else { bail!(format!("unknown offset store: {}", s)); } } let mut period = stdtime::Duration::from_secs(5); if let Some(s) = m.opt_str("sleep") { match s.parse::<u64>() { Ok(n) if n != 0 => period = stdtime::Duration::from_secs(n), _ => bail!(format!("not a number greater than zero: {}", s)), } } Ok(Config { brokers: m.opt_str("brokers") .unwrap_or_else(|| "localhost:9092".to_owned()) .split(',') .map(|s| s.trim().to_owned()) .collect(), topic: m.opt_str("topic").unwrap_or_else(|| String::new()), group: m.opt_str("group").unwrap_or_else(|| String::new()), offset_storage: offset_storage, period: period, commited_not_consumed: m.opt_present("committed-not-yet-consumed"), summary: !m.opt_present("partitions"), diff: !m.opt_present("no-growth"), }) } } // -------------------------------------------------------------------- 
error_chain! { foreign_links { Kafka(kafka::error::Error); Io(io::Error); Opt(getopts::Fail); } }
Partition
identifier_name
offset-monitor.rs
extern crate kafka; extern crate getopts; extern crate env_logger; extern crate time; #[macro_use] extern crate error_chain; use std::ascii::AsciiExt; use std::cmp; use std::env; use std::io::{self, stdout, stderr, BufWriter, Write}; use std::process; use std::thread; use std::time as stdtime; use kafka::client::{KafkaClient, FetchOffset, GroupOffsetStorage}; /// A very simple offset monitor for a particular topic able to show /// the lag for a particular consumer group. Dumps the offset/lag of /// the monitored topic/group to stdout every few seconds. fn main() { env_logger::init().unwrap(); macro_rules! abort { ($e:expr) => {{ let mut out = stderr(); let _ = write!(out, "error: {}\n", $e); let _ = out.flush(); process::exit(1); }} }; let cfg = match Config::from_cmdline() { Ok(cfg) => cfg, Err(e) => abort!(e), }; if let Err(e) = run(cfg) { abort!(e); } } fn run(cfg: Config) -> Result<()> { let mut client = KafkaClient::new(cfg.brokers.clone()); client.set_group_offset_storage(cfg.offset_storage); try!(client.load_metadata_all()); // ~ if no topic specified, print all available and be done. if cfg.topic.is_empty() { let ts = client.topics(); let num_topics = ts.len(); if num_topics == 0 { bail!("no topics available"); } let mut names: Vec<&str> = Vec::with_capacity(ts.len()); names.extend(ts.names()); names.sort(); let mut buf = BufWriter::with_capacity(1024, stdout()); for name in names { let _ = write!(buf, "topic: {}\n", name); } bail!("choose a topic"); } // ~ otherwise let's loop over the topic partition offsets let num_partitions = match client.topics().partitions(&cfg.topic) { None => bail!(format!("no such topic: {}", &cfg.topic)), Some(partitions) => partitions.len(), }; let mut state = State::new(num_partitions, cfg.commited_not_consumed); let mut printer = Printer::new(stdout(), &cfg); try!(printer.print_head(num_partitions)); // ~ initialize the state let mut first_time = true; loop { let t = time::now(); try!(state.update_partitions(&mut client, &cfg.topic, &cfg.group)); if first_time { state.curr_to_prev(); first_time = false; } try!(printer.print_offsets(&t, &state.offsets)); thread::sleep(cfg.period); } } #[derive(Copy, Clone)] struct Partition { prev_latest: i64, curr_latest: i64, curr_lag: i64, } impl Default for Partition { fn default() -> Self { Partition { prev_latest: -1, curr_latest: -1, curr_lag: -1, } } } struct State { offsets: Vec<Partition>, lag_decr: i64, } impl State { fn new(num_partitions: usize, commited_not_consumed: bool) -> State { State { offsets: vec![Default::default(); num_partitions], lag_decr: if commited_not_consumed { 0 } else { 1 }, } } fn update_partitions( &mut self, client: &mut KafkaClient, topic: &str, group: &str, ) -> Result<()> { // ~ get the latest topic offsets let latests = try!(client.fetch_topic_offsets(topic, FetchOffset::Latest)); for l in latests { let off = self.offsets.get_mut(l.partition as usize).expect( "[topic offset] non-existent partition", ); off.prev_latest = off.curr_latest; off.curr_latest = l.offset; } if !group.is_empty() { // ~ get the current group offsets let groups = try!(client.fetch_group_topic_offsets(group, topic)); for g in groups { let off = self.offsets.get_mut(g.partition as usize).expect( "[group offset] non-existent partition", ); // ~ it's quite likely that we fetched group offsets // which are a bit ahead of the topic's latest offset // since we issued the fetch-latest-offset request // earlier than the request for the group offsets off.curr_lag = cmp::max(0, off.curr_latest - g.offset - 
self.lag_decr); } } Ok(()) } fn curr_to_prev(&mut self) { for o in &mut self.offsets { o.prev_latest = o.curr_latest; } } } struct Printer<W> { out: W, timefmt: String, fmt_buf: String, out_buf: String, time_width: usize, offset_width: usize, diff_width: usize, lag_width: usize, print_diff: bool, print_lag: bool, print_summary: bool, } impl<W: Write> Printer<W> { fn new(out: W, cfg: &Config) -> Printer<W>
fn print_head(&mut self, num_partitions: usize) -> Result<()> { self.out_buf.clear(); { // ~ format use std::fmt::Write; let _ = write!(self.out_buf, "{1:<0$}", self.time_width, "time"); if self.print_summary { let _ = write!(self.out_buf, " {1:>0$}", self.offset_width, "topic"); if self.print_diff { let _ = write!(self.out_buf, " [{1:>0$}]", self.diff_width, "growth"); } if self.print_lag { let _ = write!(self.out_buf, " {1:0$}", self.lag_width, "(lag)"); } } else { for i in 0..num_partitions { self.fmt_buf.clear(); let _ = write!(self.fmt_buf, "p-{}", i); let _ = write!(self.out_buf, " {1:>0$}", self.offset_width, self.fmt_buf); if self.print_diff { let _ = write!(self.out_buf, " [{1:>0$}]", self.diff_width, "growth"); } if self.print_lag { let _ = write!(self.out_buf, " {1:0$}", self.lag_width, "(lag)"); } } } self.out_buf.push('\n'); } { // ~ print try!(self.out.write_all(self.out_buf.as_bytes())); Ok(()) } } fn print_offsets(&mut self, time: &time::Tm, partitions: &[Partition]) -> Result<()> { self.out_buf.clear(); { // ~ format use std::fmt::Write; self.fmt_buf.clear(); let _ = write!(self.fmt_buf, "{}", time.strftime(&self.timefmt).expect("invalid timefmt")); let _ = write!(self.out_buf, "{1:<0$}", self.time_width, self.fmt_buf); if self.print_summary { let mut prev_latest = 0; let mut curr_latest = 0; let mut curr_lag = 0; for p in partitions { macro_rules! cond_add { ($v:ident) => { if $v != -1 { if p.$v < 0 { $v = -1; } else { $v += p.$v; } } } }; cond_add!(prev_latest); cond_add!(curr_latest); cond_add!(curr_lag); } let _ = write!(self.out_buf, " {1:>0$}", self.offset_width, curr_latest); if self.print_diff { self.fmt_buf.clear(); let _ = write!(self.fmt_buf, "{:+}", curr_latest - prev_latest); let _ = write!(self.out_buf, " [{1:>0$}]", self.diff_width, self.fmt_buf); } if self.print_lag { self.fmt_buf.clear(); let _ = write!(self.fmt_buf, "({})", curr_lag); let _ = write!(self.out_buf, " {1:<0$}", self.lag_width, self.fmt_buf); } } else { for p in partitions { let _ = write!(self.out_buf, " {1:>0$}", self.offset_width, p.curr_latest); if self.print_diff { self.fmt_buf.clear(); let _ = write!(self.fmt_buf, "{:+}", p.curr_latest - p.prev_latest); let _ = write!(self.out_buf, " [{1:>0$}]", self.diff_width, self.fmt_buf); } if self.print_lag { self.fmt_buf.clear(); let _ = write!(self.fmt_buf, "({})", p.curr_lag); let _ = write!(self.out_buf, " {1:<0$}", self.lag_width, self.fmt_buf); } } } } self.out_buf.push('\n'); try!(self.out.write_all(self.out_buf.as_bytes())); Ok(()) } } // -------------------------------------------------------------------- struct Config { brokers: Vec<String>, topic: String, group: String, offset_storage: GroupOffsetStorage, period: stdtime::Duration, commited_not_consumed: bool, summary: bool, diff: bool, } impl Config { fn from_cmdline() -> Result<Config> { let args: Vec<String> = env::args().collect(); let mut opts = getopts::Options::new(); opts.optflag("h", "help", "Print this help screen"); opts.optopt("", "brokers", "Specify kafka bootstrap brokers (comma separated)", "HOSTS"); opts.optopt("", "topic", "Specify the topic to monitor", "TOPIC"); opts.optopt("", "group", "Specify the group to monitor", "GROUP"); opts.optopt("", "storage", "Specify offset store [zookeeper, kafka]", "STORE"); opts.optopt("", "sleep", "Specify the sleep time", "SECS"); opts.optflag("", "partitions", "Print each partition instead of the summary"); opts.optflag("", "no-growth", "Don't print offset growth"); opts.optflag( "", "committed-not-yet-consumed", "Assume commited 
group offsets specify \ messages the group will start consuming \ (including those at these offsets)", ); let m = match opts.parse(&args[1..]) { Ok(m) => m, Err(e) => bail!(e), }; if m.opt_present("help") { let brief = format!("{} [options]", args[0]); bail!(opts.usage(&brief)); } let mut offset_storage = GroupOffsetStorage::Zookeeper; if let Some(s) = m.opt_str("storage") { if s.eq_ignore_ascii_case("zookeeper") { offset_storage = GroupOffsetStorage::Zookeeper; } else if s.eq_ignore_ascii_case("kafka") { offset_storage = GroupOffsetStorage::Kafka; } else { bail!(format!("unknown offset store: {}", s)); } } let mut period = stdtime::Duration::from_secs(5); if let Some(s) = m.opt_str("sleep") { match s.parse::<u64>() { Ok(n) if n != 0 => period = stdtime::Duration::from_secs(n), _ => bail!(format!("not a number greater than zero: {}", s)), } } Ok(Config { brokers: m.opt_str("brokers") .unwrap_or_else(|| "localhost:9092".to_owned()) .split(',') .map(|s| s.trim().to_owned()) .collect(), topic: m.opt_str("topic").unwrap_or_else(|| String::new()), group: m.opt_str("group").unwrap_or_else(|| String::new()), offset_storage: offset_storage, period: period, commited_not_consumed: m.opt_present("committed-not-yet-consumed"), summary: !m.opt_present("partitions"), diff: !m.opt_present("no-growth"), }) } } // -------------------------------------------------------------------- error_chain! { foreign_links { Kafka(kafka::error::Error); Io(io::Error); Opt(getopts::Fail); } }
{ Printer { out: out, timefmt: "%H:%M:%S".into(), fmt_buf: String::with_capacity(30), out_buf: String::with_capacity(160), time_width: 10, offset_width: 11, diff_width: 8, lag_width: 6, print_diff: cfg.diff, print_lag: !cfg.group.is_empty(), print_summary: cfg.summary, } }
identifier_body
export_animation.py
import argparse import glob import importlib import inspect import math import os import re import subprocess import sys import moviepy.audio.fx.all as afx import moviepy.video.fx.all as vfx import numpy as np from _appmanager import get_executable from _shutil import format_time, get_time_str, getch, print2 from moviepy.config import change_settings from moviepy.editor import * from open_with.open_with import open_with import codeapi import core import coreapi import datastruct SCRIPT_ROOT = os.path.dirname(os.path.abspath(__file__)) ignore_undefined = False if 1: change_settings({"FFMPEG_BINARY": get_executable("ffmpeg")}) # def _get_markers(file): # marker_file = file + ".marker.txt" # if os.path.exists(marker_file): # with open(marker_file, "r") as f: # s = f.read() # return [float(x) for x in s.split()] # else: # return None # def _load_and_expand_img(f): # fg = Image.open(f).convert("RGBA") # bg = Image.new("RGB", (1920, 1080)) # bg.paste(fg, ((bg.width - fg.width) // 2, (bg.height - fg.height) // 2), fg) # return np.array(bg) def _update_mpy_clip( clip, subclip, speed, frame, norm, loop, duration, pos, scale, vol, **kwargs, ): assert duration is not None # video clip operations / fx if subclip is not None: if isinstance(subclip, (int, float)): clip = clip.subclip(subclip).set_duration(duration) else: subclip_duration = subclip[1] - subclip[0] if duration > subclip_duration: c1 = clip.subclip(subclip[0], subclip[1]) c2 = clip.to_ImageClip(subclip[1]).set_duration( duration - subclip_duration ) clip = concatenate_videoclips([c1, c2]) # HACK: workaround for a bug: 'CompositeAudioClip' object has no attribute 'fps' if clip.audio is not None: clip = clip.set_audio(clip.audio.set_fps(44100)) else: clip = clip.subclip(subclip[0], subclip[1]).set_duration(duration) if speed is not None: clip = clip.fx( # pylint: disable=maybe-no-member vfx.speedx, speed, ) if frame is not None: clip = clip.to_ImageClip(frame).set_duration(duration) # Loop or change duration if loop: clip = clip.fx( # pylint: disable=maybe-no-member vfx.loop ) if subclip is None: clip = clip.set_duration(duration) if pos is not None: # (x, y) marks the center location of the of the clip instead of the top # left corner. if pos == "center": clip = clip.set_position(("center", "center")) elif isinstance(pos, (list, tuple)): pos = list(pos) half_size = [x // 2 for x in clip.size] for i in range(2): if isinstance(pos[i], (int, float)): pos[i] = pos[i] - half_size[i] pos[i] = int(coreapi.global_scale * pos[i]) clip = clip.set_position(pos) else: clip = clip.set_position(pos) if scale[0] != 1.0 or scale[1] != 1.0: clip = clip.resize((int(clip.w * scale[0]), int(clip.h * scale[1]))) return clip def _update_clip_duration(track): def is_connected(prev_clip, cur_clip): return math.isclose( prev_clip.start + prev_clip.duration, cur_clip.start, rel_tol=1e-3, ) prev_clip_info = None for clip_info in track: if prev_clip_info is not None: if prev_clip_info.auto_extend: prev_clip_info.duration = clip_info.start - prev_clip_info.start prev_clip_info.auto_extend = False assert prev_clip_info.duration > 0 # Apply fadeout to previous clip if it's not connected with # current clip. 
if prev_clip_info.crossfade > 0 and not is_connected( prev_clip_info, clip_info ): prev_clip_info.fadeout = prev_clip_info.crossfade prev_clip_info = clip_info # Update last clip duration if prev_clip_info is not None: if prev_clip_info.auto_extend: duration = prev_clip_info.duration # Extend the last video clip to match the voice track if "re" in coreapi.pos_dict: duration = max(duration, coreapi.pos_dict["re"] - clip_info.start) prev_clip_info.duration = duration prev_clip_info.auto_extend = False if prev_clip_info.crossfade > 0: prev_clip_info.fadeout = prev_clip_info.crossfade def _export_video(*, resolution, audio_only): resolution = [int(x * coreapi.global_scale) for x in resolution] audio_clips = [] # Update clip duration for each track for track in datastruct.video_tracks.values(): _update_clip_duration(track) # TODO: post-process video track clips # Update MoviePy clip object in each track. video_clips = [] for track_name, track in datastruct.video_tracks.items(): for i, clip_info in enumerate(track): assert clip_info.mpy_clip is not None assert clip_info.duration is not None # Unlink audio clip from video clip (adjust audio duration) if clip_info.no_audio: clip_info.mpy_clip = clip_info.mpy_clip.set_audio(None) elif clip_info.mpy_clip.audio is not None: audio_clip = clip_info.mpy_clip.audio clip_info.mpy_clip = clip_info.mpy_clip.set_audio(None) # Audio timing # TODO: audio subclip if clip_info.subclip is not None: duration = clip_info.subclip[1] - clip_info.subclip[0] audio_clip = audio_clip.subclip( clip_info.subclip[0], clip_info.subclip[1] ) else: duration = clip_info.duration duration = min(duration, audio_clip.duration) audio_clip = audio_clip.set_duration(duration) audio_clip = audio_clip.set_start(clip_info.start) # Adjust volume if clip_info.norm: audio_clip = audio_clip.fx( # pylint: disable=maybe-no-member afx.audio_normalize ) if clip_info.vol is not None: if isinstance(clip_info.vol, (int, float)): audio_clip = audio_clip.fx( # pylint: disable=maybe-no-member afx.volumex, clip_info.vol, ) else: audio_clip = _adjust_mpy_audio_clip_volume( audio_clip, clip_info.vol ) audio_clips.append(audio_clip) # If the next clip has crossfade enabled crossfade_duration = track[i + 1].crossfade if (i < len(track) - 1) else 0 if crossfade_duration: # clip_info.fadeout = crossfade_duration # Fadeout current clip clip_info.duration += crossfade_duration clip_info.mpy_clip = _update_mpy_clip(clip_info.mpy_clip, **vars(clip_info)) # Deal with video fade in / out / crossfade if clip_info.fadein: assert isinstance(clip_info.fadein, (int, float)) # TODO: crossfadein and crossfadeout is very slow in moviepy if track_name != "vid": clip_info.mpy_clip = clip_info.mpy_clip.crossfadein( clip_info.fadein ) else: clip_info.mpy_clip = clip_info.mpy_clip.fx( # pylint: disable=maybe-no-member vfx.fadein, clip_info.fadein, ) elif ( clip_info.crossfade > 0 ): # crossfade and fadein should not happen at the same time video_clips.append( clip_info.mpy_clip.set_duration(clip_info.crossfade) .crossfadein(clip_info.crossfade) .set_start(clip_info.start) ) clip_info.mpy_clip = clip_info.mpy_clip.subclip(clip_info.crossfade) clip_info.start += clip_info.crossfade if clip_info.fadeout: assert isinstance(clip_info.fadeout, (int, float)) if track_name != "vid": # pylint: disable=maybe-no-member clip_info.mpy_clip = clip_info.mpy_clip.crossfadeout( clip_info.fadeout ) else: clip_info.mpy_clip = clip_info.mpy_clip.fx( # pylint: disable=maybe-no-member vfx.fadeout, clip_info.fadeout, ) 
video_clips.append(clip_info.mpy_clip.set_start(clip_info.start)) if len(video_clips) == 0: video_clips.append(ColorClip((200, 200), color=(0, 1, 0)).set_duration(2)) # raise Exception("no video clips??") final_clip = CompositeVideoClip(video_clips, size=resolution) # Resize here is too late, does not speed up the video encoding at all. # final_clip = final_clip.resize(width=480) # Deal with audio clips for _, track in datastruct.audio_tracks.items(): clips = [] for clip_info in track.clips: if clip_info.loop: # HACK: reload the clip. # # still don't know why using loaded mpy_clip directly will cause # "IndexError: index -200001 is out of bounds for axis 0 with # size 0"... clip = AudioFileClip(clip_info.file, buffersize=400000) else: clip = clip_info.mpy_clip if clip_info.subclip is not None: clip = clip.subclip(clip_info.subclip[0], clip_info.subclip[1]) duration = clip_info.duration if duration is not None:
if clip_info.start is not None: clip = clip.set_start(clip_info.start) # Adjust volume by keypoints if len(clip_info.vol_keypoints) > 0: clip = _adjust_mpy_audio_clip_volume(clip, clip_info.vol_keypoints) clips.append(clip) if len(clips) > 0: clip = CompositeAudioClip(clips) audio_clips.append(clip) if final_clip.audio: audio_clips.append(final_clip.audio) if len(audio_clips) > 0: final_audio_clip = CompositeAudioClip(audio_clips) # XXX: Workaround for exception: 'CompositeAudioClip' object has no attribute 'fps'. # See: https://github.com/Zulko/moviepy/issues/863 # final_audio_clip.fps = 44100 final_clip = final_clip.set_audio(final_audio_clip) # final_clip.show(10.5, interactive=True) os.makedirs("tmp/out", exist_ok=True) if audio_only: final_audio_clip.fps = 44100 final_audio_clip.write_audiofile("%s.mp3" % out_filename) open_with("%s.mp3" % out_filename, program_id=0) else: final_clip.write_videofile( "%s.mp4" % out_filename, temp_audiofile="%s.mp3" % out_filename, remove_temp=False, codec="libx264", threads=8, fps=coreapi.FPS, ffmpeg_params=["-crf", "19"], ) subprocess.Popen( ["mpv", "--force-window", "--geometry=1920x1080", f"{out_filename}.mp4"], close_fds=True, ) def _adjust_mpy_audio_clip_volume(clip, vol_keypoints): xp = [] fp = [] print("vol_keypoints:", vol_keypoints) for (p, vol) in vol_keypoints: if isinstance(vol, (int, float)): xp.append(p) fp.append(vol) else: raise Exception("unsupported bgm parameter type:" % type(vol)) def volume_adjust(gf, t): factor = np.interp(t, xp, fp) factor = np.vstack([factor, factor]).T return factor * gf(t) return clip.fl(volume_adjust) # def _export_srt(): # with open("out.srt", "w", encoding="utf-8") as f: # f.write("\n".join(_srt_lines)) def _convert_to_readable_time(seconds): seconds = int(seconds) seconds = seconds % (24 * 3600) hour = seconds // 3600 seconds %= 3600 minutes = seconds // 60 seconds %= 60 if hour > 0: return "%d:%02d:%02d" % (hour, minutes, seconds) else: return "%02d:%02d" % (minutes, seconds) def _write_timestamp(t, section_name): os.makedirs(os.path.dirname(out_filename), exist_ok=True) if not hasattr(_write_timestamp, "f"): _write_timestamp.f = open("%s.txt" % out_filename, "w", encoding="utf-8") _write_timestamp.f.write("%s (%s)\n" % (section_name, _convert_to_readable_time(t))) _write_timestamp.f.flush() @core.api def include(file): with open(file, "r", encoding="utf-8") as f: s = f.read() cwd = os.getcwd() os.chdir(os.path.dirname(os.path.abspath(file))) _parse_text(s) os.chdir(cwd) def _remove_unused_recordings(s): used_recordings = set() unused_recordings = [] apis = {"record": (lambda f, **kargs: used_recordings.add(f))} _parse_text(s, apis=apis) files = [f for f in glob.glob("record/*") if os.path.isfile(f)] files = [f.replace("\\", "/") for f in files] for f in files: if f not in used_recordings: unused_recordings.append(f) print2("Used : %d" % len(used_recordings), color="green") print2("Unused : %d" % len(unused_recordings), color="red") assert len(used_recordings) + len(unused_recordings) == len(files) print("Press y to clean up: ", end="", flush=True) if getch() == "y": for f in unused_recordings: try: os.remove(f) except: print("WARNING: failed to remove: %s" % f) def _parse_text(text, apis=core.apis, **kwargs): def find_next(text, needle, p): pos = text.find(needle, p) if pos < 0: pos = len(text) return pos # Remove all comments text = re.sub(r"<!--[\d\D]*?-->", "", text) p = 0 # Current position while p < len(text): if text[p : p + 2] == "{{": end = find_next(text, "}}", p) python_code = text[p + 2 : 
end].strip() p = end + 2 if ignore_undefined: try: exec(python_code, apis) except NameError: # API is not defined pass # simply ignore else: exec(python_code, apis) continue if text[p : p + 1] == "#": end = find_next(text, "\n", p) line = text[p:end].strip() _write_timestamp(coreapi.pos_dict["a"], line) p = end + 1 continue match = re.match("---((?:[0-9]*[.])?[0-9]+)?\n", text[p:]) if match is not None: if match.group(1) is not None: coreapi.audio_gap(float(match.group(1))) else: coreapi.audio_gap(0.2) p += match.end(0) + 1 continue # Parse regular text end = find_next(text, "\n", p) line = text[p:end].strip() p = end + 1 if line != "" and "parse_line" in apis: apis["parse_line"](line) # Call it at the end core.on_api_func(None) def _show_stats(s): TIME_PER_CHAR = 0.1334154351395731 total = 0 def parse_line(line): nonlocal total total += len(line) _parse_text(s, apis={"parse_line": parse_line}, ignore_undefined=True) total_secs = TIME_PER_CHAR * total print("Estimated Time: %s" % format_time(total_secs)) input() def load_config(): import yaml CONFIG_FILE = "config.yaml" DEFAULT_CONFIG = {"fps": 30} if os.path.exists(CONFIG_FILE): with open(CONFIG_FILE, "r") as f: config = yaml.load(f.read(), Loader=yaml.FullLoader) else: with open(CONFIG_FILE, "w", newline="\n") as f: yaml.dump(DEFAULT_CONFIG, f, default_flow_style=False) config = DEFAULT_CONFIG coreapi.fps(config["fps"]) if __name__ == "__main__": out_filename = "tmp/out/" + get_time_str() parser = argparse.ArgumentParser() parser.add_argument("--stdin", default=False, action="store_true") parser.add_argument("--proj_dir", type=str, default=None) parser.add_argument("-i", "--input", type=str, default=None) parser.add_argument("-a", "--audio_only", action="store_true", default=False) parser.add_argument( "--remove_unused_recordings", action="store_true", default=False ) parser.add_argument("--show_stats", action="store_true", default=False) parser.add_argument("--preview", action="store_true", default=False) args = parser.parse_args() if args.proj_dir is not None: os.chdir(args.proj_dir) elif args.input: os.chdir(os.path.dirname(args.input)) print("Project dir: %s" % os.getcwd()) # Load custom APIs (api.py) if exists if os.path.exists("api.py"): sys.path.append(os.getcwd()) mymodule = importlib.import_module("api") global_functions = inspect.getmembers(mymodule, inspect.isfunction) core.apis.update({k: v for k, v in global_functions}) # HACK if args.audio_only: coreapi.audio_only() # Read text if args.stdin: s = sys.stdin.read() elif args.input: with open(args.input, "r", encoding="utf-8") as f: s = f.read() else: raise Exception("Either --stdin or --input should be specified.") load_config() if args.preview: coreapi.preview() if args.remove_unused_recordings: ignore_undefined = True _remove_unused_recordings(s) elif args.show_stats: ignore_undefined = True _show_stats(s) else: _parse_text(s, apis=core.apis) _export_video(resolution=(1920, 1080), audio_only=args.audio_only)
if clip_info.loop:
    # pylint: disable=maybe-no-member
    clip = clip.fx(afx.audio_loop, duration=duration)
else:
    duration = min(duration, clip.duration)
    if clip_info.subclip:
        duration = min(
            duration, clip_info.subclip[1] - clip_info.subclip[0]
        )
    clip = clip.set_duration(duration)
conditional_block
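The _adjust_mpy_audio_clip_volume helper in the entry above boils down to piecewise-linear interpolation of a gain factor over (time, volume) keypoints, with the factor duplicated across two channels for stereo audio. A minimal standalone sketch of that idea, using made-up keypoints and no moviepy dependency:

import numpy as np

# Hypothetical keypoints: full volume until 2 s, dip to 20% by 4 s, back to full by 6 s.
vol_keypoints = [(0.0, 1.0), (2.0, 1.0), (4.0, 0.2), (6.0, 1.0)]
xp = [p for p, _ in vol_keypoints]
fp = [v for _, v in vol_keypoints]

def stereo_gain(t):
    # Piecewise-linear gain at time(s) t, shaped (len(t), 2) like a stereo audio frame.
    factor = np.interp(t, xp, fp)
    return np.vstack([factor, factor]).T

t = np.array([0.0, 1.0, 3.0, 5.0, 10.0])
print(stereo_gain(t))  # 1.0, 1.0, 0.6, 0.6, 1.0 in both columns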
export_animation.py
import argparse import glob import importlib import inspect import math import os import re import subprocess import sys import moviepy.audio.fx.all as afx import moviepy.video.fx.all as vfx import numpy as np from _appmanager import get_executable from _shutil import format_time, get_time_str, getch, print2 from moviepy.config import change_settings from moviepy.editor import * from open_with.open_with import open_with import codeapi import core import coreapi import datastruct SCRIPT_ROOT = os.path.dirname(os.path.abspath(__file__)) ignore_undefined = False if 1: change_settings({"FFMPEG_BINARY": get_executable("ffmpeg")}) # def _get_markers(file): # marker_file = file + ".marker.txt" # if os.path.exists(marker_file): # with open(marker_file, "r") as f: # s = f.read() # return [float(x) for x in s.split()] # else: # return None # def _load_and_expand_img(f): # fg = Image.open(f).convert("RGBA") # bg = Image.new("RGB", (1920, 1080)) # bg.paste(fg, ((bg.width - fg.width) // 2, (bg.height - fg.height) // 2), fg) # return np.array(bg) def _update_mpy_clip( clip, subclip, speed, frame, norm, loop, duration, pos, scale, vol, **kwargs, ): assert duration is not None # video clip operations / fx if subclip is not None: if isinstance(subclip, (int, float)): clip = clip.subclip(subclip).set_duration(duration) else: subclip_duration = subclip[1] - subclip[0] if duration > subclip_duration: c1 = clip.subclip(subclip[0], subclip[1]) c2 = clip.to_ImageClip(subclip[1]).set_duration( duration - subclip_duration ) clip = concatenate_videoclips([c1, c2]) # HACK: workaround for a bug: 'CompositeAudioClip' object has no attribute 'fps' if clip.audio is not None: clip = clip.set_audio(clip.audio.set_fps(44100)) else: clip = clip.subclip(subclip[0], subclip[1]).set_duration(duration) if speed is not None: clip = clip.fx( # pylint: disable=maybe-no-member vfx.speedx, speed, ) if frame is not None: clip = clip.to_ImageClip(frame).set_duration(duration) # Loop or change duration if loop: clip = clip.fx( # pylint: disable=maybe-no-member vfx.loop ) if subclip is None: clip = clip.set_duration(duration) if pos is not None: # (x, y) marks the center location of the of the clip instead of the top # left corner. if pos == "center": clip = clip.set_position(("center", "center")) elif isinstance(pos, (list, tuple)): pos = list(pos) half_size = [x // 2 for x in clip.size] for i in range(2): if isinstance(pos[i], (int, float)): pos[i] = pos[i] - half_size[i] pos[i] = int(coreapi.global_scale * pos[i]) clip = clip.set_position(pos) else: clip = clip.set_position(pos) if scale[0] != 1.0 or scale[1] != 1.0: clip = clip.resize((int(clip.w * scale[0]), int(clip.h * scale[1]))) return clip def _update_clip_duration(track): def is_connected(prev_clip, cur_clip): return math.isclose( prev_clip.start + prev_clip.duration, cur_clip.start, rel_tol=1e-3, ) prev_clip_info = None for clip_info in track: if prev_clip_info is not None: if prev_clip_info.auto_extend: prev_clip_info.duration = clip_info.start - prev_clip_info.start prev_clip_info.auto_extend = False assert prev_clip_info.duration > 0 # Apply fadeout to previous clip if it's not connected with # current clip. 
if prev_clip_info.crossfade > 0 and not is_connected( prev_clip_info, clip_info ): prev_clip_info.fadeout = prev_clip_info.crossfade prev_clip_info = clip_info # Update last clip duration if prev_clip_info is not None: if prev_clip_info.auto_extend: duration = prev_clip_info.duration # Extend the last video clip to match the voice track if "re" in coreapi.pos_dict: duration = max(duration, coreapi.pos_dict["re"] - clip_info.start) prev_clip_info.duration = duration prev_clip_info.auto_extend = False if prev_clip_info.crossfade > 0: prev_clip_info.fadeout = prev_clip_info.crossfade def _export_video(*, resolution, audio_only): resolution = [int(x * coreapi.global_scale) for x in resolution] audio_clips = [] # Update clip duration for each track for track in datastruct.video_tracks.values(): _update_clip_duration(track) # TODO: post-process video track clips # Update MoviePy clip object in each track. video_clips = [] for track_name, track in datastruct.video_tracks.items(): for i, clip_info in enumerate(track): assert clip_info.mpy_clip is not None assert clip_info.duration is not None # Unlink audio clip from video clip (adjust audio duration) if clip_info.no_audio: clip_info.mpy_clip = clip_info.mpy_clip.set_audio(None) elif clip_info.mpy_clip.audio is not None: audio_clip = clip_info.mpy_clip.audio clip_info.mpy_clip = clip_info.mpy_clip.set_audio(None) # Audio timing # TODO: audio subclip if clip_info.subclip is not None: duration = clip_info.subclip[1] - clip_info.subclip[0] audio_clip = audio_clip.subclip( clip_info.subclip[0], clip_info.subclip[1] ) else: duration = clip_info.duration duration = min(duration, audio_clip.duration) audio_clip = audio_clip.set_duration(duration) audio_clip = audio_clip.set_start(clip_info.start) # Adjust volume if clip_info.norm: audio_clip = audio_clip.fx( # pylint: disable=maybe-no-member afx.audio_normalize ) if clip_info.vol is not None: if isinstance(clip_info.vol, (int, float)): audio_clip = audio_clip.fx( # pylint: disable=maybe-no-member afx.volumex, clip_info.vol, ) else: audio_clip = _adjust_mpy_audio_clip_volume( audio_clip, clip_info.vol ) audio_clips.append(audio_clip) # If the next clip has crossfade enabled crossfade_duration = track[i + 1].crossfade if (i < len(track) - 1) else 0 if crossfade_duration: # clip_info.fadeout = crossfade_duration # Fadeout current clip clip_info.duration += crossfade_duration clip_info.mpy_clip = _update_mpy_clip(clip_info.mpy_clip, **vars(clip_info)) # Deal with video fade in / out / crossfade if clip_info.fadein: assert isinstance(clip_info.fadein, (int, float)) # TODO: crossfadein and crossfadeout is very slow in moviepy if track_name != "vid": clip_info.mpy_clip = clip_info.mpy_clip.crossfadein( clip_info.fadein ) else: clip_info.mpy_clip = clip_info.mpy_clip.fx( # pylint: disable=maybe-no-member vfx.fadein, clip_info.fadein, ) elif ( clip_info.crossfade > 0 ): # crossfade and fadein should not happen at the same time video_clips.append( clip_info.mpy_clip.set_duration(clip_info.crossfade) .crossfadein(clip_info.crossfade) .set_start(clip_info.start) ) clip_info.mpy_clip = clip_info.mpy_clip.subclip(clip_info.crossfade) clip_info.start += clip_info.crossfade if clip_info.fadeout: assert isinstance(clip_info.fadeout, (int, float)) if track_name != "vid": # pylint: disable=maybe-no-member clip_info.mpy_clip = clip_info.mpy_clip.crossfadeout( clip_info.fadeout ) else: clip_info.mpy_clip = clip_info.mpy_clip.fx( # pylint: disable=maybe-no-member vfx.fadeout, clip_info.fadeout, ) 
video_clips.append(clip_info.mpy_clip.set_start(clip_info.start)) if len(video_clips) == 0: video_clips.append(ColorClip((200, 200), color=(0, 1, 0)).set_duration(2)) # raise Exception("no video clips??") final_clip = CompositeVideoClip(video_clips, size=resolution) # Resize here is too late, does not speed up the video encoding at all. # final_clip = final_clip.resize(width=480) # Deal with audio clips for _, track in datastruct.audio_tracks.items(): clips = [] for clip_info in track.clips: if clip_info.loop: # HACK: reload the clip. # # still don't know why using loaded mpy_clip directly will cause # "IndexError: index -200001 is out of bounds for axis 0 with # size 0"... clip = AudioFileClip(clip_info.file, buffersize=400000) else: clip = clip_info.mpy_clip if clip_info.subclip is not None: clip = clip.subclip(clip_info.subclip[0], clip_info.subclip[1]) duration = clip_info.duration if duration is not None: if clip_info.loop: # pylint: disable=maybe-no-member clip = clip.fx(afx.audio_loop, duration=duration) else: duration = min(duration, clip.duration) if clip_info.subclip: duration = min( duration, clip_info.subclip[1] - clip_info.subclip[0] ) clip = clip.set_duration(duration) if clip_info.start is not None: clip = clip.set_start(clip_info.start) # Adjust volume by keypoints if len(clip_info.vol_keypoints) > 0: clip = _adjust_mpy_audio_clip_volume(clip, clip_info.vol_keypoints) clips.append(clip) if len(clips) > 0: clip = CompositeAudioClip(clips) audio_clips.append(clip) if final_clip.audio: audio_clips.append(final_clip.audio) if len(audio_clips) > 0: final_audio_clip = CompositeAudioClip(audio_clips) # XXX: Workaround for exception: 'CompositeAudioClip' object has no attribute 'fps'. # See: https://github.com/Zulko/moviepy/issues/863 # final_audio_clip.fps = 44100 final_clip = final_clip.set_audio(final_audio_clip) # final_clip.show(10.5, interactive=True) os.makedirs("tmp/out", exist_ok=True) if audio_only: final_audio_clip.fps = 44100 final_audio_clip.write_audiofile("%s.mp3" % out_filename) open_with("%s.mp3" % out_filename, program_id=0) else: final_clip.write_videofile( "%s.mp4" % out_filename, temp_audiofile="%s.mp3" % out_filename, remove_temp=False, codec="libx264", threads=8, fps=coreapi.FPS, ffmpeg_params=["-crf", "19"], ) subprocess.Popen( ["mpv", "--force-window", "--geometry=1920x1080", f"{out_filename}.mp4"], close_fds=True, ) def _adjust_mpy_audio_clip_volume(clip, vol_keypoints): xp = [] fp = [] print("vol_keypoints:", vol_keypoints) for (p, vol) in vol_keypoints: if isinstance(vol, (int, float)): xp.append(p) fp.append(vol) else: raise Exception("unsupported bgm parameter type:" % type(vol)) def volume_adjust(gf, t): factor = np.interp(t, xp, fp) factor = np.vstack([factor, factor]).T return factor * gf(t) return clip.fl(volume_adjust) # def _export_srt(): # with open("out.srt", "w", encoding="utf-8") as f: # f.write("\n".join(_srt_lines)) def _convert_to_readable_time(seconds): seconds = int(seconds) seconds = seconds % (24 * 3600) hour = seconds // 3600 seconds %= 3600 minutes = seconds // 60 seconds %= 60 if hour > 0: return "%d:%02d:%02d" % (hour, minutes, seconds) else: return "%02d:%02d" % (minutes, seconds) def _write_timestamp(t, section_name): os.makedirs(os.path.dirname(out_filename), exist_ok=True) if not hasattr(_write_timestamp, "f"): _write_timestamp.f = open("%s.txt" % out_filename, "w", encoding="utf-8") _write_timestamp.f.write("%s (%s)\n" % (section_name, _convert_to_readable_time(t))) _write_timestamp.f.flush() @core.api def include(file): with 
open(file, "r", encoding="utf-8") as f:
os.chdir(os.path.dirname(os.path.abspath(file))) _parse_text(s) os.chdir(cwd) def _remove_unused_recordings(s): used_recordings = set() unused_recordings = [] apis = {"record": (lambda f, **kargs: used_recordings.add(f))} _parse_text(s, apis=apis) files = [f for f in glob.glob("record/*") if os.path.isfile(f)] files = [f.replace("\\", "/") for f in files] for f in files: if f not in used_recordings: unused_recordings.append(f) print2("Used : %d" % len(used_recordings), color="green") print2("Unused : %d" % len(unused_recordings), color="red") assert len(used_recordings) + len(unused_recordings) == len(files) print("Press y to clean up: ", end="", flush=True) if getch() == "y": for f in unused_recordings: try: os.remove(f) except: print("WARNING: failed to remove: %s" % f) def _parse_text(text, apis=core.apis, **kwargs): def find_next(text, needle, p): pos = text.find(needle, p) if pos < 0: pos = len(text) return pos # Remove all comments text = re.sub(r"<!--[\d\D]*?-->", "", text) p = 0 # Current position while p < len(text): if text[p : p + 2] == "{{": end = find_next(text, "}}", p) python_code = text[p + 2 : end].strip() p = end + 2 if ignore_undefined: try: exec(python_code, apis) except NameError: # API is not defined pass # simply ignore else: exec(python_code, apis) continue if text[p : p + 1] == "#": end = find_next(text, "\n", p) line = text[p:end].strip() _write_timestamp(coreapi.pos_dict["a"], line) p = end + 1 continue match = re.match("---((?:[0-9]*[.])?[0-9]+)?\n", text[p:]) if match is not None: if match.group(1) is not None: coreapi.audio_gap(float(match.group(1))) else: coreapi.audio_gap(0.2) p += match.end(0) + 1 continue # Parse regular text end = find_next(text, "\n", p) line = text[p:end].strip() p = end + 1 if line != "" and "parse_line" in apis: apis["parse_line"](line) # Call it at the end core.on_api_func(None) def _show_stats(s): TIME_PER_CHAR = 0.1334154351395731 total = 0 def parse_line(line): nonlocal total total += len(line) _parse_text(s, apis={"parse_line": parse_line}, ignore_undefined=True) total_secs = TIME_PER_CHAR * total print("Estimated Time: %s" % format_time(total_secs)) input() def load_config(): import yaml CONFIG_FILE = "config.yaml" DEFAULT_CONFIG = {"fps": 30} if os.path.exists(CONFIG_FILE): with open(CONFIG_FILE, "r") as f: config = yaml.load(f.read(), Loader=yaml.FullLoader) else: with open(CONFIG_FILE, "w", newline="\n") as f: yaml.dump(DEFAULT_CONFIG, f, default_flow_style=False) config = DEFAULT_CONFIG coreapi.fps(config["fps"]) if __name__ == "__main__": out_filename = "tmp/out/" + get_time_str() parser = argparse.ArgumentParser() parser.add_argument("--stdin", default=False, action="store_true") parser.add_argument("--proj_dir", type=str, default=None) parser.add_argument("-i", "--input", type=str, default=None) parser.add_argument("-a", "--audio_only", action="store_true", default=False) parser.add_argument( "--remove_unused_recordings", action="store_true", default=False ) parser.add_argument("--show_stats", action="store_true", default=False) parser.add_argument("--preview", action="store_true", default=False) args = parser.parse_args() if args.proj_dir is not None: os.chdir(args.proj_dir) elif args.input: os.chdir(os.path.dirname(args.input)) print("Project dir: %s" % os.getcwd()) # Load custom APIs (api.py) if exists if os.path.exists("api.py"): sys.path.append(os.getcwd()) mymodule = importlib.import_module("api") global_functions = inspect.getmembers(mymodule, inspect.isfunction) core.apis.update({k: v for k, v in 
global_functions}) # HACK if args.audio_only: coreapi.audio_only() # Read text if args.stdin: s = sys.stdin.read() elif args.input: with open(args.input, "r", encoding="utf-8") as f: s = f.read() else: raise Exception("Either --stdin or --input should be specified.") load_config() if args.preview: coreapi.preview() if args.remove_unused_recordings: ignore_undefined = True _remove_unused_recordings(s) elif args.show_stats: ignore_undefined = True _show_stats(s) else: _parse_text(s, apis=core.apis) _export_video(resolution=(1920, 1080), audio_only=args.audio_only)
s = f.read()
cwd = os.getcwd()
random_line_split
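The script syntax handled by _parse_text supports "---" lines that insert an audio gap, with an optional duration and a 0.2 s default. A small standalone check of the regex used there (the sample lines are made up):

import re

GAP_RE = re.compile(r"---((?:[0-9]*[.])?[0-9]+)?\n")

for line in ["---\n", "---0.5\n", "---1\n", "--- 0.5\n"]:
    m = GAP_RE.match(line)
    if m is None:
        print(repr(line), "-> not an audio gap")
    elif m.group(1) is not None:
        print(repr(line), "-> gap of", float(m.group(1)), "s")
    else:
        print(repr(line), "-> default gap of 0.2 s")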
export_animation.py
import argparse import glob import importlib import inspect import math import os import re import subprocess import sys import moviepy.audio.fx.all as afx import moviepy.video.fx.all as vfx import numpy as np from _appmanager import get_executable from _shutil import format_time, get_time_str, getch, print2 from moviepy.config import change_settings from moviepy.editor import * from open_with.open_with import open_with import codeapi import core import coreapi import datastruct SCRIPT_ROOT = os.path.dirname(os.path.abspath(__file__)) ignore_undefined = False if 1: change_settings({"FFMPEG_BINARY": get_executable("ffmpeg")}) # def _get_markers(file): # marker_file = file + ".marker.txt" # if os.path.exists(marker_file): # with open(marker_file, "r") as f: # s = f.read() # return [float(x) for x in s.split()] # else: # return None # def _load_and_expand_img(f): # fg = Image.open(f).convert("RGBA") # bg = Image.new("RGB", (1920, 1080)) # bg.paste(fg, ((bg.width - fg.width) // 2, (bg.height - fg.height) // 2), fg) # return np.array(bg) def _update_mpy_clip( clip, subclip, speed, frame, norm, loop, duration, pos, scale, vol, **kwargs, ): assert duration is not None # video clip operations / fx if subclip is not None: if isinstance(subclip, (int, float)): clip = clip.subclip(subclip).set_duration(duration) else: subclip_duration = subclip[1] - subclip[0] if duration > subclip_duration: c1 = clip.subclip(subclip[0], subclip[1]) c2 = clip.to_ImageClip(subclip[1]).set_duration( duration - subclip_duration ) clip = concatenate_videoclips([c1, c2]) # HACK: workaround for a bug: 'CompositeAudioClip' object has no attribute 'fps' if clip.audio is not None: clip = clip.set_audio(clip.audio.set_fps(44100)) else: clip = clip.subclip(subclip[0], subclip[1]).set_duration(duration) if speed is not None: clip = clip.fx( # pylint: disable=maybe-no-member vfx.speedx, speed, ) if frame is not None: clip = clip.to_ImageClip(frame).set_duration(duration) # Loop or change duration if loop: clip = clip.fx( # pylint: disable=maybe-no-member vfx.loop ) if subclip is None: clip = clip.set_duration(duration) if pos is not None: # (x, y) marks the center location of the of the clip instead of the top # left corner. if pos == "center": clip = clip.set_position(("center", "center")) elif isinstance(pos, (list, tuple)): pos = list(pos) half_size = [x // 2 for x in clip.size] for i in range(2): if isinstance(pos[i], (int, float)): pos[i] = pos[i] - half_size[i] pos[i] = int(coreapi.global_scale * pos[i]) clip = clip.set_position(pos) else: clip = clip.set_position(pos) if scale[0] != 1.0 or scale[1] != 1.0: clip = clip.resize((int(clip.w * scale[0]), int(clip.h * scale[1]))) return clip def _update_clip_duration(track): def is_connected(prev_clip, cur_clip): return math.isclose( prev_clip.start + prev_clip.duration, cur_clip.start, rel_tol=1e-3, ) prev_clip_info = None for clip_info in track: if prev_clip_info is not None: if prev_clip_info.auto_extend: prev_clip_info.duration = clip_info.start - prev_clip_info.start prev_clip_info.auto_extend = False assert prev_clip_info.duration > 0 # Apply fadeout to previous clip if it's not connected with # current clip. 
if prev_clip_info.crossfade > 0 and not is_connected( prev_clip_info, clip_info ): prev_clip_info.fadeout = prev_clip_info.crossfade prev_clip_info = clip_info # Update last clip duration if prev_clip_info is not None: if prev_clip_info.auto_extend: duration = prev_clip_info.duration # Extend the last video clip to match the voice track if "re" in coreapi.pos_dict: duration = max(duration, coreapi.pos_dict["re"] - clip_info.start) prev_clip_info.duration = duration prev_clip_info.auto_extend = False if prev_clip_info.crossfade > 0: prev_clip_info.fadeout = prev_clip_info.crossfade def _export_video(*, resolution, audio_only): resolution = [int(x * coreapi.global_scale) for x in resolution] audio_clips = [] # Update clip duration for each track for track in datastruct.video_tracks.values(): _update_clip_duration(track) # TODO: post-process video track clips # Update MoviePy clip object in each track. video_clips = [] for track_name, track in datastruct.video_tracks.items(): for i, clip_info in enumerate(track): assert clip_info.mpy_clip is not None assert clip_info.duration is not None # Unlink audio clip from video clip (adjust audio duration) if clip_info.no_audio: clip_info.mpy_clip = clip_info.mpy_clip.set_audio(None) elif clip_info.mpy_clip.audio is not None: audio_clip = clip_info.mpy_clip.audio clip_info.mpy_clip = clip_info.mpy_clip.set_audio(None) # Audio timing # TODO: audio subclip if clip_info.subclip is not None: duration = clip_info.subclip[1] - clip_info.subclip[0] audio_clip = audio_clip.subclip( clip_info.subclip[0], clip_info.subclip[1] ) else: duration = clip_info.duration duration = min(duration, audio_clip.duration) audio_clip = audio_clip.set_duration(duration) audio_clip = audio_clip.set_start(clip_info.start) # Adjust volume if clip_info.norm: audio_clip = audio_clip.fx( # pylint: disable=maybe-no-member afx.audio_normalize ) if clip_info.vol is not None: if isinstance(clip_info.vol, (int, float)): audio_clip = audio_clip.fx( # pylint: disable=maybe-no-member afx.volumex, clip_info.vol, ) else: audio_clip = _adjust_mpy_audio_clip_volume( audio_clip, clip_info.vol ) audio_clips.append(audio_clip) # If the next clip has crossfade enabled crossfade_duration = track[i + 1].crossfade if (i < len(track) - 1) else 0 if crossfade_duration: # clip_info.fadeout = crossfade_duration # Fadeout current clip clip_info.duration += crossfade_duration clip_info.mpy_clip = _update_mpy_clip(clip_info.mpy_clip, **vars(clip_info)) # Deal with video fade in / out / crossfade if clip_info.fadein: assert isinstance(clip_info.fadein, (int, float)) # TODO: crossfadein and crossfadeout is very slow in moviepy if track_name != "vid": clip_info.mpy_clip = clip_info.mpy_clip.crossfadein( clip_info.fadein ) else: clip_info.mpy_clip = clip_info.mpy_clip.fx( # pylint: disable=maybe-no-member vfx.fadein, clip_info.fadein, ) elif ( clip_info.crossfade > 0 ): # crossfade and fadein should not happen at the same time video_clips.append( clip_info.mpy_clip.set_duration(clip_info.crossfade) .crossfadein(clip_info.crossfade) .set_start(clip_info.start) ) clip_info.mpy_clip = clip_info.mpy_clip.subclip(clip_info.crossfade) clip_info.start += clip_info.crossfade if clip_info.fadeout: assert isinstance(clip_info.fadeout, (int, float)) if track_name != "vid": # pylint: disable=maybe-no-member clip_info.mpy_clip = clip_info.mpy_clip.crossfadeout( clip_info.fadeout ) else: clip_info.mpy_clip = clip_info.mpy_clip.fx( # pylint: disable=maybe-no-member vfx.fadeout, clip_info.fadeout, ) 
video_clips.append(clip_info.mpy_clip.set_start(clip_info.start)) if len(video_clips) == 0: video_clips.append(ColorClip((200, 200), color=(0, 1, 0)).set_duration(2)) # raise Exception("no video clips??") final_clip = CompositeVideoClip(video_clips, size=resolution) # Resize here is too late, does not speed up the video encoding at all. # final_clip = final_clip.resize(width=480) # Deal with audio clips for _, track in datastruct.audio_tracks.items(): clips = [] for clip_info in track.clips: if clip_info.loop: # HACK: reload the clip. # # still don't know why using loaded mpy_clip directly will cause # "IndexError: index -200001 is out of bounds for axis 0 with # size 0"... clip = AudioFileClip(clip_info.file, buffersize=400000) else: clip = clip_info.mpy_clip if clip_info.subclip is not None: clip = clip.subclip(clip_info.subclip[0], clip_info.subclip[1]) duration = clip_info.duration if duration is not None: if clip_info.loop: # pylint: disable=maybe-no-member clip = clip.fx(afx.audio_loop, duration=duration) else: duration = min(duration, clip.duration) if clip_info.subclip: duration = min( duration, clip_info.subclip[1] - clip_info.subclip[0] ) clip = clip.set_duration(duration) if clip_info.start is not None: clip = clip.set_start(clip_info.start) # Adjust volume by keypoints if len(clip_info.vol_keypoints) > 0: clip = _adjust_mpy_audio_clip_volume(clip, clip_info.vol_keypoints) clips.append(clip) if len(clips) > 0: clip = CompositeAudioClip(clips) audio_clips.append(clip) if final_clip.audio: audio_clips.append(final_clip.audio) if len(audio_clips) > 0: final_audio_clip = CompositeAudioClip(audio_clips) # XXX: Workaround for exception: 'CompositeAudioClip' object has no attribute 'fps'. # See: https://github.com/Zulko/moviepy/issues/863 # final_audio_clip.fps = 44100 final_clip = final_clip.set_audio(final_audio_clip) # final_clip.show(10.5, interactive=True) os.makedirs("tmp/out", exist_ok=True) if audio_only: final_audio_clip.fps = 44100 final_audio_clip.write_audiofile("%s.mp3" % out_filename) open_with("%s.mp3" % out_filename, program_id=0) else: final_clip.write_videofile( "%s.mp4" % out_filename, temp_audiofile="%s.mp3" % out_filename, remove_temp=False, codec="libx264", threads=8, fps=coreapi.FPS, ffmpeg_params=["-crf", "19"], ) subprocess.Popen( ["mpv", "--force-window", "--geometry=1920x1080", f"{out_filename}.mp4"], close_fds=True, ) def _adjust_mpy_audio_clip_volume(clip, vol_keypoints): xp = [] fp = [] print("vol_keypoints:", vol_keypoints) for (p, vol) in vol_keypoints: if isinstance(vol, (int, float)): xp.append(p) fp.append(vol) else: raise Exception("unsupported bgm parameter type:" % type(vol)) def volume_adjust(gf, t): factor = np.interp(t, xp, fp) factor = np.vstack([factor, factor]).T return factor * gf(t) return clip.fl(volume_adjust) # def _export_srt(): # with open("out.srt", "w", encoding="utf-8") as f: # f.write("\n".join(_srt_lines)) def _convert_to_readable_time(seconds): seconds = int(seconds) seconds = seconds % (24 * 3600) hour = seconds // 3600 seconds %= 3600 minutes = seconds // 60 seconds %= 60 if hour > 0: return "%d:%02d:%02d" % (hour, minutes, seconds) else: return "%02d:%02d" % (minutes, seconds) def _write_timestamp(t, section_name): os.makedirs(os.path.dirname(out_filename), exist_ok=True) if not hasattr(_write_timestamp, "f"): _write_timestamp.f = open("%s.txt" % out_filename, "w", encoding="utf-8") _write_timestamp.f.write("%s (%s)\n" % (section_name, _convert_to_readable_time(t))) _write_timestamp.f.flush() @core.api def include(file): with 
open(file, "r", encoding="utf-8") as f: s = f.read() cwd = os.getcwd() os.chdir(os.path.dirname(os.path.abspath(file))) _parse_text(s) os.chdir(cwd) def _remove_unused_recordings(s):
def _parse_text(text, apis=core.apis, **kwargs): def find_next(text, needle, p): pos = text.find(needle, p) if pos < 0: pos = len(text) return pos # Remove all comments text = re.sub(r"<!--[\d\D]*?-->", "", text) p = 0 # Current position while p < len(text): if text[p : p + 2] == "{{": end = find_next(text, "}}", p) python_code = text[p + 2 : end].strip() p = end + 2 if ignore_undefined: try: exec(python_code, apis) except NameError: # API is not defined pass # simply ignore else: exec(python_code, apis) continue if text[p : p + 1] == "#": end = find_next(text, "\n", p) line = text[p:end].strip() _write_timestamp(coreapi.pos_dict["a"], line) p = end + 1 continue match = re.match("---((?:[0-9]*[.])?[0-9]+)?\n", text[p:]) if match is not None: if match.group(1) is not None: coreapi.audio_gap(float(match.group(1))) else: coreapi.audio_gap(0.2) p += match.end(0) + 1 continue # Parse regular text end = find_next(text, "\n", p) line = text[p:end].strip() p = end + 1 if line != "" and "parse_line" in apis: apis["parse_line"](line) # Call it at the end core.on_api_func(None) def _show_stats(s): TIME_PER_CHAR = 0.1334154351395731 total = 0 def parse_line(line): nonlocal total total += len(line) _parse_text(s, apis={"parse_line": parse_line}, ignore_undefined=True) total_secs = TIME_PER_CHAR * total print("Estimated Time: %s" % format_time(total_secs)) input() def load_config(): import yaml CONFIG_FILE = "config.yaml" DEFAULT_CONFIG = {"fps": 30} if os.path.exists(CONFIG_FILE): with open(CONFIG_FILE, "r") as f: config = yaml.load(f.read(), Loader=yaml.FullLoader) else: with open(CONFIG_FILE, "w", newline="\n") as f: yaml.dump(DEFAULT_CONFIG, f, default_flow_style=False) config = DEFAULT_CONFIG coreapi.fps(config["fps"]) if __name__ == "__main__": out_filename = "tmp/out/" + get_time_str() parser = argparse.ArgumentParser() parser.add_argument("--stdin", default=False, action="store_true") parser.add_argument("--proj_dir", type=str, default=None) parser.add_argument("-i", "--input", type=str, default=None) parser.add_argument("-a", "--audio_only", action="store_true", default=False) parser.add_argument( "--remove_unused_recordings", action="store_true", default=False ) parser.add_argument("--show_stats", action="store_true", default=False) parser.add_argument("--preview", action="store_true", default=False) args = parser.parse_args() if args.proj_dir is not None: os.chdir(args.proj_dir) elif args.input: os.chdir(os.path.dirname(args.input)) print("Project dir: %s" % os.getcwd()) # Load custom APIs (api.py) if exists if os.path.exists("api.py"): sys.path.append(os.getcwd()) mymodule = importlib.import_module("api") global_functions = inspect.getmembers(mymodule, inspect.isfunction) core.apis.update({k: v for k, v in global_functions}) # HACK if args.audio_only: coreapi.audio_only() # Read text if args.stdin: s = sys.stdin.read() elif args.input: with open(args.input, "r", encoding="utf-8") as f: s = f.read() else: raise Exception("Either --stdin or --input should be specified.") load_config() if args.preview: coreapi.preview() if args.remove_unused_recordings: ignore_undefined = True _remove_unused_recordings(s) elif args.show_stats: ignore_undefined = True _show_stats(s) else: _parse_text(s, apis=core.apis) _export_video(resolution=(1920, 1080), audio_only=args.audio_only)
used_recordings = set()
unused_recordings = []

apis = {"record": (lambda f, **kargs: used_recordings.add(f))}
_parse_text(s, apis=apis)

files = [f for f in glob.glob("record/*") if os.path.isfile(f)]
files = [f.replace("\\", "/") for f in files]
for f in files:
    if f not in used_recordings:
        unused_recordings.append(f)

print2("Used : %d" % len(used_recordings), color="green")
print2("Unused : %d" % len(unused_recordings), color="red")
assert len(used_recordings) + len(unused_recordings) == len(files)

print("Press y to clean up: ", end="", flush=True)
if getch() == "y":
    for f in unused_recordings:
        try:
            os.remove(f)
        except:
            print("WARNING: failed to remove: %s" % f)
identifier_body
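_convert_to_readable_time in the entries above turns a second count into MM:SS or H:MM:SS for the generated timestamp file. A standalone copy with a couple of arbitrary sample values:

def convert_to_readable_time(seconds):
    seconds = int(seconds) % (24 * 3600)
    hour = seconds // 3600
    seconds %= 3600
    minutes = seconds // 60
    seconds %= 60
    if hour > 0:
        return "%d:%02d:%02d" % (hour, minutes, seconds)
    return "%02d:%02d" % (minutes, seconds)

print(convert_to_readable_time(75))      # 01:15
print(convert_to_readable_time(3725.9))  # 1:02:05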
export_animation.py
import argparse import glob import importlib import inspect import math import os import re import subprocess import sys import moviepy.audio.fx.all as afx import moviepy.video.fx.all as vfx import numpy as np from _appmanager import get_executable from _shutil import format_time, get_time_str, getch, print2 from moviepy.config import change_settings from moviepy.editor import * from open_with.open_with import open_with import codeapi import core import coreapi import datastruct SCRIPT_ROOT = os.path.dirname(os.path.abspath(__file__)) ignore_undefined = False if 1: change_settings({"FFMPEG_BINARY": get_executable("ffmpeg")}) # def _get_markers(file): # marker_file = file + ".marker.txt" # if os.path.exists(marker_file): # with open(marker_file, "r") as f: # s = f.read() # return [float(x) for x in s.split()] # else: # return None # def _load_and_expand_img(f): # fg = Image.open(f).convert("RGBA") # bg = Image.new("RGB", (1920, 1080)) # bg.paste(fg, ((bg.width - fg.width) // 2, (bg.height - fg.height) // 2), fg) # return np.array(bg) def _update_mpy_clip( clip, subclip, speed, frame, norm, loop, duration, pos, scale, vol, **kwargs, ): assert duration is not None # video clip operations / fx if subclip is not None: if isinstance(subclip, (int, float)): clip = clip.subclip(subclip).set_duration(duration) else: subclip_duration = subclip[1] - subclip[0] if duration > subclip_duration: c1 = clip.subclip(subclip[0], subclip[1]) c2 = clip.to_ImageClip(subclip[1]).set_duration( duration - subclip_duration ) clip = concatenate_videoclips([c1, c2]) # HACK: workaround for a bug: 'CompositeAudioClip' object has no attribute 'fps' if clip.audio is not None: clip = clip.set_audio(clip.audio.set_fps(44100)) else: clip = clip.subclip(subclip[0], subclip[1]).set_duration(duration) if speed is not None: clip = clip.fx( # pylint: disable=maybe-no-member vfx.speedx, speed, ) if frame is not None: clip = clip.to_ImageClip(frame).set_duration(duration) # Loop or change duration if loop: clip = clip.fx( # pylint: disable=maybe-no-member vfx.loop ) if subclip is None: clip = clip.set_duration(duration) if pos is not None: # (x, y) marks the center location of the of the clip instead of the top # left corner. if pos == "center": clip = clip.set_position(("center", "center")) elif isinstance(pos, (list, tuple)): pos = list(pos) half_size = [x // 2 for x in clip.size] for i in range(2): if isinstance(pos[i], (int, float)): pos[i] = pos[i] - half_size[i] pos[i] = int(coreapi.global_scale * pos[i]) clip = clip.set_position(pos) else: clip = clip.set_position(pos) if scale[0] != 1.0 or scale[1] != 1.0: clip = clip.resize((int(clip.w * scale[0]), int(clip.h * scale[1]))) return clip def _update_clip_duration(track): def is_connected(prev_clip, cur_clip): return math.isclose( prev_clip.start + prev_clip.duration, cur_clip.start, rel_tol=1e-3, ) prev_clip_info = None for clip_info in track: if prev_clip_info is not None: if prev_clip_info.auto_extend: prev_clip_info.duration = clip_info.start - prev_clip_info.start prev_clip_info.auto_extend = False assert prev_clip_info.duration > 0 # Apply fadeout to previous clip if it's not connected with # current clip. 
if prev_clip_info.crossfade > 0 and not is_connected( prev_clip_info, clip_info ): prev_clip_info.fadeout = prev_clip_info.crossfade prev_clip_info = clip_info # Update last clip duration if prev_clip_info is not None: if prev_clip_info.auto_extend: duration = prev_clip_info.duration # Extend the last video clip to match the voice track if "re" in coreapi.pos_dict: duration = max(duration, coreapi.pos_dict["re"] - clip_info.start) prev_clip_info.duration = duration prev_clip_info.auto_extend = False if prev_clip_info.crossfade > 0: prev_clip_info.fadeout = prev_clip_info.crossfade def _export_video(*, resolution, audio_only): resolution = [int(x * coreapi.global_scale) for x in resolution] audio_clips = [] # Update clip duration for each track for track in datastruct.video_tracks.values(): _update_clip_duration(track) # TODO: post-process video track clips # Update MoviePy clip object in each track. video_clips = [] for track_name, track in datastruct.video_tracks.items(): for i, clip_info in enumerate(track): assert clip_info.mpy_clip is not None assert clip_info.duration is not None # Unlink audio clip from video clip (adjust audio duration) if clip_info.no_audio: clip_info.mpy_clip = clip_info.mpy_clip.set_audio(None) elif clip_info.mpy_clip.audio is not None: audio_clip = clip_info.mpy_clip.audio clip_info.mpy_clip = clip_info.mpy_clip.set_audio(None) # Audio timing # TODO: audio subclip if clip_info.subclip is not None: duration = clip_info.subclip[1] - clip_info.subclip[0] audio_clip = audio_clip.subclip( clip_info.subclip[0], clip_info.subclip[1] ) else: duration = clip_info.duration duration = min(duration, audio_clip.duration) audio_clip = audio_clip.set_duration(duration) audio_clip = audio_clip.set_start(clip_info.start) # Adjust volume if clip_info.norm: audio_clip = audio_clip.fx( # pylint: disable=maybe-no-member afx.audio_normalize ) if clip_info.vol is not None: if isinstance(clip_info.vol, (int, float)): audio_clip = audio_clip.fx( # pylint: disable=maybe-no-member afx.volumex, clip_info.vol, ) else: audio_clip = _adjust_mpy_audio_clip_volume( audio_clip, clip_info.vol ) audio_clips.append(audio_clip) # If the next clip has crossfade enabled crossfade_duration = track[i + 1].crossfade if (i < len(track) - 1) else 0 if crossfade_duration: # clip_info.fadeout = crossfade_duration # Fadeout current clip clip_info.duration += crossfade_duration clip_info.mpy_clip = _update_mpy_clip(clip_info.mpy_clip, **vars(clip_info)) # Deal with video fade in / out / crossfade if clip_info.fadein: assert isinstance(clip_info.fadein, (int, float)) # TODO: crossfadein and crossfadeout is very slow in moviepy if track_name != "vid": clip_info.mpy_clip = clip_info.mpy_clip.crossfadein( clip_info.fadein ) else: clip_info.mpy_clip = clip_info.mpy_clip.fx( # pylint: disable=maybe-no-member vfx.fadein, clip_info.fadein, ) elif ( clip_info.crossfade > 0 ): # crossfade and fadein should not happen at the same time video_clips.append( clip_info.mpy_clip.set_duration(clip_info.crossfade) .crossfadein(clip_info.crossfade) .set_start(clip_info.start) ) clip_info.mpy_clip = clip_info.mpy_clip.subclip(clip_info.crossfade) clip_info.start += clip_info.crossfade if clip_info.fadeout: assert isinstance(clip_info.fadeout, (int, float)) if track_name != "vid": # pylint: disable=maybe-no-member clip_info.mpy_clip = clip_info.mpy_clip.crossfadeout( clip_info.fadeout ) else: clip_info.mpy_clip = clip_info.mpy_clip.fx( # pylint: disable=maybe-no-member vfx.fadeout, clip_info.fadeout, ) 
video_clips.append(clip_info.mpy_clip.set_start(clip_info.start)) if len(video_clips) == 0: video_clips.append(ColorClip((200, 200), color=(0, 1, 0)).set_duration(2)) # raise Exception("no video clips??") final_clip = CompositeVideoClip(video_clips, size=resolution) # Resize here is too late, does not speed up the video encoding at all. # final_clip = final_clip.resize(width=480) # Deal with audio clips for _, track in datastruct.audio_tracks.items(): clips = [] for clip_info in track.clips: if clip_info.loop: # HACK: reload the clip. # # still don't know why using loaded mpy_clip directly will cause # "IndexError: index -200001 is out of bounds for axis 0 with # size 0"... clip = AudioFileClip(clip_info.file, buffersize=400000) else: clip = clip_info.mpy_clip if clip_info.subclip is not None: clip = clip.subclip(clip_info.subclip[0], clip_info.subclip[1]) duration = clip_info.duration if duration is not None: if clip_info.loop: # pylint: disable=maybe-no-member clip = clip.fx(afx.audio_loop, duration=duration) else: duration = min(duration, clip.duration) if clip_info.subclip: duration = min( duration, clip_info.subclip[1] - clip_info.subclip[0] ) clip = clip.set_duration(duration) if clip_info.start is not None: clip = clip.set_start(clip_info.start) # Adjust volume by keypoints if len(clip_info.vol_keypoints) > 0: clip = _adjust_mpy_audio_clip_volume(clip, clip_info.vol_keypoints) clips.append(clip) if len(clips) > 0: clip = CompositeAudioClip(clips) audio_clips.append(clip) if final_clip.audio: audio_clips.append(final_clip.audio) if len(audio_clips) > 0: final_audio_clip = CompositeAudioClip(audio_clips) # XXX: Workaround for exception: 'CompositeAudioClip' object has no attribute 'fps'. # See: https://github.com/Zulko/moviepy/issues/863 # final_audio_clip.fps = 44100 final_clip = final_clip.set_audio(final_audio_clip) # final_clip.show(10.5, interactive=True) os.makedirs("tmp/out", exist_ok=True) if audio_only: final_audio_clip.fps = 44100 final_audio_clip.write_audiofile("%s.mp3" % out_filename) open_with("%s.mp3" % out_filename, program_id=0) else: final_clip.write_videofile( "%s.mp4" % out_filename, temp_audiofile="%s.mp3" % out_filename, remove_temp=False, codec="libx264", threads=8, fps=coreapi.FPS, ffmpeg_params=["-crf", "19"], ) subprocess.Popen( ["mpv", "--force-window", "--geometry=1920x1080", f"{out_filename}.mp4"], close_fds=True, ) def _adjust_mpy_audio_clip_volume(clip, vol_keypoints): xp = [] fp = [] print("vol_keypoints:", vol_keypoints) for (p, vol) in vol_keypoints: if isinstance(vol, (int, float)): xp.append(p) fp.append(vol) else: raise Exception("unsupported bgm parameter type:" % type(vol)) def volume_adjust(gf, t): factor = np.interp(t, xp, fp) factor = np.vstack([factor, factor]).T return factor * gf(t) return clip.fl(volume_adjust) # def _export_srt(): # with open("out.srt", "w", encoding="utf-8") as f: # f.write("\n".join(_srt_lines)) def _convert_to_readable_time(seconds): seconds = int(seconds) seconds = seconds % (24 * 3600) hour = seconds // 3600 seconds %= 3600 minutes = seconds // 60 seconds %= 60 if hour > 0: return "%d:%02d:%02d" % (hour, minutes, seconds) else: return "%02d:%02d" % (minutes, seconds) def _write_timestamp(t, section_name): os.makedirs(os.path.dirname(out_filename), exist_ok=True) if not hasattr(_write_timestamp, "f"): _write_timestamp.f = open("%s.txt" % out_filename, "w", encoding="utf-8") _write_timestamp.f.write("%s (%s)\n" % (section_name, _convert_to_readable_time(t))) _write_timestamp.f.flush() @core.api def include(file): with 
open(file, "r", encoding="utf-8") as f: s = f.read() cwd = os.getcwd() os.chdir(os.path.dirname(os.path.abspath(file))) _parse_text(s) os.chdir(cwd) def _remove_unused_recordings(s): used_recordings = set() unused_recordings = [] apis = {"record": (lambda f, **kargs: used_recordings.add(f))} _parse_text(s, apis=apis) files = [f for f in glob.glob("record/*") if os.path.isfile(f)] files = [f.replace("\\", "/") for f in files] for f in files: if f not in used_recordings: unused_recordings.append(f) print2("Used : %d" % len(used_recordings), color="green") print2("Unused : %d" % len(unused_recordings), color="red") assert len(used_recordings) + len(unused_recordings) == len(files) print("Press y to clean up: ", end="", flush=True) if getch() == "y": for f in unused_recordings: try: os.remove(f) except: print("WARNING: failed to remove: %s" % f) def
(text, apis=core.apis, **kwargs): def find_next(text, needle, p): pos = text.find(needle, p) if pos < 0: pos = len(text) return pos # Remove all comments text = re.sub(r"<!--[\d\D]*?-->", "", text) p = 0 # Current position while p < len(text): if text[p : p + 2] == "{{": end = find_next(text, "}}", p) python_code = text[p + 2 : end].strip() p = end + 2 if ignore_undefined: try: exec(python_code, apis) except NameError: # API is not defined pass # simply ignore else: exec(python_code, apis) continue if text[p : p + 1] == "#": end = find_next(text, "\n", p) line = text[p:end].strip() _write_timestamp(coreapi.pos_dict["a"], line) p = end + 1 continue match = re.match("---((?:[0-9]*[.])?[0-9]+)?\n", text[p:]) if match is not None: if match.group(1) is not None: coreapi.audio_gap(float(match.group(1))) else: coreapi.audio_gap(0.2) p += match.end(0) + 1 continue # Parse regular text end = find_next(text, "\n", p) line = text[p:end].strip() p = end + 1 if line != "" and "parse_line" in apis: apis["parse_line"](line) # Call it at the end core.on_api_func(None) def _show_stats(s): TIME_PER_CHAR = 0.1334154351395731 total = 0 def parse_line(line): nonlocal total total += len(line) _parse_text(s, apis={"parse_line": parse_line}, ignore_undefined=True) total_secs = TIME_PER_CHAR * total print("Estimated Time: %s" % format_time(total_secs)) input() def load_config(): import yaml CONFIG_FILE = "config.yaml" DEFAULT_CONFIG = {"fps": 30} if os.path.exists(CONFIG_FILE): with open(CONFIG_FILE, "r") as f: config = yaml.load(f.read(), Loader=yaml.FullLoader) else: with open(CONFIG_FILE, "w", newline="\n") as f: yaml.dump(DEFAULT_CONFIG, f, default_flow_style=False) config = DEFAULT_CONFIG coreapi.fps(config["fps"]) if __name__ == "__main__": out_filename = "tmp/out/" + get_time_str() parser = argparse.ArgumentParser() parser.add_argument("--stdin", default=False, action="store_true") parser.add_argument("--proj_dir", type=str, default=None) parser.add_argument("-i", "--input", type=str, default=None) parser.add_argument("-a", "--audio_only", action="store_true", default=False) parser.add_argument( "--remove_unused_recordings", action="store_true", default=False ) parser.add_argument("--show_stats", action="store_true", default=False) parser.add_argument("--preview", action="store_true", default=False) args = parser.parse_args() if args.proj_dir is not None: os.chdir(args.proj_dir) elif args.input: os.chdir(os.path.dirname(args.input)) print("Project dir: %s" % os.getcwd()) # Load custom APIs (api.py) if exists if os.path.exists("api.py"): sys.path.append(os.getcwd()) mymodule = importlib.import_module("api") global_functions = inspect.getmembers(mymodule, inspect.isfunction) core.apis.update({k: v for k, v in global_functions}) # HACK if args.audio_only: coreapi.audio_only() # Read text if args.stdin: s = sys.stdin.read() elif args.input: with open(args.input, "r", encoding="utf-8") as f: s = f.read() else: raise Exception("Either --stdin or --input should be specified.") load_config() if args.preview: coreapi.preview() if args.remove_unused_recordings: ignore_undefined = True _remove_unused_recordings(s) elif args.show_stats: ignore_undefined = True _show_stats(s) else: _parse_text(s, apis=core.apis) _export_video(resolution=(1920, 1080), audio_only=args.audio_only)
_parse_text
identifier_name
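The entry above targets _parse_text, whose core loop scans the script text for {{ ... }} blocks and executes them against the API dict, treating everything else as plain narration lines. A stripped-down sketch of that scanning loop (the sample text and the apis dict are invented for illustration):

def find_next(text, needle, p):
    pos = text.find(needle, p)
    return len(text) if pos < 0 else pos

def run_script(text, apis):
    p = 0
    while p < len(text):
        if text[p:p + 2] == "{{":
            end = find_next(text, "}}", p)
            code = text[p + 2:end].strip()
            exec(code, apis)                 # embedded directive
            p = end + 2
            continue
        end = find_next(text, "\n", p)       # plain narration line
        line = text[p:end].strip()
        if line:
            apis["parse_line"](line)
        p = end + 1

apis = {"clips": []}
apis["clip"] = lambda f: apis["clips"].append(f)
apis["parse_line"] = lambda line: print("narrate:", line)
run_script("{{ clip('intro.mp4') }}\nHello world\n", apis)
print(apis["clips"])  # ['intro.mp4']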
mock.rs
// Copyright 2019-2021 PureStake Inc. // This file is part of Moonbeam. // Moonbeam is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Moonbeam is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Moonbeam. If not, see <http://www.gnu.org/licenses/>. //! Test utilities use crate as stake; use crate::{pallet, AwardedPts, Config, InflationInfo, Points, Range}; use frame_support::{ construct_runtime, parameter_types, traits::{Everything, GenesisBuild, OnFinalize, OnInitialize}, weights::Weight, }; use sp_core::H256; use sp_runtime::{ testing::{Header, UintAuthorityId}, traits::{BlakeTwo256, IdentityLookup, OpaqueKeys}, Perbill, Percent, RuntimeAppPublic, }; pub type AccountId = u64; pub type Balance = u128; pub type BlockNumber = u64; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>; type Block = frame_system::mocking::MockBlock<Test>; // Configure a mock runtime to test the pallet. construct_runtime!( pub enum Test where Block = Block, NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { System: frame_system::{Pallet, Call, Config, Storage, Event<T>}, Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>}, Stake: stake::{Pallet, Call, Storage, Config<T>, Event<T>}, Session: pallet_session::{Pallet, Call, Storage, Event, Config<T>}, Aura: pallet_aura::{Pallet, Storage, Config<T>}, } ); parameter_types! { pub const BlockHashCount: u64 = 250; pub const MaximumBlockWeight: Weight = 1024; pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::one(); pub const SS58Prefix: u8 = 42; } impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData<Balance>; type AccountId = AccountId; type BaseCallFilter = Everything; type BlockHashCount = BlockHashCount; type BlockLength = (); type BlockNumber = BlockNumber; type BlockWeights = (); type Call = Call; type DbWeight = (); type Event = Event; type Hash = H256; type Hashing = BlakeTwo256; type Header = Header; type Index = u64; type Lookup = IdentityLookup<Self::AccountId>; type OnKilledAccount = (); type OnNewAccount = (); type OnSetCode = (); type Origin = Origin; type PalletInfo = PalletInfo; type SS58Prefix = SS58Prefix; type SystemWeightInfo = (); type Version = (); } parameter_types! { pub const ExistentialDeposit: u128 = 1; } impl pallet_balances::Config for Test { type AccountStore = System; type Balance = Balance; type DustRemoval = (); type Event = Event; type ExistentialDeposit = ExistentialDeposit; type MaxLocks = (); type MaxReserves = (); type ReserveIdentifier = [u8; 4]; type WeightInfo = (); } parameter_types! { pub const MinimumPeriod: u64 = 1; } impl pallet_timestamp::Config for Test { type MinimumPeriod = MinimumPeriod; type Moment = u64; type OnTimestampSet = Aura; type WeightInfo = (); } parameter_types! 
{ pub const MaxAuthorities: u32 = 100_000; } impl pallet_aura::Config for Test { type AuthorityId = sp_consensus_aura::sr25519::AuthorityId; type DisabledValidators = (); type MaxAuthorities = MaxAuthorities; } sp_runtime::impl_opaque_keys! { pub struct MockSessionKeys { // a key for aura authoring pub aura: UintAuthorityId, } } impl From<UintAuthorityId> for MockSessionKeys { fn from(aura: sp_runtime::testing::UintAuthorityId) -> Self { Self { aura } } } parameter_types! { pub static SessionHandlerCollators: Vec<u64> = vec![]; pub static SessionChangeBlock: u64 = 0; } pub struct TestSessionHandler; impl pallet_session::SessionHandler<u64> for TestSessionHandler { const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[UintAuthorityId::ID]; fn on_genesis_session<Ks: OpaqueKeys>(keys: &[(u64, Ks)]) { SessionHandlerCollators::set(keys.iter().map(|(a, _)| *a).collect::<Vec<_>>()) } fn on_new_session<Ks: OpaqueKeys>(_: bool, keys: &[(u64, Ks)], _: &[(u64, Ks)]) { SessionChangeBlock::set(System::block_number()); dbg!(keys.len()); SessionHandlerCollators::set(keys.iter().map(|(a, _)| *a).collect::<Vec<_>>()) } fn on_before_session_ending() {} fn on_disabled(_: u32) {} } impl pallet_session::Config for Test { type Event = Event; type Keys = MockSessionKeys; type NextSessionRotation = Stake; type SessionHandler = TestSessionHandler; type SessionManager = Stake; type ShouldEndSession = Stake; type ValidatorId = <Self as frame_system::Config>::AccountId; // we don't have stash and controller, thus we don't need the convert as well. type ValidatorIdOf = crate::IdentityCollator; type WeightInfo = (); } parameter_types! { pub const MinBlocksPerRound: u32 = 3; pub const BlocksPerRound: u32 = 5; pub const LeaveCandidatesDelay: u32 = 2; pub const LeaveNominatorsDelay: u32 = 2; pub const RevokeNominationDelay: u32 = 2; pub const RewardPaymentDelay: u32 = 2; pub const MinSelectedCandidates: u32 = 5; pub const MaxNominatorsPerCollator: u32 = 4; pub const MaxCollatorsPerNominator: u32 = 4; pub const DefaultCollatorCommission: Perbill = Perbill::from_percent(20); pub const DefaultParachainBondReservePercent: Percent = Percent::from_percent(30); pub const MinCollatorStk: u128 = 10; pub const MinNominatorStk: u128 = 5; pub const MinNomination: u128 = 3; } impl Config for Test { type BlocksPerRound = BlocksPerRound; type Currency = Balances; type DefaultCollatorCommission = DefaultCollatorCommission; type DefaultParachainBondReservePercent = DefaultParachainBondReservePercent; type Event = Event; type LeaveCandidatesDelay = LeaveCandidatesDelay; type LeaveNominatorsDelay = LeaveNominatorsDelay; type MaxCollatorsPerNominator = MaxCollatorsPerNominator; type MaxNominatorsPerCollator = MaxNominatorsPerCollator; type MinBlocksPerRound = MinBlocksPerRound; type MinCollatorCandidateStk = MinCollatorStk; type MinCollatorStk = MinCollatorStk; type MinNomination = MinNomination; type MinNominatorStk = MinNominatorStk; type MinSelectedCandidates = MinSelectedCandidates; type MonetaryGovernanceOrigin = frame_system::EnsureRoot<AccountId>; type RevokeNominationDelay = RevokeNominationDelay; type RewardPaymentDelay = RewardPaymentDelay; type WeightInfo = (); } pub(crate) struct ExtBuilder { // endowed accounts with balances balances: Vec<(AccountId, Balance)>, // [collator, amount] collators: Vec<(AccountId, Balance)>, // [nominator, collator, nomination_amount] nominations: Vec<(AccountId, AccountId, Balance)>, // inflation config inflation: InflationInfo<Balance>, } impl Default for ExtBuilder { fn default() -> ExtBuilder 
{ ExtBuilder { balances: vec![], nominations: vec![], collators: vec![], inflation: InflationInfo { expect: Range { min: 700, ideal: 700, max: 700, }, // not used annual: Range { min: Perbill::from_percent(50), ideal: Perbill::from_percent(50), max: Perbill::from_percent(50), }, // unrealistically high parameterization, only for testing round: Range { min: Perbill::from_percent(5), ideal: Perbill::from_percent(5), max: Perbill::from_percent(5), }, }, } } } impl ExtBuilder { pub(crate) fn with_balances(mut self, balances: Vec<(AccountId, Balance)>) -> Self { self.balances = balances; self } pub(crate) fn with_candidates(mut self, collators: Vec<(AccountId, Balance)>) -> Self { self.collators = collators; self } pub(crate) fn with_nominations(mut self, nominations: Vec<(AccountId, AccountId, Balance)>) -> Self { self.nominations = nominations; self } #[allow(dead_code)] pub(crate) fn with_inflation(mut self, inflation: InflationInfo<Balance>) -> Self { self.inflation = inflation; self } pub(crate) fn build(self) -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default() .build_storage::<Test>() .expect("Frame system builds valid default genesis config"); pallet_balances::GenesisConfig::<Test> { balances: self.balances, } .assimilate_storage(&mut t) .expect("Pallet balances storage can be assimilated"); stake::GenesisConfig::<Test> { candidates: self.collators, nominations: self.nominations, inflation_config: self.inflation, } .assimilate_storage(&mut t) .expect("Parachain Staking's storage can be assimilated"); let validators = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; let keys = validators .iter() .map(|i| { (*i, *i, MockSessionKeys { aura: UintAuthorityId(*i), }) }) .collect::<Vec<_>>(); pallet_session::GenesisConfig::<Test> { keys } .assimilate_storage(&mut t) .expect("Pallet session storage can be assimilated"); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext } } pub(crate) fn roll_to(n: u64) { while System::block_number() < n { Balances::on_finalize(System::block_number()); Stake::on_finalize(System::block_number()); Session::on_finalize(System::block_number()); Aura::on_finalize(System::block_number()); System::on_finalize(System::block_number()); System::set_block_number(System::block_number() + 1); System::on_initialize(System::block_number()); Timestamp::on_initialize(System::block_number()); Balances::on_initialize(System::block_number()); Stake::on_initialize(System::block_number()); Session::on_initialize(System::block_number()); Aura::on_initialize(System::block_number()); } } pub(crate) fn last_event() -> Event
pub(crate) fn events() -> Vec<pallet::Event<Test>> { System::events() .into_iter() .map(|r| r.event) .filter_map(|e| if let Event::Stake(inner) = e { Some(inner) } else { None }) .collect::<Vec<_>>() } // Same storage changes as EventHandler::note_author impl pub(crate) fn set_author(round: u32, acc: u64, pts: u32) { <Points<Test>>::mutate(round, |p| *p += pts); <AwardedPts<Test>>::mutate(round, acc, |p| *p += pts); } #[test] fn geneses() { ExtBuilder::default() .with_balances(vec![ (1, 1000), (2, 300), (3, 100), (4, 100), (5, 100), (6, 100), (7, 100), (8, 9), (9, 4), ]) .with_candidates(vec![(1, 500), (2, 200)]) .with_nominations(vec![(3, 1, 100), (4, 1, 100), (5, 2, 100), (6, 2, 100)]) .build() .execute_with(|| { assert!(System::events().is_empty()); // collators assert_eq!(Balances::reserved_balance(&1), 500); assert_eq!(Balances::free_balance(&1), 500); assert!(Stake::is_candidate(&1)); assert_eq!(Balances::reserved_balance(&2), 200); assert_eq!(Balances::free_balance(&2), 100); assert!(Stake::is_candidate(&2)); // nominators for x in 3..7 { assert!(Stake::is_nominator(&x)); assert_eq!(Balances::free_balance(&x), 0); assert_eq!(Balances::reserved_balance(&x), 100); } // uninvolved for x in 7..10 { assert!(!Stake::is_nominator(&x)); } assert_eq!(Balances::free_balance(&7), 100); assert_eq!(Balances::reserved_balance(&7), 0); assert_eq!(Balances::free_balance(&8), 9); assert_eq!(Balances::reserved_balance(&8), 0); assert_eq!(Balances::free_balance(&9), 4); assert_eq!(Balances::reserved_balance(&9), 0); }); ExtBuilder::default() .with_balances(vec![ (1, 100), (2, 100), (3, 100), (4, 100), (5, 100), (6, 100), (7, 100), (8, 100), (9, 100), (10, 100), ]) .with_candidates(vec![(1, 20), (2, 20), (3, 20), (4, 20), (5, 10)]) .with_nominations(vec![(6, 1, 10), (7, 1, 10), (8, 2, 10), (9, 2, 10), (10, 1, 10)]) .build() .execute_with(|| { assert!(System::events().is_empty()); // collators for x in 1..5 { assert!(Stake::is_candidate(&x)); assert_eq!(Balances::free_balance(&x), 80); assert_eq!(Balances::reserved_balance(&x), 20); } assert!(Stake::is_candidate(&5)); assert_eq!(Balances::free_balance(&5), 90); assert_eq!(Balances::reserved_balance(&5), 10); // nominators for x in 6..11 { assert!(Stake::is_nominator(&x)); assert_eq!(Balances::free_balance(&x), 90); assert_eq!(Balances::reserved_balance(&x), 10); } }); }
{ System::events().pop().expect("Event expected").event }
identifier_body
mock.rs
// Copyright 2019-2021 PureStake Inc. // This file is part of Moonbeam. // Moonbeam is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Moonbeam is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Moonbeam. If not, see <http://www.gnu.org/licenses/>. //! Test utilities use crate as stake; use crate::{pallet, AwardedPts, Config, InflationInfo, Points, Range}; use frame_support::{ construct_runtime, parameter_types, traits::{Everything, GenesisBuild, OnFinalize, OnInitialize}, weights::Weight, }; use sp_core::H256; use sp_runtime::{ testing::{Header, UintAuthorityId}, traits::{BlakeTwo256, IdentityLookup, OpaqueKeys}, Perbill, Percent, RuntimeAppPublic, }; pub type AccountId = u64;
type Block = frame_system::mocking::MockBlock<Test>; // Configure a mock runtime to test the pallet. construct_runtime!( pub enum Test where Block = Block, NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { System: frame_system::{Pallet, Call, Config, Storage, Event<T>}, Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>}, Stake: stake::{Pallet, Call, Storage, Config<T>, Event<T>}, Session: pallet_session::{Pallet, Call, Storage, Event, Config<T>}, Aura: pallet_aura::{Pallet, Storage, Config<T>}, } ); parameter_types! { pub const BlockHashCount: u64 = 250; pub const MaximumBlockWeight: Weight = 1024; pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::one(); pub const SS58Prefix: u8 = 42; } impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData<Balance>; type AccountId = AccountId; type BaseCallFilter = Everything; type BlockHashCount = BlockHashCount; type BlockLength = (); type BlockNumber = BlockNumber; type BlockWeights = (); type Call = Call; type DbWeight = (); type Event = Event; type Hash = H256; type Hashing = BlakeTwo256; type Header = Header; type Index = u64; type Lookup = IdentityLookup<Self::AccountId>; type OnKilledAccount = (); type OnNewAccount = (); type OnSetCode = (); type Origin = Origin; type PalletInfo = PalletInfo; type SS58Prefix = SS58Prefix; type SystemWeightInfo = (); type Version = (); } parameter_types! { pub const ExistentialDeposit: u128 = 1; } impl pallet_balances::Config for Test { type AccountStore = System; type Balance = Balance; type DustRemoval = (); type Event = Event; type ExistentialDeposit = ExistentialDeposit; type MaxLocks = (); type MaxReserves = (); type ReserveIdentifier = [u8; 4]; type WeightInfo = (); } parameter_types! { pub const MinimumPeriod: u64 = 1; } impl pallet_timestamp::Config for Test { type MinimumPeriod = MinimumPeriod; type Moment = u64; type OnTimestampSet = Aura; type WeightInfo = (); } parameter_types! { pub const MaxAuthorities: u32 = 100_000; } impl pallet_aura::Config for Test { type AuthorityId = sp_consensus_aura::sr25519::AuthorityId; type DisabledValidators = (); type MaxAuthorities = MaxAuthorities; } sp_runtime::impl_opaque_keys! { pub struct MockSessionKeys { // a key for aura authoring pub aura: UintAuthorityId, } } impl From<UintAuthorityId> for MockSessionKeys { fn from(aura: sp_runtime::testing::UintAuthorityId) -> Self { Self { aura } } } parameter_types! 
{ pub static SessionHandlerCollators: Vec<u64> = vec![]; pub static SessionChangeBlock: u64 = 0; } pub struct TestSessionHandler; impl pallet_session::SessionHandler<u64> for TestSessionHandler { const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[UintAuthorityId::ID]; fn on_genesis_session<Ks: OpaqueKeys>(keys: &[(u64, Ks)]) { SessionHandlerCollators::set(keys.iter().map(|(a, _)| *a).collect::<Vec<_>>()) } fn on_new_session<Ks: OpaqueKeys>(_: bool, keys: &[(u64, Ks)], _: &[(u64, Ks)]) { SessionChangeBlock::set(System::block_number()); dbg!(keys.len()); SessionHandlerCollators::set(keys.iter().map(|(a, _)| *a).collect::<Vec<_>>()) } fn on_before_session_ending() {} fn on_disabled(_: u32) {} } impl pallet_session::Config for Test { type Event = Event; type Keys = MockSessionKeys; type NextSessionRotation = Stake; type SessionHandler = TestSessionHandler; type SessionManager = Stake; type ShouldEndSession = Stake; type ValidatorId = <Self as frame_system::Config>::AccountId; // we don't have stash and controller, thus we don't need the convert as well. type ValidatorIdOf = crate::IdentityCollator; type WeightInfo = (); } parameter_types! { pub const MinBlocksPerRound: u32 = 3; pub const BlocksPerRound: u32 = 5; pub const LeaveCandidatesDelay: u32 = 2; pub const LeaveNominatorsDelay: u32 = 2; pub const RevokeNominationDelay: u32 = 2; pub const RewardPaymentDelay: u32 = 2; pub const MinSelectedCandidates: u32 = 5; pub const MaxNominatorsPerCollator: u32 = 4; pub const MaxCollatorsPerNominator: u32 = 4; pub const DefaultCollatorCommission: Perbill = Perbill::from_percent(20); pub const DefaultParachainBondReservePercent: Percent = Percent::from_percent(30); pub const MinCollatorStk: u128 = 10; pub const MinNominatorStk: u128 = 5; pub const MinNomination: u128 = 3; } impl Config for Test { type BlocksPerRound = BlocksPerRound; type Currency = Balances; type DefaultCollatorCommission = DefaultCollatorCommission; type DefaultParachainBondReservePercent = DefaultParachainBondReservePercent; type Event = Event; type LeaveCandidatesDelay = LeaveCandidatesDelay; type LeaveNominatorsDelay = LeaveNominatorsDelay; type MaxCollatorsPerNominator = MaxCollatorsPerNominator; type MaxNominatorsPerCollator = MaxNominatorsPerCollator; type MinBlocksPerRound = MinBlocksPerRound; type MinCollatorCandidateStk = MinCollatorStk; type MinCollatorStk = MinCollatorStk; type MinNomination = MinNomination; type MinNominatorStk = MinNominatorStk; type MinSelectedCandidates = MinSelectedCandidates; type MonetaryGovernanceOrigin = frame_system::EnsureRoot<AccountId>; type RevokeNominationDelay = RevokeNominationDelay; type RewardPaymentDelay = RewardPaymentDelay; type WeightInfo = (); } pub(crate) struct ExtBuilder { // endowed accounts with balances balances: Vec<(AccountId, Balance)>, // [collator, amount] collators: Vec<(AccountId, Balance)>, // [nominator, collator, nomination_amount] nominations: Vec<(AccountId, AccountId, Balance)>, // inflation config inflation: InflationInfo<Balance>, } impl Default for ExtBuilder { fn default() -> ExtBuilder { ExtBuilder { balances: vec![], nominations: vec![], collators: vec![], inflation: InflationInfo { expect: Range { min: 700, ideal: 700, max: 700, }, // not used annual: Range { min: Perbill::from_percent(50), ideal: Perbill::from_percent(50), max: Perbill::from_percent(50), }, // unrealistically high parameterization, only for testing round: Range { min: Perbill::from_percent(5), ideal: Perbill::from_percent(5), max: Perbill::from_percent(5), }, }, } } } impl ExtBuilder 
{ pub(crate) fn with_balances(mut self, balances: Vec<(AccountId, Balance)>) -> Self { self.balances = balances; self } pub(crate) fn with_candidates(mut self, collators: Vec<(AccountId, Balance)>) -> Self { self.collators = collators; self } pub(crate) fn with_nominations(mut self, nominations: Vec<(AccountId, AccountId, Balance)>) -> Self { self.nominations = nominations; self } #[allow(dead_code)] pub(crate) fn with_inflation(mut self, inflation: InflationInfo<Balance>) -> Self { self.inflation = inflation; self } pub(crate) fn build(self) -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default() .build_storage::<Test>() .expect("Frame system builds valid default genesis config"); pallet_balances::GenesisConfig::<Test> { balances: self.balances, } .assimilate_storage(&mut t) .expect("Pallet balances storage can be assimilated"); stake::GenesisConfig::<Test> { candidates: self.collators, nominations: self.nominations, inflation_config: self.inflation, } .assimilate_storage(&mut t) .expect("Parachain Staking's storage can be assimilated"); let validators = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; let keys = validators .iter() .map(|i| { (*i, *i, MockSessionKeys { aura: UintAuthorityId(*i), }) }) .collect::<Vec<_>>(); pallet_session::GenesisConfig::<Test> { keys } .assimilate_storage(&mut t) .expect("Pallet session storage can be assimilated"); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext } } pub(crate) fn roll_to(n: u64) { while System::block_number() < n { Balances::on_finalize(System::block_number()); Stake::on_finalize(System::block_number()); Session::on_finalize(System::block_number()); Aura::on_finalize(System::block_number()); System::on_finalize(System::block_number()); System::set_block_number(System::block_number() + 1); System::on_initialize(System::block_number()); Timestamp::on_initialize(System::block_number()); Balances::on_initialize(System::block_number()); Stake::on_initialize(System::block_number()); Session::on_initialize(System::block_number()); Aura::on_initialize(System::block_number()); } } pub(crate) fn last_event() -> Event { System::events().pop().expect("Event expected").event } pub(crate) fn events() -> Vec<pallet::Event<Test>> { System::events() .into_iter() .map(|r| r.event) .filter_map(|e| if let Event::Stake(inner) = e { Some(inner) } else { None }) .collect::<Vec<_>>() } // Same storage changes as EventHandler::note_author impl pub(crate) fn set_author(round: u32, acc: u64, pts: u32) { <Points<Test>>::mutate(round, |p| *p += pts); <AwardedPts<Test>>::mutate(round, acc, |p| *p += pts); } #[test] fn geneses() { ExtBuilder::default() .with_balances(vec![ (1, 1000), (2, 300), (3, 100), (4, 100), (5, 100), (6, 100), (7, 100), (8, 9), (9, 4), ]) .with_candidates(vec![(1, 500), (2, 200)]) .with_nominations(vec![(3, 1, 100), (4, 1, 100), (5, 2, 100), (6, 2, 100)]) .build() .execute_with(|| { assert!(System::events().is_empty()); // collators assert_eq!(Balances::reserved_balance(&1), 500); assert_eq!(Balances::free_balance(&1), 500); assert!(Stake::is_candidate(&1)); assert_eq!(Balances::reserved_balance(&2), 200); assert_eq!(Balances::free_balance(&2), 100); assert!(Stake::is_candidate(&2)); // nominators for x in 3..7 { assert!(Stake::is_nominator(&x)); assert_eq!(Balances::free_balance(&x), 0); assert_eq!(Balances::reserved_balance(&x), 100); } // uninvolved for x in 7..10 { assert!(!Stake::is_nominator(&x)); } assert_eq!(Balances::free_balance(&7), 100); 
assert_eq!(Balances::reserved_balance(&7), 0); assert_eq!(Balances::free_balance(&8), 9); assert_eq!(Balances::reserved_balance(&8), 0); assert_eq!(Balances::free_balance(&9), 4); assert_eq!(Balances::reserved_balance(&9), 0); }); ExtBuilder::default() .with_balances(vec![ (1, 100), (2, 100), (3, 100), (4, 100), (5, 100), (6, 100), (7, 100), (8, 100), (9, 100), (10, 100), ]) .with_candidates(vec![(1, 20), (2, 20), (3, 20), (4, 20), (5, 10)]) .with_nominations(vec![(6, 1, 10), (7, 1, 10), (8, 2, 10), (9, 2, 10), (10, 1, 10)]) .build() .execute_with(|| { assert!(System::events().is_empty()); // collators for x in 1..5 { assert!(Stake::is_candidate(&x)); assert_eq!(Balances::free_balance(&x), 80); assert_eq!(Balances::reserved_balance(&x), 20); } assert!(Stake::is_candidate(&5)); assert_eq!(Balances::free_balance(&5), 90); assert_eq!(Balances::reserved_balance(&5), 10); // nominators for x in 6..11 { assert!(Stake::is_nominator(&x)); assert_eq!(Balances::free_balance(&x), 90); assert_eq!(Balances::reserved_balance(&x), 10); } }); }
pub type Balance = u128; pub type BlockNumber = u64; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
random_line_split
mock.rs
// Copyright 2019-2021 PureStake Inc. // This file is part of Moonbeam. // Moonbeam is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Moonbeam is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Moonbeam. If not, see <http://www.gnu.org/licenses/>. //! Test utilities use crate as stake; use crate::{pallet, AwardedPts, Config, InflationInfo, Points, Range}; use frame_support::{ construct_runtime, parameter_types, traits::{Everything, GenesisBuild, OnFinalize, OnInitialize}, weights::Weight, }; use sp_core::H256; use sp_runtime::{ testing::{Header, UintAuthorityId}, traits::{BlakeTwo256, IdentityLookup, OpaqueKeys}, Perbill, Percent, RuntimeAppPublic, }; pub type AccountId = u64; pub type Balance = u128; pub type BlockNumber = u64; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>; type Block = frame_system::mocking::MockBlock<Test>; // Configure a mock runtime to test the pallet. construct_runtime!( pub enum Test where Block = Block, NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { System: frame_system::{Pallet, Call, Config, Storage, Event<T>}, Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>}, Stake: stake::{Pallet, Call, Storage, Config<T>, Event<T>}, Session: pallet_session::{Pallet, Call, Storage, Event, Config<T>}, Aura: pallet_aura::{Pallet, Storage, Config<T>}, } ); parameter_types! { pub const BlockHashCount: u64 = 250; pub const MaximumBlockWeight: Weight = 1024; pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::one(); pub const SS58Prefix: u8 = 42; } impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData<Balance>; type AccountId = AccountId; type BaseCallFilter = Everything; type BlockHashCount = BlockHashCount; type BlockLength = (); type BlockNumber = BlockNumber; type BlockWeights = (); type Call = Call; type DbWeight = (); type Event = Event; type Hash = H256; type Hashing = BlakeTwo256; type Header = Header; type Index = u64; type Lookup = IdentityLookup<Self::AccountId>; type OnKilledAccount = (); type OnNewAccount = (); type OnSetCode = (); type Origin = Origin; type PalletInfo = PalletInfo; type SS58Prefix = SS58Prefix; type SystemWeightInfo = (); type Version = (); } parameter_types! { pub const ExistentialDeposit: u128 = 1; } impl pallet_balances::Config for Test { type AccountStore = System; type Balance = Balance; type DustRemoval = (); type Event = Event; type ExistentialDeposit = ExistentialDeposit; type MaxLocks = (); type MaxReserves = (); type ReserveIdentifier = [u8; 4]; type WeightInfo = (); } parameter_types! { pub const MinimumPeriod: u64 = 1; } impl pallet_timestamp::Config for Test { type MinimumPeriod = MinimumPeriod; type Moment = u64; type OnTimestampSet = Aura; type WeightInfo = (); } parameter_types! 
{ pub const MaxAuthorities: u32 = 100_000; } impl pallet_aura::Config for Test { type AuthorityId = sp_consensus_aura::sr25519::AuthorityId; type DisabledValidators = (); type MaxAuthorities = MaxAuthorities; } sp_runtime::impl_opaque_keys! { pub struct MockSessionKeys { // a key for aura authoring pub aura: UintAuthorityId, } } impl From<UintAuthorityId> for MockSessionKeys { fn from(aura: sp_runtime::testing::UintAuthorityId) -> Self { Self { aura } } } parameter_types! { pub static SessionHandlerCollators: Vec<u64> = vec![]; pub static SessionChangeBlock: u64 = 0; } pub struct TestSessionHandler; impl pallet_session::SessionHandler<u64> for TestSessionHandler { const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[UintAuthorityId::ID]; fn on_genesis_session<Ks: OpaqueKeys>(keys: &[(u64, Ks)]) { SessionHandlerCollators::set(keys.iter().map(|(a, _)| *a).collect::<Vec<_>>()) } fn
<Ks: OpaqueKeys>(_: bool, keys: &[(u64, Ks)], _: &[(u64, Ks)]) { SessionChangeBlock::set(System::block_number()); dbg!(keys.len()); SessionHandlerCollators::set(keys.iter().map(|(a, _)| *a).collect::<Vec<_>>()) } fn on_before_session_ending() {} fn on_disabled(_: u32) {} } impl pallet_session::Config for Test { type Event = Event; type Keys = MockSessionKeys; type NextSessionRotation = Stake; type SessionHandler = TestSessionHandler; type SessionManager = Stake; type ShouldEndSession = Stake; type ValidatorId = <Self as frame_system::Config>::AccountId; // we don't have stash and controller, thus we don't need the convert as well. type ValidatorIdOf = crate::IdentityCollator; type WeightInfo = (); } parameter_types! { pub const MinBlocksPerRound: u32 = 3; pub const BlocksPerRound: u32 = 5; pub const LeaveCandidatesDelay: u32 = 2; pub const LeaveNominatorsDelay: u32 = 2; pub const RevokeNominationDelay: u32 = 2; pub const RewardPaymentDelay: u32 = 2; pub const MinSelectedCandidates: u32 = 5; pub const MaxNominatorsPerCollator: u32 = 4; pub const MaxCollatorsPerNominator: u32 = 4; pub const DefaultCollatorCommission: Perbill = Perbill::from_percent(20); pub const DefaultParachainBondReservePercent: Percent = Percent::from_percent(30); pub const MinCollatorStk: u128 = 10; pub const MinNominatorStk: u128 = 5; pub const MinNomination: u128 = 3; } impl Config for Test { type BlocksPerRound = BlocksPerRound; type Currency = Balances; type DefaultCollatorCommission = DefaultCollatorCommission; type DefaultParachainBondReservePercent = DefaultParachainBondReservePercent; type Event = Event; type LeaveCandidatesDelay = LeaveCandidatesDelay; type LeaveNominatorsDelay = LeaveNominatorsDelay; type MaxCollatorsPerNominator = MaxCollatorsPerNominator; type MaxNominatorsPerCollator = MaxNominatorsPerCollator; type MinBlocksPerRound = MinBlocksPerRound; type MinCollatorCandidateStk = MinCollatorStk; type MinCollatorStk = MinCollatorStk; type MinNomination = MinNomination; type MinNominatorStk = MinNominatorStk; type MinSelectedCandidates = MinSelectedCandidates; type MonetaryGovernanceOrigin = frame_system::EnsureRoot<AccountId>; type RevokeNominationDelay = RevokeNominationDelay; type RewardPaymentDelay = RewardPaymentDelay; type WeightInfo = (); } pub(crate) struct ExtBuilder { // endowed accounts with balances balances: Vec<(AccountId, Balance)>, // [collator, amount] collators: Vec<(AccountId, Balance)>, // [nominator, collator, nomination_amount] nominations: Vec<(AccountId, AccountId, Balance)>, // inflation config inflation: InflationInfo<Balance>, } impl Default for ExtBuilder { fn default() -> ExtBuilder { ExtBuilder { balances: vec![], nominations: vec![], collators: vec![], inflation: InflationInfo { expect: Range { min: 700, ideal: 700, max: 700, }, // not used annual: Range { min: Perbill::from_percent(50), ideal: Perbill::from_percent(50), max: Perbill::from_percent(50), }, // unrealistically high parameterization, only for testing round: Range { min: Perbill::from_percent(5), ideal: Perbill::from_percent(5), max: Perbill::from_percent(5), }, }, } } } impl ExtBuilder { pub(crate) fn with_balances(mut self, balances: Vec<(AccountId, Balance)>) -> Self { self.balances = balances; self } pub(crate) fn with_candidates(mut self, collators: Vec<(AccountId, Balance)>) -> Self { self.collators = collators; self } pub(crate) fn with_nominations(mut self, nominations: Vec<(AccountId, AccountId, Balance)>) -> Self { self.nominations = nominations; self } #[allow(dead_code)] pub(crate) fn 
with_inflation(mut self, inflation: InflationInfo<Balance>) -> Self { self.inflation = inflation; self } pub(crate) fn build(self) -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default() .build_storage::<Test>() .expect("Frame system builds valid default genesis config"); pallet_balances::GenesisConfig::<Test> { balances: self.balances, } .assimilate_storage(&mut t) .expect("Pallet balances storage can be assimilated"); stake::GenesisConfig::<Test> { candidates: self.collators, nominations: self.nominations, inflation_config: self.inflation, } .assimilate_storage(&mut t) .expect("Parachain Staking's storage can be assimilated"); let validators = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; let keys = validators .iter() .map(|i| { (*i, *i, MockSessionKeys { aura: UintAuthorityId(*i), }) }) .collect::<Vec<_>>(); pallet_session::GenesisConfig::<Test> { keys } .assimilate_storage(&mut t) .expect("Pallet session storage can be assimilated"); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext } } pub(crate) fn roll_to(n: u64) { while System::block_number() < n { Balances::on_finalize(System::block_number()); Stake::on_finalize(System::block_number()); Session::on_finalize(System::block_number()); Aura::on_finalize(System::block_number()); System::on_finalize(System::block_number()); System::set_block_number(System::block_number() + 1); System::on_initialize(System::block_number()); Timestamp::on_initialize(System::block_number()); Balances::on_initialize(System::block_number()); Stake::on_initialize(System::block_number()); Session::on_initialize(System::block_number()); Aura::on_initialize(System::block_number()); } } pub(crate) fn last_event() -> Event { System::events().pop().expect("Event expected").event } pub(crate) fn events() -> Vec<pallet::Event<Test>> { System::events() .into_iter() .map(|r| r.event) .filter_map(|e| if let Event::Stake(inner) = e { Some(inner) } else { None }) .collect::<Vec<_>>() } // Same storage changes as EventHandler::note_author impl pub(crate) fn set_author(round: u32, acc: u64, pts: u32) { <Points<Test>>::mutate(round, |p| *p += pts); <AwardedPts<Test>>::mutate(round, acc, |p| *p += pts); } #[test] fn geneses() { ExtBuilder::default() .with_balances(vec![ (1, 1000), (2, 300), (3, 100), (4, 100), (5, 100), (6, 100), (7, 100), (8, 9), (9, 4), ]) .with_candidates(vec![(1, 500), (2, 200)]) .with_nominations(vec![(3, 1, 100), (4, 1, 100), (5, 2, 100), (6, 2, 100)]) .build() .execute_with(|| { assert!(System::events().is_empty()); // collators assert_eq!(Balances::reserved_balance(&1), 500); assert_eq!(Balances::free_balance(&1), 500); assert!(Stake::is_candidate(&1)); assert_eq!(Balances::reserved_balance(&2), 200); assert_eq!(Balances::free_balance(&2), 100); assert!(Stake::is_candidate(&2)); // nominators for x in 3..7 { assert!(Stake::is_nominator(&x)); assert_eq!(Balances::free_balance(&x), 0); assert_eq!(Balances::reserved_balance(&x), 100); } // uninvolved for x in 7..10 { assert!(!Stake::is_nominator(&x)); } assert_eq!(Balances::free_balance(&7), 100); assert_eq!(Balances::reserved_balance(&7), 0); assert_eq!(Balances::free_balance(&8), 9); assert_eq!(Balances::reserved_balance(&8), 0); assert_eq!(Balances::free_balance(&9), 4); assert_eq!(Balances::reserved_balance(&9), 0); }); ExtBuilder::default() .with_balances(vec![ (1, 100), (2, 100), (3, 100), (4, 100), (5, 100), (6, 100), (7, 100), (8, 100), (9, 100), (10, 100), ]) .with_candidates(vec![(1, 20), (2, 20), (3, 20), (4, 20), (5, 10)]) 
.with_nominations(vec![(6, 1, 10), (7, 1, 10), (8, 2, 10), (9, 2, 10), (10, 1, 10)]) .build() .execute_with(|| { assert!(System::events().is_empty()); // collators for x in 1..5 { assert!(Stake::is_candidate(&x)); assert_eq!(Balances::free_balance(&x), 80); assert_eq!(Balances::reserved_balance(&x), 20); } assert!(Stake::is_candidate(&5)); assert_eq!(Balances::free_balance(&5), 90); assert_eq!(Balances::reserved_balance(&5), 10); // nominators for x in 6..11 { assert!(Stake::is_nominator(&x)); assert_eq!(Balances::free_balance(&x), 90); assert_eq!(Balances::reserved_balance(&x), 10); } }); }
on_new_session
identifier_name
mock.rs
// Copyright 2019-2021 PureStake Inc. // This file is part of Moonbeam. // Moonbeam is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Moonbeam is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Moonbeam. If not, see <http://www.gnu.org/licenses/>. //! Test utilities use crate as stake; use crate::{pallet, AwardedPts, Config, InflationInfo, Points, Range}; use frame_support::{ construct_runtime, parameter_types, traits::{Everything, GenesisBuild, OnFinalize, OnInitialize}, weights::Weight, }; use sp_core::H256; use sp_runtime::{ testing::{Header, UintAuthorityId}, traits::{BlakeTwo256, IdentityLookup, OpaqueKeys}, Perbill, Percent, RuntimeAppPublic, }; pub type AccountId = u64; pub type Balance = u128; pub type BlockNumber = u64; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>; type Block = frame_system::mocking::MockBlock<Test>; // Configure a mock runtime to test the pallet. construct_runtime!( pub enum Test where Block = Block, NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic, { System: frame_system::{Pallet, Call, Config, Storage, Event<T>}, Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>}, Stake: stake::{Pallet, Call, Storage, Config<T>, Event<T>}, Session: pallet_session::{Pallet, Call, Storage, Event, Config<T>}, Aura: pallet_aura::{Pallet, Storage, Config<T>}, } ); parameter_types! { pub const BlockHashCount: u64 = 250; pub const MaximumBlockWeight: Weight = 1024; pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::one(); pub const SS58Prefix: u8 = 42; } impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData<Balance>; type AccountId = AccountId; type BaseCallFilter = Everything; type BlockHashCount = BlockHashCount; type BlockLength = (); type BlockNumber = BlockNumber; type BlockWeights = (); type Call = Call; type DbWeight = (); type Event = Event; type Hash = H256; type Hashing = BlakeTwo256; type Header = Header; type Index = u64; type Lookup = IdentityLookup<Self::AccountId>; type OnKilledAccount = (); type OnNewAccount = (); type OnSetCode = (); type Origin = Origin; type PalletInfo = PalletInfo; type SS58Prefix = SS58Prefix; type SystemWeightInfo = (); type Version = (); } parameter_types! { pub const ExistentialDeposit: u128 = 1; } impl pallet_balances::Config for Test { type AccountStore = System; type Balance = Balance; type DustRemoval = (); type Event = Event; type ExistentialDeposit = ExistentialDeposit; type MaxLocks = (); type MaxReserves = (); type ReserveIdentifier = [u8; 4]; type WeightInfo = (); } parameter_types! { pub const MinimumPeriod: u64 = 1; } impl pallet_timestamp::Config for Test { type MinimumPeriod = MinimumPeriod; type Moment = u64; type OnTimestampSet = Aura; type WeightInfo = (); } parameter_types! 
{ pub const MaxAuthorities: u32 = 100_000; } impl pallet_aura::Config for Test { type AuthorityId = sp_consensus_aura::sr25519::AuthorityId; type DisabledValidators = (); type MaxAuthorities = MaxAuthorities; } sp_runtime::impl_opaque_keys! { pub struct MockSessionKeys { // a key for aura authoring pub aura: UintAuthorityId, } } impl From<UintAuthorityId> for MockSessionKeys { fn from(aura: sp_runtime::testing::UintAuthorityId) -> Self { Self { aura } } } parameter_types! { pub static SessionHandlerCollators: Vec<u64> = vec![]; pub static SessionChangeBlock: u64 = 0; } pub struct TestSessionHandler; impl pallet_session::SessionHandler<u64> for TestSessionHandler { const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[UintAuthorityId::ID]; fn on_genesis_session<Ks: OpaqueKeys>(keys: &[(u64, Ks)]) { SessionHandlerCollators::set(keys.iter().map(|(a, _)| *a).collect::<Vec<_>>()) } fn on_new_session<Ks: OpaqueKeys>(_: bool, keys: &[(u64, Ks)], _: &[(u64, Ks)]) { SessionChangeBlock::set(System::block_number()); dbg!(keys.len()); SessionHandlerCollators::set(keys.iter().map(|(a, _)| *a).collect::<Vec<_>>()) } fn on_before_session_ending() {} fn on_disabled(_: u32) {} } impl pallet_session::Config for Test { type Event = Event; type Keys = MockSessionKeys; type NextSessionRotation = Stake; type SessionHandler = TestSessionHandler; type SessionManager = Stake; type ShouldEndSession = Stake; type ValidatorId = <Self as frame_system::Config>::AccountId; // we don't have stash and controller, thus we don't need the convert as well. type ValidatorIdOf = crate::IdentityCollator; type WeightInfo = (); } parameter_types! { pub const MinBlocksPerRound: u32 = 3; pub const BlocksPerRound: u32 = 5; pub const LeaveCandidatesDelay: u32 = 2; pub const LeaveNominatorsDelay: u32 = 2; pub const RevokeNominationDelay: u32 = 2; pub const RewardPaymentDelay: u32 = 2; pub const MinSelectedCandidates: u32 = 5; pub const MaxNominatorsPerCollator: u32 = 4; pub const MaxCollatorsPerNominator: u32 = 4; pub const DefaultCollatorCommission: Perbill = Perbill::from_percent(20); pub const DefaultParachainBondReservePercent: Percent = Percent::from_percent(30); pub const MinCollatorStk: u128 = 10; pub const MinNominatorStk: u128 = 5; pub const MinNomination: u128 = 3; } impl Config for Test { type BlocksPerRound = BlocksPerRound; type Currency = Balances; type DefaultCollatorCommission = DefaultCollatorCommission; type DefaultParachainBondReservePercent = DefaultParachainBondReservePercent; type Event = Event; type LeaveCandidatesDelay = LeaveCandidatesDelay; type LeaveNominatorsDelay = LeaveNominatorsDelay; type MaxCollatorsPerNominator = MaxCollatorsPerNominator; type MaxNominatorsPerCollator = MaxNominatorsPerCollator; type MinBlocksPerRound = MinBlocksPerRound; type MinCollatorCandidateStk = MinCollatorStk; type MinCollatorStk = MinCollatorStk; type MinNomination = MinNomination; type MinNominatorStk = MinNominatorStk; type MinSelectedCandidates = MinSelectedCandidates; type MonetaryGovernanceOrigin = frame_system::EnsureRoot<AccountId>; type RevokeNominationDelay = RevokeNominationDelay; type RewardPaymentDelay = RewardPaymentDelay; type WeightInfo = (); } pub(crate) struct ExtBuilder { // endowed accounts with balances balances: Vec<(AccountId, Balance)>, // [collator, amount] collators: Vec<(AccountId, Balance)>, // [nominator, collator, nomination_amount] nominations: Vec<(AccountId, AccountId, Balance)>, // inflation config inflation: InflationInfo<Balance>, } impl Default for ExtBuilder { fn default() -> ExtBuilder 
{ ExtBuilder { balances: vec![], nominations: vec![], collators: vec![], inflation: InflationInfo { expect: Range { min: 700, ideal: 700, max: 700, }, // not used annual: Range { min: Perbill::from_percent(50), ideal: Perbill::from_percent(50), max: Perbill::from_percent(50), }, // unrealistically high parameterization, only for testing round: Range { min: Perbill::from_percent(5), ideal: Perbill::from_percent(5), max: Perbill::from_percent(5), }, }, } } } impl ExtBuilder { pub(crate) fn with_balances(mut self, balances: Vec<(AccountId, Balance)>) -> Self { self.balances = balances; self } pub(crate) fn with_candidates(mut self, collators: Vec<(AccountId, Balance)>) -> Self { self.collators = collators; self } pub(crate) fn with_nominations(mut self, nominations: Vec<(AccountId, AccountId, Balance)>) -> Self { self.nominations = nominations; self } #[allow(dead_code)] pub(crate) fn with_inflation(mut self, inflation: InflationInfo<Balance>) -> Self { self.inflation = inflation; self } pub(crate) fn build(self) -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default() .build_storage::<Test>() .expect("Frame system builds valid default genesis config"); pallet_balances::GenesisConfig::<Test> { balances: self.balances, } .assimilate_storage(&mut t) .expect("Pallet balances storage can be assimilated"); stake::GenesisConfig::<Test> { candidates: self.collators, nominations: self.nominations, inflation_config: self.inflation, } .assimilate_storage(&mut t) .expect("Parachain Staking's storage can be assimilated"); let validators = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; let keys = validators .iter() .map(|i| { (*i, *i, MockSessionKeys { aura: UintAuthorityId(*i), }) }) .collect::<Vec<_>>(); pallet_session::GenesisConfig::<Test> { keys } .assimilate_storage(&mut t) .expect("Pallet session storage can be assimilated"); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(|| System::set_block_number(1)); ext } } pub(crate) fn roll_to(n: u64) { while System::block_number() < n { Balances::on_finalize(System::block_number()); Stake::on_finalize(System::block_number()); Session::on_finalize(System::block_number()); Aura::on_finalize(System::block_number()); System::on_finalize(System::block_number()); System::set_block_number(System::block_number() + 1); System::on_initialize(System::block_number()); Timestamp::on_initialize(System::block_number()); Balances::on_initialize(System::block_number()); Stake::on_initialize(System::block_number()); Session::on_initialize(System::block_number()); Aura::on_initialize(System::block_number()); } } pub(crate) fn last_event() -> Event { System::events().pop().expect("Event expected").event } pub(crate) fn events() -> Vec<pallet::Event<Test>> { System::events() .into_iter() .map(|r| r.event) .filter_map(|e| if let Event::Stake(inner) = e { Some(inner) } else
) .collect::<Vec<_>>() } // Same storage changes as EventHandler::note_author impl pub(crate) fn set_author(round: u32, acc: u64, pts: u32) { <Points<Test>>::mutate(round, |p| *p += pts); <AwardedPts<Test>>::mutate(round, acc, |p| *p += pts); } #[test] fn geneses() { ExtBuilder::default() .with_balances(vec![ (1, 1000), (2, 300), (3, 100), (4, 100), (5, 100), (6, 100), (7, 100), (8, 9), (9, 4), ]) .with_candidates(vec![(1, 500), (2, 200)]) .with_nominations(vec![(3, 1, 100), (4, 1, 100), (5, 2, 100), (6, 2, 100)]) .build() .execute_with(|| { assert!(System::events().is_empty()); // collators assert_eq!(Balances::reserved_balance(&1), 500); assert_eq!(Balances::free_balance(&1), 500); assert!(Stake::is_candidate(&1)); assert_eq!(Balances::reserved_balance(&2), 200); assert_eq!(Balances::free_balance(&2), 100); assert!(Stake::is_candidate(&2)); // nominators for x in 3..7 { assert!(Stake::is_nominator(&x)); assert_eq!(Balances::free_balance(&x), 0); assert_eq!(Balances::reserved_balance(&x), 100); } // uninvolved for x in 7..10 { assert!(!Stake::is_nominator(&x)); } assert_eq!(Balances::free_balance(&7), 100); assert_eq!(Balances::reserved_balance(&7), 0); assert_eq!(Balances::free_balance(&8), 9); assert_eq!(Balances::reserved_balance(&8), 0); assert_eq!(Balances::free_balance(&9), 4); assert_eq!(Balances::reserved_balance(&9), 0); }); ExtBuilder::default() .with_balances(vec![ (1, 100), (2, 100), (3, 100), (4, 100), (5, 100), (6, 100), (7, 100), (8, 100), (9, 100), (10, 100), ]) .with_candidates(vec![(1, 20), (2, 20), (3, 20), (4, 20), (5, 10)]) .with_nominations(vec![(6, 1, 10), (7, 1, 10), (8, 2, 10), (9, 2, 10), (10, 1, 10)]) .build() .execute_with(|| { assert!(System::events().is_empty()); // collators for x in 1..5 { assert!(Stake::is_candidate(&x)); assert_eq!(Balances::free_balance(&x), 80); assert_eq!(Balances::reserved_balance(&x), 20); } assert!(Stake::is_candidate(&5)); assert_eq!(Balances::free_balance(&5), 90); assert_eq!(Balances::reserved_balance(&5), 10); // nominators for x in 6..11 { assert!(Stake::is_nominator(&x)); assert_eq!(Balances::free_balance(&x), 90); assert_eq!(Balances::reserved_balance(&x), 10); } }); }
{ None }
conditional_block
test_agent.py
"""Run this file to train the User Agent""" from __future__ import absolute_import, division, print_function import argparse import os from functools import reduce from math import log from tqdm import tqdm from kg_env import BatchKGEnvironment from train_agent import ActorCritic from utils import * def evaluate(topk_matches, test_user_products, num_recommendations, brand_dict): """Compute metrics for predicted recommendations. Args: topk_matches: a list or dict of product ids in ascending order. """ invalid_users = [] # Compute metrics precisions, recalls, ndcgs, hits, fairness = [], [], [], [], [] test_user_idxs = list(test_user_products.keys()) for uid in test_user_idxs: if uid not in topk_matches or len(topk_matches[uid]) < num_recommendations: invalid_users.append(uid) continue pred_list, rel_set = topk_matches[uid][::-1], test_user_products[uid] if len(pred_list) == 0: continue dcg = 0.0 hit_num = 0.0 for i in range(len(pred_list)): if pred_list[i] in rel_set: dcg += 1. / (log(i + 2) / log(2)) hit_num += 1 # idcg idcg = 0.0 for i in range(min(len(rel_set), len(pred_list))): idcg += 1. / (log(i + 2) / log(2)) ndcg = dcg / idcg recall = hit_num / len(rel_set) precision = hit_num / len(pred_list) hit = 1.0 if hit_num > 0.0 else 0.0 ndcgs.append(ndcg) recalls.append(recall) precisions.append(precision) hits.append(hit) fairness.append(calculate_fairness(pred_list, brand_dict)) avg_precision = np.mean(precisions) * 100 avg_recall = np.mean(recalls) * 100 avg_ndcg = np.mean(ndcgs) * 100 avg_hit = np.mean(hits) * 100 avg_fairness = np.mean(fairness) print('NDCG={:.3f} | Recall={:.3f} | HR={:.3f} | Precision={:.3f} | Fairness={:.3f} | Invalid users={}'.format( avg_ndcg, avg_recall, avg_hit, avg_precision, avg_fairness, len(invalid_users))) def batch_beam_search(env, model, uids, device, topk=[25, 5, 1]): def _batch_acts_to_masks(batch_acts): batch_masks = [] for acts in batch_acts: num_acts = len(acts) act_mask = np.zeros(model.act_dim, dtype=np.uint8) act_mask[:num_acts] = 1 batch_masks.append(act_mask) return np.vstack(batch_masks) state_pool = env.reset(uids) # numpy of [bs, dim] path_pool = env._batch_path # list of list, size=bs probs_pool = [[] for _ in uids] model.eval() for hop in range(3): state_tensor = torch.FloatTensor(state_pool).to(device) acts_pool = env._batch_get_actions(path_pool, False) # list of list, size=bs actmask_pool = _batch_acts_to_masks(acts_pool) # numpy of [bs, dim] actmask_tensor = torch.ByteTensor(actmask_pool).to(device) probs, _ = model((state_tensor, actmask_tensor)) # Tensor of [bs, act_dim] probs = probs + actmask_tensor.float() # In order to differ from masked actions topk_probs, topk_idxs = torch.topk(probs, topk[hop], dim=1) # LongTensor of [bs, k] topk_idxs = topk_idxs.detach().cpu().numpy() topk_probs = topk_probs.detach().cpu().numpy() new_path_pool, new_probs_pool = [], [] for row in range(topk_idxs.shape[0]): path = path_pool[row] probs = probs_pool[row] for idx, p in zip(topk_idxs[row], topk_probs[row]): if idx >= len(acts_pool[row]): # act idx is invalid continue relation, next_node_id = acts_pool[row][idx] # (relation, next_node_id) if relation == SELF_LOOP: next_node_type = path[-1][1] else:
new_path = path + [(relation, next_node_type, next_node_id)] new_path_pool.append(new_path) new_probs_pool.append(probs + [p]) path_pool = new_path_pool probs_pool = new_probs_pool if hop < 2: state_pool = env._batch_get_state(path_pool) return path_pool, probs_pool def predict_paths(policy_file, path_file, args): print('Predicting paths...') env = BatchKGEnvironment(args.dataset, args.max_acts, max_path_len=args.max_path_len, state_history=args.state_history) pretrain_sd = torch.load(policy_file) model = ActorCritic(env.state_dim, env.act_dim, gamma=args.gamma, hidden_sizes=args.hidden).to(args.device) model_sd = model.state_dict() model_sd.update(pretrain_sd) model.load_state_dict(model_sd) test_labels = load_labels(args.dataset, 'test') test_uids = list(test_labels.keys()) batch_size = 16 start_idx = 0 all_paths, all_probs = [], [] pbar = tqdm(total=len(test_uids)) while start_idx < len(test_uids): end_idx = min(start_idx + batch_size, len(test_uids)) batch_uids = test_uids[start_idx:end_idx] paths, probs = batch_beam_search(env, model, batch_uids, args.device, topk=args.topk) all_paths.extend(paths) all_probs.extend(probs) start_idx = end_idx pbar.update(batch_size) predicts = {'paths': all_paths, 'probs': all_probs} pickle.dump(predicts, open(path_file, 'wb')) def evaluate_paths(path_file, train_labels, test_labels, num_recommendations, args): embeds = load_embed(args.dataset) user_embeds = embeds[USER] purchase_embeds = embeds[PURCHASE][0] product_embeds = embeds[PRODUCT] scores = np.dot(user_embeds + purchase_embeds, product_embeds.T) # 1) Get all valid paths for each user, compute path score and path probability. results = pickle.load(open(path_file, 'rb')) pred_paths = {uid: {} for uid in test_labels} for path, probs in zip(results['paths'], results['probs']): if path[-1][1] != PRODUCT: continue uid = path[0][2] if uid not in pred_paths: continue pid = path[-1][2] if pid not in pred_paths[uid]: pred_paths[uid][pid] = [] path_score = scores[uid][pid] path_prob = reduce(lambda x, y: x * y, probs) pred_paths[uid][pid].append((path_score, path_prob, path)) # 2) Pick best path for each user-product pair, also remove pid if it is in train set. best_pred_paths = {} for uid in pred_paths: train_pids = set(train_labels[uid]) best_pred_paths[uid] = [] for pid in pred_paths[uid]: if pid in train_pids: continue # Get the path with highest probability sorted_path = sorted(pred_paths[uid][pid], key=lambda x: x[1], reverse=True) best_pred_paths[uid].append(sorted_path[0]) # 3) Compute top 10 recommended products for each user. sort_by = 'score' pred_labels = {} for uid in best_pred_paths: if sort_by == 'score': sorted_path = sorted(best_pred_paths[uid], key=lambda x: (x[0], x[1]), reverse=True) elif sort_by == 'prob': sorted_path = sorted(best_pred_paths[uid], key=lambda x: (x[1], x[0]), reverse=True) topk_pids = [p[-1][2] for _, _, p in sorted_path[:num_recommendations]] # from largest to smallest # add up to 10 pids if not enough if args.add_products and len(topk_pids) < num_recommendations: train_pids = set(train_labels[uid]) cand_pids = np.argsort(scores[uid]) for cand_pid in cand_pids[::-1]: if cand_pid in train_pids or cand_pid in topk_pids: continue topk_pids.append(cand_pid) if len(topk_pids) >= num_recommendations: break # end of add pred_labels[uid] = topk_pids[::-1] # change order to from smallest to largest! 
return pred_labels def test(args): policy_file = args.log_dir + '/policy_model_epoch_{}.ckpt'.format(args.epochs) path_file = args.log_dir + '/policy_paths_epoch{}.pkl'.format(args.epochs) train_labels = load_labels(args.dataset, 'train') test_labels = load_labels(args.dataset, 'test') if args.run_path: predict_paths(policy_file, path_file, args) if args.run_eval: pred_labels = evaluate_paths(path_file, train_labels, test_labels, args.num_recommendations, args) evaluate(pred_labels, test_labels, args.num_recommendations, args.brand_dict) if __name__ == '__main__': boolean = lambda x: (str(x).lower() == 'true') parser = argparse.ArgumentParser() parser.add_argument('--dataset', type=str, default=BEAUTY, help='One of {cloth, beauty, cell, cd}') parser.add_argument('--name', type=str, default='train_agent', help='directory name.') parser.add_argument('--gpu', type=str, default='0', help='gpu device.') parser.add_argument('--epochs', type=int, default=50, help='num of epochs.') parser.add_argument('--max_acts', type=int, default=250, help='Max number of actions.') parser.add_argument('--max_path_len', type=int, default=3, help='Max path length.') parser.add_argument('--gamma', type=float, default=0.99, help='reward discount factor.') parser.add_argument('--state_history', type=int, default=1, help='state history length') parser.add_argument('--hidden', type=int, nargs='*', default=[512, 256], help='number of samples') parser.add_argument('--add_products', type=boolean, default=False, help='Add predicted products up to 10') parser.add_argument('--topk', type=int, nargs='*', default=[25, 5, 1], help='number of samples') parser.add_argument('--run_path', type=boolean, default=True, help='Generate predicted path? (takes long time)') parser.add_argument('--run_eval', type=boolean, default=True, help='Run evaluation?') parser.add_argument('--num_recommendations', type=int, default=10, help='The number of recommendations that ' 'will be predicted for each user') args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu args.device = torch.device('cuda:0') if torch.cuda.is_available() else 'cpu' args.log_dir = TMP_DIR[args.dataset] + '/' + args.name pickle_in = open(BRAND_FILE[args.dataset], "rb") args.brand_dict = pickle.load(pickle_in) test(args)
next_node_type = KG_RELATION[path[-1][1]][relation]
conditional_block
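The test_agent.py record above is split around the KG_RELATION lookup inside batch_beam_search. A detail that is easy to miss in that function is the line `probs = probs + actmask_tensor.float()` just before torch.topk: the softmax outputs lie in [0, 1), so adding the 0/1 action mask lifts every valid action above every padded slot, and the beam can only select a padded index when k exceeds the number of valid actions, which the later `if idx >= len(acts_pool[row])` check then discards. Below is a minimal, self-contained sketch of that ranking trick with toy numbers that are not taken from any record.

import torch

# Toy setup, not from the dataset: 2 states, act_dim = 5.
# Row 0 has 3 valid actions, row 1 has 2; remaining slots are padding.
probs = torch.tensor([[0.5, 0.3, 0.2, 0.0, 0.0],
                      [0.9, 0.1, 0.0, 0.0, 0.0]])
act_mask = torch.tensor([[1, 1, 1, 0, 0],
                         [1, 1, 0, 0, 0]], dtype=torch.uint8)

# Same shift as in batch_beam_search: valid entries move into [1, 2),
# padded entries stay in [0, 1), so top-k prefers every valid action.
shifted = probs + act_mask.float()
topk_probs, topk_idxs = torch.topk(shifted, k=3, dim=1)
print(topk_idxs)
# row 0 -> indices 0, 1, 2 (all valid); row 1 -> 0, 1 plus one padded index,
# which the caller skips via `if idx >= len(acts_pool[row]): continue`.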
test_agent.py
"""Run this file to train the User Agent""" from __future__ import absolute_import, division, print_function import argparse import os from functools import reduce from math import log from tqdm import tqdm from kg_env import BatchKGEnvironment from train_agent import ActorCritic from utils import * def evaluate(topk_matches, test_user_products, num_recommendations, brand_dict): """Compute metrics for predicted recommendations. Args: topk_matches: a list or dict of product ids in ascending order. """ invalid_users = [] # Compute metrics precisions, recalls, ndcgs, hits, fairness = [], [], [], [], [] test_user_idxs = list(test_user_products.keys()) for uid in test_user_idxs: if uid not in topk_matches or len(topk_matches[uid]) < num_recommendations: invalid_users.append(uid) continue pred_list, rel_set = topk_matches[uid][::-1], test_user_products[uid] if len(pred_list) == 0: continue dcg = 0.0 hit_num = 0.0 for i in range(len(pred_list)): if pred_list[i] in rel_set: dcg += 1. / (log(i + 2) / log(2)) hit_num += 1 # idcg idcg = 0.0 for i in range(min(len(rel_set), len(pred_list))): idcg += 1. / (log(i + 2) / log(2)) ndcg = dcg / idcg recall = hit_num / len(rel_set) precision = hit_num / len(pred_list) hit = 1.0 if hit_num > 0.0 else 0.0 ndcgs.append(ndcg) recalls.append(recall) precisions.append(precision) hits.append(hit) fairness.append(calculate_fairness(pred_list, brand_dict)) avg_precision = np.mean(precisions) * 100 avg_recall = np.mean(recalls) * 100 avg_ndcg = np.mean(ndcgs) * 100 avg_hit = np.mean(hits) * 100 avg_fairness = np.mean(fairness) print('NDCG={:.3f} | Recall={:.3f} | HR={:.3f} | Precision={:.3f} | Fairness={:.3f} | Invalid users={}'.format( avg_ndcg, avg_recall, avg_hit, avg_precision, avg_fairness, len(invalid_users))) def batch_beam_search(env, model, uids, device, topk=[25, 5, 1]): def _batch_acts_to_masks(batch_acts): batch_masks = [] for acts in batch_acts: num_acts = len(acts) act_mask = np.zeros(model.act_dim, dtype=np.uint8) act_mask[:num_acts] = 1 batch_masks.append(act_mask) return np.vstack(batch_masks) state_pool = env.reset(uids) # numpy of [bs, dim] path_pool = env._batch_path # list of list, size=bs probs_pool = [[] for _ in uids] model.eval() for hop in range(3): state_tensor = torch.FloatTensor(state_pool).to(device) acts_pool = env._batch_get_actions(path_pool, False) # list of list, size=bs actmask_pool = _batch_acts_to_masks(acts_pool) # numpy of [bs, dim] actmask_tensor = torch.ByteTensor(actmask_pool).to(device) probs, _ = model((state_tensor, actmask_tensor)) # Tensor of [bs, act_dim] probs = probs + actmask_tensor.float() # In order to differ from masked actions topk_probs, topk_idxs = torch.topk(probs, topk[hop], dim=1) # LongTensor of [bs, k] topk_idxs = topk_idxs.detach().cpu().numpy() topk_probs = topk_probs.detach().cpu().numpy() new_path_pool, new_probs_pool = [], [] for row in range(topk_idxs.shape[0]): path = path_pool[row] probs = probs_pool[row] for idx, p in zip(topk_idxs[row], topk_probs[row]): if idx >= len(acts_pool[row]): # act idx is invalid continue relation, next_node_id = acts_pool[row][idx] # (relation, next_node_id) if relation == SELF_LOOP: next_node_type = path[-1][1] else: next_node_type = KG_RELATION[path[-1][1]][relation] new_path = path + [(relation, next_node_type, next_node_id)] new_path_pool.append(new_path) new_probs_pool.append(probs + [p]) path_pool = new_path_pool probs_pool = new_probs_pool if hop < 2: state_pool = env._batch_get_state(path_pool) return path_pool, probs_pool def predict_paths(policy_file, 
path_file, args): print('Predicting paths...') env = BatchKGEnvironment(args.dataset, args.max_acts, max_path_len=args.max_path_len, state_history=args.state_history) pretrain_sd = torch.load(policy_file) model = ActorCritic(env.state_dim, env.act_dim, gamma=args.gamma, hidden_sizes=args.hidden).to(args.device) model_sd = model.state_dict() model_sd.update(pretrain_sd) model.load_state_dict(model_sd) test_labels = load_labels(args.dataset, 'test') test_uids = list(test_labels.keys()) batch_size = 16 start_idx = 0 all_paths, all_probs = [], [] pbar = tqdm(total=len(test_uids)) while start_idx < len(test_uids): end_idx = min(start_idx + batch_size, len(test_uids)) batch_uids = test_uids[start_idx:end_idx] paths, probs = batch_beam_search(env, model, batch_uids, args.device, topk=args.topk) all_paths.extend(paths) all_probs.extend(probs) start_idx = end_idx pbar.update(batch_size) predicts = {'paths': all_paths, 'probs': all_probs} pickle.dump(predicts, open(path_file, 'wb')) def evaluate_paths(path_file, train_labels, test_labels, num_recommendations, args): embeds = load_embed(args.dataset) user_embeds = embeds[USER] purchase_embeds = embeds[PURCHASE][0] product_embeds = embeds[PRODUCT] scores = np.dot(user_embeds + purchase_embeds, product_embeds.T) # 1) Get all valid paths for each user, compute path score and path probability. results = pickle.load(open(path_file, 'rb')) pred_paths = {uid: {} for uid in test_labels} for path, probs in zip(results['paths'], results['probs']): if path[-1][1] != PRODUCT: continue
uid = path[0][2] if uid not in pred_paths: continue pid = path[-1][2] if pid not in pred_paths[uid]: pred_paths[uid][pid] = [] path_score = scores[uid][pid] path_prob = reduce(lambda x, y: x * y, probs) pred_paths[uid][pid].append((path_score, path_prob, path)) # 2) Pick best path for each user-product pair, also remove pid if it is in train set. best_pred_paths = {} for uid in pred_paths: train_pids = set(train_labels[uid]) best_pred_paths[uid] = [] for pid in pred_paths[uid]: if pid in train_pids: continue # Get the path with highest probability sorted_path = sorted(pred_paths[uid][pid], key=lambda x: x[1], reverse=True) best_pred_paths[uid].append(sorted_path[0]) # 3) Compute top 10 recommended products for each user. sort_by = 'score' pred_labels = {} for uid in best_pred_paths: if sort_by == 'score': sorted_path = sorted(best_pred_paths[uid], key=lambda x: (x[0], x[1]), reverse=True) elif sort_by == 'prob': sorted_path = sorted(best_pred_paths[uid], key=lambda x: (x[1], x[0]), reverse=True) topk_pids = [p[-1][2] for _, _, p in sorted_path[:num_recommendations]] # from largest to smallest # add up to 10 pids if not enough if args.add_products and len(topk_pids) < num_recommendations: train_pids = set(train_labels[uid]) cand_pids = np.argsort(scores[uid]) for cand_pid in cand_pids[::-1]: if cand_pid in train_pids or cand_pid in topk_pids: continue topk_pids.append(cand_pid) if len(topk_pids) >= num_recommendations: break # end of add pred_labels[uid] = topk_pids[::-1] # change order to from smallest to largest! return pred_labels def test(args): policy_file = args.log_dir + '/policy_model_epoch_{}.ckpt'.format(args.epochs) path_file = args.log_dir + '/policy_paths_epoch{}.pkl'.format(args.epochs) train_labels = load_labels(args.dataset, 'train') test_labels = load_labels(args.dataset, 'test') if args.run_path: predict_paths(policy_file, path_file, args) if args.run_eval: pred_labels = evaluate_paths(path_file, train_labels, test_labels, args.num_recommendations, args) evaluate(pred_labels, test_labels, args.num_recommendations, args.brand_dict) if __name__ == '__main__': boolean = lambda x: (str(x).lower() == 'true') parser = argparse.ArgumentParser() parser.add_argument('--dataset', type=str, default=BEAUTY, help='One of {cloth, beauty, cell, cd}') parser.add_argument('--name', type=str, default='train_agent', help='directory name.') parser.add_argument('--gpu', type=str, default='0', help='gpu device.') parser.add_argument('--epochs', type=int, default=50, help='num of epochs.') parser.add_argument('--max_acts', type=int, default=250, help='Max number of actions.') parser.add_argument('--max_path_len', type=int, default=3, help='Max path length.') parser.add_argument('--gamma', type=float, default=0.99, help='reward discount factor.') parser.add_argument('--state_history', type=int, default=1, help='state history length') parser.add_argument('--hidden', type=int, nargs='*', default=[512, 256], help='number of samples') parser.add_argument('--add_products', type=boolean, default=False, help='Add predicted products up to 10') parser.add_argument('--topk', type=int, nargs='*', default=[25, 5, 1], help='number of samples') parser.add_argument('--run_path', type=boolean, default=True, help='Generate predicted path? 
(takes long time)') parser.add_argument('--run_eval', type=boolean, default=True, help='Run evaluation?') parser.add_argument('--num_recommendations', type=int, default=10, help='The number of recommendations that ' 'will be predicted for each user') args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu args.device = torch.device('cuda:0') if torch.cuda.is_available() else 'cpu' args.log_dir = TMP_DIR[args.dataset] + '/' + args.name pickle_in = open(BRAND_FILE[args.dataset], "rb") args.brand_dict = pickle.load(pickle_in) test(args)
random_line_split
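The evaluate_paths step in the record above combines two quantities per candidate product: a knowledge-graph embedding score, `np.dot(user_embeds + purchase_embeds, product_embeds.T)`, and a path probability, the product of the per-hop probabilities returned by the beam search. The following sketch uses made-up shapes and values (the real arrays come from load_embed and batch_beam_search) to show both computations.

import numpy as np
from functools import reduce

# Made-up sizes: 3 users, 4 products, 8-dim embeddings. In the script these
# come from load_embed(args.dataset) for USER, PURCHASE and PRODUCT.
rng = np.random.default_rng(0)
user_embeds = rng.normal(size=(3, 8))
purchase_embeds = rng.normal(size=(8,))      # the purchase relation vector
product_embeds = rng.normal(size=(4, 8))

# Scoring rule used by evaluate_paths(): translate the user by the purchase
# relation, then take the dot product with every product embedding.
scores = np.dot(user_embeds + purchase_embeds, product_embeds.T)  # shape (3, 4)

# Path probability: the per-hop probabilities accumulated by batch_beam_search
# are multiplied together, exactly as reduce(lambda x, y: x * y, probs) does.
hop_probs = [0.8, 0.5, 0.9]
path_prob = reduce(lambda x, y: x * y, hop_probs)                 # 0.36

print(scores.shape, round(path_prob, 2))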
test_agent.py
"""Run this file to train the User Agent""" from __future__ import absolute_import, division, print_function import argparse import os from functools import reduce from math import log from tqdm import tqdm from kg_env import BatchKGEnvironment from train_agent import ActorCritic from utils import * def evaluate(topk_matches, test_user_products, num_recommendations, brand_dict):
def batch_beam_search(env, model, uids, device, topk=[25, 5, 1]): def _batch_acts_to_masks(batch_acts): batch_masks = [] for acts in batch_acts: num_acts = len(acts) act_mask = np.zeros(model.act_dim, dtype=np.uint8) act_mask[:num_acts] = 1 batch_masks.append(act_mask) return np.vstack(batch_masks) state_pool = env.reset(uids) # numpy of [bs, dim] path_pool = env._batch_path # list of list, size=bs probs_pool = [[] for _ in uids] model.eval() for hop in range(3): state_tensor = torch.FloatTensor(state_pool).to(device) acts_pool = env._batch_get_actions(path_pool, False) # list of list, size=bs actmask_pool = _batch_acts_to_masks(acts_pool) # numpy of [bs, dim] actmask_tensor = torch.ByteTensor(actmask_pool).to(device) probs, _ = model((state_tensor, actmask_tensor)) # Tensor of [bs, act_dim] probs = probs + actmask_tensor.float() # In order to differ from masked actions topk_probs, topk_idxs = torch.topk(probs, topk[hop], dim=1) # LongTensor of [bs, k] topk_idxs = topk_idxs.detach().cpu().numpy() topk_probs = topk_probs.detach().cpu().numpy() new_path_pool, new_probs_pool = [], [] for row in range(topk_idxs.shape[0]): path = path_pool[row] probs = probs_pool[row] for idx, p in zip(topk_idxs[row], topk_probs[row]): if idx >= len(acts_pool[row]): # act idx is invalid continue relation, next_node_id = acts_pool[row][idx] # (relation, next_node_id) if relation == SELF_LOOP: next_node_type = path[-1][1] else: next_node_type = KG_RELATION[path[-1][1]][relation] new_path = path + [(relation, next_node_type, next_node_id)] new_path_pool.append(new_path) new_probs_pool.append(probs + [p]) path_pool = new_path_pool probs_pool = new_probs_pool if hop < 2: state_pool = env._batch_get_state(path_pool) return path_pool, probs_pool def predict_paths(policy_file, path_file, args): print('Predicting paths...') env = BatchKGEnvironment(args.dataset, args.max_acts, max_path_len=args.max_path_len, state_history=args.state_history) pretrain_sd = torch.load(policy_file) model = ActorCritic(env.state_dim, env.act_dim, gamma=args.gamma, hidden_sizes=args.hidden).to(args.device) model_sd = model.state_dict() model_sd.update(pretrain_sd) model.load_state_dict(model_sd) test_labels = load_labels(args.dataset, 'test') test_uids = list(test_labels.keys()) batch_size = 16 start_idx = 0 all_paths, all_probs = [], [] pbar = tqdm(total=len(test_uids)) while start_idx < len(test_uids): end_idx = min(start_idx + batch_size, len(test_uids)) batch_uids = test_uids[start_idx:end_idx] paths, probs = batch_beam_search(env, model, batch_uids, args.device, topk=args.topk) all_paths.extend(paths) all_probs.extend(probs) start_idx = end_idx pbar.update(batch_size) predicts = {'paths': all_paths, 'probs': all_probs} pickle.dump(predicts, open(path_file, 'wb')) def evaluate_paths(path_file, train_labels, test_labels, num_recommendations, args): embeds = load_embed(args.dataset) user_embeds = embeds[USER] purchase_embeds = embeds[PURCHASE][0] product_embeds = embeds[PRODUCT] scores = np.dot(user_embeds + purchase_embeds, product_embeds.T) # 1) Get all valid paths for each user, compute path score and path probability. 
results = pickle.load(open(path_file, 'rb')) pred_paths = {uid: {} for uid in test_labels} for path, probs in zip(results['paths'], results['probs']): if path[-1][1] != PRODUCT: continue uid = path[0][2] if uid not in pred_paths: continue pid = path[-1][2] if pid not in pred_paths[uid]: pred_paths[uid][pid] = [] path_score = scores[uid][pid] path_prob = reduce(lambda x, y: x * y, probs) pred_paths[uid][pid].append((path_score, path_prob, path)) # 2) Pick best path for each user-product pair, also remove pid if it is in train set. best_pred_paths = {} for uid in pred_paths: train_pids = set(train_labels[uid]) best_pred_paths[uid] = [] for pid in pred_paths[uid]: if pid in train_pids: continue # Get the path with highest probability sorted_path = sorted(pred_paths[uid][pid], key=lambda x: x[1], reverse=True) best_pred_paths[uid].append(sorted_path[0]) # 3) Compute top 10 recommended products for each user. sort_by = 'score' pred_labels = {} for uid in best_pred_paths: if sort_by == 'score': sorted_path = sorted(best_pred_paths[uid], key=lambda x: (x[0], x[1]), reverse=True) elif sort_by == 'prob': sorted_path = sorted(best_pred_paths[uid], key=lambda x: (x[1], x[0]), reverse=True) topk_pids = [p[-1][2] for _, _, p in sorted_path[:num_recommendations]] # from largest to smallest # add up to 10 pids if not enough if args.add_products and len(topk_pids) < num_recommendations: train_pids = set(train_labels[uid]) cand_pids = np.argsort(scores[uid]) for cand_pid in cand_pids[::-1]: if cand_pid in train_pids or cand_pid in topk_pids: continue topk_pids.append(cand_pid) if len(topk_pids) >= num_recommendations: break # end of add pred_labels[uid] = topk_pids[::-1] # change order to from smallest to largest! return pred_labels def test(args): policy_file = args.log_dir + '/policy_model_epoch_{}.ckpt'.format(args.epochs) path_file = args.log_dir + '/policy_paths_epoch{}.pkl'.format(args.epochs) train_labels = load_labels(args.dataset, 'train') test_labels = load_labels(args.dataset, 'test') if args.run_path: predict_paths(policy_file, path_file, args) if args.run_eval: pred_labels = evaluate_paths(path_file, train_labels, test_labels, args.num_recommendations, args) evaluate(pred_labels, test_labels, args.num_recommendations, args.brand_dict) if __name__ == '__main__': boolean = lambda x: (str(x).lower() == 'true') parser = argparse.ArgumentParser() parser.add_argument('--dataset', type=str, default=BEAUTY, help='One of {cloth, beauty, cell, cd}') parser.add_argument('--name', type=str, default='train_agent', help='directory name.') parser.add_argument('--gpu', type=str, default='0', help='gpu device.') parser.add_argument('--epochs', type=int, default=50, help='num of epochs.') parser.add_argument('--max_acts', type=int, default=250, help='Max number of actions.') parser.add_argument('--max_path_len', type=int, default=3, help='Max path length.') parser.add_argument('--gamma', type=float, default=0.99, help='reward discount factor.') parser.add_argument('--state_history', type=int, default=1, help='state history length') parser.add_argument('--hidden', type=int, nargs='*', default=[512, 256], help='number of samples') parser.add_argument('--add_products', type=boolean, default=False, help='Add predicted products up to 10') parser.add_argument('--topk', type=int, nargs='*', default=[25, 5, 1], help='number of samples') parser.add_argument('--run_path', type=boolean, default=True, help='Generate predicted path? 
(takes long time)') parser.add_argument('--run_eval', type=boolean, default=True, help='Run evaluation?') parser.add_argument('--num_recommendations', type=int, default=10, help='The number of recommendations that ' 'will be predicted for each user') args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu args.device = torch.device('cuda:0') if torch.cuda.is_available() else 'cpu' args.log_dir = TMP_DIR[args.dataset] + '/' + args.name pickle_in = open(BRAND_FILE[args.dataset], "rb") args.brand_dict = pickle.load(pickle_in) test(args)
"""Compute metrics for predicted recommendations. Args: topk_matches: a list or dict of product ids in ascending order. """ invalid_users = [] # Compute metrics precisions, recalls, ndcgs, hits, fairness = [], [], [], [], [] test_user_idxs = list(test_user_products.keys()) for uid in test_user_idxs: if uid not in topk_matches or len(topk_matches[uid]) < num_recommendations: invalid_users.append(uid) continue pred_list, rel_set = topk_matches[uid][::-1], test_user_products[uid] if len(pred_list) == 0: continue dcg = 0.0 hit_num = 0.0 for i in range(len(pred_list)): if pred_list[i] in rel_set: dcg += 1. / (log(i + 2) / log(2)) hit_num += 1 # idcg idcg = 0.0 for i in range(min(len(rel_set), len(pred_list))): idcg += 1. / (log(i + 2) / log(2)) ndcg = dcg / idcg recall = hit_num / len(rel_set) precision = hit_num / len(pred_list) hit = 1.0 if hit_num > 0.0 else 0.0 ndcgs.append(ndcg) recalls.append(recall) precisions.append(precision) hits.append(hit) fairness.append(calculate_fairness(pred_list, brand_dict)) avg_precision = np.mean(precisions) * 100 avg_recall = np.mean(recalls) * 100 avg_ndcg = np.mean(ndcgs) * 100 avg_hit = np.mean(hits) * 100 avg_fairness = np.mean(fairness) print('NDCG={:.3f} | Recall={:.3f} | HR={:.3f} | Precision={:.3f} | Fairness={:.3f} | Invalid users={}'.format( avg_ndcg, avg_recall, avg_hit, avg_precision, avg_fairness, len(invalid_users)))
identifier_body
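The `evaluate` body in the sample above accumulates DCG with a `log(i + 2) / log(2)` term (i.e. log2 of the 1-based rank plus one) and divides by the ideal DCG. The following standalone sketch reproduces that arithmetic on a toy ranked list; `toy_pred_list` and `toy_rel_set` are made-up illustration values, not data from test_agent.py.

```python
from math import log

# Hypothetical data: a ranked prediction list (best first) and the set of
# relevant product ids for one user.
toy_pred_list = [7, 3, 9, 1]
toy_rel_set = {3, 1, 5}

dcg, hit_num = 0.0, 0.0
for i, pid in enumerate(toy_pred_list):
    if pid in toy_rel_set:
        dcg += 1.0 / (log(i + 2) / log(2))   # 1 / log2(rank + 1), rank is 1-based
        hit_num += 1

# Ideal DCG: all relevant items placed at the top of the list.
idcg = sum(1.0 / (log(i + 2) / log(2))
           for i in range(min(len(toy_rel_set), len(toy_pred_list))))

ndcg = dcg / idcg
recall = hit_num / len(toy_rel_set)
precision = hit_num / len(toy_pred_list)
print(f"NDCG={ndcg:.3f} Recall={recall:.3f} Precision={precision:.3f}")
```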
test_agent.py
"""Run this file to train the User Agent""" from __future__ import absolute_import, division, print_function import argparse import os from functools import reduce from math import log from tqdm import tqdm from kg_env import BatchKGEnvironment from train_agent import ActorCritic from utils import * def evaluate(topk_matches, test_user_products, num_recommendations, brand_dict): """Compute metrics for predicted recommendations. Args: topk_matches: a list or dict of product ids in ascending order. """ invalid_users = [] # Compute metrics precisions, recalls, ndcgs, hits, fairness = [], [], [], [], [] test_user_idxs = list(test_user_products.keys()) for uid in test_user_idxs: if uid not in topk_matches or len(topk_matches[uid]) < num_recommendations: invalid_users.append(uid) continue pred_list, rel_set = topk_matches[uid][::-1], test_user_products[uid] if len(pred_list) == 0: continue dcg = 0.0 hit_num = 0.0 for i in range(len(pred_list)): if pred_list[i] in rel_set: dcg += 1. / (log(i + 2) / log(2)) hit_num += 1 # idcg idcg = 0.0 for i in range(min(len(rel_set), len(pred_list))): idcg += 1. / (log(i + 2) / log(2)) ndcg = dcg / idcg recall = hit_num / len(rel_set) precision = hit_num / len(pred_list) hit = 1.0 if hit_num > 0.0 else 0.0 ndcgs.append(ndcg) recalls.append(recall) precisions.append(precision) hits.append(hit) fairness.append(calculate_fairness(pred_list, brand_dict)) avg_precision = np.mean(precisions) * 100 avg_recall = np.mean(recalls) * 100 avg_ndcg = np.mean(ndcgs) * 100 avg_hit = np.mean(hits) * 100 avg_fairness = np.mean(fairness) print('NDCG={:.3f} | Recall={:.3f} | HR={:.3f} | Precision={:.3f} | Fairness={:.3f} | Invalid users={}'.format( avg_ndcg, avg_recall, avg_hit, avg_precision, avg_fairness, len(invalid_users))) def batch_beam_search(env, model, uids, device, topk=[25, 5, 1]): def
(batch_acts): batch_masks = [] for acts in batch_acts: num_acts = len(acts) act_mask = np.zeros(model.act_dim, dtype=np.uint8) act_mask[:num_acts] = 1 batch_masks.append(act_mask) return np.vstack(batch_masks) state_pool = env.reset(uids) # numpy of [bs, dim] path_pool = env._batch_path # list of list, size=bs probs_pool = [[] for _ in uids] model.eval() for hop in range(3): state_tensor = torch.FloatTensor(state_pool).to(device) acts_pool = env._batch_get_actions(path_pool, False) # list of list, size=bs actmask_pool = _batch_acts_to_masks(acts_pool) # numpy of [bs, dim] actmask_tensor = torch.ByteTensor(actmask_pool).to(device) probs, _ = model((state_tensor, actmask_tensor)) # Tensor of [bs, act_dim] probs = probs + actmask_tensor.float() # In order to differ from masked actions topk_probs, topk_idxs = torch.topk(probs, topk[hop], dim=1) # LongTensor of [bs, k] topk_idxs = topk_idxs.detach().cpu().numpy() topk_probs = topk_probs.detach().cpu().numpy() new_path_pool, new_probs_pool = [], [] for row in range(topk_idxs.shape[0]): path = path_pool[row] probs = probs_pool[row] for idx, p in zip(topk_idxs[row], topk_probs[row]): if idx >= len(acts_pool[row]): # act idx is invalid continue relation, next_node_id = acts_pool[row][idx] # (relation, next_node_id) if relation == SELF_LOOP: next_node_type = path[-1][1] else: next_node_type = KG_RELATION[path[-1][1]][relation] new_path = path + [(relation, next_node_type, next_node_id)] new_path_pool.append(new_path) new_probs_pool.append(probs + [p]) path_pool = new_path_pool probs_pool = new_probs_pool if hop < 2: state_pool = env._batch_get_state(path_pool) return path_pool, probs_pool def predict_paths(policy_file, path_file, args): print('Predicting paths...') env = BatchKGEnvironment(args.dataset, args.max_acts, max_path_len=args.max_path_len, state_history=args.state_history) pretrain_sd = torch.load(policy_file) model = ActorCritic(env.state_dim, env.act_dim, gamma=args.gamma, hidden_sizes=args.hidden).to(args.device) model_sd = model.state_dict() model_sd.update(pretrain_sd) model.load_state_dict(model_sd) test_labels = load_labels(args.dataset, 'test') test_uids = list(test_labels.keys()) batch_size = 16 start_idx = 0 all_paths, all_probs = [], [] pbar = tqdm(total=len(test_uids)) while start_idx < len(test_uids): end_idx = min(start_idx + batch_size, len(test_uids)) batch_uids = test_uids[start_idx:end_idx] paths, probs = batch_beam_search(env, model, batch_uids, args.device, topk=args.topk) all_paths.extend(paths) all_probs.extend(probs) start_idx = end_idx pbar.update(batch_size) predicts = {'paths': all_paths, 'probs': all_probs} pickle.dump(predicts, open(path_file, 'wb')) def evaluate_paths(path_file, train_labels, test_labels, num_recommendations, args): embeds = load_embed(args.dataset) user_embeds = embeds[USER] purchase_embeds = embeds[PURCHASE][0] product_embeds = embeds[PRODUCT] scores = np.dot(user_embeds + purchase_embeds, product_embeds.T) # 1) Get all valid paths for each user, compute path score and path probability. 
results = pickle.load(open(path_file, 'rb')) pred_paths = {uid: {} for uid in test_labels} for path, probs in zip(results['paths'], results['probs']): if path[-1][1] != PRODUCT: continue uid = path[0][2] if uid not in pred_paths: continue pid = path[-1][2] if pid not in pred_paths[uid]: pred_paths[uid][pid] = [] path_score = scores[uid][pid] path_prob = reduce(lambda x, y: x * y, probs) pred_paths[uid][pid].append((path_score, path_prob, path)) # 2) Pick best path for each user-product pair, also remove pid if it is in train set. best_pred_paths = {} for uid in pred_paths: train_pids = set(train_labels[uid]) best_pred_paths[uid] = [] for pid in pred_paths[uid]: if pid in train_pids: continue # Get the path with highest probability sorted_path = sorted(pred_paths[uid][pid], key=lambda x: x[1], reverse=True) best_pred_paths[uid].append(sorted_path[0]) # 3) Compute top 10 recommended products for each user. sort_by = 'score' pred_labels = {} for uid in best_pred_paths: if sort_by == 'score': sorted_path = sorted(best_pred_paths[uid], key=lambda x: (x[0], x[1]), reverse=True) elif sort_by == 'prob': sorted_path = sorted(best_pred_paths[uid], key=lambda x: (x[1], x[0]), reverse=True) topk_pids = [p[-1][2] for _, _, p in sorted_path[:num_recommendations]] # from largest to smallest # add up to 10 pids if not enough if args.add_products and len(topk_pids) < num_recommendations: train_pids = set(train_labels[uid]) cand_pids = np.argsort(scores[uid]) for cand_pid in cand_pids[::-1]: if cand_pid in train_pids or cand_pid in topk_pids: continue topk_pids.append(cand_pid) if len(topk_pids) >= num_recommendations: break # end of add pred_labels[uid] = topk_pids[::-1] # change order to from smallest to largest! return pred_labels def test(args): policy_file = args.log_dir + '/policy_model_epoch_{}.ckpt'.format(args.epochs) path_file = args.log_dir + '/policy_paths_epoch{}.pkl'.format(args.epochs) train_labels = load_labels(args.dataset, 'train') test_labels = load_labels(args.dataset, 'test') if args.run_path: predict_paths(policy_file, path_file, args) if args.run_eval: pred_labels = evaluate_paths(path_file, train_labels, test_labels, args.num_recommendations, args) evaluate(pred_labels, test_labels, args.num_recommendations, args.brand_dict) if __name__ == '__main__': boolean = lambda x: (str(x).lower() == 'true') parser = argparse.ArgumentParser() parser.add_argument('--dataset', type=str, default=BEAUTY, help='One of {cloth, beauty, cell, cd}') parser.add_argument('--name', type=str, default='train_agent', help='directory name.') parser.add_argument('--gpu', type=str, default='0', help='gpu device.') parser.add_argument('--epochs', type=int, default=50, help='num of epochs.') parser.add_argument('--max_acts', type=int, default=250, help='Max number of actions.') parser.add_argument('--max_path_len', type=int, default=3, help='Max path length.') parser.add_argument('--gamma', type=float, default=0.99, help='reward discount factor.') parser.add_argument('--state_history', type=int, default=1, help='state history length') parser.add_argument('--hidden', type=int, nargs='*', default=[512, 256], help='number of samples') parser.add_argument('--add_products', type=boolean, default=False, help='Add predicted products up to 10') parser.add_argument('--topk', type=int, nargs='*', default=[25, 5, 1], help='number of samples') parser.add_argument('--run_path', type=boolean, default=True, help='Generate predicted path? 
(takes long time)') parser.add_argument('--run_eval', type=boolean, default=True, help='Run evaluation?') parser.add_argument('--num_recommendations', type=int, default=10, help='The number of recommendations that ' 'will be predicted for each user') args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu args.device = torch.device('cuda:0') if torch.cuda.is_available() else 'cpu' args.log_dir = TMP_DIR[args.dataset] + '/' + args.name pickle_in = open(BRAND_FILE[args.dataset], "rb") args.brand_dict = pickle.load(pickle_in) test(args)
_batch_acts_to_masks
identifier_name
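In `batch_beam_search` above, padded action slots are separated from real ones by adding the 0/1 action mask to the probability tensor before `torch.topk`, so every valid action outranks every masked slot. A minimal sketch of that single step with made-up shapes (it assumes `torch` is installed and does not use the BatchKGEnvironment API):

```python
import torch

# Hypothetical batch of 2 states over an action space of 5 slots.
probs = torch.tensor([[0.4, 0.3, 0.2, 0.1, 0.0],
                      [0.6, 0.1, 0.1, 0.1, 0.1]])
# 1 marks a real action, 0 marks a padded slot (as produced by _batch_acts_to_masks).
act_mask = torch.tensor([[1, 1, 1, 0, 0],
                         [1, 1, 0, 0, 0]], dtype=torch.uint8)

# Adding the mask lifts every valid action's score above any masked one,
# so top-k only falls back to padded slots when k exceeds the valid count.
shifted = probs + act_mask.float()
topk_probs, topk_idxs = torch.topk(shifted, k=2, dim=1)
print(topk_idxs)  # indices of the 2 best valid actions per row
```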
non_blocking.rs
//! A non-blocking, off-thread writer. //! //! This spawns a dedicated worker thread which is responsible for writing log //! lines to the provided writer. When a line is written using the returned //! `NonBlocking` struct's `make_writer` method, it will be enqueued to be //! written by the worker thread. //! //! The queue has a fixed capacity, and if it becomes full, any logs written //! to it will be dropped until capacity is once again available. This may //! occur if logs are consistently produced faster than the worker thread can //! output them. The queue capacity and behavior when full (i.e., whether to //! drop logs or to exert backpressure to slow down senders) can be configured //! using [`NonBlockingBuilder::default()`][builder]. //! This function returns the default configuration. It is equivalent to: //! //! ```rust //! # use tracing_appender::non_blocking::{NonBlocking, WorkerGuard}; //! # fn doc() -> (NonBlocking, WorkerGuard) { //! tracing_appender::non_blocking(std::io::stdout()) //! # } //! ``` //! [builder]: NonBlockingBuilder::default() //! //! <br/> This function returns a tuple of `NonBlocking` and `WorkerGuard`. //! `NonBlocking` implements [`MakeWriter`] which integrates with `tracing_subscriber`. //! `WorkerGuard` is a drop guard that is responsible for flushing any remaining logs when //! the program terminates. //! //! Note that the `WorkerGuard` returned by `non_blocking` _must_ be assigned to a binding that //! is not `_`, as `_` will result in the `WorkerGuard` being dropped immediately. //! Unintentional drops of `WorkerGuard` remove the guarantee that logs will be flushed //! during a program's termination, in a panic or otherwise. //! //! See [`WorkerGuard`][worker_guard] for examples of using the guard. //! //! [worker_guard]: WorkerGuard //! //! # Examples //! //! ``` rust //! # fn docs() { //! let (non_blocking, _guard) = tracing_appender::non_blocking(std::io::stdout()); //! let collector = tracing_subscriber::fmt().with_writer(non_blocking); //! tracing::collect::with_default(collector.finish(), || { //! tracing::event!(tracing::Level::INFO, "Hello"); //! }); //! # } //! ``` use crate::worker::Worker; use crate::Msg; use crossbeam_channel::{bounded, SendTimeoutError, Sender}; use std::io; use std::io::Write; use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering; use std::sync::Arc; use std::thread::JoinHandle; use std::time::Duration; use tracing_subscriber::fmt::MakeWriter; /// The default maximum number of buffered log lines. /// /// If [`NonBlocking`][non-blocking] is lossy, it will drop spans/events at capacity. /// If [`NonBlocking`][non-blocking] is _not_ lossy, /// backpressure will be exerted on senders, causing them to block their /// respective threads until there is available capacity. /// /// [non-blocking]: NonBlocking /// Recommended to be a power of 2. pub const DEFAULT_BUFFERED_LINES_LIMIT: usize = 128_000; /// A guard that flushes spans/events associated to a [`NonBlocking`] on a drop /// /// Writing to a [`NonBlocking`] writer will **not** immediately write a span or event to the underlying /// output. Instead, the span or event will be written by a dedicated logging thread at some later point. /// To increase throughput, the non-blocking writer will flush to the underlying output on /// a periodic basis rather than every time a span or event is written. This means that if the program /// terminates abruptly (such as through an uncaught `panic` or a `std::process::exit`), some spans /// or events may not be written. 
/// /// Since spans/events and events recorded near a crash are often necessary for diagnosing the failure, /// `WorkerGuard` provides a mechanism to ensure that _all_ buffered logs are flushed to their output. /// `WorkerGuard` should be assigned in the `main` function or whatever the entrypoint of the program is. /// This will ensure that the guard will be dropped during an unwinding or when `main` exits /// successfully. /// /// # Examples /// /// ``` rust /// # #[clippy::allow(needless_doctest_main)] /// fn main () { /// # fn doc() { /// let (non_blocking, _guard) = tracing_appender::non_blocking(std::io::stdout()); /// let collector = tracing_subscriber::fmt().with_writer(non_blocking); /// tracing::collect::with_default(collector.finish(), || { /// // Emit some tracing events within context of the non_blocking `_guard` and tracing subscriber /// tracing::event!(tracing::Level::INFO, "Hello"); /// }); /// // Exiting the context of `main` will drop the `_guard` and any remaining logs should get flushed /// # } /// } /// ``` #[must_use] #[derive(Debug)] pub struct WorkerGuard { handle: Option<JoinHandle<()>>, sender: Sender<Msg>, shutdown: Sender<()>, } /// A non-blocking writer. /// /// While the line between "blocking" and "non-blocking" IO is fuzzy, writing to a file is typically /// considered to be a _blocking_ operation. For an application whose `Collector` writes spans and events /// as they are emitted, an application might find the latency profile to be unacceptable. /// `NonBlocking` moves the writing out of an application's data path by sending spans and events /// to a dedicated logging thread. /// /// This struct implements [`MakeWriter`][make_writer] from the `tracing-subscriber` /// crate. Therefore, it can be used with the [`tracing_subscriber::fmt`][fmt] module /// or with any other collector/subscriber implementation that uses the `MakeWriter` trait. /// /// [make_writer]: tracing_subscriber::fmt::MakeWriter /// [fmt]: mod@tracing_subscriber::fmt #[derive(Clone, Debug)] pub struct NonBlocking { error_counter: ErrorCounter, channel: Sender<Msg>, is_lossy: bool, } /// Tracks the number of times a log line was dropped by the background thread. /// /// If the non-blocking writer is not configured in [lossy mode], the error /// count should always be 0. /// /// [lossy mode]: NonBlockingBuilder::lossy #[derive(Clone, Debug)] pub struct ErrorCounter(Arc<AtomicUsize>); impl NonBlocking { /// Returns a new `NonBlocking` writer wrapping the provided `writer`. /// /// The returned `NonBlocking` writer will have the [default configuration][default] values. /// Other configurations can be specified using the [builder] interface. /// /// [default]: NonBlockingBuilder::default() /// [builder]: NonBlockingBuilder pub fn new<T: Write + Send + Sync + 'static>(writer: T) -> (NonBlocking, WorkerGuard) { NonBlockingBuilder::default().finish(writer) } fn create<T: Write + Send + Sync + 'static>( writer: T, buffered_lines_limit: usize, is_lossy: bool, thread_name: String, ) -> (NonBlocking, WorkerGuard) { let (sender, receiver) = bounded(buffered_lines_limit); let (shutdown_sender, shutdown_receiver) = bounded(0); let worker = Worker::new(receiver, writer, shutdown_receiver); let worker_guard = WorkerGuard::new( worker.worker_thread(thread_name), sender.clone(), shutdown_sender, ); ( Self { channel: sender, error_counter: ErrorCounter(Arc::new(AtomicUsize::new(0))), is_lossy, }, worker_guard, ) } /// Returns a counter for the number of times logs where dropped. 
This will always return zero if /// `NonBlocking` is not lossy. pub fn error_counter(&self) -> ErrorCounter { self.error_counter.clone() } } /// A builder for [`NonBlocking`][non-blocking]. /// /// [non-blocking]: NonBlocking #[derive(Debug)] pub struct NonBlockingBuilder { buffered_lines_limit: usize, is_lossy: bool, thread_name: String, } impl NonBlockingBuilder { /// Sets the number of lines to buffer before dropping logs or exerting backpressure on senders pub fn buffered_lines_limit(mut self, buffered_lines_limit: usize) -> NonBlockingBuilder { self.buffered_lines_limit = buffered_lines_limit; self } /// Sets whether `NonBlocking` should be lossy or not. /// /// If set to `true`, logs will be dropped when the buffered limit is reached. If `false`, backpressure /// will be exerted on senders, blocking them until the buffer has capacity again. /// /// By default, the built `NonBlocking` will be lossy. pub fn lossy(mut self, is_lossy: bool) -> NonBlockingBuilder { self.is_lossy = is_lossy; self } /// Override the worker thread's name. /// /// The default worker thread name is "tracing-appender". pub fn thread_name(mut self, name: &str) -> NonBlockingBuilder { self.thread_name = name.to_string(); self } /// Completes the builder, returning the configured `NonBlocking`. pub fn finish<T: Write + Send + Sync + 'static>(self, writer: T) -> (NonBlocking, WorkerGuard) { NonBlocking::create( writer, self.buffered_lines_limit, self.is_lossy, self.thread_name, ) } } impl Default for NonBlockingBuilder { fn default() -> Self { NonBlockingBuilder { buffered_lines_limit: DEFAULT_BUFFERED_LINES_LIMIT, is_lossy: true, thread_name: "tracing-appender".to_string(), } } } impl std::io::Write for NonBlocking { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { let buf_size = buf.len(); if self.is_lossy { if self.channel.try_send(Msg::Line(buf.to_vec())).is_err() { self.error_counter.incr_saturating(); } } else { return match self.channel.send(Msg::Line(buf.to_vec())) { Ok(_) => Ok(buf_size), Err(_) => Err(io::Error::from(io::ErrorKind::Other)), }; } Ok(buf_size) } fn flush(&mut self) -> io::Result<()> { Ok(()) } #[inline] fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { self.write(buf).map(|_| ()) } } impl<'a> MakeWriter<'a> for NonBlocking { type Writer = NonBlocking; fn make_writer(&'a self) -> Self::Writer { self.clone() } } impl WorkerGuard { fn new(handle: JoinHandle<()>, sender: Sender<Msg>, shutdown: Sender<()>) -> Self { WorkerGuard { handle: Some(handle), sender, shutdown, } } } impl Drop for WorkerGuard { fn drop(&mut self) { let timeout = Duration::from_millis(100); match self.sender.send_timeout(Msg::Shutdown, timeout) { Ok(_) => { // Attempt to wait for `Worker` to flush all messages before dropping. This happens // when the `Worker` calls `recv()` on a zero-capacity channel. Use `send_timeout` // so that drop is not blocked indefinitely. // TODO: Make timeout configurable. 
let timeout = Duration::from_millis(1000); match self.shutdown.send_timeout((), timeout) { Err(SendTimeoutError::Timeout(_)) => { eprintln!( "Shutting down logging worker timed out after {:?}.", timeout ); } _ => { // At this point it is safe to wait for `Worker` destruction without blocking if let Some(handle) = self.handle.take() { if handle.join().is_err() { eprintln!("Logging worker thread panicked"); } }; } } } Err(SendTimeoutError::Disconnected(_)) => (), Err(SendTimeoutError::Timeout(_)) => eprintln!( "Sending shutdown signal to logging worker timed out after {:?}", timeout ), } } } // === impl ErrorCounter === impl ErrorCounter { /// Returns the number of log lines that have been dropped. /// /// If the non-blocking writer is not configured in [lossy mode], the error /// count should always be 0. /// /// [lossy mode]: NonBlockingBuilder::lossy pub fn dropped_lines(&self) -> usize { self.0.load(Ordering::Acquire) } fn incr_saturating(&self) { let mut curr = self.0.load(Ordering::Acquire); // We don't need to enter the CAS loop if the current value is already // `usize::MAX`. if curr == usize::MAX { return; } // This is implemented as a CAS loop rather than as a simple // `fetch_add`, because we don't want to wrap on overflow. Instead, we // need to ensure that saturating addition is performed. loop { let val = curr.saturating_add(1); match self .0 .compare_exchange(curr, val, Ordering::AcqRel, Ordering::Acquire) { Ok(_) => return, Err(actual) => curr = actual, } } } } #[cfg(test)] mod test { use super::*; use std::sync::mpsc; use std::thread; use std::time::Duration; struct MockWriter { tx: mpsc::SyncSender<String>, } impl MockWriter { fn new(capacity: usize) -> (Self, mpsc::Receiver<String>) { let (tx, rx) = mpsc::sync_channel(capacity); (Self { tx }, rx) } } impl std::io::Write for MockWriter { fn
(&mut self, buf: &[u8]) -> std::io::Result<usize> { let buf_len = buf.len(); let _ = self.tx.send(String::from_utf8_lossy(buf).to_string()); Ok(buf_len) } fn flush(&mut self) -> std::io::Result<()> { Ok(()) } } #[test] fn backpressure_exerted() { let (mock_writer, rx) = MockWriter::new(1); let (mut non_blocking, _guard) = self::NonBlockingBuilder::default() .lossy(false) .buffered_lines_limit(1) .finish(mock_writer); let error_count = non_blocking.error_counter(); non_blocking.write_all(b"Hello").expect("Failed to write"); assert_eq!(0, error_count.dropped_lines()); let handle = thread::spawn(move || { non_blocking.write_all(b", World").expect("Failed to write"); }); // Sleep a little to ensure previously spawned thread gets blocked on write. thread::sleep(Duration::from_millis(100)); // We should not drop logs when blocked. assert_eq!(0, error_count.dropped_lines()); // Read the first message to unblock sender. let mut line = rx.recv().unwrap(); assert_eq!(line, "Hello"); // Wait for thread to finish. handle.join().expect("thread should not panic"); // Thread has joined, we should be able to read the message it sent. line = rx.recv().unwrap(); assert_eq!(line, ", World"); } fn write_non_blocking(non_blocking: &mut NonBlocking, msg: &[u8]) { non_blocking.write_all(msg).expect("Failed to write"); // Sleep a bit to prevent races. thread::sleep(Duration::from_millis(200)); } #[test] #[ignore] // flaky, see https://github.com/tokio-rs/tracing/issues/751 fn logs_dropped_if_lossy() { let (mock_writer, rx) = MockWriter::new(1); let (mut non_blocking, _guard) = self::NonBlockingBuilder::default() .lossy(true) .buffered_lines_limit(1) .finish(mock_writer); let error_count = non_blocking.error_counter(); // First write will not block write_non_blocking(&mut non_blocking, b"Hello"); assert_eq!(0, error_count.dropped_lines()); // Second write will not block as Worker will have called `recv` on channel. // "Hello" is not yet consumed. MockWriter call to write_all will block until // "Hello" is consumed. write_non_blocking(&mut non_blocking, b", World"); assert_eq!(0, error_count.dropped_lines()); // Will sit in NonBlocking channel's buffer. write_non_blocking(&mut non_blocking, b"Test"); assert_eq!(0, error_count.dropped_lines()); // Allow a line to be written. "Hello" message will be consumed. // ", World" will be able to write to MockWriter. // "Test" will block on call to MockWriter's `write_all` let line = rx.recv().unwrap(); assert_eq!(line, "Hello"); // This will block as NonBlocking channel is full. write_non_blocking(&mut non_blocking, b"Universe"); assert_eq!(1, error_count.dropped_lines()); // Finally the second message sent will be consumed. 
let line = rx.recv().unwrap(); assert_eq!(line, ", World"); assert_eq!(1, error_count.dropped_lines()); } #[test] fn multi_threaded_writes() { let (mock_writer, rx) = MockWriter::new(DEFAULT_BUFFERED_LINES_LIMIT); let (non_blocking, _guard) = self::NonBlockingBuilder::default() .lossy(true) .finish(mock_writer); let error_count = non_blocking.error_counter(); let mut join_handles: Vec<JoinHandle<()>> = Vec::with_capacity(10); for _ in 0..10 { let cloned_non_blocking = non_blocking.clone(); join_handles.push(thread::spawn(move || { let collector = tracing_subscriber::fmt().with_writer(cloned_non_blocking); tracing::collect::with_default(collector.finish(), || { tracing::event!(tracing::Level::INFO, "Hello"); }); })); } for handle in join_handles { handle.join().expect("Failed to join thread"); } let mut hello_count: u8 = 0; while let Ok(event_str) = rx.recv_timeout(Duration::from_secs(5)) { assert!(event_str.contains("Hello")); hello_count += 1; } assert_eq!(10, hello_count); assert_eq!(0, error_count.dropped_lines()); } }
write
identifier_name
non_blocking.rs
//! A non-blocking, off-thread writer. //! //! This spawns a dedicated worker thread which is responsible for writing log //! lines to the provided writer. When a line is written using the returned //! `NonBlocking` struct's `make_writer` method, it will be enqueued to be //! written by the worker thread. //! //! The queue has a fixed capacity, and if it becomes full, any logs written //! to it will be dropped until capacity is once again available. This may //! occur if logs are consistently produced faster than the worker thread can //! output them. The queue capacity and behavior when full (i.e., whether to //! drop logs or to exert backpressure to slow down senders) can be configured //! using [`NonBlockingBuilder::default()`][builder]. //! This function returns the default configuration. It is equivalent to: //! //! ```rust //! # use tracing_appender::non_blocking::{NonBlocking, WorkerGuard}; //! # fn doc() -> (NonBlocking, WorkerGuard) { //! tracing_appender::non_blocking(std::io::stdout()) //! # } //! ``` //! [builder]: NonBlockingBuilder::default() //! //! <br/> This function returns a tuple of `NonBlocking` and `WorkerGuard`. //! `NonBlocking` implements [`MakeWriter`] which integrates with `tracing_subscriber`. //! `WorkerGuard` is a drop guard that is responsible for flushing any remaining logs when //! the program terminates. //! //! Note that the `WorkerGuard` returned by `non_blocking` _must_ be assigned to a binding that //! is not `_`, as `_` will result in the `WorkerGuard` being dropped immediately. //! Unintentional drops of `WorkerGuard` remove the guarantee that logs will be flushed //! during a program's termination, in a panic or otherwise. //! //! See [`WorkerGuard`][worker_guard] for examples of using the guard. //! //! [worker_guard]: WorkerGuard //! //! # Examples //! //! ``` rust //! # fn docs() { //! let (non_blocking, _guard) = tracing_appender::non_blocking(std::io::stdout()); //! let collector = tracing_subscriber::fmt().with_writer(non_blocking); //! tracing::collect::with_default(collector.finish(), || { //! tracing::event!(tracing::Level::INFO, "Hello"); //! }); //! # } //! ``` use crate::worker::Worker; use crate::Msg; use crossbeam_channel::{bounded, SendTimeoutError, Sender}; use std::io; use std::io::Write; use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering; use std::sync::Arc; use std::thread::JoinHandle; use std::time::Duration; use tracing_subscriber::fmt::MakeWriter; /// The default maximum number of buffered log lines. /// /// If [`NonBlocking`][non-blocking] is lossy, it will drop spans/events at capacity. /// If [`NonBlocking`][non-blocking] is _not_ lossy, /// backpressure will be exerted on senders, causing them to block their /// respective threads until there is available capacity. /// /// [non-blocking]: NonBlocking /// Recommended to be a power of 2. pub const DEFAULT_BUFFERED_LINES_LIMIT: usize = 128_000; /// A guard that flushes spans/events associated to a [`NonBlocking`] on a drop /// /// Writing to a [`NonBlocking`] writer will **not** immediately write a span or event to the underlying /// output. Instead, the span or event will be written by a dedicated logging thread at some later point. /// To increase throughput, the non-blocking writer will flush to the underlying output on /// a periodic basis rather than every time a span or event is written. This means that if the program /// terminates abruptly (such as through an uncaught `panic` or a `std::process::exit`), some spans /// or events may not be written. 
/// /// Since spans/events and events recorded near a crash are often necessary for diagnosing the failure, /// `WorkerGuard` provides a mechanism to ensure that _all_ buffered logs are flushed to their output. /// `WorkerGuard` should be assigned in the `main` function or whatever the entrypoint of the program is. /// This will ensure that the guard will be dropped during an unwinding or when `main` exits /// successfully. /// /// # Examples /// /// ``` rust /// # #[clippy::allow(needless_doctest_main)] /// fn main () { /// # fn doc() { /// let (non_blocking, _guard) = tracing_appender::non_blocking(std::io::stdout()); /// let collector = tracing_subscriber::fmt().with_writer(non_blocking); /// tracing::collect::with_default(collector.finish(), || { /// // Emit some tracing events within context of the non_blocking `_guard` and tracing subscriber /// tracing::event!(tracing::Level::INFO, "Hello"); /// }); /// // Exiting the context of `main` will drop the `_guard` and any remaining logs should get flushed /// # } /// } /// ``` #[must_use] #[derive(Debug)] pub struct WorkerGuard { handle: Option<JoinHandle<()>>, sender: Sender<Msg>, shutdown: Sender<()>, } /// A non-blocking writer. /// /// While the line between "blocking" and "non-blocking" IO is fuzzy, writing to a file is typically /// considered to be a _blocking_ operation. For an application whose `Collector` writes spans and events /// as they are emitted, an application might find the latency profile to be unacceptable. /// `NonBlocking` moves the writing out of an application's data path by sending spans and events /// to a dedicated logging thread. /// /// This struct implements [`MakeWriter`][make_writer] from the `tracing-subscriber` /// crate. Therefore, it can be used with the [`tracing_subscriber::fmt`][fmt] module /// or with any other collector/subscriber implementation that uses the `MakeWriter` trait. /// /// [make_writer]: tracing_subscriber::fmt::MakeWriter /// [fmt]: mod@tracing_subscriber::fmt #[derive(Clone, Debug)] pub struct NonBlocking { error_counter: ErrorCounter, channel: Sender<Msg>, is_lossy: bool, } /// Tracks the number of times a log line was dropped by the background thread. /// /// If the non-blocking writer is not configured in [lossy mode], the error /// count should always be 0. /// /// [lossy mode]: NonBlockingBuilder::lossy #[derive(Clone, Debug)] pub struct ErrorCounter(Arc<AtomicUsize>); impl NonBlocking { /// Returns a new `NonBlocking` writer wrapping the provided `writer`. /// /// The returned `NonBlocking` writer will have the [default configuration][default] values. /// Other configurations can be specified using the [builder] interface. /// /// [default]: NonBlockingBuilder::default() /// [builder]: NonBlockingBuilder pub fn new<T: Write + Send + Sync + 'static>(writer: T) -> (NonBlocking, WorkerGuard) { NonBlockingBuilder::default().finish(writer) } fn create<T: Write + Send + Sync + 'static>( writer: T, buffered_lines_limit: usize, is_lossy: bool, thread_name: String, ) -> (NonBlocking, WorkerGuard) { let (sender, receiver) = bounded(buffered_lines_limit); let (shutdown_sender, shutdown_receiver) = bounded(0); let worker = Worker::new(receiver, writer, shutdown_receiver); let worker_guard = WorkerGuard::new( worker.worker_thread(thread_name), sender.clone(), shutdown_sender, ); ( Self { channel: sender, error_counter: ErrorCounter(Arc::new(AtomicUsize::new(0))), is_lossy, }, worker_guard, ) } /// Returns a counter for the number of times logs where dropped. 
This will always return zero if /// `NonBlocking` is not lossy. pub fn error_counter(&self) -> ErrorCounter { self.error_counter.clone() } } /// A builder for [`NonBlocking`][non-blocking]. /// /// [non-blocking]: NonBlocking #[derive(Debug)] pub struct NonBlockingBuilder { buffered_lines_limit: usize, is_lossy: bool, thread_name: String, } impl NonBlockingBuilder { /// Sets the number of lines to buffer before dropping logs or exerting backpressure on senders pub fn buffered_lines_limit(mut self, buffered_lines_limit: usize) -> NonBlockingBuilder { self.buffered_lines_limit = buffered_lines_limit; self } /// Sets whether `NonBlocking` should be lossy or not. /// /// If set to `true`, logs will be dropped when the buffered limit is reached. If `false`, backpressure /// will be exerted on senders, blocking them until the buffer has capacity again. /// /// By default, the built `NonBlocking` will be lossy. pub fn lossy(mut self, is_lossy: bool) -> NonBlockingBuilder { self.is_lossy = is_lossy; self } /// Override the worker thread's name. /// /// The default worker thread name is "tracing-appender". pub fn thread_name(mut self, name: &str) -> NonBlockingBuilder { self.thread_name = name.to_string(); self } /// Completes the builder, returning the configured `NonBlocking`. pub fn finish<T: Write + Send + Sync + 'static>(self, writer: T) -> (NonBlocking, WorkerGuard) { NonBlocking::create( writer, self.buffered_lines_limit, self.is_lossy, self.thread_name, ) } } impl Default for NonBlockingBuilder { fn default() -> Self { NonBlockingBuilder { buffered_lines_limit: DEFAULT_BUFFERED_LINES_LIMIT, is_lossy: true, thread_name: "tracing-appender".to_string(), } } } impl std::io::Write for NonBlocking { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { let buf_size = buf.len(); if self.is_lossy { if self.channel.try_send(Msg::Line(buf.to_vec())).is_err() { self.error_counter.incr_saturating(); } } else
Ok(buf_size) } fn flush(&mut self) -> io::Result<()> { Ok(()) } #[inline] fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { self.write(buf).map(|_| ()) } } impl<'a> MakeWriter<'a> for NonBlocking { type Writer = NonBlocking; fn make_writer(&'a self) -> Self::Writer { self.clone() } } impl WorkerGuard { fn new(handle: JoinHandle<()>, sender: Sender<Msg>, shutdown: Sender<()>) -> Self { WorkerGuard { handle: Some(handle), sender, shutdown, } } } impl Drop for WorkerGuard { fn drop(&mut self) { let timeout = Duration::from_millis(100); match self.sender.send_timeout(Msg::Shutdown, timeout) { Ok(_) => { // Attempt to wait for `Worker` to flush all messages before dropping. This happens // when the `Worker` calls `recv()` on a zero-capacity channel. Use `send_timeout` // so that drop is not blocked indefinitely. // TODO: Make timeout configurable. let timeout = Duration::from_millis(1000); match self.shutdown.send_timeout((), timeout) { Err(SendTimeoutError::Timeout(_)) => { eprintln!( "Shutting down logging worker timed out after {:?}.", timeout ); } _ => { // At this point it is safe to wait for `Worker` destruction without blocking if let Some(handle) = self.handle.take() { if handle.join().is_err() { eprintln!("Logging worker thread panicked"); } }; } } } Err(SendTimeoutError::Disconnected(_)) => (), Err(SendTimeoutError::Timeout(_)) => eprintln!( "Sending shutdown signal to logging worker timed out after {:?}", timeout ), } } } // === impl ErrorCounter === impl ErrorCounter { /// Returns the number of log lines that have been dropped. /// /// If the non-blocking writer is not configured in [lossy mode], the error /// count should always be 0. /// /// [lossy mode]: NonBlockingBuilder::lossy pub fn dropped_lines(&self) -> usize { self.0.load(Ordering::Acquire) } fn incr_saturating(&self) { let mut curr = self.0.load(Ordering::Acquire); // We don't need to enter the CAS loop if the current value is already // `usize::MAX`. if curr == usize::MAX { return; } // This is implemented as a CAS loop rather than as a simple // `fetch_add`, because we don't want to wrap on overflow. Instead, we // need to ensure that saturating addition is performed. loop { let val = curr.saturating_add(1); match self .0 .compare_exchange(curr, val, Ordering::AcqRel, Ordering::Acquire) { Ok(_) => return, Err(actual) => curr = actual, } } } } #[cfg(test)] mod test { use super::*; use std::sync::mpsc; use std::thread; use std::time::Duration; struct MockWriter { tx: mpsc::SyncSender<String>, } impl MockWriter { fn new(capacity: usize) -> (Self, mpsc::Receiver<String>) { let (tx, rx) = mpsc::sync_channel(capacity); (Self { tx }, rx) } } impl std::io::Write for MockWriter { fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> { let buf_len = buf.len(); let _ = self.tx.send(String::from_utf8_lossy(buf).to_string()); Ok(buf_len) } fn flush(&mut self) -> std::io::Result<()> { Ok(()) } } #[test] fn backpressure_exerted() { let (mock_writer, rx) = MockWriter::new(1); let (mut non_blocking, _guard) = self::NonBlockingBuilder::default() .lossy(false) .buffered_lines_limit(1) .finish(mock_writer); let error_count = non_blocking.error_counter(); non_blocking.write_all(b"Hello").expect("Failed to write"); assert_eq!(0, error_count.dropped_lines()); let handle = thread::spawn(move || { non_blocking.write_all(b", World").expect("Failed to write"); }); // Sleep a little to ensure previously spawned thread gets blocked on write. thread::sleep(Duration::from_millis(100)); // We should not drop logs when blocked. 
assert_eq!(0, error_count.dropped_lines()); // Read the first message to unblock sender. let mut line = rx.recv().unwrap(); assert_eq!(line, "Hello"); // Wait for thread to finish. handle.join().expect("thread should not panic"); // Thread has joined, we should be able to read the message it sent. line = rx.recv().unwrap(); assert_eq!(line, ", World"); } fn write_non_blocking(non_blocking: &mut NonBlocking, msg: &[u8]) { non_blocking.write_all(msg).expect("Failed to write"); // Sleep a bit to prevent races. thread::sleep(Duration::from_millis(200)); } #[test] #[ignore] // flaky, see https://github.com/tokio-rs/tracing/issues/751 fn logs_dropped_if_lossy() { let (mock_writer, rx) = MockWriter::new(1); let (mut non_blocking, _guard) = self::NonBlockingBuilder::default() .lossy(true) .buffered_lines_limit(1) .finish(mock_writer); let error_count = non_blocking.error_counter(); // First write will not block write_non_blocking(&mut non_blocking, b"Hello"); assert_eq!(0, error_count.dropped_lines()); // Second write will not block as Worker will have called `recv` on channel. // "Hello" is not yet consumed. MockWriter call to write_all will block until // "Hello" is consumed. write_non_blocking(&mut non_blocking, b", World"); assert_eq!(0, error_count.dropped_lines()); // Will sit in NonBlocking channel's buffer. write_non_blocking(&mut non_blocking, b"Test"); assert_eq!(0, error_count.dropped_lines()); // Allow a line to be written. "Hello" message will be consumed. // ", World" will be able to write to MockWriter. // "Test" will block on call to MockWriter's `write_all` let line = rx.recv().unwrap(); assert_eq!(line, "Hello"); // This will block as NonBlocking channel is full. write_non_blocking(&mut non_blocking, b"Universe"); assert_eq!(1, error_count.dropped_lines()); // Finally the second message sent will be consumed. let line = rx.recv().unwrap(); assert_eq!(line, ", World"); assert_eq!(1, error_count.dropped_lines()); } #[test] fn multi_threaded_writes() { let (mock_writer, rx) = MockWriter::new(DEFAULT_BUFFERED_LINES_LIMIT); let (non_blocking, _guard) = self::NonBlockingBuilder::default() .lossy(true) .finish(mock_writer); let error_count = non_blocking.error_counter(); let mut join_handles: Vec<JoinHandle<()>> = Vec::with_capacity(10); for _ in 0..10 { let cloned_non_blocking = non_blocking.clone(); join_handles.push(thread::spawn(move || { let collector = tracing_subscriber::fmt().with_writer(cloned_non_blocking); tracing::collect::with_default(collector.finish(), || { tracing::event!(tracing::Level::INFO, "Hello"); }); })); } for handle in join_handles { handle.join().expect("Failed to join thread"); } let mut hello_count: u8 = 0; while let Ok(event_str) = rx.recv_timeout(Duration::from_secs(5)) { assert!(event_str.contains("Hello")); hello_count += 1; } assert_eq!(10, hello_count); assert_eq!(0, error_count.dropped_lines()); } }
{ return match self.channel.send(Msg::Line(buf.to_vec())) { Ok(_) => Ok(buf_size), Err(_) => Err(io::Error::from(io::ErrorKind::Other)), }; }
conditional_block
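The conditional block recorded above is the non-lossy branch of `NonBlocking::write`: in lossy mode `try_send` drops the line and bumps the error counter, while in non-lossy mode `send` blocks the caller until the worker frees capacity. Below is a rough Python analogue of those two policies using `queue.Queue`; it only illustrates the idea and is not the tracing-appender API.

```python
import queue

class ToyNonBlocking:
    """Toy model of a bounded log channel with lossy / backpressure modes."""

    def __init__(self, capacity, lossy):
        self.channel = queue.Queue(maxsize=capacity)
        self.lossy = lossy
        self.dropped_lines = 0

    def write(self, line):
        if self.lossy:
            try:
                self.channel.put_nowait(line)   # like try_send: never blocks
            except queue.Full:
                self.dropped_lines += 1         # like incr_saturating()
        else:
            self.channel.put(line)              # like send: blocks until space

w = ToyNonBlocking(capacity=1, lossy=True)
w.write("Hello")
w.write(", World")          # buffer already full, so this line is dropped
print(w.dropped_lines)      # 1
```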
non_blocking.rs
//! A non-blocking, off-thread writer. //! //! This spawns a dedicated worker thread which is responsible for writing log //! lines to the provided writer. When a line is written using the returned //! `NonBlocking` struct's `make_writer` method, it will be enqueued to be //! written by the worker thread. //! //! The queue has a fixed capacity, and if it becomes full, any logs written //! to it will be dropped until capacity is once again available. This may //! occur if logs are consistently produced faster than the worker thread can //! output them. The queue capacity and behavior when full (i.e., whether to //! drop logs or to exert backpressure to slow down senders) can be configured //! using [`NonBlockingBuilder::default()`][builder]. //! This function returns the default configuration. It is equivalent to: //! //! ```rust //! # use tracing_appender::non_blocking::{NonBlocking, WorkerGuard}; //! # fn doc() -> (NonBlocking, WorkerGuard) { //! tracing_appender::non_blocking(std::io::stdout()) //! # } //! ``` //! [builder]: NonBlockingBuilder::default() //! //! <br/> This function returns a tuple of `NonBlocking` and `WorkerGuard`. //! `NonBlocking` implements [`MakeWriter`] which integrates with `tracing_subscriber`. //! `WorkerGuard` is a drop guard that is responsible for flushing any remaining logs when //! the program terminates. //! //! Note that the `WorkerGuard` returned by `non_blocking` _must_ be assigned to a binding that //! is not `_`, as `_` will result in the `WorkerGuard` being dropped immediately. //! Unintentional drops of `WorkerGuard` remove the guarantee that logs will be flushed //! during a program's termination, in a panic or otherwise. //! //! See [`WorkerGuard`][worker_guard] for examples of using the guard. //! //! [worker_guard]: WorkerGuard //! //! # Examples //! //! ``` rust //! # fn docs() { //! let (non_blocking, _guard) = tracing_appender::non_blocking(std::io::stdout()); //! let collector = tracing_subscriber::fmt().with_writer(non_blocking); //! tracing::collect::with_default(collector.finish(), || { //! tracing::event!(tracing::Level::INFO, "Hello"); //! }); //! # } //! ``` use crate::worker::Worker; use crate::Msg; use crossbeam_channel::{bounded, SendTimeoutError, Sender}; use std::io; use std::io::Write; use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering; use std::sync::Arc; use std::thread::JoinHandle; use std::time::Duration; use tracing_subscriber::fmt::MakeWriter; /// The default maximum number of buffered log lines. /// /// If [`NonBlocking`][non-blocking] is lossy, it will drop spans/events at capacity. /// If [`NonBlocking`][non-blocking] is _not_ lossy, /// backpressure will be exerted on senders, causing them to block their /// respective threads until there is available capacity. /// /// [non-blocking]: NonBlocking /// Recommended to be a power of 2. pub const DEFAULT_BUFFERED_LINES_LIMIT: usize = 128_000; /// A guard that flushes spans/events associated to a [`NonBlocking`] on a drop /// /// Writing to a [`NonBlocking`] writer will **not** immediately write a span or event to the underlying /// output. Instead, the span or event will be written by a dedicated logging thread at some later point. /// To increase throughput, the non-blocking writer will flush to the underlying output on /// a periodic basis rather than every time a span or event is written. This means that if the program /// terminates abruptly (such as through an uncaught `panic` or a `std::process::exit`), some spans /// or events may not be written. 
/// /// Since spans/events and events recorded near a crash are often necessary for diagnosing the failure, /// `WorkerGuard` provides a mechanism to ensure that _all_ buffered logs are flushed to their output. /// `WorkerGuard` should be assigned in the `main` function or whatever the entrypoint of the program is. /// This will ensure that the guard will be dropped during an unwinding or when `main` exits /// successfully. /// /// # Examples /// /// ``` rust /// # #[clippy::allow(needless_doctest_main)] /// fn main () { /// # fn doc() { /// let (non_blocking, _guard) = tracing_appender::non_blocking(std::io::stdout()); /// let collector = tracing_subscriber::fmt().with_writer(non_blocking); /// tracing::collect::with_default(collector.finish(), || { /// // Emit some tracing events within context of the non_blocking `_guard` and tracing subscriber /// tracing::event!(tracing::Level::INFO, "Hello"); /// }); /// // Exiting the context of `main` will drop the `_guard` and any remaining logs should get flushed /// # } /// } /// ``` #[must_use] #[derive(Debug)] pub struct WorkerGuard { handle: Option<JoinHandle<()>>, sender: Sender<Msg>, shutdown: Sender<()>, } /// A non-blocking writer. /// /// While the line between "blocking" and "non-blocking" IO is fuzzy, writing to a file is typically /// considered to be a _blocking_ operation. For an application whose `Collector` writes spans and events /// as they are emitted, an application might find the latency profile to be unacceptable. /// `NonBlocking` moves the writing out of an application's data path by sending spans and events /// to a dedicated logging thread. /// /// This struct implements [`MakeWriter`][make_writer] from the `tracing-subscriber` /// crate. Therefore, it can be used with the [`tracing_subscriber::fmt`][fmt] module /// or with any other collector/subscriber implementation that uses the `MakeWriter` trait. /// /// [make_writer]: tracing_subscriber::fmt::MakeWriter /// [fmt]: mod@tracing_subscriber::fmt #[derive(Clone, Debug)] pub struct NonBlocking { error_counter: ErrorCounter, channel: Sender<Msg>, is_lossy: bool, } /// Tracks the number of times a log line was dropped by the background thread. /// /// If the non-blocking writer is not configured in [lossy mode], the error /// count should always be 0. /// /// [lossy mode]: NonBlockingBuilder::lossy #[derive(Clone, Debug)] pub struct ErrorCounter(Arc<AtomicUsize>); impl NonBlocking { /// Returns a new `NonBlocking` writer wrapping the provided `writer`. /// /// The returned `NonBlocking` writer will have the [default configuration][default] values. /// Other configurations can be specified using the [builder] interface. /// /// [default]: NonBlockingBuilder::default() /// [builder]: NonBlockingBuilder pub fn new<T: Write + Send + Sync + 'static>(writer: T) -> (NonBlocking, WorkerGuard) { NonBlockingBuilder::default().finish(writer) } fn create<T: Write + Send + Sync + 'static>( writer: T, buffered_lines_limit: usize, is_lossy: bool, thread_name: String, ) -> (NonBlocking, WorkerGuard) { let (sender, receiver) = bounded(buffered_lines_limit); let (shutdown_sender, shutdown_receiver) = bounded(0); let worker = Worker::new(receiver, writer, shutdown_receiver); let worker_guard = WorkerGuard::new( worker.worker_thread(thread_name), sender.clone(), shutdown_sender, ); ( Self { channel: sender, error_counter: ErrorCounter(Arc::new(AtomicUsize::new(0))), is_lossy, }, worker_guard, ) } /// Returns a counter for the number of times logs where dropped. 
This will always return zero if /// `NonBlocking` is not lossy. pub fn error_counter(&self) -> ErrorCounter { self.error_counter.clone() } } /// A builder for [`NonBlocking`][non-blocking]. /// /// [non-blocking]: NonBlocking #[derive(Debug)] pub struct NonBlockingBuilder { buffered_lines_limit: usize, is_lossy: bool, thread_name: String, } impl NonBlockingBuilder { /// Sets the number of lines to buffer before dropping logs or exerting backpressure on senders pub fn buffered_lines_limit(mut self, buffered_lines_limit: usize) -> NonBlockingBuilder { self.buffered_lines_limit = buffered_lines_limit; self } /// Sets whether `NonBlocking` should be lossy or not. /// /// If set to `true`, logs will be dropped when the buffered limit is reached. If `false`, backpressure /// will be exerted on senders, blocking them until the buffer has capacity again.
/// /// By default, the built `NonBlocking` will be lossy. pub fn lossy(mut self, is_lossy: bool) -> NonBlockingBuilder { self.is_lossy = is_lossy; self } /// Override the worker thread's name. /// /// The default worker thread name is "tracing-appender". pub fn thread_name(mut self, name: &str) -> NonBlockingBuilder { self.thread_name = name.to_string(); self } /// Completes the builder, returning the configured `NonBlocking`. pub fn finish<T: Write + Send + Sync + 'static>(self, writer: T) -> (NonBlocking, WorkerGuard) { NonBlocking::create( writer, self.buffered_lines_limit, self.is_lossy, self.thread_name, ) } } impl Default for NonBlockingBuilder { fn default() -> Self { NonBlockingBuilder { buffered_lines_limit: DEFAULT_BUFFERED_LINES_LIMIT, is_lossy: true, thread_name: "tracing-appender".to_string(), } } } impl std::io::Write for NonBlocking { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { let buf_size = buf.len(); if self.is_lossy { if self.channel.try_send(Msg::Line(buf.to_vec())).is_err() { self.error_counter.incr_saturating(); } } else { return match self.channel.send(Msg::Line(buf.to_vec())) { Ok(_) => Ok(buf_size), Err(_) => Err(io::Error::from(io::ErrorKind::Other)), }; } Ok(buf_size) } fn flush(&mut self) -> io::Result<()> { Ok(()) } #[inline] fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { self.write(buf).map(|_| ()) } } impl<'a> MakeWriter<'a> for NonBlocking { type Writer = NonBlocking; fn make_writer(&'a self) -> Self::Writer { self.clone() } } impl WorkerGuard { fn new(handle: JoinHandle<()>, sender: Sender<Msg>, shutdown: Sender<()>) -> Self { WorkerGuard { handle: Some(handle), sender, shutdown, } } } impl Drop for WorkerGuard { fn drop(&mut self) { let timeout = Duration::from_millis(100); match self.sender.send_timeout(Msg::Shutdown, timeout) { Ok(_) => { // Attempt to wait for `Worker` to flush all messages before dropping. This happens // when the `Worker` calls `recv()` on a zero-capacity channel. Use `send_timeout` // so that drop is not blocked indefinitely. // TODO: Make timeout configurable. let timeout = Duration::from_millis(1000); match self.shutdown.send_timeout((), timeout) { Err(SendTimeoutError::Timeout(_)) => { eprintln!( "Shutting down logging worker timed out after {:?}.", timeout ); } _ => { // At this point it is safe to wait for `Worker` destruction without blocking if let Some(handle) = self.handle.take() { if handle.join().is_err() { eprintln!("Logging worker thread panicked"); } }; } } } Err(SendTimeoutError::Disconnected(_)) => (), Err(SendTimeoutError::Timeout(_)) => eprintln!( "Sending shutdown signal to logging worker timed out after {:?}", timeout ), } } } // === impl ErrorCounter === impl ErrorCounter { /// Returns the number of log lines that have been dropped. /// /// If the non-blocking writer is not configured in [lossy mode], the error /// count should always be 0. /// /// [lossy mode]: NonBlockingBuilder::lossy pub fn dropped_lines(&self) -> usize { self.0.load(Ordering::Acquire) } fn incr_saturating(&self) { let mut curr = self.0.load(Ordering::Acquire); // We don't need to enter the CAS loop if the current value is already // `usize::MAX`. if curr == usize::MAX { return; } // This is implemented as a CAS loop rather than as a simple // `fetch_add`, because we don't want to wrap on overflow. Instead, we // need to ensure that saturating addition is performed. 
loop { let val = curr.saturating_add(1); match self .0 .compare_exchange(curr, val, Ordering::AcqRel, Ordering::Acquire) { Ok(_) => return, Err(actual) => curr = actual, } } } } #[cfg(test)] mod test { use super::*; use std::sync::mpsc; use std::thread; use std::time::Duration; struct MockWriter { tx: mpsc::SyncSender<String>, } impl MockWriter { fn new(capacity: usize) -> (Self, mpsc::Receiver<String>) { let (tx, rx) = mpsc::sync_channel(capacity); (Self { tx }, rx) } } impl std::io::Write for MockWriter { fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> { let buf_len = buf.len(); let _ = self.tx.send(String::from_utf8_lossy(buf).to_string()); Ok(buf_len) } fn flush(&mut self) -> std::io::Result<()> { Ok(()) } } #[test] fn backpressure_exerted() { let (mock_writer, rx) = MockWriter::new(1); let (mut non_blocking, _guard) = self::NonBlockingBuilder::default() .lossy(false) .buffered_lines_limit(1) .finish(mock_writer); let error_count = non_blocking.error_counter(); non_blocking.write_all(b"Hello").expect("Failed to write"); assert_eq!(0, error_count.dropped_lines()); let handle = thread::spawn(move || { non_blocking.write_all(b", World").expect("Failed to write"); }); // Sleep a little to ensure previously spawned thread gets blocked on write. thread::sleep(Duration::from_millis(100)); // We should not drop logs when blocked. assert_eq!(0, error_count.dropped_lines()); // Read the first message to unblock sender. let mut line = rx.recv().unwrap(); assert_eq!(line, "Hello"); // Wait for thread to finish. handle.join().expect("thread should not panic"); // Thread has joined, we should be able to read the message it sent. line = rx.recv().unwrap(); assert_eq!(line, ", World"); } fn write_non_blocking(non_blocking: &mut NonBlocking, msg: &[u8]) { non_blocking.write_all(msg).expect("Failed to write"); // Sleep a bit to prevent races. thread::sleep(Duration::from_millis(200)); } #[test] #[ignore] // flaky, see https://github.com/tokio-rs/tracing/issues/751 fn logs_dropped_if_lossy() { let (mock_writer, rx) = MockWriter::new(1); let (mut non_blocking, _guard) = self::NonBlockingBuilder::default() .lossy(true) .buffered_lines_limit(1) .finish(mock_writer); let error_count = non_blocking.error_counter(); // First write will not block write_non_blocking(&mut non_blocking, b"Hello"); assert_eq!(0, error_count.dropped_lines()); // Second write will not block as Worker will have called `recv` on channel. // "Hello" is not yet consumed. MockWriter call to write_all will block until // "Hello" is consumed. write_non_blocking(&mut non_blocking, b", World"); assert_eq!(0, error_count.dropped_lines()); // Will sit in NonBlocking channel's buffer. write_non_blocking(&mut non_blocking, b"Test"); assert_eq!(0, error_count.dropped_lines()); // Allow a line to be written. "Hello" message will be consumed. // ", World" will be able to write to MockWriter. // "Test" will block on call to MockWriter's `write_all` let line = rx.recv().unwrap(); assert_eq!(line, "Hello"); // This will block as NonBlocking channel is full. write_non_blocking(&mut non_blocking, b"Universe"); assert_eq!(1, error_count.dropped_lines()); // Finally the second message sent will be consumed. 
let line = rx.recv().unwrap(); assert_eq!(line, ", World"); assert_eq!(1, error_count.dropped_lines()); } #[test] fn multi_threaded_writes() { let (mock_writer, rx) = MockWriter::new(DEFAULT_BUFFERED_LINES_LIMIT); let (non_blocking, _guard) = self::NonBlockingBuilder::default() .lossy(true) .finish(mock_writer); let error_count = non_blocking.error_counter(); let mut join_handles: Vec<JoinHandle<()>> = Vec::with_capacity(10); for _ in 0..10 { let cloned_non_blocking = non_blocking.clone(); join_handles.push(thread::spawn(move || { let collector = tracing_subscriber::fmt().with_writer(cloned_non_blocking); tracing::collect::with_default(collector.finish(), || { tracing::event!(tracing::Level::INFO, "Hello"); }); })); } for handle in join_handles { handle.join().expect("Failed to join thread"); } let mut hello_count: u8 = 0; while let Ok(event_str) = rx.recv_timeout(Duration::from_secs(5)) { assert!(event_str.contains("Hello")); hello_count += 1; } assert_eq!(10, hello_count); assert_eq!(0, error_count.dropped_lines()); } }
random_line_split
non_blocking.rs
//! A non-blocking, off-thread writer. //! //! This spawns a dedicated worker thread which is responsible for writing log //! lines to the provided writer. When a line is written using the returned //! `NonBlocking` struct's `make_writer` method, it will be enqueued to be //! written by the worker thread. //! //! The queue has a fixed capacity, and if it becomes full, any logs written //! to it will be dropped until capacity is once again available. This may //! occur if logs are consistently produced faster than the worker thread can //! output them. The queue capacity and behavior when full (i.e., whether to //! drop logs or to exert backpressure to slow down senders) can be configured //! using [`NonBlockingBuilder::default()`][builder]. //! This function returns the default configuration. It is equivalent to: //! //! ```rust //! # use tracing_appender::non_blocking::{NonBlocking, WorkerGuard}; //! # fn doc() -> (NonBlocking, WorkerGuard) { //! tracing_appender::non_blocking(std::io::stdout()) //! # } //! ``` //! [builder]: NonBlockingBuilder::default() //! //! <br/> This function returns a tuple of `NonBlocking` and `WorkerGuard`. //! `NonBlocking` implements [`MakeWriter`] which integrates with `tracing_subscriber`. //! `WorkerGuard` is a drop guard that is responsible for flushing any remaining logs when //! the program terminates. //! //! Note that the `WorkerGuard` returned by `non_blocking` _must_ be assigned to a binding that //! is not `_`, as `_` will result in the `WorkerGuard` being dropped immediately. //! Unintentional drops of `WorkerGuard` remove the guarantee that logs will be flushed //! during a program's termination, in a panic or otherwise. //! //! See [`WorkerGuard`][worker_guard] for examples of using the guard. //! //! [worker_guard]: WorkerGuard //! //! # Examples //! //! ``` rust //! # fn docs() { //! let (non_blocking, _guard) = tracing_appender::non_blocking(std::io::stdout()); //! let collector = tracing_subscriber::fmt().with_writer(non_blocking); //! tracing::collect::with_default(collector.finish(), || { //! tracing::event!(tracing::Level::INFO, "Hello"); //! }); //! # } //! ``` use crate::worker::Worker; use crate::Msg; use crossbeam_channel::{bounded, SendTimeoutError, Sender}; use std::io; use std::io::Write; use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering; use std::sync::Arc; use std::thread::JoinHandle; use std::time::Duration; use tracing_subscriber::fmt::MakeWriter; /// The default maximum number of buffered log lines. /// /// If [`NonBlocking`][non-blocking] is lossy, it will drop spans/events at capacity. /// If [`NonBlocking`][non-blocking] is _not_ lossy, /// backpressure will be exerted on senders, causing them to block their /// respective threads until there is available capacity. /// /// [non-blocking]: NonBlocking /// Recommended to be a power of 2. pub const DEFAULT_BUFFERED_LINES_LIMIT: usize = 128_000; /// A guard that flushes spans/events associated to a [`NonBlocking`] on a drop /// /// Writing to a [`NonBlocking`] writer will **not** immediately write a span or event to the underlying /// output. Instead, the span or event will be written by a dedicated logging thread at some later point. /// To increase throughput, the non-blocking writer will flush to the underlying output on /// a periodic basis rather than every time a span or event is written. This means that if the program /// terminates abruptly (such as through an uncaught `panic` or a `std::process::exit`), some spans /// or events may not be written. 
/// /// Since spans/events and events recorded near a crash are often necessary for diagnosing the failure, /// `WorkerGuard` provides a mechanism to ensure that _all_ buffered logs are flushed to their output. /// `WorkerGuard` should be assigned in the `main` function or whatever the entrypoint of the program is. /// This will ensure that the guard will be dropped during an unwinding or when `main` exits /// successfully. /// /// # Examples /// /// ``` rust /// # #[clippy::allow(needless_doctest_main)] /// fn main () { /// # fn doc() { /// let (non_blocking, _guard) = tracing_appender::non_blocking(std::io::stdout()); /// let collector = tracing_subscriber::fmt().with_writer(non_blocking); /// tracing::collect::with_default(collector.finish(), || { /// // Emit some tracing events within context of the non_blocking `_guard` and tracing subscriber /// tracing::event!(tracing::Level::INFO, "Hello"); /// }); /// // Exiting the context of `main` will drop the `_guard` and any remaining logs should get flushed /// # } /// } /// ``` #[must_use] #[derive(Debug)] pub struct WorkerGuard { handle: Option<JoinHandle<()>>, sender: Sender<Msg>, shutdown: Sender<()>, } /// A non-blocking writer. /// /// While the line between "blocking" and "non-blocking" IO is fuzzy, writing to a file is typically /// considered to be a _blocking_ operation. For an application whose `Collector` writes spans and events /// as they are emitted, an application might find the latency profile to be unacceptable. /// `NonBlocking` moves the writing out of an application's data path by sending spans and events /// to a dedicated logging thread. /// /// This struct implements [`MakeWriter`][make_writer] from the `tracing-subscriber` /// crate. Therefore, it can be used with the [`tracing_subscriber::fmt`][fmt] module /// or with any other collector/subscriber implementation that uses the `MakeWriter` trait. /// /// [make_writer]: tracing_subscriber::fmt::MakeWriter /// [fmt]: mod@tracing_subscriber::fmt #[derive(Clone, Debug)] pub struct NonBlocking { error_counter: ErrorCounter, channel: Sender<Msg>, is_lossy: bool, } /// Tracks the number of times a log line was dropped by the background thread. /// /// If the non-blocking writer is not configured in [lossy mode], the error /// count should always be 0. /// /// [lossy mode]: NonBlockingBuilder::lossy #[derive(Clone, Debug)] pub struct ErrorCounter(Arc<AtomicUsize>); impl NonBlocking { /// Returns a new `NonBlocking` writer wrapping the provided `writer`. /// /// The returned `NonBlocking` writer will have the [default configuration][default] values. /// Other configurations can be specified using the [builder] interface. /// /// [default]: NonBlockingBuilder::default() /// [builder]: NonBlockingBuilder pub fn new<T: Write + Send + Sync + 'static>(writer: T) -> (NonBlocking, WorkerGuard) { NonBlockingBuilder::default().finish(writer) } fn create<T: Write + Send + Sync + 'static>( writer: T, buffered_lines_limit: usize, is_lossy: bool, thread_name: String, ) -> (NonBlocking, WorkerGuard) { let (sender, receiver) = bounded(buffered_lines_limit); let (shutdown_sender, shutdown_receiver) = bounded(0); let worker = Worker::new(receiver, writer, shutdown_receiver); let worker_guard = WorkerGuard::new( worker.worker_thread(thread_name), sender.clone(), shutdown_sender, ); ( Self { channel: sender, error_counter: ErrorCounter(Arc::new(AtomicUsize::new(0))), is_lossy, }, worker_guard, ) } /// Returns a counter for the number of times logs where dropped. 
This will always return zero if /// `NonBlocking` is not lossy. pub fn error_counter(&self) -> ErrorCounter { self.error_counter.clone() } } /// A builder for [`NonBlocking`][non-blocking]. /// /// [non-blocking]: NonBlocking #[derive(Debug)] pub struct NonBlockingBuilder { buffered_lines_limit: usize, is_lossy: bool, thread_name: String, } impl NonBlockingBuilder { /// Sets the number of lines to buffer before dropping logs or exerting backpressure on senders pub fn buffered_lines_limit(mut self, buffered_lines_limit: usize) -> NonBlockingBuilder { self.buffered_lines_limit = buffered_lines_limit; self } /// Sets whether `NonBlocking` should be lossy or not. /// /// If set to `true`, logs will be dropped when the buffered limit is reached. If `false`, backpressure /// will be exerted on senders, blocking them until the buffer has capacity again. /// /// By default, the built `NonBlocking` will be lossy. pub fn lossy(mut self, is_lossy: bool) -> NonBlockingBuilder
/// Override the worker thread's name. /// /// The default worker thread name is "tracing-appender". pub fn thread_name(mut self, name: &str) -> NonBlockingBuilder { self.thread_name = name.to_string(); self } /// Completes the builder, returning the configured `NonBlocking`. pub fn finish<T: Write + Send + Sync + 'static>(self, writer: T) -> (NonBlocking, WorkerGuard) { NonBlocking::create( writer, self.buffered_lines_limit, self.is_lossy, self.thread_name, ) } } impl Default for NonBlockingBuilder { fn default() -> Self { NonBlockingBuilder { buffered_lines_limit: DEFAULT_BUFFERED_LINES_LIMIT, is_lossy: true, thread_name: "tracing-appender".to_string(), } } } impl std::io::Write for NonBlocking { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { let buf_size = buf.len(); if self.is_lossy { if self.channel.try_send(Msg::Line(buf.to_vec())).is_err() { self.error_counter.incr_saturating(); } } else { return match self.channel.send(Msg::Line(buf.to_vec())) { Ok(_) => Ok(buf_size), Err(_) => Err(io::Error::from(io::ErrorKind::Other)), }; } Ok(buf_size) } fn flush(&mut self) -> io::Result<()> { Ok(()) } #[inline] fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { self.write(buf).map(|_| ()) } } impl<'a> MakeWriter<'a> for NonBlocking { type Writer = NonBlocking; fn make_writer(&'a self) -> Self::Writer { self.clone() } } impl WorkerGuard { fn new(handle: JoinHandle<()>, sender: Sender<Msg>, shutdown: Sender<()>) -> Self { WorkerGuard { handle: Some(handle), sender, shutdown, } } } impl Drop for WorkerGuard { fn drop(&mut self) { let timeout = Duration::from_millis(100); match self.sender.send_timeout(Msg::Shutdown, timeout) { Ok(_) => { // Attempt to wait for `Worker` to flush all messages before dropping. This happens // when the `Worker` calls `recv()` on a zero-capacity channel. Use `send_timeout` // so that drop is not blocked indefinitely. // TODO: Make timeout configurable. let timeout = Duration::from_millis(1000); match self.shutdown.send_timeout((), timeout) { Err(SendTimeoutError::Timeout(_)) => { eprintln!( "Shutting down logging worker timed out after {:?}.", timeout ); } _ => { // At this point it is safe to wait for `Worker` destruction without blocking if let Some(handle) = self.handle.take() { if handle.join().is_err() { eprintln!("Logging worker thread panicked"); } }; } } } Err(SendTimeoutError::Disconnected(_)) => (), Err(SendTimeoutError::Timeout(_)) => eprintln!( "Sending shutdown signal to logging worker timed out after {:?}", timeout ), } } } // === impl ErrorCounter === impl ErrorCounter { /// Returns the number of log lines that have been dropped. /// /// If the non-blocking writer is not configured in [lossy mode], the error /// count should always be 0. /// /// [lossy mode]: NonBlockingBuilder::lossy pub fn dropped_lines(&self) -> usize { self.0.load(Ordering::Acquire) } fn incr_saturating(&self) { let mut curr = self.0.load(Ordering::Acquire); // We don't need to enter the CAS loop if the current value is already // `usize::MAX`. if curr == usize::MAX { return; } // This is implemented as a CAS loop rather than as a simple // `fetch_add`, because we don't want to wrap on overflow. Instead, we // need to ensure that saturating addition is performed. 
loop { let val = curr.saturating_add(1); match self .0 .compare_exchange(curr, val, Ordering::AcqRel, Ordering::Acquire) { Ok(_) => return, Err(actual) => curr = actual, } } } } #[cfg(test)] mod test { use super::*; use std::sync::mpsc; use std::thread; use std::time::Duration; struct MockWriter { tx: mpsc::SyncSender<String>, } impl MockWriter { fn new(capacity: usize) -> (Self, mpsc::Receiver<String>) { let (tx, rx) = mpsc::sync_channel(capacity); (Self { tx }, rx) } } impl std::io::Write for MockWriter { fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> { let buf_len = buf.len(); let _ = self.tx.send(String::from_utf8_lossy(buf).to_string()); Ok(buf_len) } fn flush(&mut self) -> std::io::Result<()> { Ok(()) } } #[test] fn backpressure_exerted() { let (mock_writer, rx) = MockWriter::new(1); let (mut non_blocking, _guard) = self::NonBlockingBuilder::default() .lossy(false) .buffered_lines_limit(1) .finish(mock_writer); let error_count = non_blocking.error_counter(); non_blocking.write_all(b"Hello").expect("Failed to write"); assert_eq!(0, error_count.dropped_lines()); let handle = thread::spawn(move || { non_blocking.write_all(b", World").expect("Failed to write"); }); // Sleep a little to ensure previously spawned thread gets blocked on write. thread::sleep(Duration::from_millis(100)); // We should not drop logs when blocked. assert_eq!(0, error_count.dropped_lines()); // Read the first message to unblock sender. let mut line = rx.recv().unwrap(); assert_eq!(line, "Hello"); // Wait for thread to finish. handle.join().expect("thread should not panic"); // Thread has joined, we should be able to read the message it sent. line = rx.recv().unwrap(); assert_eq!(line, ", World"); } fn write_non_blocking(non_blocking: &mut NonBlocking, msg: &[u8]) { non_blocking.write_all(msg).expect("Failed to write"); // Sleep a bit to prevent races. thread::sleep(Duration::from_millis(200)); } #[test] #[ignore] // flaky, see https://github.com/tokio-rs/tracing/issues/751 fn logs_dropped_if_lossy() { let (mock_writer, rx) = MockWriter::new(1); let (mut non_blocking, _guard) = self::NonBlockingBuilder::default() .lossy(true) .buffered_lines_limit(1) .finish(mock_writer); let error_count = non_blocking.error_counter(); // First write will not block write_non_blocking(&mut non_blocking, b"Hello"); assert_eq!(0, error_count.dropped_lines()); // Second write will not block as Worker will have called `recv` on channel. // "Hello" is not yet consumed. MockWriter call to write_all will block until // "Hello" is consumed. write_non_blocking(&mut non_blocking, b", World"); assert_eq!(0, error_count.dropped_lines()); // Will sit in NonBlocking channel's buffer. write_non_blocking(&mut non_blocking, b"Test"); assert_eq!(0, error_count.dropped_lines()); // Allow a line to be written. "Hello" message will be consumed. // ", World" will be able to write to MockWriter. // "Test" will block on call to MockWriter's `write_all` let line = rx.recv().unwrap(); assert_eq!(line, "Hello"); // This will block as NonBlocking channel is full. write_non_blocking(&mut non_blocking, b"Universe"); assert_eq!(1, error_count.dropped_lines()); // Finally the second message sent will be consumed. 
let line = rx.recv().unwrap(); assert_eq!(line, ", World"); assert_eq!(1, error_count.dropped_lines()); } #[test] fn multi_threaded_writes() { let (mock_writer, rx) = MockWriter::new(DEFAULT_BUFFERED_LINES_LIMIT); let (non_blocking, _guard) = self::NonBlockingBuilder::default() .lossy(true) .finish(mock_writer); let error_count = non_blocking.error_counter(); let mut join_handles: Vec<JoinHandle<()>> = Vec::with_capacity(10); for _ in 0..10 { let cloned_non_blocking = non_blocking.clone(); join_handles.push(thread::spawn(move || { let collector = tracing_subscriber::fmt().with_writer(cloned_non_blocking); tracing::collect::with_default(collector.finish(), || { tracing::event!(tracing::Level::INFO, "Hello"); }); })); } for handle in join_handles { handle.join().expect("Failed to join thread"); } let mut hello_count: u8 = 0; while let Ok(event_str) = rx.recv_timeout(Duration::from_secs(5)) { assert!(event_str.contains("Hello")); hello_count += 1; } assert_eq!(10, hello_count); assert_eq!(0, error_count.dropped_lines()); } }
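The tests above poll `error_count.dropped_lines()` to assert on drop behaviour; the same counter is available to application code. A small sketch, assuming only the crate-level `non_blocking` constructor and the `error_counter` / `dropped_lines` methods defined in this file:

```rust
// Sketch: observing dropped lines from the application side.
// In lossy mode (the default) the counter grows whenever the channel is full
// at write time; in non-lossy mode it should stay at zero.
use std::io::Write;

fn write_and_report() {
    let (mut non_blocking, _guard) = tracing_appender::non_blocking(std::io::stdout());
    let errors = non_blocking.error_counter();

    let _ = non_blocking.write_all(b"a log line\n");

    eprintln!("lines dropped so far: {}", errors.dropped_lines());
}
```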
{ self.is_lossy = is_lossy; self }
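The body above completes the `lossy` setter left open at the end of the builder. What the two settings change at write time is decided in `NonBlocking`'s `Write` impl (non-blocking `try_send` plus drop, versus blocking `send`). A small configuration sketch, assuming the builder methods defined in this file; the writer targets and limits are illustrative only:

```rust
// Two ways to handle a full buffer, both driven by the `lossy` flag above.
fn build_both() {
    use tracing_appender::non_blocking::NonBlockingBuilder;

    // Lossy (the default): a full channel makes `try_send` fail and the line
    // is dropped, bumping the ErrorCounter.
    let (lossy, _g1) = NonBlockingBuilder::default()
        .lossy(true)
        .buffered_lines_limit(128_000)
        .finish(std::io::stdout());

    // Non-lossy: a full channel makes `send` block, exerting backpressure on
    // the logging thread instead of losing lines.
    let (backpressure, _g2) = NonBlockingBuilder::default()
        .lossy(false)
        .buffered_lines_limit(1_000)
        .finish(std::io::stderr());

    let _ = (lossy, backpressure);
}
```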
identifier_body
key.rs
//! Protected key //! use std::cell::{Cell, Ref, RefCell, RefMut, BorrowState}; use std::fmt::{self, Debug}; use std::ops::{Deref, DerefMut}; use std::rc::Rc; use allocator::{Allocator, KeyAllocator, DefaultKeyAllocator}; use buf::ProtBuf; /// Key of bytes pub type ProtKey8<A = DefaultKeyAllocator> = ProtKey<u8, A>; const NOREAD: usize = 0; /// A protected key /// /// Transform a `ProtBuf` instance into a protected key `ProtKey` and provide /// tigher access control on its memory. /// /// By default a `ProtKey` cannot be read nor written to and will only /// provide separated accesses with limited scopes. Thus, RAII accessor /// methods must be used to read and write to a `ProtKey`. Accessing the /// underlying key is a bit similar to the way of manipulating an object /// wrapped in `RefCell`. /// /// ```rust /// # extern crate tars; /// # use tars::allocator::ProtectedKeyAllocator; /// # use tars::{ProtKey, ProtBuf, ProtKey8}; /// # fn encrypt(_: &[u8], _: &[u8]) {} /// # fn main() { /// // Instantiate a new buffer initialized with random bytes. /// // Same as an usual ProtBuf instance but with a different allocator /// let buf_rnd = ProtBuf::<u8, ProtectedKeyAllocator>::new_rand_os(32); /// /// // Until here memory buffer is read/write. Turns-it into a key /// let key = ProtKey::new(buf_rnd); /// /// // Or more simply, like this with exactly the same result /// let key: ProtKey8 = ProtBuf::new_rand_os(32).into_key(); /// /// { /// // Request access in read-mode /// let key_read = key.read(); /// let byte = key_read[16]; /// // ... /// } // Relinquish its read-access /// /// // Alternative way to read its content /// key.read_with(|k| encrypt(&k[..], b"abc")); /// /// // Access it in write-mode /// let key_write = key.try_write(); /// if let Some(mut kw) = key_write { /// kw[16] = 42; /// } /// # } /// ``` pub struct ProtKey<T: Copy, A: KeyAllocator = DefaultKeyAllocator> { key: RefCell<ProtBuf<T, A>>, read_ctr: Rc<Cell<usize>> } impl<T: Copy, A: KeyAllocator> ProtKey<T, A> { /// Take ownership of `prot_buf` and transform it into a `ProtKey`. By /// default prevent any access. pub fn new(prot_buf: ProtBuf<T, A>) -> ProtKey<T, A> { unsafe { <A as KeyAllocator>::protect_none(prot_buf.as_ptr() as *mut u8, prot_buf.len_bytes()); } ProtKey { key: RefCell::new(prot_buf), read_ctr: Rc::new(Cell::new(NOREAD)) } } /// Consume and copy `prot_buf` to force using `ProtKey`'s allocator. /// If `prot_buf` already uses a `KeyAllocator` there is no need to make /// a copy so directly call the default cstor `new` instead. pub fn from_buf<B: Allocator>(prot_buf: ProtBuf<T, B>) -> ProtKey<T, A> { let buf = ProtBuf::from_slice(&prot_buf); ProtKey::new(buf) } /// Return a wrapper to the key in read mode. This method `panic!` if /// this key is already accessed in write mode. // FIXME: Not sure if it's the best interface to provide a `try_read` // variant to this `fail`ing method. It would maybe be better to // implement a single method returning a `Result`. See this RFC // https://github.com/rust-lang/rfcs/blob/master/text/0236-error-conventions.md pub fn read(&self) -> ProtKeyRead<T, A> { ProtKeyRead::new(self.key.borrow(), self.read_ctr.clone()) } /// Return a wrapper to the key in read mode. Return `None` /// if the key is already accessed in write mode. pub fn try_read(&self) -> Option<ProtKeyRead<T, A>> { match self.key.borrow_state() { BorrowState::Reading|BorrowState::Unused => Some(self.read()), _ => None } } /// Access the key in read mode and pass a reference to closure `f`. 
/// The key can only be read during this call. This method will `panic!` /// if a read access cannot be acquired on this key. pub fn read_with<F>(&self, mut f: F) where F: FnMut(ProtKeyRead<T, A>){ f(self.read()) } /// Return a wrapper to the key in write mode. This method `panic!` if /// the key is already currently accessed in read or write mode. pub fn write(&self) -> ProtKeyWrite<T, A> { let key_write = ProtKeyWrite::new(self.key.borrow_mut()); assert_eq!(self.read_ctr.get(), NOREAD); key_write } /// Return a wrapper to the key in write mode. Return `None` /// if the key is already accessed in read or write mode. pub fn try_write(&self) -> Option<ProtKeyWrite<T, A>> { match self.key.borrow_state() { BorrowState::Unused => Some(self.write()), _ => None } } /// Access the key in write mode and pass a reference to closure `f`. /// The key can only be writtent during this call. This method will /// `panic!` if a write access cannot be acquired on this key. pub fn write_with<F>(&self, mut f: F) where F: FnMut(&mut ProtKeyWrite<T, A>) { f(&mut self.write()) } } impl<T: Copy, A: KeyAllocator> Drop for ProtKey<T, A> { fn drop(&mut self) { // FIXME: without this assert this drop is useless. assert_eq!(self.read_ctr.get(), NOREAD); } } impl<T: Copy, A: KeyAllocator> Clone for ProtKey<T, A> { fn clone(&self) -> ProtKey<T, A> { ProtKey::new(self.read().clone()) } } impl<T: Copy, A: KeyAllocator> PartialEq for ProtKey<T, A> { fn eq(&self, other: &ProtKey<T, A>) -> bool { match (self.try_read(), other.try_read()) { (Some(ref s), Some(ref o)) => *s == *o, (_, _) => false } } } impl<T: Debug + Copy, A: KeyAllocator> Debug for ProtKey<T, A> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self.try_read() { Some(r) => r.fmt(f), None => Err(fmt::Error) } } } /// An RAII protected key with read access /// /// This instance is the result of a `read` request on a `ProtKey`. If no /// other similar instance on the same `ProtKey` exists, raw memory access /// will be revoked when this instance is destructed. pub struct ProtKeyRead<'a, T: Copy + 'a, A: KeyAllocator + 'a> { ref_key: Ref<'a, ProtBuf<T, A>>, read_ctr: Rc<Cell<usize>> } impl<'a, T: Copy, A: KeyAllocator> ProtKeyRead<'a, T, A> { fn new(ref_key: Ref<'a, ProtBuf<T, A>>, read_ctr: Rc<Cell<usize>>) -> ProtKeyRead<'a, T, A> { if read_ctr.get() == NOREAD { unsafe { <A as KeyAllocator>::protect_read(ref_key.as_ptr() as *mut u8, ref_key.len_bytes()); } } read_ctr.set(read_ctr.get().checked_add(1).unwrap()); ProtKeyRead { ref_key: ref_key, read_ctr: read_ctr } } /// Clone this instance. // FIXME: Currently does not implement `clone()` as it would interfere // with `ProtKey::clone()`. 
pub fn clone_it(&self) -> ProtKeyRead<T, A> { ProtKeyRead::new(Ref::clone(&self.ref_key), self.read_ctr.clone()) } } impl<'a, T: Copy, A: KeyAllocator> Drop for ProtKeyRead<'a, T, A> { fn drop(&mut self) { self.read_ctr.set(self.read_ctr.get().checked_sub(1).unwrap()); if self.read_ctr.get() == NOREAD { unsafe { <A as KeyAllocator>::protect_none( self.ref_key.as_ptr() as *mut u8, self.ref_key.len_bytes()); } } } } impl<'a, T: Copy, A: KeyAllocator> Deref for ProtKeyRead<'a, T, A> { type Target = ProtBuf<T, A>; fn deref(&self) -> &ProtBuf<T, A> { &*self.ref_key } } impl<'a, T: Copy, A: KeyAllocator> AsRef<[T]> for ProtKeyRead<'a, T, A> { fn as_ref(&self) -> &[T] { &***self } } impl<'a, T: Copy, A: KeyAllocator> PartialEq for ProtKeyRead<'a, T, A> { fn eq(&self, other: &ProtKeyRead<T, A>) -> bool { **self == **other } } impl<'a, T: Debug + Copy, A: KeyAllocator> Debug for ProtKeyRead<'a, T, A> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
} /// An RAII protected key with write access /// /// This instance is the result of a `write` request on a `ProtKey`. Its /// raw memory may only be written during the lifetime of this object. pub struct ProtKeyWrite<'a, T: Copy + 'a, A: KeyAllocator + 'a> { ref_key: RefMut<'a, ProtBuf<T, A>>, } impl<'a, T: Copy, A: KeyAllocator> ProtKeyWrite<'a, T, A> { fn new(ref_key: RefMut<'a, ProtBuf<T, A>>) -> ProtKeyWrite<'a, T, A> { unsafe { <A as KeyAllocator>::protect_write(ref_key.as_ptr() as *mut u8, ref_key.len_bytes()); } ProtKeyWrite { ref_key: ref_key, } } } impl<'a, T: Copy, A: KeyAllocator> Drop for ProtKeyWrite<'a, T, A> { fn drop(&mut self) { unsafe { <A as KeyAllocator>::protect_none(self.ref_key.as_ptr() as *mut u8, self.ref_key.len_bytes()); } } } /// This method is mandatory, but it should not be used for reading the /// content of the underlying key... #[allow(unreachable_code)] impl<'a, T: Copy, A: KeyAllocator> Deref for ProtKeyWrite<'a, T, A> { type Target = ProtBuf<T, A>; fn deref(&self) -> &ProtBuf<T, A> { unreachable!("key must only be written"); &*self.ref_key } } impl<'a, T: Copy, A: KeyAllocator> DerefMut for ProtKeyWrite<'a, T, A> { fn deref_mut(&mut self) -> &mut ProtBuf<T, A> { &mut *self.ref_key } } #[cfg(test)] mod test { use allocator::ProtectedKeyAllocator; use buf::ProtBuf; use key::{ProtKey, ProtKey8}; #[test] fn test_read() { let s1 = ProtBuf::<u8, ProtectedKeyAllocator>::new_rand_os(256); let s2 = s1.clone(); let key = ProtKey::new(s1); assert_eq!(&**key.read(), &*s2); assert_eq!(&key.read()[..], &s2[..]); assert_eq!(*key.read(), s2); { let r1 = key.read(); let r2 = key.try_read().unwrap(); assert_eq!(r1, r2); assert!(key.try_write().is_none()); let r3 = r1.clone_it(); assert_eq!(r3, r2); } key.read_with(|k| assert_eq!(&k[..], &*s2)); assert!(key.try_write().is_some()); } #[test] fn test_write() { let zero = ProtBuf::<u8, ProtectedKeyAllocator>::new_zero(256); let key = ProtBuf::<u8, ProtectedKeyAllocator>::new_rand_os(256).into_key(); for i in key.write().iter_mut() { *i = 0; } assert_eq!(*key.read(), zero); { let _w = key.write(); assert!(key.try_write().is_none()); assert!(key.try_read().is_none()); } let mut c = 0_usize; key.write_with(|k| {k[42] = 42; c = 1;}); assert_eq!(c, 1); assert!(key.try_write().is_some()); assert!(key.try_read().is_some()); } #[test] fn test_default_params() { let b = ProtBuf::new_zero(42); let _: ProtKey8 = ProtKey::new(b); let b = ProtBuf::new_zero(42); let _: ProtKey<u8> = ProtKey::new(b); } }
{ self.ref_key.fmt(f) }
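For context on how these guards are used from caller code, here is a minimal sketch that follows the crate's own doc example near the top of this file; the `tars` imports and the 32-byte length come from that example, everything else is illustrative:

```rust
use tars::{ProtBuf, ProtKey8};

fn demo() {
    // A freshly converted key is neither readable nor writable.
    let key: ProtKey8 = ProtBuf::new_rand_os(32).into_key();

    {
        // RAII read guard: memory is readable only while `r` is alive.
        let r = key.read();
        let _byte = r[16];
    } // read access revoked here once the last reader is dropped

    // Write access is exclusive: `try_write` returns None while readers exist.
    if let Some(mut w) = key.try_write() {
        w[16] = 42;
    }
}
```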
identifier_body
key.rs
//! Protected key //! use std::cell::{Cell, Ref, RefCell, RefMut, BorrowState}; use std::fmt::{self, Debug}; use std::ops::{Deref, DerefMut}; use std::rc::Rc; use allocator::{Allocator, KeyAllocator, DefaultKeyAllocator}; use buf::ProtBuf; /// Key of bytes pub type ProtKey8<A = DefaultKeyAllocator> = ProtKey<u8, A>; const NOREAD: usize = 0; /// A protected key /// /// Transform a `ProtBuf` instance into a protected key `ProtKey` and provide /// tigher access control on its memory. /// /// By default a `ProtKey` cannot be read nor written to and will only /// provide separated accesses with limited scopes. Thus, RAII accessor /// methods must be used to read and write to a `ProtKey`. Accessing the /// underlying key is a bit similar to the way of manipulating an object /// wrapped in `RefCell`. /// /// ```rust /// # extern crate tars; /// # use tars::allocator::ProtectedKeyAllocator; /// # use tars::{ProtKey, ProtBuf, ProtKey8}; /// # fn encrypt(_: &[u8], _: &[u8]) {} /// # fn main() { /// // Instantiate a new buffer initialized with random bytes. /// // Same as an usual ProtBuf instance but with a different allocator /// let buf_rnd = ProtBuf::<u8, ProtectedKeyAllocator>::new_rand_os(32); /// /// // Until here memory buffer is read/write. Turns-it into a key /// let key = ProtKey::new(buf_rnd); /// /// // Or more simply, like this with exactly the same result /// let key: ProtKey8 = ProtBuf::new_rand_os(32).into_key(); /// /// { /// // Request access in read-mode /// let key_read = key.read(); /// let byte = key_read[16]; /// // ... /// } // Relinquish its read-access /// /// // Alternative way to read its content /// key.read_with(|k| encrypt(&k[..], b"abc")); /// /// // Access it in write-mode /// let key_write = key.try_write(); /// if let Some(mut kw) = key_write { /// kw[16] = 42; /// } /// # } /// ``` pub struct ProtKey<T: Copy, A: KeyAllocator = DefaultKeyAllocator> { key: RefCell<ProtBuf<T, A>>, read_ctr: Rc<Cell<usize>> } impl<T: Copy, A: KeyAllocator> ProtKey<T, A> { /// Take ownership of `prot_buf` and transform it into a `ProtKey`. By /// default prevent any access. pub fn new(prot_buf: ProtBuf<T, A>) -> ProtKey<T, A> { unsafe { <A as KeyAllocator>::protect_none(prot_buf.as_ptr() as *mut u8, prot_buf.len_bytes()); } ProtKey { key: RefCell::new(prot_buf), read_ctr: Rc::new(Cell::new(NOREAD)) } } /// Consume and copy `prot_buf` to force using `ProtKey`'s allocator. /// If `prot_buf` already uses a `KeyAllocator` there is no need to make /// a copy so directly call the default cstor `new` instead. pub fn from_buf<B: Allocator>(prot_buf: ProtBuf<T, B>) -> ProtKey<T, A> { let buf = ProtBuf::from_slice(&prot_buf); ProtKey::new(buf) } /// Return a wrapper to the key in read mode. This method `panic!` if /// this key is already accessed in write mode. // FIXME: Not sure if it's the best interface to provide a `try_read` // variant to this `fail`ing method. It would maybe be better to // implement a single method returning a `Result`. See this RFC // https://github.com/rust-lang/rfcs/blob/master/text/0236-error-conventions.md pub fn read(&self) -> ProtKeyRead<T, A> { ProtKeyRead::new(self.key.borrow(), self.read_ctr.clone()) } /// Return a wrapper to the key in read mode. Return `None` /// if the key is already accessed in write mode. pub fn try_read(&self) -> Option<ProtKeyRead<T, A>> { match self.key.borrow_state() { BorrowState::Reading|BorrowState::Unused => Some(self.read()), _ => None } } /// Access the key in read mode and pass a reference to closure `f`. 
/// The key can only be read during this call. This method will `panic!` /// if a read access cannot be acquired on this key. pub fn read_with<F>(&self, mut f: F) where F: FnMut(ProtKeyRead<T, A>){ f(self.read()) } /// Return a wrapper to the key in write mode. This method `panic!` if /// the key is already currently accessed in read or write mode. pub fn write(&self) -> ProtKeyWrite<T, A> { let key_write = ProtKeyWrite::new(self.key.borrow_mut()); assert_eq!(self.read_ctr.get(), NOREAD); key_write } /// Return a wrapper to the key in write mode. Return `None` /// if the key is already accessed in read or write mode. pub fn try_write(&self) -> Option<ProtKeyWrite<T, A>> { match self.key.borrow_state() { BorrowState::Unused => Some(self.write()), _ => None } } /// Access the key in write mode and pass a reference to closure `f`. /// The key can only be writtent during this call. This method will /// `panic!` if a write access cannot be acquired on this key. pub fn write_with<F>(&self, mut f: F) where F: FnMut(&mut ProtKeyWrite<T, A>) { f(&mut self.write()) } } impl<T: Copy, A: KeyAllocator> Drop for ProtKey<T, A> { fn drop(&mut self) { // FIXME: without this assert this drop is useless. assert_eq!(self.read_ctr.get(), NOREAD); } } impl<T: Copy, A: KeyAllocator> Clone for ProtKey<T, A> { fn clone(&self) -> ProtKey<T, A> { ProtKey::new(self.read().clone()) } } impl<T: Copy, A: KeyAllocator> PartialEq for ProtKey<T, A> { fn eq(&self, other: &ProtKey<T, A>) -> bool { match (self.try_read(), other.try_read()) { (Some(ref s), Some(ref o)) => *s == *o, (_, _) => false } } } impl<T: Debug + Copy, A: KeyAllocator> Debug for ProtKey<T, A> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self.try_read() { Some(r) => r.fmt(f), None => Err(fmt::Error) } } } /// An RAII protected key with read access /// /// This instance is the result of a `read` request on a `ProtKey`. If no /// other similar instance on the same `ProtKey` exists, raw memory access /// will be revoked when this instance is destructed. pub struct ProtKeyRead<'a, T: Copy + 'a, A: KeyAllocator + 'a> { ref_key: Ref<'a, ProtBuf<T, A>>, read_ctr: Rc<Cell<usize>> } impl<'a, T: Copy, A: KeyAllocator> ProtKeyRead<'a, T, A> { fn new(ref_key: Ref<'a, ProtBuf<T, A>>, read_ctr: Rc<Cell<usize>>) -> ProtKeyRead<'a, T, A> { if read_ctr.get() == NOREAD { unsafe { <A as KeyAllocator>::protect_read(ref_key.as_ptr() as *mut u8, ref_key.len_bytes()); } } read_ctr.set(read_ctr.get().checked_add(1).unwrap()); ProtKeyRead { ref_key: ref_key, read_ctr: read_ctr } } /// Clone this instance. // FIXME: Currently does not implement `clone()` as it would interfere // with `ProtKey::clone()`.
} impl<'a, T: Copy, A: KeyAllocator> Drop for ProtKeyRead<'a, T, A> { fn drop(&mut self) { self.read_ctr.set(self.read_ctr.get().checked_sub(1).unwrap()); if self.read_ctr.get() == NOREAD { unsafe { <A as KeyAllocator>::protect_none( self.ref_key.as_ptr() as *mut u8, self.ref_key.len_bytes()); } } } } impl<'a, T: Copy, A: KeyAllocator> Deref for ProtKeyRead<'a, T, A> { type Target = ProtBuf<T, A>; fn deref(&self) -> &ProtBuf<T, A> { &*self.ref_key } } impl<'a, T: Copy, A: KeyAllocator> AsRef<[T]> for ProtKeyRead<'a, T, A> { fn as_ref(&self) -> &[T] { &***self } } impl<'a, T: Copy, A: KeyAllocator> PartialEq for ProtKeyRead<'a, T, A> { fn eq(&self, other: &ProtKeyRead<T, A>) -> bool { **self == **other } } impl<'a, T: Debug + Copy, A: KeyAllocator> Debug for ProtKeyRead<'a, T, A> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.ref_key.fmt(f) } } /// An RAII protected key with write access /// /// This instance is the result of a `write` request on a `ProtKey`. Its /// raw memory may only be written during the lifetime of this object. pub struct ProtKeyWrite<'a, T: Copy + 'a, A: KeyAllocator + 'a> { ref_key: RefMut<'a, ProtBuf<T, A>>, } impl<'a, T: Copy, A: KeyAllocator> ProtKeyWrite<'a, T, A> { fn new(ref_key: RefMut<'a, ProtBuf<T, A>>) -> ProtKeyWrite<'a, T, A> { unsafe { <A as KeyAllocator>::protect_write(ref_key.as_ptr() as *mut u8, ref_key.len_bytes()); } ProtKeyWrite { ref_key: ref_key, } } } impl<'a, T: Copy, A: KeyAllocator> Drop for ProtKeyWrite<'a, T, A> { fn drop(&mut self) { unsafe { <A as KeyAllocator>::protect_none(self.ref_key.as_ptr() as *mut u8, self.ref_key.len_bytes()); } } } /// This method is mandatory, but it should not be used for reading the /// content of the underlying key... #[allow(unreachable_code)] impl<'a, T: Copy, A: KeyAllocator> Deref for ProtKeyWrite<'a, T, A> { type Target = ProtBuf<T, A>; fn deref(&self) -> &ProtBuf<T, A> { unreachable!("key must only be written"); &*self.ref_key } } impl<'a, T: Copy, A: KeyAllocator> DerefMut for ProtKeyWrite<'a, T, A> { fn deref_mut(&mut self) -> &mut ProtBuf<T, A> { &mut *self.ref_key } } #[cfg(test)] mod test { use allocator::ProtectedKeyAllocator; use buf::ProtBuf; use key::{ProtKey, ProtKey8}; #[test] fn test_read() { let s1 = ProtBuf::<u8, ProtectedKeyAllocator>::new_rand_os(256); let s2 = s1.clone(); let key = ProtKey::new(s1); assert_eq!(&**key.read(), &*s2); assert_eq!(&key.read()[..], &s2[..]); assert_eq!(*key.read(), s2); { let r1 = key.read(); let r2 = key.try_read().unwrap(); assert_eq!(r1, r2); assert!(key.try_write().is_none()); let r3 = r1.clone_it(); assert_eq!(r3, r2); } key.read_with(|k| assert_eq!(&k[..], &*s2)); assert!(key.try_write().is_some()); } #[test] fn test_write() { let zero = ProtBuf::<u8, ProtectedKeyAllocator>::new_zero(256); let key = ProtBuf::<u8, ProtectedKeyAllocator>::new_rand_os(256).into_key(); for i in key.write().iter_mut() { *i = 0; } assert_eq!(*key.read(), zero); { let _w = key.write(); assert!(key.try_write().is_none()); assert!(key.try_read().is_none()); } let mut c = 0_usize; key.write_with(|k| {k[42] = 42; c = 1;}); assert_eq!(c, 1); assert!(key.try_write().is_some()); assert!(key.try_read().is_some()); } #[test] fn test_default_params() { let b = ProtBuf::new_zero(42); let _: ProtKey8 = ProtKey::new(b); let b = ProtBuf::new_zero(42); let _: ProtKey<u8> = ProtKey::new(b); } }
pub fn clone_it(&self) -> ProtKeyRead<T, A> { ProtKeyRead::new(Ref::clone(&self.ref_key), self.read_ctr.clone()) }
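The guard-counting protocol that `ProtKeyRead::new`, `clone_it`, and `Drop` implement can be seen in isolation with a plain `Rc<Cell<usize>>`. The standalone sketch below is not part of the `tars` API; it just prints where the real code would change memory protection, to make the first-reader/last-reader transitions visible:

```rust
use std::cell::Cell;
use std::rc::Rc;

// Stand-in for ProtKeyRead: the first guard "unprotects", the last guard
// dropped "re-protects".
struct ReadGuard {
    ctr: Rc<Cell<usize>>,
}

impl ReadGuard {
    fn new(ctr: Rc<Cell<usize>>) -> ReadGuard {
        if ctr.get() == 0 {
            println!("first reader: enable read access");
        }
        ctr.set(ctr.get().checked_add(1).unwrap());
        ReadGuard { ctr }
    }

    // Analogue of `clone_it`: a second guard sharing the same counter.
    fn clone_it(&self) -> ReadGuard {
        ReadGuard::new(self.ctr.clone())
    }
}

impl Drop for ReadGuard {
    fn drop(&mut self) {
        self.ctr.set(self.ctr.get().checked_sub(1).unwrap());
        if self.ctr.get() == 0 {
            println!("last reader dropped: revoke access");
        }
    }
}

fn main() {
    let ctr = Rc::new(Cell::new(0));
    let a = ReadGuard::new(ctr.clone()); // prints "first reader..."
    let b = a.clone_it();                // counter goes to 2, no print
    drop(a);                             // counter back to 1, still readable
    drop(b);                             // prints "last reader dropped..."
}
```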
random_line_split
key.rs
//! Protected key //! use std::cell::{Cell, Ref, RefCell, RefMut, BorrowState}; use std::fmt::{self, Debug}; use std::ops::{Deref, DerefMut}; use std::rc::Rc; use allocator::{Allocator, KeyAllocator, DefaultKeyAllocator}; use buf::ProtBuf; /// Key of bytes pub type ProtKey8<A = DefaultKeyAllocator> = ProtKey<u8, A>; const NOREAD: usize = 0; /// A protected key /// /// Transform a `ProtBuf` instance into a protected key `ProtKey` and provide /// tigher access control on its memory. /// /// By default a `ProtKey` cannot be read nor written to and will only /// provide separated accesses with limited scopes. Thus, RAII accessor /// methods must be used to read and write to a `ProtKey`. Accessing the /// underlying key is a bit similar to the way of manipulating an object /// wrapped in `RefCell`. /// /// ```rust /// # extern crate tars; /// # use tars::allocator::ProtectedKeyAllocator; /// # use tars::{ProtKey, ProtBuf, ProtKey8}; /// # fn encrypt(_: &[u8], _: &[u8]) {} /// # fn main() { /// // Instantiate a new buffer initialized with random bytes. /// // Same as an usual ProtBuf instance but with a different allocator /// let buf_rnd = ProtBuf::<u8, ProtectedKeyAllocator>::new_rand_os(32); /// /// // Until here memory buffer is read/write. Turns-it into a key /// let key = ProtKey::new(buf_rnd); /// /// // Or more simply, like this with exactly the same result /// let key: ProtKey8 = ProtBuf::new_rand_os(32).into_key(); /// /// { /// // Request access in read-mode /// let key_read = key.read(); /// let byte = key_read[16]; /// // ... /// } // Relinquish its read-access /// /// // Alternative way to read its content /// key.read_with(|k| encrypt(&k[..], b"abc")); /// /// // Access it in write-mode /// let key_write = key.try_write(); /// if let Some(mut kw) = key_write { /// kw[16] = 42; /// } /// # } /// ``` pub struct ProtKey<T: Copy, A: KeyAllocator = DefaultKeyAllocator> { key: RefCell<ProtBuf<T, A>>, read_ctr: Rc<Cell<usize>> } impl<T: Copy, A: KeyAllocator> ProtKey<T, A> { /// Take ownership of `prot_buf` and transform it into a `ProtKey`. By /// default prevent any access. pub fn new(prot_buf: ProtBuf<T, A>) -> ProtKey<T, A> { unsafe { <A as KeyAllocator>::protect_none(prot_buf.as_ptr() as *mut u8, prot_buf.len_bytes()); } ProtKey { key: RefCell::new(prot_buf), read_ctr: Rc::new(Cell::new(NOREAD)) } } /// Consume and copy `prot_buf` to force using `ProtKey`'s allocator. /// If `prot_buf` already uses a `KeyAllocator` there is no need to make /// a copy so directly call the default cstor `new` instead. pub fn from_buf<B: Allocator>(prot_buf: ProtBuf<T, B>) -> ProtKey<T, A> { let buf = ProtBuf::from_slice(&prot_buf); ProtKey::new(buf) } /// Return a wrapper to the key in read mode. This method `panic!` if /// this key is already accessed in write mode. // FIXME: Not sure if it's the best interface to provide a `try_read` // variant to this `fail`ing method. It would maybe be better to // implement a single method returning a `Result`. See this RFC // https://github.com/rust-lang/rfcs/blob/master/text/0236-error-conventions.md pub fn read(&self) -> ProtKeyRead<T, A> { ProtKeyRead::new(self.key.borrow(), self.read_ctr.clone()) } /// Return a wrapper to the key in read mode. Return `None` /// if the key is already accessed in write mode. pub fn try_read(&self) -> Option<ProtKeyRead<T, A>> { match self.key.borrow_state() { BorrowState::Reading|BorrowState::Unused => Some(self.read()), _ => None } } /// Access the key in read mode and pass a reference to closure `f`. 
/// The key can only be read during this call. This method will `panic!` /// if a read access cannot be acquired on this key. pub fn read_with<F>(&self, mut f: F) where F: FnMut(ProtKeyRead<T, A>){ f(self.read()) } /// Return a wrapper to the key in write mode. This method `panic!` if /// the key is already currently accessed in read or write mode. pub fn write(&self) -> ProtKeyWrite<T, A> { let key_write = ProtKeyWrite::new(self.key.borrow_mut()); assert_eq!(self.read_ctr.get(), NOREAD); key_write } /// Return a wrapper to the key in write mode. Return `None` /// if the key is already accessed in read or write mode. pub fn try_write(&self) -> Option<ProtKeyWrite<T, A>> { match self.key.borrow_state() { BorrowState::Unused => Some(self.write()), _ => None } } /// Access the key in write mode and pass a reference to closure `f`. /// The key can only be writtent during this call. This method will /// `panic!` if a write access cannot be acquired on this key. pub fn write_with<F>(&self, mut f: F) where F: FnMut(&mut ProtKeyWrite<T, A>) { f(&mut self.write()) } } impl<T: Copy, A: KeyAllocator> Drop for ProtKey<T, A> { fn drop(&mut self) { // FIXME: without this assert this drop is useless. assert_eq!(self.read_ctr.get(), NOREAD); } } impl<T: Copy, A: KeyAllocator> Clone for ProtKey<T, A> { fn
(&self) -> ProtKey<T, A> { ProtKey::new(self.read().clone()) } } impl<T: Copy, A: KeyAllocator> PartialEq for ProtKey<T, A> { fn eq(&self, other: &ProtKey<T, A>) -> bool { match (self.try_read(), other.try_read()) { (Some(ref s), Some(ref o)) => *s == *o, (_, _) => false } } } impl<T: Debug + Copy, A: KeyAllocator> Debug for ProtKey<T, A> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self.try_read() { Some(r) => r.fmt(f), None => Err(fmt::Error) } } } /// An RAII protected key with read access /// /// This instance is the result of a `read` request on a `ProtKey`. If no /// other similar instance on the same `ProtKey` exists, raw memory access /// will be revoked when this instance is destructed. pub struct ProtKeyRead<'a, T: Copy + 'a, A: KeyAllocator + 'a> { ref_key: Ref<'a, ProtBuf<T, A>>, read_ctr: Rc<Cell<usize>> } impl<'a, T: Copy, A: KeyAllocator> ProtKeyRead<'a, T, A> { fn new(ref_key: Ref<'a, ProtBuf<T, A>>, read_ctr: Rc<Cell<usize>>) -> ProtKeyRead<'a, T, A> { if read_ctr.get() == NOREAD { unsafe { <A as KeyAllocator>::protect_read(ref_key.as_ptr() as *mut u8, ref_key.len_bytes()); } } read_ctr.set(read_ctr.get().checked_add(1).unwrap()); ProtKeyRead { ref_key: ref_key, read_ctr: read_ctr } } /// Clone this instance. // FIXME: Currently does not implement `clone()` as it would interfere // with `ProtKey::clone()`. pub fn clone_it(&self) -> ProtKeyRead<T, A> { ProtKeyRead::new(Ref::clone(&self.ref_key), self.read_ctr.clone()) } } impl<'a, T: Copy, A: KeyAllocator> Drop for ProtKeyRead<'a, T, A> { fn drop(&mut self) { self.read_ctr.set(self.read_ctr.get().checked_sub(1).unwrap()); if self.read_ctr.get() == NOREAD { unsafe { <A as KeyAllocator>::protect_none( self.ref_key.as_ptr() as *mut u8, self.ref_key.len_bytes()); } } } } impl<'a, T: Copy, A: KeyAllocator> Deref for ProtKeyRead<'a, T, A> { type Target = ProtBuf<T, A>; fn deref(&self) -> &ProtBuf<T, A> { &*self.ref_key } } impl<'a, T: Copy, A: KeyAllocator> AsRef<[T]> for ProtKeyRead<'a, T, A> { fn as_ref(&self) -> &[T] { &***self } } impl<'a, T: Copy, A: KeyAllocator> PartialEq for ProtKeyRead<'a, T, A> { fn eq(&self, other: &ProtKeyRead<T, A>) -> bool { **self == **other } } impl<'a, T: Debug + Copy, A: KeyAllocator> Debug for ProtKeyRead<'a, T, A> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.ref_key.fmt(f) } } /// An RAII protected key with write access /// /// This instance is the result of a `write` request on a `ProtKey`. Its /// raw memory may only be written during the lifetime of this object. pub struct ProtKeyWrite<'a, T: Copy + 'a, A: KeyAllocator + 'a> { ref_key: RefMut<'a, ProtBuf<T, A>>, } impl<'a, T: Copy, A: KeyAllocator> ProtKeyWrite<'a, T, A> { fn new(ref_key: RefMut<'a, ProtBuf<T, A>>) -> ProtKeyWrite<'a, T, A> { unsafe { <A as KeyAllocator>::protect_write(ref_key.as_ptr() as *mut u8, ref_key.len_bytes()); } ProtKeyWrite { ref_key: ref_key, } } } impl<'a, T: Copy, A: KeyAllocator> Drop for ProtKeyWrite<'a, T, A> { fn drop(&mut self) { unsafe { <A as KeyAllocator>::protect_none(self.ref_key.as_ptr() as *mut u8, self.ref_key.len_bytes()); } } } /// This method is mandatory, but it should not be used for reading the /// content of the underlying key... 
#[allow(unreachable_code)] impl<'a, T: Copy, A: KeyAllocator> Deref for ProtKeyWrite<'a, T, A> { type Target = ProtBuf<T, A>; fn deref(&self) -> &ProtBuf<T, A> { unreachable!("key must only be written"); &*self.ref_key } } impl<'a, T: Copy, A: KeyAllocator> DerefMut for ProtKeyWrite<'a, T, A> { fn deref_mut(&mut self) -> &mut ProtBuf<T, A> { &mut *self.ref_key } } #[cfg(test)] mod test { use allocator::ProtectedKeyAllocator; use buf::ProtBuf; use key::{ProtKey, ProtKey8}; #[test] fn test_read() { let s1 = ProtBuf::<u8, ProtectedKeyAllocator>::new_rand_os(256); let s2 = s1.clone(); let key = ProtKey::new(s1); assert_eq!(&**key.read(), &*s2); assert_eq!(&key.read()[..], &s2[..]); assert_eq!(*key.read(), s2); { let r1 = key.read(); let r2 = key.try_read().unwrap(); assert_eq!(r1, r2); assert!(key.try_write().is_none()); let r3 = r1.clone_it(); assert_eq!(r3, r2); } key.read_with(|k| assert_eq!(&k[..], &*s2)); assert!(key.try_write().is_some()); } #[test] fn test_write() { let zero = ProtBuf::<u8, ProtectedKeyAllocator>::new_zero(256); let key = ProtBuf::<u8, ProtectedKeyAllocator>::new_rand_os(256).into_key(); for i in key.write().iter_mut() { *i = 0; } assert_eq!(*key.read(), zero); { let _w = key.write(); assert!(key.try_write().is_none()); assert!(key.try_read().is_none()); } let mut c = 0_usize; key.write_with(|k| {k[42] = 42; c = 1;}); assert_eq!(c, 1); assert!(key.try_write().is_some()); assert!(key.try_read().is_some()); } #[test] fn test_default_params() { let b = ProtBuf::new_zero(42); let _: ProtKey8 = ProtKey::new(b); let b = ProtBuf::new_zero(42); let _: ProtKey<u8> = ProtKey::new(b); } }
clone
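The hole above is just the method name `clone`; its body, visible earlier in the file, builds a new key through a temporary read guard (`ProtKey::new(self.read().clone())`). Combined with the `PartialEq` impl, which calls `try_read` on both sides, this implies an easy-to-miss edge case: a key that is currently write-locked compares unequal, even to an identical copy. A small sketch under those assumptions, using only constructors that appear in this file's tests:

```rust
use tars::allocator::ProtectedKeyAllocator;
use tars::ProtBuf;

fn eq_edge_case() {
    let key = ProtBuf::<u8, ProtectedKeyAllocator>::new_zero(32).into_key();
    let copy = key.clone(); // takes (and releases) a temporary read guard

    assert!(key == copy); // both readable: compared byte-for-byte

    let _w = key.write(); // exclusive write access held on `key`
    assert!(!(key == copy)); // try_read on `key` now fails, so eq returns false
}
```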
identifier_name
laptop.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT. // source: laptop.proto package pc import ( context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" timestamp "github.com/golang/protobuf/ptypes/timestamp" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type Laptop struct { ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` Brand string `protobuf:"bytes,2,opt,name=brand,proto3" json:"brand,omitempty"` Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` Cpu *CPU `protobuf:"bytes,4,opt,name=cpu,proto3" json:"cpu,omitempty"` Ram *Memory `protobuf:"bytes,5,opt,name=ram,proto3" json:"ram,omitempty"` Gpus []*GPU `protobuf:"bytes,6,rep,name=gpus,proto3" json:"gpus,omitempty"` Storages []*Storage `protobuf:"bytes,7,rep,name=storages,proto3" json:"storages,omitempty"` Screen *Screen `protobuf:"bytes,8,opt,name=screen,proto3" json:"screen,omitempty"` Keyboard *Keyboard `protobuf:"bytes,9,opt,name=keyboard,proto3" json:"keyboard,omitempty"` // Types that are valid to be assigned to Weight: // *Laptop_WeightKg // *Laptop_WeightLb Weight isLaptop_Weight `protobuf_oneof:"weight"` PriceUsd float64 `protobuf:"fixed64,12,opt,name=price_usd,json=priceUsd,proto3" json:"price_usd,omitempty"` ReleaseYear uint32 `protobuf:"varint,13,opt,name=release_year,json=releaseYear,proto3" json:"release_year,omitempty"` UpdatedAt *timestamp.Timestamp `protobuf:"bytes,14,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Laptop) Reset() { *m = Laptop{} } func (m *Laptop) String() string { return proto.CompactTextString(m) } func (*Laptop) ProtoMessage() {} func (*Laptop) Descriptor() ([]byte, []int) { return fileDescriptor_28a7e4886f546705, []int{0} } func (m *Laptop) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Laptop.Unmarshal(m, b) } func (m *Laptop) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Laptop.Marshal(b, m, deterministic) } func (m *Laptop) XXX_Merge(src proto.Message) { xxx_messageInfo_Laptop.Merge(m, src) } func (m *Laptop) XXX_Size() int { return xxx_messageInfo_Laptop.Size(m) } func (m *Laptop) XXX_DiscardUnknown() { xxx_messageInfo_Laptop.DiscardUnknown(m) } var xxx_messageInfo_Laptop proto.InternalMessageInfo func (m *Laptop) GetId() string { if m != nil { return m.ID } return "" } func (m *Laptop) GetBrand() string { if m != nil { return m.Brand } return "" } func (m *Laptop) GetName() string { if m != nil { return m.Name } return "" } func (m *Laptop) GetCpu() *CPU { if m != nil { return m.Cpu } return nil } func (m *Laptop) GetRam() *Memory { if m != nil { return m.Ram } return nil } func (m *Laptop) GetGpus() []*GPU { if m != nil { return m.Gpus } return nil } func (m *Laptop) GetStorages() []*Storage { if m != nil { return m.Storages } return nil } func (m *Laptop) GetScreen() *Screen { if m != nil { return m.Screen } return nil } func (m *Laptop) 
GetKeyboard() *Keyboard { if m != nil { return m.Keyboard } return nil } type isLaptop_Weight interface { isLaptop_Weight() } type Laptop_WeightKg struct { WeightKg float64 `protobuf:"fixed64,10,opt,name=weight_kg,json=weightKg,proto3,oneof"` } type Laptop_WeightLb struct { WeightLb float64 `protobuf:"fixed64,11,opt,name=weight_lb,json=weightLb,proto3,oneof"` } func (*Laptop_WeightKg) isLaptop_Weight() {} func (*Laptop_WeightLb) isLaptop_Weight() {} func (m *Laptop) GetWeight() isLaptop_Weight { if m != nil { return m.Weight } return nil } func (m *Laptop) GetWeightKg() float64 { if x, ok := m.GetWeight().(*Laptop_WeightKg); ok { return x.WeightKg } return 0 } func (m *Laptop) GetWeightLb() float64 { if x, ok := m.GetWeight().(*Laptop_WeightLb); ok { return x.WeightLb } return 0 } func (m *Laptop) GetPriceUsd() float64 { if m != nil { return m.PriceUsd } return 0 } func (m *Laptop) GetReleaseYear() uint32 { if m != nil { return m.ReleaseYear } return 0 } func (m *Laptop) GetUpdatedAt() *timestamp.Timestamp { if m != nil { return m.UpdatedAt } return nil } // XXX_OneofWrappers is for the internal use of the proto package. func (*Laptop) XXX_OneofWrappers() []interface{} { return []interface{}{ (*Laptop_WeightKg)(nil), (*Laptop_WeightLb)(nil), } } type CreateLaptopRequest struct { Laptop *Laptop `protobuf:"bytes,1,opt,name=laptop,proto3" json:"laptop,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *CreateLaptopRequest) Reset() { *m = CreateLaptopRequest{} } func (m *CreateLaptopRequest) String() string { return proto.CompactTextString(m) } func (*CreateLaptopRequest) ProtoMessage() {} func (*CreateLaptopRequest) Descriptor() ([]byte, []int) { return fileDescriptor_28a7e4886f546705, []int{1} } func (m *CreateLaptopRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CreateLaptopRequest.Unmarshal(m, b) } func (m *CreateLaptopRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_CreateLaptopRequest.Marshal(b, m, deterministic) } func (m *CreateLaptopRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_CreateLaptopRequest.Merge(m, src) } func (m *CreateLaptopRequest) XXX_Size() int { return xxx_messageInfo_CreateLaptopRequest.Size(m) } func (m *CreateLaptopRequest) XXX_DiscardUnknown() { xxx_messageInfo_CreateLaptopRequest.DiscardUnknown(m) } var xxx_messageInfo_CreateLaptopRequest proto.InternalMessageInfo func (m *CreateLaptopRequest) GetLaptop() *Laptop { if m != nil { return m.Laptop } return nil } type CreateLaptopResponse struct { ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *CreateLaptopResponse) Reset() { *m = CreateLaptopResponse{} } func (m *CreateLaptopResponse) String() string { return proto.CompactTextString(m) } func (*CreateLaptopResponse) ProtoMessage() {} func (*CreateLaptopResponse) Descriptor() ([]byte, []int) { return fileDescriptor_28a7e4886f546705, []int{2} } func (m *CreateLaptopResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CreateLaptopResponse.Unmarshal(m, b) } func (m *CreateLaptopResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_CreateLaptopResponse.Marshal(b, m, deterministic) } func (m *CreateLaptopResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_CreateLaptopResponse.Merge(m, src) } func (m *CreateLaptopResponse) XXX_Size() int { 
return xxx_messageInfo_CreateLaptopResponse.Size(m) } func (m *CreateLaptopResponse) XXX_DiscardUnknown() { xxx_messageInfo_CreateLaptopResponse.DiscardUnknown(m) } var xxx_messageInfo_CreateLaptopResponse proto.InternalMessageInfo func (m *CreateLaptopResponse) GetId() string { if m != nil
return "" } func init() { proto.RegisterType((*Laptop)(nil), "pc.Laptop") proto.RegisterType((*CreateLaptopRequest)(nil), "pc.CreateLaptopRequest") proto.RegisterType((*CreateLaptopResponse)(nil), "pc.CreateLaptopResponse") } func init() { proto.RegisterFile("laptop.proto", fileDescriptor_28a7e4886f546705) } var fileDescriptor_28a7e4886f546705 = []byte{ // 459 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0xcf, 0x6f, 0xd3, 0x30, 0x14, 0xc7, 0x49, 0xd3, 0x76, 0xe9, 0x6b, 0x5a, 0x24, 0x33, 0x09, 0xd3, 0x81, 0x08, 0x3d, 0x40, 0x4e, 0xa9, 0x34, 0x4e, 0x3b, 0x6e, 0x3b, 0x80, 0xb4, 0x21, 0x4d, 0x2e, 0x3d, 0xc0, 0xa5, 0x72, 0x92, 0x47, 0x88, 0xd6, 0xd4, 0xc6, 0x76, 0x40, 0xfd, 0x37, 0xf9, 0x8b, 0x50, 0x6c, 0x67, 0xec, 0xc7, 0xcd, 0xef, 0xf3, 0xfd, 0x5a, 0xef, 0x27, 0xc4, 0x3b, 0x2e, 0x8d, 0x90, 0x99, 0x54, 0xc2, 0x08, 0x32, 0x90, 0xc5, 0xe2, 0xb9, 0x54, 0xa2, 0x40, 0xad, 0x85, 0x72, 0x70, 0x11, 0x37, 0xd8, 0x08, 0x75, 0xf0, 0xd1, 0x4c, 0x1b, 0xa1, 0x78, 0x85, 0xbd, 0xa8, 0x0b, 0x85, 0xb8, 0xf7, 0xd1, 0xfc, 0x16, 0x0f, 0xb9, 0xe0, 0xaa, 0xf4, 0xf1, 0xdb, 0x4a, 0x88, 0x6a, 0x87, 0x2b, 0x1b, 0xe5, 0xed, 0x8f, 0x95, 0xa9, 0x1b, 0xd4, 0x86, 0x37, 0x3e, 0xe1, 0xf2, 0x6f, 0x08, 0xe3, 0x6b, 0x5b, 0x01, 0x99, 0xc3, 0xa0, 0x2e, 0x69, 0x90, 0x04, 0xe9, 0x84, 0x0d, 0xea, 0x92, 0x1c, 0xc3, 0x28, 0x57, 0x7c, 0x5f, 0xd2, 0x81, 0x45, 0x2e, 0x20, 0x04, 0x86, 0x7b, 0xde, 0x20, 0x0d, 0x2d, 0xb4, 0x6f, 0xf2, 0x0a, 0xc2, 0x42, 0xb6, 0x74, 0x98, 0x04, 0xe9, 0xf4, 0xf4, 0x28, 0x93, 0x45, 0x76, 0x79, 0xb3, 0x61, 0x1d, 0x23, 0xaf, 0x21, 0x54, 0xbc, 0xa1, 0x23, 0x2b, 0x41, 0x27, 0x7d, 0xb1, 0xcd, 0xb0, 0x0e, 0x93, 0x13, 0x18, 0x56, 0xb2, 0xd5, 0x74, 0x9c, 0x84, 0xfd, 0xcf, 0x4f, 0x37, 0x1b, 0x66, 0x21, 0xf9, 0x00, 0x91, 0x6f, 0x55, 0xd3, 0x23, 0x6b, 0x98, 0x76, 0x86, 0xb5, 0x63, 0xec, 0x4e, 0x24, 0x4b, 0x18, 0xbb, 0x21, 0xd0, 0xe8, 0x7f, 0x9a, 0xb5, 0x25, 0xcc, 0x2b, 0x24, 0x85, 0xa8, 0x1f, 0x0d, 0x9d, 0x58, 0x57, 0xdc, 0xb9, 0xae, 0x3c, 0x63, 0x77, 0x2a, 0x79, 0x03, 0x93, 0x3f, 0x58, 0x57, 0x3f, 0xcd, 0xf6, 0xb6, 0xa2, 0x90, 0x04, 0x69, 0xf0, 0xf9, 0x19, 0x8b, 0x1c, 0xba, 0xaa, 0xee, 0xc9, 0xbb, 0x9c, 0x4e, 0x1f, 0xca, 0xd7, 0x39, 0x39, 0x81, 0x89, 0x54, 0x75, 0x81, 0xdb, 0x56, 0x97, 0x34, 0xee, 0x64, 0x16, 0x59, 0xb0, 0xd1, 0x25, 0x79, 0x07, 0xb1, 0xc2, 0x1d, 0x72, 0x8d, 0xdb, 0x03, 0x72, 0x45, 0x67, 0x49, 0x90, 0xce, 0xd8, 0xd4, 0xb3, 0x6f, 0xc8, 0x15, 0x39, 0x03, 0x68, 0x65, 0xc9, 0x0d, 0x96, 0x5b, 0x6e, 0xe8, 0xdc, 0x56, 0xba, 0xc8, 0xdc, 0x16, 0xb3, 0x7e, 0x8b, 0xd9, 0xd7, 0x7e, 0x8b, 0x6c, 0xe2, 0xdd, 0xe7, 0xe6, 0x22, 0x82, 0xb1, 0x2b, 0x63, 0x79, 0x06, 0x2f, 0x2e, 0x15, 0x72, 0x83, 0x6e, 0xb3, 0x0c, 0x7f, 0xb5, 0xa8, 0x4d, 0x37, 0x27, 0x77, 0x6c, 0x76, 0xc9, 0x7e, 0x4e, 0xde, 0xe2, 0x95, 0xe5, 0x7b, 0x38, 0x7e, 0xf8, 0x55, 0x4b, 0xb1, 0xd7, 0xf8, 0xf8, 0x38, 0x4e, 0x19, 0xcc, 0x9c, 0x63, 0x8d, 0xea, 0x77, 0x5d, 0x20, 0x39, 0x87, 0xf8, 0xfe, 0x47, 0xf2, 0xd2, 0x9e, 0xc1, 0xd3, 0x2a, 0x16, 0xf4, 0xa9, 0xe0, 0x72, 0x5c, 0x8c, 0xbe, 0x87, 0x2b, 0x59, 0xe4, 0x63, 0xdb, 0xe6, 0xc7, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x25, 0xab, 0x41, 0x1a, 0x03, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion6 // LaptopServiceClient is the client API for LaptopService service. 
// // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type LaptopServiceClient interface { CreateLaptop(ctx context.Context, in *CreateLaptopRequest, opts ...grpc.CallOption) (*CreateLaptopResponse, error) } type laptopServiceClient struct { cc grpc.ClientConnInterface } func NewLaptopServiceClient(cc grpc.ClientConnInterface) LaptopServiceClient { return &laptopServiceClient{cc} } func (c *laptopServiceClient) CreateLaptop(ctx context.Context, in *CreateLaptopRequest, opts ...grpc.CallOption) (*CreateLaptopResponse, error) { out := new(CreateLaptopResponse) err := c.cc.Invoke(ctx, "/pc.LaptopService/CreateLaptop", in, out, opts...) if err != nil { return nil, err } return out, nil } // LaptopServiceServer is the server API for LaptopService service. type LaptopServiceServer interface { CreateLaptop(context.Context, *CreateLaptopRequest) (*CreateLaptopResponse, error) } // UnimplementedLaptopServiceServer can be embedded to have forward compatible implementations. type UnimplementedLaptopServiceServer struct { } func (*UnimplementedLaptopServiceServer) CreateLaptop(ctx context.Context, req *CreateLaptopRequest) (*CreateLaptopResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CreateLaptop not implemented") } func RegisterLaptopServiceServer(s *grpc.Server, srv LaptopServiceServer) { s.RegisterService(&_LaptopService_serviceDesc, srv) } func _LaptopService_CreateLaptop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CreateLaptopRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(LaptopServiceServer).CreateLaptop(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pc.LaptopService/CreateLaptop", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(LaptopServiceServer).CreateLaptop(ctx, req.(*CreateLaptopRequest)) } return interceptor(ctx, in, info, handler) } var _LaptopService_serviceDesc = grpc.ServiceDesc{ ServiceName: "pc.LaptopService", HandlerType: (*LaptopServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "CreateLaptop", Handler: _LaptopService_CreateLaptop_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "laptop.proto", }
{ return m.ID }
conditional_block
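The generated file above exposes a single unary RPC, LaptopService.CreateLaptop, through the LaptopServiceClient interface and NewLaptopServiceClient constructor. The following is a minimal client-side sketch for illustration only; it is not part of the generated file, and the server address, the laptop field values, and the "pc" import path are placeholder assumptions.

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"

	pc "example.com/pcbook/pc" // hypothetical import path for the generated package
)

func main() {
	// Dial the gRPC server; WithInsecure is used only to keep the sketch short.
	conn, err := grpc.Dial("localhost:8080", grpc.WithInsecure())
	if err != nil {
		log.Fatal("cannot dial server: ", err)
	}
	defer conn.Close()

	// NewLaptopServiceClient is the constructor generated in laptop.pb.go.
	client := pc.NewLaptopServiceClient(conn)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Invoke the unary CreateLaptop RPC with a request wrapping a Laptop message.
	res, err := client.CreateLaptop(ctx, &pc.CreateLaptopRequest{
		Laptop: &pc.Laptop{Brand: "ExampleBrand", Name: "ExampleModel"},
	})
	if err != nil {
		log.Fatal("cannot create laptop: ", err)
	}
	log.Println("created laptop with id:", res.GetId())
}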
laptop.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT. // source: laptop.proto package pc import ( context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" timestamp "github.com/golang/protobuf/ptypes/timestamp" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type Laptop struct { ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` Brand string `protobuf:"bytes,2,opt,name=brand,proto3" json:"brand,omitempty"` Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` Cpu *CPU `protobuf:"bytes,4,opt,name=cpu,proto3" json:"cpu,omitempty"` Ram *Memory `protobuf:"bytes,5,opt,name=ram,proto3" json:"ram,omitempty"` Gpus []*GPU `protobuf:"bytes,6,rep,name=gpus,proto3" json:"gpus,omitempty"` Storages []*Storage `protobuf:"bytes,7,rep,name=storages,proto3" json:"storages,omitempty"` Screen *Screen `protobuf:"bytes,8,opt,name=screen,proto3" json:"screen,omitempty"` Keyboard *Keyboard `protobuf:"bytes,9,opt,name=keyboard,proto3" json:"keyboard,omitempty"` // Types that are valid to be assigned to Weight: // *Laptop_WeightKg // *Laptop_WeightLb Weight isLaptop_Weight `protobuf_oneof:"weight"` PriceUsd float64 `protobuf:"fixed64,12,opt,name=price_usd,json=priceUsd,proto3" json:"price_usd,omitempty"` ReleaseYear uint32 `protobuf:"varint,13,opt,name=release_year,json=releaseYear,proto3" json:"release_year,omitempty"` UpdatedAt *timestamp.Timestamp `protobuf:"bytes,14,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Laptop) Reset() { *m = Laptop{} } func (m *Laptop) String() string { return proto.CompactTextString(m) } func (*Laptop) ProtoMessage() {} func (*Laptop) Descriptor() ([]byte, []int) { return fileDescriptor_28a7e4886f546705, []int{0} } func (m *Laptop) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Laptop.Unmarshal(m, b) } func (m *Laptop) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Laptop.Marshal(b, m, deterministic) } func (m *Laptop) XXX_Merge(src proto.Message) { xxx_messageInfo_Laptop.Merge(m, src) } func (m *Laptop) XXX_Size() int { return xxx_messageInfo_Laptop.Size(m) } func (m *Laptop) XXX_DiscardUnknown() { xxx_messageInfo_Laptop.DiscardUnknown(m) } var xxx_messageInfo_Laptop proto.InternalMessageInfo func (m *Laptop) GetId() string { if m != nil { return m.ID } return "" } func (m *Laptop) GetBrand() string { if m != nil { return m.Brand } return "" } func (m *Laptop) GetName() string { if m != nil { return m.Name } return "" } func (m *Laptop) GetCpu() *CPU { if m != nil { return m.Cpu } return nil } func (m *Laptop) GetRam() *Memory { if m != nil { return m.Ram } return nil } func (m *Laptop) GetGpus() []*GPU { if m != nil { return m.Gpus } return nil } func (m *Laptop) GetStorages() []*Storage { if m != nil { return m.Storages } return nil } func (m *Laptop) GetScreen() *Screen { if m != nil { return m.Screen } return nil } func (m *Laptop) 
GetKeyboard() *Keyboard { if m != nil { return m.Keyboard } return nil } type isLaptop_Weight interface { isLaptop_Weight() } type Laptop_WeightKg struct { WeightKg float64 `protobuf:"fixed64,10,opt,name=weight_kg,json=weightKg,proto3,oneof"` } type Laptop_WeightLb struct { WeightLb float64 `protobuf:"fixed64,11,opt,name=weight_lb,json=weightLb,proto3,oneof"` } func (*Laptop_WeightKg) isLaptop_Weight() {} func (*Laptop_WeightLb) isLaptop_Weight() {} func (m *Laptop) GetWeight() isLaptop_Weight { if m != nil { return m.Weight } return nil } func (m *Laptop) GetWeightKg() float64 { if x, ok := m.GetWeight().(*Laptop_WeightKg); ok { return x.WeightKg } return 0 } func (m *Laptop) GetWeightLb() float64 { if x, ok := m.GetWeight().(*Laptop_WeightLb); ok { return x.WeightLb } return 0 } func (m *Laptop) GetPriceUsd() float64 { if m != nil { return m.PriceUsd } return 0 } func (m *Laptop) GetReleaseYear() uint32 { if m != nil { return m.ReleaseYear } return 0 } func (m *Laptop) GetUpdatedAt() *timestamp.Timestamp { if m != nil { return m.UpdatedAt } return nil } // XXX_OneofWrappers is for the internal use of the proto package. func (*Laptop) XXX_OneofWrappers() []interface{} { return []interface{}{ (*Laptop_WeightKg)(nil), (*Laptop_WeightLb)(nil), } } type CreateLaptopRequest struct { Laptop *Laptop `protobuf:"bytes,1,opt,name=laptop,proto3" json:"laptop,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *CreateLaptopRequest) Reset() { *m = CreateLaptopRequest{} } func (m *CreateLaptopRequest) String() string { return proto.CompactTextString(m) } func (*CreateLaptopRequest) ProtoMessage() {} func (*CreateLaptopRequest) Descriptor() ([]byte, []int) { return fileDescriptor_28a7e4886f546705, []int{1} } func (m *CreateLaptopRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CreateLaptopRequest.Unmarshal(m, b) } func (m *CreateLaptopRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_CreateLaptopRequest.Marshal(b, m, deterministic) } func (m *CreateLaptopRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_CreateLaptopRequest.Merge(m, src) } func (m *CreateLaptopRequest) XXX_Size() int { return xxx_messageInfo_CreateLaptopRequest.Size(m) } func (m *CreateLaptopRequest) XXX_DiscardUnknown() { xxx_messageInfo_CreateLaptopRequest.DiscardUnknown(m) } var xxx_messageInfo_CreateLaptopRequest proto.InternalMessageInfo func (m *CreateLaptopRequest) GetLaptop() *Laptop { if m != nil { return m.Laptop } return nil } type CreateLaptopResponse struct { ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` }
func (m *CreateLaptopResponse) Reset() { *m = CreateLaptopResponse{} } func (m *CreateLaptopResponse) String() string { return proto.CompactTextString(m) } func (*CreateLaptopResponse) ProtoMessage() {} func (*CreateLaptopResponse) Descriptor() ([]byte, []int) { return fileDescriptor_28a7e4886f546705, []int{2} } func (m *CreateLaptopResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CreateLaptopResponse.Unmarshal(m, b) } func (m *CreateLaptopResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_CreateLaptopResponse.Marshal(b, m, deterministic) } func (m *CreateLaptopResponse) XXX_Merge(src proto.Message) { xxx_messageInfo_CreateLaptopResponse.Merge(m, src) } func (m *CreateLaptopResponse) XXX_Size() int { return xxx_messageInfo_CreateLaptopResponse.Size(m) } func (m *CreateLaptopResponse) XXX_DiscardUnknown() { xxx_messageInfo_CreateLaptopResponse.DiscardUnknown(m) } var xxx_messageInfo_CreateLaptopResponse proto.InternalMessageInfo func (m *CreateLaptopResponse) GetId() string { if m != nil { return m.ID } return "" } func init() { proto.RegisterType((*Laptop)(nil), "pc.Laptop") proto.RegisterType((*CreateLaptopRequest)(nil), "pc.CreateLaptopRequest") proto.RegisterType((*CreateLaptopResponse)(nil), "pc.CreateLaptopResponse") } func init() { proto.RegisterFile("laptop.proto", fileDescriptor_28a7e4886f546705) } var fileDescriptor_28a7e4886f546705 = []byte{ // 459 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0xcf, 0x6f, 0xd3, 0x30, 0x14, 0xc7, 0x49, 0xd3, 0x76, 0xe9, 0x6b, 0x5a, 0x24, 0x33, 0x09, 0xd3, 0x81, 0x08, 0x3d, 0x40, 0x4e, 0xa9, 0x34, 0x4e, 0x3b, 0x6e, 0x3b, 0x80, 0xb4, 0x21, 0x4d, 0x2e, 0x3d, 0xc0, 0xa5, 0x72, 0x92, 0x47, 0x88, 0xd6, 0xd4, 0xc6, 0x76, 0x40, 0xfd, 0x37, 0xf9, 0x8b, 0x50, 0x6c, 0x67, 0xec, 0xc7, 0xcd, 0xef, 0xf3, 0xfd, 0x5a, 0xef, 0x27, 0xc4, 0x3b, 0x2e, 0x8d, 0x90, 0x99, 0x54, 0xc2, 0x08, 0x32, 0x90, 0xc5, 0xe2, 0xb9, 0x54, 0xa2, 0x40, 0xad, 0x85, 0x72, 0x70, 0x11, 0x37, 0xd8, 0x08, 0x75, 0xf0, 0xd1, 0x4c, 0x1b, 0xa1, 0x78, 0x85, 0xbd, 0xa8, 0x0b, 0x85, 0xb8, 0xf7, 0xd1, 0xfc, 0x16, 0x0f, 0xb9, 0xe0, 0xaa, 0xf4, 0xf1, 0xdb, 0x4a, 0x88, 0x6a, 0x87, 0x2b, 0x1b, 0xe5, 0xed, 0x8f, 0x95, 0xa9, 0x1b, 0xd4, 0x86, 0x37, 0x3e, 0xe1, 0xf2, 0x6f, 0x08, 0xe3, 0x6b, 0x5b, 0x01, 0x99, 0xc3, 0xa0, 0x2e, 0x69, 0x90, 0x04, 0xe9, 0x84, 0x0d, 0xea, 0x92, 0x1c, 0xc3, 0x28, 0x57, 0x7c, 0x5f, 0xd2, 0x81, 0x45, 0x2e, 0x20, 0x04, 0x86, 0x7b, 0xde, 0x20, 0x0d, 0x2d, 0xb4, 0x6f, 0xf2, 0x0a, 0xc2, 0x42, 0xb6, 0x74, 0x98, 0x04, 0xe9, 0xf4, 0xf4, 0x28, 0x93, 0x45, 0x76, 0x79, 0xb3, 0x61, 0x1d, 0x23, 0xaf, 0x21, 0x54, 0xbc, 0xa1, 0x23, 0x2b, 0x41, 0x27, 0x7d, 0xb1, 0xcd, 0xb0, 0x0e, 0x93, 0x13, 0x18, 0x56, 0xb2, 0xd5, 0x74, 0x9c, 0x84, 0xfd, 0xcf, 0x4f, 0x37, 0x1b, 0x66, 0x21, 0xf9, 0x00, 0x91, 0x6f, 0x55, 0xd3, 0x23, 0x6b, 0x98, 0x76, 0x86, 0xb5, 0x63, 0xec, 0x4e, 0x24, 0x4b, 0x18, 0xbb, 0x21, 0xd0, 0xe8, 0x7f, 0x9a, 0xb5, 0x25, 0xcc, 0x2b, 0x24, 0x85, 0xa8, 0x1f, 0x0d, 0x9d, 0x58, 0x57, 0xdc, 0xb9, 0xae, 0x3c, 0x63, 0x77, 0x2a, 0x79, 0x03, 0x93, 0x3f, 0x58, 0x57, 0x3f, 0xcd, 0xf6, 0xb6, 0xa2, 0x90, 0x04, 0x69, 0xf0, 0xf9, 0x19, 0x8b, 0x1c, 0xba, 0xaa, 0xee, 0xc9, 0xbb, 0x9c, 0x4e, 0x1f, 0xca, 0xd7, 0x39, 0x39, 0x81, 0x89, 0x54, 0x75, 0x81, 0xdb, 0x56, 0x97, 0x34, 0xee, 0x64, 0x16, 0x59, 0xb0, 0xd1, 0x25, 0x79, 0x07, 0xb1, 0xc2, 0x1d, 0x72, 0x8d, 0xdb, 0x03, 0x72, 0x45, 0x67, 0x49, 0x90, 0xce, 0xd8, 0xd4, 0xb3, 0x6f, 0xc8, 0x15, 0x39, 0x03, 0x68, 0x65, 0xc9, 0x0d, 0x96, 0x5b, 
0x6e, 0xe8, 0xdc, 0x56, 0xba, 0xc8, 0xdc, 0x16, 0xb3, 0x7e, 0x8b, 0xd9, 0xd7, 0x7e, 0x8b, 0x6c, 0xe2, 0xdd, 0xe7, 0xe6, 0x22, 0x82, 0xb1, 0x2b, 0x63, 0x79, 0x06, 0x2f, 0x2e, 0x15, 0x72, 0x83, 0x6e, 0xb3, 0x0c, 0x7f, 0xb5, 0xa8, 0x4d, 0x37, 0x27, 0x77, 0x6c, 0x76, 0xc9, 0x7e, 0x4e, 0xde, 0xe2, 0x95, 0xe5, 0x7b, 0x38, 0x7e, 0xf8, 0x55, 0x4b, 0xb1, 0xd7, 0xf8, 0xf8, 0x38, 0x4e, 0x19, 0xcc, 0x9c, 0x63, 0x8d, 0xea, 0x77, 0x5d, 0x20, 0x39, 0x87, 0xf8, 0xfe, 0x47, 0xf2, 0xd2, 0x9e, 0xc1, 0xd3, 0x2a, 0x16, 0xf4, 0xa9, 0xe0, 0x72, 0x5c, 0x8c, 0xbe, 0x87, 0x2b, 0x59, 0xe4, 0x63, 0xdb, 0xe6, 0xc7, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x25, 0xab, 0x41, 0x1a, 0x03, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion6 // LaptopServiceClient is the client API for LaptopService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type LaptopServiceClient interface { CreateLaptop(ctx context.Context, in *CreateLaptopRequest, opts ...grpc.CallOption) (*CreateLaptopResponse, error) } type laptopServiceClient struct { cc grpc.ClientConnInterface } func NewLaptopServiceClient(cc grpc.ClientConnInterface) LaptopServiceClient { return &laptopServiceClient{cc} } func (c *laptopServiceClient) CreateLaptop(ctx context.Context, in *CreateLaptopRequest, opts ...grpc.CallOption) (*CreateLaptopResponse, error) { out := new(CreateLaptopResponse) err := c.cc.Invoke(ctx, "/pc.LaptopService/CreateLaptop", in, out, opts...) if err != nil { return nil, err } return out, nil } // LaptopServiceServer is the server API for LaptopService service. type LaptopServiceServer interface { CreateLaptop(context.Context, *CreateLaptopRequest) (*CreateLaptopResponse, error) } // UnimplementedLaptopServiceServer can be embedded to have forward compatible implementations. type UnimplementedLaptopServiceServer struct { } func (*UnimplementedLaptopServiceServer) CreateLaptop(ctx context.Context, req *CreateLaptopRequest) (*CreateLaptopResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CreateLaptop not implemented") } func RegisterLaptopServiceServer(s *grpc.Server, srv LaptopServiceServer) { s.RegisterService(&_LaptopService_serviceDesc, srv) } func _LaptopService_CreateLaptop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CreateLaptopRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(LaptopServiceServer).CreateLaptop(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pc.LaptopService/CreateLaptop", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(LaptopServiceServer).CreateLaptop(ctx, req.(*CreateLaptopRequest)) } return interceptor(ctx, in, info, handler) } var _LaptopService_serviceDesc = grpc.ServiceDesc{ ServiceName: "pc.LaptopService", HandlerType: (*LaptopServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "CreateLaptop", Handler: _LaptopService_CreateLaptop_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "laptop.proto", }
random_line_split
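On the server side, the generated RegisterLaptopServiceServer and UnimplementedLaptopServiceServer are meant to be combined into a concrete service implementation. The sketch below is an assumption for illustration only; the listen address, the "pc" import path, and the echoed response ID are placeholders, not anything defined by the generated file.

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	pc "example.com/pcbook/pc" // hypothetical import path for the generated package
)

// laptopServer embeds the generated UnimplementedLaptopServiceServer so the
// implementation stays forward compatible if new RPCs are added to the service.
type laptopServer struct {
	pc.UnimplementedLaptopServiceServer
}

// CreateLaptop implements the single RPC declared by LaptopServiceServer.
func (s *laptopServer) CreateLaptop(ctx context.Context, req *pc.CreateLaptopRequest) (*pc.CreateLaptopResponse, error) {
	laptop := req.GetLaptop()
	// Persistence is out of scope for this sketch; echo the laptop's ID back.
	return &pc.CreateLaptopResponse{ID: laptop.GetId()}, nil
}

func main() {
	grpcServer := grpc.NewServer()
	pc.RegisterLaptopServiceServer(grpcServer, &laptopServer{})

	lis, err := net.Listen("tcp", ":8080")
	if err != nil {
		log.Fatal("cannot listen: ", err)
	}
	if err := grpcServer.Serve(lis); err != nil {
		log.Fatal("cannot serve: ", err)
	}
}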
laptop.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT. // source: laptop.proto package pc import ( context "context" fmt "fmt" proto "github.com/golang/protobuf/proto" timestamp "github.com/golang/protobuf/ptypes/timestamp" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type Laptop struct { ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` Brand string `protobuf:"bytes,2,opt,name=brand,proto3" json:"brand,omitempty"` Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` Cpu *CPU `protobuf:"bytes,4,opt,name=cpu,proto3" json:"cpu,omitempty"` Ram *Memory `protobuf:"bytes,5,opt,name=ram,proto3" json:"ram,omitempty"` Gpus []*GPU `protobuf:"bytes,6,rep,name=gpus,proto3" json:"gpus,omitempty"` Storages []*Storage `protobuf:"bytes,7,rep,name=storages,proto3" json:"storages,omitempty"` Screen *Screen `protobuf:"bytes,8,opt,name=screen,proto3" json:"screen,omitempty"` Keyboard *Keyboard `protobuf:"bytes,9,opt,name=keyboard,proto3" json:"keyboard,omitempty"` // Types that are valid to be assigned to Weight: // *Laptop_WeightKg // *Laptop_WeightLb Weight isLaptop_Weight `protobuf_oneof:"weight"` PriceUsd float64 `protobuf:"fixed64,12,opt,name=price_usd,json=priceUsd,proto3" json:"price_usd,omitempty"` ReleaseYear uint32 `protobuf:"varint,13,opt,name=release_year,json=releaseYear,proto3" json:"release_year,omitempty"` UpdatedAt *timestamp.Timestamp `protobuf:"bytes,14,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Laptop) Reset() { *m = Laptop{} } func (m *Laptop) String() string { return proto.CompactTextString(m) } func (*Laptop) ProtoMessage() {} func (*Laptop) Descriptor() ([]byte, []int) { return fileDescriptor_28a7e4886f546705, []int{0} } func (m *Laptop) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Laptop.Unmarshal(m, b) } func (m *Laptop) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Laptop.Marshal(b, m, deterministic) } func (m *Laptop) XXX_Merge(src proto.Message) { xxx_messageInfo_Laptop.Merge(m, src) } func (m *Laptop) XXX_Size() int { return xxx_messageInfo_Laptop.Size(m) } func (m *Laptop) XXX_DiscardUnknown() { xxx_messageInfo_Laptop.DiscardUnknown(m) } var xxx_messageInfo_Laptop proto.InternalMessageInfo func (m *Laptop) GetId() string { if m != nil { return m.ID } return "" } func (m *Laptop) GetBrand() string { if m != nil { return m.Brand } return "" } func (m *Laptop) GetName() string { if m != nil { return m.Name } return "" } func (m *Laptop) GetCpu() *CPU { if m != nil { return m.Cpu } return nil } func (m *Laptop) GetRam() *Memory { if m != nil { return m.Ram } return nil } func (m *Laptop) GetGpus() []*GPU { if m != nil { return m.Gpus } return nil } func (m *Laptop) GetStorages() []*Storage { if m != nil { return m.Storages } return nil } func (m *Laptop)
() *Screen { if m != nil { return m.Screen } return nil } func (m *Laptop) GetKeyboard() *Keyboard { if m != nil { return m.Keyboard } return nil } type isLaptop_Weight interface { isLaptop_Weight() } type Laptop_WeightKg struct { WeightKg float64 `protobuf:"fixed64,10,opt,name=weight_kg,json=weightKg,proto3,oneof"` } type Laptop_WeightLb struct { WeightLb float64 `protobuf:"fixed64,11,opt,name=weight_lb,json=weightLb,proto3,oneof"` } func (*Laptop_WeightKg) isLaptop_Weight() {} func (*Laptop_WeightLb) isLaptop_Weight() {} func (m *Laptop) GetWeight() isLaptop_Weight { if m != nil { return m.Weight } return nil } func (m *Laptop) GetWeightKg() float64 { if x, ok := m.GetWeight().(*Laptop_WeightKg); ok { return x.WeightKg } return 0 } func (m *Laptop) GetWeightLb() float64 { if x, ok := m.GetWeight().(*Laptop_WeightLb); ok { return x.WeightLb } return 0 } func (m *Laptop) GetPriceUsd() float64 { if m != nil { return m.PriceUsd } return 0 } func (m *Laptop) GetReleaseYear() uint32 { if m != nil { return m.ReleaseYear } return 0 } func (m *Laptop) GetUpdatedAt() *timestamp.Timestamp { if m != nil { return m.UpdatedAt } return nil } // XXX_OneofWrappers is for the internal use of the proto package. func (*Laptop) XXX_OneofWrappers() []interface{} { return []interface{}{ (*Laptop_WeightKg)(nil), (*Laptop_WeightLb)(nil), } } type CreateLaptopRequest struct { Laptop *Laptop `protobuf:"bytes,1,opt,name=laptop,proto3" json:"laptop,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *CreateLaptopRequest) Reset() { *m = CreateLaptopRequest{} } func (m *CreateLaptopRequest) String() string { return proto.CompactTextString(m) } func (*CreateLaptopRequest) ProtoMessage() {} func (*CreateLaptopRequest) Descriptor() ([]byte, []int) { return fileDescriptor_28a7e4886f546705, []int{1} } func (m *CreateLaptopRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CreateLaptopRequest.Unmarshal(m, b) } func (m *CreateLaptopRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_CreateLaptopRequest.Marshal(b, m, deterministic) } func (m *CreateLaptopRequest) XXX_Merge(src proto.Message) { xxx_messageInfo_CreateLaptopRequest.Merge(m, src) } func (m *CreateLaptopRequest) XXX_Size() int { return xxx_messageInfo_CreateLaptopRequest.Size(m) } func (m *CreateLaptopRequest) XXX_DiscardUnknown() { xxx_messageInfo_CreateLaptopRequest.DiscardUnknown(m) } var xxx_messageInfo_CreateLaptopRequest proto.InternalMessageInfo func (m *CreateLaptopRequest) GetLaptop() *Laptop { if m != nil { return m.Laptop } return nil } type CreateLaptopResponse struct { ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *CreateLaptopResponse) Reset() { *m = CreateLaptopResponse{} } func (m *CreateLaptopResponse) String() string { return proto.CompactTextString(m) } func (*CreateLaptopResponse) ProtoMessage() {} func (*CreateLaptopResponse) Descriptor() ([]byte, []int) { return fileDescriptor_28a7e4886f546705, []int{2} } func (m *CreateLaptopResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CreateLaptopResponse.Unmarshal(m, b) } func (m *CreateLaptopResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_CreateLaptopResponse.Marshal(b, m, deterministic) } func (m *CreateLaptopResponse) XXX_Merge(src proto.Message) { 
xxx_messageInfo_CreateLaptopResponse.Merge(m, src) } func (m *CreateLaptopResponse) XXX_Size() int { return xxx_messageInfo_CreateLaptopResponse.Size(m) } func (m *CreateLaptopResponse) XXX_DiscardUnknown() { xxx_messageInfo_CreateLaptopResponse.DiscardUnknown(m) } var xxx_messageInfo_CreateLaptopResponse proto.InternalMessageInfo func (m *CreateLaptopResponse) GetId() string { if m != nil { return m.ID } return "" } func init() { proto.RegisterType((*Laptop)(nil), "pc.Laptop") proto.RegisterType((*CreateLaptopRequest)(nil), "pc.CreateLaptopRequest") proto.RegisterType((*CreateLaptopResponse)(nil), "pc.CreateLaptopResponse") } func init() { proto.RegisterFile("laptop.proto", fileDescriptor_28a7e4886f546705) } var fileDescriptor_28a7e4886f546705 = []byte{ // 459 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0xcf, 0x6f, 0xd3, 0x30, 0x14, 0xc7, 0x49, 0xd3, 0x76, 0xe9, 0x6b, 0x5a, 0x24, 0x33, 0x09, 0xd3, 0x81, 0x08, 0x3d, 0x40, 0x4e, 0xa9, 0x34, 0x4e, 0x3b, 0x6e, 0x3b, 0x80, 0xb4, 0x21, 0x4d, 0x2e, 0x3d, 0xc0, 0xa5, 0x72, 0x92, 0x47, 0x88, 0xd6, 0xd4, 0xc6, 0x76, 0x40, 0xfd, 0x37, 0xf9, 0x8b, 0x50, 0x6c, 0x67, 0xec, 0xc7, 0xcd, 0xef, 0xf3, 0xfd, 0x5a, 0xef, 0x27, 0xc4, 0x3b, 0x2e, 0x8d, 0x90, 0x99, 0x54, 0xc2, 0x08, 0x32, 0x90, 0xc5, 0xe2, 0xb9, 0x54, 0xa2, 0x40, 0xad, 0x85, 0x72, 0x70, 0x11, 0x37, 0xd8, 0x08, 0x75, 0xf0, 0xd1, 0x4c, 0x1b, 0xa1, 0x78, 0x85, 0xbd, 0xa8, 0x0b, 0x85, 0xb8, 0xf7, 0xd1, 0xfc, 0x16, 0x0f, 0xb9, 0xe0, 0xaa, 0xf4, 0xf1, 0xdb, 0x4a, 0x88, 0x6a, 0x87, 0x2b, 0x1b, 0xe5, 0xed, 0x8f, 0x95, 0xa9, 0x1b, 0xd4, 0x86, 0x37, 0x3e, 0xe1, 0xf2, 0x6f, 0x08, 0xe3, 0x6b, 0x5b, 0x01, 0x99, 0xc3, 0xa0, 0x2e, 0x69, 0x90, 0x04, 0xe9, 0x84, 0x0d, 0xea, 0x92, 0x1c, 0xc3, 0x28, 0x57, 0x7c, 0x5f, 0xd2, 0x81, 0x45, 0x2e, 0x20, 0x04, 0x86, 0x7b, 0xde, 0x20, 0x0d, 0x2d, 0xb4, 0x6f, 0xf2, 0x0a, 0xc2, 0x42, 0xb6, 0x74, 0x98, 0x04, 0xe9, 0xf4, 0xf4, 0x28, 0x93, 0x45, 0x76, 0x79, 0xb3, 0x61, 0x1d, 0x23, 0xaf, 0x21, 0x54, 0xbc, 0xa1, 0x23, 0x2b, 0x41, 0x27, 0x7d, 0xb1, 0xcd, 0xb0, 0x0e, 0x93, 0x13, 0x18, 0x56, 0xb2, 0xd5, 0x74, 0x9c, 0x84, 0xfd, 0xcf, 0x4f, 0x37, 0x1b, 0x66, 0x21, 0xf9, 0x00, 0x91, 0x6f, 0x55, 0xd3, 0x23, 0x6b, 0x98, 0x76, 0x86, 0xb5, 0x63, 0xec, 0x4e, 0x24, 0x4b, 0x18, 0xbb, 0x21, 0xd0, 0xe8, 0x7f, 0x9a, 0xb5, 0x25, 0xcc, 0x2b, 0x24, 0x85, 0xa8, 0x1f, 0x0d, 0x9d, 0x58, 0x57, 0xdc, 0xb9, 0xae, 0x3c, 0x63, 0x77, 0x2a, 0x79, 0x03, 0x93, 0x3f, 0x58, 0x57, 0x3f, 0xcd, 0xf6, 0xb6, 0xa2, 0x90, 0x04, 0x69, 0xf0, 0xf9, 0x19, 0x8b, 0x1c, 0xba, 0xaa, 0xee, 0xc9, 0xbb, 0x9c, 0x4e, 0x1f, 0xca, 0xd7, 0x39, 0x39, 0x81, 0x89, 0x54, 0x75, 0x81, 0xdb, 0x56, 0x97, 0x34, 0xee, 0x64, 0x16, 0x59, 0xb0, 0xd1, 0x25, 0x79, 0x07, 0xb1, 0xc2, 0x1d, 0x72, 0x8d, 0xdb, 0x03, 0x72, 0x45, 0x67, 0x49, 0x90, 0xce, 0xd8, 0xd4, 0xb3, 0x6f, 0xc8, 0x15, 0x39, 0x03, 0x68, 0x65, 0xc9, 0x0d, 0x96, 0x5b, 0x6e, 0xe8, 0xdc, 0x56, 0xba, 0xc8, 0xdc, 0x16, 0xb3, 0x7e, 0x8b, 0xd9, 0xd7, 0x7e, 0x8b, 0x6c, 0xe2, 0xdd, 0xe7, 0xe6, 0x22, 0x82, 0xb1, 0x2b, 0x63, 0x79, 0x06, 0x2f, 0x2e, 0x15, 0x72, 0x83, 0x6e, 0xb3, 0x0c, 0x7f, 0xb5, 0xa8, 0x4d, 0x37, 0x27, 0x77, 0x6c, 0x76, 0xc9, 0x7e, 0x4e, 0xde, 0xe2, 0x95, 0xe5, 0x7b, 0x38, 0x7e, 0xf8, 0x55, 0x4b, 0xb1, 0xd7, 0xf8, 0xf8, 0x38, 0x4e, 0x19, 0xcc, 0x9c, 0x63, 0x8d, 0xea, 0x77, 0x5d, 0x20, 0x39, 0x87, 0xf8, 0xfe, 0x47, 0xf2, 0xd2, 0x9e, 0xc1, 0xd3, 0x2a, 0x16, 0xf4, 0xa9, 0xe0, 0x72, 0x5c, 0x8c, 0xbe, 0x87, 0x2b, 0x59, 0xe4, 0x63, 0xdb, 0xe6, 0xc7, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x25, 0xab, 0x41, 0x1a, 0x03, 
0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion6 // LaptopServiceClient is the client API for LaptopService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type LaptopServiceClient interface { CreateLaptop(ctx context.Context, in *CreateLaptopRequest, opts ...grpc.CallOption) (*CreateLaptopResponse, error) } type laptopServiceClient struct { cc grpc.ClientConnInterface } func NewLaptopServiceClient(cc grpc.ClientConnInterface) LaptopServiceClient { return &laptopServiceClient{cc} } func (c *laptopServiceClient) CreateLaptop(ctx context.Context, in *CreateLaptopRequest, opts ...grpc.CallOption) (*CreateLaptopResponse, error) { out := new(CreateLaptopResponse) err := c.cc.Invoke(ctx, "/pc.LaptopService/CreateLaptop", in, out, opts...) if err != nil { return nil, err } return out, nil } // LaptopServiceServer is the server API for LaptopService service. type LaptopServiceServer interface { CreateLaptop(context.Context, *CreateLaptopRequest) (*CreateLaptopResponse, error) } // UnimplementedLaptopServiceServer can be embedded to have forward compatible implementations. type UnimplementedLaptopServiceServer struct { } func (*UnimplementedLaptopServiceServer) CreateLaptop(ctx context.Context, req *CreateLaptopRequest) (*CreateLaptopResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CreateLaptop not implemented") } func RegisterLaptopServiceServer(s *grpc.Server, srv LaptopServiceServer) { s.RegisterService(&_LaptopService_serviceDesc, srv) } func _LaptopService_CreateLaptop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CreateLaptopRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(LaptopServiceServer).CreateLaptop(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/pc.LaptopService/CreateLaptop", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(LaptopServiceServer).CreateLaptop(ctx, req.(*CreateLaptopRequest)) } return interceptor(ctx, in, info, handler) } var _LaptopService_serviceDesc = grpc.ServiceDesc{ ServiceName: "pc.LaptopService", HandlerType: (*LaptopServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "CreateLaptop", Handler: _LaptopService_CreateLaptop_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "laptop.proto", }
GetScreen
identifier_name
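The Laptop message also carries a weight oneof with the generated wrapper types Laptop_WeightKg and Laptop_WeightLb. A small hedged illustration of how the generated accessors behave follows, assuming the generated package is importable as "pc" (the import path is a placeholder).

package main

import (
	"fmt"

	pc "example.com/pcbook/pc" // hypothetical import path for the generated package
)

func main() {
	// Only one wrapper can be assigned to the Weight oneof at a time.
	laptop := &pc.Laptop{Weight: &pc.Laptop_WeightKg{WeightKg: 2.2}}

	fmt.Println(laptop.GetWeightKg()) // 2.2
	fmt.Println(laptop.GetWeightLb()) // 0, because the oneof currently holds kilograms
}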