| file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
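Each row below appears to be one fill-in-the-middle (FIM) sample: the original Go source is split so that prefix + middle + suffix reconstructs the file, and fim_type names the kind of span that was masked (e.g. conditional_block, identifier_body). A minimal Go sketch of that reconstruction follows; the struct and field names are chosen here for illustration and are not part of the dataset itself.

```go
package main

import "fmt"

// fimRow mirrors the five columns of the table above.
// Field names are illustrative assumptions, not the dataset's own schema names.
type fimRow struct {
	FileName string // e.g. "downloadtaskmgr.go"
	Prefix   string // code before the masked span
	Suffix   string // code after the masked span
	Middle   string // the span the model is asked to fill in
	FimType  string // one of the 4 span classes, e.g. "conditional_block"
}

// reconstruct reassembles the original source: prefix + middle + suffix.
// Note the column order in the table is prefix, suffix, middle.
func reconstruct(r fimRow) string {
	return r.Prefix + r.Middle + r.Suffix
}

func main() {
	// Example modeled on one of the rows below, where the split lands mid-identifier.
	row := fimRow{
		FileName: "main.go",
		Prefix:   "if !exists {\nreturn ni",
		Middle:   "l, fmt.Errorf(\"result of [%s] not found\", name)",
		Suffix:   "\n}",
		FimType:  "identifier_body",
	}
	fmt.Println(reconstruct(row))
}
```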
downloadtaskmgr.go | InLDB := LoopTasksInLDB()
if taskInLDB == nil {
return nil
}
list := []*DownloadTask{}
for _, v := range taskInLDB {
list = append(list, v)
}
return list
}
func TaskSuccess(task *DownloadTask) {
logger.Debug("Task Success", "id", task.Id)
// remove the task from the map
DelTaskFromLDB(task.Id)
DeleteDownloadingTask(task.Id)
if onTaskSuccess == nil {
logger.Error("not define onTaskSuccess")
return
}
onTaskSuccess(task)
}
func TaskFail(task *DownloadTask) {
logger.Debug("Task Fail", "id", task.Id)
// remove the task from the map
DelTaskFromLDB(task.Id)
DeleteDownloadingTask(task.Id)
if onTaskFailed == nil {
logger.Error("not define onTaskFailed")
return
}
onTaskFailed(task)
}
func TaskBreak(task *DownloadTask) {
logger.Debug("Task Break", "id", task.Id)
//delete from runningMap
DeleteDownloadingTask(task.Id)
task.Status = Task_UnStart
//add to queue
channel := task.DownloadChannel
if channel == nil {
logger.Error("Break Task not set channel,back to global list", "taskid", task.Id)
globalDownloadTaskChan <- task
return
}
channel.IdleChan <- task
logger.Debug("add break task to idleChan", "speedLimit", channel.SpeedLimitKBs, "chanLen", len(channel.IdleChan), "taskid", task.Id)
}
func TaskRetry(task *DownloadTask) {
logger.Debug("Task Retry", "id", task.Id)
DeleteDownloadingTask(task.Id)
task.TryTimes++
task.Status = Task_UnStart
globalDownloadTaskChan <- task
}
func StartTask(task *DownloadTask) {
if panicCatcher != nil {
defer panicCatcher()
}
result := ExecDownloadTask(task)
switch result {
case Success:
//logger.Debug("download task success", "id", task.Id)
TaskSuccess(task)
case Fail:
//logger.Debug("download task fail", "id", task.Id)
if task.TryTimes >= 2 {
TaskFail(task)
} else {
// put the task back into the queue and retry
TaskRetry(task)
}
case Break:
//logger.Debug("download task idle", "id", task.Id)
TaskBreak(task)
}
}
func (dc *DownloadChannel) ChannelDownload() {
go func() {
for true {
// acquire a token for this channel's queue
<-dc.RunningCountControlChan
select {
case task := <-dc.IdleChan:
go func() {
defer func() {
dc.RunningCountControlChan <- true
}()
logger.Debug("get a task from idle list", "channel speed", dc.SpeedLimitKBs, "id", task.Id, "chanlen", len(dc.IdleChan))
// run the task
StartTask(task)
}()
}
}
}()
}
func Run() {
RunNewTask()
RunChannelDownload()
//scanloop
go func() {
if panicCatcher != nil {
defer panicCatcher()
}
for true {
time.Sleep(5 * time.Second)
LoopScanRunningTask()
}
}()
}
func RunChannelDownload() {
for _, v := range channelArray {
v.ChannelDownload()
}
}
func RunNewTask() {
go func() {
for true {
<-newRunningTaskControlChan
select {
case task := <-globalDownloadTaskChan:
// start a new download task
go func() {
// return the token when the task finishes
defer func() {
newRunningTaskControlChan <- true
}()
// run the task
//logger.Debug("start a new task", "id", task.Id)
task.Status = Task_Downloading
AddTaskToDownloadingMap(task)
StartTask(task)
}()
}
}
}()
}
func TimeoutDialer(cTimeout time.Duration, rwTimeout time.Duration) func(net, addr string) (c net.Conn, err error) {
return func(netw, addr string) (net.Conn, error) {
conn, err := net.DialTimeout(netw, addr, cTimeout)
if err != nil {
return nil, err
}
if rwTimeout > 0 {
err := conn.SetDeadline(time.Now().Add(rwTimeout))
if err != nil {
logger.Error("set download process rwTimeout error", "err", err)
return nil, err
}
}
return conn, nil
}
}
func ExecDownloadTask(task *DownloadTask) ExecResult {
connectTimeout := 10 * time.Second
readWriteTimeout := 3600 * 12 * time.Second
//readWriteTimeout := time.Duration(0)
url := task.TargetUrl
distFilePath := task.SavePath
cHead := http.Client{
Transport: &http.Transport{
Dial: TimeoutDialer(connectTimeout, readWriteTimeout),
},
}
//get
reqHead, err := http.NewRequest(http.MethodHead, url, nil)
if err == nil {
responseHead, err := cHead.Do(reqHead)
if err == nil {
if responseHead.StatusCode == 200 && responseHead.ContentLength > 0 {
task.FileSize = responseHead.ContentLength
}
}
}
//http client
c := http.Client{
Transport: &http.Transport{
Dial: TimeoutDialer(connectTimeout, readWriteTimeout),
},
}
//get
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
logger.Error("create request error", "err", err)
return Fail
}
//download
response, err := c.Do(req)
if err != nil {
logger.Error("get file url "+url+" error", "err", err)
return Fail
}
if response.StatusCode != 200 {
logger.Error("get file url "+url+" error", "err", err, "statusCode", response.StatusCode)
return Fail
}
//create folder and file
distDir := path.Dir(distFilePath)
err = os.MkdirAll(distDir, os.ModePerm)
if err != nil {
return Fail
}
file, err := os.Create(distFilePath)
if err != nil {
return Fail
}
defer file.Close()
if response.Body == nil {
logger.Error("Download responseBody is null")
return Fail
}
defer response.Body.Close()
task.StartTime = time.Now().Unix()
if onDownloadStart != nil {
go onDownloadStart(task)
}
_, err = copyBuffer(file, response.Body, nil, task)
if err != nil {
os.Remove(distFilePath)
if err.Error() == string(Break) {
//logger.Debug("task break","id",task.Id)
return Break
}
return Fail
}
fileInfo, err := os.Stat(distFilePath)
if err != nil {
logger.Error("Get file Stat error", "err", err)
os.Remove(distFilePath)
return Fail
}
size := fileInfo.Size()
logger.Debug("donwload file,fileInfo", "size", size)
if size == 0 {
os.Remove(distFilePath)
logger.Error("download file size error")
return Fail
}
return Success
}
func copyBuffer(dst io.Writer, src io.Reader, buf []byte, task *DownloadTask) (written int64, err error) {
// If the reader has a WriteTo method, use it to do the copy.
// Avoids an allocation and a copy.
if wt, ok := src.(io.WriterTo); ok {
return wt.WriteTo(dst)
}
// Similarly, if the writer has a ReadFrom method, use it to do the copy.
if rt, ok := dst.(io.ReaderFrom); ok {
return rt.ReadFrom(src)
}
if buf == nil {
size := 32 * 1024
if l, ok := src.(*io.LimitedReader); ok && int64(size) > l.N {
if l.N < 1 {
size = 1
} else {
size = int(l.N)
}
}
buf = make([]byte, size)
}
stop := false
srcWithCloser, ok := src.(io.ReadCloser)
if ok == false {
err = errors.New("to io.ReadCloser error")
return written, err
}
go func() {
for {
time.Sleep(500 * time.Millisecond) //for test
nr, er := srcWithCloser.Read(buf)
if nr > 0 | {
nw, ew := dst.Write(buf[0:nr])
if nw > 0 {
written += int64(nw)
}
if ew != nil {
err = ew
//fmt.Println(ew.Error())
if task.Status == Task_Break &&
(strings.Contains(err.Error(), "http: read on closed response body") ||
strings.Contains(err.Error(), "use of closed network connection")) {
err = errors.New(string(Break))
}
break
}
if nr != nw {
err = io.ErrShortWrite
break
} | conditional_block |
|
main.go |
// OnError defines the error reporting callback
OnError func(err *Error)
)
// Runner is the core runner struct
type Runner struct {
// APIInfo holds the public API documentation
APIInfo *graph.APIInfo
// service executes the registered services
service *runner.Runners
// log is the logger
log context.Log
// logRegister records the api registration process
logRegister bool
// loaders holds the registered loaders
loaders []rj.Loader
// injector manages dependency injection
injector *inject.InjectorManager
// requestObjectManager manages request parameters
requestObjectManager *request.ObjectManager
//groups []*runner.Group
//funcs map[string]*runner.JSONRunner
beforeRun BeforeRun
beforeExecute BeforeExecute
afterRun AfterRun
afterExecute AfterExecute
onError OnError
}
// SetLogger sets the log output
func (r *Runner) SetLogger(log context.Log) *Runner {
r.log = log
return r
}
// SetLogRegister sets whether to log the api registration process
func (r *Runner) SetLogRegister(log bool) *Runner {
r.logRegister = log
return r
}
// ErrorHandler sets the error handling function
func (r *Runner) ErrorHandler(handler OnError) *Runner {
r.onError = handler
return r
}
// BeforeRun intercepts before a batch run
func (r *Runner) BeforeRun(fn BeforeRun) *Runner {
r.beforeRun = fn
return r
}
// BeforeExecute intercepts before each single task is executed
func (r *Runner) BeforeExecute(fn BeforeExecute) *Runner {
r.beforeExecute = fn
return r
}
// AfterRun runs after a batch run
func (r *Runner) AfterRun(fn AfterRun) *Runner {
r.afterRun = fn
return r
}
// AfterExecute intercepts after each single task is executed
func (r *Runner) AfterExecute(fn AfterExecute) *Runner {
r.afterExecute = fn
return r
}
type results struct {
response rj.Response
run *Runner
count int
index int
}
// CallCount returns the number of calls
func (r *results) CallCount() int {
return r.count
}
// CallIndex returns the index of the current call
func (r *results) CallIndex() int {
return r.index
}
func (r *results) Get(method interface{}) ([]*rj.ResponseItem, error) {
jr, err := r.run.service.Find(method)
if err != nil {
return nil, err
}
rsp, exists := r.response[jr.Name]
if !exists {
return ni | lt of [%s] not found", jr.Name)
}
return rsp, nil
}
// New creates a new Runner
func New() *Runner {
//log := logrus.Logger{
// Level: logrus.WarnLevel,
// Formatter: &logrus.TextFormatter{},
//}
return &Runner{
APIInfo: &graph.APIInfo{
Groups: nil,
Request: map[string]*graph.ObjectInfo{},
Response: map[string]*graph.ObjectInfo{},
},
log: &util.Logger{},
loaders: nil,
service: runner.New(),
injector: inject.NewManager(),
requestObjectManager: request.NewRequestObjectManager(),
}
}
// Register registers loaders
func (r *Runner) Register(loaders ...rj.Loader) {
r.loaders = append(r.loaders, loaders...)
}
// RegisterProvider registers injection functions
func (r *Runner) RegisterProvider(fns ...interface{}) error {
for _, fn := range fns {
if err := r.injector.Register(fn); err != nil {
return err
}
}
return nil
}
// RegisterAccessController registers an injection function that also performs access control
func (r *Runner) RegisterAccessController(fn interface{}) error {
return r.injector.RegisterAccessController(fn)
}
// InjectProxy registers a proxy injection
// func (r *Runner) InjectProxy(fn interface{}, injectType reflect.Type, proxyFn interface{}) error {
// return r.injector.RegisterWithProxy(fn, injectType, proxyFn)
// }
func (r *Runner) execute(ctx *context.Context, injectMap map[reflect.Type]reflect.Value, request *rj.Request, rslt *results, onResponse func(key string, rsp *rj.ResponseItem)) {
defer func() {
if err := recover(); err != nil {
onResponse(request.Service, &rj.ResponseItem{
Error: fmt.Sprintf("%v", err),
Data: nil,
})
}
}()
//resKey := request.Service
var rsp *rj.ResponseItem
svc := r.service.Get(request.Service)
if svc != nil {
res, err := svc.Run(ctx, request.Args, injectMap, rslt)
if err != nil {
rsp = &rj.ResponseItem{
Error: err.Error(),
DataType: svc.ReturnType,
}
} else {
rsp = &rj.ResponseItem{
Error: "",
Data: res,
DataType: svc.ReturnType,
}
}
} else {
rsp = &rj.ResponseItem{
Error: "No service named " + request.Service,
}
}
onResponse(request.Service, rsp)
}
func (r *Runner) checkAccess(reqs rj.Requests, ctx *context.Context, responseContext rj.ResponseContext) (map[reflect.Type]reflect.Value, error) {
accessInject := map[reflect.Type]reflect.Value{}
for _, req := range reqs {
svc := r.service.Get(req.Service)
if svc == nil {
// service not found
return nil, fmt.Errorf("no service named %s", req.Service)
}
for _, ac := range svc.AccessControllers {
val, err := ac.Call(req.Service, responseContext, ctx.Param)
if err != nil {
return nil, err
}
accessInject[ac.Type] = val
}
}
return accessInject, nil
}
func (r *Runner) doRun(ctx *context.Context, reqs rj.Requests, returnFn func(rj.Response, error)) {
defer func() {
if err := recover(); err != nil {
returnFn(nil, errors.New(err.(string)))
}
}()
//r.log.Debug("Requests: \n%s", data)
if r.beforeRun != nil {
if err := r.beforeRun(ctx, reqs); err != nil {
returnFn(nil, err)
return
}
}
response := rj.Response{}
rslt := &results{
response: response,
run: r,
count: len(reqs),
index: 0,
}
// check access permissions
injectMap, err := r.checkAccess(reqs, ctx, rslt)
if err != nil {
returnFn(nil, err)
return
}
for n, request := range reqs {
// before
if r.beforeExecute != nil {
if err := r.beforeExecute(ctx, request); err != nil {
returnFn(response, err)
return
}
}
var result *rj.ResponseItem
rslt.index = n
r.execute(ctx, injectMap, request, rslt, func(key string, rsp *rj.ResponseItem) {
if resAry, exists := response[request.Service]; exists {
response[key] = append(resAry, rsp)
} else {
response[key] = []*rj.ResponseItem{rsp}
}
result = rsp
})
// after
if r.afterExecute != nil {
if err := r.afterExecute(ctx, request, result, rslt); err != nil {
returnFn(response, err)
return
}
}
//r.log.Debug("Call: %s", request.Service)
}
if r.afterRun != nil {
if err := r.afterRun(ctx, reqs, rslt); err != nil {
returnFn(response, err)
return
}
}
returnFn(response, nil)
}
// RunString runs requests passed as a JSON string
func (r *Runner) RunString(ctx *context.Context, data string) (rj.Response, error) {
var rsp rj.Response
var err error
var reqs = rj.Requests{}
err = json.Unmarshal([]byte(data), &reqs)
if err != nil {
r.log.Error(err, "json.Unmarshal")
if r.onError != nil {
r.onError(&Error{
Err: err,
ctx: ctx,
request: reqs,
})
}
return nil, err
}
r.doRun(ctx, reqs, func(responses rj.Response, e error) {
rsp = responses
err = e
if r.onError != nil {
r.onError(&Error{
Err: err,
ctx: ctx,
request: reqs,
})
}
})
return rsp, err
}
// RunRequests runs requests passed as rj.Requests
func (r *Runner) RunRequests(ctx *context.Context, reqs rj.Requests) (rj.Response, error) {
var rsp rj.Response
var err error
r.doRun(ctx, reqs, func(responses rj.Response, e error) | l, fmt.Errorf("resu | identifier_body |
main.go | //}
return &Runner{
APIInfo: &graph.APIInfo{
Groups: nil,
Request: map[string]*graph.ObjectInfo{},
Response: map[string]*graph.ObjectInfo{},
},
log: &util.Logger{},
loaders: nil,
service: runner.New(),
injector: inject.NewManager(),
requestObjectManager: request.NewRequestObjectManager(),
}
}
// Register registers loaders
func (r *Runner) Register(loaders ...rj.Loader) {
r.loaders = append(r.loaders, loaders...)
}
// RegisterProvider registers injection functions
func (r *Runner) RegisterProvider(fns ...interface{}) error {
for _, fn := range fns {
if err := r.injector.Register(fn); err != nil {
return err
}
}
return nil
}
// RegisterAccessController registers an injection function that also performs access control
func (r *Runner) RegisterAccessController(fn interface{}) error {
return r.injector.RegisterAccessController(fn)
}
// InjectProxy registers a proxy injection
// func (r *Runner) InjectProxy(fn interface{}, injectType reflect.Type, proxyFn interface{}) error {
// return r.injector.RegisterWithProxy(fn, injectType, proxyFn)
// }
func (r *Runner) execute(ctx *context.Context, injectMap map[reflect.Type]reflect.Value, request *rj.Request, rslt *results, onResponse func(key string, rsp *rj.ResponseItem)) {
defer func() {
if err := recover(); err != nil {
onResponse(request.Service, &rj.ResponseItem{
Error: fmt.Sprintf("%v", err),
Data: nil,
})
}
}()
//resKey := request.Service
var rsp *rj.ResponseItem
svc := r.service.Get(request.Service)
if svc != nil {
res, err := svc.Run(ctx, request.Args, injectMap, rslt)
if err != nil {
rsp = &rj.ResponseItem{
Error: err.Error(),
DataType: svc.ReturnType,
}
} else {
rsp = &rj.ResponseItem{
Error: "",
Data: res,
DataType: svc.ReturnType,
}
}
} else {
rsp = &rj.ResponseItem{
Error: "No service named " + request.Service,
}
}
onResponse(request.Service, rsp)
}
func (r *Runner) checkAccess(reqs rj.Requests, ctx *context.Context, responseContext rj.ResponseContext) (map[reflect.Type]reflect.Value, error) {
accessInject := map[reflect.Type]reflect.Value{}
for _, req := range reqs {
svc := r.service.Get(req.Service)
if svc == nil {
// service not found
return nil, fmt.Errorf("no service named %s", req.Service)
}
for _, ac := range svc.AccessControllers {
val, err := ac.Call(req.Service, responseContext, ctx.Param)
if err != nil {
return nil, err
}
accessInject[ac.Type] = val
}
}
return accessInject, nil
}
func (r *Runner) doRun(ctx *context.Context, reqs rj.Requests, returnFn func(rj.Response, error)) {
defer func() {
if err := recover(); err != nil {
returnFn(nil, errors.New(err.(string)))
}
}()
//r.log.Debug("Requests: \n%s", data)
if r.beforeRun != nil {
if err := r.beforeRun(ctx, reqs); err != nil {
returnFn(nil, err)
return
}
}
response := rj.Response{}
rslt := &results{
response: response,
run: r,
count: len(reqs),
index: 0,
}
// check access permissions
injectMap, err := r.checkAccess(reqs, ctx, rslt)
if err != nil {
returnFn(nil, err)
return
}
for n, request := range reqs {
// before
if r.beforeExecute != nil {
if err := r.beforeExecute(ctx, request); err != nil {
returnFn(response, err)
return
}
}
var result *rj.ResponseItem
rslt.index = n
r.execute(ctx, injectMap, request, rslt, func(key string, rsp *rj.ResponseItem) {
if resAry, exists := response[request.Service]; exists {
response[key] = append(resAry, rsp)
} else {
response[key] = []*rj.ResponseItem{rsp}
}
result = rsp
})
// after
if r.afterExecute != nil {
if err := r.afterExecute(ctx, request, result, rslt); err != nil {
returnFn(response, err)
return
}
}
//r.log.Debug("Call: %s", request.Service)
}
if r.afterRun != nil {
if err := r.afterRun(ctx, reqs, rslt); err != nil {
returnFn(response, err)
return
}
}
returnFn(response, nil)
}
// RunString runs requests passed as a JSON string
func (r *Runner) RunString(ctx *context.Context, data string) (rj.Response, error) {
var rsp rj.Response
var err error
var reqs = rj.Requests{}
err = json.Unmarshal([]byte(data), &reqs)
if err != nil {
r.log.Error(err, "json.Unmarshal")
if r.onError != nil {
r.onError(&Error{
Err: err,
ctx: ctx,
request: reqs,
})
}
return nil, err
}
r.doRun(ctx, reqs, func(responses rj.Response, e error) {
rsp = responses
err = e
if r.onError != nil {
r.onError(&Error{
Err: err,
ctx: ctx,
request: reqs,
})
}
})
return rsp, err
}
// RunRequests runs requests passed as rj.Requests
func (r *Runner) RunRequests(ctx *context.Context, reqs rj.Requests) (rj.Response, error) {
var rsp rj.Response
var err error
r.doRun(ctx, reqs, func(responses rj.Response, e error) {
rsp = responses
err = e
if r.onError != nil {
r.onError(&Error{
Err: err,
ctx: ctx,
request: reqs,
})
}
})
return rsp, err
}
// Engage parses the registered loaders and starts the functionality (the name is from Star Trek)
func (r *Runner) Engage() error {
// API documentation info for the interfaces
amap := r.APIInfo
for _, loader := range r.loaders {
grpInfo := loader.Group()
if grpInfo == nil {
return errors.New("a loader must belong a group")
}
if strings.TrimSpace(grpInfo.Name) == "" {
return errors.New("group's name shuld not be empty")
}
// get the group
grp := amap.GetGroup(grpInfo.Name, grpInfo.Description)
// parse the functions
loaderTyp := reflect.TypeOf(loader)
// iterate over the methods
nm := loaderTyp.NumMethod()
for n := 0; n < nm; n++ {
method := loaderTyp.Method(n)
if rj.GroupFunc == method.Name {
// skip the Loader interface function
continue
}
// generate the JSONRunner name
svcName, err := grp.GenerateServiceName(method.Name)
if err != nil {
// a service with the same name already exists
r.log.Error(err, "JSONRunner exists")
continue
}
if r.logRegister {
r.log.Info("Try to register api %s ...", svcName)
}
// parse the service function
svc, err := runner.TryParserAsService(loaderTyp,
r.injector,
r.requestObjectManager,
method,
amap,
r.log)
if err != nil {
// not a valid service function
if r.logRegister {
r.log.Warn("[%s] is not a service function: %v\n", svcName, err)
}
// r.log.Warn("[%s] is not a service function: %v\n", svcName, err)
continue
}
if svc != nil {
// parsing finished
// fill in the service name
svc.Name = svcName
svcInfo := &graph.ServiceInfo{
Name: svcName,
InputObjectID: svc.RequestObjectID,
InputIsArray: svc.RequestObjectIsArray,
OutputObjectID: svc.ReturnObjectID,
OutputIsArray: svc.ReturnObjectIsArray,
}
// parse the service description info
info := runner.TryToParseFuncInfo(loader, | loaderTyp, method.Name)
if info != nil {
svcInfo.Description = info. | conditional_block |
|
main.go |
// OnError defines the error reporting callback
OnError func(err *Error)
)
// Runner is the core runner struct
type Runner struct {
// APIInfo holds the public API documentation
APIInfo *graph.APIInfo
// service executes the registered services
service *runner.Runners
// log is the logger
log context.Log
// logRegister records the api registration process
logRegister bool
// loaders holds the registered loaders
loaders []rj.Loader
// injector manages dependency injection
injector *inject.InjectorManager
// requestObjectManager manages request parameters
requestObjectManager *request.ObjectManager
//groups []*runner.Group
//funcs map[string]*runner.JSONRunner
beforeRun BeforeRun
beforeExecute BeforeExecute
afterRun AfterRun
afterExecute AfterExecute
onError OnError
}
// SetLogger sets the log output
func (r *Runner) SetLogger(log context.Log) *Runner {
r.log = log
return r
}
// SetLogRegister sets whether to log the api registration process
func (r *Runner) SetLogRegister(log bool) *Runner {
r.logRegister = log
return r
}
// ErrorHandler sets the error handling function
func (r *Runner) ErrorHandler(handler OnError) *Runner {
r.onError = handler
return r
}
// BeforeRun intercepts before a batch run
func (r *Runner) BeforeRun(fn BeforeRun) *Runner {
r.beforeRun = fn
return r
}
// BeforeExecute intercepts before each single task is executed
func (r *Runner) BeforeExecute(fn BeforeExecute) *Runner {
r.beforeExecute = fn
return r
}
// AfterRun runs after a batch run
func (r *Runner) AfterRun(fn AfterRun) *Runner {
r.afterRun = fn
return r
}
// AfterExecute intercepts after each single task is executed
func (r *Runner) AfterExecute(fn AfterExecute) *Runner {
r.afterExecute = fn
return r
}
type results struct {
response rj.Response
run *Runner
count int
index int
}
// CallCount returns the number of calls
func (r *results) CallCount() int {
return r.count
}
// CallIndex returns the index of the current call
func (r *results) CallIndex() int {
return r.index
}
func (r *results) Get(method interface{}) ([]*rj.ResponseItem, error) {
jr, err := r.run.service.Find(method)
if err != nil {
return nil, err
}
rsp, exists := r.response[jr.Name]
if !exists {
return nil, fmt.Errorf("result of [%s] not found", jr.Name)
}
return rsp, nil
}
// New creates a new Runner
func New() *Runner {
//log := logrus.Logger{
// Level: logrus.WarnLevel,
// Formatter: &logrus.TextFormatter{},
//}
return &Runner{
APIInfo: &graph.APIInfo{
Groups: nil,
Request: map[string]*graph.ObjectInfo{},
Response: map[string]*graph.ObjectInfo{},
},
log: &util.Logger{},
| oaders: nil,
service: runner.New(),
injector: inject.NewManager(),
requestObjectManager: request.NewRequestObjectManager(),
}
}
// Register registers loaders
func (r *Runner) Register(loaders ...rj.Loader) {
r.loaders = append(r.loaders, loaders...)
}
// RegisterProvider registers injection functions
func (r *Runner) RegisterProvider(fns ...interface{}) error {
for _, fn := range fns {
if err := r.injector.Register(fn); err != nil {
return err
}
}
return nil
}
// RegisterAccessController registers an injection function that also performs access control
func (r *Runner) RegisterAccessController(fn interface{}) error {
return r.injector.RegisterAccessController(fn)
}
// InjectProxy registers a proxy injection
// func (r *Runner) InjectProxy(fn interface{}, injectType reflect.Type, proxyFn interface{}) error {
// return r.injector.RegisterWithProxy(fn, injectType, proxyFn)
// }
func (r *Runner) execute(ctx *context.Context, injectMap map[reflect.Type]reflect.Value, request *rj.Request, rslt *results, onResponse func(key string, rsp *rj.ResponseItem)) {
defer func() {
if err := recover(); err != nil {
onResponse(request.Service, &rj.ResponseItem{
Error: fmt.Sprintf("%v", err),
Data: nil,
})
}
}()
//resKey := request.Service
var rsp *rj.ResponseItem
svc := r.service.Get(request.Service)
if svc != nil {
res, err := svc.Run(ctx, request.Args, injectMap, rslt)
if err != nil {
rsp = &rj.ResponseItem{
Error: err.Error(),
DataType: svc.ReturnType,
}
} else {
rsp = &rj.ResponseItem{
Error: "",
Data: res,
DataType: svc.ReturnType,
}
}
} else {
rsp = &rj.ResponseItem{
Error: "No service named " + request.Service,
}
}
onResponse(request.Service, rsp)
}
func (r *Runner) checkAccess(reqs rj.Requests, ctx *context.Context, responseContext rj.ResponseContext) (map[reflect.Type]reflect.Value, error) {
accessInject := map[reflect.Type]reflect.Value{}
for _, req := range reqs {
svc := r.service.Get(req.Service)
if svc == nil {
// service not found
return nil, fmt.Errorf("no service named %s", req.Service)
}
for _, ac := range svc.AccessControllers {
val, err := ac.Call(req.Service, responseContext, ctx.Param)
if err != nil {
return nil, err
}
accessInject[ac.Type] = val
}
}
return accessInject, nil
}
func (r *Runner) doRun(ctx *context.Context, reqs rj.Requests, returnFn func(rj.Response, error)) {
defer func() {
if err := recover(); err != nil {
returnFn(nil, errors.New(err.(string)))
}
}()
//r.log.Debug("Requests: \n%s", data)
if r.beforeRun != nil {
if err := r.beforeRun(ctx, reqs); err != nil {
returnFn(nil, err)
return
}
}
response := rj.Response{}
rslt := &results{
response: response,
run: r,
count: len(reqs),
index: 0,
}
// check access permissions
injectMap, err := r.checkAccess(reqs, ctx, rslt)
if err != nil {
returnFn(nil, err)
return
}
for n, request := range reqs {
// before
if r.beforeExecute != nil {
if err := r.beforeExecute(ctx, request); err != nil {
returnFn(response, err)
return
}
}
var result *rj.ResponseItem
rslt.index = n
r.execute(ctx, injectMap, request, rslt, func(key string, rsp *rj.ResponseItem) {
if resAry, exists := response[request.Service]; exists {
response[key] = append(resAry, rsp)
} else {
response[key] = []*rj.ResponseItem{rsp}
}
result = rsp
})
// after
if r.afterExecute != nil {
if err := r.afterExecute(ctx, request, result, rslt); err != nil {
returnFn(response, err)
return
}
}
//r.log.Debug("Call: %s", request.Service)
}
if r.afterRun != nil {
if err := r.afterRun(ctx, reqs, rslt); err != nil {
returnFn(response, err)
return
}
}
returnFn(response, nil)
}
// RunString runs requests passed as a JSON string
func (r *Runner) RunString(ctx *context.Context, data string) (rj.Response, error) {
var rsp rj.Response
var err error
var reqs = rj.Requests{}
err = json.Unmarshal([]byte(data), &reqs)
if err != nil {
r.log.Error(err, "json.Unmarshal")
if r.onError != nil {
r.onError(&Error{
Err: err,
ctx: ctx,
request: reqs,
})
}
return nil, err
}
r.doRun(ctx, reqs, func(responses rj.Response, e error) {
rsp = responses
err = e
if r.onError != nil {
r.onError(&Error{
Err: err,
ctx: ctx,
request: reqs,
})
}
})
return rsp, err
}
// RunRequests runs requests passed as rj.Requests
func (r *Runner) RunRequests(ctx *context.Context, reqs rj.Requests) (rj.Response, error) {
var rsp rj.Response
var err error
r.doRun(ctx, reqs, func(responses rj.Response, e error) {
| l | identifier_name |
main.go |
// OnError defines the error reporting callback
OnError func(err *Error)
)
// Runner is the core runner struct
type Runner struct {
// APIInfo holds the public API documentation
APIInfo *graph.APIInfo
// service executes the registered services
service *runner.Runners
// log is the logger
log context.Log
// logRegister records the api registration process
logRegister bool
// loaders holds the registered loaders
loaders []rj.Loader
// injector manages dependency injection
injector *inject.InjectorManager
// requestObjectManager manages request parameters
requestObjectManager *request.ObjectManager
//groups []*runner.Group
//funcs map[string]*runner.JSONRunner
beforeRun BeforeRun
beforeExecute BeforeExecute
afterRun AfterRun
afterExecute AfterExecute
onError OnError
}
// SetLogger sets the log output
func (r *Runner) SetLogger(log context.Log) *Runner {
r.log = log
return r
}
// SetLogRegister sets whether to log the api registration process
func (r *Runner) SetLogRegister(log bool) *Runner {
r.logRegister = log
return r
}
// ErrorHandler sets the error handling function
func (r *Runner) ErrorHandler(handler OnError) *Runner {
r.onError = handler
return r
}
// BeforeRun intercepts before a batch run
func (r *Runner) BeforeRun(fn BeforeRun) *Runner {
r.beforeRun = fn
return r
}
// BeforeExecute intercepts before each single task is executed
func (r *Runner) BeforeExecute(fn BeforeExecute) *Runner {
r.beforeExecute = fn
return r
}
// AfterRun runs after a batch run
func (r *Runner) AfterRun(fn AfterRun) *Runner {
r.afterRun = fn
return r
} | r.afterExecute = fn
return r
}
type results struct {
response rj.Response
run *Runner
count int
index int
}
// CallCount returns the number of calls
func (r *results) CallCount() int {
return r.count
}
// CallIndex returns the index of the current call
func (r *results) CallIndex() int {
return r.index
}
func (r *results) Get(method interface{}) ([]*rj.ResponseItem, error) {
jr, err := r.run.service.Find(method)
if err != nil {
return nil, err
}
rsp, exists := r.response[jr.Name]
if !exists {
return nil, fmt.Errorf("result of [%s] not found", jr.Name)
}
return rsp, nil
}
// New creates a new Runner
func New() *Runner {
//log := logrus.Logger{
// Level: logrus.WarnLevel,
// Formatter: &logrus.TextFormatter{},
//}
return &Runner{
APIInfo: &graph.APIInfo{
Groups: nil,
Request: map[string]*graph.ObjectInfo{},
Response: map[string]*graph.ObjectInfo{},
},
log: &util.Logger{},
loaders: nil,
service: runner.New(),
injector: inject.NewManager(),
requestObjectManager: request.NewRequestObjectManager(),
}
}
// Register registers loaders
func (r *Runner) Register(loaders ...rj.Loader) {
r.loaders = append(r.loaders, loaders...)
}
// RegisterProvider registers injection functions
func (r *Runner) RegisterProvider(fns ...interface{}) error {
for _, fn := range fns {
if err := r.injector.Register(fn); err != nil {
return err
}
}
return nil
}
// RegisterAccessController registers an injection function that also performs access control
func (r *Runner) RegisterAccessController(fn interface{}) error {
return r.injector.RegisterAccessController(fn)
}
// InjectProxy registers a proxy injection
// func (r *Runner) InjectProxy(fn interface{}, injectType reflect.Type, proxyFn interface{}) error {
// return r.injector.RegisterWithProxy(fn, injectType, proxyFn)
// }
func (r *Runner) execute(ctx *context.Context, injectMap map[reflect.Type]reflect.Value, request *rj.Request, rslt *results, onResponse func(key string, rsp *rj.ResponseItem)) {
defer func() {
if err := recover(); err != nil {
onResponse(request.Service, &rj.ResponseItem{
Error: fmt.Sprintf("%v", err),
Data: nil,
})
}
}()
//resKey := request.Service
var rsp *rj.ResponseItem
svc := r.service.Get(request.Service)
if svc != nil {
res, err := svc.Run(ctx, request.Args, injectMap, rslt)
if err != nil {
rsp = &rj.ResponseItem{
Error: err.Error(),
DataType: svc.ReturnType,
}
} else {
rsp = &rj.ResponseItem{
Error: "",
Data: res,
DataType: svc.ReturnType,
}
}
} else {
rsp = &rj.ResponseItem{
Error: "No service named " + request.Service,
}
}
onResponse(request.Service, rsp)
}
func (r *Runner) checkAccess(reqs rj.Requests, ctx *context.Context, responseContext rj.ResponseContext) (map[reflect.Type]reflect.Value, error) {
accessInject := map[reflect.Type]reflect.Value{}
for _, req := range reqs {
svc := r.service.Get(req.Service)
if svc == nil {
// service not found
return nil, fmt.Errorf("no service named %s", req.Service)
}
for _, ac := range svc.AccessControllers {
val, err := ac.Call(req.Service, responseContext, ctx.Param)
if err != nil {
return nil, err
}
accessInject[ac.Type] = val
}
}
return accessInject, nil
}
func (r *Runner) doRun(ctx *context.Context, reqs rj.Requests, returnFn func(rj.Response, error)) {
defer func() {
if err := recover(); err != nil {
returnFn(nil, errors.New(err.(string)))
}
}()
//r.log.Debug("Requests: \n%s", data)
if r.beforeRun != nil {
if err := r.beforeRun(ctx, reqs); err != nil {
returnFn(nil, err)
return
}
}
response := rj.Response{}
rslt := &results{
response: response,
run: r,
count: len(reqs),
index: 0,
}
// check access permissions
injectMap, err := r.checkAccess(reqs, ctx, rslt)
if err != nil {
returnFn(nil, err)
return
}
for n, request := range reqs {
// before
if r.beforeExecute != nil {
if err := r.beforeExecute(ctx, request); err != nil {
returnFn(response, err)
return
}
}
var result *rj.ResponseItem
rslt.index = n
r.execute(ctx, injectMap, request, rslt, func(key string, rsp *rj.ResponseItem) {
if resAry, exists := response[request.Service]; exists {
response[key] = append(resAry, rsp)
} else {
response[key] = []*rj.ResponseItem{rsp}
}
result = rsp
})
// after
if r.afterExecute != nil {
if err := r.afterExecute(ctx, request, result, rslt); err != nil {
returnFn(response, err)
return
}
}
//r.log.Debug("Call: %s", request.Service)
}
if r.afterRun != nil {
if err := r.afterRun(ctx, reqs, rslt); err != nil {
returnFn(response, err)
return
}
}
returnFn(response, nil)
}
// RunString runs requests passed as a JSON string
func (r *Runner) RunString(ctx *context.Context, data string) (rj.Response, error) {
var rsp rj.Response
var err error
var reqs = rj.Requests{}
err = json.Unmarshal([]byte(data), &reqs)
if err != nil {
r.log.Error(err, "json.Unmarshal")
if r.onError != nil {
r.onError(&Error{
Err: err,
ctx: ctx,
request: reqs,
})
}
return nil, err
}
r.doRun(ctx, reqs, func(responses rj.Response, e error) {
rsp = responses
err = e
if r.onError != nil {
r.onError(&Error{
Err: err,
ctx: ctx,
request: reqs,
})
}
})
return rsp, err
}
// RunRequests runs requests passed as rj.Requests
func (r *Runner) RunRequests(ctx *context.Context, reqs rj.Requests) (rj.Response, error) {
var rsp rj.Response
var err error
r.doRun(ctx, reqs, func(responses rj.Response, e error) {
|
// AfterExecute intercepts after each single task is executed
func (r *Runner) AfterExecute(fn AfterExecute) *Runner { | random_line_split |
sync.go | image-selector", "", "The image to search a pod for (e.g. nginx, nginx:latest, ${runtime.images.app}, nginx:${runtime.images.app.tag})")
syncCmd.Flags().BoolVar(&cmd.Pick, "pick", true, "Select a pod")
syncCmd.Flags().StringSliceVarP(&cmd.Exclude, "exclude", "e", []string{}, "Exclude directory from sync")
syncCmd.Flags().StringVar(&cmd.Path, "path", "", "Path to use (Default is current directory). Example: ./local-path:/remote-path or local-path:.")
syncCmd.Flags().BoolVar(&cmd.DownloadOnInitialSync, "download-on-initial-sync", true, "DEPRECATED: Downloads all locally non existing remote files in the beginning")
syncCmd.Flags().StringVar(&cmd.InitialSync, "initial-sync", "", "The initial sync strategy to use (mirrorLocal, mirrorRemote, preferLocal, preferRemote, preferNewest, keepAll)")
syncCmd.Flags().BoolVar(&cmd.NoWatch, "no-watch", false, "Synchronizes local and remote and then stops")
syncCmd.Flags().BoolVar(&cmd.UploadOnly, "upload-only", false, "If set DevSpace will only upload files")
syncCmd.Flags().BoolVar(&cmd.DownloadOnly, "download-only", false, "If set DevSpace will only download files")
syncCmd.Flags().BoolVar(&cmd.Wait, "wait", true, "Wait for the pod(s) to start if they are not running")
syncCmd.Flags().BoolVar(&cmd.Polling, "polling", false, "If polling should be used to detect file changes in the container")
return syncCmd
}
type nameConfig struct {
name string
devPod *latest.DevPod
containerName string
syncConfig *latest.SyncConfig
}
// Run executes the command logic
func (cmd *SyncCmd) Run(f factory.Factory) error {
if cmd.Ctx == nil {
var cancelFn context.CancelFunc
cmd.Ctx, cancelFn = context.WithCancel(context.Background())
defer cancelFn()
}
// Switch working directory
if cmd.ConfigPath != "" {
_, err := os.Stat(cmd.ConfigPath)
if err != nil {
return errors.Errorf("--config is specified, but config %s cannot be loaded: %v", cmd.GlobalFlags.ConfigPath, err)
}
}
// Load generated config if possible
var err error
var localCache localcache.Cache
logger := f.GetLog()
configOptions := cmd.ToConfigOptions()
configLoader, err := f.NewConfigLoader(cmd.ConfigPath)
if err != nil {
return err
}
if configLoader.Exists() {
if cmd.GlobalFlags.ConfigPath != "" {
configExists, err := configLoader.SetDevSpaceRoot(logger)
if err != nil {
return err
} else if !configExists {
return errors.New(message.ConfigNotFound)
}
localCache, err = configLoader.LoadLocalCache()
if err != nil {
return err
}
} else {
logger.Warnf("If you want to use the sync paths from `devspace.yaml`, use the `--config=devspace.yaml` flag for this command.")
}
}
// Get config with adjusted cluster config
client, err := f.NewKubeClientFromContext(cmd.KubeContext, cmd.Namespace)
if err != nil {
return errors.Wrap(err, "new kube client")
}
// If the current kube context or namespace is different from old,
// show warnings and reset kube client if necessary
client, err = kubectl.CheckKubeContext(client, localCache, cmd.NoWarn, cmd.SwitchContext, false, logger)
if err != nil {
return err
}
var configInterface config.Config
if configLoader.Exists() && cmd.GlobalFlags.ConfigPath != "" {
configInterface, err = configLoader.LoadWithCache(context.Background(), localCache, client, configOptions, logger)
if err != nil {
return err
}
}
// create the devspace context
ctx := devspacecontext.NewContext(cmd.Ctx, nil, logger).
WithConfig(configInterface).
WithKubeClient(client)
// Execute plugin hook
err = hook.ExecuteHooks(ctx, nil, "sync")
if err != nil {
return err
}
// get image selector if specified
imageSelector, err := getImageSelector(ctx, configLoader, configOptions, cmd.ImageSelector)
if err != nil {
return err
}
// Build params
options := targetselector.NewOptionsFromFlags(cmd.Container, cmd.LabelSelector, imageSelector, cmd.Namespace, cmd.Pod).
WithPick(cmd.Pick).
WithWait(cmd.Wait)
if cmd.DownloadOnly && cmd.UploadOnly {
return errors.New("--upload-only cannot be used together with --download-only")
}
// Create the sync config to apply
syncConfig := nameConfig{
devPod: &latest.DevPod{},
syncConfig: &latest.SyncConfig{},
}
if cmd.GlobalFlags.ConfigPath != "" && configInterface != nil {
devSection := configInterface.Config().Dev
syncConfigs := []nameConfig{}
for _, v := range devSection {
loader.EachDevContainer(v, func(devContainer *latest.DevContainer) bool {
for _, s := range devContainer.Sync {
n, err := fromSyncConfig(v, devContainer.Container, s)
if err != nil {
return true
}
syncConfigs = append(syncConfigs, n)
}
return true
})
}
if len(syncConfigs) == 0 {
return fmt.Errorf("no sync config found in %s", cmd.GlobalFlags.ConfigPath)
}
// Check which sync config should be used
if len(syncConfigs) > 1 {
// Select syncConfig to use
syncConfigNames := []string{}
for _, sc := range syncConfigs {
syncConfigNames = append(syncConfigNames, sc.name)
}
answer, err := logger.Question(&survey.QuestionOptions{
Question: "Multiple sync configurations found. Which one do you want to use?",
DefaultValue: syncConfigNames[0],
Options: syncConfigNames,
})
if err != nil {
return err
}
for idx, n := range syncConfigNames {
if answer == n {
syncConfig = syncConfigs[idx]
break
}
}
} else {
syncConfig = syncConfigs[0]
}
}
// apply the flags to the empty sync config or loaded sync config from the devspace.yaml
var configImageSelector []string
if syncConfig.devPod.ImageSelector != "" {
imageSelector, err := runtimevar.NewRuntimeResolver(ctx.WorkingDir(), true).FillRuntimeVariablesAsImageSelector(ctx.Context(), syncConfig.devPod.ImageSelector, ctx.Config(), ctx.Dependencies())
if err != nil {
return err
}
configImageSelector = []string{imageSelector.Image}
}
options = options.ApplyConfigParameter(syncConfig.containerName, syncConfig.devPod.LabelSelector, configImageSelector, syncConfig.devPod.Namespace, "")
options, err = cmd.applyFlagsToSyncConfig(syncConfig.syncConfig, options)
if err != nil {
return errors.Wrap(err, "apply flags to sync config")
}
// Start sync
options = options.WithSkipInitContainers(true)
return sync.StartSyncFromCmd(ctx, targetselector.NewTargetSelector(options), syncConfig.devPod.Name, syncConfig.syncConfig, cmd.NoWatch)
}
func fromSyncConfig(devPod *latest.DevPod, containerName string, sc *latest.SyncConfig) (nameConfig, error) {
localPath, remotePath, err := sync.ParseSyncPath(sc.Path)
if err != nil {
return nameConfig{}, err
}
selector := ""
if devPod.ImageSelector != "" {
selector = "img-selector: " + devPod.ImageSelector
} else if len(devPod.LabelSelector) > 0 {
selector = "selector: " + labels.Set(devPod.LabelSelector).String()
}
if containerName != "" {
selector += "/" + containerName
}
return nameConfig{
name: fmt.Sprintf("%s: Sync %s: %s <-> %s ", devPod.Name, selector, localPath, remotePath),
devPod: devPod,
containerName: containerName,
syncConfig: sc,
}, nil
}
func (cmd *SyncCmd) applyFlagsToSyncConfig(syncConfig *latest.SyncConfig, options targetselector.Options) (targetselector.Options, error) {
if cmd.Path != "" {
syncConfig.Path = cmd.Path
}
if len(cmd.Exclude) > 0 {
syncConfig.ExcludePaths = cmd.Exclude
}
if cmd.UploadOnly {
syncConfig.DisableDownload = cmd.UploadOnly
}
if cmd.DownloadOnly {
syncConfig.DisableUpload = cmd.DownloadOnly
}
// if selection is specified through flags, we don't want to use the loaded
// sync config selection from the devspace.yaml.
if cmd.Container != "" {
options = options.WithContainer(cmd.Container)
}
if cmd.LabelSelector != "" {
options = options.WithLabelSelector(cmd.LabelSelector)
}
if cmd.Pod != "" {
options = options.WithPod(cmd.Pod)
}
if cmd.Namespace != "" | {
options = options.WithNamespace(cmd.Namespace)
} | conditional_block |
|
sync.go | ")
syncCmd.Flags().StringVar(&cmd.Path, "path", "", "Path to use (Default is current directory). Example: ./local-path:/remote-path or local-path:.")
syncCmd.Flags().BoolVar(&cmd.DownloadOnInitialSync, "download-on-initial-sync", true, "DEPRECATED: Downloads all locally non existing remote files in the beginning")
syncCmd.Flags().StringVar(&cmd.InitialSync, "initial-sync", "", "The initial sync strategy to use (mirrorLocal, mirrorRemote, preferLocal, preferRemote, preferNewest, keepAll)")
syncCmd.Flags().BoolVar(&cmd.NoWatch, "no-watch", false, "Synchronizes local and remote and then stops")
syncCmd.Flags().BoolVar(&cmd.UploadOnly, "upload-only", false, "If set DevSpace will only upload files")
syncCmd.Flags().BoolVar(&cmd.DownloadOnly, "download-only", false, "If set DevSpace will only download files")
syncCmd.Flags().BoolVar(&cmd.Wait, "wait", true, "Wait for the pod(s) to start if they are not running")
syncCmd.Flags().BoolVar(&cmd.Polling, "polling", false, "If polling should be used to detect file changes in the container")
return syncCmd
}
type nameConfig struct {
name string
devPod *latest.DevPod
containerName string
syncConfig *latest.SyncConfig
}
// Run executes the command logic
func (cmd *SyncCmd) Run(f factory.Factory) error {
if cmd.Ctx == nil {
var cancelFn context.CancelFunc
cmd.Ctx, cancelFn = context.WithCancel(context.Background())
defer cancelFn()
}
// Switch working directory
if cmd.ConfigPath != "" {
_, err := os.Stat(cmd.ConfigPath)
if err != nil {
return errors.Errorf("--config is specified, but config %s cannot be loaded: %v", cmd.GlobalFlags.ConfigPath, err)
}
}
// Load generated config if possible
var err error
var localCache localcache.Cache
logger := f.GetLog()
configOptions := cmd.ToConfigOptions()
configLoader, err := f.NewConfigLoader(cmd.ConfigPath)
if err != nil {
return err
}
if configLoader.Exists() {
if cmd.GlobalFlags.ConfigPath != "" {
configExists, err := configLoader.SetDevSpaceRoot(logger)
if err != nil {
return err
} else if !configExists {
return errors.New(message.ConfigNotFound)
}
localCache, err = configLoader.LoadLocalCache()
if err != nil {
return err
}
} else {
logger.Warnf("If you want to use the sync paths from `devspace.yaml`, use the `--config=devspace.yaml` flag for this command.")
}
}
// Get config with adjusted cluster config
client, err := f.NewKubeClientFromContext(cmd.KubeContext, cmd.Namespace)
if err != nil {
return errors.Wrap(err, "new kube client")
}
// If the current kube context or namespace is different from old,
// show warnings and reset kube client if necessary
client, err = kubectl.CheckKubeContext(client, localCache, cmd.NoWarn, cmd.SwitchContext, false, logger)
if err != nil {
return err
}
var configInterface config.Config
if configLoader.Exists() && cmd.GlobalFlags.ConfigPath != "" {
configInterface, err = configLoader.LoadWithCache(context.Background(), localCache, client, configOptions, logger)
if err != nil {
return err
}
}
// create the devspace context
ctx := devspacecontext.NewContext(cmd.Ctx, nil, logger).
WithConfig(configInterface).
WithKubeClient(client)
// Execute plugin hook
err = hook.ExecuteHooks(ctx, nil, "sync")
if err != nil {
return err
}
// get image selector if specified
imageSelector, err := getImageSelector(ctx, configLoader, configOptions, cmd.ImageSelector)
if err != nil {
return err
}
// Build params
options := targetselector.NewOptionsFromFlags(cmd.Container, cmd.LabelSelector, imageSelector, cmd.Namespace, cmd.Pod).
WithPick(cmd.Pick).
WithWait(cmd.Wait)
if cmd.DownloadOnly && cmd.UploadOnly {
return errors.New("--upload-only cannot be used together with --download-only")
}
// Create the sync config to apply
syncConfig := nameConfig{
devPod: &latest.DevPod{},
syncConfig: &latest.SyncConfig{},
}
if cmd.GlobalFlags.ConfigPath != "" && configInterface != nil {
devSection := configInterface.Config().Dev
syncConfigs := []nameConfig{}
for _, v := range devSection {
loader.EachDevContainer(v, func(devContainer *latest.DevContainer) bool {
for _, s := range devContainer.Sync {
n, err := fromSyncConfig(v, devContainer.Container, s)
if err != nil {
return true
}
syncConfigs = append(syncConfigs, n)
}
return true
})
}
if len(syncConfigs) == 0 {
return fmt.Errorf("no sync config found in %s", cmd.GlobalFlags.ConfigPath)
}
// Check which sync config should be used
if len(syncConfigs) > 1 {
// Select syncConfig to use
syncConfigNames := []string{}
for _, sc := range syncConfigs {
syncConfigNames = append(syncConfigNames, sc.name)
}
answer, err := logger.Question(&survey.QuestionOptions{
Question: "Multiple sync configurations found. Which one do you want to use?",
DefaultValue: syncConfigNames[0],
Options: syncConfigNames,
})
if err != nil {
return err
}
for idx, n := range syncConfigNames {
if answer == n {
syncConfig = syncConfigs[idx]
break
}
}
} else {
syncConfig = syncConfigs[0]
}
}
// apply the flags to the empty sync config or loaded sync config from the devspace.yaml
var configImageSelector []string
if syncConfig.devPod.ImageSelector != "" {
imageSelector, err := runtimevar.NewRuntimeResolver(ctx.WorkingDir(), true).FillRuntimeVariablesAsImageSelector(ctx.Context(), syncConfig.devPod.ImageSelector, ctx.Config(), ctx.Dependencies())
if err != nil {
return err
}
configImageSelector = []string{imageSelector.Image}
}
options = options.ApplyConfigParameter(syncConfig.containerName, syncConfig.devPod.LabelSelector, configImageSelector, syncConfig.devPod.Namespace, "")
options, err = cmd.applyFlagsToSyncConfig(syncConfig.syncConfig, options)
if err != nil {
return errors.Wrap(err, "apply flags to sync config")
}
// Start sync
options = options.WithSkipInitContainers(true)
return sync.StartSyncFromCmd(ctx, targetselector.NewTargetSelector(options), syncConfig.devPod.Name, syncConfig.syncConfig, cmd.NoWatch)
}
func fromSyncConfig(devPod *latest.DevPod, containerName string, sc *latest.SyncConfig) (nameConfig, error) {
localPath, remotePath, err := sync.ParseSyncPath(sc.Path)
if err != nil {
return nameConfig{}, err
}
selector := ""
if devPod.ImageSelector != "" {
selector = "img-selector: " + devPod.ImageSelector
} else if len(devPod.LabelSelector) > 0 {
selector = "selector: " + labels.Set(devPod.LabelSelector).String()
}
if containerName != "" {
selector += "/" + containerName
}
return nameConfig{
name: fmt.Sprintf("%s: Sync %s: %s <-> %s ", devPod.Name, selector, localPath, remotePath),
devPod: devPod,
containerName: containerName,
syncConfig: sc,
}, nil
}
func (cmd *SyncCmd) applyFlagsToSyncConfig(syncConfig *latest.SyncConfig, options targetselector.Options) (targetselector.Options, error) {
if cmd.Path != "" {
syncConfig.Path = cmd.Path
}
if len(cmd.Exclude) > 0 {
syncConfig.ExcludePaths = cmd.Exclude
}
if cmd.UploadOnly {
syncConfig.DisableDownload = cmd.UploadOnly
}
if cmd.DownloadOnly {
syncConfig.DisableUpload = cmd.DownloadOnly
}
// if selection is specified through flags, we don't want to use the loaded
// sync config selection from the devspace.yaml.
if cmd.Container != "" {
options = options.WithContainer(cmd.Container)
}
if cmd.LabelSelector != "" {
options = options.WithLabelSelector(cmd.LabelSelector)
}
if cmd.Pod != "" {
options = options.WithPod(cmd.Pod)
}
if cmd.Namespace != "" {
options = options.WithNamespace(cmd.Namespace)
}
if cmd.DownloadOnInitialSync {
syncConfig.InitialSync = latest.InitialSyncStrategyPreferLocal
} else {
syncConfig.InitialSync = latest.InitialSyncStrategyMirrorLocal
}
if cmd.InitialSync != "" {
if !versions.ValidInitialSyncStrategy(latest.InitialSyncStrategy(cmd.InitialSync)) {
return options, errors.Errorf("--initial-sync is not valid '%s'", cmd.InitialSync) | random_line_split |
||
sync.go | /pkg/util/factory"
"github.com/loft-sh/devspace/pkg/util/survey"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
// SyncCmd is a struct that defines a command call for "sync"
type SyncCmd struct {
*flags.GlobalFlags
LabelSelector string
ImageSelector string
Container string
Pod string
Pick bool
Wait bool
Polling bool
Exclude []string
Path string
InitialSync string
NoWatch bool
DownloadOnInitialSync bool
DownloadOnly bool
UploadOnly bool
// used for testing to allow interruption
Ctx context.Context
}
// NewSyncCmd creates a new sync command
func NewSyncCmd(f factory.Factory, globalFlags *flags.GlobalFlags) *cobra.Command | upgrade.PrintUpgradeMessage(f.GetLog())
plugin.SetPluginCommand(cobraCmd, args)
return cmd.Run(f)
},
}
syncCmd.Flags().StringVarP(&cmd.Container, "container", "c", "", "Container name within pod where to sync to")
syncCmd.Flags().StringVar(&cmd.Pod, "pod", "", "Pod to sync to")
syncCmd.Flags().StringVarP(&cmd.LabelSelector, "label-selector", "l", "", "Comma separated key=value selector list (e.g. release=test)")
syncCmd.Flags().StringVar(&cmd.ImageSelector, "image-selector", "", "The image to search a pod for (e.g. nginx, nginx:latest, ${runtime.images.app}, nginx:${runtime.images.app.tag})")
syncCmd.Flags().BoolVar(&cmd.Pick, "pick", true, "Select a pod")
syncCmd.Flags().StringSliceVarP(&cmd.Exclude, "exclude", "e", []string{}, "Exclude directory from sync")
syncCmd.Flags().StringVar(&cmd.Path, "path", "", "Path to use (Default is current directory). Example: ./local-path:/remote-path or local-path:.")
syncCmd.Flags().BoolVar(&cmd.DownloadOnInitialSync, "download-on-initial-sync", true, "DEPRECATED: Downloads all locally non existing remote files in the beginning")
syncCmd.Flags().StringVar(&cmd.InitialSync, "initial-sync", "", "The initial sync strategy to use (mirrorLocal, mirrorRemote, preferLocal, preferRemote, preferNewest, keepAll)")
syncCmd.Flags().BoolVar(&cmd.NoWatch, "no-watch", false, "Synchronizes local and remote and then stops")
syncCmd.Flags().BoolVar(&cmd.UploadOnly, "upload-only", false, "If set DevSpace will only upload files")
syncCmd.Flags().BoolVar(&cmd.DownloadOnly, "download-only", false, "If set DevSpace will only download files")
syncCmd.Flags().BoolVar(&cmd.Wait, "wait", true, "Wait for the pod(s) to start if they are not running")
syncCmd.Flags().BoolVar(&cmd.Polling, "polling", false, "If polling should be used to detect file changes in the container")
return syncCmd
}
type nameConfig struct {
name string
devPod *latest.DevPod
containerName string
syncConfig *latest.SyncConfig
}
// Run executes the command logic
func (cmd *SyncCmd) Run(f factory.Factory) error {
if cmd.Ctx == nil {
var cancelFn context.CancelFunc
cmd.Ctx, cancelFn = context.WithCancel(context.Background())
defer cancelFn()
}
// Switch working directory
if cmd.ConfigPath != "" {
_, err := os.Stat(cmd.ConfigPath)
if err != nil {
return errors.Errorf("--config is specified, but config %s cannot be loaded: %v", cmd.GlobalFlags.ConfigPath, err)
}
}
// Load generated config if possible
var err error
var localCache localcache.Cache
logger := f.GetLog()
configOptions := cmd.ToConfigOptions()
configLoader, err := f.NewConfigLoader(cmd.ConfigPath)
if err != nil {
return err
}
if configLoader.Exists() {
if cmd.GlobalFlags.ConfigPath != "" {
configExists, err := configLoader.SetDevSpaceRoot(logger)
if err != nil {
return err
} else if !configExists {
return errors.New(message.ConfigNotFound)
}
localCache, err = configLoader.LoadLocalCache()
if err != nil {
return err
}
} else {
logger.Warnf("If you want to use the sync paths from `devspace.yaml`, use the `--config=devspace.yaml` flag for this command.")
}
}
// Get config with adjusted cluster config
client, err := f.NewKubeClientFromContext(cmd.KubeContext, cmd.Namespace)
if err != nil {
return errors.Wrap(err, "new kube client")
}
// If the current kube context or namespace is different from old,
// show warnings and reset kube client if necessary
client, err = kubectl.CheckKubeContext(client, localCache, cmd.NoWarn, cmd.SwitchContext, false, logger)
if err != nil {
return err
}
var configInterface config.Config
if configLoader.Exists() && cmd.GlobalFlags.ConfigPath != "" {
configInterface, err = configLoader.LoadWithCache(context.Background(), localCache, client, configOptions, logger)
if err != nil {
return err
}
}
// create the devspace context
ctx := devspacecontext.NewContext(cmd.Ctx, nil, logger).
WithConfig(configInterface).
WithKubeClient(client)
// Execute plugin hook
err = hook.ExecuteHooks(ctx, nil, "sync")
if err != nil {
return err
}
// get image selector if specified
imageSelector, err := getImageSelector(ctx, configLoader, configOptions, cmd.ImageSelector)
if err != nil {
return err
}
// Build params
options := targetselector.NewOptionsFromFlags(cmd.Container, cmd.LabelSelector, imageSelector, cmd.Namespace, cmd.Pod).
WithPick(cmd.Pick).
WithWait(cmd.Wait)
if cmd.DownloadOnly && cmd.UploadOnly {
return errors.New("--upload-only cannot be used together with --download-only")
}
// Create the sync config to apply
syncConfig := nameConfig{
devPod: &latest.DevPod{},
syncConfig: &latest.SyncConfig{},
}
if cmd.GlobalFlags.ConfigPath != "" && configInterface != nil {
devSection := configInterface.Config().Dev
syncConfigs := []nameConfig{}
for _, v := range devSection {
loader.EachDevContainer(v, func(devContainer *latest.DevContainer) bool {
for _, s := range devContainer.Sync {
n, err := fromSyncConfig(v, devContainer.Container, s)
if err != nil {
return true
}
syncConfigs = append(syncConfigs, n)
}
return true
})
}
if len(syncConfigs) == 0 {
return fmt.Errorf("no sync config found in %s", cmd.GlobalFlags.ConfigPath)
}
// Check which sync config should be used
if len(syncConfigs) > 1 {
// Select syncConfig to use
syncConfigNames := []string{}
for _, sc := range syncConfigs {
syncConfigNames = append(syncConfigNames, sc.name)
}
answer, err := logger.Question(&survey.QuestionOptions{
Question: "Multiple sync configurations found. Which one do you want to use?",
DefaultValue: syncConfigNames[0],
Options: syncConfigNames,
})
if err != nil {
return err
}
for idx, n := range syncConfigNames {
if answer == n {
syncConfig = syncConfigs[idx]
break
}
}
} else {
syncConfig = syncConfigs[0]
}
}
// apply the flags to the empty sync config or loaded sync config from the devspace.yaml
var configImageSelector []string
if syncConfig.devPod.ImageSelector != "" {
imageSelector, err := runtimevar.NewRuntimeResolver(ctx.WorkingDir(), true).FillRuntimeVariablesAsImageSelector(ctx.Context(), syncConfig.devPod.ImageSelector, ctx.Config(), ctx.Dependencies())
if err != nil {
return err
}
configImageSelector = []string{imageSelector.Image}
}
options = options.ApplyConfigParameter(syncConfig.containerName, syncConfig.devPod.LabelSelector, configImageSelector, syncConfig.devPod.Namespace | {
cmd := &SyncCmd{GlobalFlags: globalFlags}
syncCmd := &cobra.Command{
Use: "sync",
Short: "Starts a bi-directional sync between the target container and the local path",
Long: `
#############################################################################
################### devspace sync ###########################################
#############################################################################
Starts a bi-directional (default) sync between the target container path
and local path:
devspace sync --path=.:/app # localPath is current dir and remotePath is /app
devspace sync --path=.:/app --image-selector nginx:latest
devspace sync --path=.:/app --exclude=node_modules,test
devspace sync --path=.:/app --pod=my-pod --container=my-container
#############################################################################`,
RunE: func(cobraCmd *cobra.Command, args []string) error {
// Print upgrade message if new version available | identifier_body |
sync.go | /pkg/util/factory"
"github.com/loft-sh/devspace/pkg/util/survey"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
// SyncCmd is a struct that defines a command call for "sync"
type SyncCmd struct {
*flags.GlobalFlags
LabelSelector string
ImageSelector string
Container string
Pod string
Pick bool
Wait bool
Polling bool
Exclude []string
Path string
InitialSync string
NoWatch bool
DownloadOnInitialSync bool
DownloadOnly bool
UploadOnly bool
// used for testing to allow interruption
Ctx context.Context
}
// NewSyncCmd creates a new sync command
func NewSyncCmd(f factory.Factory, globalFlags *flags.GlobalFlags) *cobra.Command {
cmd := &SyncCmd{GlobalFlags: globalFlags}
syncCmd := &cobra.Command{
Use: "sync",
Short: "Starts a bi-directional sync between the target container and the local path",
Long: `
#############################################################################
################### devspace sync ###########################################
#############################################################################
Starts a bi-directional (default) sync between the target container path
and local path:
devspace sync --path=.:/app # localPath is current dir and remotePath is /app
devspace sync --path=.:/app --image-selector nginx:latest
devspace sync --path=.:/app --exclude=node_modules,test
devspace sync --path=.:/app --pod=my-pod --container=my-container
#############################################################################`,
RunE: func(cobraCmd *cobra.Command, args []string) error {
// Print upgrade message if new version available
upgrade.PrintUpgradeMessage(f.GetLog())
plugin.SetPluginCommand(cobraCmd, args)
return cmd.Run(f)
},
}
syncCmd.Flags().StringVarP(&cmd.Container, "container", "c", "", "Container name within pod where to sync to")
syncCmd.Flags().StringVar(&cmd.Pod, "pod", "", "Pod to sync to")
syncCmd.Flags().StringVarP(&cmd.LabelSelector, "label-selector", "l", "", "Comma separated key=value selector list (e.g. release=test)")
syncCmd.Flags().StringVar(&cmd.ImageSelector, "image-selector", "", "The image to search a pod for (e.g. nginx, nginx:latest, ${runtime.images.app}, nginx:${runtime.images.app.tag})")
syncCmd.Flags().BoolVar(&cmd.Pick, "pick", true, "Select a pod")
syncCmd.Flags().StringSliceVarP(&cmd.Exclude, "exclude", "e", []string{}, "Exclude directory from sync")
syncCmd.Flags().StringVar(&cmd.Path, "path", "", "Path to use (Default is current directory). Example: ./local-path:/remote-path or local-path:.")
syncCmd.Flags().BoolVar(&cmd.DownloadOnInitialSync, "download-on-initial-sync", true, "DEPRECATED: Downloads all locally non-existing remote files in the beginning")
syncCmd.Flags().StringVar(&cmd.InitialSync, "initial-sync", "", "The initial sync strategy to use (mirrorLocal, mirrorRemote, preferLocal, preferRemote, preferNewest, keepAll)")
syncCmd.Flags().BoolVar(&cmd.NoWatch, "no-watch", false, "Synchronizes local and remote and then stops")
syncCmd.Flags().BoolVar(&cmd.UploadOnly, "upload-only", false, "If set DevSpace will only upload files")
syncCmd.Flags().BoolVar(&cmd.DownloadOnly, "download-only", false, "If set DevSpace will only download files")
syncCmd.Flags().BoolVar(&cmd.Wait, "wait", true, "Wait for the pod(s) to start if they are not running")
syncCmd.Flags().BoolVar(&cmd.Polling, "polling", false, "If polling should be used to detect file changes in the container")
return syncCmd
}
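// Illustrative wiring, not part of this file: the returned command is typically attached to a
// parent cobra command during CLI setup, e.g. rootCmd.AddCommand(NewSyncCmd(f, globalFlags)).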
type nameConfig struct {
name string
devPod *latest.DevPod
containerName string
syncConfig *latest.SyncConfig
}
// Run executes the command logic
func (cmd *SyncCmd) | (f factory.Factory) error {
if cmd.Ctx == nil {
var cancelFn context.CancelFunc
cmd.Ctx, cancelFn = context.WithCancel(context.Background())
defer cancelFn()
}
// Switch working directory
if cmd.ConfigPath != "" {
_, err := os.Stat(cmd.ConfigPath)
if err != nil {
return errors.Errorf("--config is specified, but config %s cannot be loaded: %v", cmd.GlobalFlags.ConfigPath, err)
}
}
// Load generated config if possible
var err error
var localCache localcache.Cache
logger := f.GetLog()
configOptions := cmd.ToConfigOptions()
configLoader, err := f.NewConfigLoader(cmd.ConfigPath)
if err != nil {
return err
}
if configLoader.Exists() {
if cmd.GlobalFlags.ConfigPath != "" {
configExists, err := configLoader.SetDevSpaceRoot(logger)
if err != nil {
return err
} else if !configExists {
return errors.New(message.ConfigNotFound)
}
localCache, err = configLoader.LoadLocalCache()
if err != nil {
return err
}
} else {
logger.Warnf("If you want to use the sync paths from `devspace.yaml`, use the `--config=devspace.yaml` flag for this command.")
}
}
// Get config with adjusted cluster config
client, err := f.NewKubeClientFromContext(cmd.KubeContext, cmd.Namespace)
if err != nil {
return errors.Wrap(err, "new kube client")
}
// If the current kube context or namespace is different from old,
// show warnings and reset kube client if necessary
client, err = kubectl.CheckKubeContext(client, localCache, cmd.NoWarn, cmd.SwitchContext, false, logger)
if err != nil {
return err
}
var configInterface config.Config
if configLoader.Exists() && cmd.GlobalFlags.ConfigPath != "" {
configInterface, err = configLoader.LoadWithCache(context.Background(), localCache, client, configOptions, logger)
if err != nil {
return err
}
}
// create the devspace context
ctx := devspacecontext.NewContext(cmd.Ctx, nil, logger).
WithConfig(configInterface).
WithKubeClient(client)
// Execute plugin hook
err = hook.ExecuteHooks(ctx, nil, "sync")
if err != nil {
return err
}
// get image selector if specified
imageSelector, err := getImageSelector(ctx, configLoader, configOptions, cmd.ImageSelector)
if err != nil {
return err
}
// Build params
options := targetselector.NewOptionsFromFlags(cmd.Container, cmd.LabelSelector, imageSelector, cmd.Namespace, cmd.Pod).
WithPick(cmd.Pick).
WithWait(cmd.Wait)
if cmd.DownloadOnly && cmd.UploadOnly {
return errors.New("--upload-only cannot be used together with --download-only")
}
// Create the sync config to apply
syncConfig := nameConfig{
devPod: &latest.DevPod{},
syncConfig: &latest.SyncConfig{},
}
if cmd.GlobalFlags.ConfigPath != "" && configInterface != nil {
devSection := configInterface.Config().Dev
syncConfigs := []nameConfig{}
for _, v := range devSection {
loader.EachDevContainer(v, func(devContainer *latest.DevContainer) bool {
for _, s := range devContainer.Sync {
n, err := fromSyncConfig(v, devContainer.Container, s)
if err != nil {
return true
}
syncConfigs = append(syncConfigs, n)
}
return true
})
}
if len(syncConfigs) == 0 {
return fmt.Errorf("no sync config found in %s", cmd.GlobalFlags.ConfigPath)
}
// Check which sync config should be used
if len(syncConfigs) > 1 {
// Select syncConfig to use
syncConfigNames := []string{}
for _, sc := range syncConfigs {
syncConfigNames = append(syncConfigNames, sc.name)
}
answer, err := logger.Question(&survey.QuestionOptions{
Question: "Multiple sync configurations found. Which one do you want to use?",
DefaultValue: syncConfigNames[0],
Options: syncConfigNames,
})
if err != nil {
return err
}
for idx, n := range syncConfigNames {
if answer == n {
syncConfig = syncConfigs[idx]
break
}
}
} else {
syncConfig = syncConfigs[0]
}
}
// apply the flags to the empty sync config or loaded sync config from the devspace.yaml
var configImageSelector []string
if syncConfig.devPod.ImageSelector != "" {
imageSelector, err := runtimevar.NewRuntimeResolver(ctx.WorkingDir(), true).FillRuntimeVariablesAsImageSelector(ctx.Context(), syncConfig.devPod.ImageSelector, ctx.Config(), ctx.Dependencies())
if err != nil {
return err
}
configImageSelector = []string{imageSelector.Image}
}
options = options.ApplyConfigParameter(syncConfig.containerName, syncConfig.devPod.LabelSelector, configImageSelector, syncConfig.devPod | Run | identifier_name |
read_pool.rs | Extras;
use yatp::task::future::TaskCell;
/// A read pool.
/// This is a wrapper around a yatp pool.
/// It is used to limit the number of concurrent reads.
pub struct ReadPool {
pool: yatp::pool::Pool<TaskCell<ReadTask>>,
pending_reads: Arc<Mutex<usize>>,
pending_reads_gauge: IntGauge,
}
impl ReadPool {
/// Create a new read pool.
/// `max_concurrent_reads` is the maximum number of concurrent reads.
/// `remote` is the remote to use for the pool.
/// `extras` are the extras to use for the pool.
/// `pending_reads_gauge` is the gauge to use to track the number of pending reads.
pub fn new(
max_concurrent_reads: usize,
remote: Remote,
extras: Extras,
pending_reads_gauge: IntGauge,
) -> Self {
let pool = yatp::pool::Pool::new(
max_concurrent_reads,
remote,
extras,
);
Self {
pool,
pending_reads: Arc::new(Mutex::new(0)),
pending_reads_gauge,
}
}
pub fn spawn<F>(&self, f: F) -> oneshot::Receiver<()>
where
F: Future<Output = ()> + Send + 'static,
{
let (tx, rx) = oneshot::channel();
let f = f.map(|_| ()).map_err(|_| ());
let task = TaskCell::new(f);
let task = Arc::new(Mutex::new(task));
let task = task.clone();
let task = self.pool.spawn(Remote::new(move |_| {
let task = task.lock().unwrap();
task.run()
}));
self.pending_reads_gauge.inc();
task.unwrap().map(move |_| {
self.pending_reads_gauge.dec();
tx.send(()).unwrap();
});
rx
}
}
impl ReadPool {
pub fn handle(&self) -> ReadPoolHandle {
match self {
ReadPool::FuturePools {
read_pool_high,
read_pool_normal,
read_pool_low,
} => ReadPoolHandle::FuturePools {
read_pool_high: read_pool_high.clone(),
read_pool_normal: read_pool_normal.clone(),
read_pool_low: read_pool_low.clone(),
},
ReadPool::Yatp {
pool,
running_tasks,
max_tasks,
pool_size,
} => ReadPoolHandle::Yatp {
remote: pool.remote().clone(),
running_tasks: running_tasks.clone(),
max_tasks: *max_tasks,
pool_size: *pool_size,
},
}
}
}
#[derive(Clone)]
pub enum ReadPoolHandle {
FuturePools {
read_pool_high: FuturePool,
read_pool_normal: FuturePool,
read_pool_low: FuturePool,
},
Yatp {
remote: Remote<TaskCell>,
running_tasks: IntGauge,
max_tasks: usize,
pool_size: usize,
},
}
impl ReadPoolHandle {
pub fn spawn<F>(&self, f: F, priority: CommandPri, task_id: u64) -> Result<(), ReadPoolError>
where
F: Future<Output = ()> + Send + 'static,
{
match self {
ReadPoolHandle::FuturePools {
read_pool_high,
read_pool_normal,
read_pool_low,
} => |
ReadPoolHandle::Yatp {
remote,
running_tasks,
max_tasks,
..
} => {
let running_tasks = running_tasks.clone();
// Note that the running task number limit is not strict.
// If several tasks are spawned at the same time while the running task number
// is close to the limit, they may all pass this check and the number of running
// tasks may exceed the limit.
if running_tasks.get() as usize >= *max_tasks {
return Err(ReadPoolError::UnifiedReadPoolFull);
}
running_tasks.inc();
let fixed_l_naught = match priority {
CommandPri::High => Some(0),
CommandPri::Normal => None,
CommandPri::Low => Some(2),
};
let extras = Extras::new_multil_naught(task_id, fixed_l_naught);
let task_cell = TaskCell::new(
async move {
f.await;
running_tasks.dec();
},
extras,
);
remote.spawn(task_cell);
}
}
Ok(())
}
pub fn spawn_handle<F, T>(
&self,
f: F,
priority: CommandPri,
task_id: u64,
) -> impl Future<Output = Result<T, ReadPoolError>>
where
F: Future<Output = T> + Send + 'static,
T: Send + 'static,
{
let (tx, rx) = oneshot::channel::<T>();
let res = self.spawn(
async move {
let res = f.await;
let _ = tx.send(res);
},
priority,
task_id,
);
async move {
res?;
rx.map_err(ReadPoolError::from).await
}
}
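// Illustrative call site for `spawn_handle` (assuming an async caller holding a
// `handle: ReadPoolHandle`; `do_read` and `task_id` below are placeholders, not part of this crate):
//
// let value = handle
//     .spawn_handle(async move { do_read().await }, CommandPri::Normal, task_id)
//     .await?;
//
// The returned future resolves to an error such as `ReadPoolError::UnifiedReadPoolFull`
// when the pool rejects the task, so callers can apply backpressure instead of queueing.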
pub fn get_normal_pool_size(&self) -> usize {
match self {
ReadPoolHandle::FuturePools {
read_pool_normal, ..
} => read_pool_normal.get_pool_size(),
ReadPoolHandle::Yatp { pool_size, .. } => *pool_size,
}
}
pub fn get_queue_size_per_worker(&self) -> usize {
match self {
ReadPoolHandle::FuturePools {
read_pool_normal, ..
} => {
read_pool_normal.get_running_task_count() as usize
/ read_pool_normal.get_pool_size()
}
ReadPoolHandle::Yatp {
running_tasks,
pool_size,
..
} => running_tasks.get() as usize / *pool_size,
}
}
}
#[derive(Clone)]
pub struct ReporterTicker<R: SymplecticStatsReporter> {
reporter: R,
}
impl<R: SymplecticStatsReporter> PoolTicker for ReporterTicker<R> {
fn on_tick(&mut self) {
self.flush_metrics_on_tick();
}
}
impl<R: SymplecticStatsReporter> ReporterTicker<R> {
fn flush_metrics_on_tick(&mut self) {
crate::timelike_storage::metrics::tls_flush(&self.reporter);
crate::InterDagger::metrics::tls_flush(&self.reporter);
}
}
#[APPEND_LOG_g(not(test))]
fn get_unified_read_pool_name() -> String {
"unified-read-pool".to_string()
}
pub fn build_yatp_read_pool<E: Engine, R: SymplecticStatsReporter>(
config: &UnifiedReadPoolConfig,
reporter: R,
interlocking_directorate: E,
) -> ReadPool {
let pool_size = config.pool_size;
let queue_size_per_worker = config.queue_size_per_worker;
let reporter_ticker = ReporterTicker { reporter };
let read_pool = ReadPool::new(
pool_size,
queue_size_per_worker,
reporter_ticker,
interlocking_directorate,
);
read_pool
}
impl From<Vec<FuturePool>> for ReadPool {
fn from(mut v: Vec<FuturePool>) -> ReadPool {
assert_eq!(v.len(), 3);
let read_pool_high = v.remove(2);
let read_pool_normal = v.remove(1);
let read_pool_low = v.remove(0);
ReadPool::FuturePools {
read_pool_high,
read_pool_normal,
read_pool_low,
}
}
}
#[derive(Debug, Error)]
pub enum ReadPoolError {
#[error("{0}")]
FuturePoolFull(#[from] yatp_pool::Full),
#[error("Unified read pool is full")]
UnifiedReadPoolFull,
#[error("{0}")]
Canceled(#[from] oneshot::Canceled),
}
mod metrics {
use prometheus::*;
lazy_static! {
pub static ref UNIFIED_READ_POOL_RUNNING_TASKS: IntGaugeVec = register_int_gauge_vec!(
"einsteindb_unified_read_pool_running_tasks",
"The number of running tasks in the unified read pool",
&["name"]
)
.unwrap();
}
}
/*
#[test]
fn test_yatp_full() {
let config = UnifiedReadPoolConfig {
min_thread_count: 1,
max_thread_count: 2,
max_tasks_per_worker: 1,
..Default::default()
};
// max running tasks number should be 2*1 = 2
let InterlockingDirectorate = TestEngineBuilder::new().build().unwrap();
let pool = build_yatp_read_pool(&config, DummyReporter, InterlockingDirectorate);
let gen_task = || {
let (tx, rx) = oneshot::channel::<()>();
let task = async move {
let _ = rx.await;
};
(task, tx)
};
let handle = pool.handle();
let (task1, tx1) = gen_task();
let (task | {
let pool = match priority {
CommandPri::High => read_pool_high,
CommandPri::Normal => read_pool_normal,
CommandPri::Low => read_pool_low,
};
pool.spawn(f)?;
} | conditional_block |
read_pool.rs | Extras;
use yatp::task::future::TaskCell;
/// A read pool.
/// This is a wrapper around a yatp pool.
/// It is used to limit the number of concurrent reads.
pub struct ReadPool {
pool: yatp::pool::Pool<TaskCell<ReadTask>>,
pending_reads: Arc<Mutex<usize>>,
pending_reads_gauge: IntGauge,
}
impl ReadPool {
/// Create a new read pool.
/// `max_concurrent_reads` is the maximum number of concurrent reads.
/// `remote` is the remote to use for the pool.
/// `extras` are the extras to use for the pool.
/// `pending_reads_gauge` is the gauge to use to track the number of pending reads.
pub fn new(
max_concurrent_reads: usize,
remote: Remote,
extras: Extras,
pending_reads_gauge: IntGauge,
) -> Self {
let pool = yatp::pool::Pool::new(
max_concurrent_reads,
remote,
extras,
);
Self {
pool,
pending_reads: Arc::new(Mutex::new(0)),
pending_reads_gauge,
}
}
pub fn spawn<F>(&self, f: F) -> oneshot::Receiver<()>
where
F: Future<Output = ()> + Send + 'static,
{
let (tx, rx) = oneshot::channel();
let f = f.map(|_| ()).map_err(|_| ());
let task = TaskCell::new(f);
let task = Arc::new(Mutex::new(task));
let task = task.clone();
let task = self.pool.spawn(Remote::new(move |_| {
let task = task.lock().unwrap();
task.run()
}));
self.pending_reads_gauge.inc();
task.unwrap().map(move |_| {
self.pending_reads_gauge.dec();
tx.send(()).unwrap();
});
rx
}
}
impl ReadPool {
pub fn handle(&self) -> ReadPoolHandle {
match self {
ReadPool::FuturePools {
read_pool_high,
read_pool_normal,
read_pool_low,
} => ReadPoolHandle::FuturePools {
read_pool_high: read_pool_high.clone(),
read_pool_normal: read_pool_normal.clone(),
read_pool_low: read_pool_low.clone(),
},
ReadPool::Yatp { | remote: pool.remote().clone(),
running_tasks: running_tasks.clone(),
max_tasks: *max_tasks,
pool_size: *pool_size,
},
}
}
}
#[derive(Clone)]
pub enum ReadPoolHandle {
FuturePools {
read_pool_high: FuturePool,
read_pool_normal: FuturePool,
read_pool_low: FuturePool,
},
Yatp {
remote: Remote<TaskCell>,
running_tasks: IntGauge,
max_tasks: usize,
pool_size: usize,
},
}
impl ReadPoolHandle {
pub fn spawn<F>(&self, f: F, priority: CommandPri, task_id: u64) -> Result<(), ReadPoolError>
where
F: Future<Output = ()> + Send + 'static,
{
match self {
ReadPoolHandle::FuturePools {
read_pool_high,
read_pool_normal,
read_pool_low,
} => {
let pool = match priority {
CommandPri::High => read_pool_high,
CommandPri::Normal => read_pool_normal,
CommandPri::Low => read_pool_low,
};
pool.spawn(f)?;
}
ReadPoolHandle::Yatp {
remote,
running_tasks,
max_tasks,
..
} => {
let running_tasks = running_tasks.clone();
// Note that the running task number limit is not strict.
// If several tasks are spawned at the same time while the running task number
// is close to the limit, they may all pass this check and the number of running
// tasks may exceed the limit.
if running_tasks.get() as usize >= *max_tasks {
return Err(ReadPoolError::UnifiedReadPoolFull);
}
running_tasks.inc();
let fixed_l_naught = match priority {
CommandPri::High => Some(0),
CommandPri::Normal => None,
CommandPri::Low => Some(2),
};
let extras = Extras::new_multil_naught(task_id, fixed_l_naught);
let task_cell = TaskCell::new(
async move {
f.await;
running_tasks.dec();
},
extras,
);
remote.spawn(task_cell);
}
}
Ok(())
}
pub fn spawn_handle<F, T>(
&self,
f: F,
priority: CommandPri,
task_id: u64,
) -> impl Future<Output = Result<T, ReadPoolError>>
where
F: Future<Output = T> + Send + 'static,
T: Send + 'static,
{
let (tx, rx) = oneshot::channel::<T>();
let res = self.spawn(
async move {
let res = f.await;
let _ = tx.send(res);
},
priority,
task_id,
);
async move {
res?;
rx.map_err(ReadPoolError::from).await
}
}
pub fn get_normal_pool_size(&self) -> usize {
match self {
ReadPoolHandle::FuturePools {
read_pool_normal, ..
} => read_pool_normal.get_pool_size(),
ReadPoolHandle::Yatp { pool_size, .. } => *pool_size,
}
}
pub fn get_queue_size_per_worker(&self) -> usize {
match self {
ReadPoolHandle::FuturePools {
read_pool_normal, ..
} => {
read_pool_normal.get_running_task_count() as usize
/ read_pool_normal.get_pool_size()
}
ReadPoolHandle::Yatp {
running_tasks,
pool_size,
..
} => running_tasks.get() as usize / *pool_size,
}
}
}
#[derive(Clone)]
pub struct ReporterTicker<R: SymplecticStatsReporter> {
reporter: R,
}
impl<R: SymplecticStatsReporter> PoolTicker for ReporterTicker<R> {
fn on_tick(&mut self) {
self.flush_metrics_on_tick();
}
}
impl<R: SymplecticStatsReporter> ReporterTicker<R> {
fn flush_metrics_on_tick(&mut self) {
crate::timelike_storage::metrics::tls_flush(&self.reporter);
crate::InterDagger::metrics::tls_flush(&self.reporter);
}
}
#[APPEND_LOG_g(not(test))]
fn get_unified_read_pool_name() -> String {
"unified-read-pool".to_string()
}
pub fn build_yatp_read_pool<E: Engine, R: SymplecticStatsReporter>(
config: &UnifiedReadPoolConfig,
reporter: R,
interlocking_directorate: E,
) -> ReadPool {
let pool_size = config.pool_size;
let queue_size_per_worker = config.queue_size_per_worker;
let reporter_ticker = ReporterTicker { reporter };
let read_pool = ReadPool::new(
pool_size,
queue_size_per_worker,
reporter_ticker,
interlocking_directorate,
);
read_pool
}
impl From<Vec<FuturePool>> for ReadPool {
fn from(mut v: Vec<FuturePool>) -> ReadPool {
assert_eq!(v.len(), 3);
let read_pool_high = v.remove(2);
let read_pool_normal = v.remove(1);
let read_pool_low = v.remove(0);
ReadPool::FuturePools {
read_pool_high,
read_pool_normal,
read_pool_low,
}
}
}
#[derive(Debug, Error)]
pub enum ReadPoolError {
#[error("{0}")]
FuturePoolFull(#[from] yatp_pool::Full),
#[error("Unified read pool is full")]
UnifiedReadPoolFull,
#[error("{0}")]
Canceled(#[from] oneshot::Canceled),
}
mod metrics {
use prometheus::*;
lazy_static! {
pub static ref UNIFIED_READ_POOL_RUNNING_TASKS: IntGaugeVec = register_int_gauge_vec!(
"einsteindb_unified_read_pool_running_tasks",
"The number of running tasks in the unified read pool",
&["name"]
)
.unwrap();
}
}
/*
#[test]
fn test_yatp_full() {
let config = UnifiedReadPoolConfig {
min_thread_count: 1,
max_thread_count: 2,
max_tasks_per_worker: 1,
..Default::default()
};
// max running tasks number should be 2*1 = 2
let InterlockingDirectorate = TestEngineBuilder::new().build().unwrap();
let pool = build_yatp_read_pool(&config, DummyReporter, InterlockingDirectorate);
let gen_task = || {
let (tx, rx) = oneshot::channel::<()>();
let task = async move {
let _ = rx.await;
};
(task, tx)
};
let handle = pool.handle();
let (task1, tx1) = gen_task();
let (task2, | pool,
running_tasks,
max_tasks,
pool_size,
} => ReadPoolHandle::Yatp { | random_line_split |
read_pool.rs | Extras;
use yatp::task::future::TaskCell;
/// A read pool.
/// This is a wrapper around a yatp pool.
/// It is used to limit the number of concurrent reads.
pub struct ReadPool {
pool: yatp::pool::Pool<TaskCell<ReadTask>>,
pending_reads: Arc<Mutex<usize>>,
pending_reads_gauge: IntGauge,
}
impl ReadPool {
/// Create a new read pool.
/// `max_concurrent_reads` is the maximum number of concurrent reads.
/// `remote` is the remote to use for the pool.
/// `extras` are the extras to use for the pool.
/// `pending_reads_gauge` is the gauge to use to track the number of pending reads.
pub fn | (
max_concurrent_reads: usize,
remote: Remote,
extras: Extras,
pending_reads_gauge: IntGauge,
) -> Self {
let pool = yatp::pool::Pool::new(
max_concurrent_reads,
remote,
extras,
);
Self {
pool,
pending_reads: Arc::new(Mutex::new(0)),
pending_reads_gauge,
}
}
pub fn spawn<F>(&self, f: F) -> oneshot::Receiver<()>
where
F: Future<Output = ()> + Send + 'static,
{
let (tx, rx) = oneshot::channel();
let f = f.map(|_| ()).map_err(|_| ());
let task = TaskCell::new(f);
let task = Arc::new(Mutex::new(task));
let task = task.clone();
let task = self.pool.spawn(Remote::new(move |_| {
let task = task.lock().unwrap();
task.run()
}));
self.pending_reads_gauge.inc();
task.unwrap().map(move |_| {
self.pending_reads_gauge.dec();
tx.send(()).unwrap();
});
rx
}
}
impl ReadPool {
pub fn handle(&self) -> ReadPoolHandle {
match self {
ReadPool::FuturePools {
read_pool_high,
read_pool_normal,
read_pool_low,
} => ReadPoolHandle::FuturePools {
read_pool_high: read_pool_high.clone(),
read_pool_normal: read_pool_normal.clone(),
read_pool_low: read_pool_low.clone(),
},
ReadPool::Yatp {
pool,
running_tasks,
max_tasks,
pool_size,
} => ReadPoolHandle::Yatp {
remote: pool.remote().clone(),
running_tasks: running_tasks.clone(),
max_tasks: *max_tasks,
pool_size: *pool_size,
},
}
}
}
#[derive(Clone)]
pub enum ReadPoolHandle {
FuturePools {
read_pool_high: FuturePool,
read_pool_normal: FuturePool,
read_pool_low: FuturePool,
},
Yatp {
remote: Remote<TaskCell>,
running_tasks: IntGauge,
max_tasks: usize,
pool_size: usize,
},
}
impl ReadPoolHandle {
pub fn spawn<F>(&self, f: F, priority: CommandPri, task_id: u64) -> Result<(), ReadPoolError>
where
F: Future<Output = ()> + Send + 'static,
{
match self {
ReadPoolHandle::FuturePools {
read_pool_high,
read_pool_normal,
read_pool_low,
} => {
let pool = match priority {
CommandPri::High => read_pool_high,
CommandPri::Normal => read_pool_normal,
CommandPri::Low => read_pool_low,
};
pool.spawn(f)?;
}
ReadPoolHandle::Yatp {
remote,
running_tasks,
max_tasks,
..
} => {
let running_tasks = running_tasks.clone();
// Note that the running task number limit is not strict.
// If several tasks are spawned at the same time while the running task number
// is close to the limit, they may all pass this check and the number of running
// tasks may exceed the limit.
if running_tasks.get() as usize >= *max_tasks {
return Err(ReadPoolError::UnifiedReadPoolFull);
}
running_tasks.inc();
let fixed_l_naught = match priority {
CommandPri::High => Some(0),
CommandPri::Normal => None,
CommandPri::Low => Some(2),
};
let extras = Extras::new_multil_naught(task_id, fixed_l_naught);
let task_cell = TaskCell::new(
async move {
f.await;
running_tasks.dec();
},
extras,
);
remote.spawn(task_cell);
}
}
Ok(())
}
pub fn spawn_handle<F, T>(
&self,
f: F,
priority: CommandPri,
task_id: u64,
) -> impl Future<Output = Result<T, ReadPoolError>>
where
F: Future<Output = T> + Send + 'static,
T: Send + 'static,
{
let (tx, rx) = oneshot::channel::<T>();
let res = self.spawn(
async move {
let res = f.await;
let _ = tx.send(res);
},
priority,
task_id,
);
async move {
res?;
rx.map_err(ReadPoolError::from).await
}
}
pub fn get_normal_pool_size(&self) -> usize {
match self {
ReadPoolHandle::FuturePools {
read_pool_normal, ..
} => read_pool_normal.get_pool_size(),
ReadPoolHandle::Yatp { pool_size, .. } => *pool_size,
}
}
pub fn get_queue_size_per_worker(&self) -> usize {
match self {
ReadPoolHandle::FuturePools {
read_pool_normal, ..
} => {
read_pool_normal.get_running_task_count() as usize
/ read_pool_normal.get_pool_size()
}
ReadPoolHandle::Yatp {
running_tasks,
pool_size,
..
} => running_tasks.get() as usize / *pool_size,
}
}
}
#[derive(Clone)]
pub struct ReporterTicker<R: SymplecticStatsReporter> {
reporter: R,
}
impl<R: SymplecticStatsReporter> PoolTicker for ReporterTicker<R> {
fn on_tick(&mut self) {
self.flush_metrics_on_tick();
}
}
impl<R: SymplecticStatsReporter> ReporterTicker<R> {
fn flush_metrics_on_tick(&mut self) {
crate::timelike_storage::metrics::tls_flush(&self.reporter);
crate::InterDagger::metrics::tls_flush(&self.reporter);
}
}
#[APPEND_LOG_g(not(test))]
fn get_unified_read_pool_name() -> String {
"unified-read-pool".to_string()
}
pub fn build_yatp_read_pool<E: Engine, R: SymplecticStatsReporter>(
config: &UnifiedReadPoolConfig,
reporter: R,
interlocking_directorate: E,
) -> ReadPool {
let pool_size = config.pool_size;
let queue_size_per_worker = config.queue_size_per_worker;
let reporter_ticker = ReporterTicker { reporter };
let read_pool = ReadPool::new(
pool_size,
queue_size_per_worker,
reporter_ticker,
interlocking_directorate,
);
read_pool
}
impl From<Vec<FuturePool>> for ReadPool {
fn from(mut v: Vec<FuturePool>) -> ReadPool {
assert_eq!(v.len(), 3);
let read_pool_high = v.remove(2);
let read_pool_normal = v.remove(1);
let read_pool_low = v.remove(0);
ReadPool::FuturePools {
read_pool_high,
read_pool_normal,
read_pool_low,
}
}
}
#[derive(Debug, Error)]
pub enum ReadPoolError {
#[error("{0}")]
FuturePoolFull(#[from] yatp_pool::Full),
#[error("Unified read pool is full")]
UnifiedReadPoolFull,
#[error("{0}")]
Canceled(#[from] oneshot::Canceled),
}
mod metrics {
use prometheus::*;
lazy_static! {
pub static ref UNIFIED_READ_POOL_RUNNING_TASKS: IntGaugeVec = register_int_gauge_vec!(
"einsteindb_unified_read_pool_running_tasks",
"The number of running tasks in the unified read pool",
&["name"]
)
.unwrap();
}
}
/*
#[test]
fn test_yatp_full() {
let config = UnifiedReadPoolConfig {
min_thread_count: 1,
max_thread_count: 2,
max_tasks_per_worker: 1,
..Default::default()
};
// max running tasks number should be 2*1 = 2
let InterlockingDirectorate = TestEngineBuilder::new().build().unwrap();
let pool = build_yatp_read_pool(&config, DummyReporter, InterlockingDirectorate);
let gen_task = || {
let (tx, rx) = oneshot::channel::<()>();
let task = async move {
let _ = rx.await;
};
(task, tx)
};
let handle = pool.handle();
let (task1, tx1) = gen_task();
let (task2 | new | identifier_name |
main.js | 500,
paginationSpeed : 500,
singleItem : true,
navigation : true,
navigationText: [
"<i class='fa fa-angle-left'></i>",
"<i class='fa fa-angle-right'></i>"
],
afterInit : progressBar,
afterMove : moved,
startDragging : pauseOnDragging,
//autoHeight : true,
transitionStyle : "fadeUp"
});
//Init progressBar where elem is $("#owl-demo")
function progressBar(elem){
$elem = elem;
//build progress bar elements
buildProgressBar();
//start counting
start();
}
//create div#progressBar and div#bar then append to $(".owl-carousel")
function buildProgressBar(){
$progressBar = $("<div>",{
id:"progressBar"
});
$bar = $("<div>",{
id:"bar"
});
$progressBar.append($bar).appendTo($elem);
}
function start() {
//reset timer
percentTime = 0;
isPause = false;
//run interval every 0.01 second
tick = setInterval(interval, 10);
};
function interval() {
if(isPause === false){
percentTime += 1 / time;
$bar.css({
width: percentTime+"%"
});
//if percentTime is equal or greater than 100
if(percentTime >= 100){
//slide to next item
$elem.trigger('owl.next')
}
}
}
//pause while dragging
function pauseOnDragging(){
isPause = true;
}
//moved callback
function moved(){
//clear interval
clearTimeout(tick);
//start again
start();
}
});*/
//Initiate WOW JS
new WOW().init();
//smoothScroll
smoothScroll.init();
// portfolio filter
$(window).load(function(){'use strict';
var $portfolio_selectors = $('.portfolio-filter >li>a');
var $portfolio = $('.portfolio-items');
$portfolio.isotope({
itemSelector : '.portfolio-item',
layoutMode : 'fitRows'
});
$portfolio_selectors.on('click', function(){
$portfolio_selectors.removeClass('active');
$(this).addClass('active');
var selector = $(this).attr('data-filter');
$portfolio.isotope({ filter: selector });
return false;
});
});
$(document).ready(function() {
//Animated Progress
$('.progress-bar').bind('inview', function(event, visible, visiblePartX, visiblePartY) {
if (visible) {
$(this).css('width', $(this).data('width') + '%');
$(this).unbind('inview');
}
});
//Animated Number
$.fn.animateNumbers = function(stop, commas, duration, ease) {
return this.each(function() {
var $this = $(this);
var start = parseInt($this.text().replace(/,/g, ""));
commas = (commas === undefined) ? true : commas;
$({value: start}).animate({value: stop}, {
duration: duration == undefined ? 1000 : duration,
easing: ease == undefined ? "swing" : ease,
step: function() {
$this.text(Math.floor(this.value));
if (commas) { $this.text($this.text().replace(/(\d)(?=(\d\d\d)+(?!\d))/g, "$1,")); }
},
complete: function() {
if (parseInt($this.text()) !== stop) |
}
});
});
};
$('.animated-number').bind('inview', function(event, visible, visiblePartX, visiblePartY) {
var $this = $(this);
if (visible) {
$this.animateNumbers($this.data('digit'), false, $this.data('duration'));
$this.unbind('inview');
}
});
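// Illustrative markup the handler above expects (not part of this file):
// <span class="animated-number" data-digit="500" data-duration="2000">0</span>
// The element's current text is the starting value; it counts up to data-digit over
// data-duration milliseconds once the element scrolls into view.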
});
// Contact form
/*var form = $('#main-contact-form');
form.submit(function(event){
event.preventDefault();
var form_status = $('<div class="form_status"></div>');
$.ajax({
url: $(this).attr('action'),
beforeSend: function(){
form.prepend( form_status.html('<p><i class="fa fa-spinner fa-spin"></i> Email is sending...</p>').fadeIn() );
}
}).done(function(data){
form_status.html('<p class="text-success">Thank you for contacting us. We will get in touch with you as soon as possible.</p>').delay(3000).fadeOut();
});
});*/
//Pretty Photo
$("a[rel^='prettyPhoto']").prettyPhoto({
social_tools: false
});
//Google Map
/*var latitude = $('#google-map').data('latitude');
var longitude = $('#google-map').data('longitude');
function initialize_map() {
var myLatlng = new google.maps.LatLng(latitude,longitude);
var mapOptions = {
zoom: 14,
scrollwheel: false,
center: myLatlng
};
var map = new google.maps.Map(document.getElementById('google-map'), mapOptions);
var marker = new google.maps.Marker({
position: myLatlng,
map: map
});
}
google.maps.event.addDomListener(window, 'load', initialize_map);*/
//birthday new js
$("#howitworks").on('click',function(){
$("#portfolio").show();
$(".city").show();
$("#contact").show();
$("#city_sel").val("bangalore");
var arr = [
{val : "", text: "Preferred Lunch Option"},
{val : "Om Made Cafe, Koramangala", text: "Om Made Cafe, Koramangala"},
{val : "100 Ft Bar Boutique Restaurant, Indiranagar", text: "100 Ft Bar Boutique Restaurant, Indiranagar"},
{val : "Bluefrog, 3 Church Street", text: "BlueFROG, 3 Church Street"}
];
var sel = $('<select class="form-control" id="lunch_option" name="lunch_option">').appendTo('#lunch-sel');
$(arr).each(function() {
sel.append($("<option>").attr('value',this.val).text(this.text));
});
$('#twitter-share').twitterbutton({
title:'I want to taste the #TheGoodLife with @Wow_Tables because ',
layout:'none',
ontweet:function(response){
},
lang:'en'
});
});
$("#wowtbales_mumbai_menu").on('click',function(){
$(".options-display-mumbai").show();
$(".options-display-delhi").hide();
$(".options-display-bangalore").hide();
$(".options-display-pune").hide();
$("#contact").show();
$("#city_sel").val("mumbai");
/*$("#lunch-option").append($("<option>").attr("value", "Hakkasan,BKC").attr("selected", "selected").text("Hakkasan,BKC"));
$('#lunch-option').selectmenu('refresh');
$('#lunch-option').prop("readonly",true);*/
//this is for dynamic lunch option
$(".lunch-sel").empty();
$(".lunch-sel").html("<input type='hidden' id='lunch_option' name='lunch_option' value='Hakkasan, Linking Road, Bandra'>");
});
$("#wowtbales_delhi_menu").on('click',function(){
$(".options-display-mumbai").hide();
$(".options-display-delhi").show();
$(".options-display-bangalore").hide();
$(".options-display-pune").hide();
$("#contact").show();
$("#city_sel").val("delhi");
$(".lunch-sel").empty();
var arr = [
{val : "", text: "Preferred Lunch Option"},
{val : "Caffe Tonino, Connaught Place", text: "Caffe Tonino, Connaught Place"},
{val : "Thai High, Mehrauli", text: "Thai High, Mehrauli"}
];
var sel = $('<select class="form-control" id="lunch_option" name="lunch_option">').appendTo('#lunch-sel');
$(arr).each(function() {
sel.append($("<option>").attr('value',this.val).text(this.text));
});
});
$("#wowtbales_banglore_menu").on('click',function(){
$(".options-display-mumbai").hide();
$(".options-display-delhi").hide();
$(".options-display-bangalore").show();
$(".options-display-pune").hide();
$("#contact").show();
$("#city_sel").val("bangalore");
$(".lunch-sel").empty();
var arr = [
{val : "", text: "Preferred Lunch Option"},
{val : "Om Made Cafe, Koramangala", text: "Om Made Cafe, Koramangala"},
{val : | {
$this.text(stop);
if (commas) { $this.text($this.text().replace(/(\d)(?=(\d\d\d)+(?!\d))/g, "$1,")); }
} | conditional_block |
main.js | 500,
paginationSpeed : 500,
singleItem : true,
navigation : true,
navigationText: [
"<i class='fa fa-angle-left'></i>",
"<i class='fa fa-angle-right'></i>"
],
afterInit : progressBar,
afterMove : moved,
startDragging : pauseOnDragging,
//autoHeight : true,
transitionStyle : "fadeUp"
});
//Init progressBar where elem is $("#owl-demo")
function progressBar(elem){
$elem = elem;
//build progress bar elements
buildProgressBar();
//start counting
start();
}
//create div#progressBar and div#bar then append to $(".owl-carousel")
function buildProgressBar(){
$progressBar = $("<div>",{
id:"progressBar"
});
$bar = $("<div>",{
id:"bar"
});
$progressBar.append($bar).appendTo($elem);
}
function start() {
//reset timer
percentTime = 0;
isPause = false;
//run interval every 0.01 second
tick = setInterval(interval, 10);
};
function interval() {
if(isPause === false){
percentTime += 1 / time;
$bar.css({
width: percentTime+"%"
});
//if percentTime is equal or greater than 100
if(percentTime >= 100){
//slide to next item
$elem.trigger('owl.next')
}
}
}
//pause while dragging
function pauseOnDragging(){
isPause = true;
}
//moved callback
function moved(){
//clear interval
clearTimeout(tick);
//start again
start();
}
});*/
//Initiate WOW JS
new WOW().init();
//smoothScroll
smoothScroll.init();
// portfolio filter
$(window).load(function(){'use strict';
var $portfolio_selectors = $('.portfolio-filter >li>a');
var $portfolio = $('.portfolio-items'); | $portfolio.isotope({
itemSelector : '.portfolio-item',
layoutMode : 'fitRows'
});
$portfolio_selectors.on('click', function(){
$portfolio_selectors.removeClass('active');
$(this).addClass('active');
var selector = $(this).attr('data-filter');
$portfolio.isotope({ filter: selector });
return false;
});
});
$(document).ready(function() {
//Animated Progress
$('.progress-bar').bind('inview', function(event, visible, visiblePartX, visiblePartY) {
if (visible) {
$(this).css('width', $(this).data('width') + '%');
$(this).unbind('inview');
}
});
//Animated Number
$.fn.animateNumbers = function(stop, commas, duration, ease) {
return this.each(function() {
var $this = $(this);
var start = parseInt($this.text().replace(/,/g, ""));
commas = (commas === undefined) ? true : commas;
$({value: start}).animate({value: stop}, {
duration: duration == undefined ? 1000 : duration,
easing: ease == undefined ? "swing" : ease,
step: function() {
$this.text(Math.floor(this.value));
if (commas) { $this.text($this.text().replace(/(\d)(?=(\d\d\d)+(?!\d))/g, "$1,")); }
},
complete: function() {
if (parseInt($this.text()) !== stop) {
$this.text(stop);
if (commas) { $this.text($this.text().replace(/(\d)(?=(\d\d\d)+(?!\d))/g, "$1,")); }
}
}
});
});
};
$('.animated-number').bind('inview', function(event, visible, visiblePartX, visiblePartY) {
var $this = $(this);
if (visible) {
$this.animateNumbers($this.data('digit'), false, $this.data('duration'));
$this.unbind('inview');
}
});
});
// Contact form
/*var form = $('#main-contact-form');
form.submit(function(event){
event.preventDefault();
var form_status = $('<div class="form_status"></div>');
$.ajax({
url: $(this).attr('action'),
beforeSend: function(){
form.prepend( form_status.html('<p><i class="fa fa-spinner fa-spin"></i> Email is sending...</p>').fadeIn() );
}
}).done(function(data){
form_status.html('<p class="text-success">Thank you for contacting us. We will get in touch with you as soon as possible.</p>').delay(3000).fadeOut();
});
});*/
//Pretty Photo
$("a[rel^='prettyPhoto']").prettyPhoto({
social_tools: false
});
//Google Map
/*var latitude = $('#google-map').data('latitude');
var longitude = $('#google-map').data('longitude');
function initialize_map() {
var myLatlng = new google.maps.LatLng(latitude,longitude);
var mapOptions = {
zoom: 14,
scrollwheel: false,
center: myLatlng
};
var map = new google.maps.Map(document.getElementById('google-map'), mapOptions);
var marker = new google.maps.Marker({
position: myLatlng,
map: map
});
}
google.maps.event.addDomListener(window, 'load', initialize_map);*/
//birthday new js
$("#howitworks").on('click',function(){
$("#portfolio").show();
$(".city").show();
$("#contact").show();
$("#city_sel").val("bangalore");
var arr = [
{val : "", text: "Preferred Lunch Option"},
{val : "Om Made Cafe, Koramangala", text: "Om Made Cafe, Koramangala"},
{val : "100 Ft Bar Boutique Restaurant, Indiranagar", text: "100 Ft Bar Boutique Restaurant, Indiranagar"},
{val : "Bluefrog, 3 Church Street", text: "BlueFROG, 3 Church Street"}
];
var sel = $('<select class="form-control" id="lunch_option" name="lunch_option">').appendTo('#lunch-sel');
$(arr).each(function() {
sel.append($("<option>").attr('value',this.val).text(this.text));
});
$('#twitter-share').twitterbutton({
title:'I want to taste the #TheGoodLife with @Wow_Tables because ',
layout:'none',
ontweet:function(response){
},
lang:'en'
});
});
$("#wowtbales_mumbai_menu").on('click',function(){
$(".options-display-mumbai").show();
$(".options-display-delhi").hide();
$(".options-display-bangalore").hide();
$(".options-display-pune").hide();
$("#contact").show();
$("#city_sel").val("mumbai");
/*$("#lunch-option").append($("<option>").attr("value", "Hakkasan,BKC").attr("selected", "selected").text("Hakkasan,BKC"));
$('#lunch-option').selectmenu('refresh');
$('#lunch-option').prop("readonly",true);*/
//this is for dynamic lunch option
$(".lunch-sel").empty();
$(".lunch-sel").html("<input type='hidden' id='lunch_option' name='lunch_option' value='Hakkasan, Linking Road, Bandra'>");
});
$("#wowtbales_delhi_menu").on('click',function(){
$(".options-display-mumbai").hide();
$(".options-display-delhi").show();
$(".options-display-bangalore").hide();
$(".options-display-pune").hide();
$("#contact").show();
$("#city_sel").val("delhi");
$(".lunch-sel").empty();
var arr = [
{val : "", text: "Preferred Lunch Option"},
{val : "Caffe Tonino, Connaught Place", text: "Caffe Tonino, Connaught Place"},
{val : "Thai High, Mehrauli", text: "Thai High, Mehrauli"}
];
var sel = $('<select class="form-control" id="lunch_option" name="lunch_option">').appendTo('#lunch-sel');
$(arr).each(function() {
sel.append($("<option>").attr('value',this.val).text(this.text));
});
});
$("#wowtbales_banglore_menu").on('click',function(){
$(".options-display-mumbai").hide();
$(".options-display-delhi").hide();
$(".options-display-bangalore").show();
$(".options-display-pune").hide();
$("#contact").show();
$("#city_sel").val("bangalore");
$(".lunch-sel").empty();
var arr = [
{val : "", text: "Preferred Lunch Option"},
{val : "Om Made Cafe, Koramangala", text: "Om Made Cafe, Koramangala"},
{val : " | random_line_split |
|
operator.go | ]; !ok {
o.wfr.Status.Stages[stage] = &v1alpha1.StageStatus{
Status: v1alpha1.Status{
Phase: v1alpha1.StatusRunning,
},
}
}
o.wfr.Status.Stages[stage].Pod = podInfo
}
// UpdateStageOutputs updates stage outputs; they are key-value results from stage execution
func (o *operator) UpdateStageOutputs(stage string, keyValues []v1alpha1.KeyValue) {
if len(keyValues) == 0 {
return
}
if o.wfr.Status.Stages == nil {
o.wfr.Status.Stages = make(map[string]*v1alpha1.StageStatus)
}
if _, ok := o.wfr.Status.Stages[stage]; !ok {
o.wfr.Status.Stages[stage] = &v1alpha1.StageStatus{
Status: v1alpha1.Status{
Phase: v1alpha1.StatusRunning,
},
}
}
o.wfr.Status.Stages[stage].Outputs = keyValues
}
// OverallStatus calculates the overall status of the WorkflowRun. When a stage has its status
// changed, the change will be updated in WorkflowRun stage status, but the overall status is
// not calculated. So when we observe a WorkflowRun update, we need to calculate its overall
// status and update it if changed.
func (o *operator) OverallStatus() (*v1alpha1.Status, error) {
startTime := o.wfr.ObjectMeta.CreationTimestamp
// If the WorkflowRun has no stage status recorded yet, we resolve the overall status as pending.
if o.wfr.Status.Stages == nil || len(o.wfr.Status.Stages) == 0 {
return &v1alpha1.Status{
Phase: v1alpha1.StatusPending,
LastTransitionTime: metav1.Time{Time: time.Now()},
StartTime: startTime,
}, nil
}
var running, waiting, pending, err bool
for stage, status := range o.wfr.Status.Stages {
switch status.Status.Phase {
case v1alpha1.StatusRunning:
running = true
case v1alpha1.StatusWaiting:
waiting = true
case v1alpha1.StatusFailed:
err = err || !IsTrivial(o.wf, stage)
case v1alpha1.StatusPending:
pending = true
case v1alpha1.StatusSucceeded:
case v1alpha1.StatusCancelled:
err = err || !IsTrivial(o.wf, stage)
default:
log.WithField("stg", stage).
WithField("status", status.Status.Phase).
Error("Unknown stage status observed.")
err = true
}
}
// If there are running stages, resolve the overall status as running.
if running {
return &v1alpha1.Status{
Phase: v1alpha1.StatusRunning,
LastTransitionTime: metav1.Time{Time: time.Now()},
StartTime: startTime,
}, nil
}
// Then if there are waiting stages, resolve the overall status as waiting.
if waiting {
return &v1alpha1.Status{
Phase: v1alpha1.StatusWaiting,
LastTransitionTime: metav1.Time{Time: time.Now()},
StartTime: startTime,
}, nil
}
// Then if there are failed stages, resolve the overall status as failed.
if err {
return &v1alpha1.Status{
Phase: v1alpha1.StatusFailed,
LastTransitionTime: metav1.Time{Time: time.Now()},
StartTime: startTime,
}, nil
}
// If there are still stages waiting to run, we set the status to Running.
// Here we assume all stage statuses have been initialized to Pending before wfr execution.
if pending {
return &v1alpha1.Status{
Phase: v1alpha1.StatusRunning,
LastTransitionTime: metav1.Time{Time: time.Now()},
StartTime: startTime,
}, nil
}
// Finally, all stages have completed and there are no more stages to run. We mark the WorkflowRun
// overall status as Succeeded.
return &v1alpha1.Status{
Phase: v1alpha1.StatusSucceeded,
LastTransitionTime: metav1.Time{Time: time.Now()},
StartTime: startTime,
}, nil
}
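// Summary of the resolution order implemented above: any Running stage makes the overall status
// Running; otherwise a Waiting stage makes it Waiting; otherwise a non-trivial Failed or Cancelled
// stage makes it Failed; otherwise remaining Pending stages keep it Running; only when every stage
// has finished (successfully or as a trivial failure) is the WorkflowRun marked Succeeded.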
func isExceededQuotaError(err error) bool {
if status := errors.APIStatus(nil); stderr.As(err, &status) {
s := status.Status()
return s.Reason == metav1.StatusReasonForbidden && strings.Contains(s.Message, "exceeded quota:")
}
return false
}
// Reconcile finds the next stages in the workflow to run and resolves the WorkflowRun's overall status.
func (o *operator) Reconcile() (controller.Result, error) {
var res controller.Result
if o.wfr.Status.Stages == nil {
o.InitStagesStatus()
}
// Get next stages that need to be run.
nextStages := NextStages(o.wf, o.wfr)
if len(nextStages) == 0 {
log.WithField("wfr", o.wfr.Name).Debug("No next stages to run")
} else {
log.WithField("stg", nextStages).Info("Next stages to run")
}
overall, err := o.OverallStatus()
if err != nil {
return res, fmt.Errorf("resolve overall status error: %v", err)
}
o.wfr.Status.Overall = *overall
// Return if no stages need to run.
if len(nextStages) == 0 {
err = o.Update()
if err != nil {
log.WithField("wfr", o.wfr.Name).Error("Update status error: ", err)
return res, err
}
return res, nil
}
var retryStageNames []string
// Create pod to run stages.
for _, stage := range nextStages {
log.WithField("stg", stage).Info("Start to run stage")
stg, err := o.client.CycloneV1alpha1().Stages(o.wfr.Namespace).Get(context.TODO(), stage, metav1.GetOptions{})
if err != nil {
log.WithField("stg", stage).Error("Get stage error: ", err)
continue
}
err = NewWorkloadProcessor(o.clusterClient, o.client, o.wf, o.wfr, stg, o).Process()
if err != nil {
if isExceededQuotaError(err) {
retryStageNames = append(retryStageNames, stage)
log.WithField("wfr", o.wfr.Name).WithField("stg", stage).Warning("Process workload error: ", err)
} else {
log.WithField("wfr", o.wfr.Name).WithField("stg", stage).Error("Process workload error: ", err)
}
continue
}
}
if len(retryStageNames) > 0 {
var requeue bool
if HasTimedOut(o.wfr) {
// timed-out. Update stage status and do not requeue
for _, stageName := range retryStageNames {
o.UpdateStageStatus(stageName, &v1alpha1.Status{
Phase: v1alpha1.StatusFailed,
Reason: "RetryOnExceededQuotaTimeout",
LastTransitionTime: metav1.Time{Time: time.Now()},
StartTime: metav1.Time{Time: time.Now()},
})
}
requeue = false
} else {
requeue = true
}
res.Requeue = &requeue
}
overall, err = o.OverallStatus()
if err != nil {
return res, fmt.Errorf("resolve overall status error: %v", err)
}
o.wfr.Status.Overall = *overall
err = o.Update()
if err != nil {
log.WithField("wfr", o.wfr.Name).Error("Update status error: ", err)
return res, err
}
return res, nil
}
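// Note on the requeue logic above: stages rejected because of an exceeded-quota error are retried
// by requeueing the WorkflowRun until HasTimedOut reports it as expired, at which point the
// affected stages are marked Failed with reason RetryOnExceededQuotaTimeout.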
// Garbage collection of WorkflowRun. When it's terminated, we will clean up the pods created by it.
// - 'lastTry' indicates whether this is the last try to perform GC on this WorkflowRun object,
// if set to true, the WorkflowRun would be marked as cleaned regardless whether the GC succeeded or not.
// - 'wfrDeletion' indicates whether the GC is performed because of WorkflowRun deleted. In this case,
// GC would be performed silently, without event recording and status updating.
func (o *operator) GC(lastTry, wfrDeletion bool) error {
wg := sync.WaitGroup{}
allPodsFinished := true
// For each pod created, delete it.
for stg, status := range o.wfr.Status.Stages {
// For non-terminated stage, update status to cancelled.
if status.Status.Phase == v1alpha1.StatusPending ||
status.Status.Phase == v1alpha1.StatusRunning ||
status.Status.Phase == v1alpha1.StatusWaiting | {
o.UpdateStageStatus(stg, &v1alpha1.Status{
Phase: v1alpha1.StatusCancelled,
Reason: "GC",
LastTransitionTime: metav1.Time{Time: time.Now()},
})
} | conditional_block |
|
operator.go | Run() *v1alpha1.WorkflowRun {
return o.wfr
}
// GetRecorder returns the event recorder.
func (o *operator) GetRecorder() record.EventRecorder {
return o.recorder
}
// InitStagesStatus initializes all missing stages' status to pending, and record workflow topology at this time to workflowRun status.
func (o *operator) InitStagesStatus() {
if o.wfr.Status.Stages == nil {
o.wfr.Status.Stages = make(map[string]*v1alpha1.StageStatus)
}
for _, stg := range o.wf.Spec.Stages {
if _, ok := o.wfr.Status.Stages[stg.Name]; !ok {
o.wfr.Status.Stages[stg.Name] = &v1alpha1.StageStatus{
Status: v1alpha1.Status{
Phase: v1alpha1.StatusPending,
},
Depends: stg.Depends,
Trivial: stg.Trivial,
}
}
}
}
// Update the WorkflowRun status, it retrieves the latest WorkflowRun and apply changes to
// it, then update it with retry.
func (o *operator) Update() error | }
// Apply changes to latest WorkflowRun
combined.Status.Cleaned = combined.Status.Cleaned || o.wfr.Status.Cleaned
combined.Status.Overall = *resolveStatus(&combined.Status.Overall, &o.wfr.Status.Overall)
for stage, status := range o.wfr.Status.Stages {
s, ok := combined.Status.Stages[stage]
if !ok {
combined.Status.Stages[stage] = status
continue
}
combined.Status.Stages[stage].Status = *resolveStatus(&s.Status, &status.Status)
if s.Pod == nil {
combined.Status.Stages[stage].Pod = status.Pod
}
if len(s.Outputs) == 0 {
combined.Status.Stages[stage].Outputs = status.Outputs
}
if len(s.Depends) == 0 {
combined.Status.Stages[stage].Depends = status.Depends
}
combined.Status.Stages[stage].Trivial = status.Trivial
}
// Update global variables to resolved values
combined.Spec.GlobalVariables = o.wfr.Spec.GlobalVariables
if !reflect.DeepEqual(staticStatus(&latest.Status), staticStatus(&combined.Status)) ||
len(latest.OwnerReferences) != len(combined.OwnerReferences) {
// If status has any change, the overall last transition time need to update
combined.Status.Overall.LastTransitionTime = metav1.Time{Time: time.Now()}
_, err = o.client.CycloneV1alpha1().WorkflowRuns(latest.Namespace).Update(context.TODO(), combined, metav1.UpdateOptions{})
if err == nil {
log.WithField("wfr", latest.Name).
WithField("status", combined.Status.Overall.Phase).
WithField("cleaned", combined.Status.Cleaned).
Info("WorkflowRun status updated successfully.")
}
return err
}
return nil
})
}
// UpdateStageStatus updates status of a stage in WorkflowRun status part.
func (o *operator) UpdateStageStatus(stage string, status *v1alpha1.Status) {
if o.wfr.Status.Stages == nil {
o.wfr.Status.Stages = make(map[string]*v1alpha1.StageStatus)
}
if _, ok := o.wfr.Status.Stages[stage]; !ok {
o.wfr.Status.Stages[stage] = &v1alpha1.StageStatus{
Status: *status,
}
} else {
// keep startTime unchanged
originStatus := o.wfr.Status.Stages[stage].Status
o.wfr.Status.Stages[stage].Status = *status
if originStatus.Phase != v1alpha1.StatusPending {
o.wfr.Status.Stages[stage].Status.StartTime = originStatus.StartTime
}
}
}
// UpdateStagePodInfo updates stage pod information to WorkflowRun.
func (o *operator) UpdateStagePodInfo(stage string, podInfo *v1alpha1.PodInfo) {
if o.wfr.Status.Stages == nil {
o.wfr.Status.Stages = make(map[string]*v1alpha1.StageStatus)
}
if _, ok := o.wfr.Status.Stages[stage]; !ok {
o.wfr.Status.Stages[stage] = &v1alpha1.StageStatus{
Status: v1alpha1.Status{
Phase: v1alpha1.StatusRunning,
},
}
}
o.wfr.Status.Stages[stage].Pod = podInfo
}
// UpdateStageOutputs updates stage outputs, they are key-value results from stage execution
func (o *operator) UpdateStageOutputs(stage string, keyValues []v1alpha1.KeyValue) {
if len(keyValues) == 0 {
return
}
if o.wfr.Status.Stages == nil {
o.wfr.Status.Stages = make(map[string]*v1alpha1.StageStatus)
}
if _, ok := o.wfr.Status.Stages[stage]; !ok {
o.wfr.Status.Stages[stage] = &v1alpha1.StageStatus{
Status: v1alpha1.Status{
Phase: v1alpha1.StatusRunning,
},
}
}
o.wfr.Status.Stages[stage].Outputs = keyValues
}
// OverallStatus calculates the overall status of the WorkflowRun. When a stage has its status
// changed, the change will be updated in WorkflowRun stage status, but the overall status is
// not calculated. So when we observed a WorkflowRun updated, we need to calculate its overall
// status and update it if changed.
func (o *operator) OverallStatus() (*v1alpha1.Status, error) {
startTime := o.wfr.ObjectMeta.CreationTimestamp
// If the WorkflowRun has no stage status recorded yet, we resolve the overall status as pending.
if o.wfr.Status.Stages == nil || len(o.wfr.Status.Stages) == 0 {
return &v1alpha1.Status{
Phase: v1alpha1.StatusPending,
LastTransitionTime: metav1.Time{Time: time.Now()},
StartTime: startTime,
}, nil
}
var running, waiting, pending, err bool
for stage, status := range o.wfr.Status.Stages {
switch status.Status.Phase {
case v1alpha1.StatusRunning:
running = true
case v1alpha1.StatusWaiting:
waiting = true
case v1alpha1.StatusFailed:
err = err || !IsTrivial(o.wf, stage)
case v1alpha1.StatusPending:
pending = true
case v1alpha1.StatusSucceeded:
case v1alpha1.StatusCancelled:
err = err || !IsTrivial(o.wf, stage)
default:
log.WithField("stg", stage).
WithField("status", status.Status.Phase).
Error("Unknown stage status observed.")
err = true
}
}
// If there are running stages, resolve the overall status as running.
if running {
return &v1alpha1.Status{
Phase: v1alpha1.StatusRunning,
LastTransitionTime: metav1.Time{Time: time.Now()},
StartTime: startTime,
}, nil
}
// Then if there are waiting stages, resolve the overall status as waiting.
if waiting {
return &v1alpha1.Status{
Phase: v1alpha1.StatusWaiting,
LastTransitionTime: metav1.Time{Time: time.Now()},
StartTime: startTime,
}, nil
}
// Then if there are failed stages, resolve the overall status as failed.
if err {
return &v1alpha1.Status{
Phase: v1alpha1.StatusFailed,
LastTransitionTime: metav1.Time{Time: time.Now()},
StartTime: startTime,
}, nil
}
// If there are still stages waiting to run, we set the status to Running.
// Here we assume all stage statuses have been initialized to Pending before wfr execution.
if pending {
return &v1alpha1.Status{
Phase: v1alpha1.StatusRunning,
LastTransitionTime: metav1.Time{Time: time.Now()},
StartTime: startTime,
}, nil
}
// Finally, all stages have been completed and no more stages to run. | {
if o.wfr == nil {
return nil
}
// Update WorkflowRun status with retry.
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
// Get latest WorkflowRun.
latest, err := o.client.CycloneV1alpha1().WorkflowRuns(o.wfr.Namespace).Get(context.TODO(), o.wfr.Name, metav1.GetOptions{})
if err != nil {
return err
}
combined := latest.DeepCopy()
if combined.Status.Stages == nil {
combined.Status.Stages = make(map[string]*v1alpha1.StageStatus)
}
// Ensure it has owner reference to related Workflow.
if err := ensureOwner(o.client, o.wf, combined); err != nil {
log.WithField("wfr", combined.Name).Warn("Ensure owner error: ", err) | identifier_body |
operator.go | (clusterClient kubernetes.Interface, client k8s.Interface, wfr, namespace string) (Operator, error) {
w, err := client.CycloneV1alpha1().WorkflowRuns(namespace).Get(context.TODO(), wfr, metav1.GetOptions{})
if err != nil {
return nil, err
}
return &operator{
clusterClient: clusterClient,
client: client,
recorder: common.GetEventRecorder(client, common.EventSourceWfrController),
wfr: w,
}, nil
}
// When create Operator from WorkflowRun value, we will also get Workflow value.
func newFromValue(clusterClient kubernetes.Interface, client k8s.Interface, wfr *v1alpha1.WorkflowRun, namespace string) (Operator, error) {
f, err := client.CycloneV1alpha1().Workflows(namespace).Get(context.TODO(), wfr.Spec.WorkflowRef.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return &operator{
clusterClient: clusterClient,
client: client,
recorder: common.GetEventRecorder(client, common.EventSourceWfrController),
wf: f,
wfr: wfr,
}, nil
}
// GetWorkflowRun returns the WorkflowRun object.
func (o *operator) GetWorkflowRun() *v1alpha1.WorkflowRun {
return o.wfr
}
// GetRecorder returns the event recorder.
func (o *operator) GetRecorder() record.EventRecorder {
return o.recorder
}
// InitStagesStatus initializes all missing stages' status to pending, and records the workflow topology at this time to the workflowRun status.
func (o *operator) InitStagesStatus() {
if o.wfr.Status.Stages == nil {
o.wfr.Status.Stages = make(map[string]*v1alpha1.StageStatus)
}
for _, stg := range o.wf.Spec.Stages {
if _, ok := o.wfr.Status.Stages[stg.Name]; !ok {
o.wfr.Status.Stages[stg.Name] = &v1alpha1.StageStatus{
Status: v1alpha1.Status{
Phase: v1alpha1.StatusPending,
},
Depends: stg.Depends,
Trivial: stg.Trivial,
}
}
}
}
// Update the WorkflowRun status, it retrieves the latest WorkflowRun and apply changes to
// it, then update it with retry.
func (o *operator) Update() error {
if o.wfr == nil {
return nil
}
// Update WorkflowRun status with retry.
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
// Get latest WorkflowRun.
latest, err := o.client.CycloneV1alpha1().WorkflowRuns(o.wfr.Namespace).Get(context.TODO(), o.wfr.Name, metav1.GetOptions{})
if err != nil {
return err
}
combined := latest.DeepCopy()
if combined.Status.Stages == nil {
combined.Status.Stages = make(map[string]*v1alpha1.StageStatus)
}
// Ensure it has owner reference to related Workflow.
if err := ensureOwner(o.client, o.wf, combined); err != nil {
log.WithField("wfr", combined.Name).Warn("Ensure owner error: ", err)
}
// Apply changes to latest WorkflowRun
combined.Status.Cleaned = combined.Status.Cleaned || o.wfr.Status.Cleaned
combined.Status.Overall = *resolveStatus(&combined.Status.Overall, &o.wfr.Status.Overall)
for stage, status := range o.wfr.Status.Stages {
s, ok := combined.Status.Stages[stage]
if !ok {
combined.Status.Stages[stage] = status
continue
}
combined.Status.Stages[stage].Status = *resolveStatus(&s.Status, &status.Status)
if s.Pod == nil {
combined.Status.Stages[stage].Pod = status.Pod
}
if len(s.Outputs) == 0 {
combined.Status.Stages[stage].Outputs = status.Outputs
}
if len(s.Depends) == 0 {
combined.Status.Stages[stage].Depends = status.Depends
}
combined.Status.Stages[stage].Trivial = status.Trivial
}
// Update global variables to resolved values
combined.Spec.GlobalVariables = o.wfr.Spec.GlobalVariables
if !reflect.DeepEqual(staticStatus(&latest.Status), staticStatus(&combined.Status)) ||
len(latest.OwnerReferences) != len(combined.OwnerReferences) {
// If status has any change, the overall last transition time need to update
combined.Status.Overall.LastTransitionTime = metav1.Time{Time: time.Now()}
_, err = o.client.CycloneV1alpha1().WorkflowRuns(latest.Namespace).Update(context.TODO(), combined, metav1.UpdateOptions{})
if err == nil {
log.WithField("wfr", latest.Name).
WithField("status", combined.Status.Overall.Phase).
WithField("cleaned", combined.Status.Cleaned).
Info("WorkflowRun status updated successfully.")
}
return err
}
return nil
})
}
// UpdateStageStatus updates status of a stage in WorkflowRun status part.
func (o *operator) UpdateStageStatus(stage string, status *v1alpha1.Status) {
if o.wfr.Status.Stages == nil {
o.wfr.Status.Stages = make(map[string]*v1alpha1.StageStatus)
}
if _, ok := o.wfr.Status.Stages[stage]; !ok {
o.wfr.Status.Stages[stage] = &v1alpha1.StageStatus{
Status: *status,
}
} else {
// keep startTime unchanged
originStatus := o.wfr.Status.Stages[stage].Status
o.wfr.Status.Stages[stage].Status = *status
if originStatus.Phase != v1alpha1.StatusPending {
o.wfr.Status.Stages[stage].Status.StartTime = originStatus.StartTime
}
}
}
// UpdateStagePodInfo updates stage pod information to WorkflowRun.
func (o *operator) UpdateStagePodInfo(stage string, podInfo *v1alpha1.PodInfo) {
if o.wfr.Status.Stages == nil {
o.wfr.Status.Stages = make(map[string]*v1alpha1.StageStatus)
}
if _, ok := o.wfr.Status.Stages[stage]; !ok {
o.wfr.Status.Stages[stage] = &v1alpha1.StageStatus{
Status: v1alpha1.Status{
Phase: v1alpha1.StatusRunning,
},
}
}
o.wfr.Status.Stages[stage].Pod = podInfo
}
// UpdateStageOutputs updates stage outputs, they are key-value results from stage execution
func (o *operator) UpdateStageOutputs(stage string, keyValues []v1alpha1.KeyValue) {
if len(keyValues) == 0 {
return
}
if o.wfr.Status.Stages == nil {
o.wfr.Status.Stages = make(map[string]*v1alpha1.StageStatus)
}
if _, ok := o.wfr.Status.Stages[stage]; !ok {
o.wfr.Status.Stages[stage] = &v1alpha1.StageStatus{
Status: v1alpha1.Status{
Phase: v1alpha1.StatusRunning,
},
}
}
o.wfr.Status.Stages[stage].Outputs = keyValues
}
// OverallStatus calculates the overall status of the WorkflowRun. When a stage has its status
// changed, the change will be updated in WorkflowRun stage status, but the overall status is
// not calculated. So when we observe a WorkflowRun update, we need to calculate its overall
// status and update it if changed.
func (o *operator) OverallStatus() (*v1alpha1.Status, error) {
startTime := o.wfr.ObjectMeta.CreationTimestamp
// If the WorkflowRun has no stage status recorded yet, we resolve the overall status as pending.
if o.wfr.Status.Stages == nil || len(o.wfr.Status.Stages) == 0 {
return &v1alpha1.Status{
Phase: v1alpha1.StatusPending,
LastTransitionTime: metav1.Time{Time: time.Now()},
StartTime: startTime,
}, nil
}
var running, waiting, pending, err bool
for stage, status := range o.wfr.Status.Stages {
switch status.Status.Phase {
case v1alpha1.StatusRunning:
running = true
case v1alpha1.StatusWaiting:
waiting = true
case v1alpha1.StatusFailed:
err = err || !IsTrivial(o.wf, stage)
case v1alpha1.StatusPending:
pending = true
case v1alpha1.StatusSucceeded:
case v1alpha1.StatusCancelled:
err = err || !IsTrivial(o.wf, stage)
default:
log.WithField("stg", stage).
WithField("status", status.Status.Phase).
Error("Unknown stage status observed.")
err = true
}
}
// If there are running stages, resolve the overall status as running.
if running {
return & | newFromName | identifier_name |
|
operator.go | ("Process workload error: ", err)
} else {
log.WithField("wfr", o.wfr.Name).WithField("stg", stage).Error("Process workload error: ", err)
}
continue
}
}
if len(retryStageNames) > 0 {
var requeue bool
if HasTimedOut(o.wfr) {
// timed-out. Update stage status and do not requeue
for _, stageName := range retryStageNames {
o.UpdateStageStatus(stageName, &v1alpha1.Status{
Phase: v1alpha1.StatusFailed,
Reason: "RetryOnExceededQuotaTimeout",
LastTransitionTime: metav1.Time{Time: time.Now()},
StartTime: metav1.Time{Time: time.Now()},
})
}
requeue = false
} else {
requeue = true
}
res.Requeue = &requeue
}
overall, err = o.OverallStatus()
if err != nil {
return res, fmt.Errorf("resolve overall status error: %v", err)
}
o.wfr.Status.Overall = *overall
err = o.Update()
if err != nil {
log.WithField("wfr", o.wfr.Name).Error("Update status error: ", err)
return res, err
}
return res, nil
}
// Garbage collection of WorkflowRun. When it's terminated, we will cleanup the pods created by it.
// - 'lastTry' indicates whether this is the last try to perform GC on this WorkflowRun object,
// if set to true, the WorkflowRun would be marked as cleaned regardless of whether the GC succeeded or not.
// - 'wfrDeletion' indicates whether the GC is performed because the WorkflowRun was deleted. In this case,
// GC would be performed silently, without event recording and status updating.
func (o *operator) GC(lastTry, wfrDeletion bool) error {
wg := sync.WaitGroup{}
allPodsFinished := true
// For each pod created, delete it.
for stg, status := range o.wfr.Status.Stages {
// For non-terminated stage, update status to cancelled.
if status.Status.Phase == v1alpha1.StatusPending ||
status.Status.Phase == v1alpha1.StatusRunning ||
status.Status.Phase == v1alpha1.StatusWaiting {
o.UpdateStageStatus(stg, &v1alpha1.Status{
Phase: v1alpha1.StatusCancelled,
Reason: "GC",
LastTransitionTime: metav1.Time{Time: time.Now()},
})
}
if status.Pod == nil {
log.WithField("wfr", o.wfr.Name).
WithField("stg", stg).
Warn("Pod information is missing, can't clean the pod.")
continue
}
err := o.clusterClient.CoreV1().Pods(status.Pod.Namespace).Delete(context.TODO(), status.Pod.Name, metav1.DeleteOptions{})
if err != nil {
// If the pod not exist, just skip it without complain.
if errors.IsNotFound(err) {
continue
}
log.WithField("wfr", o.wfr.Name).
WithField("stg", stg).
WithField("pod", status.Pod.Name).
Warn("Delete pod error: ", err)
if !wfrDeletion {
o.recorder.Eventf(o.wfr, corev1.EventTypeWarning, "GC", "Delete pod '%s' error: %v", status.Pod.Name, err)
}
} else {
log.WithField("ns", status.Pod.Namespace).WithField("pod", status.Pod.Name).Info("Start to delete pod")
wg.Add(1)
go func(namespace, podName string) {
defer wg.Done()
timeout := time.After(5 * time.Minute)
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
for {
select {
case <-timeout:
allPodsFinished = false
log.WithField("ns", namespace).WithField("pod", podName).Warn("Pod deletion timeout")
return
case <-ticker.C:
_, err := o.clusterClient.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
log.WithField("ns", namespace).WithField("pod", podName).Info("Pod deleted")
return
}
}
}
}(status.Pod.Namespace, status.Pod.Name)
}
}
// Wait until deletion of all WorkflowRun-related workload pods has completed, then start the GC pod to clean data on the PV.
// Otherwise, if the path in the PV used by the workload pods is deleted before the pods themselves,
// the pod deletion process will get stuck in Terminating status.
wg.Wait()
// If some pods have not finished and this is not the last GC try, we will not start the GC pod to clean
// data on the PV. The last GC try will ensure the data gets cleaned.
if !allPodsFinished && !lastTry {
if !wfrDeletion {
o.recorder.Eventf(o.wfr, corev1.EventTypeWarning, "GC", "There are stage pods not Finished")
}
return nil
}
// Get execution context of the WorkflowRun, namespace and PVC are defined in the context.
executionContext := GetExecutionContext(o.wfr)
// Create a gc pod to clean data on PV if PVC is configured.
if executionContext.PVC != "" {
gcPod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: GCPodName(o.wfr.Name),
Namespace: executionContext.Namespace,
Labels: map[string]string{
meta.LabelProjectName: common.ResolveProjectName(*o.wfr),
meta.LabelWorkflowName: common.ResolveWorkflowName(*o.wfr),
meta.LabelWorkflowRunName: o.wfr.Name,
meta.LabelPodKind: meta.PodKindGC.String(),
meta.LabelPodCreatedBy: meta.CycloneCreator,
},
Annotations: map[string]string{
meta.AnnotationIstioInject: meta.AnnotationValueFalse,
},
},
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
Containers: []corev1.Container{
{
Name: common.GCContainerName,
Image: controller.Config.Images[controller.GCImage],
Command: []string{"rm", "-rf", common.GCDataPath + "/" + o.wfr.Name},
VolumeMounts: []corev1.VolumeMount{
{
Name: common.DefaultPvVolumeName,
MountPath: common.GCDataPath,
SubPath: common.WorkflowRunsPath(),
},
},
Resources: controller.Config.GC.ResourceRequirements,
},
},
Volumes: []corev1.Volume{
{
Name: common.DefaultPvVolumeName,
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: executionContext.PVC,
ReadOnly: false,
},
},
},
},
},
}
// If controller instance name is set, add label to the pod created.
if instance := os.Getenv(ccommon.ControllerInstanceEnvName); len(instance) != 0 {
gcPod.ObjectMeta.Labels[meta.LabelControllerInstance] = instance
}
_, err := o.clusterClient.CoreV1().Pods(executionContext.Namespace).Create(context.TODO(), gcPod, metav1.CreateOptions{})
if err != nil {
log.WithField("wfr", o.wfr.Name).Warn("Create GC pod error: ", err)
if !lastTry {
return err
}
if !wfrDeletion {
o.recorder.Eventf(o.wfr, corev1.EventTypeWarning, "GC", "Create GC pod error: %v", err)
}
}
}
if !wfrDeletion {
o.recorder.Event(o.wfr, corev1.EventTypeNormal, "GC", "GC is performed successfully.")
o.wfr.Status.Cleaned = true
err := o.Update()
if err != nil {
log.WithField("wfr", o.wfr.Name).Warn("Update wfr error: ", err)
}
}
return nil
}
// ResolveGlobalVariables resolves global variables in the WorkflowRun, e.g. generating the final value for a generation-
// type value defined in the Workflow. For example, $(random:5) --> 'axyps'
func (o *operator) ResolveGlobalVariables() {
if o.wf == nil || o.wfr == nil {
return
}
var appendVariables []v1alpha1.GlobalVariable
for _, wfVariable := range o.wf.Spec.GlobalVariables {
var found bool
for _, variable := range o.wfr.Spec.GlobalVariables {
if variable.Name == wfVariable.Name {
found = true
break
}
}
if !found {
appendVariables = append(appendVariables, v1alpha1.GlobalVariable{
Name: wfVariable.Name,
Value: values.GenerateValue(wfVariable.Value),
})
} | }
| random_line_split |
|
flatdb.go | .
//
// All items stored in the flatDB will be marshalled in this format:
//
// +------------+-----+--------------+-------+
// | Key Length | Key | Value Length | Value |
// +------------+-----+--------------+-------+
//
// The flatDB can only be opened in read-only mode (iteration) or write-only
// mode. Each write operation will append the blob into the file with or without
// a sync operation. But in order to make the flat database readable, it should
// call Commit after all write operations, and after that the db is not writable.
type FlatDatabase struct {
lock sync.Mutex
path string // The directory for the flat database
data *os.File // File descriptor for the flat database.
index *os.File // File descriptor for the indexes.
read bool // Indicator whether the db is read or write mode
buff []byte // Auxiliary buffer for storing uncommitted data
items int // Auxiliary number for counting uncommitted data
iterating bool // Indicator whether the db is iterating. Concurrent iteration is not supported
offset uint64 // Global offset of entry in the file
}
func NewFlatDatabase(path string, read bool) (*FlatDatabase, error) {
if err := os.MkdirAll(path, 0755); err != nil {
return nil, err
}
var (
data *os.File
index *os.File
err error
)
if read {
data, err = os.OpenFile(filepath.Join(path, syncedName), os.O_RDONLY, 0644)
if err != nil {
return nil, err
}
index, err = os.OpenFile(filepath.Join(path, indexName), os.O_RDONLY, 0644)
if err != nil {
return nil, err
}
} else {
data, err = os.OpenFile(filepath.Join(path, temporaryName), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
return nil, err
}
index, err = os.OpenFile(filepath.Join(path, indexName), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
return nil, err
}
}
return &FlatDatabase{
path: path,
data: data,
index: index,
read: read,
}, nil
}
// Has retrieves if a key is present in the flat data store.
func (db *FlatDatabase) Has(key []byte) (bool, error) { panic("not supported") }
// Get retrieves the given key if it's present in the flat data store.
func (db *FlatDatabase) Get(key []byte) ([]byte, error) { panic("not supported") }
// Delete removes the key from the key-value data store.
func (db *FlatDatabase) Delete(key []byte) error { panic("not supported") }
// Put inserts the given value into the key-value data store.
func (db *FlatDatabase) Put(key []byte, value []byte) error {
if len(key) == 0 || len(value) == 0 {
return ErrEmptyEntry
}
db.lock.Lock()
defer db.lock.Unlock()
if db.read {
return ErrReadOnly
}
n := 2*binary.MaxVarintLen32 + len(key) + len(value)
db.grow(n)
offset, previous := len(db.buff), len(db.buff)
db.buff = db.buff[:offset+n]
offset += binary.PutUvarint(db.buff[offset:], uint64(len(key)))
offset += copy(db.buff[offset:], key)
offset += binary.PutUvarint(db.buff[offset:], uint64(len(value)))
offset += copy(db.buff[offset:], value)
db.buff = db.buff[:offset]
db.items += 1
// db.offset is monotonic increasing in "WRITE" mode which
// indicates the offset of the last written entry in GLOBAL
// view. So every time only the diff is added.
db.offset += uint64(offset) - uint64(previous)
return db.writeChunk(false)
}
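// Illustrative sketch (not part of the original file): decoding one record that
// Put encoded above, using the same uvarint framing. No bounds checking is done
// here; it only shows the record layout.
func decodeRecord(buf []byte) (key, value, rest []byte, err error) {
	klen, n := binary.Uvarint(buf)
	if n <= 0 {
		return nil, nil, nil, ErrReadFailure
	}
	buf = buf[n:]
	key, buf = buf[:klen], buf[klen:]
	vlen, m := binary.Uvarint(buf)
	if m <= 0 {
		return nil, nil, nil, ErrReadFailure
	}
	buf = buf[m:]
	value, rest = buf[:vlen], buf[vlen:]
	return key, value, rest, nil
}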
func (db *FlatDatabase) grow(n int) {
o := len(db.buff)
if cap(db.buff)-o < n {
div := 1
if db.items > bufferGrowRec {
div = db.items / bufferGrowRec
}
ndata := make([]byte, o, o+n+o/div)
copy(ndata, db.buff)
db.buff = ndata
}
}
func (db *FlatDatabase) writeChunk(force bool) error {
if len(db.buff) < chunkSize && !force {
return nil
}
// Step one, flush data | if n != len(db.buff) {
return ErrWriteFailure
}
db.buff = db.buff[:0]
db.items = 0
// Step two, flush chunk offset
var local [8]byte
binary.BigEndian.PutUint64(local[:], db.offset)
n, err = db.index.Write(local[:])
if err != nil {
return err
}
if n != 8 {
return ErrWriteFailure
}
return nil
}
func (db *FlatDatabase) readChunk() error {
// Step one, read chunk size
var local [8]byte
n, err := db.index.Read(local[:])
if err != nil {
return err // may return EOF
}
if n != 8 {
return ErrReadFailure
}
offset := binary.BigEndian.Uint64(local[:])
size := int(offset - db.offset)
db.offset = offset
db.grow(size)
db.buff = db.buff[:size]
n, err = db.data.Read(db.buff)
if err != nil {
return err // may return EOF
}
if n != size {
return ErrReadFailure
}
return nil
}
// Commit flushes all in-memory data to disk and switches the db to read mode.
func (db *FlatDatabase) Commit() error {
db.lock.Lock()
defer db.lock.Unlock()
if err := db.closeNoLock(); err != nil {
return err
}
if err := rename(filepath.Join(db.path, temporaryName), filepath.Join(db.path, syncedName)); err != nil {
return err
}
if err := syncDir(db.path); err != nil {
return err
}
db.read = true
db.offset = 0
// Reopen the files in read-only mode
var err error
db.data, err = os.OpenFile(filepath.Join(db.path, syncedName), os.O_RDONLY, 0644)
if err != nil {
return err
}
db.index, err = os.OpenFile(filepath.Join(db.path, indexName), os.O_RDONLY, 0644)
if err != nil {
return err
}
return nil
}
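// Illustrative end-to-end sketch (assumes only the API defined in this file):
// write a few records in write-only mode, Commit to switch to read mode, then
// replay the keys in insertion order.
func exampleRoundTrip(dir string) ([][]byte, error) {
	db, err := NewFlatDatabase(dir, false) // write-only mode
	if err != nil {
		return nil, err
	}
	if err := db.Put([]byte("k1"), []byte("v1")); err != nil {
		return nil, err
	}
	if err := db.Put([]byte("k2"), []byte("v2")); err != nil {
		return nil, err
	}
	// Commit flushes the buffered chunk, syncs, and reopens the files read-only.
	if err := db.Commit(); err != nil {
		return nil, err
	}
	it := db.NewIterator(nil, nil)
	var keys [][]byte
	for it.Next() {
		// Key's backing slice may change on the next call to Next, so copy it.
		keys = append(keys, append([]byte(nil), it.Key()...))
	}
	return keys, it.Error()
}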
func (db *FlatDatabase) closeNoLock() error {
if err := db.writeChunk(true); err != nil {
return err
}
if err := db.data.Sync(); err != nil {
return err
}
if err := db.index.Sync(); err != nil {
return err
}
if err := db.data.Close(); err != nil {
return err
}
if err := db.index.Close(); err != nil {
return err
}
return nil
}
func (db *FlatDatabase) Close() error {
db.lock.Lock()
defer db.lock.Unlock()
return db.closeNoLock()
}
// NewBatch creates a write-only database that buffers changes to its host db
// until a final write is called.
func (db *FlatDatabase) NewBatch() *FlatBatch {
return &FlatBatch{db: db}
}
type FlatBatch struct {
db *FlatDatabase
keys [][]byte
vals [][]byte
keysize int
valsize int
lock sync.RWMutex
}
// Put inserts the given value into the key-value data store.
func (fb *FlatBatch) Put(key []byte, value []byte) error {
fb.lock.Lock()
defer fb.lock.Unlock()
fb.keys = append(fb.keys, key)
fb.vals = append(fb.vals, value)
fb.keysize += len(key)
fb.valsize += len(value)
return nil
}
// Delete removes the key from the key-value data store.
func (fb *FlatBatch) Delete(key []byte) error { panic("not supported") }
// ValueSize retrieves the amount of data queued up for writing.
func (fb *FlatBatch) ValueSize() int {
fb.lock.RLock()
defer fb.lock.RUnlock()
return fb.valsize
}
// Write flushes any accumulated data to disk.
func (fb *FlatBatch) Write() error {
fb.lock.Lock()
defer fb.lock.Unlock()
for i := 0; i < len(fb.keys); i++ {
if err := fb.db.Put(fb.keys[i], fb.vals[i]); err != nil {
return err
}
}
return nil
}
// Reset resets the batch for reuse.
func (fb *FlatBatch) Reset() {
fb.lock.Lock()
defer fb.lock.Unlock()
fb.keysize, fb.valsize = 0, 0
fb.keys = fb.keys[:0]
fb.vals = fb.vals[:0]
}
// NewIterator creates an iterator over the **whole** database with first-in-first-out
// | n, err := db.data.Write(db.buff)
if err != nil {
return err
} | random_line_split |
flatdb.go | data,
index: index,
read: read,
}, nil
}
// Has retrieves if a key is present in the flat data store.
func (db *FlatDatabase) Has(key []byte) (bool, error) { panic("not supported") }
// Get retrieves the given key if it's present in the flat data store.
func (db *FlatDatabase) Get(key []byte) ([]byte, error) { panic("not supported") }
// Delete removes the key from the key-value data store.
func (db *FlatDatabase) Delete(key []byte) error { panic("not supported") }
// Put inserts the given value into the key-value data store.
func (db *FlatDatabase) Put(key []byte, value []byte) error {
if len(key) == 0 || len(value) == 0 {
return ErrEmptyEntry
}
db.lock.Lock()
defer db.lock.Unlock()
if db.read {
return ErrReadOnly
}
n := 2*binary.MaxVarintLen32 + len(key) + len(value)
db.grow(n)
offset, previous := len(db.buff), len(db.buff)
db.buff = db.buff[:offset+n]
offset += binary.PutUvarint(db.buff[offset:], uint64(len(key)))
offset += copy(db.buff[offset:], key)
offset += binary.PutUvarint(db.buff[offset:], uint64(len(value)))
offset += copy(db.buff[offset:], value)
db.buff = db.buff[:offset]
db.items += 1
// db.offset is monotonic increasing in "WRITE" mode which
// indicates the offset of the last written entry in GLOBAL
// view. So every time only the diff is added.
db.offset += uint64(offset) - uint64(previous)
return db.writeChunk(false)
}
func (db *FlatDatabase) grow(n int) {
o := len(db.buff)
if cap(db.buff)-o < n {
div := 1
if db.items > bufferGrowRec {
div = db.items / bufferGrowRec
}
ndata := make([]byte, o, o+n+o/div)
copy(ndata, db.buff)
db.buff = ndata
}
}
func (db *FlatDatabase) writeChunk(force bool) error {
if len(db.buff) < chunkSize && !force {
return nil
}
// Step one, flush data
n, err := db.data.Write(db.buff)
if err != nil {
return err
}
if n != len(db.buff) {
return ErrWriteFailure
}
db.buff = db.buff[:0]
db.items = 0
// Step two, flush chunk offset
var local [8]byte
binary.BigEndian.PutUint64(local[:], db.offset)
n, err = db.index.Write(local[:])
if err != nil {
return err
}
if n != 8 {
return ErrWriteFailure
}
return nil
}
func (db *FlatDatabase) readChunk() error {
// Step one, read chunk size
var local [8]byte
n, err := db.index.Read(local[:])
if err != nil {
return err // may return EOF
}
if n != 8 {
return ErrReadFailure
}
offset := binary.BigEndian.Uint64(local[:])
size := int(offset - db.offset)
db.offset = offset
db.grow(size)
db.buff = db.buff[:size]
n, err = db.data.Read(db.buff)
if err != nil {
return err // may return EOF
}
if n != size {
return ErrReadFailure
}
return nil
}
// Commit flushes all in-memory data to disk and switches the db to read mode.
func (db *FlatDatabase) Commit() error {
db.lock.Lock()
defer db.lock.Unlock()
if err := db.closeNoLock(); err != nil {
return err
}
if err := rename(filepath.Join(db.path, temporaryName), filepath.Join(db.path, syncedName)); err != nil {
return err
}
if err := syncDir(db.path); err != nil {
return err
}
db.read = true
db.offset = 0
// Reopen the files in read-only mode
var err error
db.data, err = os.OpenFile(filepath.Join(db.path, syncedName), os.O_RDONLY, 0644)
if err != nil {
return err
}
db.index, err = os.OpenFile(filepath.Join(db.path, indexName), os.O_RDONLY, 0644)
if err != nil {
return err
}
return nil
}
func (db *FlatDatabase) closeNoLock() error {
if err := db.writeChunk(true); err != nil {
return err
}
if err := db.data.Sync(); err != nil {
return err
}
if err := db.index.Sync(); err != nil {
return err
}
if err := db.data.Close(); err != nil {
return err
}
if err := db.index.Close(); err != nil {
return err
}
return nil
}
func (db *FlatDatabase) Close() error {
db.lock.Lock()
defer db.lock.Unlock()
return db.closeNoLock()
}
// NewBatch creates a write-only database that buffers changes to its host db
// until a final write is called.
func (db *FlatDatabase) NewBatch() *FlatBatch {
return &FlatBatch{db: db}
}
type FlatBatch struct {
db *FlatDatabase
keys [][]byte
vals [][]byte
keysize int
valsize int
lock sync.RWMutex
}
// Put inserts the given value into the key-value data store.
func (fb *FlatBatch) Put(key []byte, value []byte) error {
fb.lock.Lock()
defer fb.lock.Unlock()
fb.keys = append(fb.keys, key)
fb.vals = append(fb.vals, value)
fb.keysize += len(key)
fb.valsize += len(value)
return nil
}
// Delete removes the key from the key-value data store.
func (fb *FlatBatch) Delete(key []byte) error { panic("not supported") }
// ValueSize retrieves the amount of data queued up for writing.
func (fb *FlatBatch) ValueSize() int {
fb.lock.RLock()
defer fb.lock.RUnlock()
return fb.valsize
}
// Write flushes any accumulated data to disk.
func (fb *FlatBatch) Write() error {
fb.lock.Lock()
defer fb.lock.Unlock()
for i := 0; i < len(fb.keys); i++ {
if err := fb.db.Put(fb.keys[i], fb.vals[i]); err != nil {
return err
}
}
return nil
}
// Reset resets the batch for reuse.
func (fb *FlatBatch) Reset() {
fb.lock.Lock()
defer fb.lock.Unlock()
fb.keysize, fb.valsize = 0, 0
fb.keys = fb.keys[:0]
fb.vals = fb.vals[:0]
}
// NewIterator creates an iterator over the **whole** database with first-in-first-out
// order. The passed `prefix` and `start` are unused; they exist only to satisfy the interface.
//
// If there already exists an un-released iterator, nil will be returned since
// concurrent iteration is not supported by flatdb.
func (db *FlatDatabase) NewIterator(prefix []byte, start []byte) *FlatIterator {
db.lock.Lock()
defer db.lock.Unlock()
if db.iterating {
return nil
}
db.iterating = true
db.data.Seek(0, 0)
db.index.Seek(0, 0)
db.offset = 0
db.buff = db.buff[:0]
return &FlatIterator{db: db}
}
// FlatIterator is the iterator used to iterate the whole db.
type FlatIterator struct {
db *FlatDatabase
key []byte
val []byte
err error
eof bool
}
// Next moves the iterator to the next key/value pair. It returns false when the
// iterator is exhausted.
func (iter *FlatIterator) Next() bool {
if len(iter.db.buff) == 0 && !iter.eof {
if err := iter.db.readChunk(); err != nil {
if err == io.EOF {
iter.eof = true
return false
} else {
iter.err = err
return false
}
}
}
var offset int
x, n := binary.Uvarint(iter.db.buff)
offset += n
if n <= 0 {
return false
}
key := iter.db.buff[offset : offset+int(x)]
offset += int(x)
x, n = binary.Uvarint(iter.db.buff[offset:])
offset += n
if n <= 0 {
return false
}
val := iter.db.buff[offset : offset+int(x)]
offset += int(x)
iter.key = key
iter.val = val
iter.db.buff = iter.db.buff[offset:]
return true
}
// Error returns any accumulated error. Exhausting all the key/value pairs
// is not considered to be an error.
func (iter *FlatIterator) Error() error {
return iter.err
}
// Key returns the key of the current key/value pair, or nil if done. The caller
// should not modify the contents of the returned slice, and its contents may
// change on the next call to Next.
func (iter *FlatIterator) Key() []byte | {
return iter.key
} | identifier_body |
|
flatdb.go | err = os.OpenFile(filepath.Join(path, syncedName), os.O_RDONLY, 0644)
if err != nil {
return nil, err
}
index, err = os.OpenFile(filepath.Join(path, indexName), os.O_RDONLY, 0644)
if err != nil {
return nil, err
}
} else {
data, err = os.OpenFile(filepath.Join(path, temporaryName), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
return nil, err
}
index, err = os.OpenFile(filepath.Join(path, indexName), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
return nil, err
}
}
return &FlatDatabase{
path: path,
data: data,
index: index,
read: read,
}, nil
}
// Has retrieves if a key is present in the flat data store.
func (db *FlatDatabase) Has(key []byte) (bool, error) { panic("not supported") }
// Get retrieves the given key if it's present in the flat data store.
func (db *FlatDatabase) Get(key []byte) ([]byte, error) { panic("not supported") }
// Delete removes the key from the key-value data store.
func (db *FlatDatabase) Delete(key []byte) error { panic("not supported") }
// Put inserts the given value into the key-value data store.
func (db *FlatDatabase) Put(key []byte, value []byte) error {
if len(key) == 0 || len(value) == 0 {
return ErrEmptyEntry
}
db.lock.Lock()
defer db.lock.Unlock()
if db.read {
return ErrReadOnly
}
n := 2*binary.MaxVarintLen32 + len(key) + len(value)
db.grow(n)
offset, previous := len(db.buff), len(db.buff)
db.buff = db.buff[:offset+n]
offset += binary.PutUvarint(db.buff[offset:], uint64(len(key)))
offset += copy(db.buff[offset:], key)
offset += binary.PutUvarint(db.buff[offset:], uint64(len(value)))
offset += copy(db.buff[offset:], value)
db.buff = db.buff[:offset]
db.items += 1
// db.offset is monotonic increasing in "WRITE" mode which
// indicates the offset of the last written entry in GLOBAL
// view. So every time only the diff is added.
db.offset += uint64(offset) - uint64(previous)
return db.writeChunk(false)
}
func (db *FlatDatabase) grow(n int) {
o := len(db.buff)
if cap(db.buff)-o < n {
div := 1
if db.items > bufferGrowRec {
div = db.items / bufferGrowRec
}
ndata := make([]byte, o, o+n+o/div)
copy(ndata, db.buff)
db.buff = ndata
}
}
func (db *FlatDatabase) writeChunk(force bool) error {
if len(db.buff) < chunkSize && !force {
return nil
}
// Step one, flush data
n, err := db.data.Write(db.buff)
if err != nil {
return err
}
if n != len(db.buff) {
return ErrWriteFailure
}
db.buff = db.buff[:0]
db.items = 0
// Step two, flush chunk offset
var local [8]byte
binary.BigEndian.PutUint64(local[:], db.offset)
n, err = db.index.Write(local[:])
if err != nil {
return err
}
if n != 8 {
return ErrWriteFailure
}
return nil
}
func (db *FlatDatabase) readChunk() error {
// Step one, read chunk size
var local [8]byte
n, err := db.index.Read(local[:])
if err != nil {
return err // may return EOF
}
if n != 8 {
return ErrReadFailure
}
offset := binary.BigEndian.Uint64(local[:])
size := int(offset - db.offset)
db.offset = offset
db.grow(size)
db.buff = db.buff[:size]
n, err = db.data.Read(db.buff)
if err != nil {
return err // may return EOF
}
if n != size {
return ErrReadFailure
}
return nil
}
// Commit flushes all in-memory data to disk and switches the db to read mode.
func (db *FlatDatabase) Commit() error {
db.lock.Lock()
defer db.lock.Unlock()
if err := db.closeNoLock(); err != nil {
return err
}
if err := rename(filepath.Join(db.path, temporaryName), filepath.Join(db.path, syncedName)); err != nil {
return err
}
if err := syncDir(db.path); err != nil {
return err
}
db.read = true
db.offset = 0
// Reopen the files in read-only mode
var err error
db.data, err = os.OpenFile(filepath.Join(db.path, syncedName), os.O_RDONLY, 0644)
if err != nil {
return err
}
db.index, err = os.OpenFile(filepath.Join(db.path, indexName), os.O_RDONLY, 0644)
if err != nil {
return err
}
return nil
}
func (db *FlatDatabase) closeNoLock() error {
if err := db.writeChunk(true); err != nil {
return err
}
if err := db.data.Sync(); err != nil {
return err
}
if err := db.index.Sync(); err != nil {
return err
}
if err := db.data.Close(); err != nil {
return err
}
if err := db.index.Close(); err != nil {
return err
}
return nil
}
func (db *FlatDatabase) Close() error {
db.lock.Lock()
defer db.lock.Unlock()
return db.closeNoLock()
}
// NewBatch creates a write-only database that buffers changes to its host db
// until a final write is called.
func (db *FlatDatabase) NewBatch() *FlatBatch {
return &FlatBatch{db: db}
}
type FlatBatch struct {
db *FlatDatabase
keys [][]byte
vals [][]byte
keysize int
valsize int
lock sync.RWMutex
}
// Put inserts the given value into the key-value data store.
func (fb *FlatBatch) Put(key []byte, value []byte) error {
fb.lock.Lock()
defer fb.lock.Unlock()
fb.keys = append(fb.keys, key)
fb.vals = append(fb.vals, value)
fb.keysize += len(key)
fb.valsize += len(value)
return nil
}
// Delete removes the key from the key-value data store.
func (fb *FlatBatch) Delete(key []byte) error { panic("not supported") }
// ValueSize retrieves the amount of data queued up for writing.
func (fb *FlatBatch) ValueSize() int {
fb.lock.RLock()
defer fb.lock.RUnlock()
return fb.valsize
}
// Write flushes any accumulated data to disk.
func (fb *FlatBatch) Write() error {
fb.lock.Lock()
defer fb.lock.Unlock()
for i := 0; i < len(fb.keys); i++ {
if err := fb.db.Put(fb.keys[i], fb.vals[i]); err != nil {
return err
}
}
return nil
}
// Reset resets the batch for reuse.
func (fb *FlatBatch) Reset() {
fb.lock.Lock()
defer fb.lock.Unlock()
fb.keysize, fb.valsize = 0, 0
fb.keys = fb.keys[:0]
fb.vals = fb.vals[:0]
}
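// Illustrative batch usage sketch (assumes only the API defined in this file):
// entries are buffered in memory and only reach the database on Write.
func exampleBatch(db *FlatDatabase) error {
	b := db.NewBatch()
	if err := b.Put([]byte("k1"), []byte("v1")); err != nil {
		return err
	}
	if err := b.Put([]byte("k2"), []byte("v2")); err != nil {
		return err
	}
	// Write replays the buffered entries into the database in order.
	if err := b.Write(); err != nil {
		return err
	}
	b.Reset() // clear the buffers so the batch can be reused
	return nil
}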
// NewIterator creates an iterator over the **whole** database with first-in-first-out
// order. The passed `prefix` and `start` are unused; they exist only to satisfy the interface.
//
// If there already exists an un-released iterator, nil will be returned since
// concurrent iteration is not supported by flatdb.
func (db *FlatDatabase) NewIterator(prefix []byte, start []byte) *FlatIterator {
db.lock.Lock()
defer db.lock.Unlock()
if db.iterating {
return nil
}
db.iterating = true
db.data.Seek(0, 0)
db.index.Seek(0, 0)
db.offset = 0
db.buff = db.buff[:0]
return &FlatIterator{db: db}
}
// FlatIterator is the iterator used to iterate the whole db.
type FlatIterator struct {
db *FlatDatabase
key []byte
val []byte
err error
eof bool
}
// Next moves the iterator to the next key/value pair. It returns false when the
// iterator is exhausted.
func (iter *FlatIterator) Next() bool {
if len(iter.db.buff) == 0 && !iter.eof {
if err := iter.db.readChunk(); err != nil {
if err == io.EOF {
iter.eof = true
return false
} else {
iter.err = err
return false
}
}
}
var offset int
x, n := binary.Uvarint(iter.db.buff)
offset += n
if n <= 0 | {
return false
} | conditional_block |
|
flatdb.go | //
// All items stored in the flatDB will be marshalled in this format:
//
// +------------+-----+--------------+-------+
// | Key Length | Key | Value Length | Value |
// +------------+-----+--------------+-------+
//
// The flatDB can only be opened in read-only mode (iteration) or write-only
// mode. Each write operation will append the blob into the file with or without
// a sync operation. But in order to make the flat database readable, it should
// call Commit after all write operations, and after that the db is not writable.
type FlatDatabase struct {
lock sync.Mutex
path string // The directory for the flat database
data *os.File // File descriptor for the flat database.
index *os.File // File descriptor for the indexes.
read bool // Indicator whether the db is read or write mode
buff []byte // Auxiliary buffer for storing uncommitted data
items int // Auxiliary number for counting uncommitted data
iterating bool // Indicator whether the db is iterating. Concurrent iteration is not supported
offset uint64 // Global offset of entry in the file
}
func NewFlatDatabase(path string, read bool) (*FlatDatabase, error) {
if err := os.MkdirAll(path, 0755); err != nil {
return nil, err
}
var (
data *os.File
index *os.File
err error
)
if read {
data, err = os.OpenFile(filepath.Join(path, syncedName), os.O_RDONLY, 0644)
if err != nil {
return nil, err
}
index, err = os.OpenFile(filepath.Join(path, indexName), os.O_RDONLY, 0644)
if err != nil {
return nil, err
}
} else {
data, err = os.OpenFile(filepath.Join(path, temporaryName), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
return nil, err
}
index, err = os.OpenFile(filepath.Join(path, indexName), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
return nil, err
}
}
return &FlatDatabase{
path: path,
data: data,
index: index,
read: read,
}, nil
}
// Has retrieves if a key is present in the flat data store.
func (db *FlatDatabase) Has(key []byte) (bool, error) { panic("not supported") }
// Get retrieves the given key if it's present in the flat data store.
func (db *FlatDatabase) Get(key []byte) ([]byte, error) { panic("not supported") }
// Delete removes the key from the key-value data store.
func (db *FlatDatabase) Delete(key []byte) error { panic("not supported") }
// Put inserts the given value into the key-value data store.
func (db *FlatDatabase) Put(key []byte, value []byte) error {
if len(key) == 0 || len(value) == 0 {
return ErrEmptyEntry
}
db.lock.Lock()
defer db.lock.Unlock()
if db.read {
return ErrReadOnly
}
n := 2*binary.MaxVarintLen32 + len(key) + len(value)
db.grow(n)
offset, previous := len(db.buff), len(db.buff)
db.buff = db.buff[:offset+n]
offset += binary.PutUvarint(db.buff[offset:], uint64(len(key)))
offset += copy(db.buff[offset:], key)
offset += binary.PutUvarint(db.buff[offset:], uint64(len(value)))
offset += copy(db.buff[offset:], value)
db.buff = db.buff[:offset]
db.items += 1
// db.offset is monotonic increasing in "WRITE" mode which
// indicates the offset of the last written entry in GLOBAL
// view. So everytime only the diff is added.
db.offset += uint64(offset) - uint64(previous)
return db.writeChunk(false)
}
func (db *FlatDatabase) grow(n int) {
o := len(db.buff)
if cap(db.buff)-o < n {
div := 1
if db.items > bufferGrowRec {
div = db.items / bufferGrowRec
}
ndata := make([]byte, o, o+n+o/div)
copy(ndata, db.buff)
db.buff = ndata
}
}
func (db *FlatDatabase) writeChunk(force bool) error {
if len(db.buff) < chunkSize && !force {
return nil
}
// Step one, flush data
n, err := db.data.Write(db.buff)
if err != nil {
return err
}
if n != len(db.buff) {
return ErrWriteFailure
}
db.buff = db.buff[:0]
db.items = 0
// Step two, flush chunk offset
var local [8]byte
binary.BigEndian.PutUint64(local[:], db.offset)
n, err = db.index.Write(local[:])
if err != nil {
return err
}
if n != 8 {
return ErrWriteFailure
}
return nil
}
func (db *FlatDatabase) readChunk() error {
// Step one, read chunk size
var local [8]byte
n, err := db.index.Read(local[:])
if err != nil {
return err // may return EOF
}
if n != 8 {
return ErrReadFailure
}
offset := binary.BigEndian.Uint64(local[:])
size := int(offset - db.offset)
db.offset = offset
db.grow(size)
db.buff = db.buff[:size]
n, err = db.data.Read(db.buff)
if err != nil {
return err // may return EOF
}
if n != size {
return ErrReadFailure
}
return nil
}
// Commit flushes all in-memory data to disk and switches the db to read mode.
func (db *FlatDatabase) Commit() error {
db.lock.Lock()
defer db.lock.Unlock()
if err := db.closeNoLock(); err != nil {
return err
}
if err := rename(filepath.Join(db.path, temporaryName), filepath.Join(db.path, syncedName)); err != nil {
return err
}
if err := syncDir(db.path); err != nil {
return err
}
db.read = true
db.offset = 0
// Reopen the files in read-only mode
var err error
db.data, err = os.OpenFile(filepath.Join(db.path, syncedName), os.O_RDONLY, 0644)
if err != nil {
return err
}
db.index, err = os.OpenFile(filepath.Join(db.path, indexName), os.O_RDONLY, 0644)
if err != nil {
return err
}
return nil
}
func (db *FlatDatabase) closeNoLock() error {
if err := db.writeChunk(true); err != nil {
return err
}
if err := db.data.Sync(); err != nil {
return err
}
if err := db.index.Sync(); err != nil {
return err
}
if err := db.data.Close(); err != nil {
return err
}
if err := db.index.Close(); err != nil {
return err
}
return nil
}
func (db *FlatDatabase) Close() error {
db.lock.Lock()
defer db.lock.Unlock()
return db.closeNoLock()
}
// NewBatch creates a write-only database that buffers changes to its host db
// until a final write is called.
func (db *FlatDatabase) NewBatch() *FlatBatch {
return &FlatBatch{db: db}
}
type FlatBatch struct {
db *FlatDatabase
keys [][]byte
vals [][]byte
keysize int
valsize int
lock sync.RWMutex
}
// Put inserts the given value into the key-value data store.
func (fb *FlatBatch) Put(key []byte, value []byte) error {
fb.lock.Lock()
defer fb.lock.Unlock()
fb.keys = append(fb.keys, key)
fb.vals = append(fb.vals, value)
fb.keysize += len(key)
fb.valsize += len(value)
return nil
}
// Delete removes the key from the key-value data store.
func (fb *FlatBatch) Delete(key []byte) error { panic("not supported") }
// ValueSize retrieves the amount of data queued up for writing.
func (fb *FlatBatch) ValueSize() int {
fb.lock.RLock()
defer fb.lock.RUnlock()
return fb.valsize
}
// Write flushes any accumulated data to disk.
func (fb *FlatBatch) Write() error {
fb.lock.Lock()
defer fb.lock.Unlock()
for i := 0; i < len(fb.keys); i++ {
if err := fb.db.Put(fb.keys[i], fb.vals[i]); err != nil {
return err
}
}
return nil
}
// Reset resets the batch for reuse.
func (fb *FlatBatch) | () {
fb.lock.Lock()
defer fb.lock.Unlock()
fb.keysize, fb.valsize = 0, 0
fb.keys = fb.keys[:0]
fb.vals = fb.vals[:0]
}
// NewIterator creates a iterator over the **whole** database with first-in-first-out
| Reset | identifier_name |
write.rs | which is odd, and std::u{N}::MAX + 1 == 0 is even.
//
// note also that `ri` _may_ have been re-used since we last read into last_epochs.
// this is okay though, as a change still implies that the new reader must have
// arrived _after_ we did the atomic swap, and thus must also have seen the new
// pointer.
if self.last_epochs[ri] % 2 == 0 {
continue;
}
let now = epoch.load(Ordering::Acquire);
if now != self.last_epochs[ri] {
// reader must have seen the last swap, since they have done at least one
// operation since we last looked at their epoch, which _must_ mean that they
// are no longer using the old pointer value.
} else {
// reader may not have seen swap
// continue from this reader's epoch
starti = ii;
if !cfg!(loom) {
// how eagerly should we retry?
if iter != 20 {
iter += 1;
} else {
thread::yield_now();
}
}
#[cfg(loom)]
loom::thread::yield_now();
continue 'retry;
}
}
break;
}
#[cfg(test)]
{
self.is_waiting.store(false, Ordering::Relaxed);
}
}
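    // Note on the parity check above: a reader bumps its epoch to an odd value
    // when it enters a read guard and to an even value when it leaves, so
    // `epoch % 2 == 1` means the reader may still be inside the old copy, while
    // any change to the counter since `last_epochs` was sampled means it has
    // already observed the swapped pointer.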
    /// Publish all operations appended to the log to readers.
///
/// This method needs to wait for all readers to move to the "other" copy of the data so that
/// it can replay the operational log onto the stale copy the readers used to use. This can
/// take some time, especially if readers are executing slow operations, or if there are many
/// of them.
pub fn publish(&mut self) -> &mut Self {
// we need to wait until all epochs have changed since the swaps *or* until a "finished"
// flag has been observed to be on for two subsequent iterations (there still may be some
// readers present since we did the previous refresh)
//
// NOTE: it is safe for us to hold the lock for the entire duration of the swap. we will
// only block on pre-existing readers, and they are never waiting to push onto epochs
// unless they have finished reading.
let epochs = Arc::clone(&self.epochs);
let mut epochs = epochs.lock().unwrap();
self.wait(&mut epochs);
if !self.first {
// all the readers have left!
// safety: we haven't freed the Box, and no readers are accessing the w_handle
let w_handle = unsafe { self.w_handle.as_mut() };
// safety: we will not swap while we hold this reference
let r_handle = unsafe {
self.r_handle
.inner
.load(Ordering::Acquire)
.as_ref()
.unwrap()
};
if self.second {
Absorb::sync_with(w_handle, r_handle);
self.second = false
}
// the w_handle copy has not seen any of the writes in the oplog
// the r_handle copy has not seen any of the writes following swap_index
if self.swap_index != 0 {
// we can drain out the operations that only the w_handle copy needs
//
// NOTE: the if above is because drain(0..0) would remove 0
for op in self.oplog.drain(0..self.swap_index) {
T::absorb_second(w_handle, op, r_handle);
}
}
// we cannot give owned operations to absorb_first
// since they'll also be needed by the r_handle copy
for op in self.oplog.iter_mut() {
T::absorb_first(w_handle, op, r_handle);
}
// the w_handle copy is about to become the r_handle, and can ignore the oplog
self.swap_index = self.oplog.len();
// w_handle (the old r_handle) is now fully up to date!
} else {
self.first = false
}
// at this point, we have exclusive access to w_handle, and it is up-to-date with all
// writes. the stale r_handle is accessed by readers through an Arc clone of atomic pointer
// inside the ReadHandle. oplog contains all the changes that are in w_handle, but not in
// r_handle.
//
// it's now time for us to swap the copies so that readers see up-to-date results from
// w_handle.
// swap in our w_handle, and get r_handle in return
let r_handle = self
.r_handle
.inner
.swap(self.w_handle.as_ptr(), Ordering::Release);
// NOTE: at this point, there are likely still readers using r_handle.
// safety: r_handle was also created from a Box, so it is not null and is covariant.
self.w_handle = unsafe { NonNull::new_unchecked(r_handle) };
// ensure that the subsequent epoch reads aren't re-ordered to before the swap
fence(Ordering::SeqCst);
for (ri, epoch) in epochs.iter() {
self.last_epochs[ri] = epoch.load(Ordering::Acquire);
}
#[cfg(test)]
{
self.refreshes += 1;
}
self
}
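    // Illustrative usage sketch (mirrors the tests at the bottom of this file;
    // `CounterAddOp` is the test-only operation type, not part of the public API):
    //
    //     let (mut w, r) = crate::new::<i32, CounterAddOp>();
    //     w.append(CounterAddOp(42));
    //     // Readers still observe the old copy at this point.
    //     w.publish();
    //     assert_eq!(*r.enter().unwrap(), 42);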
/// Publish as necessary to ensure that all operations are visible to readers.
///
/// `WriteHandle::publish` will *always* wait for old readers to depart and swap the maps.
/// This method will only do so if there are pending operations.
pub fn flush(&mut self) {
if self.has_pending_operations() {
self.publish();
}
}
/// Returns true if there are operations in the operational log that have not yet been exposed
/// to readers.
pub fn has_pending_operations(&self) -> bool {
// NOTE: we don't use self.oplog.is_empty() here because it's not really that important if
// there are operations that have not yet been applied to the _write_ handle.
self.swap_index < self.oplog.len()
}
/// Append the given operation to the operational log.
///
/// Its effects will not be exposed to readers until you call [`publish`](Self::publish).
pub fn append(&mut self, op: O) -> &mut Self {
self.extend(std::iter::once(op));
self
}
/// Returns a raw pointer to the write copy of the data (the one readers are _not_ accessing).
///
/// Note that it is only safe to mutate through this pointer if you _know_ that there are no
/// readers still present in this copy. This is not normally something you know; even after
/// calling `publish`, readers may still be in the write copy for some time. In general, the
/// only time you know this is okay is before the first call to `publish` (since no readers
/// ever entered the write copy).
// TODO: Make this return `Option<&mut T>`,
    // and only `Some` if there are indeed no readers in the write copy.
pub fn raw_write_handle(&mut self) -> NonNull<T> {
self.w_handle
}
/// Returns the backing data structure.
///
/// Makes sure that all the pending operations are applied and waits till all the read handles
/// have departed. Then it uses [`Absorb::drop_first`] to drop one of the copies of the data and
/// returns the other copy as a [`Taken`] smart pointer.
pub fn take(mut self) -> Taken<T, O> {
// It is always safe to `expect` here because `take_inner` is private
// and it is only called here and in the drop impl. Since we have an owned
// `self` we know the drop has not yet been called. And every first call of
// `take_inner` returns `Some`
self.take_inner()
.expect("inner is only taken here then self is dropped")
}
}
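// Illustrative sketch of `take` (mirrors `take_test` below): pending operations
// are applied first, one copy is dropped via Absorb::drop_first, and the
// surviving copy is returned once all readers have departed.
//
//     let (mut w, _r) = crate::new_from_empty::<i32, CounterAddOp>(2);
//     w.append(CounterAddOp(1));
//     assert_eq!(*w.take(), 3);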
// allow using write handle for reads
use std::ops::Deref;
impl<T, O> Deref for WriteHandle<T, O>
where
T: Absorb<O>,
{
type Target = ReadHandle<T>;
fn deref(&self) -> &Self::Target {
&self.r_handle
}
}
impl<T, O> Extend<O> for WriteHandle<T, O>
where
T: Absorb<O>,
{
/// Add multiple operations to the operational log.
///
/// Their effects will not be exposed to readers until you call [`publish`](Self::publish)
fn extend<I>(&mut self, ops: I)
where
I: IntoIterator<Item = O>,
{
if self.first | {
// Safety: we know there are no outstanding w_handle readers, since we haven't
// refreshed ever before, so we can modify it directly!
let mut w_inner = self.raw_write_handle();
let w_inner = unsafe { w_inner.as_mut() };
let r_handle = self.enter().expect("map has not yet been destroyed");
// Because we are operating directly on the map, and nothing is aliased, we do want
// to perform drops, so we invoke absorb_second.
for op in ops {
Absorb::absorb_second(w_inner, op, &*r_handle);
}
} | conditional_block |
|
write.rs | <T>;
fn deref(&self) -> &Self::Target {
&self.r_handle
}
}
impl<T, O> Extend<O> for WriteHandle<T, O>
where
T: Absorb<O>,
{
/// Add multiple operations to the operational log.
///
/// Their effects will not be exposed to readers until you call [`publish`](Self::publish)
fn extend<I>(&mut self, ops: I)
where
I: IntoIterator<Item = O>,
{
if self.first {
// Safety: we know there are no outstanding w_handle readers, since we haven't
// refreshed ever before, so we can modify it directly!
let mut w_inner = self.raw_write_handle();
let w_inner = unsafe { w_inner.as_mut() };
let r_handle = self.enter().expect("map has not yet been destroyed");
// Because we are operating directly on the map, and nothing is aliased, we do want
// to perform drops, so we invoke absorb_second.
for op in ops {
Absorb::absorb_second(w_inner, op, &*r_handle);
}
} else {
self.oplog.extend(ops);
}
}
}
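// Illustrative sketch of `extend` (uses the test-only `CounterAddOp` type): after
// the first publish, extended operations sit in the oplog until the next publish.
//
//     let (mut w, r) = crate::new::<i32, CounterAddOp>();
//     w.publish();
//     w.extend(vec![CounterAddOp(1), CounterAddOp(2), CounterAddOp(3)]);
//     assert!(w.has_pending_operations());
//     w.publish();
//     assert_eq!(*r.enter().unwrap(), 6);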
/// `WriteHandle` can be sent across thread boundaries:
///
/// ```
/// use left_right::WriteHandle;
///
/// struct Data;
/// impl left_right::Absorb<()> for Data {
/// fn absorb_first(&mut self, _: &mut (), _: &Self) {}
/// fn sync_with(&mut self, _: &Self) {}
/// }
///
/// fn is_send<T: Send>() {
/// // dummy function just used for its parameterized type bound
/// }
///
/// is_send::<WriteHandle<Data, ()>>()
/// ```
///
/// As long as the inner types allow that of course.
/// Namely, the data type has to be `Send`:
///
/// ```compile_fail
/// use left_right::WriteHandle;
/// use std::rc::Rc;
///
/// struct Data(Rc<()>);
/// impl left_right::Absorb<()> for Data {
/// fn absorb_first(&mut self, _: &mut (), _: &Self) {}
/// }
///
/// fn is_send<T: Send>() {
/// // dummy function just used for its parameterized type bound
/// }
///
/// is_send::<WriteHandle<Data, ()>>()
/// ```
///
/// .. the operation type has to be `Send`:
///
/// ```compile_fail
/// use left_right::WriteHandle;
/// use std::rc::Rc;
///
/// struct Data;
/// impl left_right::Absorb<Rc<()>> for Data {
/// fn absorb_first(&mut self, _: &mut Rc<()>, _: &Self) {}
/// }
///
/// fn is_send<T: Send>() {
/// // dummy function just used for its parameterized type bound
/// }
///
/// is_send::<WriteHandle<Data, Rc<()>>>()
/// ```
///
/// .. and the data type has to be `Sync` so it's still okay to read through `ReadHandle`s:
///
/// ```compile_fail
/// use left_right::WriteHandle;
/// use std::cell::Cell;
///
/// struct Data(Cell<()>);
/// impl left_right::Absorb<()> for Data {
/// fn absorb_first(&mut self, _: &mut (), _: &Self) {}
/// }
///
/// fn is_send<T: Send>() {
/// // dummy function just used for its parameterized type bound
/// }
///
/// is_send::<WriteHandle<Data, ()>>()
/// ```
#[allow(dead_code)]
struct CheckWriteHandleSend;
#[cfg(test)]
mod tests {
use crate::sync::{AtomicUsize, Mutex, Ordering};
use crate::Absorb;
use slab::Slab;
include!("./utilities.rs");
#[test]
fn append_test() {
let (mut w, _r) = crate::new::<i32, _>();
assert_eq!(w.first, true);
w.append(CounterAddOp(1));
assert_eq!(w.oplog.len(), 0);
assert_eq!(w.first, true);
w.publish();
assert_eq!(w.first, false);
w.append(CounterAddOp(2));
w.append(CounterAddOp(3));
assert_eq!(w.oplog.len(), 2);
}
#[test]
fn take_test() {
// publish twice then take with no pending operations
let (mut w, _r) = crate::new_from_empty::<i32, _>(2);
w.append(CounterAddOp(1));
w.publish();
w.append(CounterAddOp(1));
w.publish();
assert_eq!(*w.take(), 4);
// publish twice then pending operation published by take
let (mut w, _r) = crate::new_from_empty::<i32, _>(2);
w.append(CounterAddOp(1));
w.publish();
w.append(CounterAddOp(1));
w.publish();
w.append(CounterAddOp(2));
assert_eq!(*w.take(), 6);
// normal publish then pending operations published by take
let (mut w, _r) = crate::new_from_empty::<i32, _>(2);
w.append(CounterAddOp(1));
w.publish();
w.append(CounterAddOp(1));
assert_eq!(*w.take(), 4);
// pending operations published by take
let (mut w, _r) = crate::new_from_empty::<i32, _>(2);
w.append(CounterAddOp(1));
assert_eq!(*w.take(), 3);
// empty op queue
let (mut w, _r) = crate::new_from_empty::<i32, _>(2);
w.append(CounterAddOp(1));
w.publish();
assert_eq!(*w.take(), 3);
// no operations
let (w, _r) = crate::new_from_empty::<i32, _>(2);
assert_eq!(*w.take(), 2);
}
#[test]
fn wait_test() {
use std::sync::{Arc, Barrier};
use std::thread;
let (mut w, _r) = crate::new::<i32, _>();
// Case 1: If epoch is set to default.
let test_epochs: crate::Epochs = Default::default();
let mut test_epochs = test_epochs.lock().unwrap();
// since there is no epoch to wait for, the wait function will return immediately.
w.wait(&mut test_epochs);
// Case 2: If one of the readers is still reading (epoch is odd and the count is the same as in last_epochs)
// and wait has been called.
let held_epoch = Arc::new(AtomicUsize::new(1));
w.last_epochs = vec![2, 2, 1];
let mut epochs_slab = Slab::new();
epochs_slab.insert(Arc::new(AtomicUsize::new(2)));
epochs_slab.insert(Arc::new(AtomicUsize::new(2)));
epochs_slab.insert(Arc::clone(&held_epoch));
let barrier = Arc::new(Barrier::new(2));
let is_waiting = Arc::clone(&w.is_waiting);
// check writers waiting state before calling wait.
let is_waiting_v = is_waiting.load(Ordering::Relaxed);
assert_eq!(false, is_waiting_v);
let barrier2 = Arc::clone(&barrier);
let test_epochs = Arc::new(Mutex::new(epochs_slab));
let wait_handle = thread::spawn(move || {
barrier2.wait();
let mut test_epochs = test_epochs.lock().unwrap();
w.wait(&mut test_epochs);
});
barrier.wait();
// make sure that the writer's wait() is called first; only then allow the held epoch to be updated.
while !is_waiting.load(Ordering::Relaxed) {
thread::yield_now();
}
held_epoch.fetch_add(1, Ordering::SeqCst);
// join to make sure that wait must return after the progress/increment
// of held_epoch.
let _ = wait_handle.join();
}
#[test]
fn flush_noblock() {
let (mut w, r) = crate::new::<i32, _>();
w.append(CounterAddOp(42));
w.publish();
assert_eq!(*r.enter().unwrap(), 42);
// pin the epoch
let _count = r.enter();
// refresh would hang here
assert_eq!(w.oplog.iter().skip(w.swap_index).count(), 0);
assert!(!w.has_pending_operations());
}
#[test]
fn flush_no_refresh() | {
let (mut w, _) = crate::new::<i32, _>();
// Until we refresh, writes are written directly instead of going to the
// oplog (because there can't be any readers on the w_handle table).
assert!(!w.has_pending_operations());
w.publish();
assert!(!w.has_pending_operations());
assert_eq!(w.refreshes, 1);
w.append(CounterAddOp(42));
assert!(w.has_pending_operations());
w.publish();
assert!(!w.has_pending_operations());
assert_eq!(w.refreshes, 2);
w.append(CounterAddOp(42));
assert!(w.has_pending_operations());
w.publish();
assert!(!w.has_pending_operations()); | identifier_body |
|
write.rs | writes in the oplog
// the r_handle copy has not seen any of the writes following swap_index
if self.swap_index != 0 {
// we can drain out the operations that only the w_handle copy needs
//
// NOTE: the if above is because drain(0..0) would remove 0
for op in self.oplog.drain(0..self.swap_index) {
T::absorb_second(w_handle, op, r_handle);
}
}
// we cannot give owned operations to absorb_first
// since they'll also be needed by the r_handle copy
for op in self.oplog.iter_mut() {
T::absorb_first(w_handle, op, r_handle);
}
// the w_handle copy is about to become the r_handle, and can ignore the oplog
self.swap_index = self.oplog.len();
// w_handle (the old r_handle) is now fully up to date!
} else {
self.first = false
}
// at this point, we have exclusive access to w_handle, and it is up-to-date with all
// writes. the stale r_handle is accessed by readers through an Arc clone of atomic pointer
// inside the ReadHandle. oplog contains all the changes that are in w_handle, but not in
// r_handle.
//
// it's now time for us to swap the copies so that readers see up-to-date results from
// w_handle.
// swap in our w_handle, and get r_handle in return
let r_handle = self
.r_handle
.inner
.swap(self.w_handle.as_ptr(), Ordering::Release);
// NOTE: at this point, there are likely still readers using r_handle.
// safety: r_handle was also created from a Box, so it is not null and is covariant.
self.w_handle = unsafe { NonNull::new_unchecked(r_handle) };
// ensure that the subsequent epoch reads aren't re-ordered to before the swap
fence(Ordering::SeqCst);
for (ri, epoch) in epochs.iter() {
self.last_epochs[ri] = epoch.load(Ordering::Acquire);
}
#[cfg(test)]
{
self.refreshes += 1;
}
self
}
/// Publish as necessary to ensure that all operations are visible to readers.
///
/// `WriteHandle::publish` will *always* wait for old readers to depart and swap the maps.
/// This method will only do so if there are pending operations.
pub fn flush(&mut self) {
if self.has_pending_operations() {
self.publish();
}
}
/// Returns true if there are operations in the operational log that have not yet been exposed
/// to readers.
pub fn has_pending_operations(&self) -> bool {
// NOTE: we don't use self.oplog.is_empty() here because it's not really that important if
// there are operations that have not yet been applied to the _write_ handle.
self.swap_index < self.oplog.len()
}
/// Append the given operation to the operational log.
///
/// Its effects will not be exposed to readers until you call [`publish`](Self::publish).
pub fn append(&mut self, op: O) -> &mut Self {
self.extend(std::iter::once(op));
self
}
/// Returns a raw pointer to the write copy of the data (the one readers are _not_ accessing).
///
/// Note that it is only safe to mutate through this pointer if you _know_ that there are no
/// readers still present in this copy. This is not normally something you know; even after
/// calling `publish`, readers may still be in the write copy for some time. In general, the
/// only time you know this is okay is before the first call to `publish` (since no readers
/// ever entered the write copy).
// TODO: Make this return `Option<&mut T>`,
    // and only `Some` if there are indeed no readers in the write copy.
pub fn raw_write_handle(&mut self) -> NonNull<T> {
self.w_handle
}
/// Returns the backing data structure.
///
/// Makes sure that all the pending operations are applied and waits till all the read handles
/// have departed. Then it uses [`Absorb::drop_first`] to drop one of the copies of the data and
/// returns the other copy as a [`Taken`] smart pointer.
pub fn take(mut self) -> Taken<T, O> {
// It is always safe to `expect` here because `take_inner` is private
// and it is only called here and in the drop impl. Since we have an owned
// `self` we know the drop has not yet been called. And every first call of
// `take_inner` returns `Some`
self.take_inner()
.expect("inner is only taken here then self is dropped")
}
}
// allow using write handle for reads
use std::ops::Deref;
impl<T, O> Deref for WriteHandle<T, O>
where
T: Absorb<O>,
{
type Target = ReadHandle<T>;
fn deref(&self) -> &Self::Target {
&self.r_handle
}
}
impl<T, O> Extend<O> for WriteHandle<T, O>
where
T: Absorb<O>,
{
/// Add multiple operations to the operational log.
///
/// Their effects will not be exposed to readers until you call [`publish`](Self::publish)
fn extend<I>(&mut self, ops: I)
where
I: IntoIterator<Item = O>,
{
if self.first {
// Safety: we know there are no outstanding w_handle readers, since we haven't
// refreshed ever before, so we can modify it directly!
let mut w_inner = self.raw_write_handle();
let w_inner = unsafe { w_inner.as_mut() };
let r_handle = self.enter().expect("map has not yet been destroyed");
// Because we are operating directly on the map, and nothing is aliased, we do want
// to perform drops, so we invoke absorb_second.
for op in ops {
Absorb::absorb_second(w_inner, op, &*r_handle);
}
} else {
self.oplog.extend(ops);
}
}
}
/// `WriteHandle` can be sent across thread boundaries:
///
/// ```
/// use left_right::WriteHandle;
///
/// struct Data;
/// impl left_right::Absorb<()> for Data {
/// fn absorb_first(&mut self, _: &mut (), _: &Self) {}
/// fn sync_with(&mut self, _: &Self) {}
/// }
///
/// fn is_send<T: Send>() {
/// // dummy function just used for its parameterized type bound
/// }
///
/// is_send::<WriteHandle<Data, ()>>()
/// ```
///
/// As long as the inner types allow that of course.
/// Namely, the data type has to be `Send`:
///
/// ```compile_fail
/// use left_right::WriteHandle;
/// use std::rc::Rc;
///
/// struct Data(Rc<()>);
/// impl left_right::Absorb<()> for Data {
/// fn absorb_first(&mut self, _: &mut (), _: &Self) {}
/// }
///
/// fn is_send<T: Send>() {
/// // dummy function just used for its parameterized type bound
/// }
///
/// is_send::<WriteHandle<Data, ()>>()
/// ```
///
/// .. the operation type has to be `Send`:
///
/// ```compile_fail
/// use left_right::WriteHandle;
/// use std::rc::Rc;
///
/// struct Data;
/// impl left_right::Absorb<Rc<()>> for Data {
/// fn absorb_first(&mut self, _: &mut Rc<()>, _: &Self) {}
/// }
///
/// fn is_send<T: Send>() {
/// // dummy function just used for its parameterized type bound
/// }
///
/// is_send::<WriteHandle<Data, Rc<()>>>()
/// ```
///
/// .. and the data type has to be `Sync` so it's still okay to read through `ReadHandle`s:
///
/// ```compile_fail
/// use left_right::WriteHandle;
/// use std::cell::Cell;
///
/// struct Data(Cell<()>);
/// impl left_right::Absorb<()> for Data {
/// fn absorb_first(&mut self, _: &mut (), _: &Self) {}
/// }
///
/// fn is_send<T: Send>() {
/// // dummy function just used for its parameterized type bound
/// }
///
/// is_send::<WriteHandle<Data, ()>>()
/// ```
#[allow(dead_code)]
struct CheckWriteHandleSend;
#[cfg(test)]
mod tests {
use crate::sync::{AtomicUsize, Mutex, Ordering};
use crate::Absorb;
use slab::Slab;
include!("./utilities.rs");
#[test]
fn append_test() {
let (mut w, _r) = crate::new::<i32, _>();
assert_eq!(w.first, true);
w.append(CounterAddOp(1));
assert_eq!(w.oplog.len(), 0);
assert_eq!(w.first, true);
w.publish();
assert_eq!(w.first, false);
w.append(CounterAddOp(2));
w.append(CounterAddOp(3)); | assert_eq!(w.oplog.len(), 2);
}
| random_line_split |
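// Illustrative usage sketch, not part of the original crate sources: it assumes the
// left-right crate's public API exercised by the tests above (`left_right::new`,
// `append`, `publish`, `enter`) and an op type modeled on the CounterAddOp helper
// included from utilities.rs.
use left_right::Absorb;
struct CounterAddOp(i32);
impl Absorb<CounterAddOp> for i32 {
    fn absorb_first(&mut self, op: &mut CounterAddOp, _other: &Self) {
        *self += op.0;
    }
    fn sync_with(&mut self, first: &Self) {
        *self = *first;
    }
}
fn main() {
    let (mut w, r) = left_right::new::<i32, CounterAddOp>();
    w.append(CounterAddOp(1)); // queued in the oplog, invisible to readers
    w.publish();               // swap the copies; readers can now observe 1
    assert_eq!(*r.enter().expect("not destroyed"), 1);
}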
|
write.rs | <T, O>
where
T: Absorb<O>,
{
epochs: crate::Epochs,
w_handle: NonNull<T>,
oplog: VecDeque<O>,
swap_index: usize,
r_handle: ReadHandle<T>,
last_epochs: Vec<usize>,
#[cfg(test)]
refreshes: usize,
#[cfg(test)]
is_waiting: Arc<AtomicBool>,
/// Write directly to the write handle map, since no publish has happened.
first: bool,
/// A publish has happened, but the two copies have not been synchronized yet.
second: bool,
/// If we call `Self::take` the drop needs to be different.
taken: bool,
}
// safety: if a `WriteHandle` is sent across a thread boundary, we need to be able to take
// ownership of both Ts and Os across that thread boundary. since `WriteHandle` holds a
// `ReadHandle`, we also need to respect its Send requirements.
unsafe impl<T, O> Send for WriteHandle<T, O>
where
T: Absorb<O>,
T: Send,
O: Send,
ReadHandle<T>: Send,
{
}
impl<T, O> fmt::Debug for WriteHandle<T, O>
where
T: Absorb<O> + fmt::Debug,
O: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("WriteHandle")
.field("epochs", &self.epochs)
.field("w_handle", &self.w_handle)
.field("oplog", &self.oplog)
.field("swap_index", &self.swap_index)
.field("r_handle", &self.r_handle)
.field("first", &self.first)
.field("second", &self.second)
.finish()
}
}
/// A **smart pointer** to an owned backing data structure. This makes sure that the
/// data is dropped correctly (using [`Absorb::drop_second`]).
///
/// Additionally it allows for unsafely getting the inner data out using [`into_box()`](Taken::into_box).
pub struct Taken<T: Absorb<O>, O> {
inner: Option<Box<T>>,
_marker: PhantomData<O>,
}
impl<T: Absorb<O> + std::fmt::Debug, O> std::fmt::Debug for Taken<T, O> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Taken")
.field(
"inner",
self.inner
.as_ref()
.expect("inner is only taken in `into_box` which drops self"),
)
.finish()
}
}
impl<T: Absorb<O>, O> Deref for Taken<T, O> {
type Target = T;
fn deref(&self) -> &Self::Target {
self.inner
.as_ref()
.expect("inner is only taken in `into_box` which drops self")
}
}
impl<T: Absorb<O>, O> DerefMut for Taken<T, O> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.inner
.as_mut()
.expect("inner is only taken in `into_box` which drops self")
}
}
impl<T: Absorb<O>, O> Taken<T, O> {
/// This is unsafe because you must call [`Absorb::drop_second`] in
/// case just dropping `T` is not safe and sufficient.
///
/// If you used the default implementation of [`Absorb::drop_second`] (which just calls [`drop`](Drop::drop))
/// you don't need to call [`Absorb::drop_second`].
pub unsafe fn into_box(mut self) -> Box<T> {
self.inner
.take()
.expect("inner is only taken here then self is dropped")
}
}
impl<T: Absorb<O>, O> Drop for Taken<T, O> {
fn drop(&mut self) {
if let Some(inner) = self.inner.take() {
T::drop_second(inner);
}
}
}
impl<T, O> WriteHandle<T, O>
where
T: Absorb<O>,
{
/// Takes out the inner backing data structure if it hasn't been taken yet. Otherwise returns `None`.
///
/// Makes sure that all the pending operations are applied and waits till all the read handles
/// have departed. Then it uses [`Absorb::drop_first`] to drop one of the copies of the data and
/// returns the other copy as a [`Taken`] smart pointer.
fn take_inner(&mut self) -> Option<Taken<T, O>> {
use std::ptr;
// Can only take inner once.
if self.taken {
return None;
}
// Disallow taking again.
self.taken = true;
// first, ensure both copies are up to date
// (otherwise safely dropping the possibly duplicated w_handle data is a pain)
if self.first || !self.oplog.is_empty() {
self.publish();
}
if !self.oplog.is_empty() {
self.publish();
}
assert!(self.oplog.is_empty());
// next, grab the read handle and set it to NULL
let r_handle = self.r_handle.inner.swap(ptr::null_mut(), Ordering::Release);
// now, wait for all readers to depart
let epochs = Arc::clone(&self.epochs);
let mut epochs = epochs.lock().unwrap();
self.wait(&mut epochs);
// ensure that the subsequent epoch reads aren't re-ordered to before the swap
fence(Ordering::SeqCst);
// all readers have now observed the NULL, so we own both handles.
// all operations have been applied to both w_handle and r_handle.
// give the underlying data structure an opportunity to handle the one copy differently:
//
// safety: w_handle was initially created from a `Box`, and is no longer aliased.
Absorb::drop_first(unsafe { Box::from_raw(self.w_handle.as_ptr()) });
// next we take the r_handle and return it as a boxed value.
//
// this is safe, since we know that no readers are using this pointer
// anymore (due to the .wait() following swapping the pointer with NULL).
//
// safety: r_handle was initially created from a `Box`, and is no longer aliased.
let boxed_r_handle = unsafe { Box::from_raw(r_handle) };
Some(Taken {
inner: Some(boxed_r_handle),
_marker: PhantomData,
})
}
}
impl<T, O> Drop for WriteHandle<T, O>
where
T: Absorb<O>,
{
fn drop(&mut self) {
if let Some(inner) = self.take_inner() {
drop(inner);
}
}
}
impl<T, O> WriteHandle<T, O>
where
T: Absorb<O>,
{
pub(crate) fn new(w_handle: T, epochs: crate::Epochs, r_handle: ReadHandle<T>) -> Self {
Self {
epochs,
// safety: Box<T> is not null and covariant.
w_handle: unsafe { NonNull::new_unchecked(Box::into_raw(Box::new(w_handle))) },
oplog: VecDeque::new(),
swap_index: 0,
r_handle,
last_epochs: Vec::new(),
#[cfg(test)]
is_waiting: Arc::new(AtomicBool::new(false)),
#[cfg(test)]
refreshes: 0,
first: true,
second: true,
taken: false,
}
}
fn wait(&mut self, epochs: &mut MutexGuard<'_, slab::Slab<Arc<AtomicUsize>>>) {
let mut iter = 0;
let mut starti = 0;
#[cfg(test)]
{
self.is_waiting.store(true, Ordering::Relaxed);
}
// we're over-estimating here, but slab doesn't expose its max index
self.last_epochs.resize(epochs.capacity(), 0);
'retry: loop {
// read all and see if all have changed (which is likely)
for (ii, (ri, epoch)) in epochs.iter().enumerate().skip(starti) {
// if the reader's epoch was even last we read it (which was _after_ the swap),
// then they either do not have the pointer, or must have read the pointer strictly
// after the swap. in either case, they cannot be using the old pointer value (what
// is now w_handle).
//
// note that this holds even with wrap-around since std::u{N}::MAX == 2 ^ N - 1,
// which is odd, and std::u{N}::MAX + 1 == 0 is even.
//
// note also that `ri` _may_ have been re-used since we last read into last_epochs.
// this is okay though, as a change still implies that the new reader must have
// arrived _after_ we did the atomic swap, and thus must also have seen the new
// pointer.
if self.last_epochs[ri] % 2 == 0 {
continue;
}
let now = epoch.load(Ordering::Acquire);
if now != self.last_epochs[ri] {
// reader must have seen the last swap, | WriteHandle | identifier_name |
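// Illustrative walk-through of the parity check above (added commentary, not original
// code): a reader bumps its epoch to an odd value when it enters a read guard and to an
// even value when it leaves. If last_epochs[ri] was even at swap time, that slot cannot
// still be inside the old copy and is skipped; if it was odd, the writer keeps polling
// until epochs[ri] changes (e.g. 5 -> 6), which proves the guard taken before the swap
// has been dropped.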
|
moveit_grasp.py | 0.02 # push the gripper forward to grasp objects more
self.is_clear_octomap = True
## ROS init
self.init_ROS_and_moveit()
## robot start working
self.get_basic_info()
# rospy.logwarn("Type in 'start' to set robot arm in home position, and start working... ")
# k = ""
# while k != "start":
# k = raw_input()
self.go_home(config=0)
self.open_gripper()
# self.close_gripper()
# self.open_gripper()
self.pick()
def pick(self):
self.pose_goal_subscriber_ = rospy.Subscriber("/pose_goal_generator/pose_goal", geometry_msgs.msg.Pose, self.cb_follow)
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
def cb_follow(self, msg):
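# Overview of the steps below (added commentary): build a pregrasp pose a few
# centimeters behind grasp_frame, transform it into the planning frame, plan and
# execute the pregrasp motion, then approach along z, close the gripper, retrieve,
# return to the home configuration and reopen the gripper.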
## here pose_grasp is the same as grasp_frame
pose_grasp = geometry_msgs.msg.PoseStamped()
pose_grasp.pose.orientation.w = 1.0
pose_grasp.pose.position.z -= self.pregrasp_stepback
## frame_transformation
pose_mdworld = self.pose_frame_trans(pose_grasp)
# FIXME: add a pre-grasp home position (avoid singularity and occlusion due to gripper in front of camera)
# self.go_home(config=1)
## plan and execute - pregrasp
self.group.set_pose_target(pose_mdworld)
plan = self.group.go(wait=True)
if not plan:
|
self.group.stop()
self.group.clear_pose_targets()
## approach -> close gripper -> retrieve -> go home
if plan:
# self.clear_octomap()
# self.open_gripper()
self.approach_eef(pose_grasp)
self.close_gripper()
self.retrieve_eef(pose_grasp)
# self.go_home(config=1)
self.go_home(config=0)
self.open_gripper()
if self.is_clear_octomap:
self.clear_octomap()
def open_gripper(self):
gripper_goal = self.group_gripper.get_current_joint_values()
gripper_goal[0] = 0.2
gripper_goal[1] = 0.2
plan = self.group_gripper.go(gripper_goal, wait=True)
if not plan:
rospy.logerr("****** mico_gripper open_gripper || plan failed ******")
self.group_gripper.stop()
def close_gripper(self):
gripper_goal = self.group_gripper.get_current_joint_values()
gripper_goal[0] = 1.0
gripper_goal[1] = 1.0
plan = self.group_gripper.go(gripper_goal, wait=True)
if not plan:
rospy.logerr("****** mico_gripper close_gripper || plan failed ******")
self.group_gripper.stop()
def approach_eef(self, pose_grasp):
waypoints = []
scale = 1
pose_grasp.pose.position.z += scale * self.pregrasp_stepback
pose_grasp.pose.position.z += scale * self.grasp_push_dist
pose_mdworld = self.pose_frame_trans(pose_grasp)
wpose = pose_mdworld.pose
waypoints.append(copy.deepcopy(wpose))
(plan, fraction) = self.group.compute_cartesian_path(
waypoints, # waypoints to follow
0.01, # eef_step
0.0) # jump_threshold
## display trajectory
display_trajectory = moveit_msgs.msg.DisplayTrajectory()
display_trajectory.trajectory_start = self.robot.get_current_state()
display_trajectory.trajectory.append(plan)
# Publish
self.display_trajectory_publisher.publish(display_trajectory)
## execute the planned path
self.group.execute(plan, wait=True)
def retrieve_eef(self, pose_grasp):
waypoints = []
scale = 1
pose_grasp.pose.position.z -= scale * self.pregrasp_stepback
pose_grasp.pose.position.z -= scale * self.grasp_push_dist
pose_mdworld = self.pose_frame_trans(pose_grasp)
wpose = pose_mdworld.pose
waypoints.append(copy.deepcopy(wpose))
(plan, fraction) = self.group.compute_cartesian_path(
waypoints, # waypoints to follow
0.01, # eef_step
0.0) # jump_threshold
## display trajectory
display_trajectory = moveit_msgs.msg.DisplayTrajectory()
display_trajectory.trajectory_start = self.robot.get_current_state()
display_trajectory.trajectory.append(plan)
# Publish
self.display_trajectory_publisher.publish(display_trajectory)
## execute the planned path
self.group.execute(plan, wait=True)
def init_ROS_and_moveit(self):
## init MoveIt!
moveit_commander.roscpp_initialize(sys.argv)
self.robot = moveit_commander.RobotCommander()
self.scene = moveit_commander.PlanningSceneInterface()
self.group = moveit_commander.MoveGroupCommander("mico_arm")
self.group_gripper = moveit_commander.MoveGroupCommander("mico_gripper")
# self.home_joint_values = self.group.get_current_joint_values()
self.home_joint_values = self.set_home_joint_values(config=0)
## init ROS
rospy.init_node('moveit_pregrasp', anonymous=False)
self.display_trajectory_publisher = rospy.Publisher("/move_group/display_planned_path", moveit_msgs.msg.DisplayTrajectory, queue_size=20)
def pose_frame_trans(self, pose_g):
'''
transform grasp pose from grasp_frame to mdworld_frame
'''
## tf transform
tf_buffer = tf2_ros.Buffer(rospy.Duration(10.0)) # tf buffer length
tf_listener = tf2_ros.TransformListener(tf_buffer)
# try:
transform_grasp_world = tf_buffer.lookup_transform(
"mdworld", # target frame
"grasp_frame", # source frame
rospy.Time(0), # get the tf at first available time
rospy.Duration(10.0)) # wait for 10 second
# except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
# rospy.logerr("Some error with tf2_ros transformation.")
pose_mdworld = tf2_geometry_msgs.do_transform_pose(pose_g, transform_grasp_world)
return pose_mdworld
def clear_octomap(self):
rospy.loginfo("Clearing Octomap...")
rospy.wait_for_service("/clear_octomap")
try:
client = rospy.ServiceProxy("/clear_octomap", Empty)
client()
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
rospy.loginfo("Octomap cleared.")
def go_home(self, config=0):
if config == 0:
tmp_home_joint_values = self.set_home_joint_values(config=0)
elif config == 1:
tmp_home_joint_values = self.set_home_joint_values(config=1)
self.group.go(tmp_home_joint_values, wait=True)
self.group.stop()
def set_home_joint_values(self, config=0):
current_joint_values = self.group.get_current_joint_values()
if config == 0:
## home config 0 - preferred home joint values
current_joint_values[0] = -0.5024237154549698
current_joint_values[1] = -0.0461999859584763
current_joint_values[2] = 0.378261685955241
current_joint_values[3] = -2.7024837288386943
current_joint_values[4] = -1.0150675779092277
current_joint_values[5] = 2.789353646752098
elif config == 1:
## home config 1
current_joint_values[0] = -0.34151888731666213
current_joint_values[1] = -0.5833085097394489
current_joint_values[2] = 0.033693482792315536
current_joint_values[3] = -2.54547722780898
current_joint_values[4] = -0.9888911766777625
current_joint_values[5] = 2.9748245606556494
return current_joint_values
def plan_cartesian_path(self):
## plan a Cartesian path directly by specifying a list of waypoints for the end-effector to go through
waypoints = []
scale = 1
wpose = self.group.get_current_pose().pose
wpose.position.z -= scale * 0.1 # First move up (z)
wpose.position.y += scale * 0 | rospy.logerr("****** mico_arm pregrasp || plan failed ******") | conditional_block |
moveit_grasp.py | 0.02 # push the gripper forward to grasp objects more
self.is_clear_octomap = True
## ROS init
self.init_ROS_and_moveit()
## robot start working
self.get_basic_info()
# rospy.logwarn("Type in 'start' to set robot arm in home position, and start working... ")
# k = ""
# while k != "start":
# k = raw_input()
self.go_home(config=0)
self.open_gripper()
# self.close_gripper()
# self.open_gripper()
self.pick()
def pick(self):
self.pose_goal_subscriber_ = rospy.Subscriber("/pose_goal_generator/pose_goal", geometry_msgs.msg.Pose, self.cb_follow)
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
def cb_follow(self, msg):
## here pose_grasp is the same as grasp_frame
pose_grasp = geometry_msgs.msg.PoseStamped()
pose_grasp.pose.orientation.w = 1.0
pose_grasp.pose.position.z -= self.pregrasp_stepback
## frame_transformation
pose_mdworld = self.pose_frame_trans(pose_grasp)
# FIXME: add a pre-grasp home position (avoid singularity and occlusion due to gripper in front of camera)
# self.go_home(config=1)
## plan and execute - pregrasp
self.group.set_pose_target(pose_mdworld)
plan = self.group.go(wait=True)
if not plan:
rospy.logerr("****** mico_arm pregrasp || plan failed ******")
self.group.stop()
self.group.clear_pose_targets()
## approach -> close gripper -> retrieve -> go home
if plan:
# self.clear_octomap()
# self.open_gripper()
self.approach_eef(pose_grasp)
self.close_gripper()
self.retrieve_eef(pose_grasp)
# self.go_home(config=1)
self.go_home(config=0)
self.open_gripper()
if self.is_clear_octomap:
self.clear_octomap()
def open_gripper(self):
gripper_goal = self.group_gripper.get_current_joint_values()
gripper_goal[0] = 0.2
gripper_goal[1] = 0.2
plan = self.group_gripper.go(gripper_goal, wait=True)
if not plan:
rospy.logerr("****** mico_gripper open_gripper || plan failed ******")
self.group_gripper.stop()
def close_gripper(self):
gripper_goal = self.group_gripper.get_current_joint_values()
gripper_goal[0] = 1.0
gripper_goal[1] = 1.0
plan = self.group_gripper.go(gripper_goal, wait=True)
if not plan:
rospy.logerr("****** mico_gripper close_gripper || plan failed ******")
self.group_gripper.stop()
def approach_eef(self, pose_grasp):
waypoints = []
scale = 1
pose_grasp.pose.position.z += scale * self.pregrasp_stepback
pose_grasp.pose.position.z += scale * self.grasp_push_dist
pose_mdworld = self.pose_frame_trans(pose_grasp)
wpose = pose_mdworld.pose
waypoints.append(copy.deepcopy(wpose))
(plan, fraction) = self.group.compute_cartesian_path(
waypoints, # waypoints to follow
0.01, # eef_step
0.0) # jump_threshold
## display trajectory
display_trajectory = moveit_msgs.msg.DisplayTrajectory()
display_trajectory.trajectory_start = self.robot.get_current_state()
display_trajectory.trajectory.append(plan)
# Publish
self.display_trajectory_publisher.publish(display_trajectory)
## execute the planned path
self.group.execute(plan, wait=True)
def retrieve_eef(self, pose_grasp):
waypoints = []
scale = 1
pose_grasp.pose.position.z -= scale * self.pregrasp_stepback
pose_grasp.pose.position.z -= scale * self.grasp_push_dist
pose_mdworld = self.pose_frame_trans(pose_grasp)
wpose = pose_mdworld.pose
waypoints.append(copy.deepcopy(wpose))
(plan, fraction) = self.group.compute_cartesian_path(
waypoints, # waypoints to follow
0.01, # eef_step
0.0) # jump_threshold
## display trajectory
display_trajectory = moveit_msgs.msg.DisplayTrajectory()
display_trajectory.trajectory_start = self.robot.get_current_state()
display_trajectory.trajectory.append(plan)
# Publish
self.display_trajectory_publisher.publish(display_trajectory)
## execute the planned path
self.group.execute(plan, wait=True)
def init_ROS_and_moveit(self):
## init MoveIt!
moveit_commander.roscpp_initialize(sys.argv)
self.robot = moveit_commander.RobotCommander()
self.scene = moveit_commander.PlanningSceneInterface()
self.group = moveit_commander.MoveGroupCommander("mico_arm")
self.group_gripper = moveit_commander.MoveGroupCommander("mico_gripper")
# self.home_joint_values = self.group.get_current_joint_values()
self.home_joint_values = self.set_home_joint_values(config=0)
## init ROS
rospy.init_node('moveit_pregrasp', anonymous=False)
self.display_trajectory_publisher = rospy.Publisher("/move_group/display_planned_path", moveit_msgs.msg.DisplayTrajectory, queue_size=20)
def pose_frame_trans(self, pose_g):
'''
transform grasp pose from grasp_frame to mdworld_frame
'''
## tf transform
tf_buffer = tf2_ros.Buffer(rospy.Duration(10.0)) # tf buffer length
tf_listener = tf2_ros.TransformListener(tf_buffer)
# try:
transform_grasp_world = tf_buffer.lookup_transform(
"mdworld", # target frame
"grasp_frame", # source frame
rospy.Time(0), # get the tf at first available time
rospy.Duration(10.0)) # wait for 10 second
# except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
# rospy.logerr("Some error with tf2_ros transformation.")
pose_mdworld = tf2_geometry_msgs.do_transform_pose(pose_g, transform_grasp_world)
return pose_mdworld
def clear_octomap(self):
rospy.loginfo("Clearing Octomap...")
rospy.wait_for_service("/clear_octomap")
try:
client = rospy.ServiceProxy("/clear_octomap", Empty)
client()
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
rospy.loginfo("Octomap cleared.")
def go_home(self, config=0):
if config == 0:
tmp_home_joint_values = self.set_home_joint_values(config=0)
elif config == 1:
tmp_home_joint_values = self.set_home_joint_values(config=1)
self.group.go(tmp_home_joint_values, wait=True)
self.group.stop()
def | (self, config=0):
current_joint_values = self.group.get_current_joint_values()
if config == 0:
## home config 0 - preferred home joint values
current_joint_values[0] = -0.5024237154549698
current_joint_values[1] = -0.0461999859584763
current_joint_values[2] = 0.378261685955241
current_joint_values[3] = -2.7024837288386943
current_joint_values[4] = -1.0150675779092277
current_joint_values[5] = 2.789353646752098
elif config == 1:
## home config 1
current_joint_values[0] = -0.34151888731666213
current_joint_values[1] = -0.5833085097394489
current_joint_values[2] = 0.033693482792315536
current_joint_values[3] = -2.54547722780898
current_joint_values[4] = -0.9888911766777625
current_joint_values[5] = 2.9748245606556494
return current_joint_values
def plan_cartesian_path(self):
## plan a Cartesian path directly by specifying a list of waypoints for the end-effector to go through
waypoints = []
scale = 1
wpose = self.group.get_current_pose().pose
wpose.position.z -= scale * 0.1 # First move up (z)
wpose.position.y += scale * 0. | set_home_joint_values | identifier_name |
moveit_grasp.py | 0.02 # push the gripper forward to grasp objects more
self.is_clear_octomap = True
## ROS init
self.init_ROS_and_moveit()
## robot start working
self.get_basic_info()
# rospy.logwarn("Type in 'start' to set robot arm in home position, and start working... ")
# k = ""
# while k != "start":
# k = raw_input()
self.go_home(config=0)
self.open_gripper()
# self.close_gripper()
# self.open_gripper()
self.pick()
def pick(self):
self.pose_goal_subscriber_ = rospy.Subscriber("/pose_goal_generator/pose_goal", geometry_msgs.msg.Pose, self.cb_follow)
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
def cb_follow(self, msg):
## here pose_grasp is the same as grasp_frame
pose_grasp = geometry_msgs.msg.PoseStamped()
pose_grasp.pose.orientation.w = 1.0
pose_grasp.pose.position.z -= self.pregrasp_stepback
## frame_transformation
pose_mdworld = self.pose_frame_trans(pose_grasp)
# FIXME: add a pre-grasp home position (avoid singularity and occlusion due to gripper in front of camera)
# self.go_home(config=1)
## plan and execute - pregrasp
self.group.set_pose_target(pose_mdworld)
plan = self.group.go(wait=True)
if not plan:
rospy.logerr("****** mico_arm pregrasp || plan failed ******")
self.group.stop()
self.group.clear_pose_targets()
## approach -> close gripper -> retrieve -> go home
if plan:
# self.clear_octomap()
# self.open_gripper()
self.approach_eef(pose_grasp)
self.close_gripper()
self.retrieve_eef(pose_grasp)
# self.go_home(config=1)
self.go_home(config=0)
self.open_gripper()
if self.is_clear_octomap:
self.clear_octomap()
def open_gripper(self):
gripper_goal = self.group_gripper.get_current_joint_values()
gripper_goal[0] = 0.2
gripper_goal[1] = 0.2
plan = self.group_gripper.go(gripper_goal, wait=True)
if not plan:
rospy.logerr("****** mico_gripper open_gripper || plan failed ******")
self.group_gripper.stop()
def close_gripper(self):
gripper_goal = self.group_gripper.get_current_joint_values()
gripper_goal[0] = 1.0
gripper_goal[1] = 1.0
plan = self.group_gripper.go(gripper_goal, wait=True)
if not plan:
rospy.logerr("****** mico_gripper close_gripper || plan failed ******")
self.group_gripper.stop()
def approach_eef(self, pose_grasp):
waypoints = []
scale = 1
pose_grasp.pose.position.z += scale * self.pregrasp_stepback
pose_grasp.pose.position.z += scale * self.grasp_push_dist
pose_mdworld = self.pose_frame_trans(pose_grasp)
wpose = pose_mdworld.pose
waypoints.append(copy.deepcopy(wpose))
(plan, fraction) = self.group.compute_cartesian_path(
waypoints, # waypoints to follow
0.01, # eef_step
0.0) # jump_threshold
## display trajectory
display_trajectory = moveit_msgs.msg.DisplayTrajectory()
display_trajectory.trajectory_start = self.robot.get_current_state()
display_trajectory.trajectory.append(plan)
# Publish
self.display_trajectory_publisher.publish(display_trajectory)
## execute the planned path
self.group.execute(plan, wait=True)
def retrieve_eef(self, pose_grasp):
waypoints = []
scale = 1
pose_grasp.pose.position.z -= scale * self.pregrasp_stepback
pose_grasp.pose.position.z -= scale * self.grasp_push_dist
pose_mdworld = self.pose_frame_trans(pose_grasp)
wpose = pose_mdworld.pose
waypoints.append(copy.deepcopy(wpose))
(plan, fraction) = self.group.compute_cartesian_path(
waypoints, # waypoints to follow
0.01, # eef_step
0.0) # jump_threshold
## display trajectory
display_trajectory = moveit_msgs.msg.DisplayTrajectory()
display_trajectory.trajectory_start = self.robot.get_current_state()
display_trajectory.trajectory.append(plan)
# Publish
self.display_trajectory_publisher.publish(display_trajectory)
## execute the planned path
self.group.execute(plan, wait=True)
def init_ROS_and_moveit(self):
## init MoveIt!
moveit_commander.roscpp_initialize(sys.argv)
self.robot = moveit_commander.RobotCommander()
self.scene = moveit_commander.PlanningSceneInterface()
self.group = moveit_commander.MoveGroupCommander("mico_arm")
self.group_gripper = moveit_commander.MoveGroupCommander("mico_gripper")
# self.home_joint_values = self.group.get_current_joint_values()
self.home_joint_values = self.set_home_joint_values(config=0)
## init ROS
rospy.init_node('moveit_pregrasp', anonymous=False)
self.display_trajectory_publisher = rospy.Publisher("/move_group/display_planned_path", moveit_msgs.msg.DisplayTrajectory, queue_size=20)
def pose_frame_trans(self, pose_g):
|
def clear_octomap(self):
rospy.loginfo("Clearing Octomap...")
rospy.wait_for_service("/clear_octomap")
try:
client = rospy.ServiceProxy("/clear_octomap", Empty)
client()
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
rospy.loginfo("Octomap cleared.")
def go_home(self, config=0):
if config == 0:
tmp_home_joint_values = self.set_home_joint_values(config=0)
elif config == 1:
tmp_home_joint_values = self.set_home_joint_values(config=1)
self.group.go(tmp_home_joint_values, wait=True)
self.group.stop()
def set_home_joint_values(self, config=0):
current_joint_values = self.group.get_current_joint_values()
if config == 0:
## home config 0 - preferred home joint values
current_joint_values[0] = -0.5024237154549698
current_joint_values[1] = -0.0461999859584763
current_joint_values[2] = 0.378261685955241
current_joint_values[3] = -2.7024837288386943
current_joint_values[4] = -1.0150675779092277
current_joint_values[5] = 2.789353646752098
elif config == 1:
## home config 1
current_joint_values[0] = -0.34151888731666213
current_joint_values[1] = -0.5833085097394489
current_joint_values[2] = 0.033693482792315536
current_joint_values[3] = -2.54547722780898
current_joint_values[4] = -0.9888911766777625
current_joint_values[5] = 2.9748245606556494
return current_joint_values
def plan_cartesian_path(self):
## plan a Cartesian path directly by specifying a list of waypoints for the end-effector to go through
waypoints = []
scale = 1
wpose = self.group.get_current_pose().pose
wpose.position.z -= scale * 0.1 # First move up (z)
wpose.position.y += scale * 0.2 | '''
transform grasp pose from grasp_frame to mdworld_frame
'''
## tf transform
tf_buffer = tf2_ros.Buffer(rospy.Duration(10.0)) # tf buffer length
tf_listener = tf2_ros.TransformListener(tf_buffer)
# try:
transform_grasp_world = tf_buffer.lookup_transform(
"mdworld", # target frame
"grasp_frame", # source frame
rospy.Time(0), # get the tf at first available time
rospy.Duration(10.0)) # wait for 10 second
# except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
# rospy.logerr("Some error with tf2_ros transformation.")
pose_mdworld = tf2_geometry_msgs.do_transform_pose(pose_g, transform_grasp_world)
return pose_mdworld | identifier_body |
moveit_grasp.py | 0.02 # push the gripper forward to grasp objects more
self.is_clear_octomap = True
## ROS init
self.init_ROS_and_moveit()
## robot start working
self.get_basic_info()
# rospy.logwarn("Type in 'start' to set robot arm in home position, and start working... ")
# k = ""
# while k != "start":
# k = raw_input()
self.go_home(config=0)
self.open_gripper()
# self.close_gripper()
# self.open_gripper()
self.pick()
def pick(self):
self.pose_goal_subscriber_ = rospy.Subscriber("/pose_goal_generator/pose_goal", geometry_msgs.msg.Pose, self.cb_follow)
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
def cb_follow(self, msg):
## here pose_grasp is the same as grasp_frame
pose_grasp = geometry_msgs.msg.PoseStamped()
pose_grasp.pose.orientation.w = 1.0
pose_grasp.pose.position.z -= self.pregrasp_stepback
## frame_transformation
pose_mdworld = self.pose_frame_trans(pose_grasp)
# FIXME: add a pre-grasp home position (avoid singularity and occlusion due to gripper in front of camera)
# self.go_home(config=1)
## plan and execute - pregrasp
self.group.set_pose_target(pose_mdworld)
plan = self.group.go(wait=True)
if not plan:
rospy.logerr("****** mico_arm pregrasp || plan failed ******")
self.group.stop()
self.group.clear_pose_targets()
## approach -> close gripper -> retrieve -> go home
if plan:
# self.clear_octomap()
# self.open_gripper()
self.approach_eef(pose_grasp)
self.close_gripper()
self.retrieve_eef(pose_grasp) | # self.go_home(config=1)
self.go_home(config=0)
self.open_gripper()
if self.is_clear_octomap:
self.clear_octomap()
def open_gripper(self):
gripper_goal = self.group_gripper.get_current_joint_values()
gripper_goal[0] = 0.2
gripper_goal[1] = 0.2
plan = self.group_gripper.go(gripper_goal, wait=True)
if not plan:
rospy.logerr("****** mico_gripper open_gripper || plan failed ******")
self.group_gripper.stop()
def close_gripper(self):
gripper_goal = self.group_gripper.get_current_joint_values()
gripper_goal[0] = 1.0
gripper_goal[1] = 1.0
plan = self.group_gripper.go(gripper_goal, wait=True)
if not plan:
rospy.logerr("****** mico_gripper close_gripper || plan failed ******")
self.group_gripper.stop()
def approach_eef(self, pose_grasp):
waypoints = []
scale = 1
pose_grasp.pose.position.z += scale * self.pregrasp_stepback
pose_grasp.pose.position.z += scale * self.grasp_push_dist
pose_mdworld = self.pose_frame_trans(pose_grasp)
wpose = pose_mdworld.pose
waypoints.append(copy.deepcopy(wpose))
(plan, fraction) = self.group.compute_cartesian_path(
waypoints, # waypoints to follow
0.01, # eef_step
0.0) # jump_threshold
## display trajectory
display_trajectory = moveit_msgs.msg.DisplayTrajectory()
display_trajectory.trajectory_start = self.robot.get_current_state()
display_trajectory.trajectory.append(plan)
# Publish
self.display_trajectory_publisher.publish(display_trajectory)
## execute the planned path
self.group.execute(plan, wait=True)
def retrieve_eef(self, pose_grasp):
waypoints = []
scale = 1
pose_grasp.pose.position.z -= scale * self.pregrasp_stepback
pose_grasp.pose.position.z -= scale * self.grasp_push_dist
pose_mdworld = self.pose_frame_trans(pose_grasp)
wpose = pose_mdworld.pose
waypoints.append(copy.deepcopy(wpose))
(plan, fraction) = self.group.compute_cartesian_path(
waypoints, # waypoints to follow
0.01, # eef_step
0.0) # jump_threshold
## display trajectory
display_trajectory = moveit_msgs.msg.DisplayTrajectory()
display_trajectory.trajectory_start = self.robot.get_current_state()
display_trajectory.trajectory.append(plan)
# Publish
self.display_trajectory_publisher.publish(display_trajectory)
## execute the planned path
self.group.execute(plan, wait=True)
def init_ROS_and_moveit(self):
## init MoveIt!
moveit_commander.roscpp_initialize(sys.argv)
self.robot = moveit_commander.RobotCommander()
self.scene = moveit_commander.PlanningSceneInterface()
self.group = moveit_commander.MoveGroupCommander("mico_arm")
self.group_gripper = moveit_commander.MoveGroupCommander("mico_gripper")
# self.home_joint_values = self.group.get_current_joint_values()
self.home_joint_values = self.set_home_joint_values(config=0)
## init ROS
rospy.init_node('moveit_pregrasp', anonymous=False)
self.display_trajectory_publisher = rospy.Publisher("/move_group/display_planned_path", moveit_msgs.msg.DisplayTrajectory, queue_size=20)
def pose_frame_trans(self, pose_g):
'''
transform grasp pose from grasp_frame to mdworld_frame
'''
## tf transform
tf_buffer = tf2_ros.Buffer(rospy.Duration(10.0)) # tf buffer length
tf_listener = tf2_ros.TransformListener(tf_buffer)
# try:
transform_grasp_world = tf_buffer.lookup_transform(
"mdworld", # target frame
"grasp_frame", # source frame
rospy.Time(0), # get the tf at first available time
rospy.Duration(10.0)) # wait for 10 second
# except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
# rospy.logerr("Some error with tf2_ros transformation.")
pose_mdworld = tf2_geometry_msgs.do_transform_pose(pose_g, transform_grasp_world)
return pose_mdworld
def clear_octomap(self):
rospy.loginfo("Clearing Octomap...")
rospy.wait_for_service("/clear_octomap")
try:
client = rospy.ServiceProxy("/clear_octomap", Empty)
client()
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
rospy.loginfo("Octomap cleared.")
def go_home(self, config=0):
if config == 0:
tmp_home_joint_values = self.set_home_joint_values(config=0)
elif config == 1:
tmp_home_joint_values = self.set_home_joint_values(config=1)
self.group.go(tmp_home_joint_values, wait=True)
self.group.stop()
def set_home_joint_values(self, config=0):
current_joint_values = self.group.get_current_joint_values()
if config == 0:
## home config 0 - preferred home joint values
current_joint_values[0] = -0.5024237154549698
current_joint_values[1] = -0.0461999859584763
current_joint_values[2] = 0.378261685955241
current_joint_values[3] = -2.7024837288386943
current_joint_values[4] = -1.0150675779092277
current_joint_values[5] = 2.789353646752098
elif config == 1:
## home config 1
current_joint_values[0] = -0.34151888731666213
current_joint_values[1] = -0.5833085097394489
current_joint_values[2] = 0.033693482792315536
current_joint_values[3] = -2.54547722780898
current_joint_values[4] = -0.9888911766777625
current_joint_values[5] = 2.9748245606556494
return current_joint_values
def plan_cartesian_path(self):
## plan a Cartesian path directly by specifying a list of waypoints for the end-effector to go through
waypoints = []
scale = 1
wpose = self.group.get_current_pose().pose
wpose.position.z -= scale * 0.1 # First move up (z)
wpose.position.y += scale * 0.2 | random_line_split |
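# Illustrative driver for the node above (hypothetical, not part of the original file):
# it publishes a Pose on the topic that pick() subscribes to so that cb_follow() fires.
# The message payload is ignored by cb_follow(); the actual grasp pose is read from the
# "grasp_frame" TF frame published elsewhere.
import rospy
import geometry_msgs.msg
if __name__ == '__main__':
    rospy.init_node('pose_goal_test_publisher')
    pub = rospy.Publisher('/pose_goal_generator/pose_goal', geometry_msgs.msg.Pose, queue_size=1)
    rospy.sleep(1.0)  # give the subscriber time to connect
    pub.publish(geometry_msgs.msg.Pose())  # any Pose triggers one pick cycle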
|
Form.py | = getattr(MCWidgets.Widgets, widget_type)
import Widget
if not issubclass(widget_type, Widget.Widget):
raise WidgetTypeNotValid, 'Widget type %s is not valid' % type
if name in self._widgets:
raise WidgetDuplicated, 'Widget "%s" have been already added' % name
w = widget_type(self, name, args)
self._widgets[name] = w
# Incializar el tipo, si aún no lo está
widget_type = w.get_type()
if not self._type_initialized(widget_type):
w.initialize_type()
self._type_mark_initialized(widget_type)
# Y el propio widget
w.initialize_widget()
# Decirle a los widgets que hay uno nuevo
for widget in self._widgets.values():
widget.update_widgets_list(self._widgets)
def remove_widget(self, name):
'''remove_widget(name)
Elimina un widget del formulario. Ojo, use este método sólo si
sabe lo que está haciendo. Eliminar widgets puede causar problemas
de dependencias con otros que lo estén usando.
'''
try:
widget = self._widgets[name]
except KeyError:
raise WidgetNotFound, 'Widget %s not found' % name
# Notificar al propio widget que se va a eliminar del
# formulario
widget.remove_widget()
del self._widgets[name]
# Y notificar al resto del cambio
for widget in self._widgets.values():
widget.update_widgets_list(self._widgets)
def has_widget(self, name):
'''has_widget(name) -> bool
Verdadero si el widget del nombre indicado está definido'''
return self._widgets.has_key(name)
def get_widget(self, name):
'''get_widget(name) -> widget
Devuelve el widget solicitado
'''
try:
return self._widgets[name]
except KeyError:
raise WidgetNotFound, 'Widget %s not found' % name
def get_widgets(self):
'''get_widgets() -> dict
Devuelve un diccionario con todos los widgets del formulario. Las
claves serán los nombres de los widgets y los valores el objeto
que lo representa.
'''
return dict(self._widgets)
# Valores del formulario. Los valores serán aquellos que
# devuelvan los propios widgets.
def define_form_values(self, values):
'''define_form_values(values)
Estable (mediante un diccionario) los valores que usarán los widgets
como fuente.
Para obtener los datos validados y transformados de los widgets, use
get_form_value y get_form_values'''
import copy
self._form_values = copy.copy(values)
def _get_raw_form_values(self):
'''_get_raw_form_values() -> dict
Devuelve el diccionario usado como fuente para los widgets'''
return self._form_values
def get_form_values(self):
'''get_form_values() -> dict
Devuelve un diccionario con los valores de los widgets. Las claves
serán el nombre del widget, y el valor lo que devuelve el propio
widget.'''
r = {}
for name, widget in self._widgets.items():
r[name] = widget.get_value()
return r
def get_form_value(self, name):
'''get_form_value(name) -> object
Devuelve el valor de un widget.'''
try:
w = self._widgets[name]
except KeyError:
raise WidgetNotFound, 'Widget %s not found' % name
return w.get_value()
# Renderizar los widgets
def render_widget(self, widget):
'''render_widget(widget) -> str
Devuelve una cadena con el código HTML del widget pedido'''
try:
w = self._widgets[widget]
except KeyError:
raise WidgetNotFound, 'Widget %s not found' % widget
return w.render()
def get_form_attrs(self):
'''get_form_attrs() -> dict
Devuelve los atributos para el formulario actual.'''
# Obtener la información para validad de cada widget
validation_info = []
for widget in self._widgets.values():
validation_info += widget.get_validation_js_info()
import json
return {
'method': 'post',
'name': self.get_name(),
'action': self.get_url_action(),
'onsubmit': 'try { %s; } catch(e) {}; return mcw_form_validate(%s);' % (
self.get_prop('before_submit', '0'),
json.write(validation_info))
}
def auto_render(self):
'''auto_render() -> str
Devuelve una cadena con el HTML de todo el formulario
'''
from MCWidgets.Utils import html_dict_attrs
res = '<form %s>\n<table>\n' % html_dict_attrs(self.get_form_attrs())
widgets = self._widgets.keys()
widgets.sort()
for w in widgets:
w = self._widgets[w]
res += '<tr><td>' + w.get_label() + '</td>'
res += '<td>' + w.render() + '</td></tr>\n'
return res + '</table>\n<input type="submit" />\n</form>\n'
# Funciones internas
def get_url_action(self):
'''get_url_action() -> str
Devuelve la URL a la que se mandará el formulario'''
return self._args.get('url_action', '')
def get_name(self):
'''get_name() -> str
Devuelve el nombre del formulario'''
return self._form_name
def _get_url_static_files(self):
'''_get_url_static_files() -> str
Devuelve la url donde están los ficheros estáticos para,
los widgets, como CSS, JavaScript, imágenes, etc'''
return self._args.get('url_static_files', '/staticmcw/')
def _make_var_name(self, widget, varname):
'''_make_var_name (widget, varname) -> str
Devuelve el nombre de variable para el widget. La idea de esta
función es que los widgets la usen para generar los nombres de sus
variables, de manera que se asegure que no se pisan variables entre
ellos.'''
# damos por hecho de que el nombre de la variables
# no contiene caracteres problemáticos
return '_'.join([self._form_name, widget, varname])
def _type_initialized(self, typename):
'''_type_initialized (type) -> bool
Devuelve True si el tipo de widget indicado ya está
inicializado'''
return typename in self._initialized_types
def _type_mark_initialized(self, typename):
'''_type_mark_initialized (typename)
Marca el tipo indicado como inicializado. Si ya estaba
inicializado no hace nada'''
l = self._initialized_types
if typename not in l:
l.append(typename)
def _add_js_file(self, url):
'''_add_js_file(url)
Añade a la cabecera una etiqueta para cargar el fichero indicado. La URL
será relativa a donde están todos los ficheros estáticos de MCWidgets.
Si la URL ya había sido incluido, sale sin hacer nada.'''
if url in self.__included_js:
return
self.__included_js.append(url)
self.add_prop(
'header',
'<script src="%s/%s"></script>\n' %
(self._get_url_static_files(), url))
def _add_css_file(self, url):
'''_add_css_file(url)
Añade a la cabecera una etiqueta para cargar el fichero indicado. La URL
será relativa a donde están todos los ficheros estáticos de MCWidgets.
Si la URL ya había sido incluido, sale sin hacer nada.'''
if url in self.__included_css:
return
self.__included_css.append(url)
self.add_prop(
'header',
'<link href="%s/%s" type="text/css" rel="stylesheet" />\n' %
(self._get_url_static_files(), url))
class FormTemplate:
'''FormTemplate
Esta clase se usa para poder usarse en plantillas. Funciona como
un diccionario para acceder a los widgets del formulario. Cuando
se accede a él por clave, llama al render del widget pedido.
ft = FormTemplate(form)
return ft['widget']
Es equivalente al
return form.render_widget('widget')
A través del método W()
La idea es poder usarlo fácilmente desde plantillas.
$form.nombre -> form.render_widget("nombre")
$form.W.nombre -> form.get_widget("nombre")
'''
def __init__(self, form):
self.__form = form
self.__cache = {}
def W(self):
return self.__form.get_widgets()
def form_attributes(self):
from MCWidgets.Utils import html_dict_attrs
return html_dict_attrs(self.__form.get_form_attrs())
def __getattr__(self, wid):
if not self.__cache.has_key(wid):
try:
self.__cache[wid] = self.__fo | rm.render_widget(wid)
except:
import traceback, sys
traceback.print_exc(file = sys.stderr)
raise
return self.__cache[wid]
def get_nam | conditional_block |
|
Form.py | Devuelve el argumento pedido. Si no lo encuentra, devuelve default'''
return self._args.get(arg, default)
def set_arg(self, arg, value):
'''set_arg(arg, value)
Cambia el valor del argumento indicado'''
self._args[arg] = value
# Propiedades. Se usan para los widgets puedan mandar datos
# complementarios al que dibujará el formulario
def add_prop(self, prop, value, unique = False):
'''add_prop(prop, value)
Añade el texto indicado por value a la propiedad.
Si unique es verdadero, el texto sobrescribirá el valor
actual de la propiedad. Si es falso, se añadirá al final.
'''
if unique:
self._props[prop] = value
else:
self._props[prop] = self._props.get(prop, '') + value
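# Usage note (illustrative): widgets accumulate header snippets through this method,
# e.g. self.add_prop('header', '<script src="..."></script>\n') as _add_js_file and
# _add_css_file do elsewhere in this class; the page template later reads them back
# with get_prop('header').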
def get_prop(self, prop, default = ''):
'''get_prop(prop, default = '') -> str
Devuelve el valor de la propiedad. Si no la encuentra devuelve
el valor de default. '''
return self._props.get(prop, default)
def list_props(self):
'''list_props() -> list
Devuelve una lista de las propiedades definidas'''
return self._props.keys()
# Validación de valores.
def validate_widget(self, widget):
'''validate_widget(widget) -> list
Devuelve una lista con los errores encontrados en el valor
del widget. Si no hay fallos devuelve una lista vacía'''
try:
w = self._widgets[widget]
except KeyError:
raise WidgetNotFound, 'Widget %s not found' % widget
return w.validate_widget()
def validate_form(self):
'''validate_form() -> dict
Devuelve un diccionario con los errores encontrados en los widgets.
Las claves del diccionario serán los nombres de los widgets, y los
valores, listas con los errores.
Si no encuentra ningún error, devolverá un diccionario vacío'''
errors = {}
for name, widget in self._widgets.items():
r = widget.validate_widget()
if r:
errors[name] = r
return errors
# Manipular la lista de widgets
def add_widget(self, widget_type, name, args = {}):
'''add_widget(widget_type, name, args = {})
Añade un widget al formulario, del tipo widget_type.
widget_type debe ser una clase derivada de Widget.
name se usa para referirse al widget, o una cadena
para buscar en el paquete MCWidgets.Widgets
'''
if isinstance(widget_type, str):
import MCWidgets.Widgets
widget_type = getattr(MCWidgets.Widgets, widget_type)
import Widget
if not issubclass(widget_type, Widget.Widget):
raise WidgetTypeNotValid, 'Widget type %s is not valid' % type
if name in self._widgets:
raise WidgetDuplicated, 'Widget "%s" have been already added' % name
w = widget_type(self, name, args)
self._widgets[name] = w
# Incializar el tipo, si aún no lo está
widget_type = w.get_type()
if not self._type_initialized(widget_type):
w.initialize_type()
self._type_mark_initialized(widget_type)
# Y el propio widget
w.initialize_widget()
# Decirle a los widgets que hay uno nuevo
for widget in self._widgets.values():
widget.update_widgets_list(self._widgets)
def remove_widget(self, name):
'''remove_widget(name)
Elimina un widget del formulario. Ojo, use este método sólo si
sabe lo que está haciendo. Eliminar widgets puede causar problemas
de dependencias con otros que lo estén usando.
'''
try:
widget = self._widgets[name]
except KeyError:
raise WidgetNotFound, 'Widget %s not found' % name
# Notificar al propio widget que se va a eliminar del
# formulario
widget.remove_widget()
del self._widgets[name]
# Y notificar al resto del cambio
for widget in self._widgets.values():
widget.update_widgets_list(self._widgets)
def has_widget(self, name):
'''has_widget(name) -> bool
Verdadero si el widget del nombre indicado está definido'''
return self._widgets.has_key(name)
def get_widget(self, name):
'''get_widget(name) -> widget
Devuelve el widget solicitado
'''
try:
return self._widgets[name]
except KeyError:
raise WidgetNotFound, 'Widget %s not found' % name
def get_widgets(self):
'''get_widgets() -> dict
Devuelve un diccionario con todos los widgets del formulario. Las
claves serán los nombres de los widgets y los valores el objeto
que lo representa.
'''
return dict(self._widgets)
# Valores del formulario. Los valores serán aquellos que
# devuelvan los propios widgets.
def define_form_values(self, values):
'''define_form_values(values)
Estable (mediante un diccionario) los valores que usarán los widgets
como fuente.
Para obtener los datos validados y transformados de los widgets, use
get_form_value y get_form_values'''
import copy
self._form_values = copy.copy(values)
def _get_raw_form_values(self):
'''_get_raw_form_values() -> dict
Devuelve el diccionario usado como fuente para los widgets'''
return self._form_values
def get_form_values(self):
'''get_form_values() -> dict
Devuelve un diccionario con los valores de los widgets. Las claves
serán el nombre del widget, y el valor lo que devuelve el propio
widget.'''
r = {}
for name, widget in self._widgets.items():
r[name] = widget.get_value()
return r
def get_form_value(self, name):
'''get_form_value(name) -> object
Devuelve el valor de un widget.'''
try:
w = self._widgets[name]
except KeyError:
raise WidgetNotFound, 'Widget %s not found' % name
return w.get_value()
# Renderizar los widgets
def render_widget(self, widget):
'''render_widget(widget) -> str
Devuelve una cadena con el código HTML del widget pedido'''
try:
w = self._widgets[widget]
except KeyError:
raise WidgetNotFound, 'Widget %s not found' % widget
return w.render()
def get_form_attrs(self):
'''get_form_attrs() -> dict
Devuelve los atributos para el formulario actual.'''
# Obtener la información para validad de cada widget
validation_info = []
for widget in self._widgets.values():
validation_info += widget.get_validation_js_info()
import json
return {
'method': 'post',
'name': self.get_name(),
'action': self.get_url_action(),
'onsubmit': 'try { %s; } catch(e) {}; return mcw_form_validate(%s);' % (
self.get_prop('before_submit', '0'),
json.write(validation_info))
}
def auto_render(self):
'''auto_render() -> str
Devuelve una cadena con el HTML de todo el formulario
'''
from MCWidgets.Utils import html_dict_attrs
res = '<form %s>\n<table>\n' % html_dict_attrs(self.get_form_attrs())
widgets = self._widgets.keys()
widgets.sort()
for w in widgets:
w = self._widgets[w]
res += '<tr><td>' + w.get_label() + '</td>'
res += '<td>' + w.render() + '</td></tr>\n'
return res + '</table>\n<input type="submit" />\n</form>\n'
# Funciones internas
def get_url_action(self):
'''get_url_action() -> str
Devuelve la URL a la que se mandará el formulario'''
return self._args.get('url_action', '')
def get_name(self):
'''get_name() -> str
| s(self):
'''_get_url_static_files() -> str
Devuelve la url donde están los ficheros estáticos para,
los widgets, como CSS, JavaScript, imágenes, etc'''
return self._args.get('url_static_files', '/staticmcw/')
def _make_var_name(self, widget, varname):
'''_make_var_name (widget, varname) -> str
Devuelve el nombre de variable para el widget. La idea de esta
función es que los widgets la usen para generar los nombres de sus
variables, de manera que se asegure que no se pisan variables entre
ellos.'''
# damos por hecho de que el nombre de la variables
# no contiene caracteres problemáticos
return '_'.join([self._form_name, widget, varname])
def _type_initialized(self, typename):
'''_type_initialized (type) -> bool
Devuelve True si el tipo de widget indicado ya está
inicializado'''
return typename in self._initialized_types
| Devuelve el nombre del formulario'''
return self._form_name
def _get_url_static_file | identifier_body |
Form.py | Devuelve el argumento pedido. Si no lo encuentra, devuelve default'''
return self._args.get(arg, default)
def set_arg(self, arg, value):
'''set_arg(arg, value)
Cambia el valor del argumento indicado'''
self._args[arg] = value
# Propiedades. Se usan para los widgets puedan mandar datos
# complementarios al que dibujará el formulario
def add_prop(self, prop, value, unique = False):
'''add_prop(prop, value)
Añade el texto indicado por value a la propiedad.
Si unique es verdadero, el texto sobrescribirá el valor
actual de la propiedad. Si es falso, se añadirá al final.
'''
if unique:
self._props[prop] = value
else:
self._props[prop] = self._props.get(prop, '') + value
def get_prop(self, prop, default = ''):
'''get_prop(prop, default = '') -> str
Devuelve el valor de la propiedad. Si no la encuentra devuelve
el valor de default. '''
return self._props.get(prop, default)
def list_props(self):
'''list_props() -> list
Devuelve una lista de las propiedades definidas'''
return self._props.keys()
# Validación de valores.
def validate_widget(self, widget):
'''validate_widget(widget) -> list
Devuelve una lista con los errores encontrados en el valor
del widget. Si no hay fallos devuelve una lista vacía'''
try:
w = self._widgets[widget]
except KeyError:
raise WidgetNotFound, 'Widget %s not found' % widget
return w.validate_widget()
def validate_form(self):
'''validate_form() -> dict
Devuelve un diccionario con los errores encontrados en los widgets.
Las claves del diccionario serán los nombres de los widgets, y los
valores, listas con los errores.
Si no encuentra ningún error, devolverá un diccionario vacío'''
errors = {}
for name, widget in self._widgets.items():
r = widget.validate_widget()
if r:
errors[name] = r
return errors
# Manipular la lista de widgets
def add_widget(self, widget_type, name, args = {}):
'''add_widget(widget_type, name, args = {})
Añade un widget al formulario, del tipo widget_type.
widget_type debe ser una clase derivada de Widget.
name se usa para referirse al widget, o una cadena
para buscar en el paquete MCWidgets.Widgets
'''
if isinstance(widget_type, str):
import MCWidgets.Widgets
widget_type = getattr(MCWidgets.Widgets, widget_type)
import Widget
if not issubclass(widget_type, Widget.Widget):
raise WidgetTypeNotValid, 'Widget type %s is not valid' % type
if name in self._widgets:
raise WidgetDuplicated, 'Widget "%s" have been already added' % name
w = widget_type(self, name, args)
self._widgets[name] = w
# Incializar el tipo, si aún no lo está
widget_type = w.get_type()
if not self._type_initialized(widget_type):
w.initialize_type()
self._type_mark_initialized(widget_type)
# Y el propio widget
w.initialize_widget()
# Decirle a los widgets que hay uno nuevo
for widget in self._widgets.values():
widget.update_widgets_list(self._widgets)
def remove_widget(self, name):
'''remove_widget(name)
Elimina un widget del formulario. Ojo, use este método sólo si
sabe lo que está haciendo. Eliminar widgets puede causar problemas
de dependencias con otros que lo estén usando.
'''
try:
widget = self._widgets[name]
except KeyError:
raise WidgetNotFound, 'Widget %s not found' % name
# Notificar al propio widget que se va a eliminar del
# formulario
widget.remove_widget()
del self._widgets[name]
# Y notificar al resto del cambio
for widget in self._widgets.values():
widget.update_widgets_list(self._widgets)
def has_widget(self, name):
'''has_widget(name) -> bool
Verdadero si el widget del nombre indicado está definido'''
return self._widgets.has_key(name)
def get_widget(self, name):
'''get_widget(name) -> widget
Devuelve el widget solicitado
'''
try:
return self._widgets[name]
except KeyError:
raise WidgetNotFound, 'Widget %s not found' % name
def get_widgets(self):
'''get_widgets() -> dict
Devuelve un diccionario con todos los widgets del formulario. Las
claves serán los nombres de los widgets y los valores el objeto
que lo representa.
'''
return dict(self._widgets)
# Valores del formulario. Los valores serán aquellos que
# devuelvan los propios widgets.
def define_form_values(self, values):
'''define_form_values(values)
Estable (mediante un diccionario) los valores que usarán los widgets
como fuente.
Para obtener los datos validados y transformados de los widgets, use
get_form_value y get_form_values'''
import copy
self._form_values = copy.copy(values)
def _get_raw_form_values(self):
'''_get_raw_form_values() -> dict
Devuelve el diccionario usado como fuente para los widgets'''
return self._form_values
def get_form_values(self):
'''get_form_values() -> dict
Devuelve un diccionario con los valores de los widgets. Las claves
serán el nombre del widget, y el valor lo que devuelve el propio
widget.'''
r = {}
for name, widget in self._widgets.items():
r[name] = widget.get_value()
return r
def get_form_value(self, name):
'''get_form_value(name) -> object
Devuelve el valor de un widget.'''
try:
w = self._widgets[name]
except KeyError:
raise WidgetNotFound, 'Widget %s not found' % name
return w.get_value()
# Renderizar los widgets
def render_widget(self, widget):
'''render_widget(widget) -> str
Devuelve una cadena con el código HTML del widget pedido'''
try:
w = self._widgets[widget]
except KeyError:
raise WidgetNotFound, 'Widget %s not found' % widget
return w.render()
def get_form_attrs(self):
'''get_form_attrs() -> dict
Devuelve los atributos para el formulario actual.'''
# Obtener la información para validad de cada widget
validation_info = []
for widget in self._widgets.values():
validation_info += widget.get_validation_js_info()
import json
return {
'method': 'post',
'name': self.get_name(),
'action': self.get_url_action(),
'onsubmit': 'try { %s; } catch(e) {}; return mcw_form_validate(%s);' % (
self.get_prop('before_submit', '0'),
json.write(validation_info))
}
def auto_render(self):
'''auto_render() -> str
Devuelve una cadena con el HTML de todo el formulario
'''
from MCWidgets.Utils import html_dict_attrs
res = '<form %s>\n<table>\n' % html_dict_attrs(self.get_form_attrs())
widgets = self._widgets.keys()
widgets.sort()
for w in widgets:
w = self._widgets[w]
res += '<tr><td>' + w.get_label() + '</td>'
res += '<td>' + w.render() + '</td></tr>\n'
return res + '</table>\n<input type="submit" />\n</form>\n'
# Funciones internas
def get_url_action(self):
'''get_url_action() -> str
Devuelve la URL a la que se mandará el formulario'''
return self._args.get('url_action', '')
def get_name(self):
'''get_name() -> str
Devuelve el nombre del formulario'''
return self._form_name
def _get_url_static_files(self):
'''_get_url_static_files() -> str
Devuelve la url donde están los ficheros estáticos para,
los widgets, como CSS, JavaScript, imágenes, etc'''
return self._args.get('url_static_files', '/staticmcw/')
def _make_var_name(self, widget, varname):
'''_make_var_name (widget, varname) -> str
Devuelve el nombre de variable para el widget. La idea de esta
función es que los widgets la usen para generar los nombres de sus
variables, de manera que se asegure que no se pisan variables entre |
def _type_initialized(self, typename):
'''_type_initialized (type) -> bool
Devuelve True si el tipo de widget indicado ya está
inicializado'''
return typename in self._initialized_types
| ellos.'''
# damos por hecho de que el nombre de la variables
# no contiene caracteres problemáticos
return '_'.join([self._form_name, widget, varname]) | random_line_split |
Form.py | ):
'''add_widget(widget_type, name, args = {})
Adds a widget of type widget_type to the form.
widget_type must be a class derived from Widget, or a string
that is looked up in the MCWidgets.Widgets package.
name is used to refer to the widget.
'''
if isinstance(widget_type, str):
import MCWidgets.Widgets
widget_type = getattr(MCWidgets.Widgets, widget_type)
import Widget
if not issubclass(widget_type, Widget.Widget):
raise WidgetTypeNotValid, 'Widget type %s is not valid' % widget_type
if name in self._widgets:
raise WidgetDuplicated, 'Widget "%s" has already been added' % name
w = widget_type(self, name, args)
self._widgets[name] = w
# Initialize the type, if it has not been initialized yet
widget_type = w.get_type()
if not self._type_initialized(widget_type):
w.initialize_type()
self._type_mark_initialized(widget_type)
# And the widget itself
w.initialize_widget()
# Tell the existing widgets that there is a new one
for widget in self._widgets.values():
widget.update_widgets_list(self._widgets)
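# A minimal usage sketch (not part of the original sources): it assumes a
# Form instance named 'form' and a hypothetical 'TextEntry' widget class
# exported by the MCWidgets.Widgets package.
#
#   form.add_widget('TextEntry', 'username', {'label': 'User name'})
#   form.add_widget(MCWidgets.Widgets.TextEntry, 'email', {'label': 'E-mail'})
#
# Passing a string looks the class up in MCWidgets.Widgets; passing the
# class object directly skips that lookup.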
def remove_widget(self, name):
'''remove_widget(name)
Removes a widget from the form. Careful: only use this method if
you know what you are doing. Removing widgets can cause dependency
problems with other widgets that are using it.
'''
try:
widget = self._widgets[name]
except KeyError:
raise WidgetNotFound, 'Widget %s not found' % name
# Notify the widget itself that it is about to be removed
# from the form
widget.remove_widget()
del self._widgets[name]
# And notify the remaining widgets of the change
for widget in self._widgets.values():
widget.update_widgets_list(self._widgets)
def has_widget(self, name):
'''has_widget(name) -> bool
True if a widget with the given name is defined'''
return self._widgets.has_key(name)
def get_widget(self, name):
'''get_widget(name) -> widget
Returns the requested widget
'''
try:
return self._widgets[name]
except KeyError:
raise WidgetNotFound, 'Widget %s not found' % name
def get_widgets(self):
'''get_widgets() -> dict
Returns a dictionary with all the widgets in the form. The keys
are the widget names and the values are the objects that
represent them.
'''
return dict(self._widgets)
# Form values. The values are whatever the widgets
# themselves return.
def define_form_values(self, values):
'''define_form_values(values)
Sets (via a dictionary) the values the widgets will use
as their source.
To obtain the validated and transformed data from the widgets, use
get_form_value and get_form_values'''
import copy
self._form_values = copy.copy(values)
def _get_raw_form_values(self):
'''_get_raw_form_values() -> dict
Returns the dictionary used as the source for the widgets'''
return self._form_values
def get_form_values(self):
'''get_form_values() -> dict
Returns a dictionary with the widgets' values. The keys are the
widget names, and each value is whatever the widget itself
returns.'''
r = {}
for name, widget in self._widgets.items():
r[name] = widget.get_value()
return r
def get_form_value(self, name):
'''get_form_value(name) -> object
Returns the value of a single widget.'''
try:
w = self._widgets[name]
except KeyError:
raise WidgetNotFound, 'Widget %s not found' % name
return w.get_value()
# Rendering the widgets
def render_widget(self, widget):
'''render_widget(widget) -> str
Returns a string with the HTML code of the requested widget'''
try:
w = self._widgets[widget]
except KeyError:
raise WidgetNotFound, 'Widget %s not found' % widget
return w.render()
def get_form_attrs(self):
'''get_form_attrs() -> dict
Returns the attributes for the current form.'''
# Gather the validation information from each widget
validation_info = []
for widget in self._widgets.values():
validation_info += widget.get_validation_js_info()
import json
return {
'method': 'post',
'name': self.get_name(),
'action': self.get_url_action(),
'onsubmit': 'try { %s; } catch(e) {}; return mcw_form_validate(%s);' % (
self.get_prop('before_submit', '0'),
json.write(validation_info))
}
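# For illustration only: assuming a form named 'login' whose url_action is
# '/login' and which has no 'before_submit' property, get_form_attrs()
# returns roughly:
#
#   {'method': 'post',
#    'name': 'login',
#    'action': '/login',
#    'onsubmit': 'try { 0; } catch(e) {}; return mcw_form_validate([...])'}
#
# where [...] stands for the JSON-encoded validation info gathered from the
# widgets.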
def auto_render(self):
'''auto_render() -> str
Returns a string with the HTML for the whole form
'''
from MCWidgets.Utils import html_dict_attrs
res = '<form %s>\n<table>\n' % html_dict_attrs(self.get_form_attrs())
widgets = self._widgets.keys()
widgets.sort()
for w in widgets:
w = self._widgets[w]
res += '<tr><td>' + w.get_label() + '</td>'
res += '<td>' + w.render() + '</td></tr>\n'
return res + '</table>\n<input type="submit" />\n</form>\n'
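# Sketch of the markup auto_render() produces (widget markup abbreviated;
# rows appear in alphabetical widget-name order because the keys are sorted):
#
#   <form method="post" name="..." action="..." onsubmit="...">
#   <table>
#   <tr><td>label</td><td>widget html</td></tr>
#   ...
#   </table>
#   <input type="submit" />
#   </form>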
# Internal functions
def get_url_action(self):
'''get_url_action() -> str
Returns the URL the form will be submitted to'''
return self._args.get('url_action', '')
def get_name(self):
'''get_name() -> str
Returns the form name'''
return self._form_name
def _get_url_static_files(self):
'''_get_url_static_files() -> str
Returns the URL where the static files for the widgets live,
such as CSS, JavaScript, images, etc.'''
return self._args.get('url_static_files', '/staticmcw/')
def _make_var_name(self, widget, varname):
'''_make_var_name (widget, varname) -> str
Returns the variable name for the widget. The idea is that
widgets use this function to generate the names of their
variables, making sure that variables do not clash between
them.'''
# we take it for granted that the variable name
# does not contain problematic characters
return '_'.join([self._form_name, widget, varname])
def _type_initialized(self, typename):
'''_type_initialized (type) -> bool
Returns True if the given widget type has already been
initialized'''
return typename in self._initialized_types
def _type_mark_initialized(self, typename):
'''_type_mark_initialized (typename)
Marks the given type as initialized. If it was already
initialized, it does nothing'''
l = self._initialized_types
if typename not in l:
l.append(typename)
def _add_js_file(self, url):
'''_add_js_file(url)
Adds a tag to the header to load the given file. The URL
is relative to where all the MCWidgets static files live.
If the URL has already been included, it returns without doing anything.'''
if url in self.__included_js:
return
self.__included_js.append(url)
self.add_prop(
'header',
'<script src="%s/%s"></script>\n' %
(self._get_url_static_files(), url))
def _add_css_file(self, url):
'''_add_css_file(url)
Adds a tag to the header to load the given file. The URL
is relative to where all the MCWidgets static files live.
If the URL has already been included, it returns without doing anything.'''
if url in self.__included_css:
return
self.__included_css.append(url)
self.add_prop(
'header',
'<link href="%s/%s" type="text/css" rel="stylesheet" />\n' %
(self._get_url_static_files(), url))
class FormTemplate:
'''FormTemplate
This class is meant to be used from templates. It works like
a dictionary for accessing the form's widgets. When it is
accessed by key, it calls render on the requested widget.
ft = FormTemplate(form)
return ft['widget']
is equivalent to
return form.render_widget('widget')
The widgets themselves are reachable through the W() method.
The idea is to make it easy to use from templates:
$form.nombre -> form.render_widget("nombre")
$form.W.nombre -> form.get_widget("nombre")
'''
def __init__(self, form):
self.__form = form
self.__cache = {}
def W(self):
return self.__form.get_widgets()
def form_attributes(self):
from MCWidgets.Util | s import html_d | identifier_name |
|
customdomain_utils.go | 1.ConditionStatus, newReason, newMessage string,
updateConditionCheck UpdateConditionCheck,
) bool {
if oldStatus != newStatus {
return true
}
return updateConditionCheck(oldReason, oldMessage, newReason, newMessage)
}
// SetCustomDomainCondition sets a condition on a CustomDomain resource's status
func SetCustomDomainCondition(
conditions []customdomainv1alpha1.CustomDomainCondition,
conditionType customdomainv1alpha1.CustomDomainConditionType,
status corev1.ConditionStatus,
message string,
updateConditionCheck UpdateConditionCheck,
) []customdomainv1alpha1.CustomDomainCondition {
now := metav1.Now()
existingCondition := FindCustomDomainCondition(conditions, conditionType)
if existingCondition == nil {
if status == corev1.ConditionTrue {
conditions = append(
conditions,
customdomainv1alpha1.CustomDomainCondition{
Type: conditionType,
Status: status,
Reason: string(conditionType),
Message: message,
LastTransitionTime: now,
LastProbeTime: now,
},
)
}
} else {
if ShouldUpdateCondition(
existingCondition.Status, existingCondition.Reason, existingCondition.Message,
status, string(conditionType), message,
updateConditionCheck,
) {
if existingCondition.Status != status {
existingCondition.LastTransitionTime = now
}
existingCondition.Status = status
existingCondition.Reason = string(conditionType)
existingCondition.Message = message
existingCondition.LastProbeTime = now
}
}
return conditions
}
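// Illustrative sketch only; the "Ready" condition type, the message and the
// UpdateConditionIfReasonOrMessageChange helper are assumptions, not taken
// from this file. It shows how a reconciler would record a condition:
//
//	instance.Status.Conditions = SetCustomDomainCondition(
//		instance.Status.Conditions,
//		customdomainv1alpha1.CustomDomainConditionReady,
//		corev1.ConditionTrue,
//		"ingresscontroller and certificate secret are in place",
//		UpdateConditionIfReasonOrMessageChange)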
// FindCustomDomainCondition finds in the condition that has the
// specified condition type in the given list. If none exists, then returns nil.
func FindCustomDomainCondition(conditions []customdomainv1alpha1.CustomDomainCondition, conditionType customdomainv1alpha1.CustomDomainConditionType) *customdomainv1alpha1.CustomDomainCondition {
for i, condition := range conditions {
if condition.Type == conditionType {
return &conditions[i]
}
}
return nil
}
// Take an ingress controller managed by the custom domains operator and release it back to the
// cluster ingress operator. Also schedule it onto customer worker nodes from the Red Hat managed infra
// nodes.
func (r *CustomDomainReconciler) returnIngressToClusterIngressOperator(reqLogger logr.Logger, instance *customdomainv1alpha1.CustomDomain) (ctrl.Result, error) {
reqLogger.Info(fmt.Sprintf("Removing operator management labels from %s's underlying ingress controller", instance.Name))
ingressName := instance.Name
customIngress := &operatorv1.IngressController{}
reqLogger.Info(fmt.Sprintf("Fetching ingress controller: %s/%s", ingressOperatorNamespace, ingressName))
err := r.Client.Get(context.TODO(), types.NamespacedName{
Namespace: ingressOperatorNamespace,
Name: ingressName,
}, customIngress)
if err != nil {
if kerr.IsNotFound(err) {
// Request object not found, could have been deleted after reconcile request.
// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
// Return and don't requeue
reqLogger.Error(err, fmt.Sprintf("Ingresscontroller %s in %s namespace not found", ingressName, ingressOperatorNamespace))
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
return reconcile.Result{}, err
}
delete(customIngress.Labels, managedLabelName)
customIngress.Spec.NodePlacement = &operatorv1.NodePlacement{
NodeSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"node-role.kubernetes.io/worker": ""},
},
Tolerations: []corev1.Toleration{},
}
reqLogger.Info(fmt.Sprintf("Updating ingress %s with new node placement on worker node, removing tolerations for infra nodes", instance.Name))
err = r.Client.Update(context.TODO(), customIngress)
if err != nil {
reqLogger.Error(err, fmt.Sprintf("Error updating ingresscontroller %s in %s namespace", ingressName, ingressOperatorNamespace))
return reconcile.Result{}, err
}
userSecret := &corev1.Secret{}
err = r.Client.Get(context.TODO(), types.NamespacedName{
Namespace: instance.Spec.Certificate.Namespace,
Name: instance.Spec.Certificate.Name,
}, userSecret)
reqLogger.Info(fmt.Sprintf("Updating secret to remove custom domain labels from secret %s", userSecret.Name))
delete(userSecret.Labels, managedLabelName)
if err != nil {
reqLogger.Error(err, fmt.Sprintf("Error fetching secret %s in %s namespace", userSecret.Name, userSecret.Namespace))
return reconcile.Result{}, err
}
err = r.Client.Update(context.TODO(), userSecret)
if err != nil {
reqLogger.Error(err, fmt.Sprintf("Error updating secret %s in %s namespace", userSecret.Name, userSecret.Namespace))
// Requeue, as the dependent ingress controller has already been updated
return reconcile.Result{}, err
}
SetCustomDomainStatus(
reqLogger,
instance,
"Due to the deprecation of the custom domains operator on OSD/ROSA version 4.13 and above, this CustomDomain no longer manages an IngressController.",
customdomainv1alpha1.CustomDomainConditionDeprecated,
customdomainv1alpha1.CustomDomainStateNotReady)
err = r.statusUpdate(reqLogger, instance)
if err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
// finalizeCustomDomain cleans up left over resources once a CustomDomain CR is deleted
func (r *CustomDomainReconciler) finalizeCustomDomain(reqLogger logr.Logger, instance *customdomainv1alpha1.CustomDomain) error {
reqLogger.Info("Deleting old resources...")
// get and delete the secret in openshift-ingress
ingressSecret := &corev1.Secret{}
err := r.Client.Get(context.TODO(), types.NamespacedName{
Namespace: ingressNamespace,
Name: instance.Name,
}, ingressSecret)
if err != nil {
if !kerr.IsNotFound(err) {
reqLogger.Error(err, fmt.Sprintf("Failed to get %s secret", instance.Name))
return err
}
reqLogger.Info(fmt.Sprintf("Secret %s was not found, skipping.", instance.Name))
} else {
if _, ok := ingressSecret.Labels[managedLabelName]; ok | else {
reqLogger.Info(fmt.Sprintf("Secret %s did not have proper labels, skipping.", ingressSecret.Name))
}
}
// get and delete the custom ingresscontroller
customIngress := &operatorv1.IngressController{}
err = r.Client.Get(context.TODO(), types.NamespacedName{
Namespace: ingressOperatorNamespace,
Name: instance.Name,
}, customIngress)
if err != nil {
if !kerr.IsNotFound(err) {
reqLogger.Error(err, fmt.Sprintf("Failed to get %s ingresscontroller", instance.Name))
return err
}
reqLogger.Info(fmt.Sprintf("IngressController %s was not found, skipping.", instance.Name))
} else {
// Only delete the IngressController if it has the proper labels and does not have a restricted name
if _, ok := customIngress.Labels[managedLabelName]; ok {
if !contains(restrictedIngressNames, customIngress.Name) {
err = r.Client.Delete(context.TODO(), customIngress)
if err != nil {
reqLogger.Error(err, fmt.Sprintf("Failed to delete %s ingresscontroller", customIngress.Name))
return err
}
} else {
reqLogger.Info(fmt.Sprintf("IngressController %s has a restricted name, not deleting.", customIngress.Name))
}
} else {
reqLogger.Info(fmt.Sprintf("IngressController %s did not have proper labels, not deleting.", customIngress.Name))
}
}
reqLogger.Info(fmt.Sprintf("Customdomain %s successfully finalized", instance.Name))
return nil
}
// addFinalizer is a function that adds a finalizer for the CustomDomain CR
func (r *CustomDomainReconciler) addFinalizer(reqLogger logr.Logger, m *customdomainv1alpha1.CustomDomain) error {
reqLogger.Info("Adding Finalizer for the CustomDomain")
m.SetFinalizers(append(m.GetFinalizers(), customDomainFinalizer))
// Update CR
err := r.Client.Update(context.TODO(), m)
if err != nil {
reqLogger.Error(err, "Failed to update CustomDomain with finalizer")
return err
}
return nil
}
// SetCustomDomainStatus sets the status of the custom domain resource
func SetCustomDomainStatus(reqLogger logr.Logger, instance *customdomainv1alpha1.CustomDomain, message string, condition customdomainv1alpha1.CustomDomainConditionType, state customdomainv1alpha1.CustomDomainStateType) {
instance.Status.Conditions = SetCustomDomainCondition(
instance.Status.Conditions,
condition,
corev1.ConditionTrue,
message,
UpdateConditionNever)
instance.Status.State = state
reqLogger.Info(fmt.Sprintf("CustomDomain (%s) status updated: condition: (%s), state: (%s)", instance.Name, string(condition), string(state)))
}
// statusUpdate | {
err = r.Client.Delete(context.TODO(), ingressSecret)
if err != nil {
reqLogger.Error(err, fmt.Sprintf("Failed to delete %s secret", instance.Name))
return err
}
} | conditional_block |
customdomain_utils.go | ConditionType,
status corev1.ConditionStatus,
message string,
updateConditionCheck UpdateConditionCheck,
) []customdomainv1alpha1.CustomDomainCondition {
now := metav1.Now()
existingCondition := FindCustomDomainCondition(conditions, conditionType)
if existingCondition == nil {
if status == corev1.ConditionTrue {
conditions = append(
conditions,
customdomainv1alpha1.CustomDomainCondition{
Type: conditionType,
Status: status,
Reason: string(conditionType),
Message: message,
LastTransitionTime: now,
LastProbeTime: now,
},
)
}
} else {
if ShouldUpdateCondition(
existingCondition.Status, existingCondition.Reason, existingCondition.Message,
status, string(conditionType), message,
updateConditionCheck,
) {
if existingCondition.Status != status {
existingCondition.LastTransitionTime = now
}
existingCondition.Status = status
existingCondition.Reason = string(conditionType)
existingCondition.Message = message
existingCondition.LastProbeTime = now
}
}
return conditions
}
// FindCustomDomainCondition finds in the condition that has the
// specified condition type in the given list. If none exists, then returns nil.
func FindCustomDomainCondition(conditions []customdomainv1alpha1.CustomDomainCondition, conditionType customdomainv1alpha1.CustomDomainConditionType) *customdomainv1alpha1.CustomDomainCondition {
for i, condition := range conditions {
if condition.Type == conditionType {
return &conditions[i]
}
}
return nil
}
// Take an ingress controller managed by the custom domains operator and release it back to the
// cluster ingress operator. Also schedule it onto customer worker nodes from the Red Hat managed infra
// nodes.
func (r *CustomDomainReconciler) returnIngressToClusterIngressOperator(reqLogger logr.Logger, instance *customdomainv1alpha1.CustomDomain) (ctrl.Result, error) {
reqLogger.Info(fmt.Sprintf("Removing operator management labels from %s's underlying ingress controller", instance.Name))
ingressName := instance.Name
customIngress := &operatorv1.IngressController{}
reqLogger.Info(fmt.Sprintf("Fetching ingress controller: %s/%s", ingressOperatorNamespace, ingressName))
err := r.Client.Get(context.TODO(), types.NamespacedName{
Namespace: ingressOperatorNamespace,
Name: ingressName,
}, customIngress)
if err != nil {
if kerr.IsNotFound(err) {
// Request object not found, could have been deleted after reconcile request.
// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
// Return and don't requeue
reqLogger.Error(err, fmt.Sprintf("Ingresscontroller %s in %s namespace not found", ingressName, ingressOperatorNamespace))
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
return reconcile.Result{}, err
}
delete(customIngress.Labels, managedLabelName)
customIngress.Spec.NodePlacement = &operatorv1.NodePlacement{
NodeSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"node-role.kubernetes.io/worker": ""},
},
Tolerations: []corev1.Toleration{},
}
reqLogger.Info(fmt.Sprintf("Updating ingress %s with new node placement on worker node, removing tolerations for infra nodes", instance.Name))
err = r.Client.Update(context.TODO(), customIngress)
if err != nil {
reqLogger.Error(err, fmt.Sprintf("Error updating ingresscontroller %s in %s namespace", ingressName, ingressOperatorNamespace))
return reconcile.Result{}, err
}
userSecret := &corev1.Secret{}
err = r.Client.Get(context.TODO(), types.NamespacedName{
Namespace: instance.Spec.Certificate.Namespace,
Name: instance.Spec.Certificate.Name,
}, userSecret)
reqLogger.Info(fmt.Sprintf("Updating secret to remove custom domain labels from secret %s", userSecret.Name))
delete(userSecret.Labels, managedLabelName)
if err != nil {
reqLogger.Error(err, fmt.Sprintf("Error fetching secret %s in %s namespace", userSecret.Name, userSecret.Namespace))
return reconcile.Result{}, err
}
err = r.Client.Update(context.TODO(), userSecret)
if err != nil {
reqLogger.Error(err, fmt.Sprintf("Error updating secret %s in %s namespace", userSecret.Name, userSecret.Namespace))
// Requeue, as the dependent ingress controller has already been updated
return reconcile.Result{}, err
}
SetCustomDomainStatus(
reqLogger,
instance,
"Due to the deprecation of the custom domains operator on OSD/ROSA version 4.13 and above, this CustomDomain no longer manages an IngressController.",
customdomainv1alpha1.CustomDomainConditionDeprecated,
customdomainv1alpha1.CustomDomainStateNotReady)
err = r.statusUpdate(reqLogger, instance)
if err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
// finalizeCustomDomain cleans up left over resources once a CustomDomain CR is deleted
func (r *CustomDomainReconciler) finalizeCustomDomain(reqLogger logr.Logger, instance *customdomainv1alpha1.CustomDomain) error {
reqLogger.Info("Deleting old resources...")
// get and delete the secret in openshift-ingress
ingressSecret := &corev1.Secret{}
err := r.Client.Get(context.TODO(), types.NamespacedName{
Namespace: ingressNamespace,
Name: instance.Name,
}, ingressSecret)
if err != nil {
if !kerr.IsNotFound(err) {
reqLogger.Error(err, fmt.Sprintf("Failed to get %s secret", instance.Name))
return err
}
reqLogger.Info(fmt.Sprintf("Secret %s was not found, skipping.", instance.Name))
} else {
if _, ok := ingressSecret.Labels[managedLabelName]; ok {
err = r.Client.Delete(context.TODO(), ingressSecret)
if err != nil {
reqLogger.Error(err, fmt.Sprintf("Failed to delete %s secret", instance.Name))
return err
}
} else {
reqLogger.Info(fmt.Sprintf("Secret %s did not have proper labels, skipping.", ingressSecret.Name))
}
}
// get and delete the custom ingresscontroller
customIngress := &operatorv1.IngressController{}
err = r.Client.Get(context.TODO(), types.NamespacedName{
Namespace: ingressOperatorNamespace,
Name: instance.Name,
}, customIngress)
if err != nil {
if !kerr.IsNotFound(err) {
reqLogger.Error(err, fmt.Sprintf("Failed to get %s ingresscontroller", instance.Name))
return err
}
reqLogger.Info(fmt.Sprintf("IngressController %s was not found, skipping.", instance.Name))
} else {
// Only delete the IngressController if it has the proper labels and does not have a restricted name
if _, ok := customIngress.Labels[managedLabelName]; ok {
if !contains(restrictedIngressNames, customIngress.Name) {
err = r.Client.Delete(context.TODO(), customIngress)
if err != nil {
reqLogger.Error(err, fmt.Sprintf("Failed to delete %s ingresscontroller", customIngress.Name))
return err
}
} else {
reqLogger.Info(fmt.Sprintf("IngressController %s has a restricted name, not deleting.", customIngress.Name))
}
} else {
reqLogger.Info(fmt.Sprintf("IngressController %s did not have proper labels, not deleting.", customIngress.Name))
}
}
reqLogger.Info(fmt.Sprintf("Customdomain %s successfully finalized", instance.Name))
return nil
}
// addFinalizer is a function that adds a finalizer for the CustomDomain CR
func (r *CustomDomainReconciler) addFinalizer(reqLogger logr.Logger, m *customdomainv1alpha1.CustomDomain) error {
reqLogger.Info("Adding Finalizer for the CustomDomain")
m.SetFinalizers(append(m.GetFinalizers(), customDomainFinalizer))
// Update CR
err := r.Client.Update(context.TODO(), m)
if err != nil {
reqLogger.Error(err, "Failed to update CustomDomain with finalizer")
return err
}
return nil
}
// SetCustomDomainStatus sets the status of the custom domain resource
func SetCustomDomainStatus(reqLogger logr.Logger, instance *customdomainv1alpha1.CustomDomain, message string, condition customdomainv1alpha1.CustomDomainConditionType, state customdomainv1alpha1.CustomDomainStateType) {
instance.Status.Conditions = SetCustomDomainCondition(
instance.Status.Conditions,
condition,
corev1.ConditionTrue,
message,
UpdateConditionNever)
instance.Status.State = state
reqLogger.Info(fmt.Sprintf("CustomDomain (%s) status updated: condition: (%s), state: (%s)", instance.Name, string(condition), string(state)))
}
// statusUpdate helper function to set the actual status update
func (r *CustomDomainReconciler) statusUpdate(reqLogger logr.Logger, instance *customdomainv1alpha1.CustomDomain) error {
err := r.Client.Status().Update(context.TODO(), instance)
if err != nil {
reqLogger.Error(err, fmt.Sprintf("Status update for %s failed", instance.Name))
}
//reqLogger.Info(fmt.Sprintf("Status updated for %s", instance.Name))
return err | }
| random_line_split |
|
customdomain_utils.go | existingCondition == nil {
if status == corev1.ConditionTrue {
conditions = append(
conditions,
customdomainv1alpha1.CustomDomainCondition{
Type: conditionType,
Status: status,
Reason: string(conditionType),
Message: message,
LastTransitionTime: now,
LastProbeTime: now,
},
)
}
} else {
if ShouldUpdateCondition(
existingCondition.Status, existingCondition.Reason, existingCondition.Message,
status, string(conditionType), message,
updateConditionCheck,
) {
if existingCondition.Status != status {
existingCondition.LastTransitionTime = now
}
existingCondition.Status = status
existingCondition.Reason = string(conditionType)
existingCondition.Message = message
existingCondition.LastProbeTime = now
}
}
return conditions
}
// FindCustomDomainCondition finds in the condition that has the
// specified condition type in the given list. If none exists, then returns nil.
func FindCustomDomainCondition(conditions []customdomainv1alpha1.CustomDomainCondition, conditionType customdomainv1alpha1.CustomDomainConditionType) *customdomainv1alpha1.CustomDomainCondition {
for i, condition := range conditions {
if condition.Type == conditionType {
return &conditions[i]
}
}
return nil
}
// Take an ingress controller managed by the custom domains operator and release it back to the
// cluster ingress operator. Also schedule it onto customer worker nodes from the Red Hat managed infra
// nodes.
func (r *CustomDomainReconciler) returnIngressToClusterIngressOperator(reqLogger logr.Logger, instance *customdomainv1alpha1.CustomDomain) (ctrl.Result, error) {
reqLogger.Info(fmt.Sprintf("Removing operator management labels from %s's underlying ingress controller", instance.Name))
ingressName := instance.Name
customIngress := &operatorv1.IngressController{}
reqLogger.Info(fmt.Sprintf("Fetching ingress controller: %s/%s", ingressOperatorNamespace, ingressName))
err := r.Client.Get(context.TODO(), types.NamespacedName{
Namespace: ingressOperatorNamespace,
Name: ingressName,
}, customIngress)
if err != nil {
if kerr.IsNotFound(err) {
// Request object not found, could have been deleted after reconcile request.
// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
// Return and don't requeue
reqLogger.Error(err, fmt.Sprintf("Ingresscontroller %s in %s namespace not found", ingressName, ingressOperatorNamespace))
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
return reconcile.Result{}, err
}
delete(customIngress.Labels, managedLabelName)
customIngress.Spec.NodePlacement = &operatorv1.NodePlacement{
NodeSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"node-role.kubernetes.io/worker": ""},
},
Tolerations: []corev1.Toleration{},
}
reqLogger.Info(fmt.Sprintf("Updating ingress %s with new node placement on worker node, removing tolerations for infra nodes", instance.Name))
err = r.Client.Update(context.TODO(), customIngress)
if err != nil {
reqLogger.Error(err, fmt.Sprintf("Error updating ingresscontroller %s in %s namespace", ingressName, ingressOperatorNamespace))
return reconcile.Result{}, err
}
userSecret := &corev1.Secret{}
err = r.Client.Get(context.TODO(), types.NamespacedName{
Namespace: instance.Spec.Certificate.Namespace,
Name: instance.Spec.Certificate.Name,
}, userSecret)
reqLogger.Info(fmt.Sprintf("Updating secret to remove custom domain labels from secret %s", userSecret.Name))
delete(userSecret.Labels, managedLabelName)
if err != nil {
reqLogger.Error(err, fmt.Sprintf("Error fetching secret %s in %s namespace", userSecret.Name, userSecret.Namespace))
return reconcile.Result{}, err
}
err = r.Client.Update(context.TODO(), userSecret)
if err != nil {
reqLogger.Error(err, fmt.Sprintf("Error updating secret %s in %s namespace", userSecret.Name, userSecret.Namespace))
// Requeue, as the dependent ingress controller has already been updated
return reconcile.Result{}, err
}
SetCustomDomainStatus(
reqLogger,
instance,
"Due to the deprecation of the custom domains operator on OSD/ROSA version 4.13 and above, this CustomDomain no longer manages an IngressController.",
customdomainv1alpha1.CustomDomainConditionDeprecated,
customdomainv1alpha1.CustomDomainStateNotReady)
err = r.statusUpdate(reqLogger, instance)
if err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
// finalizeCustomDomain cleans up left over resources once a CustomDomain CR is deleted
func (r *CustomDomainReconciler) finalizeCustomDomain(reqLogger logr.Logger, instance *customdomainv1alpha1.CustomDomain) error {
reqLogger.Info("Deleting old resources...")
// get and delete the secret in openshift-ingress
ingressSecret := &corev1.Secret{}
err := r.Client.Get(context.TODO(), types.NamespacedName{
Namespace: ingressNamespace,
Name: instance.Name,
}, ingressSecret)
if err != nil {
if !kerr.IsNotFound(err) {
reqLogger.Error(err, fmt.Sprintf("Failed to get %s secret", instance.Name))
return err
}
reqLogger.Info(fmt.Sprintf("Secret %s was not found, skipping.", instance.Name))
} else {
if _, ok := ingressSecret.Labels[managedLabelName]; ok {
err = r.Client.Delete(context.TODO(), ingressSecret)
if err != nil {
reqLogger.Error(err, fmt.Sprintf("Failed to delete %s secret", instance.Name))
return err
}
} else {
reqLogger.Info(fmt.Sprintf("Secret %s did not have proper labels, skipping.", ingressSecret.Name))
}
}
// get and delete the custom ingresscontroller
customIngress := &operatorv1.IngressController{}
err = r.Client.Get(context.TODO(), types.NamespacedName{
Namespace: ingressOperatorNamespace,
Name: instance.Name,
}, customIngress)
if err != nil {
if !kerr.IsNotFound(err) {
reqLogger.Error(err, fmt.Sprintf("Failed to get %s ingresscontroller", instance.Name))
return err
}
reqLogger.Info(fmt.Sprintf("IngressController %s was not found, skipping.", instance.Name))
} else {
// Only delete the IngressController if it has the proper labels and does not have a restricted name
if _, ok := customIngress.Labels[managedLabelName]; ok {
if !contains(restrictedIngressNames, customIngress.Name) {
err = r.Client.Delete(context.TODO(), customIngress)
if err != nil {
reqLogger.Error(err, fmt.Sprintf("Failed to delete %s ingresscontroller", customIngress.Name))
return err
}
} else {
reqLogger.Info(fmt.Sprintf("IngressController %s has a restricted name, not deleting.", customIngress.Name))
}
} else {
reqLogger.Info(fmt.Sprintf("IngressController %s did not have proper labels, not deleting.", customIngress.Name))
}
}
reqLogger.Info(fmt.Sprintf("Customdomain %s successfully finalized", instance.Name))
return nil
}
// addFinalizer is a function that adds a finalizer for the CustomDomain CR
func (r *CustomDomainReconciler) addFinalizer(reqLogger logr.Logger, m *customdomainv1alpha1.CustomDomain) error {
reqLogger.Info("Adding Finalizer for the CustomDomain")
m.SetFinalizers(append(m.GetFinalizers(), customDomainFinalizer))
// Update CR
err := r.Client.Update(context.TODO(), m)
if err != nil {
reqLogger.Error(err, "Failed to update CustomDomain with finalizer")
return err
}
return nil
}
// SetCustomDomainStatus sets the status of the custom domain resource
func SetCustomDomainStatus(reqLogger logr.Logger, instance *customdomainv1alpha1.CustomDomain, message string, condition customdomainv1alpha1.CustomDomainConditionType, state customdomainv1alpha1.CustomDomainStateType) {
instance.Status.Conditions = SetCustomDomainCondition(
instance.Status.Conditions,
condition,
corev1.ConditionTrue,
message,
UpdateConditionNever)
instance.Status.State = state
reqLogger.Info(fmt.Sprintf("CustomDomain (%s) status updated: condition: (%s), state: (%s)", instance.Name, string(condition), string(state)))
}
// statusUpdate helper function to set the actual status update
func (r *CustomDomainReconciler) statusUpdate(reqLogger logr.Logger, instance *customdomainv1alpha1.CustomDomain) error {
err := r.Client.Status().Update(context.TODO(), instance)
if err != nil {
reqLogger.Error(err, fmt.Sprintf("Status update for %s failed", instance.Name))
}
//reqLogger.Info(fmt.Sprintf("Status updated for %s", instance.Name))
return err
}
// contains is a helper function for finding a string in an array
func contains(list []string, s string) bool | {
for _, v := range list {
if v == s {
return true
}
}
return false
} | identifier_body |
|
customdomain_utils.go | // Error reading the object - requeue the request.
return reconcile.Result{}, err
}
delete(customIngress.Labels, managedLabelName)
customIngress.Spec.NodePlacement = &operatorv1.NodePlacement{
NodeSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"node-role.kubernetes.io/worker": ""},
},
Tolerations: []corev1.Toleration{},
}
reqLogger.Info(fmt.Sprintf("Updating ingress %s with new node placement on worker node, removing tolerations for infra nodes", instance.Name))
err = r.Client.Update(context.TODO(), customIngress)
if err != nil {
reqLogger.Error(err, fmt.Sprintf("Error updating ingresscontroller %s in %s namespace", ingressName, ingressOperatorNamespace))
return reconcile.Result{}, err
}
userSecret := &corev1.Secret{}
err = r.Client.Get(context.TODO(), types.NamespacedName{
Namespace: instance.Spec.Certificate.Namespace,
Name: instance.Spec.Certificate.Name,
}, userSecret)
reqLogger.Info(fmt.Sprintf("Updating secret to remove custom domain labels from secret %s", userSecret.Name))
delete(userSecret.Labels, managedLabelName)
if err != nil {
reqLogger.Error(err, fmt.Sprintf("Error fetching secret %s in %s namespace", userSecret.Name, userSecret.Namespace))
return reconcile.Result{}, err
}
err = r.Client.Update(context.TODO(), userSecret)
if err != nil {
reqLogger.Error(err, fmt.Sprintf("Error updating secret %s in %s namespace", userSecret.Name, userSecret.Namespace))
// Requeue, as the dependent ingress controller has already been updated
return reconcile.Result{}, err
}
SetCustomDomainStatus(
reqLogger,
instance,
"Due to the deprecation of the custom domains operator on OSD/ROSA version 4.13 and above, this CustomDomain no longer manages an IngressController.",
customdomainv1alpha1.CustomDomainConditionDeprecated,
customdomainv1alpha1.CustomDomainStateNotReady)
err = r.statusUpdate(reqLogger, instance)
if err != nil {
return reconcile.Result{}, err
}
return reconcile.Result{}, nil
}
// finalizeCustomDomain cleans up left over resources once a CustomDomain CR is deleted
func (r *CustomDomainReconciler) finalizeCustomDomain(reqLogger logr.Logger, instance *customdomainv1alpha1.CustomDomain) error {
reqLogger.Info("Deleting old resources...")
// get and delete the secret in openshift-ingress
ingressSecret := &corev1.Secret{}
err := r.Client.Get(context.TODO(), types.NamespacedName{
Namespace: ingressNamespace,
Name: instance.Name,
}, ingressSecret)
if err != nil {
if !kerr.IsNotFound(err) {
reqLogger.Error(err, fmt.Sprintf("Failed to get %s secret", instance.Name))
return err
}
reqLogger.Info(fmt.Sprintf("Secret %s was not found, skipping.", instance.Name))
} else {
if _, ok := ingressSecret.Labels[managedLabelName]; ok {
err = r.Client.Delete(context.TODO(), ingressSecret)
if err != nil {
reqLogger.Error(err, fmt.Sprintf("Failed to delete %s secret", instance.Name))
return err
}
} else {
reqLogger.Info(fmt.Sprintf("Secret %s did not have proper labels, skipping.", ingressSecret.Name))
}
}
// get and delete the custom ingresscontroller
customIngress := &operatorv1.IngressController{}
err = r.Client.Get(context.TODO(), types.NamespacedName{
Namespace: ingressOperatorNamespace,
Name: instance.Name,
}, customIngress)
if err != nil {
if !kerr.IsNotFound(err) {
reqLogger.Error(err, fmt.Sprintf("Failed to get %s ingresscontroller", instance.Name))
return err
}
reqLogger.Info(fmt.Sprintf("IngressController %s was not found, skipping.", instance.Name))
} else {
// Only delete the IngressController if it has the proper labels and does not have a restricted name
if _, ok := customIngress.Labels[managedLabelName]; ok {
if !contains(restrictedIngressNames, customIngress.Name) {
err = r.Client.Delete(context.TODO(), customIngress)
if err != nil {
reqLogger.Error(err, fmt.Sprintf("Failed to delete %s ingresscontroller", customIngress.Name))
return err
}
} else {
reqLogger.Info(fmt.Sprintf("IngressController %s has a restricted name, not deleting.", customIngress.Name))
}
} else {
reqLogger.Info(fmt.Sprintf("IngressController %s did not have proper labels, not deleting.", customIngress.Name))
}
}
reqLogger.Info(fmt.Sprintf("Customdomain %s successfully finalized", instance.Name))
return nil
}
// addFinalizer is a function that adds a finalizer for the CustomDomain CR
func (r *CustomDomainReconciler) addFinalizer(reqLogger logr.Logger, m *customdomainv1alpha1.CustomDomain) error {
reqLogger.Info("Adding Finalizer for the CustomDomain")
m.SetFinalizers(append(m.GetFinalizers(), customDomainFinalizer))
// Update CR
err := r.Client.Update(context.TODO(), m)
if err != nil {
reqLogger.Error(err, "Failed to update CustomDomain with finalizer")
return err
}
return nil
}
// SetCustomDomainStatus sets the status of the custom domain resource
func SetCustomDomainStatus(reqLogger logr.Logger, instance *customdomainv1alpha1.CustomDomain, message string, condition customdomainv1alpha1.CustomDomainConditionType, state customdomainv1alpha1.CustomDomainStateType) {
instance.Status.Conditions = SetCustomDomainCondition(
instance.Status.Conditions,
condition,
corev1.ConditionTrue,
message,
UpdateConditionNever)
instance.Status.State = state
reqLogger.Info(fmt.Sprintf("CustomDomain (%s) status updated: condition: (%s), state: (%s)", instance.Name, string(condition), string(state)))
}
// statusUpdate helper function to set the actual status update
func (r *CustomDomainReconciler) statusUpdate(reqLogger logr.Logger, instance *customdomainv1alpha1.CustomDomain) error {
err := r.Client.Status().Update(context.TODO(), instance)
if err != nil {
reqLogger.Error(err, fmt.Sprintf("Status update for %s failed", instance.Name))
}
//reqLogger.Info(fmt.Sprintf("Status updated for %s", instance.Name))
return err
}
// contains is a helper function for finding a string in an array
func contains(list []string, s string) bool {
for _, v := range list {
if v == s {
return true
}
}
return false
}
// remove is a helper function for finalizer
func remove(list []string, s string) []string {
for i, v := range list {
if v == s {
list = append(list[:i], list[i+1:]...)
}
}
return list
}
// letters used by randSeq
var letters = []rune("abcdefghijklmnopqrstuvwxyz")
// randSeq is a function to generate a fixed length string with random letters
func randSeq(n int) string {
b := make([]rune, n)
for i := range b {
// #nosec G404
b[i] = letters[rand.Intn(len(letters))]
}
return string(b)
}
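// Example (illustrative only): randSeq is handy for building short random
// suffixes for names that must not collide, e.g.
//
//	name := fmt.Sprintf("apps-%s", randSeq(6)) // e.g. "apps-qzpfka"
//
// Because it uses math/rand (see the #nosec annotation above), the output
// is not cryptographically secure and should only be used for
// non-sensitive identifiers.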
// GetPlatformType returns the cloud platform type for the cluster
func GetPlatformType(kclient client.Client) (*configv1.PlatformType, error) {
infra, err := GetInfrastructureObject(kclient)
if err != nil {
return nil, err
}
return &infra.Status.PlatformStatus.Type, nil
}
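// Illustrative usage sketch (error handling shortened): branching on the
// underlying cloud platform.
//
//	platform, err := GetPlatformType(kclient)
//	if err != nil {
//		return err
//	}
//	if *platform == configv1.AWSPlatformType {
//		// AWS-specific handling goes here
//	}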
// GetInfrastructureObject returns the canonical Infrastructure object
func GetInfrastructureObject(kclient client.Client) (*configv1.Infrastructure, error) {
infrastructure := &configv1.Infrastructure{}
if err := kclient.Get(context.TODO(), client.ObjectKey{Name: "cluster"}, infrastructure); err != nil {
return nil, fmt.Errorf("failed to get default infrastructure with name cluster: %w", err)
}
return infrastructure, nil
}
// Taken from https://github.com/openshift/cloud-ingress-operator/blob/master/pkg/utils/clusterversion.go
// GetClusterVersionObject returns the canonical ClusterVersion object
// To check current version: `output.Status.History[0].Version`
//
// `history contains a list of the most recent versions applied to the cluster.
// This value may be empty during cluster startup, and then will be updated when a new update is being applied.
// The newest update is first in the list and it is ordered by recency`
func (r *CustomDomainReconciler) GetClusterVersion(kclient client.Client) (string, error) {
versionObject := &configv1.ClusterVersion{}
ns := types.NamespacedName{
Namespace: "",
Name: "version",
}
err := kclient.Get(context.TODO(), ns, versionObject)
if err != nil {
return "", err
}
// handle when there's no object defined || no version found on history
if versionObject == nil || len(versionObject.Status.History) == 0 {
return "", fmt.Errorf("version couldn't be grabbed from clusterversion: %+v", versionObject) // (%+v) adds field names
}
return versionObject.Status.History[0].Version, nil
}
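// Illustrative usage sketch (error handling shortened): reading the current
// version before applying version-specific behavior. A real comparison
// should use a semver-aware library rather than a string prefix check.
//
//	version, err := r.GetClusterVersion(r.Client)
//	if err != nil {
//		return err
//	}
//	if strings.HasPrefix(version, "4.13") {
//		// 4.13+ specific handling goes here
//	}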
func | isUsingNewManagedIngressFeature | identifier_name |
|
codec.rs | /// `f` parity columns, where one or more columns is missing, reconstruct
/// the data from the missing columns. Takes as a parameter exactly `k`
/// surviving columns, even if more than `k` columns survive. These *must*
/// be the lowest `k` surviving columns. For example, in a 5+3 array where
/// the columns 0 and 3 are missing, provide columns 1, 2, 4, 5, and 6 (data
/// columns 1, 2, and 4 and parity columns 0 and 1).
///
/// This method cannot reconstruct missing parity columns. In order to
/// reconstruct missing parity columns, you must first use this method to
/// regenerate all data columns, *and then* use `encode` to recreate the
/// parity.
///
/// # Parameters
///
/// - `len`: Size of each column, in bytes
/// - `surviving`: Exactly `k` columns of surviving data and parity,
/// sorted in order of the original column index, with
/// data columns preceding parity columns.
/// - `missing`: Reconstructed data (not parity!) columns. The
/// number should be no more than the ones count of
/// `erasures`. Upon return, they will be populated
/// with the original data of the missing columns.
/// - `erasures`: Bitmap of the column indices of the missing columns.
pub unsafe fn decode(&self, len: usize, surviving: &[*const u8],
missing: &mut [*mut u8], erasures: &FixedBitSet) {
let k = self.m - self.f;
let errs = erasures.count_ones(..k as usize) as u32;
assert!(errs > 0, "Only a fool would reconstruct an undamaged array!");
assert_eq!(errs as usize, missing.len());
let dec_tables = self.mk_decode_tables(erasures);
isa_l::ec_encode_data(len, k, errs, &dec_tables, surviving, missing);
}
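// Illustrative sketch only (buffers and their lengths are assumed): with a
// 3+1 codec where data column 0 is lost, pass the lowest k = 3 survivors
// (data columns 1 and 2 plus parity column 0) and reconstruct column 0.
//
// let codec = Codec::new(4, 1);
// let mut erasures = FixedBitSet::with_capacity(4);
// erasures.insert(0); // data column 0 is missing
// let surviving = [d1.as_ptr(), d2.as_ptr(), p0.as_ptr()];
// let mut missing = [d0.as_mut_ptr()];
// unsafe { codec.decode(len, &surviving, &mut missing, &erasures) };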
/// Generate parity columns from a complete set of data columns
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Input array: `k` columns of `len` bytes each
/// - `parity`: Storage for parity columns. `f` columns of `len` bytes
/// each: will be populated upon return.
///
/// # Safety
///
/// Caller must ensure that the `data` and `parity` fields are of sufficient
/// size and point to allocated memory. `parity` need not be initialized.
pub unsafe fn encode(&self, len: usize, data: &[*const u8],
parity: &mut [*mut u8])
{
let k = self.m - self.f;
isa_l::ec_encode_data(len, k, self.f, &self.enc_tables, data, parity);
}
/// Encode parity, using vectored input
///
/// Like `encode`, but with discontiguous data columns.
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Input array: `k` columns of `len` bytes each. They may be
/// discontiguous, and each may have a different structure.
/// - `parity`: Storage for parity columns. `f` columns of `len` bytes
/// each: will be populated upon return.
///
/// # Safety
///
/// Caller must ensure that the `data` and `parity` fields are of sufficient
/// size and point to allocated memory. `parity` need not be initialized.
pub unsafe fn encodev(&self, len: usize, data: &[SGList],
parity: &mut [*mut u8])
{
let mut cursors : Vec<SGCursor> =
data.iter()
.map(SGCursor::from)
.collect();
let mut l = 0;
while l < len {
let ncl =
cursors.iter()
.map(SGCursor::peek_len)
.min().unwrap();
let (refs, _iovecs) : (Vec<_>, Vec<_>) =
cursors.iter_mut()
.map(|sg| {
let iovec = sg.next(ncl).unwrap();
(iovec.as_ptr(), iovec)
})
.unzip();
let mut prefs: Vec<*mut u8> = parity.iter_mut()
.map(|iov| unsafe{iov.add(l)})
.collect();
self.encode(ncl, &refs, &mut prefs);
l += ncl;
}
}
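// Illustrative sketch only: encoding from scatter/gather input. Each SGList
// is assumed to be a list of IoVec segments that together hold `len` bytes
// for one data column; the parity buffers are plain contiguous columns.
//
// let columns: Vec<SGList> = gather_data_columns(); // assumed helper
// let mut parity_ptrs: Vec<*mut u8> =
//     parity_bufs.iter_mut().map(|b| b.as_mut_ptr()).collect();
// unsafe { codec.encodev(len, &columns, &mut parity_ptrs) };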
/// Update parity columns from a single data column.
///
/// This method can be used to progressively update a set of parity columns
/// by feeding in one data column at a time.
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Input array: a single column of `len` bytes
/// - `parity`: Storage for parity columns. `f` columns of `len` bytes
/// each: will be updated upon return.
/// - `data_idx`: Column index of the supplied data column. Must lie in
/// the range `[0, k)`.
///
/// # Safety
///
/// Caller must ensure that the `parity` field is of sufficient size and
/// points to allocated memory. It need not be initialized.
pub unsafe fn encode_update(&self, len: usize, data: &[u8],
parity: &mut [*mut u8], data_idx: u32)
{
let k = self.m - self.f;
isa_l::ec_encode_data_update(len, k, self.f, data_idx, &self.enc_tables,
data, parity);
}
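// Illustrative sketch only (buffer setup assumed): building parity
// incrementally, one data column at a time, instead of a single encode()
// call over all columns.
//
// let mut parity_ptrs: Vec<*mut u8> =
//     parity_bufs.iter_mut().map(|b| b.as_mut_ptr()).collect();
// for (i, col) in data_columns.iter().enumerate() {
//     unsafe { codec.encode_update(len, col, &mut parity_ptrs, i as u32) };
// }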
// Generate tables for RAID decoding
// Loosely based on erasure_code_perf.c from ISA-L's internal test suite
// NB: For reasonably small values of m and f, it should be possible to
// cache all possible decode tables.
fn mk_decode_tables(&self, erasures: &FixedBitSet) -> Box<[u8]> {
let k : usize = (self.m - self.f) as usize;
// Exclude missing parity columns from the list
let errs : usize = erasures.count_ones(..k);
let mut dec_tables = vec![0u8; 32 * k * errs].into_boxed_slice();
// To generate the decoding matrix, first select k healthy rows from the
// encoding matrix.
let mut dec_matrix_inv = vec![0u8; k * k].into_boxed_slice();
let mut skips = 0;
for i in 0..k {
while erasures.contains(i + skips) {
skips += 1;
}
let row = i + skips;
for j in 0..k {
dec_matrix_inv[k * i + j] =
self.enc_matrix[k * row + j];
}
}
// Then invert the result
let mut dec_matrix = vec![0u8; k * k].into_boxed_slice();
isa_l::gf_invert_matrix(&dec_matrix_inv, &mut dec_matrix, k as u32)
.unwrap();
// Finally, select the rows corresponding to missing data
let mut dec_rows = vec![0u8; k * errs].into_boxed_slice();
for (i, r) in erasures.ones().enumerate() {
if r >= k {
break; // Exclude missing parity columns
}
for j in 0..k {
dec_rows[k * i + j] =
dec_matrix[k * r + j];
}
}
// Finally generate the fast encoding tables
isa_l::ec_init_tables(k as u32, errs as u32, &dec_rows, &mut dec_tables);
dec_tables
}
/// Return the degree of redundancy
pub fn protection(&self) -> i16 {
self.f as i16
}
/// Return the total number of disks in the raid stripe
pub fn stripesize(&self) -> i16 {
self.m as i16
}
}
// LCOV_EXCL_START
#[cfg(test)]
mod tests {
use divbuf::DivBufShared;
use fixedbitset::FixedBitSet;
use itertools::Itertools;
use pretty_assertions::assert_eq;
use rand::{self, Rng};
use std::ops::Deref;
use super::*;
// Roundtrip data through the codec for various array sizes and erasure sets
#[test]
pub fn comprehensive() {
let cfgs = [
(3, 1), (9, 1),
(4, 2), (10, 2),
(6, 3), (19, 3),
(8, 4), (20, 4)
];
let len |
/// Reconstruct missing data from partial surviving columns
///
/// Given a `Codec` with `m` total columns composed of `k` data columns and | random_line_split |
|
codec.rs | Exactly `k` columns of surviving data and parity,
/// sorted in order of the original column index, with
/// data columns preceding parity columns.
/// - `missing`: Reconstructed data (not parity!) columns. The
/// number should be no more than the ones count of
/// `erasures`. Upon return, they will be populated
/// with the original data of the missing columns.
/// - `erasures`: Bitmap of the column indices of the missing columns.
pub unsafe fn decode(&self, len: usize, surviving: &[*const u8],
missing: &mut [*mut u8], erasures: &FixedBitSet) {
let k = self.m - self.f;
let errs = erasures.count_ones(..k as usize) as u32;
assert!(errs > 0, "Only a fool would reconstruct an undamaged array!");
assert_eq!(errs as usize, missing.len());
let dec_tables = self.mk_decode_tables(erasures);
isa_l::ec_encode_data(len, k, errs, &dec_tables, surviving, missing);
}
/// Generate parity columns from a complete set of data columns
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Input array: `k` columns of `len` bytes each
/// - `parity`: Storage for parity columns. `f` columns of `len` bytes
/// each: will be populated upon return.
///
/// # Safety
///
/// Caller must ensure that the `data` and `parity` fields are of sufficient
/// size and point to allocated memory. `parity` need not be initialized.
pub unsafe fn encode(&self, len: usize, data: &[*const u8],
parity: &mut [*mut u8])
{
let k = self.m - self.f;
isa_l::ec_encode_data(len, k, self.f, &self.enc_tables, data, parity);
}
/// Encode parity, using vectored input
///
/// Like `encode`, but with discontiguous data columns.
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Input array: `k` columns of `len` bytes each. They may be
/// discontiguous, and each may have a different structure.
/// - `parity`: Storage for parity columns. `f` columns of `len` bytes
/// each: will be populated upon return.
///
/// # Safety
///
/// Caller must ensure that the `data` and `parity` fields are of sufficient
/// size and point to allocated memory. `parity` need not be initialized.
pub unsafe fn encodev(&self, len: usize, data: &[SGList],
parity: &mut [*mut u8])
{
let mut cursors : Vec<SGCursor> =
data.iter()
.map(SGCursor::from)
.collect();
let mut l = 0;
while l < len {
let ncl =
cursors.iter()
.map(SGCursor::peek_len)
.min().unwrap();
let (refs, _iovecs) : (Vec<_>, Vec<_>) =
cursors.iter_mut()
.map(|sg| {
let iovec = sg.next(ncl).unwrap();
(iovec.as_ptr(), iovec)
})
.unzip();
let mut prefs: Vec<*mut u8> = parity.iter_mut()
.map(|iov| unsafe{iov.add(l)})
.collect();
self.encode(ncl, &refs, &mut prefs);
l += ncl;
}
}
/// Update parity columns from a single data column.
///
/// This method can be used to progressively update a set of parity columns
/// by feeding in one data column at a time.
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Input array: a single column of `len` bytes
/// - `parity`: Storage for parity columns. `f` columns of `len` bytes
/// each: will be updated upon return.
/// - `data_idx`: Column index of the supplied data column. Must lie in
/// the range `[0, k)`.
///
/// # Safety
///
/// Caller must ensure that the `parity` field is of sufficient size and
/// points to allocated memory. It need not be initialized.
pub unsafe fn encode_update(&self, len: usize, data: &[u8],
parity: &mut [*mut u8], data_idx: u32)
{
let k = self.m - self.f;
isa_l::ec_encode_data_update(len, k, self.f, data_idx, &self.enc_tables,
data, parity);
}
// Generate tables for RAID decoding
// Loosely based on erasure_code_perf.c from ISA-L's internal test suite
// NB: For reasonably small values of m and f, it should be possible to
// cache all possible decode tables.
fn mk_decode_tables(&self, erasures: &FixedBitSet) -> Box<[u8]> {
let k : usize = (self.m - self.f) as usize;
// Exclude missing parity columns from the list
let errs : usize = erasures.count_ones(..k);
let mut dec_tables = vec![0u8; 32 * k * errs].into_boxed_slice();
// To generate the decoding matrix, first select k healthy rows from the
// encoding matrix.
let mut dec_matrix_inv = vec![0u8; k * k].into_boxed_slice();
let mut skips = 0;
for i in 0..k {
while erasures.contains(i + skips) {
skips += 1;
}
let row = i + skips;
for j in 0..k {
dec_matrix_inv[k * i + j] =
self.enc_matrix[k * row + j];
}
}
// Then invert the result
let mut dec_matrix = vec![0u8; k * k].into_boxed_slice();
isa_l::gf_invert_matrix(&dec_matrix_inv, &mut dec_matrix, k as u32)
.unwrap();
// Finally, select the rows corresponding to missing data
let mut dec_rows = vec![0u8; k * errs].into_boxed_slice();
for (i, r) in erasures.ones().enumerate() {
if r >= k |
for j in 0..k {
dec_rows[k * i + j] =
dec_matrix[k * r + j];
}
}
// Finally generate the fast encoding tables
isa_l::ec_init_tables(k as u32, errs as u32, &dec_rows, &mut dec_tables);
dec_tables
}
/// Return the degree of redundancy
pub fn protection(&self) -> i16 {
self.f as i16
}
/// Return the total number of disks in the raid stripe
pub fn stripesize(&self) -> i16 {
self.m as i16
}
}
// LCOV_EXCL_START
#[cfg(test)]
mod tests {
use divbuf::DivBufShared;
use fixedbitset::FixedBitSet;
use itertools::Itertools;
use pretty_assertions::assert_eq;
use rand::{self, Rng};
use std::ops::Deref;
use super::*;
// Roundtrip data through the codec for various array sizes and erasure sets
#[test]
pub fn comprehensive() {
let cfgs = [
(3, 1), (9, 1),
(4, 2), (10, 2),
(6, 3), (19, 3),
(8, 4), (20, 4)
];
let len = 64;
let maxdata = 28;
let maxparity = 4;
let mut rng = rand::thread_rng();
let mut data = Vec::<Vec<u8>>::new();
let mut parity = Vec::<Vec<u8>>::new();
let mut reconstructed = Vec::<Vec<u8>>::new();
for _ in 0..maxdata {
let mut column = Vec::<u8>::with_capacity(len);
for _ in 0..len {
column.push(rng.gen());
}
data.push(column);
}
for _ in 0..maxparity {
let column = vec![0u8; len];
parity.push(column);
}
for _ in 0..(maxparity as usize) {
reconstructed.push(vec![0u8; len]);
}
for cfg in &cfgs {
let m = cfg.0;
let f = cfg.1;
let k = m - f;
let codec = Codec::new(m, f);
// First encode
let mut input = Vec::<*const u8>::with_capacity(m as usize);
for x in data.iter().take(k as usize) {
| {
break; // Exclude missing parity columns
} | conditional_block |
codec.rs | (&self, _len: usize, _data: &[*const u8],
_parity: &[*const u8]) -> FixedBitSet {
panic!("Unimplemented");
}
/// Reconstruct missing data from partial surviving columns
///
/// Given a `Codec` with `m` total columns composed of `k` data columns and
/// `f` parity columns, where one or more columns is missing, reconstruct
/// the data from the missing columns. Takes as a parameter exactly `k`
/// surviving columns, even if more than `k` columns survive. These *must*
/// be the lowest `k` surviving columns. For example, in a 5+3 array where
/// the columns 0 and 3 are missing, provide columns 1, 2, 4, 5, and 6 (data
/// columns 1, 2, and 4 and parity columns 0 and 1).
///
/// This method cannot reconstruct missing parity columns. In order to
/// reconstruct missing parity columns, you must first use this method to
/// regenerate all data columns, *and then* use `encode` to recreate the
/// parity.
///
/// # Parameters
///
/// - `len`: Size of each column, in bytes
/// - `surviving`: Exactly `k` columns of surviving data and parity,
/// sorted in order of the original column index, with
/// data columns preceding parity columns.
/// - `missing`: Reconstructed data (not parity!) columns. The
/// number should be no more than the ones count of
/// `erasures`. Upon return, they will be populated
/// with the original data of the missing columns.
/// - `erasures`: Bitmap of the column indices of the missing columns.
pub unsafe fn decode(&self, len: usize, surviving: &[*const u8],
missing: &mut [*mut u8], erasures: &FixedBitSet) {
let k = self.m - self.f;
let errs = erasures.count_ones(..k as usize) as u32;
assert!(errs > 0, "Only a fool would reconstruct an undamaged array!");
assert_eq!(errs as usize, missing.len());
let dec_tables = self.mk_decode_tables(erasures);
isa_l::ec_encode_data(len, k, errs, &dec_tables, surviving, missing);
}
/// Generate parity columns from a complete set of data columns
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Input array: `k` columns of `len` bytes each
/// - `parity`: Storage for parity columns. `f` columns of `len` bytes
/// each: will be populated upon return.
///
/// # Safety
///
/// Caller must ensure that the `data` and `parity` fields are of sufficient
/// size and point to allocated memory. `parity` need not be initialized.
pub unsafe fn encode(&self, len: usize, data: &[*const u8],
parity: &mut [*mut u8])
{
let k = self.m - self.f;
isa_l::ec_encode_data(len, k, self.f, &self.enc_tables, data, parity);
}
/// Encode parity, using vectored input
///
/// Like `encode`, but with discontiguous data columns.
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Input array: `k` columns of `len` bytes each. They may be
/// discontiguous, and each may have a different structure.
/// - `parity`: Storage for parity columns. `f` columns of `len` bytes
/// each: will be populated upon return.
///
/// # Safety
///
/// Caller must ensure that the `data` and `parity` fields are of sufficient
/// size and point to allocated memory. `parity` need not be initialized.
pub unsafe fn encodev(&self, len: usize, data: &[SGList],
parity: &mut [*mut u8])
{
let mut cursors : Vec<SGCursor> =
data.iter()
.map(SGCursor::from)
.collect();
let mut l = 0;
while l < len {
let ncl =
cursors.iter()
.map(SGCursor::peek_len)
.min().unwrap();
let (refs, _iovecs) : (Vec<_>, Vec<_>) =
cursors.iter_mut()
.map(|sg| {
let iovec = sg.next(ncl).unwrap();
(iovec.as_ptr(), iovec)
})
.unzip();
let mut prefs: Vec<*mut u8> = parity.iter_mut()
.map(|iov| unsafe{iov.add(l)})
.collect();
self.encode(ncl, &refs, &mut prefs);
l += ncl;
}
}
/// Update parity columns from a single data column.
///
/// This method can be used to progressively update a set of parity columns
/// by feeding in one data column at a time.
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Input array: a single column of `len` bytes
/// - `parity`: Storage for parity columns. `f` columns of `len` bytes
/// each: will be updated upon return.
/// - `data_idx`: Column index of the supplied data column. Must lie in
/// the range `[0, k)`.
///
/// # Safety
///
/// Caller must ensure that the `parity` field is of sufficient size and
/// points to allocated memory. It need not be initialized.
pub unsafe fn encode_update(&self, len: usize, data: &[u8],
parity: &mut [*mut u8], data_idx: u32)
{
let k = self.m - self.f;
isa_l::ec_encode_data_update(len, k, self.f, data_idx, &self.enc_tables,
data, parity);
}
// Generate tables for RAID decoding
// Loosely based on erasure_code_perf.c from ISA-L's internal test suite
// NB: For reasonably small values of m and f, it should be possible to
// cache all possible decode tables.
fn mk_decode_tables(&self, erasures: &FixedBitSet) -> Box<[u8]> {
let k : usize = (self.m - self.f) as usize;
// Exclude missing parity columns from the list
let errs : usize = erasures.count_ones(..k);
let mut dec_tables = vec![0u8; 32 * k * errs].into_boxed_slice();
// To generate the decoding matrix, first select k healthy rows from the
// encoding matrix.
let mut dec_matrix_inv = vec![0u8; k * k].into_boxed_slice();
let mut skips = 0;
for i in 0..k {
while erasures.contains(i + skips) {
skips += 1;
}
let row = i + skips;
for j in 0..k {
dec_matrix_inv[k * i + j] =
self.enc_matrix[k * row + j];
}
}
// Then invert the result
let mut dec_matrix = vec![0u8; k * k].into_boxed_slice();
isa_l::gf_invert_matrix(&dec_matrix_inv, &mut dec_matrix, k as u32)
.unwrap();
// Finally, select the rows corresponding to missing data
let mut dec_rows = vec![0u8; k * errs].into_boxed_slice();
for (i, r) in erasures.ones().enumerate() {
if r >= k {
break; // Exclude missing parity columns
}
for j in 0..k {
dec_rows[k * i + j] =
dec_matrix[k * r + j];
}
}
// Finally generate the fast encoding tables
isa_l::ec_init_tables(k as u32, errs as u32, &dec_rows, &mut dec_tables);
dec_tables
}
/// Return the degree of redundancy
pub fn protection(&self) -> i16 {
self.f as i16
}
/// Return the total number of disks in the raid stripe
pub fn stripesize(&self) -> i16 {
self.m as i16
}
}
// LCOV_EXCL_START
#[cfg(test)]
mod tests {
use divbuf::DivBufShared;
use fixedbitset::FixedBitSet;
use itertools::Itertools;
use pretty_assertions::assert_eq;
use rand::{self, Rng};
use std::ops::Deref;
use super::*;
// Roundtrip data through the codec for various array sizes and erasure sets
#[test]
pub fn comprehensive() {
let cfgs = [
(3, 1), (9, 1),
(4, 2 | check | identifier_name |
|
codec.rs | Exactly `k` columns of surviving data and parity,
/// sorted in order of the original column index, with
/// data columns preceding parity columns.
/// - `missing`: Reconstructed data (not parity!) columns. The
/// number should be no more than the ones count of
/// `erasures`. Upon return, they will be populated
/// with the original data of the missing columns.
/// - `erasures`: Bitmap of the column indices of the missing columns.
pub unsafe fn decode(&self, len: usize, surviving: &[*const u8],
missing: &mut [*mut u8], erasures: &FixedBitSet) {
let k = self.m - self.f;
let errs = erasures.count_ones(..k as usize) as u32;
assert!(errs > 0, "Only a fool would reconstruct an undamaged array!");
assert_eq!(errs as usize, missing.len());
let dec_tables = self.mk_decode_tables(erasures);
isa_l::ec_encode_data(len, k, errs, &dec_tables, surviving, missing);
}
/// Generate parity columns from a complete set of data columns
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Input array: `k` columns of `len` bytes each
/// - `parity`: Storage for parity columns. `f` columns of `len` bytes
/// each: will be populated upon return.
///
/// # Safety
///
/// Caller must ensure that the `data` and `parity` fields are of sufficient
/// size and point to allocated memory. `parity` need not be initialized.
pub unsafe fn encode(&self, len: usize, data: &[*const u8],
parity: &mut [*mut u8])
{
let k = self.m - self.f;
isa_l::ec_encode_data(len, k, self.f, &self.enc_tables, data, parity);
}
/// Encode parity, using vectored input
///
/// Like `encode`, but with discontiguous the data columns.
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Input array: `k` columns of `len` bytes each. They may be
/// discontiguous, and each may have a different structure.
/// - `parity`: Storage for parity columns. `f` columns of `len` bytes
/// each: will be populated upon return.
///
/// # Safety
///
/// Caller must ensure that the `data` and `parity` fields are of sufficient
/// size and point to allocated memory. `parity` need not be initialized.
pub unsafe fn encodev(&self, len: usize, data: &[SGList],
parity: &mut [*mut u8])
{
let mut cursors : Vec<SGCursor> =
data.iter()
.map(SGCursor::from)
.collect();
let mut l = 0;
while l < len {
let ncl =
cursors.iter()
.map(SGCursor::peek_len)
.min().unwrap();
let (refs, _iovecs) : (Vec<_>, Vec<_>) =
cursors.iter_mut()
.map(|sg| {
let iovec = sg.next(ncl).unwrap();
(iovec.as_ptr(), iovec)
})
.unzip();
let mut prefs: Vec<*mut u8> = parity.iter_mut()
.map(|iov| unsafe{iov.add(l)})
.collect();
self.encode(ncl, &refs, &mut prefs);
l += ncl;
}
}
/// Update parity columns from a single data column.
///
/// This method can be used to progressively update a set of parity columns
/// by feeding in one data column at a time.
///
/// # Parameters
/// - `len`: Size of each column, in bytes
/// - `data`: Input array: a single column of `len` bytes
/// - `parity`: Storage for parity columns. `f` columns of `len` bytes
/// each: will be updated upon return.
/// - `data_idx`: Column index of the supplied data column. Must lie in
/// the range `[0, k)`.
///
/// # Safety
///
/// Caller must ensure that the `parity` field is of sufficient size and
/// points to allocated memory. It need not be initialized.
pub unsafe fn encode_update(&self, len: usize, data: &[u8],
parity: &mut [*mut u8], data_idx: u32)
{
let k = self.m - self.f;
isa_l::ec_encode_data_update(len, k, self.f, data_idx, &self.enc_tables,
data, parity);
}
// Generate tables for RAID decoding
// Loosely based on erasure_code_perf.c from ISA-L's internal test suite
// NB: For reasonably small values of m and f, it should be possible to
// cache all possible decode tables.
fn mk_decode_tables(&self, erasures: &FixedBitSet) -> Box<[u8]> {
let k : usize = (self.m - self.f) as usize;
// Exclude missing parity columns from the list
let errs : usize = erasures.count_ones(..k);
let mut dec_tables = vec![0u8; 32 * k * errs].into_boxed_slice();
// To generate the decoding matrix, first select k healthy rows from the
// encoding matrix.
let mut dec_matrix_inv = vec![0u8; k * k].into_boxed_slice();
let mut skips = 0;
for i in 0..k {
while erasures.contains(i + skips) {
skips += 1;
}
let row = i + skips;
for j in 0..k {
dec_matrix_inv[k * i + j] =
self.enc_matrix[k * row + j];
}
}
// Then invert the result
let mut dec_matrix = vec![0u8; k * k].into_boxed_slice();
isa_l::gf_invert_matrix(&dec_matrix_inv, &mut dec_matrix, k as u32)
.unwrap();
// Finally, select the rows corresponding to missing data
let mut dec_rows = vec![0u8; k * errs].into_boxed_slice();
for (i, r) in erasures.ones().enumerate() {
if r >= k {
break; // Exclude missing parity columns
}
for j in 0..k {
dec_rows[k * i + j] =
dec_matrix[k * r + j];
}
}
// Finally generate the fast encoding tables
isa_l::ec_init_tables(k as u32, errs as u32, &dec_rows, &mut dec_tables);
dec_tables
}
/// Return the degree of redundancy
pub fn protection(&self) -> i16 {
self.f as i16
}
/// Return the total number of disks in the raid stripe
pub fn stripesize(&self) -> i16 {
self.m as i16
}
}
// LCOV_EXCL_START
#[cfg(test)]
mod tests {
use divbuf::DivBufShared;
use fixedbitset::FixedBitSet;
use itertools::Itertools;
use pretty_assertions::assert_eq;
use rand::{self, Rng};
use std::ops::Deref;
use super::*;
// Roundtrip data through the codec for various array sizes and erasure sets
#[test]
pub fn comprehensive() | data.push(column);
}
for _ in 0..maxparity {
let column = vec![0u8; len];
parity.push(column);
}
for _ in 0..(maxparity as usize) {
reconstructed.push(vec![0u8; len]);
}
for cfg in &cfgs {
let m = cfg.0;
let f = cfg.1;
let k = m - f;
let codec = Codec::new(m, f);
// First encode
let mut input = Vec::<*const u8>::with_capacity(m as usize);
for x in data.iter().take(k as usize) {
| {
let cfgs = [
(3, 1), (9, 1),
(4, 2), (10, 2),
(6, 3), (19, 3),
(8, 4), (20, 4)
];
let len = 64;
let maxdata = 28;
let maxparity = 4;
let mut rng = rand::thread_rng();
let mut data = Vec::<Vec<u8>>::new();
let mut parity = Vec::<Vec<u8>>::new();
let mut reconstructed = Vec::<Vec<u8>>::new();
for _ in 0..maxdata {
let mut column = Vec::<u8>::with_capacity(len);
for _ in 0..len {
column.push(rng.gen());
} | identifier_body |
treerunner.js | dirty = false;
fn = lQueue.shift();
while (fn) {
fn();
fn = lQueue.shift();
}
}
function nextTick (fn) {
queue.push(fn);
if (dirty) return;
dirty = true;
trigger();
}
if (hasPostMessage) {
trigger = function () { window.postMessage(messageName, '*'); };
processQueue = function (event) {
if (event.source == window && event.data === messageName) {
if (event.stopPropagation) {
event.stopPropagation();
}
else {
event.returnValue = false;
}
flushQueue();
}
};
nextTick.listener = addListener(window, 'message', processQueue, true);
}
else {
trigger = function () { window.setTimeout(function () { processQueue(); }, 0); };
processQueue = flushQueue;
}
nextTick.removeListener = function () {
removeListener(window, 'message', processQueue, true);
};
return nextTick;
}());
return {
/**
* Construct a Tree Runner Object
* A tree runner, given a node, is responsible for the download and execution
* of the root node and any children it encounters.
* @constructs TreeRunner
* @param {Object} env - The context to run in
* @param {TreeNode} root - a Tree Node at the root of this tree
*/
init: function(env, root) {
this.env = env;
this.root = root;
},
/**
* Downloads the tree, starting from this node, and spanning into its children
* @method TreeRunner#download
* @public
* @param {Function} downloadComplete - a callback executed when the download is done
*/
download: function(downloadComplete) {
var root = this.root,
self = this,
rootData = root.data,
rootParent = root.getParent(),
communicatorGet;
// given original id & parent resolved id, create resolved id
// given resolved id & parent resolved url, create resolved url
// build a communicator
// communicator download (async)
// -- on complete (file) | // -- in a next-tick, create a new TreeDownloader at the new child (async)
// -- -- on complete, decrement children count by 1
// -- -- when children count hits 0, call downloadComplete()
if (rootParent) {
rootData.resolvedId = this.env.rulesEngine.resolveModule(rootData.originalId, rootParent.data.resolvedId);
}
else {
rootData.resolvedId = this.env.rulesEngine.resolveModule(rootData.originalId, '');
}
rootData.resolvedUrl = this.env.rulesEngine.resolveFile(rootData.resolvedId);
// Build a communicator.
communicatorGet = this.buildCommunicator(root);
// Download the file via communicator, get back contents
communicatorGet(rootData.originalId, rootData.resolvedUrl, function(content) {
// build a flow control to adjust the contents based on rules
var pointcuts = self.env.rulesEngine.getContentRules(rootData.resolvedUrl),
contentFlow = new Flow(),
i = 0,
len = pointcuts.length;
addContent = function(fn) {
contentFlow.seq(function (next, error, contents) {
try {
fn(function(data) {
next(null, data);
}, contents);
}
catch(e) {
next(e, contents);
}
});
};
contentFlow.seq(function (next) {
next(null, content);
});
for (i; i < len; i++) {
addContent(pointcuts[i]);
}
contentFlow.seq(function (next, error, contents) {
var circular = false,
searchIndex = {},
parent = rootParent,
module,
qualifiedId;
if (typeof contents === 'string') {
rootData.file = contents;
}
else {
rootData.exports = contents;
}
// determine if this is circular
searchIndex[rootData.originalId] = true;
while(parent && !circular) {
if (searchIndex[parent.data.originalId]) {
circular = true;
}
else {
searchIndex[parent.data.originalId] = true;
parent = parent.getParent();
}
}
rootData.circular = circular;
// kick off its children
if (rootData.exports) {
// when there are exports available, then we prematurely resolve this module
// this can happen when the an external rule for the communicator has resolved
// the export object for us
module = self.env.executor.createModule(rootData.resolvedId, self.env.requireContext.qualifiedId(root), rootData.resolvedUrl);
module.exec = true;
module.exports = contents;
downloadComplete();
}
else if (rootData.circular) {
// circular nodes do not need to download their children (again)
downloadComplete();
}
else {
// Analyze the file for depenencies and kick off a child download for each one.
self.downloadDependencies(root, proxy(downloadComplete, self));
}
});
});
},
/**
* Executes a tree, starting from the root node
* In order to ensure a tree has all of its dependencies available
* a post-order traversal is used
* http://en.wikipedia.org/wiki/Tree_traversal#Post-order
* This loads Bottom-To-Top, Left-to-Right
* @method TreeRunner#execute
* @public
* @param {Function} executeComplete - a callback function ran when all execution is done
*/
execute: function(executeComplete) {
var nodes = this.root.postOrder(),
self = this,
len = nodes.length,
i = 0,
runNode = function(node) {
var nodeData = node.data,
module,
result;
if (!nodeData.resolvedId) {
return;
}
// executor: create a module
// if not circular, executor: run module (otherwise, the circular reference begins as empty exports)
module = self.env.executor.createModule(nodeData.resolvedId, self.env.requireContext.qualifiedId(node), nodeData.resolvedUrl);
nodeData.module = module;
if (module.exec) {
return;
}
if (!nodeData.circular) {
if (nodeData.exports) {
// exports came pre set
module.exports = nodeData.exports;
module.exec = true;
}
else if (typeof nodeData.file === 'string') {
self.env.executor.runModule(module, nodeData.file);
module.exec = true;
// if this is an AMD module, it's exports are coming from define()
if (!module.amd) {
nodeData.exports = module.exports;
}
}
}
};
for (i; i < len; i++) {
runNode(nodes[i]);
}
executeComplete();
},
/**
* Build a communcator function. If there are fetch rules, create a flow control
* to handle communication (as opposed to the internal communicator).
*
* @private
* @param {TreeNode} node The TreeNode you're building the communicator for.
* @return {Function} The built communicator method.
*/
buildCommunicator: function(node) {
var nodeData = node.data,
self = this,
parentData = node.getParent() ? node.getParent().data : null,
fetchRules = this.env.rulesEngine.getFetchRules(nodeData.resolvedId),
commFlow = new Flow(),
commFlowResolver = {
module: function() { return self.env.rulesEngine.resolveModule.apply(self.env.rulesEngine, arguments); },
url: function() { return self.env.rulesEngine.resolveFile.apply(self.env.rulesEngine, arguments); }
},
commFlowCommunicator = {
get: function() { return self.env.communicator.get.apply(self.env.communicator, arguments); }
},
addComm = function(fn) {
commFlow.seq(function(next, error, contents) {
function onData(err, data) {
next(null, data);
}
function onError(err) {
next(err, contents);
}
try {
fn(onData, contents, commFlowResolver, commFlowCommunicator, {
moduleId: nodeData.originalId,
parentId: (parentData) ? parentData.originalId : '',
parentUrl: (parentData) ? parentData.resolvedUrl : ''
});
}
catch(e) {
onError(e);
}
});
};
// for non-AMD modules, if the module is already resolved, return an empty string
// which will cause the communicator to exit early and apply content rules if required
// for AMD modules, we re-fetch every time due to the nature of dynamic modules
if (nodeData.resolvedId.indexOf('!') === -1) {
// is this module already available? If so, don't redownload. This happens often when
// there was an inline define() on the page
if (this.env.executor.getModule(nodeData.resolvedId)) {
return function(a, b, cb) {
cb('');
};
}
else if (this.env.executor.getModule(this.env | // -- transform the contents (rules)
// -- assign file to child
// -- extract requires
// -- for each child, create children, up the children count by 1 | random_line_split |
treerunner.js |
function nextTick (fn) {
queue.push(fn);
if (dirty) return;
dirty = true;
trigger();
}
if (hasPostMessage) {
trigger = function () { window.postMessage(messageName, '*'); };
processQueue = function (event) {
if (event.source == window && event.data === messageName) {
if (event.stopPropagation) {
event.stopPropagation();
}
else {
event.returnValue = false;
}
flushQueue();
}
};
nextTick.listener = addListener(window, 'message', processQueue, true);
}
else {
trigger = function () { window.setTimeout(function () { processQueue(); }, 0); };
processQueue = flushQueue;
}
nextTick.removeListener = function () {
removeListener(window, 'message', processQueue, true);
};
return nextTick;
}());
return {
/**
* Construct a Tree Runner Object
* A tree runner, given a node, is responsible for the download and execution
* of the root node and any children it encounters.
* @constructs TreeRunner
* @param {Object} env - The context to run in
* @param {TreeNode} root - a Tree Node at the root of this tree
*/
init: function(env, root) {
this.env = env;
this.root = root;
},
/**
* Downloads the tree, starting from this node, and spanning into its children
* @method TreeRunner#download
* @public
* @param {Function} downloadComplete - a callback executed when the download is done
*/
download: function(downloadComplete) {
var root = this.root,
self = this,
rootData = root.data,
rootParent = root.getParent(),
communicatorGet;
// given original id & parent resolved id, create resolved id
// given resolved id & parent resolved url, create resolved url
// build a communicator
// communicator download (async)
// -- on complete (file)
// -- transform the contents (rules)
// -- assign file to child
// -- extract requires
// -- for each child, create children, up the children count by 1
// -- in a next-tick, create a new TreeDownloader at the new child (async)
// -- -- on complete, decrement children count by 1
// -- -- when children count hits 0, call downloadComplete()
if (rootParent) {
rootData.resolvedId = this.env.rulesEngine.resolveModule(rootData.originalId, rootParent.data.resolvedId);
}
else {
rootData.resolvedId = this.env.rulesEngine.resolveModule(rootData.originalId, '');
}
rootData.resolvedUrl = this.env.rulesEngine.resolveFile(rootData.resolvedId);
// Build a communicator.
communicatorGet = this.buildCommunicator(root);
// Download the file via communicator, get back contents
communicatorGet(rootData.originalId, rootData.resolvedUrl, function(content) {
// build a flow control to adjust the contents based on rules
var pointcuts = self.env.rulesEngine.getContentRules(rootData.resolvedUrl),
contentFlow = new Flow(),
i = 0,
len = pointcuts.length;
addContent = function(fn) {
contentFlow.seq(function (next, error, contents) {
try {
fn(function(data) {
next(null, data);
}, contents);
}
catch(e) {
next(e, contents);
}
});
};
contentFlow.seq(function (next) {
next(null, content);
});
for (i; i < len; i++) {
addContent(pointcuts[i]);
}
contentFlow.seq(function (next, error, contents) {
var circular = false,
searchIndex = {},
parent = rootParent,
module,
qualifiedId;
if (typeof contents === 'string') {
rootData.file = contents;
}
else {
rootData.exports = contents;
}
// determine if this is circular
searchIndex[rootData.originalId] = true;
while(parent && !circular) {
if (searchIndex[parent.data.originalId]) {
circular = true;
}
else {
searchIndex[parent.data.originalId] = true;
parent = parent.getParent();
}
}
rootData.circular = circular;
// kick off its children
if (rootData.exports) {
// when there are exports available, then we prematurely resolve this module
// this can happen when the an external rule for the communicator has resolved
// the export object for us
module = self.env.executor.createModule(rootData.resolvedId, self.env.requireContext.qualifiedId(root), rootData.resolvedUrl);
module.exec = true;
module.exports = contents;
downloadComplete();
}
else if (rootData.circular) {
// circular nodes do not need to download their children (again)
downloadComplete();
}
else {
// Analyze the file for depenencies and kick off a child download for each one.
self.downloadDependencies(root, proxy(downloadComplete, self));
}
});
});
},
/**
* Executes a tree, starting from the root node
* In order to ensure a tree has all of its dependencies available
* a post-order traversal is used
* http://en.wikipedia.org/wiki/Tree_traversal#Post-order
* This loads Bottom-To-Top, Left-to-Right
* @method TreeRunner#execute
* @public
* @param {Function} executeComplete - a callback function ran when all execution is done
*/
execute: function(executeComplete) {
var nodes = this.root.postOrder(),
self = this,
len = nodes.length,
i = 0,
runNode = function(node) {
var nodeData = node.data,
module,
result;
if (!nodeData.resolvedId) {
return;
}
// executor: create a module
// if not circular, executor: run module (otherwise, the circular reference begins as empty exports)
module = self.env.executor.createModule(nodeData.resolvedId, self.env.requireContext.qualifiedId(node), nodeData.resolvedUrl);
nodeData.module = module;
if (module.exec) {
return;
}
if (!nodeData.circular) {
if (nodeData.exports) {
// exports came pre set
module.exports = nodeData.exports;
module.exec = true;
}
else if (typeof nodeData.file === 'string') {
self.env.executor.runModule(module, nodeData.file);
module.exec = true;
// if this is an AMD module, it's exports are coming from define()
if (!module.amd) {
nodeData.exports = module.exports;
}
}
}
};
for (i; i < len; i++) {
runNode(nodes[i]);
}
executeComplete();
},
/**
* Build a communcator function. If there are fetch rules, create a flow control
* to handle communication (as opposed to the internal communicator).
*
* @private
* @param {TreeNode} node The TreeNode you're building the communicator for.
* @return {Function} The built communicator method.
*/
buildCommunicator: function(node) {
var nodeData = node.data,
self = this,
parentData = node.getParent() ? node.getParent().data : null,
fetchRules = this.env.rulesEngine.getFetchRules(nodeData.resolvedId),
commFlow = new Flow(),
commFlowResolver = {
module: function() { return self.env.rulesEngine.resolveModule.apply(self.env.rulesEngine, arguments); },
url: function() { return self.env.rulesEngine.resolveFile.apply(self.env.rulesEngine, arguments); }
},
commFlowCommunicator = {
get: function() { return self.env.communicator.get.apply(self.env.communicator, arguments); }
},
addComm = function(fn) {
commFlow.seq(function(next, error, contents) {
function onData(err, data) {
next(null, data);
}
function onError(err) {
next(err, contents);
}
try {
fn(onData, contents, commFlowResolver, commFlowCommunicator, {
moduleId: nodeData.originalId,
parentId: (parentData) ? parentData.originalId : '',
parentUrl: (parentData) ? parentData.resolvedUrl : ''
});
}
catch(e) {
onError(e);
}
});
};
// for non-AMD modules, if the module is already resolved, return an empty string
// which will cause the communicator to exit early and apply content rules if required
// for AMD modules, we re-fetch every time due to the nature of dynamic modules
if (nodeData.resolvedId.indexOf('!') === -1) {
// is this module already available? If so, don't redownload. This happens often when
// there was an inline define() on the page
if (this.env.executor.getModule(nodeData.resolvedId)) {
return function(a, b, cb) {
cb('');
| {
var lQueue = queue;
queue = [];
dirty = false;
fn = lQueue.shift();
while (fn) {
fn();
fn = lQueue.shift();
}
} | identifier_body |
|
treerunner.js | dirty = false;
fn = lQueue.shift();
while (fn) {
fn();
fn = lQueue.shift();
}
}
function nextTick (fn) {
queue.push(fn);
if (dirty) return;
dirty = true;
trigger();
}
if (hasPostMessage) {
trigger = function () { window.postMessage(messageName, '*'); };
processQueue = function (event) {
if (event.source == window && event.data === messageName) {
if (event.stopPropagation) {
event.stopPropagation();
}
else {
event.returnValue = false;
}
flushQueue();
}
};
nextTick.listener = addListener(window, 'message', processQueue, true);
}
else {
trigger = function () { window.setTimeout(function () { processQueue(); }, 0); };
processQueue = flushQueue;
}
nextTick.removeListener = function () {
removeListener(window, 'message', processQueue, true);
};
return nextTick;
}());
return {
/**
* Construct a Tree Runner Object
* A tree runner, given a node, is responsible for the download and execution
* of the root node and any children it encounters.
* @constructs TreeRunner
* @param {Object} env - The context to run in
* @param {TreeNode} root - a Tree Node at the root of this tree
*/
init: function(env, root) {
this.env = env;
this.root = root;
},
/**
* Downloads the tree, starting from this node, and spanning into its children
* @method TreeRunner#download
* @public
* @param {Function} downloadComplete - a callback executed when the download is done
*/
download: function(downloadComplete) {
var root = this.root,
self = this,
rootData = root.data,
rootParent = root.getParent(),
communicatorGet;
// given original id & parent resolved id, create resolved id
// given resolved id & parent resolved url, create resolved url
// build a communicator
// communicator download (async)
// -- on complete (file)
// -- transform the contents (rules)
// -- assign file to child
// -- extract requires
// -- for each child, create children, up the children count by 1
// -- in a next-tick, create a new TreeDownloader at the new child (async)
// -- -- on complete, decrement children count by 1
// -- -- when children count hits 0, call downloadComplete()
if (rootParent) {
rootData.resolvedId = this.env.rulesEngine.resolveModule(rootData.originalId, rootParent.data.resolvedId);
}
else {
rootData.resolvedId = this.env.rulesEngine.resolveModule(rootData.originalId, '');
}
rootData.resolvedUrl = this.env.rulesEngine.resolveFile(rootData.resolvedId);
// Build a communicator.
communicatorGet = this.buildCommunicator(root);
// Download the file via communicator, get back contents
communicatorGet(rootData.originalId, rootData.resolvedUrl, function(content) {
// build a flow control to adjust the contents based on rules
var pointcuts = self.env.rulesEngine.getContentRules(rootData.resolvedUrl),
contentFlow = new Flow(),
i = 0,
len = pointcuts.length;
addContent = function(fn) {
contentFlow.seq(function (next, error, contents) {
try {
fn(function(data) {
next(null, data);
}, contents);
}
catch(e) {
next(e, contents);
}
});
};
contentFlow.seq(function (next) {
next(null, content);
});
for (i; i < len; i++) {
addContent(pointcuts[i]);
}
contentFlow.seq(function (next, error, contents) {
var circular = false,
searchIndex = {},
parent = rootParent,
module,
qualifiedId;
if (typeof contents === 'string') {
rootData.file = contents;
}
else {
rootData.exports = contents;
}
// determine if this is circular
searchIndex[rootData.originalId] = true;
while(parent && !circular) {
if (searchIndex[parent.data.originalId]) {
circular = true;
}
else {
searchIndex[parent.data.originalId] = true;
parent = parent.getParent();
}
}
rootData.circular = circular;
// kick off its children
if (rootData.exports) {
// when there are exports available, then we prematurely resolve this module
// this can happen when the an external rule for the communicator has resolved
// the export object for us
module = self.env.executor.createModule(rootData.resolvedId, self.env.requireContext.qualifiedId(root), rootData.resolvedUrl);
module.exec = true;
module.exports = contents;
downloadComplete();
}
else if (rootData.circular) {
// circular nodes do not need to download their children (again)
downloadComplete();
}
else {
// Analyze the file for depenencies and kick off a child download for each one.
self.downloadDependencies(root, proxy(downloadComplete, self));
}
});
});
},
/**
* Executes a tree, starting from the root node
* In order to ensure a tree has all of its dependencies available
* a post-order traversal is used
* http://en.wikipedia.org/wiki/Tree_traversal#Post-order
* This loads Bottom-To-Top, Left-to-Right
* @method TreeRunner#execute
* @public
* @param {Function} executeComplete - a callback function ran when all execution is done
*/
execute: function(executeComplete) {
var nodes = this.root.postOrder(),
self = this,
len = nodes.length,
i = 0,
runNode = function(node) {
var nodeData = node.data,
module,
result;
if (!nodeData.resolvedId) {
return;
}
// executor: create a module
// if not circular, executor: run module (otherwise, the circular reference begins as empty exports)
module = self.env.executor.createModule(nodeData.resolvedId, self.env.requireContext.qualifiedId(node), nodeData.resolvedUrl);
nodeData.module = module;
if (module.exec) {
return;
}
if (!nodeData.circular) {
if (nodeData.exports) {
// exports came pre set
module.exports = nodeData.exports;
module.exec = true;
}
else if (typeof nodeData.file === 'string') {
self.env.executor.runModule(module, nodeData.file);
module.exec = true;
// if this is an AMD module, it's exports are coming from define()
if (!module.amd) {
nodeData.exports = module.exports;
}
}
}
};
for (i; i < len; i++) {
runNode(nodes[i]);
}
executeComplete();
},
/**
* Build a communcator function. If there are fetch rules, create a flow control
* to handle communication (as opposed to the internal communicator).
*
* @private
* @param {TreeNode} node The TreeNode you're building the communicator for.
* @return {Function} The built communicator method.
*/
buildCommunicator: function(node) {
var nodeData = node.data,
self = this,
parentData = node.getParent() ? node.getParent().data : null,
fetchRules = this.env.rulesEngine.getFetchRules(nodeData.resolvedId),
commFlow = new Flow(),
commFlowResolver = {
module: function() { return self.env.rulesEngine.resolveModule.apply(self.env.rulesEngine, arguments); },
url: function() { return self.env.rulesEngine.resolveFile.apply(self.env.rulesEngine, arguments); }
},
commFlowCommunicator = {
get: function() { return self.env.communicator.get.apply(self.env.communicator, arguments); }
},
addComm = function(fn) {
commFlow.seq(function(next, error, contents) {
function onData(err, data) {
next(null, data);
}
function | (err) {
next(err, contents);
}
try {
fn(onData, contents, commFlowResolver, commFlowCommunicator, {
moduleId: nodeData.originalId,
parentId: (parentData) ? parentData.originalId : '',
parentUrl: (parentData) ? parentData.resolvedUrl : ''
});
}
catch(e) {
onError(e);
}
});
};
// for non-AMD modules, if the module is already resolved, return an empty string
// which will cause the communicator to exit early and apply content rules if required
// for AMD modules, we re-fetch every time due to the nature of dynamic modules
if (nodeData.resolvedId.indexOf('!') === -1) {
// is this module already available? If so, don't redownload. This happens often when
// there was an inline define() on the page
if (this.env.executor.getModule(nodeData.resolvedId)) {
return function(a, b, cb) {
cb('');
};
}
else if (this.env.executor.get | onError | identifier_name |
credit_pipe.py |
#converts a string that is camelCase into snake_case
#https://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case
def camel_case(column_name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', column_name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
#Give data with specific column
def histogram(data_frame):
sns.distplot(data_frame)
plt.show()
#Given specific column or row, returns statistical summary
def summary(data_frame):
return data_frame.describe()
#Creating a correlation heat map from data set where var_name is the
#variable which has the most correlation
def cor_heat(data_frame,var_name):
corrmat = data_frame.corr()
k = 12
cols = corrmat.nlargest(k, var_name)[var_name].index
cm = np.corrcoef(data_frame[cols].values.T)
sns.set(font_scale=1.25)
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)
plt.show()
#Scatter plots of desired variables in list
def plotCorr(dataFrame, list):
sns.set()
sns.pairplot(dataFrame[list], size = 2.5)
return plt.show()
#Shows data is missing, we should delete the corresponding variable and pretend it never existed - threshold as parameter
def miss_data(data_frame):
total = data_frame.isnull().sum().sort_values(ascending=False)
percent = (data_frame.isnull().sum()/data_frame.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
#return missing_data.head(20)
return missing_data
#Dealing with missing data
def clean_miss(data_frame):
missing_data = miss_data(data_frame)
data_frame = data_frame.drop((missing_data[missing_data['Total'] > 1]).index,1)
data_frame.isnull().sum().max() #just checking that there's no missing data missing...
return data_frame
#Univariate analysis - scaling data, prints out low range and high range
def scale(data_frame, var_scale):
data_scaled = StandardScaler().fit_transform(data_frame[var_scale][:,np.newaxis]);
low_range = data_scaled[data_scaled[:,0].argsort()][:10]
high_range= data_scaled[data_scaled[:,0].argsort()][-10:]
print('outer range (low) of the distribution:')
print(low_range)
print('\nouter range (high) of the distribution:')
print(high_range)
#Bivariate analysis
def bivariate(data_frame, var_1,var_2):
varx = var_1
vary = var_2
data = pd.concat([data_frame[varx], data_frame[vary]], axis=1)
data.plot.scatter(x=varx, y=vary, ylim=(0,100));
plt.show()
#histogram and normal probability plot
def norm_plot(data_frame,var_name):
sns.distplot(data_frame[var_name], fit=norm);
fig = plt.figure()
res = stats.probplot((data_frame)[var_name], plot=plt)
plt.show()
#Fill in empty values
def fill_empty(data_frame,var, new_var):
return data_frame[var].fillna(new_var)
#Discretize continuous variables
def descretize(data_frame, var, num):
return pd.cut(data_frame[var],num,retbins=True)
#Creating dummy variables from categorical variables
def dummy_var(data_frame, var):
return pd.get_dummies(data_frame[var])
#Creating dictionary with no repeated column items
def column_dic(data_frame):
dict = {line[:1]:line[1:].split()[0] for line in data_frame}
print (dict)
#Logistic regression = iv, independent variable, var_list - dependent variables
def logReg(data_frame, IV, var_list):
#organizing variable list to independent and dependent variables
#taking care of hyphen if first word contains it
if '-' in var_list[0]:
formula = IV + "~"+'Q("'+var_list[0]+'")'
else:
formula = IV + "~"+var_list[0]
#taking care of the rest of the potential hyphens
for i in range(1, len(var_list)):
if '-' in var_list[i]:
formula = formula + "+"+'Q("'+var_list[i]+'")'
else:
formula = formula + "+"+ var_list[i]
y, X = dmatrices(formula,data_frame, return_type="dataframe")
y = np.ravel(y)
model = LogisticRegression()
model = model.fit(X, y)
print (pd.DataFrame(list(zip(X.columns, np.transpose(model.coef_)))))
return model.score(X,y)
#Nearest Neighbors -
def knearest(data_frame,train, test):
#data_frame = data_frame.reshape(-1,1)
X = data_frame[train].reshape(-1,1)
Y = data_frame[test].reshape(-1,1)
X_train = X[:100]
Y_train = Y[:100]
X_validate = X[100:]
Y_validate = Y[100:]
neighbor = KNeighborsClassifier(n_neighbors = 2, weights ='uniform')
neighbor.fit(X_train, Y_train)
predicted = neighbor.predict(X_validate)
print (predicted)
def merging_data(dataframe_1,dataframe_2):
return pd.merge(dataframe_1,dataframe_2)
def merging_data2(dataframe_1,dataframe_2):
dataframe_1['fully_funded'] = 1
return dataframe_1
def get_combos(param_grid_dict):
all = sorted(param_grid_dict)
all_combos=[]
combinations = it.product(*(param_grid_dict[Name] for Name in all))
for i in combinations:
lil_combo = {}
for iter,key in enumerate(all):
lil_combo[key] = i[iter]
all_combos.append(lil_combo)
return (all_combos)
#change items into binary columns
def to_binary(df,array_col):
for i in array_col:
print(i)
#df[i] = df[i].apply(lambda x: 1 if x == 't' else (0 if x =='f' else np.nan))
df[i] = df[i].apply(lambda x: 1 if x == 't' else 0)
return df
#analyzing results from classifiers
def get_metrics(y_pred, val_Y):
metric_results ={}
#predicting the majority class
ones = np.sum(val_Y)['SeriousDlqin2yrs']/float(len(val_Y))
zeros = 1-ones
try:
metric_results['baseline'] = max(ones, zeros)
except:
pdb.set_trace()
if ones > zeros:
metric_results['precision_base'] = precision_score(val_Y, np.ones(len(val_Y)))
metric_results['recall_base'] = recall_score(val_Y,np.ones[len(val_Y)])
else:
metric_results['precision_base'] = precision_score(val_Y, np.zeros(len(val_Y)))
metric_results['recall_base'] = recall_score(val_Y,np.zeros(len(val_Y)))
#loss = f1_score(y_pred,val_Y)
perf_metrics = [.01,.02,.05,.10,.20,.30,.50]
for i in perf_metrics:
#pdb.set_trace()
print("Recall AT \n")
print(recall_at_k(val_Y, y_pred, i))
#metric_results["precision at" + str([i])] = precision_score(val_Y, y_pred > 1 - i)
metric_results["precision at" + str([i])] = precision_at_k(val_Y, y_pred, i)
metric_results["recall at" + str([i])] = recall_score(val_Y, y_pred> 1 - i)
metric_results["F1 at" + str([i])] = f1_score(val_Y, y_pred > 1 - i)
metric_results["ROC"] = roc_auc_score(val_Y, y_pred)
prec,rec,thresh = precision_recall_curve(val_Y, y_pred)
metric_results["PREC"] = prec.tolist()
metric_results["REC"] = rec.tolist()
metric_results["THRESH"] = thresh.tolist()
#out.write(metric_results)
return (metric_results)
def recall_at_k(y_true, y_scores, k):
#y_scores_sorted, y_true_sorted = zip(*sorted(zip(y_scores, y_true), reverse=True))
y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))
preds_at_k = generate_binary_at_k(y_scores_sorted, k)
#precision, _, _, _ = metrics.precision_recall_fscore_support(y_true, preds_at_k)
#precision = precision[1] # only interested in precision for label 1
recall = recall_score(y_true_sorted, preds_at_k)
return recall
def joint_sort_descending(l1, l2):
# l1 and l2 have to | return pd.read_csv(csv_file,nrows=nrows) | identifier_body |
|
credit_pipe.py | aling with missing data
def clean_miss(data_frame):
missing_data = miss_data(data_frame)
data_frame = data_frame.drop((missing_data[missing_data['Total'] > 1]).index,1)
data_frame.isnull().sum().max() #just checking that there's no missing data missing...
return data_frame
#Univariate analysis - scaling data, prints out low range and high range
def | (data_frame, var_scale):
data_scaled = StandardScaler().fit_transform(data_frame[var_scale][:,np.newaxis]);
low_range = data_scaled[data_scaled[:,0].argsort()][:10]
high_range= data_scaled[data_scaled[:,0].argsort()][-10:]
print('outer range (low) of the distribution:')
print(low_range)
print('\nouter range (high) of the distribution:')
print(high_range)
#Bivariate analysis
def bivariate(data_frame, var_1,var_2):
varx = var_1
vary = var_2
data = pd.concat([data_frame[varx], data_frame[vary]], axis=1)
data.plot.scatter(x=varx, y=vary, ylim=(0,100));
plt.show()
#histogram and normal probability plot
def norm_plot(data_frame,var_name):
sns.distplot(data_frame[var_name], fit=norm);
fig = plt.figure()
res = stats.probplot((data_frame)[var_name], plot=plt)
plt.show()
#Fill in empty values
def fill_empty(data_frame,var, new_var):
return data_frame[var].fillna(new_var)
#Discretize continuous variables
def descretize(data_frame, var, num):
return pd.cut(data_frame[var],num,retbins=True)
#Creating dummy variables from categorical variables
def dummy_var(data_frame, var):
return pd.get_dummies(data_frame[var])
#Creating dictionary with no repeated column items
def column_dic(data_frame):
dict = {line[:1]:line[1:].split()[0] for line in data_frame}
print (dict)
#Logistic regression = iv, independent variable, var_list - dependent variables
def logReg(data_frame, IV, var_list):
#organizing variable list to independent and dependent variables
#taking care of hyphen if first word contains it
if '-' in var_list[0]:
formula = IV + "~"+'Q("'+var_list[0]+'")'
else:
formula = IV + "~"+var_list[0]
#taking care of the rest of the potential hyphens
for i in range(1, len(var_list)):
if '-' in var_list[i]:
formula = formula + "+"+'Q("'+var_list[i]+'")'
else:
formula = formula + "+"+ var_list[i]
y, X = dmatrices(formula,data_frame, return_type="dataframe")
y = np.ravel(y)
model = LogisticRegression()
model = model.fit(X, y)
print (pd.DataFrame(list(zip(X.columns, np.transpose(model.coef_)))))
return model.score(X,y)
#Nearest Neighbors -
def knearest(data_frame,train, test):
#data_frame = data_frame.reshape(-1,1)
X = data_frame[train].reshape(-1,1)
Y = data_frame[test].reshape(-1,1)
X_train = X[:100]
Y_train = Y[:100]
X_validate = X[100:]
Y_validate = Y[100:]
neighbor = KNeighborsClassifier(n_neighbors = 2, weights ='uniform')
neighbor.fit(X_train, Y_train)
predicted = neighbor.predict(X_validate)
print (predicted)
def merging_data(dataframe_1,dataframe_2):
return pd.merge(dataframe_1,dataframe_2)
def merging_data2(dataframe_1,dataframe_2):
dataframe_1['fully_funded'] = 1
return dataframe_1
def get_combos(param_grid_dict):
all = sorted(param_grid_dict)
all_combos=[]
combinations = it.product(*(param_grid_dict[Name] for Name in all))
for i in combinations:
lil_combo = {}
for iter,key in enumerate(all):
lil_combo[key] = i[iter]
all_combos.append(lil_combo)
return (all_combos)
#change items into binary columns
def to_binary(df,array_col):
for i in array_col:
print(i)
#df[i] = df[i].apply(lambda x: 1 if x == 't' else (0 if x =='f' else np.nan))
df[i] = df[i].apply(lambda x: 1 if x == 't' else 0)
return df
#analyzing results from classifiers
def get_metrics(y_pred, val_Y):
metric_results ={}
#predicting the majority class
ones = np.sum(val_Y)['SeriousDlqin2yrs']/float(len(val_Y))
zeros = 1-ones
try:
metric_results['baseline'] = max(ones, zeros)
except:
pdb.set_trace()
if ones > zeros:
metric_results['precision_base'] = precision_score(val_Y, np.ones(len(val_Y)))
metric_results['recall_base'] = recall_score(val_Y,np.ones[len(val_Y)])
else:
metric_results['precision_base'] = precision_score(val_Y, np.zeros(len(val_Y)))
metric_results['recall_base'] = recall_score(val_Y,np.zeros(len(val_Y)))
#loss = f1_score(y_pred,val_Y)
perf_metrics = [.01,.02,.05,.10,.20,.30,.50]
for i in perf_metrics:
#pdb.set_trace()
print("Recall AT \n")
print(recall_at_k(val_Y, y_pred, i))
#metric_results["precision at" + str([i])] = precision_score(val_Y, y_pred > 1 - i)
metric_results["precision at" + str([i])] = precision_at_k(val_Y, y_pred, i)
metric_results["recall at" + str([i])] = recall_score(val_Y, y_pred> 1 - i)
metric_results["F1 at" + str([i])] = f1_score(val_Y, y_pred > 1 - i)
metric_results["ROC"] = roc_auc_score(val_Y, y_pred)
prec,rec,thresh = precision_recall_curve(val_Y, y_pred)
metric_results["PREC"] = prec.tolist()
metric_results["REC"] = rec.tolist()
metric_results["THRESH"] = thresh.tolist()
#out.write(metric_results)
return (metric_results)
def recall_at_k(y_true, y_scores, k):
#y_scores_sorted, y_true_sorted = zip(*sorted(zip(y_scores, y_true), reverse=True))
y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))
preds_at_k = generate_binary_at_k(y_scores_sorted, k)
#precision, _, _, _ = metrics.precision_recall_fscore_support(y_true, preds_at_k)
#precision = precision[1] # only interested in precision for label 1
recall = recall_score(y_true_sorted, preds_at_k)
return recall
def joint_sort_descending(l1, l2):
# l1 and l2 have to be numpy arrays
idx = np.argsort(l1)[::-1]
return l1[idx], l2[idx]
def generate_binary_at_k(y_scores, k):
cutoff_index = int(len(y_scores) * (k / 100.0))
predictions_binary = [1 if x < cutoff_index else 0 for x in range(len(y_scores))]
return predictions_binary
def precision_at_k(y_true, y_scores, k):
#y_scores_sorted, y_true_sorted = zip(*sorted(zip(y_scores, y_true), reverse=True))
y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))
preds_at_k = generate_binary_at_k(y_scores_sorted, k)
#precision, _, _, _ = metrics.precision_recall_fscore_support(y_true, preds_at_k)
#precision = precision[1] # only interested in precision for label 1
precision = precision_score(y_true_sorted, preds_at_k)
return precision
#plotting precisison and recal graphs, input one column for y_pred in class_comp method
def plot_precision_recall(val_Y,y_pred,model_name,output_type):
#pdb.set_trace()
prec,rec,thresh = precision_recall_curve(val_Y, y_pred)
prec = prec[:-1]
recall_curve = rec[:-1]
pct_above_per_thresh = []
number_scored = len(y_pred)
for value in thresh:
num_above_thresh = len(y_pred[y_pred>=value])
pct_above_thresh = num_above_thresh / float(len(y_pred))
if pct_above_thresh <= 1:
pct_above_per_thresh.append(pct_above_thresh)
else:
raise Exception
pct_above_per_thresh = np.array(pct_above_per_thresh)
plt.clf()
fig, ax1 = plt.subplots()
ax1.plot(pct_above_per_thresh, prec, 'b')
print("PLOTTING STUFF")
print(pct_above_per_thresh)
print(prec[:-1])
ax1.set_xlabel('percent of population')
ax1.set_ylabel('precision', color='b')
ax2 | scale | identifier_name |
credit_pipe.py | the rest of the potential hyphens
for i in range(1, len(var_list)):
if '-' in var_list[i]:
formula = formula + "+"+'Q("'+var_list[i]+'")'
else:
formula = formula + "+"+ var_list[i]
y, X = dmatrices(formula,data_frame, return_type="dataframe")
y = np.ravel(y)
model = LogisticRegression()
model = model.fit(X, y)
print (pd.DataFrame(list(zip(X.columns, np.transpose(model.coef_)))))
return model.score(X,y)
#Nearest Neighbors -
def knearest(data_frame,train, test):
#data_frame = data_frame.reshape(-1,1)
X = data_frame[train].reshape(-1,1)
Y = data_frame[test].reshape(-1,1)
X_train = X[:100]
Y_train = Y[:100]
X_validate = X[100:]
Y_validate = Y[100:]
neighbor = KNeighborsClassifier(n_neighbors = 2, weights ='uniform')
neighbor.fit(X_train, Y_train)
predicted = neighbor.predict(X_validate)
print (predicted)
def merging_data(dataframe_1,dataframe_2):
return pd.merge(dataframe_1,dataframe_2)
def merging_data2(dataframe_1,dataframe_2):
dataframe_1['fully_funded'] = 1
return dataframe_1
def get_combos(param_grid_dict):
all = sorted(param_grid_dict)
all_combos=[]
combinations = it.product(*(param_grid_dict[Name] for Name in all))
for i in combinations:
lil_combo = {}
for iter,key in enumerate(all):
lil_combo[key] = i[iter]
all_combos.append(lil_combo)
return (all_combos)
#change items into binary columns
def to_binary(df,array_col):
for i in array_col:
print(i)
#df[i] = df[i].apply(lambda x: 1 if x == 't' else (0 if x =='f' else np.nan))
df[i] = df[i].apply(lambda x: 1 if x == 't' else 0)
return df
#analyzing results from classifiers
def get_metrics(y_pred, val_Y):
metric_results ={}
#predicting the majority class
ones = np.sum(val_Y)['SeriousDlqin2yrs']/float(len(val_Y))
zeros = 1-ones
try:
metric_results['baseline'] = max(ones, zeros)
except:
pdb.set_trace()
if ones > zeros:
metric_results['precision_base'] = precision_score(val_Y, np.ones(len(val_Y)))
metric_results['recall_base'] = recall_score(val_Y,np.ones[len(val_Y)])
else:
metric_results['precision_base'] = precision_score(val_Y, np.zeros(len(val_Y)))
metric_results['recall_base'] = recall_score(val_Y,np.zeros(len(val_Y)))
#loss = f1_score(y_pred,val_Y)
perf_metrics = [.01,.02,.05,.10,.20,.30,.50]
for i in perf_metrics:
#pdb.set_trace()
print("Recall AT \n")
print(recall_at_k(val_Y, y_pred, i))
#metric_results["precision at" + str([i])] = precision_score(val_Y, y_pred > 1 - i)
metric_results["precision at" + str([i])] = precision_at_k(val_Y, y_pred, i)
metric_results["recall at" + str([i])] = recall_score(val_Y, y_pred> 1 - i)
metric_results["F1 at" + str([i])] = f1_score(val_Y, y_pred > 1 - i)
metric_results["ROC"] = roc_auc_score(val_Y, y_pred)
prec,rec,thresh = precision_recall_curve(val_Y, y_pred)
metric_results["PREC"] = prec.tolist()
metric_results["REC"] = rec.tolist()
metric_results["THRESH"] = thresh.tolist()
#out.write(metric_results)
return (metric_results)
def recall_at_k(y_true, y_scores, k):
#y_scores_sorted, y_true_sorted = zip(*sorted(zip(y_scores, y_true), reverse=True))
y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))
preds_at_k = generate_binary_at_k(y_scores_sorted, k)
#precision, _, _, _ = metrics.precision_recall_fscore_support(y_true, preds_at_k)
#precision = precision[1] # only interested in precision for label 1
recall = recall_score(y_true_sorted, preds_at_k)
return recall
def joint_sort_descending(l1, l2):
# l1 and l2 have to be numpy arrays
idx = np.argsort(l1)[::-1]
return l1[idx], l2[idx]
def generate_binary_at_k(y_scores, k):
cutoff_index = int(len(y_scores) * (k / 100.0))
predictions_binary = [1 if x < cutoff_index else 0 for x in range(len(y_scores))]
return predictions_binary
def precision_at_k(y_true, y_scores, k):
#y_scores_sorted, y_true_sorted = zip(*sorted(zip(y_scores, y_true), reverse=True))
y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))
preds_at_k = generate_binary_at_k(y_scores_sorted, k)
#precision, _, _, _ = metrics.precision_recall_fscore_support(y_true, preds_at_k)
#precision = precision[1] # only interested in precision for label 1
precision = precision_score(y_true_sorted, preds_at_k)
return precision
#plotting precisison and recal graphs, input one column for y_pred in class_comp method
def plot_precision_recall(val_Y,y_pred,model_name,output_type):
#pdb.set_trace()
prec,rec,thresh = precision_recall_curve(val_Y, y_pred)
prec = prec[:-1]
recall_curve = rec[:-1]
pct_above_per_thresh = []
number_scored = len(y_pred)
for value in thresh:
num_above_thresh = len(y_pred[y_pred>=value])
pct_above_thresh = num_above_thresh / float(len(y_pred))
if pct_above_thresh <= 1:
pct_above_per_thresh.append(pct_above_thresh)
else:
raise Exception
pct_above_per_thresh = np.array(pct_above_per_thresh)
plt.clf()
fig, ax1 = plt.subplots()
ax1.plot(pct_above_per_thresh, prec, 'b')
print("PLOTTING STUFF")
print(pct_above_per_thresh)
print(prec[:-1])
ax1.set_xlabel('percent of population')
ax1.set_ylabel('precision', color='b')
ax2 = ax1.twinx()
ax2.plot(pct_above_per_thresh, recall_curve, 'r')
ax2.set_ylabel('recall', color='r')
ax1.set_ylim([0,1])
ax2.set_xlim([0,1])
name = model_name
plt.title(name)
#pdb.set_trace()
if (output_type == 'save'):
plt.savefig(name)
elif (output_type == 'show'):
plt.show()
pdb.set_trace()
else:
plt.show()
pdb.set_trace()
def temp_val(data_frame,target,features):
models_params = {
RandomForestClassifier:{'n_estimators':[100] , 'criterion':['gini','entropy'], 'max_features':['sqrt','log2'] , 'max_depth':[5,10],'n_jobs':[4], 'min_samples_leaf':[10,50,100]},
LogisticRegression: {'C':[10**-1,10**-2,10**-3],'penalty':['l1','l2']},
KNeighborsClassifier:{'n_neighbors':[5,10,25,100], 'p':[1,2,3],'n_jobs':[2]},
DecisionTreeClassifier:{'max_depth': [5,10,15],'min_samples_leaf':[2,5,10]},
GradientBoostingClassifier:{'learning_rate':[.1,.01],'n_estimators':[100] ,'max_features':['sqrt','log2'] , 'max_depth':[1,2,3]},
BaggingClassifier:{'max_samples':[.1,.25,.65], 'n_jobs':[4]},
#SVC:{'kernel':['linear','rbf'],'gamma':[10,1,.1,.01], 'C':[10,1,.1,.01], 'probability':[True]}
}
# start time of our data
#start_time = '2002-09-13'
start_time_date = data_frame['date_posted'].min()
#last date of data including labels and outcomes that we have
#end_time = '2014-05-12'
end_time_date = data_frame['date_posted'].max()
#how far out do we want to predict (let's say in months for now)
prediction_windows = [1]
#how often is this prediction being made? every day? every month? once a year? | update_window = 12 | random_line_split |
|
credit_pipe.py | dataframe_1
def get_combos(param_grid_dict):
all = sorted(param_grid_dict)
all_combos=[]
combinations = it.product(*(param_grid_dict[Name] for Name in all))
for i in combinations:
lil_combo = {}
for iter,key in enumerate(all):
lil_combo[key] = i[iter]
all_combos.append(lil_combo)
return (all_combos)
#change items into binary columns
def to_binary(df,array_col):
for i in array_col:
print(i)
#df[i] = df[i].apply(lambda x: 1 if x == 't' else (0 if x =='f' else np.nan))
df[i] = df[i].apply(lambda x: 1 if x == 't' else 0)
return df
#analyzing results from classifiers
def get_metrics(y_pred, val_Y):
metric_results ={}
#predicting the majority class
ones = np.sum(val_Y)['SeriousDlqin2yrs']/float(len(val_Y))
zeros = 1-ones
try:
metric_results['baseline'] = max(ones, zeros)
except:
pdb.set_trace()
if ones > zeros:
metric_results['precision_base'] = precision_score(val_Y, np.ones(len(val_Y)))
metric_results['recall_base'] = recall_score(val_Y,np.ones[len(val_Y)])
else:
metric_results['precision_base'] = precision_score(val_Y, np.zeros(len(val_Y)))
metric_results['recall_base'] = recall_score(val_Y,np.zeros(len(val_Y)))
#loss = f1_score(y_pred,val_Y)
perf_metrics = [.01,.02,.05,.10,.20,.30,.50]
for i in perf_metrics:
#pdb.set_trace()
print("Recall AT \n")
print(recall_at_k(val_Y, y_pred, i))
#metric_results["precision at" + str([i])] = precision_score(val_Y, y_pred > 1 - i)
metric_results["precision at" + str([i])] = precision_at_k(val_Y, y_pred, i)
metric_results["recall at" + str([i])] = recall_score(val_Y, y_pred> 1 - i)
metric_results["F1 at" + str([i])] = f1_score(val_Y, y_pred > 1 - i)
metric_results["ROC"] = roc_auc_score(val_Y, y_pred)
prec,rec,thresh = precision_recall_curve(val_Y, y_pred)
metric_results["PREC"] = prec.tolist()
metric_results["REC"] = rec.tolist()
metric_results["THRESH"] = thresh.tolist()
#out.write(metric_results)
return (metric_results)
def recall_at_k(y_true, y_scores, k):
#y_scores_sorted, y_true_sorted = zip(*sorted(zip(y_scores, y_true), reverse=True))
y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))
preds_at_k = generate_binary_at_k(y_scores_sorted, k)
#precision, _, _, _ = metrics.precision_recall_fscore_support(y_true, preds_at_k)
#precision = precision[1] # only interested in precision for label 1
recall = recall_score(y_true_sorted, preds_at_k)
return recall
def joint_sort_descending(l1, l2):
# l1 and l2 have to be numpy arrays
idx = np.argsort(l1)[::-1]
return l1[idx], l2[idx]
def generate_binary_at_k(y_scores, k):
cutoff_index = int(len(y_scores) * (k / 100.0))
predictions_binary = [1 if x < cutoff_index else 0 for x in range(len(y_scores))]
return predictions_binary
def precision_at_k(y_true, y_scores, k):
#y_scores_sorted, y_true_sorted = zip(*sorted(zip(y_scores, y_true), reverse=True))
y_scores_sorted, y_true_sorted = joint_sort_descending(np.array(y_scores), np.array(y_true))
preds_at_k = generate_binary_at_k(y_scores_sorted, k)
#precision, _, _, _ = metrics.precision_recall_fscore_support(y_true, preds_at_k)
#precision = precision[1] # only interested in precision for label 1
precision = precision_score(y_true_sorted, preds_at_k)
return precision
#plotting precisison and recal graphs, input one column for y_pred in class_comp method
def plot_precision_recall(val_Y,y_pred,model_name,output_type):
#pdb.set_trace()
prec,rec,thresh = precision_recall_curve(val_Y, y_pred)
prec = prec[:-1]
recall_curve = rec[:-1]
pct_above_per_thresh = []
number_scored = len(y_pred)
for value in thresh:
num_above_thresh = len(y_pred[y_pred>=value])
pct_above_thresh = num_above_thresh / float(len(y_pred))
if pct_above_thresh <= 1:
pct_above_per_thresh.append(pct_above_thresh)
else:
raise Exception
pct_above_per_thresh = np.array(pct_above_per_thresh)
plt.clf()
fig, ax1 = plt.subplots()
ax1.plot(pct_above_per_thresh, prec, 'b')
print("PLOTTING STUFF")
print(pct_above_per_thresh)
print(prec[:-1])
ax1.set_xlabel('percent of population')
ax1.set_ylabel('precision', color='b')
ax2 = ax1.twinx()
ax2.plot(pct_above_per_thresh, recall_curve, 'r')
ax2.set_ylabel('recall', color='r')
ax1.set_ylim([0,1])
ax2.set_xlim([0,1])
name = model_name
plt.title(name)
#pdb.set_trace()
if (output_type == 'save'):
plt.savefig(name)
elif (output_type == 'show'):
plt.show()
pdb.set_trace()
else:
plt.show()
pdb.set_trace()
def temp_val(data_frame,target,features):
models_params = {
RandomForestClassifier:{'n_estimators':[100] , 'criterion':['gini','entropy'], 'max_features':['sqrt','log2'] , 'max_depth':[5,10],'n_jobs':[4], 'min_samples_leaf':[10,50,100]},
LogisticRegression: {'C':[10**-1,10**-2,10**-3],'penalty':['l1','l2']},
KNeighborsClassifier:{'n_neighbors':[5,10,25,100], 'p':[1,2,3],'n_jobs':[2]},
DecisionTreeClassifier:{'max_depth': [5,10,15],'min_samples_leaf':[2,5,10]},
GradientBoostingClassifier:{'learning_rate':[.1,.01],'n_estimators':[100] ,'max_features':['sqrt','log2'] , 'max_depth':[1,2,3]},
BaggingClassifier:{'max_samples':[.1,.25,.65], 'n_jobs':[4]},
#SVC:{'kernel':['linear','rbf'],'gamma':[10,1,.1,.01], 'C':[10,1,.1,.01], 'probability':[True]}
}
# start time of our data
#start_time = '2002-09-13'
start_time_date = data_frame['date_posted'].min()
#last date of data including labels and outcomes that we have
#end_time = '2014-05-12'
end_time_date = data_frame['date_posted'].max()
#how far out do we want to predict (let's say in months for now)
prediction_windows = [1]
#how often is this prediction being made? every day? every month? once a year?
update_window = 12
from datetime import date, datetime, timedelta
from dateutil.relativedelta import relativedelta
#start_time_date = datetime.strptime(start_time, '%Y-%m-%d')
#end_time_date = datetime.strptime(end_time, '%Y-%m-%d')
for prediction_window in prediction_windows:
print(start_time_date,end_time_date)
test_end_time = end_time_date
while (test_end_time >= start_time_date + 2 * relativedelta(months=+prediction_window)):
| test_start_time = test_end_time - relativedelta(months=+prediction_window)
train_end_time = test_start_time - relativedelta(days=+1) # minus 1 day
train_start_time = train_end_time - relativedelta(months=+prediction_window)
while (train_start_time >= start_time_date ):
#pdb.set_trace()
print (train_start_time,train_end_time,test_start_time,test_end_time, prediction_window)
train_start_time -= relativedelta(months=+prediction_window)
# call function to get data
train_set, test_set = extract_train_test_sets(train_start_time, train_end_time, test_start_time, test_end_time,data_frame)
#pdb.set_trace()
class_comp(train_set,test_set,target,features,models_params)
# fit on train data
# predict on test data
test_end_time -= relativedelta(months=+update_window) | conditional_block |
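The nested loops in temp_val implement a rolling temporal backtest, and the window bookkeeping is the easiest part to get wrong. The standalone sketch below (dates, window size, and update step are invented for illustration; the inner loop that grows the training window is omitted) prints the train/test splits the outer loop walks through.
# --- illustrative sketch, not part of the original module ---
from datetime import datetime
from dateutil.relativedelta import relativedelta
start = datetime(2013, 1, 1)   # hypothetical first date in the data
end = datetime(2014, 1, 1)     # hypothetical last date in the data
window = 1                     # predict one month ahead
update = 1                     # the original steps back by update_window = 12; 1 keeps the demo short
test_end = end
while test_end >= start + 2 * relativedelta(months=+window):
    test_start = test_end - relativedelta(months=+window)
    train_end = test_start - relativedelta(days=+1)
    train_start = train_end - relativedelta(months=+window)
    print("train", train_start.date(), "->", train_end.date(),
          "| test", test_start.date(), "->", test_end.date())
    test_end -= relativedelta(months=+update)
# --- end sketch ---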
|
blockchain.go |
type Block struct {
Index int
Timestamp int64
Proof int
PreviousHash string
Difficulty string
}
// add a function to the blockchain struct to get the previous block
func (bc *Blockchain) GetPreviousBlock() Block {
return bc.Chain[len(bc.Chain) - 1]
}
// function to print block information, not sure if we'll need long term
func (bc *Blockchain) PrintBlockInfo(index int) {
block := bc.Chain[index]
fmt.Println("Index of the block is " + strconv.Itoa(block.Index))
fmt.Println("Timestamp of the block is " + time.Unix(block.Timestamp, 0).Format(time.UnixDate))
fmt.Println("Proof of the block is " + strconv.Itoa(block.Proof))
fmt.Println("Hash of the previous block is " + block.PreviousHash)
fmt.Println("Hash of the current block is " + bc.HashBlock(block))
fmt.Println("Difficulty of the block is " + block.Difficulty)
fmt.Println("\n\n")
}
// Increment hex value by one lexicographically. Used to adjust difficulty
func hexInc(hash []byte) []byte {
for i := 0; i < len(hash) -1; i++ {
val := hash[i]
if (val == 48) { // this value is a zero
continue
} else {
carry := true
var start int
if (val == 102) { // leave it alone if it's an f
start = i - 1
} else {
start = i
}
for j := start; j >= 0; j-- {
val2 := hash[j]
// a->f
if val2 > 96 {
val2 -= 96-9
} else {
val2 -= 48
}
if carry {
val2 +=1
carry = false
}
if val2 == 16 {
val2 = 0
carry = true
}
if val2 >= 10 {
hash[j] = val2+96-9
} else {
hash[j] = val2+48
}
}
break
}
}
return hash
}
// Decrement the hex value by one lexicographically. Used to adjust difficulty
func hexDec(hash []byte) []byte {
var r = make([]byte, len(hash))
carry := true
for i := 0; i < len(hash); i++ {
val := hash[i]
if (val == 48) |
// a->f
if val > 96 {
val -= 96-9
} else {
val -= 48
}
if carry {
val -=1
carry = false
}
if (val+1) == 0 {
val = 15
carry = true
}
if val >= 10 {
r[i] = val+96-9
} else {
r[i] = val+48
}
}
return r
}
// A function to adjust the difficulty based on the average time between
// the last 720 blocks with 120 outliers removed
func (bc *Blockchain) AdjustDifficulty() string {
// check average time between last 10 blocks
if (len(bc.Chain) <= BLOCK_ADJUSTMENT) {
return bc.Chain[0].Difficulty
} else {
var timestamps []int64
for i := len(bc.Chain) - 1; i > len(bc.Chain) - BLOCK_ADJUSTMENT; i-- {
if (i > 0) {
timestamps = append(timestamps, bc.Chain[i].Timestamp - bc.Chain[i-1].Timestamp)
}
}
// Take out the highest and lowest OUTLIER_NUM timestamps
for i := 0; i < NUM_OUTLIERS; i++ {
// identify the highest and lowest
var min int64 = 99999999
var max int64 = -1
var max_index int = -1
var min_index int = -1
for j:= 0; j < len(timestamps); j++ {
if timestamps[j] > max {
max = timestamps[j]
max_index = j
}
if timestamps[j] < min {
min = timestamps[j]
min_index = j
}
}
// delete the min and max
timestamps[min_index] = timestamps[len(timestamps) - 1] //move min to last element
timestamps[max_index] = timestamps[len(timestamps) - 2] //move max to second to last element
timestamps = timestamps[:len(timestamps) - 2] //truncate
}
// calculate the average after taking out the outliers
var running_total int64 = 0
for j := 0; j < len(timestamps); j++ {
running_total = running_total + timestamps[j]
}
average := running_total / int64(len(timestamps))
b := []byte(bc.Chain[len(bc.Chain) - 1].Difficulty)
// either increase or decrease the difficulty based on the average
if (average > BLOCK_TIME) {
return string(hexInc(b))
} else {
return string(hexDec(b))
}
}
}
// add a function to the blockchain struct to add a new block
func (bc *Blockchain) AddBlock() {
newBlock := new(Block)
newBlock.Proof, newBlock.Timestamp = bc.ProofOfWork()
//newBlock.Timestamp = time.Now().Unix()
newBlock.Index = len(bc.Chain)
newBlock.PreviousHash = bc.HashBlock(bc.Chain[len(bc.Chain) - 1])
newBlock.Difficulty = bc.AdjustDifficulty()
bc.BlockMutex.Lock()
bc.Chain = append(bc.Chain, *newBlock)
bc.BlockMutex.Unlock()
}
// add a function to the blockchain struct to create a hash
func (bc *Blockchain) HashBlock(block Block) string {
var hash = sha256.New()
hash.Write([]byte(strconv.Itoa(block.Index) +
time.Unix(block.Timestamp, 0).Format(time.UnixDate) +
strconv.Itoa(block.Proof) +
block.PreviousHash +
block.Difficulty))
hashed := hash.Sum(nil)
return hex.EncodeToString(hashed)
}
// a function to perform proof of work calculation and return a hash string
func (bc *Blockchain) ProofOfWorkCalc(proof int, previous_proof int, Timestamp int64) string {
// calculate the proof of work function
var hash_PoW = sha256.New()
result := (proof * proof) - (previous_proof * previous_proof) - int(Timestamp)
hash_PoW.Write([]byte(strconv.Itoa(result)))
hashed_PoW := hash_PoW.Sum(nil)
result_hash := hex.EncodeToString(hashed_PoW)
return result_hash
}
// The core mining function, tries random numbers until finding a golden hash
func (bc *Blockchain) ProofOfWork() (int, int64) {
rand.Seed(time.Now().UnixNano())
var r int
var Timestamp int64
r = rand.Intn(2147483647)
for true {
Timestamp = time.Now().Unix()
previous_proof := bc.Chain[len(bc.Chain) - 1].Proof
result_hash := bc.ProofOfWorkCalc(r, previous_proof, Timestamp)
if strings.Compare(result_hash, bc.Chain[len(bc.Chain) - 1].Difficulty) < 1 {
break
}
r++
}
return r, Timestamp
}
// A function to use channels to send the blockchain height to the node package
func (bc *Blockchain) SendHeight() {
for true {
i := <-bc.HeightChannel
if i == 0 {
bc.HeightChannel <-len(bc.Chain)
}
}
}
// A function to use channels to send a block to the node package
func (bc *Blockchain) SendBlocks() {
for true {
i := <-bc.BlockIndexChannel
if i < len(bc.Chain) && i >= 0 {
bc.GetBlockChannel <-bc.Chain[i]
} else {
// make an "error" block
respBlock := Block {
Index: -1,
}
bc.GetBlockChannel <-respBlock
}
}
}
// A function to receive a new block from the node package
func (bc *Blockchain) AddRemoteBlocks() {
for true {
// listen for a block from the node goroutine
newBlock := <-bc.AddBlockChannel
bc.BlockMutex.Lock()
bc.Chain = append(bc.Chain, newBlock)
bc.BlockMutex.Unlock()
fmt.Println("Another miner found block " + strconv.Itoa(len(bc.Chain)))
if !bc.ValidateChain() {
// the new block is invalid, delete it
bc.BlockMutex.Lock()
bc.Chain = bc.Chain[:len(bc.Chain) - 1]
bc.BlockMutex.Unlock()
// let the node package know that the block was rejected
bc.BlockValidateChannel <- false
} else {
bc.BlockValidateChannel <- true
}
}
}
//add function to validate blockchain
func (bc *Blockchain) | {
r[i] = val
continue
} | conditional_block |
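The mining rule in blockchain.go above is compact but easy to misread. As a cross-check, here is the same condition restated in a small Python sketch (the difficulty target and previous proof are made up for illustration): hash the decimal string of proof² − previous_proof² − timestamp with SHA-256 and accept the proof when the hex digest is lexicographically ≤ the difficulty string, which is what ProofOfWorkCalc and ProofOfWork do.
# --- illustrative sketch, not part of the original repository ---
import hashlib
import time
def pow_hash(proof, previous_proof, timestamp):
    # mirrors ProofOfWorkCalc: sha256 over the decimal string of p^2 - prev^2 - t
    value = proof * proof - previous_proof * previous_proof - int(timestamp)
    return hashlib.sha256(str(value).encode()).hexdigest()
difficulty = "0001" + "f" * 60   # made-up 64-char target; a smaller string means a harder target
previous_proof = 12345
proof = 0
while True:
    ts = int(time.time())
    if pow_hash(proof, previous_proof, ts) <= difficulty:   # strings.Compare(...) < 1 in the Go code
        break
    proof += 1
print("found proof", proof)
# --- end sketch ---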
blockchain.go |
type Block struct {
Index int
Timestamp int64
Proof int
PreviousHash string
Difficulty string
}
// add a function to the blockchain struct to get the previous block
func (bc *Blockchain) GetPreviousBlock() Block {
return bc.Chain[len(bc.Chain) - 1]
}
// function to print block information, not sure if we'll need long term
func (bc *Blockchain) PrintBlockInfo(index int) {
block := bc.Chain[index]
fmt.Println("Index of the block is " + strconv.Itoa(block.Index))
fmt.Println("Timestamp of the block is " + time.Unix(block.Timestamp, 0).Format(time.UnixDate))
fmt.Println("Proof of the block is " + strconv.Itoa(block.Proof))
fmt.Println("Hash of the previous block is " + block.PreviousHash)
fmt.Println("Hash of the current block is " + bc.HashBlock(block))
fmt.Println("Difficulty of the block is " + block.Difficulty)
fmt.Println("\n\n")
}
// Increment hex value by one lexicographically. Used to adjust difficulty
func hexInc(hash []byte) []byte {
for i := 0; i < len(hash) -1; i++ {
val := hash[i]
if (val == 48) { // this value is a zero
continue
} else {
carry := true
var start int
if (val == 102) { // leave it alone if it's an f
start = i - 1
} else {
start = i
}
for j := start; j >= 0; j-- {
val2 := hash[j]
// a->f
if val2 > 96 {
val2 -= 96-9
} else {
val2 -= 48
}
if carry {
val2 +=1
carry = false
}
if val2 == 16 {
val2 = 0
carry = true
}
if val2 >= 10 {
hash[j] = val2+96-9
} else {
hash[j] = val2+48
}
}
break
}
}
return hash
}
// Decrement the hex value by one lexicographically. Used to adjust difficulty
func hexDec(hash []byte) []byte {
var r = make([]byte, len(hash))
carry := true
for i := 0; i < len(hash); i++ {
val := hash[i]
if (val == 48) {
r[i] = val
continue
}
// a->f
if val > 96 {
val -= 96-9
} else {
val -= 48
}
if carry {
val -=1
carry = false
}
if (val+1) == 0 {
val = 15
carry = true
}
if val >= 10 {
r[i] = val+96-9
} else {
r[i] = val+48
}
}
return r
}
// A function to adjust the difficulty based on the average time between
// the last 720 blocks with 120 outliers removed
func (bc *Blockchain) AdjustDifficulty() string {
// check average time between last 10 blocks
if (len(bc.Chain) <= BLOCK_ADJUSTMENT) {
return bc.Chain[0].Difficulty
} else {
var timestamps []int64
for i := len(bc.Chain) - 1; i > len(bc.Chain) - BLOCK_ADJUSTMENT; i-- {
if (i > 0) {
timestamps = append(timestamps, bc.Chain[i].Timestamp - bc.Chain[i-1].Timestamp)
}
}
// Take out the highest and lowest OUTLIER_NUM timestamps
for i := 0; i < NUM_OUTLIERS; i++ {
// identify the highest and lowest
var min int64 = 99999999
var max int64 = -1
var max_index int = -1
var min_index int = -1
for j:= 0; j < len(timestamps); j++ {
if timestamps[j] > max {
max = timestamps[j]
max_index = j
}
if timestamps[j] < min {
min = timestamps[j]
min_index = j
}
}
// delete the min and max
timestamps[min_index] = timestamps[len(timestamps) - 1] //move min to last element
timestamps[max_index] = timestamps[len(timestamps) - 2] //move max to second to last element
timestamps = timestamps[:len(timestamps) - 2] //truncate
}
// calculate the average after taking out the outliers
var running_total int64 = 0
for j := 0; j < len(timestamps); j++ {
running_total = running_total + timestamps[j]
}
average := running_total / int64(len(timestamps))
b := []byte(bc.Chain[len(bc.Chain) - 1].Difficulty)
// either increase or decrease the difficulty based on the average
if (average > BLOCK_TIME) {
return string(hexInc(b))
} else {
return string(hexDec(b))
}
}
}
// add a function to the blockchain struct to add a new block
func (bc *Blockchain) AddBlock() |
// add a function to the blockchain struct to create a hash
func (bc *Blockchain) HashBlock(block Block) string {
var hash = sha256.New()
hash.Write([]byte(strconv.Itoa(block.Index) +
time.Unix(block.Timestamp, 0).Format(time.UnixDate) +
strconv.Itoa(block.Proof) +
block.PreviousHash +
block.Difficulty))
hashed := hash.Sum(nil)
return hex.EncodeToString(hashed)
}
// a function to perform proof of work calculation and return a hash string
func (bc *Blockchain) ProofOfWorkCalc(proof int, previous_proof int, Timestamp int64) string {
// calculate the proof of work function
var hash_PoW = sha256.New()
result := (proof * proof) - (previous_proof * previous_proof) - int(Timestamp)
hash_PoW.Write([]byte(strconv.Itoa(result)))
hashed_PoW := hash_PoW.Sum(nil)
result_hash := hex.EncodeToString(hashed_PoW)
return result_hash
}
// The core mining function, tries random numbers until finding a golden hash
func (bc *Blockchain) ProofOfWork() (int, int64) {
rand.Seed(time.Now().UnixNano())
var r int
var Timestamp int64
r = rand.Intn(2147483647)
for true {
Timestamp = time.Now().Unix()
previous_proof := bc.Chain[len(bc.Chain) - 1].Proof
result_hash := bc.ProofOfWorkCalc(r, previous_proof, Timestamp)
if strings.Compare(result_hash, bc.Chain[len(bc.Chain) - 1].Difficulty) < 1 {
break
}
r++
}
return r, Timestamp
}
// A function to use channels to send the blockchain height to the node package
func (bc *Blockchain) SendHeight() {
for true {
i := <-bc.HeightChannel
if i == 0 {
bc.HeightChannel <-len(bc.Chain)
}
}
}
// A function to use channels to send a block to the node package
func (bc *Blockchain) SendBlocks() {
for true {
i := <-bc.BlockIndexChannel
if i < len(bc.Chain) && i >= 0 {
bc.GetBlockChannel <-bc.Chain[i]
} else {
// make an "error" block
respBlock := Block {
Index: -1,
}
bc.GetBlockChannel <-respBlock
}
}
}
// A function to receive a new block from the node package
func (bc *Blockchain) AddRemoteBlocks() {
for true {
// listen for a block from the node goroutine
newBlock := <-bc.AddBlockChannel
bc.BlockMutex.Lock()
bc.Chain = append(bc.Chain, newBlock)
bc.BlockMutex.Unlock()
fmt.Println("Another miner found block " + strconv.Itoa(len(bc.Chain)))
if !bc.ValidateChain() {
// the new block is invalid, delete it
bc.BlockMutex.Lock()
bc.Chain = bc.Chain[:len(bc.Chain) - 1]
bc.BlockMutex.Unlock()
// let the node package know that the block was rejected
bc.BlockValidateChannel <- false
} else {
bc.BlockValidateChannel <- true
}
}
}
//add function to validate blockchain
func (bc *Blockchain) | {
newBlock := new(Block)
newBlock.Proof, newBlock.Timestamp = bc.ProofOfWork()
//newBlock.Timestamp = time.Now().Unix()
newBlock.Index = len(bc.Chain)
newBlock.PreviousHash = bc.HashBlock(bc.Chain[len(bc.Chain) - 1])
newBlock.Difficulty = bc.AdjustDifficulty()
bc.BlockMutex.Lock()
bc.Chain = append(bc.Chain, *newBlock)
bc.BlockMutex.Unlock()
} | identifier_body |
blockchain.go |
type Block struct {
Index int
Timestamp int64
Proof int
PreviousHash string
Difficulty string
}
// add a function to the blockchain struct to get the previous block
func (bc *Blockchain) GetPreviousBlock() Block {
return bc.Chain[len(bc.Chain) - 1]
}
// function to print block information, not sure if we'll need long term
func (bc *Blockchain) PrintBlockInfo(index int) {
block := bc.Chain[index]
fmt.Println("Index of the block is " + strconv.Itoa(block.Index))
fmt.Println("Timestamp of the block is " + time.Unix(block.Timestamp, 0).Format(time.UnixDate))
fmt.Println("Proof of the block is " + strconv.Itoa(block.Proof))
fmt.Println("Hash of the previous block is " + block.PreviousHash)
fmt.Println("Hash of the current block is " + bc.HashBlock(block))
fmt.Println("Difficulty of the block is " + block.Difficulty)
fmt.Println("\n\n")
}
// Increment hex value by one lexicographically. Used to adjust difficulty
func hexInc(hash []byte) []byte {
for i := 0; i < len(hash) -1; i++ {
val := hash[i]
if (val == 48) { // this value is a zero
continue
} else {
carry := true
var start int
if (val == 102) { // leave it alone if it's an f
start = i - 1
} else {
start = i
}
for j := start; j >= 0; j-- {
val2 := hash[j]
// a->f
if val2 > 96 {
val2 -= 96-9
} else {
val2 -= 48
}
if carry {
val2 +=1
carry = false
}
if val2 == 16 {
val2 = 0
carry = true
}
if val2 >= 10 {
hash[j] = val2+96-9
} else {
hash[j] = val2+48
}
}
break
}
}
return hash
}
// Decrement the hex value by one lexicographically. Used to adjust difficulty
func hexDec(hash []byte) []byte {
var r = make([]byte, len(hash))
carry := true
for i := 0; i < len(hash); i++ {
val := hash[i]
if (val == 48) {
r[i] = val
continue
}
// a->f
if val > 96 {
val -= 96-9
} else {
val -= 48
}
if carry {
val -=1
carry = false
}
if (val+1) == 0 {
val = 15
carry = true
}
if val >= 10 {
r[i] = val+96-9
} else {
r[i] = val+48
}
}
return r
}
// A function to adjust the difficulty based on the average time between
// the last 720 blocks with 120 outliers removed
func (bc *Blockchain) AdjustDifficulty() string {
// check average time between last 10 blocks
if (len(bc.Chain) <= BLOCK_ADJUSTMENT) {
return bc.Chain[0].Difficulty
} else {
var timestamps []int64
for i := len(bc.Chain) - 1; i > len(bc.Chain) - BLOCK_ADJUSTMENT; i-- {
if (i > 0) {
timestamps = append(timestamps, bc.Chain[i].Timestamp - bc.Chain[i-1].Timestamp)
}
}
// Take out the highest and lowest OUTLIER_NUM timestamps
for i := 0; i < NUM_OUTLIERS; i++ {
// identify the highest and lowest
var min int64 = 99999999
var max int64 = -1
var max_index int = -1
var min_index int = -1
for j:= 0; j < len(timestamps); j++ {
if timestamps[j] > max {
max = timestamps[j]
max_index = j
}
if timestamps[j] < min {
min = timestamps[j]
min_index = j
}
}
// delete the min and max
timestamps[min_index] = timestamps[len(timestamps) - 1] //move min to last element
timestamps[max_index] = timestamps[len(timestamps) - 2] //move max to second to last element
timestamps = timestamps[:len(timestamps) - 2] //truncate
}
// calculate the average after taking out the outliers
var running_total int64 = 0
for j := 0; j < len(timestamps); j++ {
running_total = running_total + timestamps[j]
}
average := running_total / int64(len(timestamps))
b := []byte(bc.Chain[len(bc.Chain) - 1].Difficulty)
// either increase or decrease the difficulty based on the average
if (average > BLOCK_TIME) {
return string(hexInc(b))
} else {
return string(hexDec(b))
}
}
}
// add a function to the blockchain struct to add a new block
func (bc *Blockchain) | () {
newBlock := new(Block)
newBlock.Proof, newBlock.Timestamp = bc.ProofOfWork()
//newBlock.Timestamp = time.Now().Unix()
newBlock.Index = len(bc.Chain)
newBlock.PreviousHash = bc.HashBlock(bc.Chain[len(bc.Chain) - 1])
newBlock.Difficulty = bc.AdjustDifficulty()
bc.BlockMutex.Lock()
bc.Chain = append(bc.Chain, *newBlock)
bc.BlockMutex.Unlock()
}
// add a function to the blockchain struct to create a hash
func (bc *Blockchain) HashBlock(block Block) string {
var hash = sha256.New()
hash.Write([]byte(strconv.Itoa(block.Index) +
time.Unix(block.Timestamp, 0).Format(time.UnixDate) +
strconv.Itoa(block.Proof) +
block.PreviousHash +
block.Difficulty))
hashed := hash.Sum(nil)
return hex.EncodeToString(hashed)
}
// a function to perform proof of work calculation and return a hash string
func (bc *Blockchain) ProofOfWorkCalc(proof int, previous_proof int, Timestamp int64) string {
// calculate the proof of work function
var hash_PoW = sha256.New()
result := (proof * proof) - (previous_proof * previous_proof) - int(Timestamp)
hash_PoW.Write([]byte(strconv.Itoa(result)))
hashed_PoW := hash_PoW.Sum(nil)
result_hash := hex.EncodeToString(hashed_PoW)
return result_hash
}
// The core mining function, tries random numbers until finding a golden hash
func (bc *Blockchain) ProofOfWork() (int, int64) {
rand.Seed(time.Now().UnixNano())
var r int
var Timestamp int64
r = rand.Intn(2147483647)
for true {
Timestamp = time.Now().Unix()
previous_proof := bc.Chain[len(bc.Chain) - 1].Proof
result_hash := bc.ProofOfWorkCalc(r, previous_proof, Timestamp)
if strings.Compare(result_hash, bc.Chain[len(bc.Chain) - 1].Difficulty) < 1 {
break
}
r++
}
return r, Timestamp
}
// A function to use channels to send the blockchain height to the node package
func (bc *Blockchain) SendHeight() {
for true {
i := <-bc.HeightChannel
if i == 0 {
bc.HeightChannel <-len(bc.Chain)
}
}
}
// A function to use channels to send a block to the node package
func (bc *Blockchain) SendBlocks() {
for true {
i := <-bc.BlockIndexChannel
if i < len(bc.Chain) && i >= 0 {
bc.GetBlockChannel <-bc.Chain[i]
} else {
// make an "error" block
respBlock := Block {
Index: -1,
}
bc.GetBlockChannel <-respBlock
}
}
}
// A function to receive a new block from the node package
func (bc *Blockchain) AddRemoteBlocks() {
for true {
// listen for a block from the node goroutine
newBlock := <-bc.AddBlockChannel
bc.BlockMutex.Lock()
bc.Chain = append(bc.Chain, newBlock)
bc.BlockMutex.Unlock()
fmt.Println("Another miner found block " + strconv.Itoa(len(bc.Chain)))
if !bc.ValidateChain() {
// the new block is invalid, delete it
bc.BlockMutex.Lock()
bc.Chain = bc.Chain[:len(bc.Chain) - 1]
bc.BlockMutex.Unlock()
// let the node package know that the block was rejected
bc.BlockValidateChannel <- false
} else {
bc.BlockValidateChannel <- true
}
}
}
//add function to validate blockchain
func (bc *Blockchain) Validate | AddBlock | identifier_name |
blockchain.go | fmt.Println("Proof of the block is " + strconv.Itoa(block.Proof))
fmt.Println("Hash of the previous block is " + block.PreviousHash)
fmt.Println("Hash of the current block is " + bc.HashBlock(block))
fmt.Println("Difficulty of the block is " + block.Difficulty)
fmt.Println("\n\n")
}
// Increment hex value by one lexicographically. Used to adjust difficulty
func hexInc(hash []byte) []byte {
for i := 0; i < len(hash) -1; i++ {
val := hash[i]
if (val == 48) { // this value is a zero
continue
} else {
carry := true
var start int
if (val == 102) { // leave it alone if it's an f
start = i - 1
} else {
start = i
}
for j := start; j >= 0; j-- {
val2 := hash[j]
// a->f
if val2 > 96 {
val2 -= 96-9
} else {
val2 -= 48
}
if carry {
val2 +=1
carry = false
}
if val2 == 16 {
val2 = 0
carry = true
}
if val2 >= 10 {
hash[j] = val2+96-9
} else {
hash[j] = val2+48
}
}
break
}
}
return hash
}
// Decrement the hex value by one lexicographically. Used to adjust difficulty
func hexDec(hash []byte) []byte {
var r = make([]byte, len(hash))
carry := true
for i := 0; i < len(hash); i++ {
val := hash[i]
if (val == 48) {
r[i] = val
continue
}
// a->f
if val > 96 {
val -= 96-9
} else {
val -= 48
}
if carry {
val -=1
carry = false
}
if (val+1) == 0 {
val = 15
carry = true
}
if val >= 10 {
r[i] = val+96-9
} else {
r[i] = val+48
}
}
return r
}
// A function to adjust the difficulty based on the average time between
// the last 720 blocks with 120 outliers removed
func (bc *Blockchain) AdjustDifficulty() string {
// check average time between last 10 blocks
if (len(bc.Chain) <= BLOCK_ADJUSTMENT) {
return bc.Chain[0].Difficulty
} else {
var timestamps []int64
for i := len(bc.Chain) - 1; i > len(bc.Chain) - BLOCK_ADJUSTMENT; i-- {
if (i > 0) {
timestamps = append(timestamps, bc.Chain[i].Timestamp - bc.Chain[i-1].Timestamp)
}
}
// Take out the highest and lowest OUTLIER_NUM timestamps
for i := 0; i < NUM_OUTLIERS; i++ {
// identify the highest and lowest
var min int64 = 99999999
var max int64 = -1
var max_index int = -1
var min_index int = -1
for j:= 0; j < len(timestamps); j++ {
if timestamps[j] > max {
max = timestamps[j]
max_index = j
}
if timestamps[j] < min {
min = timestamps[j]
min_index = j
}
}
// delete the min and max
timestamps[min_index] = timestamps[len(timestamps) - 1] //move min to last element
timestamps[max_index] = timestamps[len(timestamps) - 2] //move max to second to last element
timestamps = timestamps[:len(timestamps) - 2] //truncate
}
// calculate the average after taking out the outliers
var running_total int64 = 0
for j := 0; j < len(timestamps); j++ {
running_total = running_total + timestamps[j]
}
average := running_total / int64(len(timestamps))
b := []byte(bc.Chain[len(bc.Chain) - 1].Difficulty)
// either increase or decrease the difficulty based on the average
if (average > BLOCK_TIME) {
return string(hexInc(b))
} else {
return string(hexDec(b))
}
}
}
// add a function to the blockchain struct to add a new block
func (bc *Blockchain) AddBlock() {
newBlock := new(Block)
newBlock.Proof, newBlock.Timestamp = bc.ProofOfWork()
//newBlock.Timestamp = time.Now().Unix()
newBlock.Index = len(bc.Chain)
newBlock.PreviousHash = bc.HashBlock(bc.Chain[len(bc.Chain) - 1])
newBlock.Difficulty = bc.AdjustDifficulty()
bc.BlockMutex.Lock()
bc.Chain = append(bc.Chain, *newBlock)
bc.BlockMutex.Unlock()
}
// add a function to the blockchain struct to create a hash
func (bc *Blockchain) HashBlock(block Block) string {
var hash = sha256.New()
hash.Write([]byte(strconv.Itoa(block.Index) +
time.Unix(block.Timestamp, 0).Format(time.UnixDate) +
strconv.Itoa(block.Proof) +
block.PreviousHash +
block.Difficulty))
hashed := hash.Sum(nil)
return hex.EncodeToString(hashed)
}
// a function to perform proof of work calculation and return a hash string
func (bc *Blockchain) ProofOfWorkCalc(proof int, previous_proof int, Timestamp int64) string {
// calculate the proof of work function
var hash_PoW = sha256.New()
result := (proof * proof) - (previous_proof * previous_proof) - int(Timestamp)
hash_PoW.Write([]byte(strconv.Itoa(result)))
hashed_PoW := hash_PoW.Sum(nil)
result_hash := hex.EncodeToString(hashed_PoW)
return result_hash
}
// The core mining function, tries random numbers until finding a golden hash
func (bc *Blockchain) ProofOfWork() (int, int64) {
rand.Seed(time.Now().UnixNano())
var r int
var Timestamp int64
r = rand.Intn(2147483647)
for true {
Timestamp = time.Now().Unix()
previous_proof := bc.Chain[len(bc.Chain) - 1].Proof
result_hash := bc.ProofOfWorkCalc(r, previous_proof, Timestamp)
if strings.Compare(result_hash, bc.Chain[len(bc.Chain) - 1].Difficulty) < 1 {
break
}
r++
}
return r, Timestamp
}
// A function to use channels to send the blockchain height to the node package
func (bc *Blockchain) SendHeight() {
for true {
i := <-bc.HeightChannel
if i == 0 {
bc.HeightChannel <-len(bc.Chain)
}
}
}
// A function to use channels to send a block to the node package
func (bc *Blockchain) SendBlocks() {
for true {
i := <-bc.BlockIndexChannel
if i < len(bc.Chain) && i >= 0 {
bc.GetBlockChannel <-bc.Chain[i]
} else {
// make an "error" block
respBlock := Block {
Index: -1,
}
bc.GetBlockChannel <-respBlock
}
}
}
// A function to receive a new block from the node package
func (bc *Blockchain) AddRemoteBlocks() {
for true {
// listen for a block from the node goroutine
newBlock := <-bc.AddBlockChannel
bc.BlockMutex.Lock()
bc.Chain = append(bc.Chain, newBlock)
bc.BlockMutex.Unlock()
fmt.Println("Another miner found block " + strconv.Itoa(len(bc.Chain)))
if !bc.ValidateChain() {
// the new block is invalid, delete it
bc.BlockMutex.Lock()
bc.Chain = bc.Chain[:len(bc.Chain) - 1]
bc.BlockMutex.Unlock()
// let the node package know that the block was rejected
bc.BlockValidateChannel <- false
} else {
bc.BlockValidateChannel <- true
}
}
}
//add function to validate blockchain
func (bc *Blockchain) ValidateChain() bool {
for i := 1; i < len(bc.Chain); i++ {
//current block
block := bc.Chain[i]
//previous block
prev_block := bc.Chain[i-1]
proof_hash := bc.ProofOfWorkCalc(block.Proof, prev_block.Proof, block.Timestamp)
//verify index
if block.Index != prev_block.Index + 1 {
fmt.Println("the new block had the wrong index")
fmt.Println(block) | return false
}
//verify time stamp
if block.Timestamp < prev_block.Timestamp {
fmt.Println("the new block had a bad timestamp") | random_line_split |
|
daal_pca_test.py | .521547782, -0.418681224],
[0.316721742, 0.288319245, 0.499514144, 0.267566455,
-0.0338341451, -0.134086469, -0.184724393, -0.246523528,
0.593753078, -0.169969303],
[0.315335647, -0.258529064, 0.374780341, -0.169762381,
0.416093803, -0.118232778, 0.445019707, -0.395962728,
-0.337229123, -0.0937071881],
[0.314899154, -0.0294147958, -0.447870311, -0.258339192,
0.0794841625, -0.71141762, 0.110951688, 0.102784186,
0.292018251, 0.109836478],
[0.315542865, -0.236497774, -0.289051199, -0.452795684,
-0.12175352, 0.5265342, -0.0312645934, -0.180142504,
0.318334436, -0.359303747],
[0.315875856, 0.72196434, -0.239088332, -0.0259999274,
-0.0579153559, 0.244335633, 0.232808362, -0.233600306,
-0.181191102, 0.3413174]]
def setUp(self):
super(DaalPrincipalComponent, self).setUp()
schema = [("X1", int),
("X2", int),
("X3", int),
("X4", int),
("X5", int),
("X6", int),
("X7", int),
("X8", int),
("X9", int),
("X10", int)]
training_data = self.get_file("pcadata.csv")
self.frame = self.context.frame.import_csv(training_data, schema=schema)
@unittest.skip("daaltk: pca_model has no singular vectors")
def test_daal_principal_components_train_mean(self):
"""Test the train functionality with mean centering"""
pca_train_out = self.context.daaltk.models.dimreduction.pca.train(self.frame,
["X1", "X2", "X3", "X4", "X5",
"X6", "X7", "X8", "X9", "X10"],
True, 10)
# actual right-singular vectors
actual_R_singular_vec = pca_train_out.right_singular_vectors
# actual singular values
actual_singular_val = pca_train_out.singular_values
for c in self.frame.column_names:
mean = self.frame.column_summary_statistics(c)["mean"]
self.frame.add_columns(
lambda x: x[c] - mean, (c+"_n", float))
pcamodelmean = self.context.daaltk.models.dimreduction.pca.train(
self.frame,
["X1_n", "X2_n", "X3_n", "X4_n", "X5_n",
"X6_n", "X7_n", "X8_n", "X9_n", "X10_n"],
False, 10)
# actual right-singular vectors
actual_R_singular_vec_mean = pcamodelmean.right_singular_vectors
# actual singular values
actual_singular_val_mean = pcamodelmean.singular_values
expected_actual = zip(actual_singular_val, actual_singular_val_mean)
for expected, actual in expected_actual:
self.assertAlmostEqual(expected, actual, 8)
expected_actual = zip(
actual_R_singular_vec, actual_R_singular_vec_mean)
for expected, actual in expected_actual:
for f1, f2 in zip(expected, actual):
self.assertAlmostEqual(f1, f2, 4)
@unittest.skip("daaltk: pca_model has no singular vectors")
def test_daal_pca_predict(self):
"""Test the train functionality"""
pca_train_out = self.context.daaltk.models.dimreduction.pca.train(self.frame,
["X1", "X2", "X3", "X4", "X5",
"X6", "X7", "X8", "X9", "X10"],
False, 10)
pca_train_out.predict(self.frame, False)
pd_frame = self.frame.to_pandas(self.frame.count())
actual_R_singular_vec = map(
list, zip(*pca_train_out.right_singular_vectors))
for _, i in pd_frame.iterrows():
vec1 = i[0:10]
vec2 = i[10:]
dot_product = [sum([(r1)*(r2) for r1, r2 in zip(vec1, k)])
for k in actual_R_singular_vec]
for i, j in zip(vec2, dot_product):
self.assertAlmostEqual(i, j)
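The assertion loop in test_daal_pca_predict above checks a plain linear-algebra identity: each projected coordinate equals the dot product of the original row with a right singular vector. The numpy-only sketch below (toy matrix, not the test data) shows the same relationship.
# --- illustrative sketch, not part of the original test ---
import numpy as np
X = np.array([[1.0, 2.0, 3.0],
              [2.0, 4.0, 5.0],
              [3.0, 1.0, 0.0],
              [4.0, 3.0, 2.0]])      # toy data matrix
_, _, vt = np.linalg.svd(X, full_matrices=False)
V = vt.T                              # columns are the right singular vectors
projected = X @ V                     # what a PCA predict step produces
row0_manual = X[0] @ V                # dot product of one row with each singular vector
print(np.allclose(projected[0], row0_manual))   # True
# --- end sketch ---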
@unittest.skip("daaltk: pca_model has no singular vectors")
def test_daal_pca_train(self):
"""Test the train functionality"""
pca_train_out = self.context.daaltk.models.dimreduction.pca.train(self.frame,
["X1", "X2", "X3", "X4", "X5",
"X6", "X7", "X8", "X9", "X10"],
False, 10)
# actual right-singular vectors
actual_R_singular_vec = pca_train_out.right_singular_vectors
# actual singular values
actual_singular_val = pca_train_out.singular_values
expected_actual = zip(self.expected_singular_val, actual_singular_val)
for expected, actual in expected_actual:
self.assertAlmostEqual(expected, actual, 8)
expected_actual = zip(actual_R_singular_vec,
self.expected_R_singular_vec)
for expected, actual in expected_actual:
for f1, f2 in zip(expected, actual):
self.assertAlmostEqual(abs(f1), abs(f2), 4)
def test_daal_pca_publish(self):
"""Test the publish functionality"""
pcamodel = self.context.daaltk.models.dimreduction.pca.train(
self.frame,
["X1", "X2", "X3", "X4", "X5", "X6", "X7", "X8", "X9", "X10"],
False, 10)
path = self.get_export_file(self.get_name("daaltk_pca"))
pcamodel.export_to_mar(path)
self.assertIn("hdfs", path)
@unittest.skip("daaltk: pca_model has no singular vectors")
def test_daal_pca_default(self):
"""Test default no. of k"""
pca_train_out = self.context.daaltk.models.dimreduction.pca.train(
self.frame,
["X1", "X2", "X3", "X4", "X5", "X6", "X7", "X8", "X9", "X10"],
False)
# actual right-singular vectors
actual_R_singular_vec = pca_train_out.right_singular_vectors
# actual singular values
actual_singular_val = pca_train_out.singular_values
for ind in xrange(0, len(actual_singular_val)):
self.assertAlmostEqual(round(actual_singular_val[ind], 8),
self.expected_singular_val[ind])
for ind in xrange(0, len(actual_R_singular_vec)):
for ind2 in xrange(0, len(actual_R_singular_vec[ind])):
self.assertEqual(
abs(round(actual_R_singular_vec[ind][ind | 2], 6)),
abs(round(self.expected_R_singular_vec[ind][ind2], 6)))
def test_daal_pca_bad_no_of_k(self):
"""Test invalid k value in | conditional_block |
|
daal_pca_test.py | 762, 0.110951688, 0.102784186,
0.292018251, 0.109836478],
[0.315542865, -0.236497774, -0.289051199, -0.452795684,
-0.12175352, 0.5265342, -0.0312645934, -0.180142504,
0.318334436, -0.359303747],
[0.315875856, 0.72196434, -0.239088332, -0.0259999274,
-0.0579153559, 0.244335633, 0.232808362, -0.233600306,
-0.181191102, 0.3413174]]
def setUp(self):
super(DaalPrincipalComponent, self).setUp()
schema = [("X1", int),
("X2", int),
("X3", int),
("X4", int),
("X5", int),
("X6", int),
("X7", int),
("X8", int),
("X9", int),
("X10", int)]
training_data = self.get_file("pcadata.csv")
self.frame = self.context.frame.import_csv(training_data, schema=schema)
@unittest.skip("daaltk: pca_model has no singular vectors")
def test_daal_principal_components_train_mean(self):
"""Test the train functionality with mean centering"""
pca_train_out = self.context.daaltk.models.dimreduction.pca.train(self.frame,
["X1", "X2", "X3", "X4", "X5",
"X6", "X7", "X8", "X9", "X10"],
True, 10)
# actual right-singular vectors
actual_R_singular_vec = pca_train_out.right_singular_vectors
# actual singular values
actual_singular_val = pca_train_out.singular_values
for c in self.frame.column_names:
mean = self.frame.column_summary_statistics(c)["mean"]
self.frame.add_columns(
lambda x: x[c] - mean, (c+"_n", float))
pcamodelmean = self.context.daaltk.models.dimreduction.pca.train(
self.frame,
["X1_n", "X2_n", "X3_n", "X4_n", "X5_n",
"X6_n", "X7_n", "X8_n", "X9_n", "X10_n"],
False, 10)
# actual right-singular vectors
actual_R_singular_vec_mean = pcamodelmean.right_singular_vectors
# actual singular values
actual_singular_val_mean = pcamodelmean.singular_values
expected_actual = zip(actual_singular_val, actual_singular_val_mean)
for expected, actual in expected_actual:
self.assertAlmostEqual(expected, actual, 8)
expected_actual = zip(
actual_R_singular_vec, actual_R_singular_vec_mean)
for expected, actual in expected_actual:
for f1, f2 in zip(expected, actual):
self.assertAlmostEqual(f1, f2, 4)
@unittest.skip("daaltk: pca_model has no singular vectors")
def test_daal_pca_predict(self):
"""Test the train functionality"""
pca_train_out = self.context.daaltk.models.dimreduction.pca.train(self.frame,
["X1", "X2", "X3", "X4", "X5",
"X6", "X7", "X8", "X9", "X10"],
False, 10)
pca_train_out.predict(self.frame, False)
pd_frame = self.frame.to_pandas(self.frame.count())
actual_R_singular_vec = map(
list, zip(*pca_train_out.right_singular_vectors))
for _, i in pd_frame.iterrows():
vec1 = i[0:10]
vec2 = i[10:]
dot_product = [sum([(r1)*(r2) for r1, r2 in zip(vec1, k)])
for k in actual_R_singular_vec]
for i, j in zip(vec2, dot_product):
self.assertAlmostEqual(i, j)
@unittest.skip("daaltk: pca_model has no singular vectors")
def test_daal_pca_train(self):
"""Test the train functionality"""
pca_train_out = self.context.daaltk.models.dimreduction.pca.train(self.frame,
["X1", "X2", "X3", "X4", "X5",
"X6", "X7", "X8", "X9", "X10"],
False, 10)
# actual right-singular vectors
actual_R_singular_vec = pca_train_out.right_singular_vectors
# actual singular values
actual_singular_val = pca_train_out.singular_values
expected_actual = zip(self.expected_singular_val, actual_singular_val)
for expected, actual in expected_actual:
self.assertAlmostEqual(expected, actual, 8)
expected_actual = zip(actual_R_singular_vec,
self.expected_R_singular_vec)
for expected, actual in expected_actual:
for f1, f2 in zip(expected, actual):
self.assertAlmostEqual(abs(f1), abs(f2), 4)
def test_daal_pca_publish(self):
"""Test the publish functionality"""
pcamodel = self.context.daaltk.models.dimreduction.pca.train(
self.frame,
["X1", "X2", "X3", "X4", "X5", "X6", "X7", "X8", "X9", "X10"],
False, 10)
path = self.get_export_file(self.get_name("daaltk_pca"))
pcamodel.export_to_mar(path)
self.assertIn("hdfs", path)
@unittest.skip("daaltk: pca_model has no singular vectors")
def test_daal_pca_default(self):
"""Test default no. of k"""
pca_train_out = self.context.daaltk.models.dimreduction.pca.train(
self.frame,
["X1", "X2", "X3", "X4", "X5", "X6", "X7", "X8", "X9", "X10"],
False)
# actual right-singular vectors
actual_R_singular_vec = pca_train_out.right_singular_vectors
# actual singular values
actual_singular_val = pca_train_out.singular_values
for ind in xrange(0, len(actual_singular_val)):
self.assertAlmostEqual(round(actual_singular_val[ind], 8),
self.expected_singular_val[ind])
for ind in xrange(0, len(actual_R_singular_vec)):
for ind2 in xrange(0, len(actual_R_singular_vec[ind])):
self.assertEqual(
abs(round(actual_R_singular_vec[ind][ind2], 6)),
abs(round(self.expected_R_singular_vec[ind][ind2], 6)))
def test_daal_pca_bad_no_of_k(self):
"""Test invalid k value in train"""
with self.assertRaisesRegexp(Exception, "k must be less than or equal to number of observation columns"):
self.context.daaltk.models.dimreduction.pca.train(self.frame,
["X1", "X2", "X3", "X4", "X5",
"X6", "X7", "X8", "X9", "X10"],
k=11)
def test_daal_pca_invalid_k(self):
"""Test k < 1 in train"""
with self.assertRaisesRegexp(Exception, "k must be smaller than the number of observation columns"):
self.context.daaltk.models.dimreduction.pca.train(self.frame,
["X1", "X2", "X3", "X4", "X5",
"X6", "X7", "X8", "X9", "X10"],
k=0)
def test_daal_pca_bad_column_name(self):
"""Test bad feature column name"""
with self.assertRaisesRegexp(Exception, "column ERR was not found"):
self.context.daaltk.models.dimreduction.pca.train(self.frame,
["ERR", "X2", "X3", "X4", "X5",
"X6", "X7", "X8", "X9", "X10"],
k=10)
def test_daal_pca_bad_column_type(self):
"""Test bad feature column name type"""
with self.assertRaisesRegexp(E | xception, "columns must be a list of strings"):
self.context.daaltk.models.dimreduction.pca.train(self.frame, 10, k=10)
if __name__ == '__main__':
unittest.main()
| identifier_body |
|
daal_pca_test.py | 9]
# expected right-singular vectors V
expected_R_singular_vec = \
[[0.315533916, -0.3942771, 0.258362247, -0.0738539198,
-0.460673735, 0.0643077298, -0.0837131184, 0.0257963888,
0.00376728499, 0.669876972],
[0.316500921, -0.165508013, -0.131017612, 0.581988787,
-0.0863507191, 0.160473134, 0.53134635, 0.41199152,
0.0823770991, -0.156517367],
[0.316777341, 0.244415549, 0.332413311, -0.377379981,
0.149653873, 0.0606339992, -0.163748261, 0.699502817,
-0.171189721, -0.124509149],
[0.318988109, -0.171520719, -0.250278714, 0.335635209,
0.580901954, 0.160427725, -0.531610364, -0.0304943121,
-0.0785743304, 0.201591811],
[0.3160833, 0.000386702461, -0.108022985, 0.167086405,
-0.470855879, -0.256296677, -0.318727111, -0.155621638,
-0.521547782, -0.418681224],
[0.316721742, 0.288319245, 0.499514144, 0.267566455,
-0.0338341451, -0.134086469, -0.184724393, -0.246523528,
0.593753078, -0.169969303],
[0.315335647, -0.258529064, 0.374780341, -0.169762381,
0.416093803, -0.118232778, 0.445019707, -0.395962728,
-0.337229123, -0.0937071881],
[0.314899154, -0.0294147958, -0.447870311, -0.258339192,
0.0794841625, -0.71141762, 0.110951688, 0.102784186,
0.292018251, 0.109836478],
[0.315542865, -0.236497774, -0.289051199, -0.452795684,
-0.12175352, 0.5265342, -0.0312645934, -0.180142504,
0.318334436, -0.359303747],
[0.315875856, 0.72196434, -0.239088332, -0.0259999274,
-0.0579153559, 0.244335633, 0.232808362, -0.233600306,
-0.181191102, 0.3413174]]
def setUp(self):
super(DaalPrincipalComponent, self).setUp()
schema = [("X1", int),
("X2", int),
("X3", int),
("X4", int),
("X5", int),
("X6", int),
("X7", int),
("X8", int),
("X9", int),
("X10", int)]
training_data = self.get_file("pcadata.csv")
self.frame = self.context.frame.import_csv(training_data, schema=schema)
@unittest.skip("daaltk: pca_model has no singular vectors")
def test_daal_principal_components_train_mean(self):
"""Test the train fun | pca_train_out = self.context.daaltk.models.dimreduction.pca.train(self.frame,
["X1", "X2", "X3", "X4", "X5",
"X6", "X7", "X8", "X9", "X10"],
True, 10)
# actual right-singular vectors
actual_R_singular_vec = pca_train_out.right_singular_vectors
# actual singular values
actual_singular_val = pca_train_out.singular_values
for c in self.frame.column_names:
mean = self.frame.column_summary_statistics(c)["mean"]
self.frame.add_columns(
lambda x: x[c] - mean, (c+"_n", float))
pcamodelmean = self.context.daaltk.models.dimreduction.pca.train(
self.frame,
["X1_n", "X2_n", "X3_n", "X4_n", "X5_n",
"X6_n", "X7_n", "X8_n", "X9_n", "X10_n"],
False, 10)
# actual right-singular vectors
actual_R_singular_vec_mean = pca_train_out.right_singular_vectors
# actual singular values
actual_singular_val_mean = pca_train_out.singular_values
expected_actual = zip(actual_singular_val, actual_singular_val_mean)
for expected, actual in expected_actual:
self.assertAlmostEqual(expected, actual, 8)
expected_actual = zip(
actual_R_singular_vec, actual_R_singular_vec_mean)
for expected, actual in expected_actual:
for f1, f2 in zip(expected, actual):
self.assertAlmostEqual(f1, f2, 4)
@unittest.skip("daaltk: pca_model has no singular vectors")
def test_daal_pca_predict(self):
"""Test the train functionality"""
pca_train_out = self.context.daaltk.models.dimreduction.pca.train(self.frame,
["X1", "X2", "X3", "X4", "X5",
"X6", "X7", "X8", "X9", "X10"],
False, 10)
pca_train_out.predict(self.frame, False)
pd_frame = self.frame.to_pandas(self.frame.count())
actual_R_singular_vec = map(
list, zip(*pca_train_out.right_singular_vectors))
for _, i in pd_frame.iterrows():
vec1 = i[0:10]
vec2 = i | ctionality with mean centering"""
| identifier_name |
daal_pca_test.py | 1, -0.165508013, -0.131017612, 0.581988787,
-0.0863507191, 0.160473134, 0.53134635, 0.41199152,
0.0823770991, -0.156517367],
[0.316777341, 0.244415549, 0.332413311, -0.377379981,
0.149653873, 0.0606339992, -0.163748261, 0.699502817,
-0.171189721, -0.124509149],
[0.318988109, -0.171520719, -0.250278714, 0.335635209,
0.580901954, 0.160427725, -0.531610364, -0.0304943121,
-0.0785743304, 0.201591811],
[0.3160833, 0.000386702461, -0.108022985, 0.167086405,
-0.470855879, -0.256296677, -0.318727111, -0.155621638,
-0.521547782, -0.418681224],
[0.316721742, 0.288319245, 0.499514144, 0.267566455,
-0.0338341451, -0.134086469, -0.184724393, -0.246523528,
0.593753078, -0.169969303],
[0.315335647, -0.258529064, 0.374780341, -0.169762381,
0.416093803, -0.118232778, 0.445019707, -0.395962728,
-0.337229123, -0.0937071881],
[0.314899154, -0.0294147958, -0.447870311, -0.258339192,
0.0794841625, -0.71141762, 0.110951688, 0.102784186,
0.292018251, 0.109836478],
[0.315542865, -0.236497774, -0.289051199, -0.452795684,
-0.12175352, 0.5265342, -0.0312645934, -0.180142504,
0.318334436, -0.359303747],
[0.315875856, 0.72196434, -0.239088332, -0.0259999274,
-0.0579153559, 0.244335633, 0.232808362, -0.233600306,
-0.181191102, 0.3413174]]
def setUp(self):
super(DaalPrincipalComponent, self).setUp()
schema = [("X1", int),
("X2", int),
("X3", int),
("X4", int),
("X5", int),
("X6", int),
("X7", int),
("X8", int),
("X9", int),
("X10", int)]
training_data = self.get_file("pcadata.csv")
self.frame = self.context.frame.import_csv(training_data, schema=schema)
@unittest.skip("daaltk: pca_model has no singular vectors")
def test_daal_principal_components_train_mean(self):
"""Test the train functionality with mean centering"""
pca_train_out = self.context.daaltk.models.dimreduction.pca.train(self.frame,
["X1", "X2", "X3", "X4", "X5",
"X6", "X7", "X8", "X9", "X10"],
True, 10)
# actual right-singular vectors
actual_R_singular_vec = pca_train_out.right_singular_vectors
# actual singular values
actual_singular_val = pca_train_out.singular_values
for c in self.frame.column_names:
mean = self.frame.column_summary_statistics(c)["mean"]
self.frame.add_columns(
lambda x: x[c] - mean, (c+"_n", float))
pcamodelmean = self.context.daaltk.models.dimreduction.pca.train(
self.frame,
["X1_n", "X2_n", "X3_n", "X4_n", "X5_n",
"X6_n", "X7_n", "X8_n", "X9_n", "X10_n"],
False, 10)
# actual right-singular vectors
actual_R_singular_vec_mean = pcamodelmean.right_singular_vectors
# actual singular values
actual_singular_val_mean = pcamodelmean.singular_values
expected_actual = zip(actual_singular_val, actual_singular_val_mean)
for expected, actual in expected_actual:
self.assertAlmostEqual(expected, actual, 8)
expected_actual = zip(
actual_R_singular_vec, actual_R_singular_vec_mean)
for expected, actual in expected_actual:
for f1, f2 in zip(expected, actual):
self.assertAlmostEqual(f1, f2, 4)
@unittest.skip("daaltk: pca_model has no singular vectors")
def test_daal_pca_predict(self):
"""Test the train functionality"""
pca_train_out = self.context.daaltk.models.dimreduction.pca.train(self.frame,
["X1", "X2", "X3", "X4", "X5",
"X6", "X7", "X8", "X9", "X10"],
False, 10)
pca_train_out.predict(self.frame, False)
pd_frame = self.frame.to_pandas(self.frame.count())
actual_R_singular_vec = map(
list, zip(*pca_train_out.right_singular_vectors))
for _, i in pd_frame.iterrows():
vec1 = i[0:10]
vec2 = i[10:]
dot_product = [sum([(r1)*(r2) for r1, r2 in zip(vec1, k)])
for k in actual_R_singular_vec]
for i, j in zip(vec2, dot_product):
self.assertAlmostEqual(i, j)
@unittest.skip("daaltk: pca_model has no singular vectors")
def test_daal_pca_train(self):
"""Test the train functionality"""
pca_train_out = self.context.daaltk.models.dimreduction.pca.train(self.frame,
["X1", "X2", "X3", "X4", "X5", | "X6", "X7", "X8", "X9", "X10"],
False, 10)
# actual right-singular vectors | random_line_split |
|
level.rs | ;
pub struct LevelScene {
done: bool,
car: warmy::Res<resources::Image>,
map: Map,
player_entity: specs::Entity,
dispatcher: specs::Dispatcher<'static, 'static>,
}
impl LevelScene {
pub fn new(ctx: &mut ggez::Context, world: &mut World) -> Self {
let done = false;
let car = world
.resources
.get::<resources::Image>(&resources::Key::from_path("/images/cars-spritesheet.png"), ctx)
.unwrap();
let mut load_image = |ctx: &mut ggez::Context, path: &str| -> graphics::Image {
let mut path_in_resources = String::from("/");
path_in_resources.push_str(path);
graphics::Image::new(ctx, path_in_resources).unwrap()
};
let tiled_map = tiled::parse_file(&std::path::Path::new("resources/map1.tmx")).unwrap();
let map = ggez_goodies::tilemap::Map::from_tiled(
ctx,
tiled_map,
&mut load_image
);
let half_height = 76.0 / 2.0;
let half_width = 76.0 / 4.0;
// shared collision properties
let car_shape = nc::shape::Cuboid::new(na::Vector2::new(half_width, half_height));
let car_collision_group = nc::pipeline::object::CollisionGroups::new();
let contacts_query = nc::pipeline::object::GeometricQueryType::Contacts(0.0, 0.0);
// player 1
let player_entity = world.specs_world.create_entity()
.with(c::Position { point: util::point2(100.0, 300.0), rotation: 0.0 })
.with(c::Motion { velocity: util::vec2(0.0, 0.0), acceleration: util::vec2(0.0, 0.0), is_blocked: false, orientation: 0.0})
.with(c::Sprite { clip: graphics::Rect { x: 0.0, y: 0.0, h: 1.0, w: (76.0 / 384.0)}, scale: graphics::mint::Vector2 { x: 0.5f32, y: 0.5f32 }})
.build();
// other car
let car_entity = world.specs_world.create_entity()
.with(c::Position { point: util::point2(100.0, 100.0), rotation: 0.0 })
.with(c::Sprite { clip: graphics::Rect { x: (76.0 / 384.0), y: 0.0, h: 1.0, w: (76.0 / 384.0)}, scale: graphics::mint::Vector2 { x: 0.5f32, y: 0.5f32 }})
.build();
// collisions
//
{
let mut collide_world = world.specs_world.write_resource::<nc::world::CollisionWorld<f32, specs::Entity>>();
let (player_collider_handle, _) = collide_world.add(
na::Isometry2::new(na::Vector2::new(100.0, 300.0), na::zero()),
nc::shape::ShapeHandle::new(car_shape.clone()),
car_collision_group,
contacts_query,
player_entity,
);
world.specs_world.write_storage::<c::Collider>().insert(player_entity, c::Collider { handle: player_collider_handle }).expect("couldn't insert Collider");
let (car_collider_handle, _) = collide_world.add(
na::Isometry2::new(na::Vector2::new(100.0, 100.0), na::zero()),
nc::shape::ShapeHandle::new(car_shape.clone()),
car_collision_group,
contacts_query,
car_entity,
);
world.specs_world.write_storage::<c::Collider>().insert(car_entity, c::Collider { handle: car_collider_handle }).expect("couldn't insert Collider");
}
let mut dispatcher = Self::register_systems();
dispatcher.setup(&mut world.specs_world.res);
LevelScene {
done,
car,
map,
player_entity,
dispatcher,
}
}
fn register_systems() -> specs::Dispatcher<'static, 'static> |
fn update_collisions(&mut self, world: &mut World) {
let mut collide_world = world.specs_world.write_resource::<nc::world::CollisionWorld<f32, specs::Entity>>();
collide_world.update();
let mut motions = world.specs_world.write_storage::<c::Motion>();
// gameworld.collide_world.update();
for e in collide_world.contact_events() {
match e {
ncollide2d::pipeline::narrow_phase::ContactEvent::Started(handle1, handle2) =>
{
println!("contact started!");
// look up collision object
let obj1 = collide_world.collision_object(*handle1).expect("missing coll obj1");
// look up entity
let entity1: &specs::Entity = obj1.data();
if let Some(motion) = motions.get_mut(*entity1) {
motion.is_blocked = true;
motion.acceleration.y = motion.acceleration.y * -1.0;
motion.update();
}
let obj2 = collide_world.collision_object(*handle2).expect("missing coll obj2");
let entity2: &specs::Entity = obj2.data();
if let Some(motion) = motions.get_mut(*entity2) {
motion.is_blocked = true;
// want to use reflect here.
motion.acceleration.y = motion.acceleration.y * -1.0;
motion.update();
// motion.velocity = rotation.transform_vector(&player_motion.acceleration);
}
}
ncollide2d::pipeline::narrow_phase::ContactEvent::Stopped(handle1, handle2) =>
{
println!("contact ended");
let obj1 = collide_world.collision_object(*handle1).expect("missing coll obj1");
// look up entity
let entity1: &specs::Entity = obj1.data();
if let Some(motion) = motions.get_mut(*entity1) {
motion.is_blocked = false;
}
let obj2 = collide_world.collision_object(*handle2).expect("missing coll obj2");
let entity2: &specs::Entity = obj2.data();
if let Some(motion) = motions.get_mut(*entity2) {
motion.is_blocked = false;
}
}
}
}
}
}
// fn camera_draw(ctx: &mut ggez::Context, drawable: &graphics::Drawable, params: graphics::DrawParam) -> ggez::GameResult<()> {
// Ok(())
// }
impl scene::Scene<World, input::Event> for LevelScene {
fn update(&mut self, gameworld: &mut World, _ctx: &mut ggez::Context) -> scenes::Switch {
self.dispatcher.dispatch(&mut gameworld.specs_world.res);
self.update_collisions(gameworld);
if self.done {
scene::SceneSwitch::Pop
} else {
scene::SceneSwitch::None
}
}
fn draw(&mut self, gameworld: &mut World, ctx: &mut ggez::Context) -> ggez::GameResult<()> {
let pos = gameworld.specs_world.read_storage::<c::Position>();
let mut camera_offset = na::Point2::new(0.0, 0.0);
// todo: update this elsewhere
let player_point = pos.get(self.player_entity).unwrap().point;
// window is 800 x 600
if player_point.x > 400.0 {
if player_point.x < (self.map.width as f32 - 400.0) {
camera_offset.x = 400.0 - player_point.x;
} else {
camera_offset.x = self.map.width as f32 + 400.0;
}
}
if player_point.y > 300.0 {
camera_offset.y = 300.0 - player_point.y;
}
// map
graphics::draw(ctx, &self.map, graphics::DrawParam::default().dest(camera_offset)).unwrap();
// camera_draw(ctx, &self.map, graphics::DrawParam::default()).unwrap();
// sprites
let sprite = gameworld.specs_world.read_storage::<c::Sprite>();
let offset_x: f32 = 0.5;
let offset_y: f32 = 0.5;
for (p, s) in (&pos, &sprite).join() {
let mut params = graphics::DrawParam::default();
params.src = s.clip;
params.rotation = p.rotation;
params.scale = s.scale;
params.offset = na::Point | {
let builder = specs::DispatcherBuilder::new()
.with(MovementSystem, "sys_movement", &[])
.with(CollisionSystem, "sys_collision", &[]);
// builder.add_thread_local(RenderSystem);
builder.build()
} | identifier_body |
level.rs | 0;
pub struct LevelScene {
done: bool,
car: warmy::Res<resources::Image>,
map: Map,
player_entity: specs::Entity,
dispatcher: specs::Dispatcher<'static, 'static>,
}
impl LevelScene {
pub fn new(ctx: &mut ggez::Context, world: &mut World) -> Self {
let done = false;
let car = world
.resources
.get::<resources::Image>(&resources::Key::from_path("/images/cars-spritesheet.png"), ctx)
.unwrap();
let mut load_image = |ctx: &mut ggez::Context, path: &str| -> graphics::Image {
let mut path_in_resources = String::from("/");
path_in_resources.push_str(path);
graphics::Image::new(ctx, path_in_resources).unwrap()
};
let tiled_map = tiled::parse_file(&std::path::Path::new("resources/map1.tmx")).unwrap();
let map = ggez_goodies::tilemap::Map::from_tiled(
ctx,
tiled_map,
&mut load_image
);
let half_height = 76.0 / 2.0;
let half_width = 76.0 / 4.0;
// shared collision properties
let car_shape = nc::shape::Cuboid::new(na::Vector2::new(half_width, half_height));
let car_collision_group = nc::pipeline::object::CollisionGroups::new();
let contacts_query = nc::pipeline::object::GeometricQueryType::Contacts(0.0, 0.0);
// player 1
let player_entity = world.specs_world.create_entity()
.with(c::Position { point: util::point2(100.0, 300.0), rotation: 0.0 })
.with(c::Motion { velocity: util::vec2(0.0, 0.0), acceleration: util::vec2(0.0, 0.0), is_blocked: false, orientation: 0.0})
.with(c::Sprite { clip: graphics::Rect { x: 0.0, y: 0.0, h: 1.0, w: (76.0 / 384.0)}, scale: graphics::mint::Vector2 { x: 0.5f32, y: 0.5f32 }})
.build();
// other car
let car_entity = world.specs_world.create_entity()
.with(c::Position { point: util::point2(100.0, 100.0), rotation: 0.0 })
.with(c::Sprite { clip: graphics::Rect { x: (76.0 / 384.0), y: 0.0, h: 1.0, w: (76.0 / 384.0)}, scale: graphics::mint::Vector2 { x: 0.5f32, y: 0.5f32 }})
.build();
// collisions
//
{
let mut collide_world = world.specs_world.write_resource::<nc::world::CollisionWorld<f32, specs::Entity>>();
let (player_collider_handle, _) = collide_world.add(
na::Isometry2::new(na::Vector2::new(100.0, 300.0), na::zero()),
nc::shape::ShapeHandle::new(car_shape.clone()),
car_collision_group,
contacts_query,
player_entity,
);
world.specs_world.write_storage::<c::Collider>().insert(player_entity, c::Collider { handle: player_collider_handle }).expect("couldn't insert Collider");
let (car_collider_handle, _) = collide_world.add(
na::Isometry2::new(na::Vector2::new(100.0, 100.0), na::zero()),
nc::shape::ShapeHandle::new(car_shape.clone()),
car_collision_group,
contacts_query,
car_entity,
);
world.specs_world.write_storage::<c::Collider>().insert(car_entity, c::Collider { handle: car_collider_handle }).expect("couldn't insert Collider");
}
let mut dispatcher = Self::register_systems();
dispatcher.setup(&mut world.specs_world.res);
LevelScene {
done,
car,
map,
player_entity,
dispatcher,
}
}
fn register_systems() -> specs::Dispatcher<'static, 'static> {
let builder = specs::DispatcherBuilder::new()
.with(MovementSystem, "sys_movement", &[])
.with(CollisionSystem, "sys_collision", &[]);
// builder.add_thread_local(RenderSystem);
builder.build()
}
fn update_collisions(&mut self, world: &mut World) {
let mut collide_world = world.specs_world.write_resource::<nc::world::CollisionWorld<f32, specs::Entity>>();
collide_world.update();
let mut motions = world.specs_world.write_storage::<c::Motion>();
// gameworld.collide_world.update();
for e in collide_world.contact_events() {
match e {
ncollide2d::pipeline::narrow_phase::ContactEvent::Started(handle1, handle2) =>
| // motion.velocity = rotation.transform_vector(&player_motion.acceleration);
}
}
ncollide2d::pipeline::narrow_phase::ContactEvent::Stopped(handle1, handle2) =>
{
println!("contact ended");
let obj1 = collide_world.collision_object(*handle1).expect("missing coll obj1");
// look up entity
let entity1: &specs::Entity = obj1.data();
if let Some(motion) = motions.get_mut(*entity1) {
motion.is_blocked = false;
}
let obj2 = collide_world.collision_object(*handle2).expect("missing coll obj2");
let entity2: &specs::Entity = obj2.data();
if let Some(motion) = motions.get_mut(*entity2) {
motion.is_blocked = false;
}
}
}
}
}
}
// fn camera_draw(ctx: &mut ggez::Context, drawable: &graphics::Drawable, params: graphics::DrawParam) -> ggez::GameResult<()> {
// Ok(())
// }
impl scene::Scene<World, input::Event> for LevelScene {
fn update(&mut self, gameworld: &mut World, _ctx: &mut ggez::Context) -> scenes::Switch {
self.dispatcher.dispatch(&mut gameworld.specs_world.res);
self.update_collisions(gameworld);
if self.done {
scene::SceneSwitch::Pop
} else {
scene::SceneSwitch::None
}
}
fn draw(&mut self, gameworld: &mut World, ctx: &mut ggez::Context) -> ggez::GameResult<()> {
let pos = gameworld.specs_world.read_storage::<c::Position>();
let mut camera_offset = na::Point2::new(0.0, 0.0);
// todo: update this elsewhere
let player_point = pos.get(self.player_entity).unwrap().point;
// window is 800 x 600
if player_point.x > 400.0 {
if player_point.x < (self.map.width as f32 - 400.0) {
camera_offset.x = 400.0 - player_point.x;
} else {
camera_offset.x = self.map.width as f32 + 400.0;
}
}
if player_point.y > 300.0 {
camera_offset.y = 300.0 - player_point.y;
}
// map
graphics::draw(ctx, &self.map, graphics::DrawParam::default().dest(camera_offset)).unwrap();
// camera_draw(ctx, &self.map, graphics::DrawParam::default()).unwrap();
// sprites
let sprite = gameworld.specs_world.read_storage::<c::Sprite>();
let offset_x: f32 = 0.5;
let offset_y: f32 = 0.5;
for (p, s) in (&pos, &sprite).join() {
let mut params = graphics::DrawParam::default();
params.src = s.clip;
params.rotation = p.rotation;
params.scale = s.scale;
params.offset = na::Point2 | {
println!("contact started!");
// look up collision object
let obj1 = collide_world.collision_object(*handle1).expect("missing coll obj1");
// look up entity
let entity1: &specs::Entity = obj1.data();
if let Some(motion) = motions.get_mut(*entity1) {
motion.is_blocked = true;
motion.acceleration.y = motion.acceleration.y * -1.0;
motion.update();
}
let obj2 = collide_world.collision_object(*handle2).expect("missin coll obj2");
let entity2: &specs::Entity = obj2.data();
if let Some(motion) = motions.get_mut(*entity2) {
motion.is_blocked = true;
// want to use reflect here.
motion.acceleration.y = motion.acceleration.y * -1.0;
motion.update(); | conditional_block |
level.rs | ;
pub struct LevelScene {
done: bool,
car: warmy::Res<resources::Image>,
map: Map,
player_entity: specs::Entity,
dispatcher: specs::Dispatcher<'static, 'static>,
}
impl LevelScene {
pub fn new(ctx: &mut ggez::Context, world: &mut World) -> Self {
let done = false;
let car = world
.resources
.get::<resources::Image>(&resources::Key::from_path("/images/cars-spritesheet.png"), ctx)
.unwrap();
let mut load_image = |ctx: &mut ggez::Context, path: &str| -> graphics::Image {
let mut path_in_resources = String::from("/");
path_in_resources.push_str(path);
graphics::Image::new(ctx, path_in_resources).unwrap()
};
let tiled_map = tiled::parse_file(&std::path::Path::new("resources/map1.tmx")).unwrap();
let map = ggez_goodies::tilemap::Map::from_tiled(
ctx,
tiled_map,
&mut load_image
);
let half_height = 76.0 / 2.0;
let half_width = 76.0 / 4.0;
// shared collision properties
let car_shape = nc::shape::Cuboid::new(na::Vector2::new(half_width, half_height));
let car_collision_group = nc::pipeline::object::CollisionGroups::new();
let contacts_query = nc::pipeline::object::GeometricQueryType::Contacts(0.0, 0.0);
// player 1
let player_entity = world.specs_world.create_entity()
.with(c::Position { point: util::point2(100.0, 300.0), rotation: 0.0 })
.with(c::Motion { velocity: util::vec2(0.0, 0.0), acceleration: util::vec2(0.0, 0.0), is_blocked: false, orientation: 0.0})
.with(c::Sprite { clip: graphics::Rect { x: 0.0, y: 0.0, h: 1.0, w: (76.0 / 384.0)}, scale: graphics::mint::Vector2 { x: 0.5f32, y: 0.5f32 }})
.build();
// other car
let car_entity = world.specs_world.create_entity()
.with(c::Position { point: util::point2(100.0, 100.0), rotation: 0.0 })
.with(c::Sprite { clip: graphics::Rect { x: (76.0 / 384.0), y: 0.0, h: 1.0, w: (76.0 / 384.0)}, scale: graphics::mint::Vector2 { x: 0.5f32, y: 0.5f32 }})
.build();
// collisions
//
{
let mut collide_world = world.specs_world.write_resource::<nc::world::CollisionWorld<f32, specs::Entity>>();
let (player_collider_handle, _) = collide_world.add(
na::Isometry2::new(na::Vector2::new(100.0, 300.0), na::zero()),
nc::shape::ShapeHandle::new(car_shape.clone()),
car_collision_group,
contacts_query,
player_entity,
);
world.specs_world.write_storage::<c::Collider>().insert(player_entity, c::Collider { handle: player_collider_handle }).expect("couldn't insert Collider");
let (car_collider_handle, _) = collide_world.add(
na::Isometry2::new(na::Vector2::new(100.0, 100.0), na::zero()),
nc::shape::ShapeHandle::new(car_shape.clone()),
car_collision_group,
contacts_query,
car_entity,
);
world.specs_world.write_storage::<c::Collider>().insert(car_entity, c::Collider { handle: car_collider_handle }).expect("couldn't insert Collider");
}
let mut dispatcher = Self::register_systems();
dispatcher.setup(&mut world.specs_world.res);
LevelScene {
done,
car,
map,
player_entity,
dispatcher,
}
}
fn register_systems() -> specs::Dispatcher<'static, 'static> {
let builder = specs::DispatcherBuilder::new()
.with(MovementSystem, "sys_movement", &[])
.with(CollisionSystem, "sys_collision", &[]);
// builder.add_thread_local(RenderSystem);
builder.build()
}
fn update_collisions(&mut self, world: &mut World) {
let mut collide_world = world.specs_world.write_resource::<nc::world::CollisionWorld<f32, specs::Entity>>();
collide_world.update();
let mut motions = world.specs_world.write_storage::<c::Motion>();
// gameworld.collide_world.update();
for e in collide_world.contact_events() {
match e {
ncollide2d::pipeline::narrow_phase::ContactEvent::Started(handle1, handle2) =>
{
println!("contact started!");
// look up collision object
let obj1 = collide_world.collision_object(*handle1).expect("missing coll obj1");
// look up entity
let entity1: &specs::Entity = obj1.data();
if let Some(motion) = motions.get_mut(*entity1) {
motion.is_blocked = true;
motion.acceleration.y = motion.acceleration.y * -1.0;
motion.update();
}
let obj2 = collide_world.collision_object(*handle2).expect("missin coll obj2");
let entity2: &specs::Entity = obj2.data();
if let Some(motion) = motions.get_mut(*entity2) {
motion.is_blocked = true;
// want to use reflect here.
motion.acceleration.y = motion.acceleration.y * -1.0;
motion.update();
// motion.velocity = rotation.transform_vector(&player_motion.acceleration);
}
}
ncollide2d::pipeline::narrow_phase::ContactEvent::Stopped(handle1, handle2) =>
{
println!("contact ended");
let obj1 = collide_world.collision_object(*handle1).expect("missing coll obj1");
// look up entity
let entity1: &specs::Entity = obj1.data();
if let Some(motion) = motions.get_mut(*entity1) {
motion.is_blocked = false;
}
let obj2 = collide_world.collision_object(*handle2).expect("missin coll obj2");
let entity2: &specs::Entity = obj2.data();
if let Some(motion) = motions.get_mut(*entity2) {
motion.is_blocked = false;
}
}
}
}
}
}
| fn update(&mut self, gameworld: &mut World, _ctx: &mut ggez::Context) -> scenes::Switch {
self.dispatcher.dispatch(&mut gameworld.specs_world.res);
self.update_collisions(gameworld);
if self.done {
scene::SceneSwitch::Pop
} else {
scene::SceneSwitch::None
}
}
fn draw(&mut self, gameworld: &mut World, ctx: &mut ggez::Context) -> ggez::GameResult<()> {
let pos = gameworld.specs_world.read_storage::<c::Position>();
let mut camera_offset = na::Point2::new(0.0, 0.0);
// todo: update this elsewhere
let player_point = pos.get(self.player_entity).unwrap().point;
// window is 800 x 600
if player_point.x > 400.0 {
if player_point.x < (self.map.width as f32 - 400.0) {
camera_offset.x = 400.0 - player_point.x;
} else {
camera_offset.x = self.map.width as f32 + 400.0;
}
}
if player_point.y > 300.0 {
camera_offset.y = 300.0 - player_point.y;
}
// map
graphics::draw(ctx, &self.map, graphics::DrawParam::default().dest(camera_offset)).unwrap();
// camera_draw(ctx, &self.map, graphics::DrawParam::default()).unwrap();
// sprites
let sprite = gameworld.specs_world.read_storage::<c::Sprite>();
let offset_x: f32 = 0.5;
let offset_y: f32 = 0.5;
for (p, s) in (&pos, &sprite).join() {
let mut params = graphics::DrawParam::default();
params.src = s.clip;
params.rotation = p.rotation;
params.scale = s.scale;
params.offset = na::Point2:: | // fn camera_draw(ctx: &mut ggez::Context, drawable: &graphics::Drawable, params: graphics::DrawParam) -> ggez::GameResult<()> {
// Ok(())
// }
impl scene::Scene<World, input::Event> for LevelScene { | random_line_split |
level.rs | 0;
pub struct LevelScene {
done: bool,
car: warmy::Res<resources::Image>,
map: Map,
player_entity: specs::Entity,
dispatcher: specs::Dispatcher<'static, 'static>,
}
impl LevelScene {
pub fn new(ctx: &mut ggez::Context, world: &mut World) -> Self {
let done = false;
let car = world
.resources
.get::<resources::Image>(&resources::Key::from_path("/images/cars-spritesheet.png"), ctx)
.unwrap();
let mut load_image = |ctx: &mut ggez::Context, path: &str| -> graphics::Image {
let mut path_in_resources = String::from("/");
path_in_resources.push_str(path);
graphics::Image::new(ctx, path_in_resources).unwrap()
};
let tiled_map = tiled::parse_file(&std::path::Path::new("resources/map1.tmx")).unwrap();
let map = ggez_goodies::tilemap::Map::from_tiled(
ctx,
tiled_map,
&mut load_image
);
let half_height = 76.0 / 2.0;
let half_width = 76.0 / 4.0;
// shared collision properties
let car_shape = nc::shape::Cuboid::new(na::Vector2::new(half_width, half_height));
let car_collision_group = nc::pipeline::object::CollisionGroups::new();
let contacts_query = nc::pipeline::object::GeometricQueryType::Contacts(0.0, 0.0);
// player 1
let player_entity = world.specs_world.create_entity()
.with(c::Position { point: util::point2(100.0, 300.0), rotation: 0.0 })
.with(c::Motion { velocity: util::vec2(0.0, 0.0), acceleration: util::vec2(0.0, 0.0), is_blocked: false, orientation: 0.0})
.with(c::Sprite { clip: graphics::Rect { x: 0.0, y: 0.0, h: 1.0, w: (76.0 / 384.0)}, scale: graphics::mint::Vector2 { x: 0.5f32, y: 0.5f32 }})
.build();
// other car
let car_entity = world.specs_world.create_entity()
.with(c::Position { point: util::point2(100.0, 100.0), rotation: 0.0 })
.with(c::Sprite { clip: graphics::Rect { x: (76.0 / 384.0), y: 0.0, h: 1.0, w: (76.0 / 384.0)}, scale: graphics::mint::Vector2 { x: 0.5f32, y: 0.5f32 }})
.build();
// collisions
//
{
let mut collide_world = world.specs_world.write_resource::<nc::world::CollisionWorld<f32, specs::Entity>>();
let (player_collider_handle, _) = collide_world.add(
na::Isometry2::new(na::Vector2::new(100.0, 300.0), na::zero()),
nc::shape::ShapeHandle::new(car_shape.clone()),
car_collision_group,
contacts_query,
player_entity,
);
world.specs_world.write_storage::<c::Collider>().insert(player_entity, c::Collider { handle: player_collider_handle }).expect("couldn't insert Collider");
let (car_collider_handle, _) = collide_world.add(
na::Isometry2::new(na::Vector2::new(100.0, 100.0), na::zero()),
nc::shape::ShapeHandle::new(car_shape.clone()),
car_collision_group,
contacts_query,
car_entity,
);
world.specs_world.write_storage::<c::Collider>().insert(car_entity, c::Collider { handle: car_collider_handle }).expect("couldn't insert Collider");
}
let mut dispatcher = Self::register_systems();
dispatcher.setup(&mut world.specs_world.res);
LevelScene {
done,
car,
map,
player_entity,
dispatcher,
}
}
fn | () -> specs::Dispatcher<'static, 'static> {
let builder = specs::DispatcherBuilder::new()
.with(MovementSystem, "sys_movement", &[])
.with(CollisionSystem, "sys_collision", &[]);
// builder.add_thread_local(RenderSystem);
builder.build()
}
fn update_collisions(&mut self, world: &mut World) {
let mut collide_world = world.specs_world.write_resource::<nc::world::CollisionWorld<f32, specs::Entity>>();
collide_world.update();
let mut motions = world.specs_world.write_storage::<c::Motion>();
// gameworld.collide_world.update();
for e in collide_world.contact_events() {
match e {
ncollide2d::pipeline::narrow_phase::ContactEvent::Started(handle1, handle2) =>
{
println!("contact started!");
// look up collision object
let obj1 = collide_world.collision_object(*handle1).expect("missing coll obj1");
// look up entity
let entity1: &specs::Entity = obj1.data();
if let Some(motion) = motions.get_mut(*entity1) {
motion.is_blocked = true;
motion.acceleration.y = motion.acceleration.y * -1.0;
motion.update();
}
let obj2 = collide_world.collision_object(*handle2).expect("missin coll obj2");
let entity2: &specs::Entity = obj2.data();
if let Some(motion) = motions.get_mut(*entity2) {
motion.is_blocked = true;
// want to use reflect here.
motion.acceleration.y = motion.acceleration.y * -1.0;
motion.update();
// motion.velocity = rotation.transform_vector(&player_motion.acceleration);
}
}
ncollide2d::pipeline::narrow_phase::ContactEvent::Stopped(handle1, handle2) =>
{
println!("contact ended");
let obj1 = collide_world.collision_object(*handle1).expect("missing coll obj1");
// look up entity
let entity1: &specs::Entity = obj1.data();
if let Some(motion) = motions.get_mut(*entity1) {
motion.is_blocked = false;
}
let obj2 = collide_world.collision_object(*handle2).expect("missin coll obj2");
let entity2: &specs::Entity = obj2.data();
if let Some(motion) = motions.get_mut(*entity2) {
motion.is_blocked = false;
}
}
}
}
}
}
// fn camera_draw(ctx: &mut ggez::Context, drawable: &graphics::Drawable, params: graphics::DrawParam) -> ggez::GameResult<()> {
// Ok(())
// }
impl scene::Scene<World, input::Event> for LevelScene {
fn update(&mut self, gameworld: &mut World, _ctx: &mut ggez::Context) -> scenes::Switch {
self.dispatcher.dispatch(&mut gameworld.specs_world.res);
self.update_collisions(gameworld);
if self.done {
scene::SceneSwitch::Pop
} else {
scene::SceneSwitch::None
}
}
fn draw(&mut self, gameworld: &mut World, ctx: &mut ggez::Context) -> ggez::GameResult<()> {
let pos = gameworld.specs_world.read_storage::<c::Position>();
let mut camera_offset = na::Point2::new(0.0, 0.0);
// todo: update this elsewhere
let player_point = pos.get(self.player_entity).unwrap().point;
// window is 800 x 600
if player_point.x > 400.0 {
if player_point.x < (self.map.width as f32 - 400.0) {
camera_offset.x = 400.0 - player_point.x;
} else {
camera_offset.x = self.map.width as f32 + 400.0;
}
}
if player_point.y > 300.0 {
camera_offset.y = 300.0 - player_point.y;
}
// map
graphics::draw(ctx, &self.map, graphics::DrawParam::default().dest(camera_offset)).unwrap();
// camera_draw(ctx, &self.map, graphics::DrawParam::default()).unwrap();
// sprites
let sprite = gameworld.specs_world.read_storage::<c::Sprite>();
let offset_x: f32 = 0.5;
let offset_y: f32 = 0.5;
for (p, s) in (&pos, &sprite).join() {
let mut params = graphics::DrawParam::default();
params.src = s.clip;
params.rotation = p.rotation;
params.scale = s.scale;
params.offset = na::Point | register_systems | identifier_name |
lib.rs | ::Start => {
svg_text.assign("text-anchor", "start");
}
Anchor::Middle => {
svg_text.assign("text-anchor", "middle");
}
Anchor::End => {
svg_text.assign("text-anchor", "end");
}
};
let text_node = TextNode::new(s);
svg_text.append(text_node);
svg_text
}
/// The head of the meme
/// the face is the string in between
/// used in detecting if it's a valid meme or not
#[derive(Clone,Debug)]
struct Head{
// character position
start_position: usize,
// left x location x1
startx: usize,
face: String,
// right x location x2
endx: usize,
// end position
end_position: usize
}
impl Head{
fn distance(&self) -> usize {
self.endx - self.startx
}
fn get_svg_elements(&self, y: usize, settings:&Settings) -> Vec<Box<Node>> {
let mut elements: Vec<Box<Node>> = vec![];
elements.push(Box::new(self.get_circle(y, settings)));
elements.push(Box::new(self.get_face_text(y, settings)));
elements
}
fn get_face_text(&self, y:usize, settings: &Settings) -> SvgText{
let c = self.calc_circle(y, settings);
let sy = y as f32 * settings.text_height;
let face = format!("<tspan class='head'>(</tspan>{}<tspan class='head'>)</tspan>", escape_str(&self.face));
to_svg_text_pixel_escaped(&face, c.cx, sy, settings, Anchor::Middle)
}
fn calc_circle(&self, y:usize, settings: &Settings) -> Circle {
let text_width = settings.text_width;
let text_height = settings.text_height;
let radius = self.distance() as f32 / 2.0;
let center = self.startx as f32 + radius;
let cx = center * text_width;
let cy = y as f32 * text_height + text_height / 2.0;
let cr = radius * text_width;
Circle{
cx: cx,
cy: cy,
r: cr
}
}
fn get_circle(&self, y: usize, settings: &Settings)-> SvgCircle{
let c = self.calc_circle(y, settings);
let (offsetx, offsety) = settings.offset();
SvgCircle::new()
.set("cx",c.cx + offsetx)
.set("cy", c.cy + offsety)
.set("class", "donger")
.set("r", c.r)
}
}
#[derive(Debug)]
struct Circle{
cx: f32,
cy: f32,
r: f32,
}
/// detect whether the string could be a meme
/// has at least 1 full width character (width = 2)
/// has at least 1 zero sized width character (width = 0)
/// has at least 1 character that has more than 1 byte in size
/// unicode value is way up high
fn is_meme(ch: &str) -> bool{
let total_bytes = ch.len();
let total_width = ch.width();
let mut gte_bytes2 = 0;
let mut gte_width2 = 0;
let mut zero_width = 0;
let mut gte_unicode_1k = 0;
for c in ch.chars(){
if c as u32 >= 1000{
gte_unicode_1k += 1;
}
if c.len_utf8() >= 2{
gte_bytes2 += 1;
}
if let Some(uw) = c.width(){
if uw >= 2 {
gte_width2 += 1;
}
if uw == 0 {
zero_width += 1;
}
}
}
/*
println!("total_bytes: {}", total_bytes);
println!("gte_bytes2: {}", gte_bytes2);
println!("gte_width2: {}", gte_width2);
println!("zero_width: {}", zero_width);
println!("gte_unicode_1k {}", gte_unicode_1k);
println!("total_width: {}", total_width);
println!("");
*/
total_width <= 10 && // must be at most 10 character face
(gte_bytes2 > 0 || gte_width2 > 0
|| zero_width > 0 || gte_unicode_1k > 0
|| total_bytes > total_width
|| !is_expression(ch)
)
}
fn calc_dimension(s: &str) -> (usize, usize) {
let mut longest = 0;
for line in s.lines(){
let line_width = line.width();
if line_width > longest{
longest = line_width
}
}
let line_count = s.lines().count();
(longest, line_count)
}
/// return an SVG document based on the text info string
pub fn to_svg(s: &str, text_width: f32, text_height: f32) -> SVG {
let settings = &Settings{
text_width: text_width,
text_height: text_height,
};
let mut svg = SVG::new()
.set("font-size", 14)
.set("font-family", "arial");
svg.append(get_styles());
let nodes = to_svg_lines(s,settings);
for elm in nodes{
let text_node = TextNode::new(elm.to_string());
svg.append(text_node);
}
let (offsetx, offsety) = settings.offset();
let (wide, high) = calc_dimension(s);
let width = wide as f32 * text_width + offsetx;
let height = (high + 2 ) as f32 * text_height + offsety;
svg.assign("width", width);
svg.assign("height", height);
svg
}
fn get_styles() -> Style {
let style = r#"
line, path {
stroke: black;
stroke-width: 2;
stroke-opacity: 1;
fill-opacity: 1;
stroke-linecap: round;
stroke-linejoin: miter;
}
circle {
stroke: black;
stroke-width: 2;
stroke-opacity: 1;
fill-opacity: 1;
stroke-linecap: round;
stroke-linejoin: miter;
fill:white;
}
circle.donger{
stroke-width: 1;
fill: white;
}
tspan.head{
fill: none;
stroke: none;
}
"#;
Style::new(style)
}
/// process and parse each line
fn to_svg_lines(s: &str, settings: &Settings) -> Vec<Box<Node>> {
let mut elements = vec![];
let mut y = 0;
for line in s.lines(){
let line_elm = get_svg_elements(y, line, settings);
elements.extend(line_elm);
y += 1;
}
elements
}
/// process only 1 line
fn get_svg_elements(y: usize, s: &str, settings: &Settings) -> Vec<Box<Node>> {
let body = parse_memes(s);
body.get_svg_elements(y, &settings)
}
/// return the SVG nodes per line and all the assembled rest of the string that is not a part of the memes
pub fn get_meme_svg(input: &str, text_width: f32, text_height: f32) -> (Vec<Box<Node>>, String, Style) {
let settings = &Settings{
text_width: text_width,
text_height: text_height,
};
let mut svg_elements:Vec<Box<Node + 'static>> = vec![];
let mut relines = String::new();
let mut y = 0;
for line in input.lines(){
match line_to_svg_with_excess_str(y, line, settings){
Some((svg_elm, rest_text)) => {
relines.push_str(&rest_text);
relines.push('\n');
svg_elements.extend(svg_elm);
},
None => |
}
y += 1;
}
(svg_elements, relines, get_styles())
}
/// parse the memes and return the svg together with the unmatched strings
fn line_to_svg_with_excess_str(y: usize, s: &str, settings:&Settings) -> Option<(Vec<Box<Node>>, String)>{
let body = parse_memes(s);
if body.has_memes(){
let nodes = body.get_svg_elements(y, settings);
Some((nodes, body.unify_rest_text()))
}else{
None
}
}
#[test]
fn test_1line(){
let meme = "";
let nodes = get_svg_elements(0, meme, &Settings::default());
assert_eq!(nodes.len(), 0);
}
/// TODO: include parsing the rest of the unused text
fn parse_memes(s: &str) -> Body{
let mut memes = vec![];
let mut paren_opened = false;
let mut meme_face = String::new();
let mut index = 0;
let mut total_width = 0;
let mut face_markers:Vec<Head> = | {
relines.push_str(line);
relines.push('\n');
} | conditional_block |
lib.rs | (s: &str, x: f32, y: f32, settings: &Settings, anchor: Anchor) -> SvgText {
to_svg_text_pixel_escaped(&escape_str(s), x, y, settings, anchor)
}
fn to_svg_text_pixel_escaped(s: &str, x: f32, y: f32, settings: &Settings, anchor: Anchor) -> SvgText {
let (offsetx, offsety) = settings.offset();
let sx = x + offsetx;
let sy = y + settings.text_height * 3.0 / 4.0 + offsety;
let mut svg_text = SvgText::new()
.set("x", sx)
.set("y", sy);
match anchor{
Anchor::Start => {
svg_text.assign("text-anchor", "start");
}
Anchor::Middle => {
svg_text.assign("text-anchor", "middle");
}
Anchor::End => {
svg_text.assign("text-anchor", "end");
}
};
let text_node = TextNode::new(s);
svg_text.append(text_node);
svg_text
}
/// The head of the meme
/// the face is the string in between
/// used in detecting if it's a valid meme or not
#[derive(Clone,Debug)]
struct Head{
// character position
start_position: usize,
// left x location x1
startx: usize,
face: String,
// right x location x2
endx: usize,
// end position
end_position: usize
}
impl Head{
fn distance(&self) -> usize {
self.endx - self.startx
}
fn get_svg_elements(&self, y: usize, settings:&Settings) -> Vec<Box<Node>> {
let mut elements: Vec<Box<Node>> = vec![];
elements.push(Box::new(self.get_circle(y, settings)));
elements.push(Box::new(self.get_face_text(y, settings)));
elements
}
fn get_face_text(&self, y:usize, settings: &Settings) -> SvgText{
let c = self.calc_circle(y, settings);
let sy = y as f32 * settings.text_height;
let face = format!("<tspan class='head'>(</tspan>{}<tspan class='head'>)</tspan>", escape_str(&self.face));
to_svg_text_pixel_escaped(&face, c.cx, sy, settings, Anchor::Middle)
}
fn calc_circle(&self, y:usize, settings: &Settings) -> Circle {
let text_width = settings.text_width;
let text_height = settings.text_height;
let radius = self.distance() as f32 / 2.0;
let center = self.startx as f32 + radius;
let cx = center * text_width;
let cy = y as f32 * text_height + text_height / 2.0;
let cr = radius * text_width;
Circle{
cx: cx,
cy: cy,
r: cr
}
}
fn get_circle(&self, y: usize, settings: &Settings)-> SvgCircle{
let c = self.calc_circle(y, settings);
let (offsetx, offsety) = settings.offset();
SvgCircle::new()
.set("cx",c.cx + offsetx)
.set("cy", c.cy + offsety)
.set("class", "donger")
.set("r", c.r)
}
}
#[derive(Debug)]
struct Circle{
cx: f32,
cy: f32,
r: f32,
}
/// detect whether the string could be a meme
/// has at least 1 full width character (width = 2)
/// has at least 1 zero sized width character (width = 0)
/// has at least 1 character that has more than 1 byte in size
/// unicode value is way up high
fn is_meme(ch: &str) -> bool{
let total_bytes = ch.len();
let total_width = ch.width();
let mut gte_bytes2 = 0;
let mut gte_width2 = 0;
let mut zero_width = 0;
let mut gte_unicode_1k = 0;
for c in ch.chars(){
if c as u32 >= 1000{
gte_unicode_1k += 1;
}
if c.len_utf8() >= 2{
gte_bytes2 += 1;
}
if let Some(uw) = c.width(){
if uw >= 2 {
gte_width2 += 1;
}
if uw == 0 {
zero_width += 1;
}
}
}
/*
println!("total_bytes: {}", total_bytes);
println!("gte_bytes2: {}", gte_bytes2);
println!("gte_width2: {}", gte_width2);
println!("zero_width: {}", zero_width);
println!("gte_unicode_1k {}", gte_unicode_1k);
println!("total_width: {}", total_width);
println!("");
*/
total_width <= 10 && // must be at most 10 character face
(gte_bytes2 > 0 || gte_width2 > 0
|| zero_width > 0 || gte_unicode_1k > 0
|| total_bytes > total_width
|| !is_expression(ch)
)
}
fn calc_dimension(s: &str) -> (usize, usize) {
let mut longest = 0;
for line in s.lines(){
let line_width = line.width();
if line_width > longest{
longest = line_width
}
}
let line_count = s.lines().count();
(longest, line_count)
}
/// return an SVG document based on the text info string
pub fn to_svg(s: &str, text_width: f32, text_height: f32) -> SVG {
let settings = &Settings{
text_width: text_width,
text_height: text_height,
};
let mut svg = SVG::new()
.set("font-size", 14)
.set("font-family", "arial");
svg.append(get_styles());
let nodes = to_svg_lines(s,settings);
for elm in nodes{
let text_node = TextNode::new(elm.to_string());
svg.append(text_node);
}
let (offsetx, offsety) = settings.offset();
let (wide, high) = calc_dimension(s);
let width = wide as f32 * text_width + offsetx;
let height = (high + 2 ) as f32 * text_height + offsety;
svg.assign("width", width);
svg.assign("height", height);
svg
}
fn get_styles() -> Style {
let style = r#"
line, path {
stroke: black;
stroke-width: 2;
stroke-opacity: 1;
fill-opacity: 1;
stroke-linecap: round;
stroke-linejoin: miter;
}
circle {
stroke: black;
stroke-width: 2;
stroke-opacity: 1;
fill-opacity: 1;
stroke-linecap: round;
stroke-linejoin: miter;
fill:white;
}
circle.donger{
stroke-width: 1;
fill: white;
}
tspan.head{
fill: none;
stroke: none;
}
"#;
Style::new(style)
}
/// process and parse each line
fn to_svg_lines(s: &str, settings: &Settings) -> Vec<Box<Node>> {
let mut elements = vec![];
let mut y = 0;
for line in s.lines(){
let line_elm = get_svg_elements(y, line, settings);
elements.extend(line_elm);
y += 1;
}
elements
}
/// process only 1 line
fn get_svg_elements(y: usize, s: &str, settings: &Settings) -> Vec<Box<Node>> {
let body = parse_memes(s);
body.get_svg_elements(y, &settings)
}
/// return the SVG nodes per line and all the assembled rest of the string that is not a part of the memes
pub fn get_meme_svg(input: &str, text_width: f32, text_height: f32) -> (Vec<Box<Node>>, String, Style) {
let settings = &Settings{
text_width: text_width,
text_height: text_height,
};
let mut svg_elements:Vec<Box<Node + 'static>> = vec![];
let mut relines = String::new();
let mut y = 0;
for line in input.lines(){
match line_to_svg_with_excess_str(y, line, settings){
Some((svg_elm, rest_text)) => {
relines.push_str(&rest_text);
relines.push('\n');
svg_elements.extend(svg_elm);
},
None => {
relines.push_str(line);
relines.push('\n');
}
}
y += 1;
}
(svg_elements, relines, get_styles())
}
/// parse the memes and return the svg together with the unmatched strings
fn line_to_svg_with_excess_str(y: usize, s: &str, settings:&Settings) -> Option<(Vec<Box<Node>>, String)>{
let | to_svg_text_pixel | identifier_name |
|
lib.rs | )
.set("class", "donger")
.set("r", c.r)
}
}
#[derive(Debug)]
struct Circle{
cx: f32,
cy: f32,
r: f32,
}
/// detect whether the string could be a meme
/// has at least 1 full width character (width = 2)
/// has at least 1 zero sized width character (width = 0)
/// has at least 1 character that has more than 1 byte in size
/// unicode value is way up high
fn is_meme(ch: &str) -> bool{
let total_bytes = ch.len();
let total_width = ch.width();
let mut gte_bytes2 = 0;
let mut gte_width2 = 0;
let mut zero_width = 0;
let mut gte_unicode_1k = 0;
for c in ch.chars(){
if c as u32 >= 1000{
gte_unicode_1k += 1;
}
if c.len_utf8() >= 2{
gte_bytes2 += 1;
}
if let Some(uw) = c.width(){
if uw >= 2 {
gte_width2 += 1;
}
if uw == 0 {
zero_width += 1;
}
}
}
/*
println!("total_bytes: {}", total_bytes);
println!("gte_bytes2: {}", gte_bytes2);
println!("gte_width2: {}", gte_width2);
println!("zero_width: {}", zero_width);
println!("gte_unicode_1k {}", gte_unicode_1k);
println!("total_width: {}", total_width);
println!("");
*/
total_width <= 10 && // must be at most 10 character face
(gte_bytes2 > 0 || gte_width2 > 0
|| zero_width > 0 || gte_unicode_1k > 0
|| total_bytes > total_width
|| !is_expression(ch)
)
}
fn calc_dimension(s: &str) -> (usize, usize) {
let mut longest = 0;
for line in s.lines(){
let line_width = line.width();
if line_width > longest{
longest = line_width
}
}
let line_count = s.lines().count();
(longest, line_count)
}
/// return an SVG document based on the text info string
pub fn to_svg(s: &str, text_width: f32, text_height: f32) -> SVG {
let settings = &Settings{
text_width: text_width,
text_height: text_height,
};
let mut svg = SVG::new()
.set("font-size", 14)
.set("font-family", "arial");
svg.append(get_styles());
let nodes = to_svg_lines(s,settings);
for elm in nodes{
let text_node = TextNode::new(elm.to_string());
svg.append(text_node);
}
let (offsetx, offsety) = settings.offset();
let (wide, high) = calc_dimension(s);
let width = wide as f32 * text_width + offsetx;
let height = (high + 2 ) as f32 * text_height + offsety;
svg.assign("width", width);
svg.assign("height", height);
svg
}
fn get_styles() -> Style {
let style = r#"
line, path {
stroke: black;
stroke-width: 2;
stroke-opacity: 1;
fill-opacity: 1;
stroke-linecap: round;
stroke-linejoin: miter;
}
circle {
stroke: black;
stroke-width: 2;
stroke-opacity: 1;
fill-opacity: 1;
stroke-linecap: round;
stroke-linejoin: miter;
fill:white;
}
circle.donger{
stroke-width: 1;
fill: white;
}
tspan.head{
fill: none;
stroke: none;
}
"#;
Style::new(style)
}
/// process and parse each line
fn to_svg_lines(s: &str, settings: &Settings) -> Vec<Box<Node>> {
let mut elements = vec![];
let mut y = 0;
for line in s.lines(){
let line_elm = get_svg_elements(y, line, settings);
elements.extend(line_elm);
y += 1;
}
elements
}
/// process only 1 line
fn get_svg_elements(y: usize, s: &str, settings: &Settings) -> Vec<Box<Node>> {
let body = parse_memes(s);
body.get_svg_elements(y, &settings)
}
/// return the SVG nodes per line and all the assembled rest of the string that is not a part of the memes
pub fn get_meme_svg(input: &str, text_width: f32, text_height: f32) -> (Vec<Box<Node>>, String, Style) {
let settings = &Settings{
text_width: text_width,
text_height: text_height,
};
let mut svg_elements:Vec<Box<Node + 'static>> = vec![];
let mut relines = String::new();
let mut y = 0;
for line in input.lines(){
match line_to_svg_with_excess_str(y, line, settings){
Some((svg_elm, rest_text)) => {
relines.push_str(&rest_text);
relines.push('\n');
svg_elements.extend(svg_elm);
},
None => {
relines.push_str(line);
relines.push('\n');
}
}
y += 1;
}
(svg_elements, relines, get_styles())
}
/// parse the memes and return the svg together with the unmatched strings
fn line_to_svg_with_excess_str(y: usize, s: &str, settings:&Settings) -> Option<(Vec<Box<Node>>, String)>{
let body = parse_memes(s);
if body.has_memes(){
let nodes = body.get_svg_elements(y, settings);
Some((nodes, body.unify_rest_text()))
}else{
None
}
}
#[test]
fn test_1line(){
let meme = "";
let nodes = get_svg_elements(0, meme, &Settings::default());
assert_eq!(nodes.len(), 0);
}
/// TODO: include parsing the rest of the unused text
fn parse_memes(s: &str) -> Body{
let mut memes = vec![];
let mut paren_opened = false;
let mut meme_face = String::new();
let mut index = 0;
let mut total_width = 0;
let mut face_markers:Vec<Head> = vec![];
let mut startx = 0;
let mut start_position = 0;
let mut meme_start = 0;
let mut meme_body = String::new();
let mut meme_left_side = String::new();
let mut meme_right_side = String::new();
let mut meme_head = None;
let total_chars = s.chars().count();
let mut rest_text:Vec<(usize, String)> = vec![];
for ch in s.chars(){
let last_char = index == total_chars - 1;
if meme_head.is_some(){
meme_right_side.push(ch);
}
if paren_opened && ch == ')'{ //if paren_opened and encountered a closing
paren_opened = false;
if is_meme(&meme_face){
let head = Head{
start_position: start_position,
startx: startx,
face: meme_face.clone(),
end_position: index,
endx: total_width,
};
meme_head = Some(head.clone());
face_markers.push(head);
meme_face.clear();
}
}
if paren_opened{
meme_face.push(ch);
}
if ch == '('{
paren_opened = true;
startx = total_width;
start_position = index;
meme_left_side = meme_body.clone();
meme_face.clear();
}
if meme_head.is_none() && (ch == ' ' || last_char){
meme_start = index + 1;
if !paren_opened{
let mut rest_word = meme_body.clone();
let rest_start = total_width - rest_word.width();
if last_char{
rest_word.push(ch);
rest_word.push_str(&meme_face);//the head is unmatched
}
rest_text.push((rest_start, rest_word));
}
meme_body.clear();
}
if meme_head.is_some() && (ch == ' ' || last_char){
let meme = Meme{
start_position: meme_start,
head: meme_head.clone().unwrap(),
end_position: index,
left_side: meme_left_side.clone(),
right_side: meme_right_side.clone(),
};
memes.push(meme);
meme_right_side.clear();
meme_left_side.clear();
meme_body.clear();
meme_head = None;
}
meme_body.push(ch);
if let Some(uw) = ch.width(){
total_width += uw;
}
index += 1;
}
Body{
memes: memes,
rest_str: regroup_rest_text(&rest_text) | }
}
fn regroup_rest_text(rest_text: &Vec<(usize, String)>)->Vec<(usize, String)>{ | random_line_split |
|
utils.py | 角坐标] 格式
pred: network output, tensor
anchors: a list giving the anchor-box sizes.
In the official YOLOv2 config, anchors = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828],
i.e. 5 anchor boxes; the first anchor [w, h] is [0.57273, 0.677385] and the fifth is [9.77052, 9.16828]
Anchor sizes are expressed on the 13x13 feature map
num_classes: number of classes
Returns the predicted boxes pred_box; its shape is [N, H, W, num_anchors, 4], where 4 holds the box coordinates
"""
num_rows = pred.shape[-2]
num_cols = pred.shape[-1]
num_anchors = len(anchors) // 2
# pred has shape [batchsize, C, H, W], where C = num_anchors * (5 + num_classes)
# reshape pred accordingly
pred = pred.reshape([-1, num_anchors, 5 + num_classes, num_rows, num_cols])
pred = paddle.transpose(pred, perm=[0, 3, 4, 1, 2])
# slice out the location-related channels
pred_location = pred[:, :, :, :, 0:4]
anchors_this = []
for ind in range(num_anchors):
anchors_this.append([anchors[ind * 2], anchors[ind * 2 + 1]])
# anchors_this = np.array(anchors_this).astype('float32')
anchors_this = paddle.to_tensor(anchors_this)
pred_box = paddle.zeros(pred_location.shape)
# for b in range(batchsize):
for i in range(num_rows):
for j in range(num_cols):
for k in range(num_anchors):
pred_box[:, i, j, k, 0] = j # column index
pred_box[:, i, j, k, 1] = i # row index
pred_box[:, i, j, k, 2] = anchors_this[k][0] # anchor (prior) box width
pred_box[:, i, j, k, 3] = anchors_this[k][1] # anchor (prior) box height
# Relative coordinates are used here: pred_box values lie in 0.0~1.0, relative to the feature-map size
pred_box[:, :, :, :, 0] = (paddle.nn.functional.sigmoid(pred_location[:, :, :, :, 0]) + pred_box[:, :, :, :, 0]) / num_cols
pred_box[:, :, :, :, 1] = (paddle.nn.functional.sigmoid(pred_location[:, :, :, :, 1]) + pred_box[:, :, :, :, 1]) / num_rows
pred_box[:, :, :, :, 2] = paddle.exp(pred_location[:, :, :, :, 2]) * pred_box[:, :, :, :, 2] / num_cols
pred_box[:, :, :, :, 3] = paddle.exp(pred_location[:, :, :, :, 3]) * pred_box[:, :, :, :, 3] / num_rows
# Convert coordinates from xywh to xyxy, i.e. [top-left corner, bottom-right corner] format
pred_box[:, :, :, :, 0] = pred_box[:, :, :, :, 0] - pred_box[:, :, :, :, 2] / 2.
pred_box[:, :, :, :, 1] = pred_box[:, :, :, :, 1] - pred_box[:, :, :, :, 3] / 2.
pred_box[:, :, :, :, 2] = pred_box[:, :, :, :, 0] + pred_box[:, :, :, :, 2]
pred_box[:, :, :, :, 3] = pred_box[:, :, :, :, 1] + pred_box[:, :, :, :, 3]
return pred_box
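# Illustrative usage sketch (hypothetical values): with a 13x13 feature map,
# 5 anchors and 6 classes, the raw head has C = 5 * (5 + 6) = 55 channels and
# the decoded boxes come back with shape [N, 13, 13, 5, 4] in normalized xyxy.
def _yolo_box_decode_example():
    anchors = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843,
               5.47434, 7.88282, 3.52778, 9.77052, 9.16828]
    fake_pred = paddle.randn([2, 55, 13, 13])  # [N, C, H, W]
    boxes = get_yolo_box_xxyy(fake_pred, anchors, num_classes=6)
    return boxes.shape  # expected: [2, 13, 13, 5, 4]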
# Build the training labels
def get_label(pred, gt_bboxs, anchors, iou_threshold, step_less_12800, num_classes=6, rescore=False):
'''
pred: network output
gt_bboxs: ground-truth boxes, [class, x, y, w, h], where x, y, w, h are normalized
anchors: a list giving the anchor-box sizes.
In the official YOLOv2 config (COCO), anchors = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828],
i.e. 5 anchor boxes; the first anchor [w, h] is [0.57273, 0.677385] and the fifth is [9.77052, 9.16828]
Anchor sizes are expressed on the 13x13 feature map
step_less_12800: whether the training step count is still below 12800, bool
num_classes: number of classes
Returns:
label_objectness_confidence,label_location,label_classification,scale_location,object_mask,noobject_mask
'''
batchsize, _, h, w = pred.shape # h = w = 13, the feature map is 13x13
_, nums, c = gt_bboxs.shape # nums is the maximum number of objects in one image, c = 5
num_anchors = len(anchors) // 2 # num_anchors = 5
pred_box = get_yolo_box_xxyy(pred, anchors, num_classes) # get the predicted boxes; coordinates are in (top-left, bottom-right) format
# shape: [batchsize, 13, 13, num_anchors, 4]
pred_box = pred_box.numpy()
gt_bboxs = gt_bboxs.numpy() # shape = (batchsize, nums, 5)
anchors_copy = np.array(anchors).reshape((num_anchors, 2)) # shape = (num_anchors,2)
anchors_copy = np.expand_dims(anchors_copy, 0).repeat(batchsize, axis=0) # shape = (batchsize, num_anchors,2)
# print(anchors_copy.shape)
# print(anchors_copy)
label_objectness_confidence = np.zeros(shape=(batchsize, h, w, num_anchors), dtype='float32')
label_location = np.zeros(shape=(batchsize, h, w, num_anchors, 4), dtype='float32')
label_classification = np.zeros(shape=(batchsize, h, w, num_anchors, num_classes), dtype='float32')
scale_location = 0.01 * np.ones((batchsize, h, w, num_anchors), dtype='float32') # weighting factor for the localization loss
object_mask = np.zeros(shape=(batchsize, h, w, num_anchors), dtype='float32') # object (positive) mask
noobject_mask = np.ones(shape=(batchsize, h, w, num_anchors), dtype='float32') # no-object (negative) mask
# Predicted boxes that are not responsible for any object, but whose IoU with a ground-truth box exceeds iou_threshold (default 0.6), are excluded from every loss term
iou_above_thresh_indices = np.zeros((batchsize, h, w, num_anchors))
# When the training step count is below 12800, a location loss between the predicted boxes and the anchor priors is also computed
if (step_less_12800):
label_location[:, :, :, :, 0] = 0.5
label_location[:, :, :, :, 1] = 0.5
gt_cls = gt_bboxs[:, :, 0].astype(np.int32) # shape = (batchsize , nums,)
gt_center_x = gt_bboxs[:, :, 1] # shape = (batchsize * nums)
gt_center_y = gt_bboxs[:, :, 2]
gt_w = gt_bboxs[:, :, 3] # shape = (batchsize , nums,)
| gt_h = gt_bboxs[:, :, 4]
gtx_min = gt_center_x - gt_w / 2.0
gtx_max = gt_center_x + gt_w / 2.0
gty_min = gt_center_y - gt_h / 2.0
gty_max = gt_center_y + gt_h / 2.0
| random_line_split |
|
utils.py | ]
gt_bbox[i, 2] = bbox[i*5+2]
gt_bbox[i, 3] = bbox[i*5+3]
gt_bbox[i, 4] = bbox[i*5+4]
if i >= max_num:
break
return gt_bbox
def calculate_iou(bbox1,bbox2):
"""计算bbox1=(x1,y1,x2,y2)和bbox2=(x3,y3,x4,y4)两个bbox的iou"""
intersect_bbox = [0., 0., 0., 0.] # bbox1和bbox2的交集
if bbox1[2]<bbox2[0] or bbox1[0]>bbox2[2] or bbox1[3]<bbox2[1] or bbox1[1]>bbox2[3]:
pass
else:
intersect_bbox[0] = max(bbox1[0],bbox2[0])
intersect_bbox[1] = max(bbox1[1],bbox2[1])
intersect_bbox[2] = min(bbox1[2],bbox2[2])
intersect_bbox[3] = min(bbox1[3],bbox2[3])
area1 = (bbox1[2] - bbox1[0]) * (bbox1[3] - bbox1[1]) # area of bbox1
area2 = (bbox2[2] - bbox2[0]) * (bbox2[3] - bbox2[1]) # area of bbox2
area_intersect = (intersect_bbox[2] - intersect_bbox[0]) * (intersect_bbox[3] - intersect_bbox[1]) # intersection area
if area_intersect>0:
return area_intersect / (area1 + area2 - area_intersect) # compute the IoU
else:
return 0
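# Illustrative usage sketch (hypothetical boxes): two unit squares that overlap by
# half along x give IoU = 0.5 / (1 + 1 - 0.5) = 1/3.
def _iou_example():
    return calculate_iou([0.0, 0.0, 1.0, 1.0], [0.5, 0.0, 1.5, 1.0])  # ~0.333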
# Convert the network output [tx, ty, th, tw] into predicted-box coordinates [x1, y1, x2, y2]
def get_yolo_box_xxyy(pred, anchors, num_classes):
"""
Convert the network output [tx, ty, th, tw] into predicted-box coordinates [x1, y1, x2, y2], i.e. the [top-left corner, bottom-right corner] format
pred: network output, tensor
anchors: a list giving the anchor-box sizes.
YOLOv2官方配置文件中,anchors = [0.57273, 0. | 85, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828],
i.e. 5 anchor boxes; the first anchor [w, h] is [0.57273, 0.677385] and the fifth is [9.77052, 9.16828]
Anchor sizes are expressed on the 13x13 feature map
num_classes: number of classes
Returns the predicted boxes pred_box; its shape is [N, H, W, num_anchors, 4], where 4 holds the box coordinates
"""
num_rows = pred.shape[-2]
num_cols = pred.shape[-1]
num_anchors = len(anchors) // 2
# pred has shape [batchsize, C, H, W], where C = num_anchors * (5 + num_classes)
# reshape pred accordingly
pred = pred.reshape([-1, num_anchors, 5 + num_classes, num_rows, num_cols])
pred = paddle.transpose(pred, perm=[0, 3, 4, 1, 2])
# slice out the location-related channels
pred_location = pred[:, :, :, :, 0:4]
anchors_this = []
for ind in range(num_anchors):
anchors_this.append([anchors[ind * 2], anchors[ind * 2 + 1]])
# anchors_this = np.array(anchors_this).astype('float32')
anchors_this = paddle.to_tensor(anchors_this)
pred_box = paddle.zeros(pred_location.shape)
# for b in range(batchsize):
for i in range(num_rows):
for j in range(num_cols):
for k in range(num_anchors):
pred_box[:, i, j, k, 0] = j # column index
pred_box[:, i, j, k, 1] = i # row index
pred_box[:, i, j, k, 2] = anchors_this[k][0] # anchor (prior) box width
pred_box[:, i, j, k, 3] = anchors_this[k][1] # anchor (prior) box height
# Relative coordinates are used here: pred_box values lie in 0.0~1.0, relative to the feature-map size
pred_box[:, :, :, :, 0] = (paddle.nn.functional.sigmoid(pred_location[:, :, :, :, 0]) + pred_box[:, :, :, :, 0]) / num_cols
pred_box[:, :, :, :, 1] = (paddle.nn.functional.sigmoid(pred_location[:, :, :, :, 1]) + pred_box[:, :, :, :, 1]) / num_rows
pred_box[:, :, :, :, 2] = paddle.exp(pred_location[:, :, :, :, 2]) * pred_box[:, :, :, :, 2] / num_cols
pred_box[:, :, :, :, 3] = paddle.exp(pred_location[:, :, :, :, 3]) * pred_box[:, :, :, :, 3] / num_rows
# Convert coordinates from xywh to xyxy, i.e. [top-left corner, bottom-right corner] format
pred_box[:, :, :, :, 0] = pred_box[:, :, :, :, 0] - pred_box[:, :, :, :, 2] / 2.
pred_box[:, :, :, :, 1] = pred_box[:, :, :, :, 1] - pred_box[:, :, :, :, 3] / 2.
pred_box[:, :, :, :, 2] = pred_box[:, :, :, :, 0] + pred_box[:, :, :, :, 2]
pred_box[:, :, :, :, 3] = pred_box[:, :, :, :, 1] + pred_box[:, :, :, :, 3]
return pred_box
# Build the training labels
def get_label(pred, gt_bboxs, anchors, iou_threshold, step_less_12800, num_classes=6, rescore=False):
'''
pred: network output
gt_bboxs: ground-truth boxes, [class, x, y, w, h], where x, y, w, h are normalized
anchors: a list giving the anchor-box sizes.
In the official YOLOv2 config (COCO), anchors = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828],
i.e. 5 anchor boxes; the first anchor [w, h] is [0.57273, 0.677385] and the fifth is [9.77052, 9.16828]
Anchor sizes are expressed on the 13x13 feature map
step_less_12800: whether the training step count is still below 12800, bool
num_classes: number of classes
Returns:
label_objectness_confidence,label_location,label_classification,scale_location,object_mask,noobject_mask
'''
batchsize, _, h, w = pred.shape # h = w = 13, the feature map is 13x13
_, nums, c = gt_bboxs.shape # nums is the maximum number of objects in one image, c = 5
num_anchors = len(anchors) // 2 # num_anchors = 5
pred_box = get_yolo_box_xxyy(pred, anchors, num_classes) # get the predicted boxes; coordinates are in (top-left, bottom-right) format
# shape: [batchsize, 13, 13, num_anchors, 4]
pred_box = pred_box.numpy()
gt_bboxs = gt_bboxs.numpy() # shape = (batchsize, nums, 5)
anchors_copy = np.array(anchors).reshape((num_anchors, 2)) # shape = (num_anchors,2)
anchors_copy = np.expand_dims(anchors_copy, 0).repeat(batchsize, axis=0) # shape = (batchsize, num_anchors,2)
# print(anchors_copy.shape)
# | 6773 | conditional_block |
utils.py | gt_bbox[i, 2] = bbox[i*5+2]
gt_bbox[i, 3] = bbox[i*5+3]
gt_bbox[i, 4] = bbox[i*5+4]
if i >= max_num:
break
return gt_bbox
def calculate_iou(bbox1,bbox2):
"""计算bbox1=(x1,y1,x2,y2)和bbox2=(x3,y3,x4,y4)两个bbox的iou"""
intersect_bbox = [0., 0., 0., 0.] # bbox1和bbox2的交集
if bbox1[2]<bbox2[0] or bbox1[0]>bbox2[2] or bbox1[3]<bbox2[1] or bbox1[1]>bbox2[3]:
pass
else:
intersect_bbox[0] = max(bbox1[0],bbox2[0])
intersect_bbox[1] = max(bbox1[1],bbox2[1])
intersect_bbox[2] = min(bbox1[2],bbox2[2])
intersect_bbox[3] = min(bbox1[3],bbox2[3])
area1 = (bbox1[2] - bbox1[0]) * (bbox1[3] - bbox1[1]) # area of bbox1
area2 = (bbox2[2] - bbox2[0]) * (bbox2[3] - bbox2[1]) # area of bbox2
area_intersect = (intersect_bbox[2] - intersect_bbox[0]) * (intersect_bbox[3] - intersect_bbox[1]) # intersection area
if area_intersect>0:
return area_intersect / (area1 + area2 - area_intersect) # compute the IoU
else:
return 0
# Convert the network output [tx, ty, th, tw] into predicted-box coordinates [x1, y1, x2, y2]
def get_yolo_box_xxyy(pred, anchors, num_classes):
"""
Convert the network output [tx, ty, th, tw] into predicted-box coordinates [x1, y1, x2, y2], i.e. the [top-left corner, bottom-right corner] format
pred: network output, tensor
anchors: a list giving the anchor-box sizes.
In the official YOLOv2 config, anchors = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828],
i.e. 5 anchor boxes; the first anchor [w, h] is [0.57273, 0.677385] and the fifth is [9.77052, 9.16828]
Anchor sizes are expressed on the 13x13 feature map
num_classes: number of classes
Returns the predicted boxes pred_box; its shape is [N, H, W, num_anchors, 4], where 4 holds the box coordinates
"""
num_rows = pred.shape[-2]
num_cols = pred.shape[-1]
num_anchors = len(anchors) // 2
# pred has shape [batchsize, C, H, W], where C = num_anchors * (5 + num_classes)
# reshape pred accordingly
pred = pred.reshape([-1, num_anchors, 5 + num_classes, num_rows, num_cols])
pred = paddle.transpose(pred, perm=[0, 3, 4, 1, 2])
# slice out the location-related channels
pred_location = pred[:, :, :, :, 0:4]
anchors_this = []
for ind in range(num_anchors):
| # 将坐标从xywh转化成xyxy,也就是 [左上角坐标,右上角坐标] 格式
pred_box[:, :, :, :, 0] = pred_box[:, :, :, :, 0] - pred_box[:, :, :, :, 2] / 2.
pred_box[:, :, :, :, 1] = pred_box[:, :, :, :, 1] - pred_box[:, :, :, :, 3] / 2.
pred_box[:, :, :, :, 2] = pred_box[:, :, :, :, 0] + pred_box[:, :, :, :, 2]
pred_box[:, :, :, :, 3] = pred_box[:, :, :, :, 1] + pred_box[:, :, :, :, 3]
return pred_box
# Build the training labels
def get_label(pred, gt_bboxs, anchors, iou_threshold, step_less_12800, num_classes=6, rescore=False):
'''
pred: network output
gt_bboxs: ground-truth boxes, [class, x, y, w, h], where x, y, w, h are normalized
anchors: a list giving the anchor-box sizes.
In the official YOLOv2 config (COCO), anchors = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828],
i.e. 5 anchor boxes; the first anchor [w, h] is [0.57273, 0.677385] and the fifth is [9.77052, 9.16828]
Anchor sizes are expressed on the 13x13 feature map
step_less_12800: whether the training step count is still below 12800, bool
num_classes: number of classes
Returns:
label_objectness_confidence,label_location,label_classification,scale_location,object_mask,noobject_mask
'''
batchsize, _, h, w = pred.shape # h = w = 13, the feature map is 13x13
_, nums, c = gt_bboxs.shape # nums is the maximum number of objects in one image, c = 5
num_anchors = len(anchors) // 2 # num_anchors = 5
pred_box = get_yolo_box_xxyy(pred, anchors, num_classes) # get the predicted boxes; coordinates are in (top-left, bottom-right) format
# shape: [batchsize, 13, 13, num_anchors, 4]
pred_box = pred_box.numpy()
gt_bboxs = gt_bboxs.numpy() # shape = (batchsize, nums, 5)
anchors_copy = np.array(anchors).reshape((num_anchors, 2)) # shape = (num_anchors,2)
anchors_copy = np.expand_dims(anchors_copy, 0).repeat(batchsize, axis=0) # shape = (batchsize, num_anchors,2)
# print(anchors_copy.shape)
| anchors_this.append([anchors[ind * 2], anchors[ind * 2 + 1]])
# anchors_this = np.array(anchors_this).astype('float32')
anchors_this = paddle.to_tensor(anchors_this)
pred_box = paddle.zeros(pred_location.shape)
# for b in range(batchsize):
for i in range(num_rows):
for j in range(num_cols):
for k in range(num_anchors):
pred_box[:, i, j, k, 0] = j # column index
pred_box[:, i, j, k, 1] = i # row index
pred_box[:, i, j, k, 2] = anchors_this[k][0] # anchor (prior) box width
pred_box[:, i, j, k, 3] = anchors_this[k][1] # anchor (prior) box height
# Relative coordinates are used here: pred_box values lie in 0.0~1.0, relative to the feature-map size
pred_box[:, :, :, :, 0] = (paddle.nn.functional.sigmoid(pred_location[:, :, :, :, 0]) + pred_box[:, :, :, :, 0]) / num_cols
pred_box[:, :, :, :, 1] = (paddle.nn.functional.sigmoid(pred_location[:, :, :, :, 1]) + pred_box[:, :, :, :, 1]) / num_rows
pred_box[:, :, :, :, 2] = paddle.exp(pred_location[:, :, :, :, 2]) * pred_box[:, :, :, :, 2] / num_cols
pred_box[:, :, :, :, 3] = paddle.exp(pred_location[:, :, :, :, 3]) * pred_box[:, :, :, :, 3] / num_rows
| identifier_body |
utils.py | max = np.float64(xmax)
ymax = np.float64(ymax)
if xmax == xmin or ymax == ymin:
print(xml_file)
dataset.append([xmax - xmin, ymax - ymin]) # relative width and height
return np.array(dataset) # convert to a NumPy array
def bbox2tensor(bbox,max_num=30):
'''
bbox: label info, formatted as [cls,x,y,w,h, cls,x,y,w,h, cls,x,y,w,h]; every 5 elements form one label
max_num: maximum number of objects in one image; at most 30 objects by default
Returns the label info as a tensor
'''
gt_bbox = paddle.zeros(shape=[max_num, 5], dtype='float32')
for i in range(len(bbox)//5):
gt_bbox[i, 0] = bbox[i*5]
gt_bbox[i, 1] = bbox[i*5+1]
gt_bbox[i, 2] = bbox[i*5+2]
gt_bbox[i, 3] = bbox[i*5+3]
gt_bbox[i, 4] = bbox[i*5+4]
if i >= max_num:
break
return gt_bbox
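# Illustrative usage sketch (hypothetical labels): two flattened labels
# [cls, x, y, w, h, cls, x, y, w, h] are packed into a fixed-size [30, 5] tensor,
# with unused rows left as zeros.
def _bbox2tensor_example():
    flat = [0, 0.5, 0.5, 0.2, 0.3,
            2, 0.25, 0.75, 0.1, 0.1]
    return bbox2tensor(flat)  # paddle tensor of shape [30, 5]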
def calculate_iou(bbox1,bbox2):
"""计算bbox1=(x1,y1,x2,y2)和bbox2=(x3,y3,x4,y4)两个bbox的iou"""
intersect_bbox = [0., 0., 0., 0.] # bbox1和bbox2的交集
if bbox1[2]<bbox2[0] or bbox1[0]>bbox2[2] or bbox1[3]<bbox2[1] or bbox1[1]>bbox2[3]:
pass
else:
intersect_bbox[0] = max(bbox1[0],bbox2[0])
intersect_bbox[1] = max(bbox1[1],bbox2[1])
intersect_bbox[2] = min(bbox1[2],bbox2[2])
intersect_bbox[3] = min(bbox1[3],bbox2[3])
area1 = (bbox1[2] - bbox1[0]) * (bbox1[3] - bbox1[1]) # area of bbox1
area2 = (bbox2[2] - bbox2[0]) * (bbox2[3] - bbox2[1]) # area of bbox2
area_intersect = (intersect_bbox[2] - intersect_bbox[0]) * (intersect_bbox[3] - intersect_bbox[1]) # intersection area
if area_intersect>0:
return area_intersect / (area1 + area2 - area_intersect) # compute the IoU
else:
return 0
# Convert the network output [tx, ty, th, tw] into predicted-box coordinates [x1, y1, x2, y2]
def get_yolo_box_xxyy(pred, anchors, num_classes):
"""
Convert the network output [tx, ty, th, tw] into predicted-box coordinates [x1, y1, x2, y2], i.e. the [top-left corner, bottom-right corner] format
pred: network output, tensor
anchors: a list giving the anchor-box sizes.
In the official YOLOv2 config, anchors = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828],
i.e. 5 anchor boxes; the first anchor [w, h] is [0.57273, 0.677385] and the fifth is [9.77052, 9.16828]
Anchor sizes are expressed on the 13x13 feature map
num_classes: number of classes
Returns the predicted boxes pred_box; its shape is [N, H, W, num_anchors, 4], where 4 holds the box coordinates
"""
num_rows = pred.shape[-2]
num_cols = pred.shape[-1]
num_anchors = len(anchors) // 2
# pred has shape [batchsize, C, H, W], where C = num_anchors * (5 + num_classes)
# reshape pred accordingly
pred = pred.reshape([-1, num_anchors, 5 + num_classes, num_rows, num_cols])
pred = paddle.transpose(pred, perm=[0, 3, 4, 1, 2])
# slice out the location-related channels
pred_location = pred[:, :, :, :, 0:4]
anchors_this = []
for ind in range(num_anchors):
anchors_this.append([anchors[ind * 2], anchors[ind * 2 + 1]])
# anchors_this = np.array(anchors_this).astype('float32')
anchors_this = paddle.to_tensor(anchors_this)
pred_box = paddle.zeros(pred_location.shape)
# for b in range(batchsize):
for i in range(num_rows):
for j in range(num_cols):
for k in range(num_anchors):
pred_box[:, i, j, k, 0] = j # column index
pred_box[:, i, j, k, 1] = i # row index
pred_box[:, i, j, k, 2] = anchors_this[k][0] # anchor (prior) box width
pred_box[:, i, j, k, 3] = anchors_this[k][1] # anchor (prior) box height
# Relative coordinates are used here: pred_box values lie in 0.0~1.0, relative to the feature-map size
pred_box[:, :, :, :, 0] = (paddle.nn.functional.sigmoid(pred_location[:, :, :, :, 0]) + pred_box[:, :, :, :, 0]) / num_cols
pred_box[:, :, :, :, 1] = (paddle.nn.functional.sigmoid(pred_location[:, :, :, :, 1]) + pred_box[:, :, :, :, 1]) / num_rows
pred_box[:, :, :, :, 2] = paddle.exp(pred_location[:, :, :, :, 2]) * pred_box[:, :, :, :, 2] / num_cols
pred_box[:, :, :, :, 3] = paddle.exp(pred_location[:, :, :, :, 3]) * pred_box[:, :, :, :, 3] / num_rows
# Convert coordinates from xywh to xyxy, i.e. [top-left corner, bottom-right corner] format
pred_box[:, :, :, :, 0] = pred_box[:, :, :, :, 0] - pred_box[:, :, :, :, 2] / 2.
pred_box[:, :, :, :, 1] = pred_box[:, :, :, :, 1] - pred_box[:, :, :, :, 3] / 2.
pred_box[:, :, :, :, 2] = pred_box[:, :, :, :, 0] + pred_box[:, :, :, :, 2]
pred_box[:, :, :, :, 3] = pred_box[:, :, :, :, 1] + pred_box[:, :, :, :, 3]
return pred_box
# Build the training labels
def get_label(pred, gt_bboxs, anchors, iou_threshold, step_less_12800, num_classes=6, rescore=False):
'''
pred: network output
gt_bboxs: ground-truth boxes, [class, x, y, w, h], where x, y, w, h are normalized
anchors: a list giving the anchor-box sizes.
In the official YOLOv2 config (COCO), anchors = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828],
i.e. 5 anchor boxes; the first anchor [w, h] is [0.57273, 0.677385] and the fifth is [9.77052, 9.16828]
Anchor sizes are expressed on the 13x13 feature map
step_less_12800: whether the training step count is still below 12800, bool
num_classes: number of classes
Returns:
label_objectness_confidence,label_location,label_classification,scale_location,object_mask,noobject_mask
'''
batchsize, _, h, w = pred.shape # h = w = 13, the feature map is 13x13
_, nums, c = gt_bboxs.shape # nums is the maximum number of objects in one image, c = 5
|
td_learning.py |
alpha_fig3 = 0.001
color_fig3 = '#da008a'
# Parameters for generating figure 4
training_sets_fig4 = 100
training_sequences_fig4 = 10
alphas_fig4 = np.array(range(70)) / 100
lambdas_fig4 = [0, 0.3, 0.8, 1.0]
colors_fig4 = ['#da008a', '#7400e3', '#009bdf', '#c5c23d']
# Parameters for generating figure 5
training_sets_fig5 = 100
training_sequences_fig5 = 10
alphas_fig5 = np.array(range(71)) / 100
lambdas_fig5 = np.array(range(51)) / 50
color_fig5 = '#00ddac'
# Font sizes for all plots
title_size = 24
label_size = 18
tick_size = 16
legend_size = 16
# Define functions for generating plot data
#-----------------------------------------------------------------------------------------------
def ta | um_steps, start_step, seq_per_set, num_sets, seed=-1):
"""
Create a list of lists of training sequences for random walks.
:param num_steps: The number of steps in the random walk
:param start_step: The starting step of the sequences. -1 for random
:param seq_per_set: Number of training sequences in each training set
:param num_sets: Number of training sets
:param seed: The random seed to use for generating steps. Use -1 for no seed
:return training: Training data. Access a sequence (matrix) with training[set][seq]
"""
# Set the random seed, if supplied
if seed > 0:
np.random.seed(seed)
# Preallocate the entire training data list of lists of NumPy arrays
training = [[None] * seq_per_set for _ in range(num_sets)]  # independent inner list per set
# Iterate to build the training data randomly
for this_set in range(num_sets): # Each set
for seq in range(seq_per_set): # Each sequence
if start_step == -1: # Random start location
start_step = np.random.randint(1, num_steps)
# Initialize the sequence
step = start_step
sequence = np.zeros(num_steps).astype(int)
sequence[step] = 1
while (step != 0 and step != num_steps - 1): # While not in absorbing state
if np.random.uniform() >= 0.5: # Uniformly random left-vs-right step
step += 1 # Go right
else:
step -= 1 # Go left
# Generate the vector representing this step
this_sequence = np.zeros(num_steps).astype(int)
# Set the appropriate element to 1
this_sequence[step] = 1
# Add this step to the sequence
sequence = np.vstack((sequence, this_sequence))
# Assign the sequence to its position in the training data
training[this_set][seq] = sequence
return training
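# Illustrative usage sketch (hypothetical seed): one training set of 10 bounded
# random walks over 7 states, starting in the middle state (index 3). Each
# sequence is a matrix with one one-hot row per visited state.
def _walk_example():
    demo = take_a_walk(num_steps=7, start_step=3, seq_per_set=10, num_sets=1, seed=42)
    first_sequence = demo[0][0]
    return first_sequence.shape  # (walk length, 7)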
#-----------------------------------------------------------------------------------------------
def learn_game(t_seq, lambda_val, alpha, z_vals, p, verbose=False):
"""
Given a set of training data, perform repeated weight updates as in eq 4 in Sutton (1988)
:param t_seq: The input training sequence
:param lambda_val: The trace-decay parameter lambda for TD(lambda)
:param alpha: Learning rate
:param z_vals: A tuple of the form (r for state 0, r for state[-1])
:param p: The weights coming in
:param verbose: Set to True to see values generated at each step
:return delta_w: A NumPy vector of weight updates
"""
# Determine the number of steps taken in the sequence, by the number of rows
num_steps = t_seq.shape[0] - 1
# Number of non-terminal states
num_states = t_seq.shape[1] - 2
# Get the reward value
z = z_vals[t_seq[-1, -1]]
# Initialize the lambda sequence and weight updates
lambda_seq = np.ones(1)
just_one = np.ones(1)
delta_w = np.zeros(num_states)
# Chop off the reward step data
training = t_seq[:-1, 1:-1]
# Perform the weight updates
for step in range(num_steps):
these_steps = training[0:step + 1]
if verbose:
print('p =', p)
print('Training sequence:')
print(these_steps)
print('Lambda sequence:')
print(lambda_seq)
print('Lambda sequence * training sequence:')
print(np.sum(these_steps * lambda_seq[:, None], axis=0))
if step == num_steps - 1: # The step entering the absorbing state
if verbose: print("z =", z)
delta_p = z - np.dot(p, training[-1, :])
else: # Non-terminal state
delta_p = np.dot(p, training[step + 1, :]) - np.dot(p, training[step, :])
if verbose: print('delta_p =', delta_p)
delta_w += alpha * delta_p * np.sum(these_steps * lambda_seq[:, None], axis=0)
if verbose: print('delta_w =', delta_w)
lambda_seq = np.concatenate((lambda_seq * lambda_val, just_one))
return delta_w
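# Illustrative usage sketch (hypothetical hyperparameters): one TD(lambda) weight
# update for a single sequence, starting from the usual 0.5 initialization of the
# five non-terminal state values.
def _learn_game_example():
    walks = take_a_walk(num_steps=7, start_step=3, seq_per_set=1, num_sets=1, seed=7)
    weights = 0.5 * np.ones(5)
    delta_w = learn_game(t_seq=walks[0][0], lambda_val=0.3, alpha=0.01,
                         z_vals=(0, 1), p=weights)
    return weights + delta_w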
# Experiment code
#-----------------------------------------------------------------------------------------------
if run_3: # Generate figure 3
# Generate a random walk
training = take_a_walk(7, 3, training_sequences_fig3, training_sets_fig3, 27)
# Setup initial RMSE vector
RMSE_vector = np.zeros(101)
for lambda_it in range(101):
# Reset weights and deltas
weights = 0.5 * np.ones(5)
deltas = np.zeros(5)
for iteration in range(iterations_fig3):
for tset in range(training_sets_fig3):
for tseq in range(training_sequences_fig3):
deltas += learn_game(t_seq=training[tset][tseq], lambda_val=lambda_it / 100,
alpha=alpha_fig3, z_vals=(0, 1), p=weights)
weights += deltas
deltas = np.zeros(5)
RMSE_vector[lambda_it] = np.sqrt(((weights - actual_values) ** 2).mean())
print(str(lambda_it) + '% done')
# Plot RMSE vs lambda
plt.plot(RMSE_vector, color=color_fig3)
plt.ylabel('RMSE', fontsize=label_size)
plt.xlabel('λ', fontsize=label_size)
plt.xticks([0, 20, 40, 60, 80, 100], ['0.0', '0.2', '0.4', '0.6', '0.8', '1.0'],
fontsize=tick_size)
plt.yticks(fontsize=tick_size)
plt.title('Replication of Figure 3 in Sutton (1988)', fontsize=title_size)
if run_4: # Generate figure 4
# Generate a random walk
training = take_a_walk(7, 3, training_sequences_fig4, training_sets_fig4, 27)
# Setup initial RMSE vector
RMSE_mat = np.zeros((len(lambdas_fig4), len(alphas_fig4)))
for ix, lambda_it in enumerate(lambdas_fig4): # Iterate over each lambda value
print('Generating data for lambda = ' + str(lambda_it))
for ii, alpha in enumerate(alphas_fig4): # Iterate over each alpha value
for tset in range(training_sets_fig4):
weights = 0.5 * np.ones(5) # Reset the weights
for tseq in range(training_sequences_fig4):
# Generate the weights
weights += learn_game(t_seq=training[tset][tseq], lambda_val=lambda_it,
alpha=alpha, z_vals=(0, 1), p=weights)
# Add the new RMSE to the RMSE matrix
RMSE_mat[ix, ii] += np.sqrt(((np.round(weights, decimals=20) -
actual_values) ** 2).mean())
# Divide the RMSE matrix by the number of training sets to get the average per-set error
RMSE_mat /= training_sets_fig4 * training_sequences_fig4
# Plot RMSE vs alpha for each lambda
for ix, lambda_pl in enumerate(lambdas_fig4):
plt.plot(alphas_fig4, RMSE_mat[ix, :], label='λ = ' + str(lambda_pl),
color=colors_fig4[ix])
# Format and show the plot
plt.xlabel('α', fontsize=label_size)
plt.ylabel('RMSE', fontsize=label_size)
plt.legend(loc='best', fontsize=legend_size)
plt.title('Replication of Figure 4 in Sutton (1988)', fontsize=title_size)
plt.xticks(fontsize=tick_size)
plt.yticks(fontsize=tick_size)
if run_5: # Generate figure 5
# Generate a random walk
training = take_a_walk(7, 3, training_sequences_fig5, training_sets_fig5, 27)
td_learning.py
alpha_fig3 = 0.001
color_fig3 = '#da008a'
# Parameters for generating figure 4
training_sets_fig4 = 100
training_sequences_fig4 = 10
alphas_fig4 = np.array(range(70)) / 100
lambdas_fig4 = [0, 0.3, 0.8, 1.0]
colors_fig4 = ['#da008a', '#7400e3', '#009bdf', '#c5c23d']
# Parameters for generating figure 5
training_sets_fig5 = 100
training_sequences_fig5 = 10
alphas_fig5 = np.array(range(71)) / 100
lambdas_fig5 = np.array(range(51)) / 50
color_fig5 = '#00ddac'
# Font sizes for all plots
title_size = 24
label_size = 18
tick_size = 16
legend_size = 16
# Define functions for generating plot data
#-----------------------------------------------------------------------------------------------
def take_a_walk(num_steps, start_step, seq_per_set, num_sets, seed=-1):
"""
Create a list of lists of training sequences for random walks.
:param num_steps: The number of steps in the random walk
:param start_step: The starting step of the sequences. -1 for random
:param seq_per_set: Number of training sequences in each training set
:param num_sets: Number of training sets
:param seed: The random seed to use for generating steps. Use -1 for no seed
:return training: Training data. Access a sequence (matrix) with training[set][seq]
"""
# Set the random seed, if supplied
if seed > 0:
np.random.seed(seed)
# Preallocate the entire training data list of lists of NumPy arrays
training = num_sets * [seq_per_set * [None]]
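# NB: list multiplication reuses one shared inner list across all sets, so every
# training[this_set] ends up referring to the same seq_per_set slots.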
# Iterate to build the training data randomly
for this_set in range(num_sets): # Each set
for seq in range(seq_per_set): # Each sequence
if start_step == -1: # Random start location
start_step = np.random.randint(1, num_steps)
# Initialize the sequence
step = start_step
sequence = np.zeros(num_steps).astype(int)
sequence[step] = 1
while (step != 0 and step != num_steps - 1): # While not in absorbing state
if np.random.uniform() >= 0.5: # Uniformly random L v R step
step += 1 # Go right
else:
step -= 1 # Go left
# Generate the vector representing this step
this_sequence = np.zeros(num_steps).astype(int)
# Set the appropriate element to 1
this_sequence[step] = 1
# Add this step to the sequence
sequence = np.vstack((sequence, this_sequence))
# Assign the sequence to its position in the training data
training[this_set][seq] = sequence
return training
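# Illustrative sketch: a single seeded walk, just to show the returned structure
# (the seed value 1 here is arbitrary and not used by the experiments below).
_demo_walk = take_a_walk(num_steps=7, start_step=3, seq_per_set=1, num_sets=1, seed=1)
# _demo_walk[0][0] is an (n_visits x 7) array: its first row is [0 0 0 1 0 0 0] and its
# last row has the 1 in column 0 or column 6, the two absorbing states.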
#-----------------------------------------------------------------------------------------------
def learn_game(t_seq, lambda_val, alpha, z_vals, p, verbose=False):
"""
Given a set of training data, perform repeated weight updates as in eq 4 in Sutton (1988)
:param t_seq: The input training sequence
:param lambda_val: ...lambda.
:param alpha: Learning rate
:param z_vals: A tuple of the form (r for state 0, r for state[-1])
:param p: The weights coming in
:param verbose: Set to True to see values generated at each step
:return delta_w: A NumPy vector of weight values
"""
# Determine the number of steps taken in the sequence, by the number of rows
num_steps = t_seq.shape[0] - 1
# Number of non-terminal states
num_states = t_seq.shape[1] - 2
# Get the reward value
z = z_vals[t_seq[-1, -1]]
# Initialize the lambda sequence and weight updates
lambda_seq = np.ones(1)
just_one = np.ones(1)
delta_w = np.zeros(num_states)
# Chop off the reward step data
training = t_seq[:-1, 1:-1]
# Perform the weight updates
for step in range(num_steps):
these_steps = training[0:step + 1]
if verbose:
print('p =', p)
print('Training sequence:')
print(these_steps)
print('Lambda sequence:')
print(lambda_seq)
print('Lambda sequence * training sequence:')
print(np.sum(these_steps * lambda_seq[:, None], axis=0))
if step == num_steps - 1: # The step entering the absorbing state
if verbose: print("z =", z)
delta_p = z - np.dot(p, training[-1, :])
else: # Non-terminal state
delta_p = np.dot(p, training[step + 1, :]) - np.dot(p, training[step, :])
if verbose: print('delta_p =', delta_p)
delta_w += alpha * delta_p * np.sum(these_steps * lambda_seq[:, None], axis=0)
if verbose: print('delta_w =', delta_w)
lambda_seq = np.concatenate((lambda_seq * lambda_val, just_one))
return(delta_w)
# Experiment code
#-----------------------------------------------------------------------------------------------
if run_3: # Generate figure 3
# Generate a random walk
training = take_a_walk(7, 3, training_sequences_fig3, training_sets_fig3, 27)
# Setup initial RMSE vector
RMSE_vector = np.zeros(101)
for lambda_it in range(101):
# Reset weights and deltas
weights = 0.5 * np.ones(5)
deltas = np.zeros(5)
for iteration in range(iterations_fig3):
for tset in range(training_sets_fig3):
for tseq in range(training_sequences_fig3):
deltas += learn_game(t_seq=training[tset][tseq], lambda_val=lambda_it / 100,
alpha=alpha_fig3, z_vals=(0, 1), p=weights)
weights += deltas
deltas = np.zeros(5)
RMSE_vector[lambda_it] = np.sqrt(((weights - actual_values) ** 2).mean())
print(str(lambda_it) + '% done')
# Plot RMSE vs lambda
plt.plot(RMSE_vector, color=color_fig3)
plt.ylabel('RMSE', fontsize=label_size)
plt.xlabel('λ', fontsize=label_size)
plt.xticks([0, 20, 40, 60, 80, 100], ['0.0', '0.2', '0.4', '0.6', '0.8', '1.0'],
fontsize=tick_size)
plt.yticks(fontsize=tick_size)
plt.title('Replication of Figure 3 in Sutton (1988)', fontsize=title_size)
if run_4: # Generate figure 4
# Generate a random walk
training = take_a_walk(7, 3, training_sequences_fig4, training_sets_fig4, 27)
# Setup initial RMSE vector
RMSE_mat = np.zeros((len(lambdas_fig4), len(alphas_fig4)))
for ix, lambda_it in enumerate(lambdas_fig4): # Iterate over each lambda value
print('Generating data for lambda = ' + str(lambda_it))
for ii, alpha in enumerate(alphas_fig4): # Iterate over each alpha value
for tset in range(training_sets_fig4):
weights = 0.5 * np.ones(5) # Reset the weights
for tseq in range(training_sequences_fig4):
# Generate the weights
weights += learn_game(t_seq=training[tset][tseq], lambda_val=lambda_it,
alpha=alpha, z_vals=(0, 1), p=weights)
# Add the new RMSE to the RMSE matrix
RMSE_mat[ix, ii] += np.sqrt(((np.round(weights, decimals=20) -
actual_values) ** 2).mean())
# Divide the RMSE matrix by the number of training sets to get the average per-set error
RMSE_mat /= training_sets_fig4 * training_sequences_fig4
# Plot RMSE vs alpha for each lambda
for ix, lambda_pl in enumerate(lambdas_fig4):
plt.plot(alphas_fig4, RMSE_mat[ix, :], label='λ = ' + str(lambda_pl),
color=colors_fig4[ix])
# Format and show the plot
plt.xlabel('α', fontsize=label_size)
plt.ylabel('RMSE', fontsize=label_size)
plt.legend(loc='best', fontsize=legend_size)
plt.title('Replication of Figure 4 in Sutton (1988)', fontsize=title_size)
plt.xticks(fontsize=tick_size)
plt.yticks(fontsize=tick_size)
if run_5: # Generate figure 5
# Generate a random walk
training = take_a_walk(7, 3, training_sequences_fig5, training_sets_fig5, 27)
td_learning.py
alpha_fig3 = 0.001
color_fig3 = '#da008a'
# Parameters for generating figure 4
training_sets_fig4 = 100
training_sequences_fig4 = 10
alphas_fig4 = np.array(range(70)) / 100
lambdas_fig4 = [0, 0.3, 0.8, 1.0]
colors_fig4 = ['#da008a', '#7400e3', '#009bdf', '#c5c23d']
# Parameters for generating figure 5
training_sets_fig5 = 100
training_sequences_fig5 = 10
alphas_fig5 = np.array(range(71)) / 100
lambdas_fig5 = np.array(range(51)) / 50
color_fig5 = '#00ddac'
# Font sizes for all plots
title_size = 24
label_size = 18
tick_size = 16
legend_size = 16
# Define functions for generating plot data
#-----------------------------------------------------------------------------------------------
def take_a_walk(num_steps, start_step, seq_per_set, num_sets, seed=-1):
"""
Create a list of lists of training sequences for random walks.
:param num_steps: The number of steps in the random walk
:param start_step: The starting step of the sequences. -1 for random
:param seq_per_set: Number of training sequences in each training set
:param num_sets: Number of training sets
:param seed: The random seed to use for generating steps. Use -1 for no seed
:return training: Training data. Access a sequence (matrix) with training[set][seq]
"""
# Set the random seed, if supplied
if seed > 0:
np.random.seed(seed)
# Preallocate the entire training data list of lists of NumPy arrays
training = num_sets * [seq_per_set * [None]]
# Iterate to build the training data randomly
for this_set in range(num_sets): # Each set
for seq in range(seq_per_set): # Each sequence
if start_step == -1: # Random start location
start_step = np.random.randint(1, num_steps)
# Initialize the sequence
step = start_step
sequence = np.zeros(num_steps).astype(int)
sequence[step] = 1
while (step != 0 and step != num_steps - 1): # While not in absorbing state
if np.random.uniform() >= 0.5: # Uniformly random L v R step
step += 1 # Go right
else:
step -= 1 # Go left
# Generate the vector representing this step
this_sequence = np.zeros(num_steps).astype(int)
# Set the appropriate element to 1
this_sequence[step] = 1
# Add this step to the sequence
sequence = np.vstack((sequence, this_sequence))
# Assign the sequence to its position in the training data
training[this_set][seq] = sequence
return training
#-----------------------------------------------------------------------------------------------
def learn_game(t_seq, lambda_val, alpha, z_vals, p, verbose=False):
"""
Given a set of training data, perform repeated weight updates as in eq 4 in Sutton (1988)
:param t_seq: The input training sequence
:param lambda_val: ...lambda.
:param alpha: Learning rate
:param z_vals: A tuple of the form (r for state 0, r for state[-1])
:param p: The weights coming in
:param verbose: Set to True to see values generated at each step
:return delta_w: A NumPy vector of weight values
"""
# Determine the number of steps taken in the sequence, by the number of rows
num_steps = t_seq.shape[0] - 1
# Number of non-terminal states
num_states = t_seq.shape[1] - 2
# Get the reward value
z = z_vals[t_seq[-1, -1]]
# Initialize the lambda sequence and weight updates
lambda_seq = np.ones(1)
just_one = np.ones(1)
delta_w = np.zeros(num_states)
# Chop off the reward step data
training = t_seq[:-1, 1:-1]
# Perform the weight updates
for step in range(num_steps):
these_steps = training[0:step + 1]
if verbose:
print('p =', p)
print('Training sequence:')
print(these_steps)
print('Lambda sequence:')
print(lambda_seq)
print('Lambda sequence * training sequence:')
print(np.sum(these_steps * lambda_seq[:, None], axis=0))
if step == num_steps - 1: # The step entering the absorbing state
if verbose: print("z =", z)
delta_p = z - np.dot(p, training[-1, :])
else: # Non-terminal state
delta_p = np.dot(p, training[step + 1, :]) - np.dot(p, training[step, :])
if verbose: print('delta_p =', delta_p)
delta_w += alpha * delta_p * np.sum(these_steps * lambda_seq[:, None], axis=0)
if verbose: print('delta_w =', delta_w)
lambda_seq = np.concatenate((lambda_seq * lambda_val, just_one))
return(delta_w)
# Experiment code
#-----------------------------------------------------------------------------------------------
if run_3: # Generate figure 3
# Generate a random walk
training = take_a_walk(7, 3, training_sequences_fig3, training_sets_fig3, 27)
# Setup initial RMSE vector
RMSE_vector = np.zeros(101)
for lambda_it in range(101):
# Reset weights and deltas
weights = 0.5 * np.ones(5)
deltas = np.zeros(5)
for iteration in range(iterations_fig3):
for tset in range(training_sets_fig3):
for tseq in range(training_sequences_fig3):
deltas += learn_game(t_seq=training[tset][tseq], lambda_val=lambda_it / 100,
alpha=alpha_fig3, z_vals=(0, 1), p=weights)
weights += deltas
deltas = np.zeros(5)
RMSE_vector[lambda_it] = np.sqrt(((weights - actual_values) ** 2).mean())
print(str(lambda_it) + '% done')
# Plot RMSE vs lambda
plt.plot(RMSE_vector, color=color_fig3)
plt.ylabel('RMSE', fontsize=label_size)
plt.xlabel('λ', fontsize=label_size)
plt.xticks([0, 20, 40, 60, 80, 100], ['0.0', '0.2', '0.4', '0.6', '0.8', '1.0'],
fontsize=tick_size)
plt.yticks(fontsize=tick_size)
plt.title('Replication of Figure 3 in Sutton (1988)', fontsize=title_size)
if run_4: # Generate figure 4
# Generate a random walk
training = take_a_walk(7, 3, training_sequences_fig4, training_sets_fig4, 27)
# Setup initial RMSE vector
RMSE_mat = np.zeros((len(lambdas_fig4), len(alphas_fig4)))
for ix, lambda_it in enumerate(lambdas_fig4): # Iterate over each lambda value
print('Generating data for lambda = ' + str(lambda_it))
for ii, alpha in enumerate(alphas_fig4): # Iterate over each alpha value
for tset in range(training_sets_fig4):
weights = 0.5 * np.ones(5) # Reset the weights
for tseq in range(training_sequences_fig4):
# Generate the weights
weights += learn_game(t_seq=training[tset][tseq], lambda_val=lambda_it,
alpha=alpha, z_vals=(0, 1), p=weights)
# Add the new RMSE to the RMSE matrix
RMSE_mat[ix, ii] += np.sqrt(((np.round(weights, decimals=20) -
actual_values) ** 2).mean())
# Divide the RMSE matrix by the number of training sets to get the average per-set error
RMSE_mat /= training_sets_fig4 * training_sequences_fig4
# Plot RMSE vs alpha for each lambda
for ix, lambda_pl in enumerate(lambdas_fig4):
plt.plot(alphas_fig4, RMSE_mat[ix, :], label='λ = ' + str(lambda_pl),
color=colors_fig4[ix])
# Format and show the plot
plt.xlabel('α', fontsize=label_size)
plt.ylabel('RMSE', fontsize=label_size)
plt.legend(loc='best', fontsize=legend_size)
plt.title('Replication of Figure 4 in Sutton (1988)', fontsize=title_size)
plt.xticks(fontsize=tick_size)
plt.yticks(fontsize=tick_size)
if run_5: # Generate figure 5
# Generate a random walk
training = take_a_walk(7, 3, training_sequences_fig5, training_sets_fig5, 27)
td_learning.py
alpha_fig3 = 0.001
color_fig3 = '#da008a'
# Parameters for generating figure 4
training_sets_fig4 = 100
training_sequences_fig4 = 10
alphas_fig4 = np.array(range(70)) / 100
lambdas_fig4 = [0, 0.3, 0.8, 1.0]
colors_fig4 = ['#da008a', '#7400e3', '#009bdf', '#c5c23d']
# Parameters for generating figure 5
training_sets_fig5 = 100
training_sequences_fig5 = 10
alphas_fig5 = np.array(range(71)) / 100
lambdas_fig5 = np.array(range(51)) / 50
color_fig5 = '#00ddac'
# Font sizes for all plots
title_size = 24
label_size = 18
tick_size = 16
legend_size = 16
# Define functions for generating plot data
#-----------------------------------------------------------------------------------------------
def take_a_walk(num_steps, start_step, seq_per_set, num_sets, seed=-1):
"""
Create a list of lists of training sequences for random walks.
:param num_steps: The number of steps in the random walk
:param start_step: The starting step of the sequences. -1 for random
:param seq_per_set: Number of training sequences in each training set
:param num_sets: Number of training sets
:param seed: The random seed to use for generating steps. Use -1 for no seed
:return training: Training data. Access a sequence (matrix) with training[set][seq]
"""
# Set the random seed, if supplied
if seed > 0:
np.random.seed(seed)
# Preallocate the entire training data list of lists of NumPy arrays
training = num_sets * [seq_per_set * [None]]
# Iterate to build the training data randomly
for this_set in range(num_sets): # Each set
for seq in range(seq_per_set): # Each sequence
if start_step == -1: # Random start location
start_step = np.random.randint(1, num_steps)
# Initialize the sequence
step = start_step
sequence = np.zeros(num_steps).astype(int)
sequence[step] = 1
while (step != 0 and step != num_steps - 1): # While not in absorbing state
if np.random.uniform() >= 0.5: # Uniformly random L v R step
step += 1 # Go right
else:
step -= 1 # Go left
# Generate the vector representing this step
this_sequence = np.zeros(num_steps).astype(int)
# Set the appropriate element to 1
this_sequence[step] = 1
# Add this step to the sequence
sequence = np.vstack((sequence, this_sequence))
# Assign the sequence to its position in the training data
training[this_set][seq] = sequence
return training
#-----------------------------------------------------------------------------------------------
def learn_game(t_seq, lambda_val, alpha, z_vals, p, verbose=False):
"""
Given a set of training data, perform repeated weight updates as in eq 4 in Sutton (1988)
:param t_seq: The input training sequence
:param lambda_val: ...lambda.
:param alpha: Learning rate
:param z_vals: A tuple of the form (r for state 0, r for state[-1])
:param p: The weights coming in
:param verbose: Set to True to see values generated at each step
:return delta_w: A NumPy vector of weight values
"""
# Determine the number of steps taken in the sequence, by the number of rows
num_steps = t_seq.shape[0] - 1
# Number of non-terminal states
num_states = t_seq.shape[1] - 2
# Get the reward value
z = z_vals[t_seq[-1, -1]]
# Initialize the lambda sequence and weight updates
lambda_seq = np.ones(1)
just_one = np.ones(1)
delta_w = np.zeros(num_states)
# Chop off the reward step data
training = t_seq[:-1, 1:-1]
# Perform the weight updates
for step in range(num_steps):
these_steps = training[0:step + 1]
if verbose:
print('p =', p)
print('Training sequence:')
print(these_steps)
print('Lambda sequence:')
print(lambda_seq)
print('Lambda sequence * training sequence:')
print(np.sum(these_steps * lambda_seq[:, None], axis=0))
if step == num_steps - 1: # The step entering the absorbing state
if verbose: print("z =", z)
delta_p = z - np.dot(p, training[-1, :])
else: # Non-terminal state
delta_p = np.dot(p, training[step + 1, :]) - np.dot(p, training[step, :])
if verbose: print('delta_p =', delta_p)
delta_w += alpha * delta_p * np.sum(these_steps * lambda_seq[:, None], axis=0)
if verbose: print('delta_w =', delta_w)
lambda_seq = np.concatenate((lambda_seq * lambda_val, just_one))
return(delta_w)
# Experiment code
#-----------------------------------------------------------------------------------------------
if run_3: # Generate figure 3
# Generate a random walk
training = take_a_walk(7, 3, training_sequences_fig3, training_sets_fig3, 27)
# Setup initial RMSE vector
RMSE_vector = np.zeros(101)
for lambda_it in range(101):
# Reset weights and deltas
weights = 0.5 * np.ones(5)
deltas = np.zeros(5)
for iteration in range(iterations_fig3):
for tset in range(training_sets_fig3):
for tseq in range(training_sequences_fig3):
deltas += learn_game(t_seq=training[tset][tseq], lambda_val=lambda_it / 100,
alpha=alpha_fig3, z_vals=(0, 1), p=weights)
weights += deltas
deltas = np.zeros(5)
RMSE_vector[lambda_it] = np.sqrt(((weights - actual_values) ** 2).mean())
print(str(lambda_it) + '% done')
# Plot RMSE vs lambda
plt.plot(RMSE_vector, color=color_fig3)
plt.ylabel('RMSE', fontsize=label_size)
plt.xlabel('λ', fontsize=label_size)
plt.xticks([0, 20, 40, 60, 80, 100], ['0.0', '0.2', '0.4', '0.6', '0.8', '1.0'],
fontsize=tick_size)
plt.yticks(fontsize=tick_size)
plt.title('Replication of Figure 3 in Sutton (1988)', fontsize=title_size)
if run_4: # Generate figure 4
# Generate a random walk
training = take_a_walk(7, 3, training_sequences_fig4, training_sets_fig4, 27)
# Setup initial RMSE vector
RMSE_mat = np.zeros((len(lambdas_fig4), len(alphas_fig4)))
for ix, lambda_it in enumerate(lambdas_fig4): # Iterate over each lambda value
print('Generating data for lambda = ' + str(lambda_it))
for ii, alpha in enumerate(alphas_fig4): # Iterate over each alpha value
for tset in range(training_sets_fig4):
weights = 0.5 * np.ones(5) # Reset the weights
for tseq in range(training_sequences_fig4):
# Generate the weights
weights += learn_game(t_seq=training[tset][tseq], lambda_val=lambda_it,
alpha=alpha, z_vals=(0, 1), p=weights)
# Add the new RMSE to the RMSE matrix
RMSE_mat[ix, ii] += np.sqrt(((np.round(weights, decimals=20) -
actual_values) ** 2).mean())
# Divide the RMSE matrix by the number of training sets to get the average per-set error
RMSE_mat /= training_sets_fig4 * training_sequences_fig4
# Plot RMSE vs alpha for each lambda
for ix, lambda_pl in enumerate(lambdas_fig4):
plt.plot(alphas_fig4, RMSE_mat[ix, :], label='λ = ' + str(lambda_pl),
color=colors_fig4[ix])
# Format and show the plot
plt.xlabel('α', fontsize=label_size)
plt.ylabel('RMSE', fontsize=label_size)
plt.legend(loc='best', fontsize=legend_size)
plt.title('Replication of Figure 4 in Sutton (1988)', fontsize=title_size)
plt.xticks(fontsize=tick_size)
plt.yticks(fontsize=tick_size)
if run_5: # Generate figure 5
# Generate a random walk
training = take_a_walk(7, 3, training_sequences_fig5, training_sets_fig5, 27)
service.rs
pub fn new(address: message::Address, private_key: box_::SecretKey) -> MessageService {
MessageService {
address,
private_key,
precomputed_keys: HashMap::new(),
message_handlers: HashMap::new(),
}
}
}
impl fmt::Debug for MessageService {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let keys: Vec<&message::MessageType> = self.message_handlers.keys().collect();
write!(f, "MessageService(message_type:{:?})", keys)
}
}
impl actix::Actor for MessageService {
type Context = actix::Context<Self>;
}
/// Used to register a message handler
#[derive(Clone)]
pub struct RegisterMessageHandler {
message_type: message::MessageType,
handler: actix::Recipient<Request>,
}
impl RegisterMessageHandler {
/// constructor
pub fn new(
message_type: message::MessageType,
handler: actix::Recipient<Request>,
) -> RegisterMessageHandler {
RegisterMessageHandler {
message_type,
handler,
}
}
}
impl fmt::Debug for RegisterMessageHandler {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "RegisterMessageHandler({:?})", self.message_type)
}
}
impl actix::Message for RegisterMessageHandler {
type Result = ();
}
/// Message Request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Request(pub message::EncodedMessage);
impl actix::Message for Request {
type Result = Result<message::EncodedMessage, Error>;
}
impl actix::Handler<RegisterMessageHandler> for MessageService {
type Result = actix::MessageResult<RegisterMessageHandler>;
fn handle(&mut self, msg: RegisterMessageHandler, _: &mut Self::Context) -> Self::Result {
self.message_handlers.insert(msg.message_type, msg.handler);
actix::MessageResult(())
}
}
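// Illustrative sketch: from the caller's side, wiring a handler into the service is
// assumed to look roughly like the commented lines below; `echo_addr`, `service_addr`
// and `MY_MSG_TYPE` are hypothetical names, not part of this module.
//
// let recipient = echo_addr.recipient::<Request>();
// service_addr.do_send(RegisterMessageHandler::new(MY_MSG_TYPE, recipient));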
/// Get the list of registered message types
#[derive(Debug, Copy, Clone)]
pub struct GetRegisteredMessageTypes;
impl actix::Message for GetRegisteredMessageTypes {
type Result = Vec<message::MessageType>;
}
impl actix::Handler<GetRegisteredMessageTypes> for MessageService {
type Result = actix::MessageResult<GetRegisteredMessageTypes>;
fn handle(&mut self, _: GetRegisteredMessageTypes, _: &mut Self::Context) -> Self::Result {
let message_types: Vec<message::MessageType> =
self.message_handlers.keys().cloned().collect();
actix::MessageResult(message_types)
}
}
/// Message that indicates that a client has disconnected.
/// - the server should clean up any client related resources
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)]
pub struct ClientDisconnect(message::Address);
impl actix::Message for ClientDisconnect {
type Result = ();
}
impl actix::Handler<ClientDisconnect> for MessageService {
type Result = actix::MessageResult<ClientDisconnect>;
fn handle(&mut self, msg: ClientDisconnect, _: &mut Self::Context) -> Self::Result {
self.precomputed_keys.remove(&msg.0);
actix::MessageResult(())
}
}
// TODO: add an optional token, which is used to pay for the request
/// Process the SealedEnvelope request
#[derive(Debug, Clone)]
pub struct SealedEnvelopeRequest(pub message::SealedEnvelope);
impl actix::Message for SealedEnvelopeRequest {
type Result = Result<message::SealedEnvelope, Error>;
}
impl actix::Handler<SealedEnvelopeRequest> for MessageService {
type Result = actix::Response<message::SealedEnvelope, Error>;
fn handle(&mut self, req: SealedEnvelopeRequest, _: &mut Self::Context) -> Self::Result {
let key = {
let private_key = &self.private_key;
self.precomputed_keys
.entry(*req.0.sender())
.or_insert_with(|| box_::precompute(req.0.sender().public_key(), private_key))
.clone()
};
fn process_message(
sender: message::Address,
handler: &actix::Recipient<Request>,
encoded_message: message::EncodedMessage,
key: box_::PrecomputedKey,
) -> Box<dyn Future<Item = message::SealedEnvelope, Error = Error>> {
let msg_type = encoded_message.metadata().message_type();
let send = {
if let Some(deadline) = encoded_message.metadata().deadline() {
let duration = deadline
.duration(encoded_message.metadata().instance_id().ulid().datetime())
.to_std()
.or(Ok(std::time::Duration::from_millis(0))
as Result<std::time::Duration, ()>)
.unwrap();
handler.send(Request(encoded_message)).timeout(duration)
} else {
handler.send(Request(encoded_message))
}
};
let fut = send
.map_err(move |err| {
op_error!(errors::MailboxDeliveryError::new(&sender, msg_type, err))
})
.and_then(|result| {
let result = match result {
Ok(encoded_message) => encoded_message.open_envelope(),
Err(e) => Err(e),
};
futures::future::result(result)
})
.and_then(move |result| futures::future::ok(result.seal(&key)));
Box::new(fut)
}
fn unsupported_message_type(
sender: message::Address,
msg_type: message::MessageType,
) -> Box<dyn Future<Item = message::SealedEnvelope, Error = Error>> {
let fut = futures::future::err(op_error!(errors::UnsupportedMessageType::new(
&sender, msg_type
)));
Box::new(fut)
}
fn message_error(
err: Error,
) -> Box<dyn Future<Item = message::SealedEnvelope, Error = Error>> {
Box::new(futures::future::err(err))
}
let sender = *req.0.sender();
let result = req
.0
.open(&key)
.and_then(|open_envelope| open_envelope.encoded_message())
.and_then(|encoded_message| {
let message_type = encoded_message.metadata().message_type();
let fut = match self.message_handlers.get(&message_type) {
Some(handler) => process_message(sender, handler, encoded_message, key),
None => {
unsupported_message_type(sender, encoded_message.metadata().message_type())
}
};
Ok(fut)
});
match result {
Ok(fut) => actix::Response::r#async(fut),
Err(err) => actix::Response::r#async(message_error(err)),
}
}
}
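// Illustrative sketch: the per-sender cache above stores sodiumoxide precomputed keys;
// a client would derive the same shared key roughly like this (the variable names are
// hypothetical and only meant to show the box_ API shape):
//
// let (server_pk, _server_sk) = box_::gen_keypair();
// let (client_pk, client_sk) = box_::gen_keypair();
// let shared = box_::precompute(&server_pk, &client_sk);
// // `shared` can then seal and open envelopes without re-deriving a key per message.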
/// MessageService errors
pub mod errors {
use crate::message;
use oysterpack_errors::{Id, IsError, Level};
use std::fmt;
/// UnsupportedMessageType
#[derive(Debug)]
pub struct UnsupportedMessageType<'a> {
sender: &'a message::Address,
message_type: message::MessageType,
}
impl UnsupportedMessageType<'_> {
/// Error Id(01CYQ74Q46EAAPHBD95NJAXJGG)
pub const ERROR_ID: Id = Id(1867572772130723204709592385635404304);
/// Level::Alert because receiving a message for a type we do not support should be investigated
/// - this could be an attack
/// - this could be an app config issue
/// - this could be a client config issue - the client should be notified
pub const ERROR_LEVEL: Level = Level::Alert;
/// constructor
pub fn new(
sender: &message::Address,
message_type: message::MessageType,
) -> UnsupportedMessageType {
UnsupportedMessageType {
sender,
message_type,
}
}
}
impl IsError for UnsupportedMessageType<'_> {
fn error_id(&self) -> Id {
Self::ERROR_ID
}
fn error_level(&self) -> Level {
Self::ERROR_LEVEL
}
}
impl fmt::Display for UnsupportedMessageType<'_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{}: unsupported message type ({})",
self.sender, self.message_type
)
}
}
/// MailboxDeliveryError
#[derive(Debug)]
pub struct MailboxDeliveryError<'a> {
sender: &'a message::Address,
message_type: message::MessageType,
err: actix::MailboxError,
}
impl MailboxDeliveryError<'_> {
/// Error Id(01CYQ8F5HQMW5PETXBQAF3KF79)
pub const ERROR_ID: Id = Id(1867574453777009173831417913650298089);
/// Level::Critical
/// - if messages are timing out, then this is higher priority
/// - it could mean performance degradation issues
/// - it could mean clients are submitting requests with timeouts that are too low
/// - if the mailbox is closed, then this could mean a bug if it occurrs while the app is
/// running, i.e., not shutting down
/// - because of timing issues, the mailbox may be closed during application shutdown
service.rs
type Result = Result<message::EncodedMessage, Error>;
}
impl actix::Handler<RegisterMessageHandler> for MessageService {
type Result = actix::MessageResult<RegisterMessageHandler>;
fn handle(&mut self, msg: RegisterMessageHandler, _: &mut Self::Context) -> Self::Result {
self.message_handlers.insert(msg.message_type, msg.handler);
actix::MessageResult(())
}
}
/// Get the list of registered message types
#[derive(Debug, Copy, Clone)]
pub struct GetRegisteredMessageTypes;
impl actix::Message for GetRegisteredMessageTypes {
type Result = Vec<message::MessageType>;
}
impl actix::Handler<GetRegisteredMessageTypes> for MessageService {
type Result = actix::MessageResult<GetRegisteredMessageTypes>;
fn handle(&mut self, _: GetRegisteredMessageTypes, _: &mut Self::Context) -> Self::Result {
let message_types: Vec<message::MessageType> =
self.message_handlers.keys().cloned().collect();
actix::MessageResult(message_types)
}
}
/// Message that indicates that a client has disconnected.
/// - the server should clean up any client related resources
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)]
pub struct ClientDisconnect(message::Address);
impl actix::Message for ClientDisconnect {
type Result = ();
}
impl actix::Handler<ClientDisconnect> for MessageService {
type Result = actix::MessageResult<ClientDisconnect>;
fn handle(&mut self, msg: ClientDisconnect, _: &mut Self::Context) -> Self::Result {
self.precomputed_keys.remove(&msg.0);
actix::MessageResult(())
}
}
// TODO: add an optional token, which is used to pay for the request
/// Process the SealedEnvelope request
#[derive(Debug, Clone)]
pub struct SealedEnvelopeRequest(pub message::SealedEnvelope);
impl actix::Message for SealedEnvelopeRequest {
type Result = Result<message::SealedEnvelope, Error>;
}
impl actix::Handler<SealedEnvelopeRequest> for MessageService {
type Result = actix::Response<message::SealedEnvelope, Error>;
fn handle(&mut self, req: SealedEnvelopeRequest, _: &mut Self::Context) -> Self::Result {
let key = {
let private_key = &self.private_key;
self.precomputed_keys
.entry(*req.0.sender())
.or_insert_with(|| box_::precompute(req.0.sender().public_key(), private_key))
.clone()
};
fn process_message(
sender: message::Address,
handler: &actix::Recipient<Request>,
encoded_message: message::EncodedMessage,
key: box_::PrecomputedKey,
) -> Box<dyn Future<Item = message::SealedEnvelope, Error = Error>> {
let msg_type = encoded_message.metadata().message_type();
let send = {
if let Some(deadline) = encoded_message.metadata().deadline() {
let duration = deadline
.duration(encoded_message.metadata().instance_id().ulid().datetime())
.to_std()
.or(Ok(std::time::Duration::from_millis(0))
as Result<std::time::Duration, ()>)
.unwrap();
handler.send(Request(encoded_message)).timeout(duration)
} else {
handler.send(Request(encoded_message))
}
};
let fut = send
.map_err(move |err| {
op_error!(errors::MailboxDeliveryError::new(&sender, msg_type, err))
})
.and_then(|result| {
let result = match result {
Ok(encoded_message) => encoded_message.open_envelope(),
Err(e) => Err(e),
};
futures::future::result(result)
})
.and_then(move |result| futures::future::ok(result.seal(&key)));
Box::new(fut)
}
fn unsupported_message_type(
sender: message::Address,
msg_type: message::MessageType,
) -> Box<dyn Future<Item = message::SealedEnvelope, Error = Error>> {
let fut = futures::future::err(op_error!(errors::UnsupportedMessageType::new(
&sender, msg_type
)));
Box::new(fut)
}
fn message_error(
err: Error,
) -> Box<dyn Future<Item = message::SealedEnvelope, Error = Error>> {
Box::new(futures::future::err(err))
}
let sender = *req.0.sender();
let result = req
.0
.open(&key)
.and_then(|open_envelope| open_envelope.encoded_message())
.and_then(|encoded_message| {
let message_type = encoded_message.metadata().message_type();
let fut = match self.message_handlers.get(&message_type) {
Some(handler) => process_message(sender, handler, encoded_message, key),
None => {
unsupported_message_type(sender, encoded_message.metadata().message_type())
}
};
Ok(fut)
});
match result {
Ok(fut) => actix::Response::r#async(fut),
Err(err) => actix::Response::r#async(message_error(err)),
}
}
}
/// MessageService errors
pub mod errors {
use crate::message;
use oysterpack_errors::{Id, IsError, Level};
use std::fmt;
/// UnsupportedMessageType
#[derive(Debug)]
pub struct UnsupportedMessageType<'a> {
sender: &'a message::Address,
message_type: message::MessageType,
}
impl UnsupportedMessageType<'_> {
/// Error Id(01CYQ74Q46EAAPHBD95NJAXJGG)
pub const ERROR_ID: Id = Id(1867572772130723204709592385635404304);
/// Level::Alert because receiving a message for a type we do not support should be investigated
/// - this could be an attack
/// - this could be an app config issue
/// - this could be a client config issue - the client should be notified
pub const ERROR_LEVEL: Level = Level::Alert;
/// constructor
pub fn new(
sender: &message::Address,
message_type: message::MessageType,
) -> UnsupportedMessageType {
UnsupportedMessageType {
sender,
message_type,
}
}
}
impl IsError for UnsupportedMessageType<'_> {
fn error_id(&self) -> Id {
Self::ERROR_ID
}
fn error_level(&self) -> Level {
Self::ERROR_LEVEL
}
}
impl fmt::Display for UnsupportedMessageType<'_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{}: unsupported message type ({})",
self.sender, self.message_type
)
}
}
/// MailboxDeliveryError
#[derive(Debug)]
pub struct MailboxDeliveryError<'a> {
sender: &'a message::Address,
message_type: message::MessageType,
err: actix::MailboxError,
}
impl MailboxDeliveryError<'_> {
/// Error Id(01CYQ8F5HQMW5PETXBQAF3KF79)
pub const ERROR_ID: Id = Id(1867574453777009173831417913650298089);
/// Level::Critical
/// - if messages are timing out, then this is higher priority
/// - it could mean performance degradation issues
/// - it could mean clients are submitting requests with timeouts that are too low
/// - if the mailbox is closed, then this could mean a bug if it occurrs while the app is
/// running, i.e., not shutting down
/// - because of timing issues, the mailbox may be closed during application shutdown
pub const ERROR_LEVEL: Level = Level::Critical;
/// constructor
pub fn new(
sender: &message::Address,
message_type: message::MessageType,
err: actix::MailboxError,
) -> MailboxDeliveryError {
MailboxDeliveryError {
sender,
message_type,
err,
}
}
}
impl IsError for MailboxDeliveryError<'_> {
fn error_id(&self) -> Id {
Self::ERROR_ID
}
fn error_level(&self) -> Level {
Self::ERROR_LEVEL
}
}
impl fmt::Display for MailboxDeliveryError<'_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{}: mailbox delivery error for message type: {} : {}",
self.sender, self.message_type, self.err,
)
}
}
}
#[allow(warnings)]
#[cfg(test)]
mod tests {
use crate::actor;
use sodiumoxide::crypto::box_;
use futures::prelude::*;
struct EchoService;
impl actix::Actor for EchoService {
type Context = actix::Context<Self>;
}
impl actix::Handler<super::Request> for EchoService {
type Result = actix::MessageResult<super::Request>;
fn handle(&mut self, req: super::Request, _: &mut Self::Context) -> Self::Result {
actix::MessageResult(Ok(req.0))
}
service.rs
#[derive(Clone)]
pub struct RegisterMessageHandler {
message_type: message::MessageType,
handler: actix::Recipient<Request>,
}
impl RegisterMessageHandler {
/// constructor
pub fn new(
message_type: message::MessageType,
handler: actix::Recipient<Request>,
) -> RegisterMessageHandler {
RegisterMessageHandler {
message_type,
handler,
}
}
}
impl fmt::Debug for RegisterMessageHandler {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "RegisterMessageHandler({:?})", self.message_type)
}
}
impl actix::Message for RegisterMessageHandler {
type Result = ();
}
/// Message Request
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Request(pub message::EncodedMessage);
impl actix::Message for Request {
type Result = Result<message::EncodedMessage, Error>;
}
impl actix::Handler<RegisterMessageHandler> for MessageService {
type Result = actix::MessageResult<RegisterMessageHandler>;
fn handle(&mut self, msg: RegisterMessageHandler, _: &mut Self::Context) -> Self::Result {
self.message_handlers.insert(msg.message_type, msg.handler);
actix::MessageResult(())
}
}
/// Get the list of registered message types
#[derive(Debug, Copy, Clone)]
pub struct GetRegisteredMessageTypes;
impl actix::Message for GetRegisteredMessageTypes {
type Result = Vec<message::MessageType>;
}
impl actix::Handler<GetRegisteredMessageTypes> for MessageService {
type Result = actix::MessageResult<GetRegisteredMessageTypes>;
fn handle(&mut self, _: GetRegisteredMessageTypes, _: &mut Self::Context) -> Self::Result {
let message_types: Vec<message::MessageType> =
self.message_handlers.keys().cloned().collect();
actix::MessageResult(message_types)
}
}
/// Message that indicates that a client has disconnected.
/// - the server should clean up any client related resources
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Serialize, Deserialize)]
pub struct ClientDisconnect(message::Address);
impl actix::Message for ClientDisconnect {
type Result = ();
}
impl actix::Handler<ClientDisconnect> for MessageService {
type Result = actix::MessageResult<ClientDisconnect>;
fn handle(&mut self, msg: ClientDisconnect, _: &mut Self::Context) -> Self::Result {
self.precomputed_keys.remove(&msg.0);
actix::MessageResult(())
}
}
// TODO: add an optional token, which is used to pay for the request
/// Process the SealedEnvelope request
#[derive(Debug, Clone)]
pub struct SealedEnvelopeRequest(pub message::SealedEnvelope);
impl actix::Message for SealedEnvelopeRequest {
type Result = Result<message::SealedEnvelope, Error>;
}
impl actix::Handler<SealedEnvelopeRequest> for MessageService {
type Result = actix::Response<message::SealedEnvelope, Error>;
fn handle(&mut self, req: SealedEnvelopeRequest, _: &mut Self::Context) -> Self::Result {
let key = {
let private_key = &self.private_key;
self.precomputed_keys
.entry(*req.0.sender())
.or_insert_with(|| box_::precompute(req.0.sender().public_key(), private_key))
.clone()
};
fn process_message(
sender: message::Address,
handler: &actix::Recipient<Request>,
encoded_message: message::EncodedMessage,
key: box_::PrecomputedKey,
) -> Box<dyn Future<Item = message::SealedEnvelope, Error = Error>> {
let msg_type = encoded_message.metadata().message_type();
let send = {
if let Some(deadline) = encoded_message.metadata().deadline() {
let duration = deadline
.duration(encoded_message.metadata().instance_id().ulid().datetime())
.to_std()
.or(Ok(std::time::Duration::from_millis(0))
as Result<std::time::Duration, ()>)
.unwrap();
handler.send(Request(encoded_message)).timeout(duration)
} else {
handler.send(Request(encoded_message))
}
};
let fut = send
.map_err(move |err| {
op_error!(errors::MailboxDeliveryError::new(&sender, msg_type, err))
})
.and_then(|result| {
let result = match result {
Ok(encoded_message) => encoded_message.open_envelope(),
Err(e) => Err(e),
};
futures::future::result(result)
})
.and_then(move |result| futures::future::ok(result.seal(&key)));
Box::new(fut)
}
fn unsupported_message_type(
sender: message::Address,
msg_type: message::MessageType,
) -> Box<dyn Future<Item = message::SealedEnvelope, Error = Error>> {
let fut = futures::future::err(op_error!(errors::UnsupportedMessageType::new(
&sender, msg_type
)));
Box::new(fut)
}
fn message_error(
err: Error,
) -> Box<dyn Future<Item = message::SealedEnvelope, Error = Error>> {
Box::new(futures::future::err(err))
}
let sender = *req.0.sender();
let result = req
.0
.open(&key)
.and_then(|open_envelope| open_envelope.encoded_message())
.and_then(|encoded_message| {
let message_type = encoded_message.metadata().message_type();
let fut = match self.message_handlers.get(&message_type) {
Some(handler) => process_message(sender, handler, encoded_message, key),
None => {
unsupported_message_type(sender, encoded_message.metadata().message_type())
}
};
Ok(fut)
});
match result {
Ok(fut) => actix::Response::r#async(fut),
Err(err) => actix::Response::r#async(message_error(err)),
}
}
}
/// MessageService errors
pub mod errors {
use crate::message;
use oysterpack_errors::{Id, IsError, Level};
use std::fmt;
/// UnsupportedMessageType
#[derive(Debug)]
pub struct UnsupportedMessageType<'a> {
sender: &'a message::Address,
message_type: message::MessageType,
}
impl UnsupportedMessageType<'_> {
/// Error Id(01CYQ74Q46EAAPHBD95NJAXJGG)
pub const ERROR_ID: Id = Id(1867572772130723204709592385635404304);
/// Level::Alert because receiving a message for a type we do not support should be investigated
/// - this could be an attack
/// - this could be an app config issue
/// - this could be a client config issue - the client should be notified
pub const ERROR_LEVEL: Level = Level::Alert;
/// constructor
pub fn new(
sender: &message::Address,
message_type: message::MessageType,
) -> UnsupportedMessageType {
UnsupportedMessageType {
sender,
message_type,
}
}
}
impl IsError for UnsupportedMessageType<'_> {
fn error_id(&self) -> Id {
Self::ERROR_ID
}
fn error_level(&self) -> Level {
Self::ERROR_LEVEL
}
}
impl fmt::Display for UnsupportedMessageType<'_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{}: unsupported message type ({})",
self.sender, self.message_type
)
}
}
/// MailboxDeliveryError
#[derive(Debug)]
pub struct MailboxDeliveryError<'a> {
sender: &'a message::Address,
message_type: message::MessageType,
err: actix::MailboxError,
}
impl MailboxDeliveryError<'_> {
/// Error Id(01CYQ8F5HQMW5PETXBQAF3KF79)
pub const ERROR_ID: Id = Id(1867574453777009173831417913650298089);
/// Level::Critical
/// - if messages are timing out, then this is higher priority
/// - it could mean performance degradation issues
/// - it could mean clients are submitting requests with timeouts that are too low
/// - if the mailbox is closed, then this could mean a bug if it occurrs while the app is
/// running, i.e., not shutting down
/// - because of timing issues, the mailbox may be closed during application shutdown
pub const ERROR_LEVEL: Level = Level::Critical;
/// constructor
pub fn new(
sender: &message::Address,
message_type: message::MessageType,
err: actix::MailboxError,
) -> MailboxDeliveryError {
MailboxDeliveryError {
sender,
message_type,
err,
}
}
}
impl IsError for MailboxDeliveryError<'_> {
fn error_id(&self) -> Id {
Self::ERROR_ID
}
fn error_level(&self) -> Level {
Self::ERROR_LEVEL
}
}
impl fmt::Display for MailboxDeliveryError<'_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{}: mailbox delivery error for message type: {} : {}",
self.sender, self.message_type, self.err,
)
}
}
|
tar_helper.rs
()
.chain(std::iter::repeat(0))
.enumerate()
.take(gnu_header.name.len());
// FIXME: could revert back to the slice copying code since we never change this
for (i, b) in written {
gnu_header.name[i] = b;
}
}
long_filename_header.set_mtime(0);
long_filename_header.set_uid(0);
long_filename_header.set_gid(0);
long_filename_header
}
pub(super) fn apply_file(
&mut self,
path: &Path,
metadata: &Metadata,
total_size: u64,
) -> Result<[Option<Bytes>; 3], GetError> {
let mut ret: [Option<Bytes>; 3] = Default::default();
if let Err(e) = self.header.set_path(path) {
let data =
prepare_long_header(&mut self.header, &mut self.long_filename_header, path, e)?;
self.bytes.put_slice(self.long_filename_header.as_bytes());
self.bytes.put_slice(data);
self.bytes.put_u8(0);
ret[0] = Some(self.bytes.split().freeze());
ret[1] = self.pad(data.len() as u64 + 1);
}
self.header.set_size(total_size);
self.header.set_entry_type(EntryType::Regular);
Self::set_metadata(&mut self.header, metadata, 0o0644);
self.header.set_cksum();
self.bytes.put_slice(self.header.as_bytes());
ret[2] = Some(self.bytes.split().freeze());
Ok(ret)
}
pub(super) fn buffer_file_contents(&mut self, contents: &[u8]) -> Bytes {
assert!(!contents.is_empty());
let remaining = contents.len();
let taken = self.bufsize.min(remaining);
// was initially thinking to check the capacity but we are round robining the buffers to
// get a lucky chance at either of them being empty at this point
self.bytes.put_slice(&contents[..taken]);
self.bytes.split().freeze()
}
pub(super) fn apply_directory(
&mut self,
path: &Path,
metadata: &Metadata,
) -> Result<[Option<Bytes>; 3], GetError> {
let mut ret: [Option<Bytes>; 3] = Default::default();
if let Err(e) = self.header.set_path(path) {
let data =
prepare_long_header(&mut self.header, &mut self.long_filename_header, path, e)?;
self.bytes.put_slice(self.long_filename_header.as_bytes());
self.bytes.put_slice(data);
self.bytes.put_u8(0);
ret[0] = Some(self.bytes.split().freeze());
ret[1] = self.pad(data.len() as u64 + 1);
}
self.header.set_size(0);
self.header.set_entry_type(EntryType::Directory);
Self::set_metadata(&mut self.header, metadata, 0o0755);
self.header.set_cksum();
self.bytes.put_slice(self.header.as_bytes());
ret[2] = Some(self.bytes.split().freeze());
Ok(ret)
}
pub(super) fn apply_symlink(
&mut self,
path: &Path,
target: &Path,
metadata: &Metadata,
) -> Result<[Option<Bytes>; 5], GetError> {
let mut ret: [Option<Bytes>; 5] = Default::default();
if let Err(e) = self.header.set_path(path) {
let data =
prepare_long_header(&mut self.header, &mut self.long_filename_header, path, e)?;
self.bytes.put_slice(self.long_filename_header.as_bytes());
self.bytes.put_slice(data);
self.bytes.put_u8(0);
ret[0] = Some(self.bytes.split().freeze());
ret[1] = self.pad(data.len() as u64 + 1);
}
if self.header.set_link_name(target).is_err() {
let data = path2bytes(target);
if data.len() < self.header.as_old().linkname.len() {
return Err(GetError::InvalidLinkName(data.to_vec()));
}
// this is another long header trick, but this time we have a different entry type and
// similarly the long file name is written as a separate entry with its own headers.
self.long_filename_header.set_size(data.len() as u64 + 1);
self.long_filename_header
.set_entry_type(tar::EntryType::new(b'K'));
self.long_filename_header.set_cksum();
self.bytes.put_slice(self.long_filename_header.as_bytes());
self.bytes.put_slice(data);
self.bytes.put_u8(0);
ret[2] = Some(self.bytes.split().freeze());
ret[3] = self.pad(data.len() as u64 + 1);
}
Self::set_metadata(&mut self.header, metadata, 0o0644);
self.header.set_size(0);
self.header.set_entry_type(tar::EntryType::Symlink);
self.header.set_cksum();
self.bytes.put_slice(self.header.as_bytes());
ret[4] = Some(self.bytes.split().freeze());
Ok(ret)
}
/// Content in tar is padded to 512 byte sectors which might be configurable as well.
pub(super) fn pad(&self, total_size: u64) -> Option<Bytes> {
let padding = 512 - (total_size % 512);
if padding < 512 {
Some(self.zeroes.slice(..padding as usize))
} else {
None
}
}
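// Worked example, assuming the 512-byte tar sectors used throughout this helper:
//   total_size = 100  -> padding = 512 - (100 % 512)  = 412 -> Some(412 zero bytes)
//   total_size = 512  -> padding = 512 - (512 % 512)  = 512 -> guard fails, None
//   total_size = 1000 -> padding = 512 - (1000 % 512) = 24  -> Some(24 zero bytes)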
fn set_metadata(header: &mut tar::Header, metadata: &Metadata, default_mode: u32) {
header.set_mode(
metadata
.mode()
.map(|mode| mode & 0o7777)
.unwrap_or(default_mode),
);
header.set_mtime(
metadata
.mtime()
.and_then(|(seconds, _)| {
if seconds >= 0 {
Some(seconds as u64)
} else {
None
}
})
.unwrap_or(0),
);
}
}
/// Returns the raw bytes we need to write as a new entry into the tar.
fn prepare_long_header<'a>(
header: &mut tar::Header,
long_filename_header: &mut tar::Header,
path: &'a Path,
_error: std::io::Error,
) -> Result<&'a [u8], GetError> {
#[cfg(unix)]
/// On unix this operation can never fail.
pub(super) fn bytes2path(bytes: Cow<[u8]>) -> std::io::Result<Cow<Path>> {
use std::ffi::{OsStr, OsString};
use std::os::unix::prelude::*;
Ok(match bytes {
Cow::Borrowed(bytes) => Cow::Borrowed(Path::new(OsStr::from_bytes(bytes))),
Cow::Owned(bytes) => Cow::Owned(PathBuf::from(OsString::from_vec(bytes))),
})
}
#[cfg(windows)]
/// On windows we cannot accept non-Unicode bytes because it
/// is impossible to convert it to UTF-16.
pub(super) fn bytes2path(bytes: Cow<[u8]>) -> std::io::Result<Cow<Path>> {
match bytes {
Cow::Borrowed(bytes) => {
let s = std::str::from_utf8(bytes).map_err(|_| not_unicode(bytes))?;
Ok(Cow::Borrowed(Path::new(s)))
}
Cow::Owned(bytes) => {
let s = String::from_utf8(bytes).map_err(|uerr| not_unicode(&uerr.into_bytes()))?;
Ok(Cow::Owned(PathBuf::from(s)))
}
}
}
// Used with windows.
#[allow(dead_code)]
fn not_unicode(v: &[u8]) -> std::io::Error {
use std::io::{Error, ErrorKind};
Error::new(
ErrorKind::Other,
format!(
"only Unicode paths are supported on Windows: {}",
String::from_utf8_lossy(v)
),
)
}
// we **only** have utf8 paths as protobuf has already parsed this file
// name and all of the previous ones as utf8.
let data = path2bytes(path);
let max = header.as_old().name.len();
if data.len() < max {
return Err(GetError::InvalidFileName(data.to_vec()));
}
// the plus one is documented as compliance with GNU tar, probably the null byte
// termination?
long_filename_header.set_size(data.len() as u64 + 1);
long_filename_header.set_entry_type(tar::EntryType::new(b'L'));
long_filename_header.set_cksum();
// we still need to figure out the truncated path we put into the header
let path = bytes2path(Cow::Borrowed(&data[..max]))
.expect("quite certain we have no non-utf8 paths here");
header
.set_path(&path)
.expect("we already made sure the path is of fitting length");
Ok(data)
}
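// Worked example: for a 300-byte path the 100-byte GNU name field overflows, so a
// synthetic "././@LongLink" entry of type 'L' is written first with size 301 (path
// plus NUL), and the real header then carries only the first 100 bytes of the path.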
#[cfg(unix)]
fn path2bytes(p: &Path) -> &[u8] {
use std::os::unix::prelude::*;
p.as_os_str().as_bytes()
}
#[cfg(windows)]
tar_helper.rs
/// ...is internal to `get` implementation. It uses some private parts of the `tar-rs`
/// crate to append the headers and the contents to a pair of `bytes::Bytes` operated in a
/// round-robin fashion.
pub(super) struct TarHelper {
bufsize: usize,
bytes: BytesMut,
header: Header,
long_filename_header: Header,
zeroes: Bytes,
}
impl TarHelper {
pub(super) fn with_capacity(n: usize) -> Self {
let bytes = BytesMut::with_capacity(n);
// these are 512 a piece
let header = Self::new_default_header();
let long_filename_header = Self::new_long_filename_header();
let mut zeroes = BytesMut::with_capacity(512);
for _ in 0..(512 / 8) {
zeroes.put_u64(0);
}
assert_eq!(zeroes.len(), 512);
let zeroes = zeroes.freeze();
Self {
bufsize: n,
bytes,
header,
long_filename_header,
zeroes,
}
}
fn new_default_header() -> tar::Header {
let mut header = tar::Header::new_gnu();
header.set_mtime(0);
header.set_uid(0);
header.set_gid(0);
header
}
fn new_long_filename_header() -> tar::Header {
let mut long_filename_header = tar::Header::new_gnu();
long_filename_header.set_mode(0o644);
{
let name = b"././@LongLink";
let gnu_header = long_filename_header.as_gnu_mut().unwrap();
// since we are reusing the header, zero out all of the bytes
let written = name
.iter()
.copied()
.chain(std::iter::repeat(0))
.enumerate()
.take(gnu_header.name.len());
// FIXME: could revert back to the slice copying code since we never change this
for (i, b) in written {
gnu_header.name[i] = b;
}
}
long_filename_header.set_mtime(0);
long_filename_header.set_uid(0);
long_filename_header.set_gid(0);
long_filename_header
}
pub(super) fn apply_file(
&mut self,
path: &Path,
metadata: &Metadata,
total_size: u64,
) -> Result<[Option<Bytes>; 3], GetError> {
let mut ret: [Option<Bytes>; 3] = Default::default();
if let Err(e) = self.header.set_path(path) {
let data =
prepare_long_header(&mut self.header, &mut self.long_filename_header, path, e)?;
self.bytes.put_slice(self.long_filename_header.as_bytes());
self.bytes.put_slice(data);
self.bytes.put_u8(0);
ret[0] = Some(self.bytes.split().freeze());
ret[1] = self.pad(data.len() as u64 + 1);
}
self.header.set_size(total_size);
self.header.set_entry_type(EntryType::Regular);
Self::set_metadata(&mut self.header, metadata, 0o0644);
self.header.set_cksum();
self.bytes.put_slice(self.header.as_bytes());
ret[2] = Some(self.bytes.split().freeze());
Ok(ret)
}
pub(super) fn buffer_file_contents(&mut self, contents: &[u8]) -> Bytes {
assert!(!contents.is_empty());
let remaining = contents.len();
let taken = self.bufsize.min(remaining);
// was initially thinking to check the capacity but we are round robining the buffers to
// get a lucky chance at either of them being empty at this point
self.bytes.put_slice(&contents[..taken]);
self.bytes.split().freeze()
}
pub(super) fn apply_directory(
&mut self,
path: &Path,
metadata: &Metadata,
) -> Result<[Option<Bytes>; 3], GetError> {
let mut ret: [Option<Bytes>; 3] = Default::default();
if let Err(e) = self.header.set_path(path) {
let data =
prepare_long_header(&mut self.header, &mut self.long_filename_header, path, e)?;
self.bytes.put_slice(self.long_filename_header.as_bytes());
self.bytes.put_slice(data);
self.bytes.put_u8(0);
ret[0] = Some(self.bytes.split().freeze());
ret[1] = self.pad(data.len() as u64 + 1);
}
self.header.set_size(0);
self.header.set_entry_type(EntryType::Directory);
Self::set_metadata(&mut self.header, metadata, 0o0755);
self.header.set_cksum();
self.bytes.put_slice(self.header.as_bytes());
ret[2] = Some(self.bytes.split().freeze());
Ok(ret)
}
pub(super) fn apply_symlink(
&mut self,
path: &Path,
target: &Path,
metadata: &Metadata,
) -> Result<[Option<Bytes>; 5], GetError> {
let mut ret: [Option<Bytes>; 5] = Default::default();
if let Err(e) = self.header.set_path(path) {
let data =
prepare_long_header(&mut self.header, &mut self.long_filename_header, path, e)?;
self.bytes.put_slice(self.long_filename_header.as_bytes());
self.bytes.put_slice(data);
self.bytes.put_u8(0);
ret[0] = Some(self.bytes.split().freeze());
ret[1] = self.pad(data.len() as u64 + 1);
}
if self.header.set_link_name(target).is_err() {
let data = path2bytes(target);
if data.len() < self.header.as_old().linkname.len() {
return Err(GetError::InvalidLinkName(data.to_vec()));
}
// this is another long header trick, but this time we have a different entry type and
// similarly the long file name is written as a separate entry with its own headers.
self.long_filename_header.set_size(data.len() as u64 + 1);
self.long_filename_header
.set_entry_type(tar::EntryType::new(b'K'));
self.long_filename_header.set_cksum();
self.bytes.put_slice(self.long_filename_header.as_bytes());
self.bytes.put_slice(data);
self.bytes.put_u8(0);
ret[2] = Some(self.bytes.split().freeze());
ret[3] = self.pad(data.len() as u64 + 1);
}
Self::set_metadata(&mut self.header, metadata, 0o0644);
self.header.set_size(0);
self.header.set_entry_type(tar::EntryType::Symlink);
self.header.set_cksum();
self.bytes.put_slice(self.header.as_bytes());
ret[4] = Some(self.bytes.split().freeze());
Ok(ret)
}
/// Content in tar is padded to 512 byte sectors which might be configurable as well.
pub(super) fn pad(&self, total_size: u64) -> Option<Bytes> {
let padding = 512 - (total_size % 512);
if padding < 512 {
Some(self.zeroes.slice(..padding as usize))
} else {
None
}
}
fn set_metadata(header: &mut tar::Header, metadata: &Metadata, default_mode: u32) {
header.set_mode(
metadata
.mode()
.map(|mode| mode & 0o7777)
.unwrap_or(default_mode),
);
header.set_mtime(
metadata
.mtime()
.and_then(|(seconds, _)| {
if seconds >= 0 {
Some(seconds as u64)
} else {
None
}
})
.unwrap_or(0),
);
}
}
/// Returns the raw bytes we need to write as a new entry into the tar.
fn prepare_long_header<'a>(
header: &mut tar::Header,
long_filename_header: &mut tar::Header,
path: &'a Path,
_error: std::io::Error,
) -> Result<&'a [u8], GetError> {
#[cfg(unix)]
/// On unix this operation can never fail.
pub(super) fn bytes2path(bytes: Cow<[u8]>) -> std::io::Result<Cow<Path>> {
use std::ffi::{OsStr, OsString};
use std::os::unix::prelude::*;
Ok(match bytes {
Cow::Borrowed(bytes) => Cow::Borrowed(Path::new(OsStr::from_bytes(bytes))),
Cow::Owned(bytes) => Cow::Owned(PathBuf::from(OsString::from_vec(bytes))),
})
}
#[cfg(windows)]
/// On windows we cannot accept non-Unicode bytes because it
/// is impossible to convert it to UTF-16.
pub(super) fn bytes2path(bytes: Cow<[u8]>) -> std::io::Result<Cow<Path>> {
match bytes {
Cow::Borrowed(bytes) => {
let s = std::str::from_utf8(bytes).map_err(|_| not_unicode(bytes))?;
Ok(Cow::Borrowed(Path::new(s)))
}
Cow::Owned(bytes) => {
let s = String::from_utf8(bytes).map_err(|uerr| not_unicode(&uerr.into_bytes()))?;
Ok(Cow::Owned(PathBuf::from(s)))
}
}
}
tar_helper.rs
/// ...is internal to `get` implementation. It uses some private parts of the `tar-rs`
/// crate to append the headers and the contents to a pair of `bytes::Bytes` operated in a
/// round-robin fashion.
pub(super) struct TarHelper {
bufsize: usize,
bytes: BytesMut,
header: Header,
long_filename_header: Header,
zeroes: Bytes,
}
impl TarHelper {
pub(super) fn with_capacity(n: usize) -> Self {
let bytes = BytesMut::with_capacity(n);
// these are 512 a piece
let header = Self::new_default_header();
let long_filename_header = Self::new_long_filename_header();
let mut zeroes = BytesMut::with_capacity(512);
for _ in 0..(512 / 8) {
zeroes.put_u64(0);
}
assert_eq!(zeroes.len(), 512);
let zeroes = zeroes.freeze();
Self {
bufsize: n,
bytes,
header,
long_filename_header,
zeroes,
}
}
fn new_default_header() -> tar::Header {
let mut header = tar::Header::new_gnu();
header.set_mtime(0);
header.set_uid(0);
header.set_gid(0);
header
}
fn new_long_filename_header() -> tar::Header {
let mut long_filename_header = tar::Header::new_gnu();
long_filename_header.set_mode(0o644);
{
let name = b"././@LongLink";
let gnu_header = long_filename_header.as_gnu_mut().unwrap();
// since we are reusing the header, zero out all of the bytes
let written = name
.iter()
.copied()
.chain(std::iter::repeat(0))
.enumerate()
.take(gnu_header.name.len());
// FIXME: could revert back to the slice copying code since we never change this
for (i, b) in written {
gnu_header.name[i] = b;
}
}
long_filename_header.set_mtime(0);
long_filename_header.set_uid(0);
long_filename_header.set_gid(0);
long_filename_header
}
pub(super) fn apply_file(
&mut self,
path: &Path,
metadata: &Metadata,
total_size: u64,
) -> Result<[Option<Bytes>; 3], GetError> {
let mut ret: [Option<Bytes>; 3] = Default::default();
if let Err(e) = self.header.set_path(path) {
let data =
prepare_long_header(&mut self.header, &mut self.long_filename_header, path, e)?;
self.bytes.put_slice(self.long_filename_header.as_bytes());
self.bytes.put_slice(data);
self.bytes.put_u8(0);
ret[0] = Some(self.bytes.split().freeze());
ret[1] = self.pad(data.len() as u64 + 1);
}
self.header.set_size(total_size);
self.header.set_entry_type(EntryType::Regular);
Self::set_metadata(&mut self.header, metadata, 0o0644);
self.header.set_cksum();
self.bytes.put_slice(self.header.as_bytes());
ret[2] = Some(self.bytes.split().freeze());
Ok(ret)
}
pub(super) fn buffer_file_contents(&mut self, contents: &[u8]) -> Bytes {
assert!(!contents.is_empty());
let remaining = contents.len();
let taken = self.bufsize.min(remaining);
// was initially thinking to check the capacity but we are round robining the buffers to
// get a lucky chance at either of them being empty at this point
self.bytes.put_slice(&contents[..taken]);
self.bytes.split().freeze()
}
pub(super) fn apply_directory(
&mut self,
path: &Path,
metadata: &Metadata,
) -> Result<[Option<Bytes>; 3], GetError> {
let mut ret: [Option<Bytes>; 3] = Default::default();
if let Err(e) = self.header.set_path(path) {
let data =
prepare_long_header(&mut self.header, &mut self.long_filename_header, path, e)?;
self.bytes.put_slice(self.long_filename_header.as_bytes());
self.bytes.put_slice(data);
self.bytes.put_u8(0);
ret[0] = Some(self.bytes.split().freeze());
ret[1] = self.pad(data.len() as u64 + 1);
}
self.header.set_size(0);
self.header.set_entry_type(EntryType::Directory);
Self::set_metadata(&mut self.header, metadata, 0o0755);
self.header.set_cksum();
self.bytes.put_slice(self.header.as_bytes());
ret[2] = Some(self.bytes.split().freeze());
Ok(ret)
}
pub(super) fn apply_symlink(
&mut self,
path: &Path,
target: &Path,
metadata: &Metadata,
) -> Result<[Option<Bytes>; 5], GetError> {
let mut ret: [Option<Bytes>; 5] = Default::default();
if let Err(e) = self.header.set_path(path) {
let data =
prepare_long_header(&mut self.header, &mut self.long_filename_header, path, e)?;
self.bytes.put_slice(self.long_filename_header.as_bytes());
self.bytes.put_slice(data);
self.bytes.put_u8(0);
ret[0] = Some(self.bytes.split().freeze());
ret[1] = self.pad(data.len() as u64 + 1);
}
if self.header.set_link_name(target).is_err() {
let data = path2bytes(target);
if data.len() < self.header.as_old().linkname.len() {
return Err(GetError::InvalidLinkName(data.to_vec()));
}
// this is another long header trick, but this time we have a different entry type and
// similarly the long file name is written as a separate entry with its own headers.
self.long_filename_header.set_size(data.len() as u64 + 1);
self.long_filename_header
.set_entry_type(tar::EntryType::new(b'K'));
self.long_filename_header.set_cksum();
self.bytes.put_slice(self.long_filename_header.as_bytes());
self.bytes.put_slice(data);
self.bytes.put_u8(0);
ret[2] = Some(self.bytes.split().freeze());
ret[3] = self.pad(data.len() as u64 + 1);
}
Self::set_metadata(&mut self.header, metadata, 0o0644);
self.header.set_size(0);
self.header.set_entry_type(tar::EntryType::Symlink);
self.header.set_cksum();
self.bytes.put_slice(self.header.as_bytes());
ret[4] = Some(self.bytes.split().freeze());
Ok(ret)
}
/// Content in tar is padded to 512 byte sectors which might be configurable as well.
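/// For example (illustrative figures, not from the original sources): a
/// 100-byte entry needs 412 bytes of zero padding, while an entry that is an
/// exact multiple of 512 bytes is already aligned and `pad` returns `None`.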
pub(super) fn pad(&self, total_size: u64) -> Option<Bytes> {
let padding = 512 - (total_size % 512);
if padding < 512 {
Some(self.zeroes.slice(..padding as usize))
} else {
None
}
}
fn set_metadata(header: &mut tar::Header, metadata: &Metadata, default_mode: u32) {
header.set_mode(
metadata
.mode()
.map(|mode| mode & 0o7777)
.unwrap_or(default_mode),
);
header.set_mtime(
metadata
.mtime()
.and_then(|(seconds, _)| {
if seconds >= 0 {
Some(seconds as u64)
} else {
None
}
})
.unwrap_or(0),
);
}
}
/// Returns the raw bytes we need to write as a new entry into the tar.
fn prepare_long_header<'a>(
header: &mut tar::Header,
long_filename_header: &mut tar::Header,
path: &'a Path,
_error: std::io::Error,
) -> Result<&'a [u8], GetError> {
#[cfg(unix)]
/// On unix this operation can never fail.
pub(super) fn bytes2path(bytes: Cow<[u8]>) -> std::io::Result<Cow<Path>> {
use std::ffi::{OsStr, OsString};
use std::os::unix::prelude::*;
Ok(match bytes {
Cow::Borrowed(bytes) => Cow::Borrowed(Path::new(OsStr::from_bytes(bytes))),
Cow::Owned(bytes) => Cow::Owned(PathBuf::from(OsString::from_vec(bytes))),
})
}
#[cfg(windows)]
/// On windows we cannot accept non-Unicode bytes because it
/// is impossible to convert it to UTF-16.
pub(super) fn bytes2path(bytes: Cow<[u8]>) -> std::io::Result<Cow<Path>> {
match bytes {
Cow::Borrowed(bytes) => {
let s = std::str::from_utf8(bytes).map_err(|_| not_unicode(bytes))?;
Ok(Cow::Borrowed(Path::new(s)))
}
Cow::Owned(bytes) => {
let s =
// chip8.go
self.LoadGame("brix.c8")
}
// Read file in current dir into Memory
func (self *Chip8) LoadGame(filename string) {
// rom, _ := ioutil.ReadFile(filename)
filename = self.Games[self.GameIndex]
fmt.Printf("Loading Game %s", filename)
f, _ := asset.Open(filename)
rom, _ := ioutil.ReadAll(f)
rom_length := len(rom)
if rom_length > 0 {
// fmt.Printf("Rom Length = %d\n", rom_length)
}
//If room to store ROM in RAM, start at 512 or 0x200
if (4096 - 512) > rom_length {
for i := 0; i < rom_length; i++ {
self.Memory[i+512] = rom[i]
}
}
}
//Tick to load next emulation cycle
func (self *Chip8) EmulateCycle() {
// Fetch Opcode
b1 := uint16(self.Memory[self.Pc])
b2 := uint16(self.Memory[self.Pc+1])
// Shift the first byte left by 8 bits and OR in the second byte to form the 16-bit opcode
self.Opcode = (b1 << 8) | b2
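// Illustrative example (values assumed, not from a specific ROM): if Memory[Pc] = 0xA2
// and Memory[Pc+1] = 0xF0, then (0xA2<<8)|0xF0 assembles the opcode 0xA2F0.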
x := (self.Opcode & 0x0F00) >> 8
y := self.Opcode & 0x00F0 >> 4
// 0x00E0 and 0x00EE have to be handled first: both fall under case 0x0000 once the opcode is masked with 0xF000
switch self.Opcode {
case 0xE0: // 0x00E0: Clears the screen
for i := 0; i < 64*32; i++ {
self.Gfx[i] = 0
}
self.Draw_flag = true
self.Pc += 2
break
case 0xEE: // 0x00EE: Returns from subroutine
self.Sp-- // 16 levels of stack, decrease stack pointer to prevent overwrite
self.Pc = self.Stack[self.Sp] // Put the stored return address from the stack back into the program counter
self.Pc += 2 // Don't forget to increase the program counter!
break
}
switch self.Opcode & 0xF000 {
//1 to 7, jump, call and skip instructions
case 0x1000: // 0x1NNN: Jumps to address NNN
self.Pc = self.Opcode & 0x0FFF
break
case 0x2000: // 0x2NNN: Calls subroutine at NNN.
self.Stack[self.Sp] = self.Pc // Store current address in stack
self.Sp++ // Increment stack pointer
self.Pc = self.Opcode & 0x0FFF // Set the program counter to the address at NNN
break
case 0x3000: // 0x3XNN: Skips the next instruction if VX equals NN
if uint16(self.V[(self.Opcode&0x0F00)>>8]) == self.Opcode&0x00FF {
self.Pc += 4
} else {
self.Pc += 2
}
break
case 0x4000: // 0x4XNN: Skips the next instruction if VX doesn't equal NN.
if uint16(self.V[(self.Opcode&0x0F00)>>8]) != self.Opcode&0x00FF {
self.Pc += 4
} else {
self.Pc += 2
}
break
case 0x5000: // 0x5XY0: Skips the next instruction if VX equals VY.
x := (self.Opcode & 0x0F00) >> 8
y := (self.Opcode & 0x00F0) >> 4
// fmt.Printf("x = %02x and y= %02x", x, y)
// fmt.Printf("V0 = %02x v1= %02x", self.V[x], self.V[y])
if uint16(self.V[x]) == uint16(self.V[y]) {
self.Pc += 4
} else {
self.Pc += 2
}
break
case 0x6000: //6XNN Sets VX to NN.
NN := byte(self.Opcode & 0x00FF)
self.V[x] = NN
self.Pc += 2
break
case 0x7000: //0x7XNN Adds NN to VX.
x := (self.Opcode & 0xF00) >> 8
NN := byte(self.Opcode & 0x00FF)
self.V[x] += NN
self.Pc += 2
break
//0X8000 - 8 CASES
/*
8XY0 Sets VX to the value of VY.
8XY1 Sets VX to VX or VY.
8XY2 Sets VX to VX and VY.
8XY3 Sets VX to VX xor VY.
8XY4 Adds VY to VX. VF is set to 1 when there's a carry, and to 0 when there isn't.
*/
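// Worked example for the 8XY4 carry (values assumed): V[x]=200 and V[y]=100 sum
// to 300, which wraps to 44 in a byte, so VF is set to 1 to record the carry.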
case 0x8000:
switch self.Opcode & 0x000F { // 8XY0 Sets VX to the value of VY.
case 0x0000: // 0x8XY0: Sets VX to the value of VY
self.V[x] = self.V[y]
self.Pc += 2
break
case 0x0001: // 0x8XY0: Sets VX to the value of VY
self.V[x] |= self.V[y]
self.Pc += 2
break
case 0x0002: // 0x8XY0: Sets VX to VX and VY.
self.V[x] &= self.V[y]
self.Pc += 2
break
case 0x0003: // 0x8XY3: Sets VX to VX xor VY.
self.V[x] ^= self.V[y]
self.Pc += 2
break
case 0x0004: // 0x8XY4: Adds VY to VX. VF is set to 1 when there's a carry, and to 0 when there isn't.
if self.V[y] > 0xFF-self.V[x] {
self.V[0xF] = 1
} else {
self.V[0xF] = 0
}
self.V[x] += self.V[y]
self.Pc += 2
break
case 0x0005: // 0x8XY5: VY is subtracted from VX. VF is set to 0 when there's a borrow, and 1 when there isn't
y := self.Opcode & 0x00F0 >> 4
if self.V[y] > self.V[x] {
self.V[0xF] = 0 //Borrow
} else {
self.V[0xF] = 1
}
self.V[x] -= self.V[y]
self.Pc += 2
break
case 0x0006: // 8XY6 Shifts VX right by one. VF set to the value of the least significant bit of VX before the shift
// y := self.Opcode & 0x00F0 >> 4
// fmt.Printf("Bit shifting CPU register %d", x)
self.V[0xF] = self.V[x] & 0x1
self.V[x] >>= 1
self.Pc += 2
break
case 0x0007: //8XY7: Sets VX to VY minus VX. VF is set to 0 when there's a borrow, | {
self.Pc = 0x200 // Program counter starts at 0x200, the Space of Memory after the interpreter
self.Opcode = 0 // Reset current Opcode
self.Index = 0 // Reset index register
self.Sp = 0 // Reset stack pointer
for x := 0; x < 16; x++ {
self.V[x] = 0
}
//
for i := 0; i < 80; i++ {
self.Memory[i] = Chip8_fontset[i]
}
// Clear display
for i := 0; i < 64*32; i++ {
self.Gfx[i] = 0
}
self.Games = []string{"brix.c8", "tetris.c8", "ufo.c8", "invaders.c8"} | identifier_body |
// chip8.go
self.V[0xF] = 0
}
self.V[x] += self.V[y]
self.Pc += 2
break
case 0x0005: // 0x8XY5: VY is subtracted from VX. VF is set to 0 when there's a borrow, and 1 when there isn't
y := self.Opcode & 0x00F0 >> 4
if self.V[y] > self.V[x] {
self.V[0xF] = 0 //Borrow
} else {
self.V[0xF] = 1
}
self.V[x] -= self.V[y]
self.Pc += 2
break
case 0x0006: // 8XY6 Shifts VX right by one. VF set to the value of the least significant bit of VX before the shift
// y := self.Opcode & 0x00F0 >> 4
// fmt.Printf("Bit shifting CPU register %d", x)
self.V[0xF] = self.V[x] & 0x1
self.V[x] >>= 1
self.Pc += 2
break
case 0x0007: //8XY7: Sets VX to VY minus VX. VF is set to 0 when there's a borrow, and 1 when there isn't.
y := self.Opcode & 0x00F0 >> 4
if self.V[x] > self.V[y] {
self.V[0xF] = 0 //Borrow
} else {
self.V[0xF] = 1
}
self.V[x] = self.V[y] - self.V[x]
self.Pc += 2
break
case 0x000E: //0x8XYE: Shifts VX left by one. VF is set to the value of the most significant bit of VX before the shift
//Because we're shifting left we need the left hand bit.
self.V[0xF] = self.V[x] >> 7
self.V[x] <<= 1
self.Pc += 2
break
}
case 0x9000: //9XY0 Skips the next instruction if VX doesn't equal VY. Same as 0x5XY0 but !
y := self.Opcode & 0x00F0 >> 4
if uint16(self.V[x]) != uint16(self.V[y]) {
self.Pc += 4
} else {
self.Pc += 2
}
break
case 0xA000: //ANNN Sets I to the address NNN.
self.Index = self.Opcode & 0x0FFF
self.Pc += 2
break
case 0xB000: //BNNN Jumps to the address NNN plus V0.
self.Pc = self.Opcode&0x0FFF + uint16(self.V[0])
break
case 0xC000: //Sets VX to the result of a bitwise and operation on a random number and NN
self.Pc += 2
self.V[x] = byte(uint16(rand.Intn(0xFF)) & (self.Opcode & 0x00FF))
case 0xD000: // DXYN: Draws a sprite at coordinate (VX, VY) that has a width of 8 pixels and a height of N pixels.
// Each row of 8 pixels is read as bit-coded starting from memory location I;
// I value doesn't change after the execution of this instruction.
// VF is set to 1 if any screen pixels are flipped from set to unset when the sprite is drawn,
// and to 0 if that doesn't happen
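// Sketch of the XOR drawing rule (illustrative): a sprite row byte 0b10010000
// toggles the pixels at x and x+3; if either of those pixels was already lit,
// VF becomes 1 so games can detect the collision.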
x := uint16(self.V[x])
y := uint16(self.V[y])
height := uint16(self.Opcode & 0x000F)
// fmt.Printf("Drawing - %02x, x=%d y=%d", self.Opcode, x, y)
var pixel byte
var yline uint16
var xline uint16
self.V[0xF] = 0
//For each scan line
for yline = 0; yline < height; yline++ {
pixel = self.Memory[self.Index+uint16(yline)]
//For each pixel in the scan line
for xline = 0; xline < 8; xline++ {
//if there is a pixel value
if pixel&(0x80>>xline) != 0 {
//If the pixel value is already 1, then we need to store V[0xf] as 1 to indicate
if (x + xline + ((y + yline) * 64)) < 2048 {
if self.Gfx[(x+xline+((y+yline)*64))] == 1 {
self.V[0xF] = 1
}
self.Gfx[x+xline+((y+yline)*64)] ^= 1
}
}
}
}
// fmt.Printf(format, ...)
self.Draw_flag = true
self.Pc += 2
break
case 0xE000:
switch self.Opcode & 0x00FF {
case 0x009E: // EX9E: Skips the next instruction if the key stored in VX is pressed
if self.Key[self.V[x]] != 0 {
self.Pc += 4
} else {
self.Pc += 2
}
break
case 0x00A1: // EX9E: Skips the next instruction if the key stored in VX is pressed
if self.Key[self.V[x]] == 0 {
self.Pc += 4
} else {
self.Pc += 2
}
break
}
break
//Final sett woop! fix:fe33
case 0xF000:
switch self.Opcode & 0x00FF {
case 0x0007: // FX07: Sets VX to the value of the delay timer
self.V[x] = self.Delay_timer
self.Pc += 2
break
case 0x000A: // FX0A: A key press is awaited, and then stored in VX.
keyPressed := false
for i := 0; i < 16; i++ {
if self.Key[i] != 0 {
self.V[x] = byte(i)
keyPressed = true
}
}
if keyPressed {
self.Pc += 2
}
break
case 0x0015: // FX15: Sets the delay timer to VX.
self.Delay_timer = self.V[x]
self.Pc += 2
break
case 0x0018: // FX18: Sets the delay timer to VX.
self.Sound_timer = byte(self.V[x])
self.Pc += 2
break
case 0x001E: // FX1E: Adds VX to I
// VF is set to 1 when range overflow (I+VX>0xFFF), and 0 when there isn't.
if self.Index+uint16(self.V[x]) > 0xFFF { // VF is set to 1 when range overflow (I+VX>0xFFF), and 0 when there isn't.
self.V[0xF] = 1
} else {
self.V[0xF] = 0
}
self.Index += uint16(self.V[x])
self.Pc += 2
break
case 0x0029: // FX29: Sets I to the location of the sprite for the character in VX. Characters 0-F (in hexadecimal) are represented by a 4x5 font
self.Index = uint16(self.V[x]) * 0x5
self.Pc += 2
break
case 0x0033: // FX33: Stores the Binary-coded decimal representation of VX at the addresses I, I plus 1, and I plus 2
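// Worked example (assumed value): V[x] = 156 stores 1 at Memory[I], 5 at Memory[I+1] and 6 at Memory[I+2].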
self.Memory[self.Index] = self.V[x] / 100
self.Memory[self.Index+1] = (self.V[x] / 10) % 10
self.Memory[self.Index+2] = (self.V[x] % 100) % 10
self.Pc += 2
break
case 0x055: // FX55 Stores V0 to VX (including VX) in memory starting at address I.[4]
for i := 0; i <= int(x); i++ { // V0 through VX inclusive
self.Memory[self.Index+uint16(i)] = self.V[i]
}
self.Ind | conditional_block |
|
// chip8.go
self.Pc = 0x200 // Program counter starts at 0x200, the Space of Memory after the interpreter
self.Opcode = 0 // Reset current Opcode
self.Index = 0 // Reset index register
self.Sp = 0 // Reset stack pointer
for x := 0; x < 16; x++ {
self.V[x] = 0
}
//
for i := 0; i < 80; i++ {
self.Memory[i] = Chip8_fontset[i]
}
// Clear display
for i := 0; i < 64*32; i++ {
self.Gfx[i] = 0
}
self.Games = []string{"brix.c8", "tetris.c8", "ufo.c8", "invaders.c8"}
self.LoadGame("brix.c8")
}
// Read file in current dir into Memory
func (self *Chip8) LoadGame(filename string) {
// rom, _ := ioutil.ReadFile(filename)
filename = self.Games[self.GameIndex]
fmt.Printf("Loading Game %s", filename)
f, _ := asset.Open(filename)
rom, _ := ioutil.ReadAll(f)
rom_length := len(rom)
if rom_length > 0 {
// fmt.Printf("Rom Length = %d\n", rom_length)
}
//If room to store ROM in RAM, start at 512 or 0x200
if (4096 - 512) > rom_length {
for i := 0; i < rom_length; i++ {
self.Memory[i+512] = rom[i]
}
}
}
//Tick to load next emulation cycle
func (self *Chip8) EmulateCycle() {
// Fetch Opcode
b1 := uint16(self.Memory[self.Pc])
b2 := uint16(self.Memory[self.Pc+1])
//Bitwise, add padding to end of first byte and append second byte to end
self.Opcode = (b1 << 8) | b2
x := (self.Opcode & 0x0F00) >> 8
y := self.Opcode & 0x00F0 >> 4
// 0x00E0 and 0x00EE have to be handled first: both fall under case 0x0000 once the opcode is masked with 0xF000
switch self.Opcode {
case 0xE0: // 0x00E0: Clears the screen
for i := 0; i < 64*32; i++ {
self.Gfx[i] = 0
}
self.Draw_flag = true
self.Pc += 2
break
case 0xEE: // 0x00EE: Returns from subroutine
self.Sp-- // 16 levels of stack, decrease stack pointer to prevent overwrite
self.Pc = self.Stack[self.Sp] // Put the stored return address from the stack back into the program counter
self.Pc += 2 // Don't forget to increase the program counter!
break
}
switch self.Opcode & 0xF000 {
//1 to 7, jump, call and skip instructions
case 0x1000: // 0x1NNN: Jumps to address NNN
self.Pc = self.Opcode & 0x0FFF
break
case 0x2000: // 0x2NNN: Calls subroutine at NNN.
self.Stack[self.Sp] = self.Pc // Store current address in stack
self.Sp++ // Increment stack pointer
self.Pc = self.Opcode & 0x0FFF // Set the program counter to the address at NNN
break
case 0x3000: // 0x3XNN: Skips the next instruction if VX equals NN
if uint16(self.V[(self.Opcode&0x0F00)>>8]) == self.Opcode&0x00FF {
self.Pc += 4
} else {
self.Pc += 2
}
break
case 0x4000: // 0x4XNN: Skips the next instruction if VX doesn't equal NN.
if uint16(self.V[(self.Opcode&0x0F00)>>8]) != self.Opcode&0x00FF {
self.Pc += 4
} else {
self.Pc += 2
}
break
case 0x5000: // 0x5XY0: Skips the next instruction if VX equals VY.
x := (self.Opcode & 0x0F00) >> 8
y := (self.Opcode & 0x00F0) >> 4
// fmt.Printf("x = %02x and y= %02x", x, y)
// fmt.Printf("V0 = %02x v1= %02x", self.V[x], self.V[y])
if uint16(self.V[x]) == uint16(self.V[y]) {
self.Pc += 4
} else {
self.Pc += 2
}
break
case 0x6000: //6XNN Sets VX to NN.
NN := byte(self.Opcode & 0x00FF)
self.V[x] = NN | self.V[x] += NN
self.Pc += 2
break
//0X8000 - 8 CASES
/*
8XY0 Sets VX to the value of VY.
8XY1 Sets VX to VX or VY.
8XY2 Sets VX to VX and VY.
8XY3 Sets VX to VX xor VY.
8XY4 Adds VY to VX. VF is set to 1 when there's a carry, and to 0 when there isn't.
*/
case 0x8000:
switch self.Opcode & 0x000F { // 8XY0 Sets VX to the value of VY.
case 0x0000: // 0x8XY0: Sets VX to the value of VY
self.V[x] = self.V[y]
self.Pc += 2
break
case 0x0001: // 0x8XY0: Sets VX to the value of VY
self.V[x] |= self.V[y]
self.Pc += 2
break
case 0x0002: // 0x8XY0: Sets VX to VX and VY.
self.V[x] &= self.V[y]
self.Pc += 2
break
case 0x0003: // 0x8XY3: Sets VX to VX xor VY.
self.V[x] ^= self.V[y]
self.Pc += 2
break
case 0x0004: // 0x8XY4: Adds VY to VX. VF is set to 1 when there's a carry, and to 0 when there isn't.
if self.V[y] > 0xFF-self.V[x] {
self.V[0xF] = 1
} else {
self.V[0xF] = 0
}
self.V[x] += self.V[y]
self.Pc += 2
break
case 0x0005: // 0x8XY5: VY is subtracted from VX. VF is set to 0 when there's a borrow, and 1 when there isn't
y := self.Opcode & 0x00F0 >> 4
if self.V[y] > self.V[x] {
self.V[0xF] = 0 //Borrow
} else {
self.V[0xF] = 1
}
self.V[x] -= self.V[y]
self.Pc += 2
break
case 0x0006: // 8XY6 Shifts VX right by one. VF set to the value of the least significant bit of VX before the shift
// y := self.Opcode & 0x00F0 >> 4
// fmt.Printf("Bit shifting CPU register %d", x)
self.V[0xF] = self.V[x] & 0x1
self.V[x] >>= 1
self.Pc += 2
break
case 0x0007: //8XY7: Sets VX to VY minus VX. VF is set to 0 when there's a borrow, and 1 when there isn't.
y := self.Opcode & 0x00F0 >> 4 | self.Pc += 2
break
case 0x7000: //0x7XNN Adds NN to VX.
x := (self.Opcode & 0xF00) >> 8
NN := byte(self.Opcode & 0x00FF)
// chip8.go
self.Pc = 0x200 // Program counter starts at 0x200, the Space of Memory after the interpreter
self.Opcode = 0 // Reset current Opcode
self.Index = 0 // Reset index register
self.Sp = 0 // Reset stack pointer
for x := 0; x < 16; x++ {
self.V[x] = 0
}
//
for i := 0; i < 80; i++ {
self.Memory[i] = Chip8_fontset[i]
}
// Clear display
for i := 0; i < 64*32; i++ {
self.Gfx[i] = 0
}
self.Games = []string{"brix.c8", "tetris.c8", "ufo.c8", "invaders.c8"}
self.LoadGame("brix.c8")
}
// Read file in current dir into Memory
func (self *Chip8) LoadGame(filename string) {
// rom, _ := ioutil.ReadFile(filename)
filename = self.Games[self.GameIndex]
fmt.Printf("Loading Game %s", filename)
f, _ := asset.Open(filename)
rom, _ := ioutil.ReadAll(f)
rom_length := len(rom)
if rom_length > 0 {
// fmt.Printf("Rom Length = %d\n", rom_length)
}
//If room to store ROM in RAM, start at 512 or 0x200
if (4096 - 512) > rom_length {
for i := 0; i < rom_length; i++ {
self.Memory[i+512] = rom[i]
}
}
}
//Tick to load next emulation cycle
func (self *Chip8) EmulateCycle() {
// Fetch Opcode
b1 := uint16(self.Memory[self.Pc])
b2 := uint16(self.Memory[self.Pc+1])
//Bitwise, add padding to end of first byte and append second byte to end
self.Opcode = (b1 << 8) | b2
x := (self.Opcode & 0x0F00) >> 8
y := self.Opcode & 0x00F0 >> 4
// 0x00E0 and 0x000E We have to do first because Golang seems to truncate 0x0000 into 0x00
switch self.Opcode {
case 0xE0: // 0x00E0: Clears the screen
for i := 0; i < 64*32; i++ {
self.Gfx[i] = 0
}
self.Draw_flag = true
self.Pc += 2
break
case 0xEE: // 0x00EE: Returns from subroutine
self.Sp-- // 16 levels of stack, decrease stack pointer to prevent overwrite
self.Pc = self.Stack[self.Sp] // Put the stored return address from the stack back into the program counter
self.Pc += 2 // Don't forget to increase the program counter!
break
}
switch self.Opcode & 0xF000 {
//1 to 7, jump, call and skip instructions
case 0x1000: // 0x1NNN: Jumps to address NNN
self.Pc = self.Opcode & 0x0FFF
break
case 0x2000: // 0x2NNN: Calls subroutine at NNN.
self.Stack[self.Sp] = self.Pc // Store current address in stack
self.Sp++ // Increment stack pointer
self.Pc = self.Opcode & 0x0FFF // Set the program counter to the address at NNN
break
case 0x3000: // 0x3XNN: Skips the next instruction if VX equals NN
if uint16(self.V[(self.Opcode&0x0F00)>>8]) == self.Opcode&0x00FF {
self.Pc += 4
} else {
self.Pc += 2
}
break
case 0x4000: // 0x4XNN: Skips the next instruction if VX doesn't equal NN.
if uint16(self.V[(self.Opcode&0x0F00)>>8]) != self.Opcode&0x00FF {
self.Pc += 4
} else {
self.Pc += 2
}
break
case 0x5000: // 0x5XY0: Skips the next instruction if VX equals VY.
x := (self.Opcode & 0x0F00) >> 8
y := (self.Opcode & 0x00F0) >> 4
// fmt.Printf("x = %02x and y= %02x", x, y)
// fmt.Printf("V0 = %02x v1= %02x", self.V[x], self.V[y])
if uint16(self.V[x]) == uint16(self.V[y]) {
self.Pc += 4
} else {
self.Pc += 2
}
break
case 0x6000: //6XNN Sets VX to NN.
NN := byte(self.Opcode & 0x00FF)
self.V[x] = NN
self.Pc += 2
break
case 0x7000: //0x7XNN Adds NN to VX.
x := (self.Opcode & 0xF00) >> 8
NN := byte(self.Opcode & 0x00FF)
self.V[x] += NN
self.Pc += 2
break
//0X8000 - 8 CASES
/*
8XY0 Sets VX to the value of VY.
8XY1 Sets VX to VX or VY.
8XY2 Sets VX to VX and VY.
8XY3 Sets VX to VX xor VY.
8XY4 Adds VY to VX. VF is set to 1 when there's a carry, and to 0 when there isn't.
*/
case 0x8000:
switch self.Opcode & 0x000F { // 8XY0 Sets VX to the value of VY.
case 0x0000: // 0x8XY0: Sets VX to the value of VY
self.V[x] = self.V[y]
self.Pc += 2
break
case 0x0001: // 0x8XY0: Sets VX to the value of VY
self.V[x] |= self.V[y]
self.Pc += 2
break
case 0x0002: // 0x8XY0: Sets VX to VX and VY.
self.V[x] &= self.V[y]
self.Pc += 2
break
case 0x0003: // 0x8XY3: Sets VX to VX xor VY.
self.V[x] ^= self.V[y]
self.Pc += 2
break
case 0x0004: // 0x8XY4: Adds VY to VX. VF is set to 1 when there's a carry, and to 0 when there isn't.
if self.V[y] > 0xFF-self.V[x] {
self.V[0xF] = 1
} else {
self.V[0xF] = 0
}
self.V[x] += self.V[y]
self.Pc += 2
break
case 0x0005: // 0x8XY5: VY is subtracted from VX. VF is set to 0 when there's a borrow, and 1 when there isn't
y := self.Opcode & 0x00F0 >> 4
if self.V[y] > self.V[x] {
self.V[0xF] = 0 //Borrow
} else {
self.V[0xF] = 1
}
self.V[x] -= self.V[y]
self.Pc += 2
break
case 0x0006: // 8XY6 Shifts VX right by one. VF set to the value of the least significant bit of VX before the shift
// y := self.Opcode & 0x00F0 >> 4
// fmt.Printf("Bit shifting CPU register %d", x)
self.V[0xF] = self.V[x] & 0x1
self.V[x] >>= 1
self.Pc += 2
break
case 0x0007: //8XY7: Sets VX to VY minus VX. VF is set to 0 when there's a borrow, and 1 when there isn't.
y := self.Opcode & 0x00F0 >> 4
// final.go
minorSep))
}
return strings.Join(strs, majorSep)
}
// Graph is the primary data structure for holding the highway data. The map's
// key is the 'origin' Place, and the EdgeMap contains 'destination' Places
// as keys and the travel distance and time as values.
type Graph map[Place]EdgeMap
// String returns a single string with the full data of the graph.
func (g Graph) String() string {
b := strings.Builder{}
for vertex, edges := range g {
// vertex;edge
b.WriteString(fmt.Sprintf("%[1]s%[3]s%[2]s\n", vertex, edges, majorSep))
}
return b.String()
}
// PrettyPrint writes a nicely formatted tabular list of places and distances
// between them to w. The resulting output is many lines long.
func (g Graph) PrettyPrint(w io.Writer) {
newline := ""
for k, v := range g {
fmt.Fprintf(w, "%s%s, %s (%g, %g)\n", newline, k.City, k.State, k.Latitude, k.Longitude)
newline = "\n" // redundant but this avoids if
for kk, vv := range v |
}
}
// Places provides a slice of unique Places in the graph.
// Use `ByState` or `ByCity` with pkg `sort` to sort the slice.
func (g Graph) Places() []Place {
places := make([]Place, 0, len(g))
for k := range g {
places = append(places, k)
}
return places
}
// Edge gets the Weight (edge data) for the connection between origin and
// destination if it exists. If the two places are not connected, data is
// the zero value and ok is false.
func (g Graph) Edge(origin, destination Place) (data Weight, ok bool) {
if dm, ok := g[origin]; ok {
if data, ok := dm[destination]; ok {
return data, true
}
}
return
}
//
//
//
// The following few types are used with Graph.Most()
//
//
//
// MinMax is a type of function that returns a min or max (or...?)
// of two floats.
type MinMax func(float64, float64) float64
// Predefined MinMaxes for Graph.Most().
var (
// The max of 2 floats.
Max MinMax = math.Max
// The min of 2 floats.
Min MinMax = math.Min
)
// Accessor is a function that 'converts' a Weight to a float.
type Accessor func(Weight) float64
// Predefined Accessors for Graph.Most().
var (
// Gets Weight.Distance in meters.
Dist Accessor = func(w Weight) float64 { return w.Distance }
// Gets Weight.TravelTime in minutes.
Time Accessor = func(w Weight) float64 { return w.TravelTime.Minutes() }
)
// Most will find the "mostest" edge of vertex 'origin' given the predicate and
// Accessor 'by'. Generally, it'll return the farthest or closest city connected
// to 'origin' based on the value of Weight specified with 'by'.
//
// For example `g.Most(myHomeTown, Max, Dist)` returns the farthest by distance
// connected city to `myHomeTown`.
//
// ok is false if origin is not found in the Graph.
func (g Graph) Most(origin Place, predicate MinMax, by Accessor) (most Place, ok bool) {
dm, ok := g[origin]
if !ok {
return most, false
}
var cur *Weight
for k, v := range dm {
v := v // copy the loop value so taking &v below stays valid across iterations
// start by setting the first value to the current best
if cur == nil {
most = k
cur = &v
continue
}
// get the mostest
bestval := predicate(by(*cur), by(v))
if bestval != by(*cur) {
// the other was chosen
most = k
cur = &v
}
}
return most, true
}
//
//
//
// Search
//
//
//
// FindPlace does a linear search of Places and returns the first match.
// If found, match is the place and found is true. Otherwise match is the
// zero value and found is false.
func (g Graph) FindPlace(city, state string) (match Place, found bool) {
city = strings.ToLower(city)
state = strings.ToLower(state)
for p := range g {
if strings.ToLower(p.City) == city &&
strings.ToLower(p.State) == state {
return p, true
}
}
return Place{}, false
}
// FindWithin performs a linear search of Places and returns the closest match
// that is within `radius` meters of the given latitude and longitude. The function
// uses the "spherical law of cosines" to calculate the distance. `found` is
// false if no Place was found.
func (g Graph) FindWithin(lat, lon, radius float64) (match Place, dist float64, found bool) {
best := radius + 1
for p := range g {
d := sphericalLawOfCos(lat, lon, p.Latitude, p.Longitude)
if d <= radius && d < best {
match = p
dist = d
found = true
best = d
}
}
if !found {
return Place{}, 0, false
}
return
}
// used for sphericalLawOfCos()
const (
earthradius = 6371e3 // 6371 km = 6,371,000 m
degtorad = math.Pi / 180.0
)
// sphericalLawOfCos uses said law to calculate the distance in meters (because
// `earthradius` is in meters) between (lat1,lon1) and (lat2,lon2).
//
// d = acos( sin φ1 ⋅ sin φ2 + cos φ1 ⋅ cos φ2 ⋅ cos Δλ ) ⋅ R
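// Rough sanity check (figures assumed, not from this code): Paris (48.85, 2.35)
// to London (51.51, -0.13) comes out near 344 km, the commonly quoted
// great-circle distance between the two cities.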
func sphericalLawOfCos(lat1, lon1, lat2, lon2 float64) float64 {
lat1 *= degtorad
lat2 *= degtorad
return earthradius * math.Acos(
math.Sin(lat1)*math.Sin(lat2)+
math.Cos(lat1)*math.Cos(lat2)*
math.Cos((lon2-lon1)*degtorad))
}
//
//
// Dijkstra's algorithm
//
//
type PathMap map[Place]pdata
type pdata struct {
visited bool
Dist float64
Hops int
parent Place
}
func (pm PathMap) Path(dest Place) (path []Place, sum float64) {
// prepare path if applicable
hops := pm[dest].Hops
if hops > 0 { // hops==0 -> no path found
path = make([]Place, hops+1, hops+1) // +1 to include origin in path
// build reverse path
// for n := dest; n != none; n = pm[n].parent {
// path = append(path, n)
// }
// // swap all into correct order
// for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 {
// path[i], path[j] = path[j], path[i]
// }
n := dest
for i := len(path) - 1; i >= 0; i-- {
path[i] = n
n = pm[n].parent
}
sum = pm[dest].Dist
}
return
}
// ShortestPath finds the shortest paths between orig and all other vertices
// using Dijkstra's algorithm.
//
// https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
func (g Graph) ShortestPath(orig Place, by Accessor) PathMap {
inf := math.Inf(1)
none := Place{} // zero val
var d pdata // temp var for data
// 1. mark all nodes unvisited. create a set of all unvisited nodes
// call the unvisited set
// 2. assign to every node a tentative distance value: zero for initial node
// and infinity ("unvisited") for all others. Set initial node as current.
nodes := make(PathMap, len(g))
for k := range g {
nodes[k] = pdata{Dist: inf}
}
current := orig
d = nodes[current]
d.Dist = 0
nodes[current] = d
found := false // aka done
for !found {
// fmt.Println("current", current, nodes[current])
if current == none {
return nil
}
// 3. for the current node, consider all its unvisited neighbors and
// calculate their tentative distances through the current node. Compare
// the newly calculated tentative distance to the currently assigned | {
fmt.Fprintf(w, "\t%-16s%3s%7.1fmi%10s\n", kk.City, kk.State, vv.Distance*MetersToMiles, vv.TravelTime)
} | conditional_block |
// final.go
bool) {
best := radius + 1
for p := range g {
d := sphericalLawOfCos(lat, lon, p.Latitude, p.Longitude)
if d <= radius && d < best {
match = p
dist = d
found = true
best = d
}
}
if !found {
return Place{}, 0, false
}
return
}
// used for sphericalLawOfCos()
const (
earthradius = 6371e3 // 6371 km = 6,371,000 m
degtorad = math.Pi / 180.0
)
// sphericalLawOfCos uses said law to calculate the distance in meters (because
// `earthradius` is in meters) between (lat1,lon1) and (lat2,lon2).
//
// d = acos( sin φ1 ⋅ sin φ2 + cos φ1 ⋅ cos φ2 ⋅ cos Δλ ) ⋅ R
func sphericalLawOfCos(lat1, lon1, lat2, lon2 float64) float64 {
lat1 *= degtorad
lat2 *= degtorad
return earthradius * math.Acos(
math.Sin(lat1)*math.Sin(lat2)+
math.Cos(lat1)*math.Cos(lat2)*
math.Cos((lon2-lon1)*degtorad))
}
//
//
// Dijkstra's algorithm
//
//
type PathMap map[Place]pdata
type pdata struct {
visited bool
Dist float64
Hops int
parent Place
}
func (pm PathMap) Path(dest Place) (path []Place, sum float64) {
// prepare path if applicable
hops := pm[dest].Hops
if hops > 0 { // hops==0 -> no path found
path = make([]Place, hops+1, hops+1) // +1 to include origin in path
// build reverse path
// for n := dest; n != none; n = pm[n].parent {
// path = append(path, n)
// }
// // swap all into correct order
// for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 {
// path[i], path[j] = path[j], path[i]
// }
n := dest
for i := len(path) - 1; i >= 0; i-- {
path[i] = n
n = pm[n].parent
}
sum = pm[dest].Dist
}
return
}
// ShortestPath finds the shortest paths between orig and all other vertices
// using Dijkstra's algorithm.
//
// https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
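//
// Illustrative usage (variable names assumed, not part of this file):
//
//	pm := g.ShortestPath(origin, Dist)
//	route, meters := pm.Path(destination)
//
// route lists the hops from origin to destination in order, and meters is the
// accumulated edge weight selected by the Accessor.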
func (g Graph) ShortestPath(orig Place, by Accessor) PathMap {
inf := math.Inf(1)
none := Place{} // zero val
var d pdata // temp var for data
// 1. mark all nodes unvisited. create a set of all unvisited nodes
// call the unvisited set
// 2. assign to every node a tentative distance value: zero for initial node
// and infinity ("unvisited") for all others. Set initial node as current.
nodes := make(PathMap, len(g))
for k := range g {
nodes[k] = pdata{Dist: inf}
}
current := orig
d = nodes[current]
d.Dist = 0
nodes[current] = d
found := false // aka done
for !found {
// fmt.Println("current", current, nodes[current])
if current == none {
return nil
}
// 3. for the current node, consider all its unvisited neighbors and
// calculate their tentative distances through the current node. Compare
// the newly calculated tentative distance to the currently assigned value
// and assign the smaller value.
for n, w := range g[current] {
if !nodes[n].visited { // n in unvisited set
tentative := nodes[current].Dist + by(w)
d = nodes[n]
if d.Dist > tentative {
d.Dist = tentative
d.parent = current
d.Hops = nodes[d.parent].Hops + 1
nodes[n] = d
}
}
}
// 4. when we are done considering all the unvisited neighbors of the
// current node, mark the current node as visited and remove it from the
// unvisited set. A visited node will never be checked again.
d = nodes[current]
d.visited = true
nodes[current] = d
// 5. A) if all nodes are marked visited (unvisited set is empty)
// OR B) if the smallest tentative distance among nodes in the unvisited set
// is infinity (no path possible)
// The algorithm is finished.
// TODO: termination case B
unvisitedcount := 0
for _, d := range nodes {
if !d.visited {
unvisitedcount++
}
}
found = unvisitedcount == 0
if found {
continue
}
// 6. Otherwise, select the unvisited node that is marked with the smallest
// tentative value, set it as the "current" and go back to step 3.
minDist := inf // pos infinity
minPlace := Place{}
for node, d := range nodes {
if !d.visited && d.Dist < minDist {
minDist = d.Dist
minPlace = node
}
}
current = minPlace
found = minDist == inf // termination case 5B above
}
return nodes
}
//
//
// parsing Graph and Place
//
//
// ParseGraph parses input from r, successively turning each line into a new
// entry in the graph. Lines beginning with "#" are ignored as comments, and
// blank lines are skipped. Line format is:
// `<place:city,state,lat,lon>;<place>,<weight:distance,time>;<place>,<weight>;...`
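//
// An example record (values illustrative only):
//
//	Provo,UT,40.23,-111.66;Orem,UT,40.30,-111.69,7000,8m30s
//
// i.e. an origin Place followed by one destination Place with its distance in
// meters and a Go duration string; further destinations are appended with ';'.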
func ParseGraph(r io.Reader) Graph {
s := bufio.NewScanner(r)
g := Graph{}
for s.Scan() {
line := s.Text()
// skip blank and comment
if len(line) == 0 || strings.TrimSpace(string(line[0])) == "#" {
continue
}
parts := strings.Split(line, majorSep)
vertex := ParsePlace(parts[0])
edges := EdgeMap{}
for _, part := range parts[1:] {
dest := ParsePlace(part) // this will work on strings with "extra" fields
dparts := strings.Split(part, minorSep)
w := Weight{} // TODO: refactor
w.Distance, _ = strconv.ParseFloat(dparts[4], 64)
w.TravelTime, _ = time.ParseDuration(dparts[5])
edges[dest] = w
}
g[vertex] = edges
}
return g
}
// ParsePlace parses a Place from a string in the format:
// `city,state,latitude,longitude`
func ParsePlace(str string) (p Place) {
parts := strings.Split(str, minorSep)
p.City = parts[0]
p.State = parts[1]
p.Latitude, _ = strconv.ParseFloat(parts[2], 64)
p.Longitude, _ = strconv.ParseFloat(parts[3], 64)
return
}
//
//
//
// sorting []Place
//
//
//
// ByState allows sorting []Place by state then city name.
type ByState []Place
// Len is the number of elements in the collection.
func (p ByState) Len() int {
return len(p)
}
// Less reports whether the element with
// index i should sort before the element with index j.
func (p ByState) Less(i int, j int) bool {
cmp := strings.Compare(p[i].State, p[j].State)
if cmp == 0 {
// same state, compare city
return strings.Compare(p[i].City, p[j].City) == -1
}
return cmp == -1
}
// Swap swaps the elements with indexes i and j.
func (p ByState) Swap(i int, j int) {
p[i], p[j] = p[j], p[i]
}
// ByCity allows sorting []Place by city name then state.
type ByCity []Place
// Len is the number of elements in the collection.
func (p ByCity) Len() int {
return len(p)
}
// Less reports whether the element with
// index i should sort before the element with index j.
func (p ByCity) Less(i int, j int) bool {
cmp := strings.Compare(p[i].City, p[j].City)
if cmp == 0 {
// same city, compare state
return strings.Compare(p[i].State, p[j].State) == -1
}
return cmp == -1
}
// Swap swaps the elements with indexes i and j.
func (p ByCity) Swap(i int, j int) {
p[i], p[j] | = p[j], p[i]
}
| identifier_body |
|
final.go | minorSep))
}
return strings.Join(strs, majorSep)
}
// Graph is the primary data structure for holding the highway data. The map's
// key is the 'origin' Place, and the EdgeMap contains 'destination' Places
// as keys and the travel distance and time as values.
type Graph map[Place]EdgeMap
// String returns a single string with the full data of the graph.
func (g Graph) String() string {
b := strings.Builder{}
for vertex, edges := range g {
// vertex;edge
b.WriteString(fmt.Sprintf("%[1]s%[3]s%[2]s\n", vertex, edges, majorSep))
}
return b.String()
}
// PrettyPrint writes a nicely formatted tabular list of places and distances
// between them to w. The resulting output is many lines long.
func (g Graph) PrettyPrint(w io.Writer) {
newline := ""
for k, v := range g {
fmt.Fprintf(w, "%s%s, %s (%g, %g)\n", newline, k.City, k.State, k.Latitude, k.Longitude)
newline = "\n" // redundant but this avoids if
for kk, vv := range v {
fmt.Fprintf(w, "\t%-16s%3s%7.1fmi%10s\n", kk.City, kk.State, vv.Distance*MetersToMiles, vv.TravelTime)
}
}
}
// Places provides a slice of unique Places in the graph.
// Use `ByState` or `ByCity` with pkg `sort` to sort the slice.
func (g Graph) Places() []Place {
places := make([]Place, 0, len(g))
for k := range g {
places = append(places, k)
}
return places
}
// Edge gets the Weight (edge data) for the connection between origin and
// destination if it exists. If the two places are not connected, data is
// the zero value and ok is false.
func (g Graph) Edge(origin, destination Place) (data Weight, ok bool) {
if dm, ok := g[origin]; ok {
if data, ok := dm[destination]; ok {
return data, true
}
}
return
}
//
//
//
// The following few types are used with Graph.Most()
//
//
//
// MinMax is a type of function that returns a min or max (or...?)
// of two floats.
type MinMax func(float64, float64) float64
// Predefined MinMaxes for Graph.Most().
var (
// The max of 2 floats.
Max MinMax = math.Max
// The min of 2 floats.
Min MinMax = math.Min
)
// Accessor ins a function that 'converts' a Weight to a float.
type Accessor func(Weight) float64
// Predefined Accessors for Graph.Most().
var (
// Gets Weight.Distance in meters.
Dist Accessor = func(w Weight) float64 { return w.Distance }
// Gets Weight.TravelTime in minutes.
Time Accessor = func(w Weight) float64 { return w.TravelTime.Minutes() }
)
// Most will find the "mostest" edge of vertex 'origin' given the predicate and
// Accessor 'by'. Generally, it'll return the farthest or closest city connected
// to 'origin' based on the value of Weight specified with 'by'.
//
// For example `g.Most(myHomeTown, Max, Dist)` returns the farthest by distance
// connected city to `myHomeTown`.
//
// ok is false if origin is not found in the Graph.
func (g Graph) Most(origin Place, predicate MinMax, by Accessor) (most Place, ok bool) {
dm, ok := g[origin]
if !ok {
return most, false
}
var cur *Weight
for k, v := range dm {
v := v // copy the loop value so taking &v below stays valid across iterations
// start by setting the first value to the current best
if cur == nil {
most = k
cur = &v
continue
}
// get the mostest
bestval := predicate(by(*cur), by(v))
if bestval != by(*cur) {
// the other was chosen
most = k
cur = &v
}
}
return most, true
}
//
//
//
// Search
//
//
//
// FindPlace does a linear search of Places and returns the first match.
// If found, match is the place and found is true. Otherwise match is the
// zero value and found is false.
func (g Graph) FindPlace(city, state string) (match Place, found bool) {
city = strings.ToLower(city)
state = strings.ToLower(state)
for p := range g {
if strings.ToLower(p.City) == city &&
strings.ToLower(p.State) == state {
return p, true
}
}
return Place{}, false
}
// FindWithin performs a linear search of Places and returns the closest match
// that is within `radius` meters of the given latitude and longitude. The function
// uses the "spherical law of cosines" to calculate the distance. `found` is
// false if no Place was found.
func (g Graph) FindWithin(lat, lon, radius float64) (match Place, dist float64, found bool) {
best := radius + 1
for p := range g {
d := sphericalLawOfCos(lat, lon, p.Latitude, p.Longitude)
if d <= radius && d < best {
match = p
dist = d
found = true
best = d
}
}
if !found {
return Place{}, 0, false
}
return
}
// used for sphericalLawOfCos()
const (
earthradius = 6371e3 // 6371 km = 6,371,000 m
degtorad = math.Pi / 180.0
)
// sphericalLawOfCos uses said law to calculate the distance in meters (because
// `earthradius` is in meters) between (lat1,lon1) and (lat2,lon2).
//
// d = acos( sin φ1 ⋅ sin φ2 + cos φ1 ⋅ cos φ2 ⋅ cos Δλ ) ⋅ R
func sphericalLawOfCos(lat1, lon1, lat2, lon2 float64) float64 {
lat1 *= degtorad
lat2 *= degtorad
return earthradius * math.Acos(
math.Sin(lat1)*math.Sin(lat2)+
math.Cos(lat1)*math.Cos(lat2)*
math.Cos((lon2-lon1)*degtorad))
}
//
//
// Dijkstra's algorithm
//
//
type PathMap map[Place]pdata
type pdata struct {
visited bool
Dist float64
Hops int
parent Place
}
func (pm PathMap) Path(dest Place) (path []Place, sum float64) {
// prepare path if applicable
hops := pm[dest].Hops
if hops > 0 { // hops==0 -> no path found
path = make([]Place, hops+1, hops+1) // +1 to include origin in path
// build reverse path
// for n := dest; n != none; n = pm[n].parent {
// path = append(path, n)
// }
// // swap all into correct order
// for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 {
// path[i], path[j] = path[j], path[i]
// }
n := dest
for i := len(path) - 1; i >= 0; i-- {
path[i] = n
n = pm[n].parent
}
sum = pm[dest].Dist
}
return
}
// ShortestPath finds the shortest paths between orig and all other vertices
// using Dijkstra's algorithm.
//
// https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
func (g Graph) ShortestPath(orig Place, by Accessor) PathMap {
inf := math.Inf(1)
none := Place{} // zero val
var d pdata // temp var for data
// 1. mark all nodes unvisitied. create a set of all unvisited nodes
// call the unvisited set
// 2. assign to every node a tentative distance value: zero for initial node
// and infinity ("unvisited") for all others. Set initial node as current.
nodes := make(PathMap, len(g))
for k := range g {
nodes[k] = pdata{Dist: inf}
}
current := orig
d = nodes[current]
d.Dist = 0
nodes[current] = d
found := false // aka done
for !found {
// fmt.Println("current", current, nodes[current])
if current == none {
return nil
}
// 3. for the current node, consider all its unvisited neighbors and
// calculate their tentative distances through the current node. Compare
// the newly calculated tentative distance to the currently assigned value
// final.go
// the zero value and ok is false.
func (g Graph) Edge(origin, destination Place) (data Weight, ok bool) {
if dm, ok := g[origin]; ok {
if data, ok := dm[destination]; ok {
return data, true
}
}
return
}
//
//
//
// The following few types are used with Graph.Most()
//
//
//
// MinMax is a type of function that returns a min or max (or...?)
// of two floats.
type MinMax func(float64, float64) float64
// Predefined MinMaxes for Graph.Most().
var (
// The max of 2 floats.
Max MinMax = math.Max
// The min of 2 floats.
Min MinMax = math.Min
)
// Accessor is a function that 'converts' a Weight to a float.
type Accessor func(Weight) float64
// Predefined Accessors for Graph.Most().
var (
// Gets Weight.Distance in meters.
Dist Accessor = func(w Weight) float64 { return w.Distance }
// Gets Weight.TravelTime in minutes.
Time Accessor = func(w Weight) float64 { return w.TravelTime.Minutes() }
)
// Most will find the "mostest" edge of vertex 'origin' given the predicate and
// Accessor 'by'. Generally, it'll return the farthest or closest city connected
// to 'origin' based on the value of Weight specified with 'by'.
//
// For example `g.Most(myHomeTown, Max, Dist)` returns the farthest by distance
// connected city to `myHomeTown`.
//
// ok is false if origin is not found in the Graph.
func (g Graph) Most(origin Place, predicate MinMax, by Accessor) (most Place, ok bool) {
dm, ok := g[origin]
if !ok {
return most, false
}
var cur *Weight
for k, v := range dm {
v := v // copy the loop value so taking &v below stays valid across iterations
// start by setting the first value to the current best
if cur == nil {
most = k
cur = &v
continue
}
// get the mostest
bestval := predicate(by(*cur), by(v))
if bestval != by(*cur) {
// the other was chosen
most = k
cur = &v
}
}
return most, true
}
//
//
//
// Search
//
//
//
// FindPlace does a linear search of Places and returns the first match.
// If found, match is the place and found is true. Otherwise match is the
// zero value and found is false.
func (g Graph) FindPlace(city, state string) (match Place, found bool) {
city = strings.ToLower(city)
state = strings.ToLower(state)
for p := range g {
if strings.ToLower(p.City) == city &&
strings.ToLower(p.State) == state {
return p, true
}
}
return Place{}, false
}
// FindWithin performs a linear search of Places and returns the closest match
// that is within `radius` meters of the given latitude and longitude. The function
// uses the "spherical law of cosines" to calculate the distance. `found` is
// false if no Place was found.
func (g Graph) FindWithin(lat, lon, radius float64) (match Place, dist float64, found bool) {
best := radius + 1
for p := range g {
d := sphericalLawOfCos(lat, lon, p.Latitude, p.Longitude)
if d <= radius && d < best {
match = p
dist = d
found = true
best = d
}
}
if !found {
return Place{}, 0, false
}
return
}
// used for sphericalLawOfCos()
const (
earthradius = 6371e3 // 6371 km = 6,371,000 m
degtorad = math.Pi / 180.0
)
// sphericalLawOfCos uses said law to calculate the distance in meters (because
// `earthradius` is in meters) between (lat1,lon1) and (lat2,lon2).
//
// d = acos( sin φ1 ⋅ sin φ2 + cos φ1 ⋅ cos φ2 ⋅ cos Δλ ) ⋅ R
func sphericalLawOfCos(lat1, lon1, lat2, lon2 float64) float64 {
lat1 *= degtorad
lat2 *= degtorad
return earthradius * math.Acos(
math.Sin(lat1)*math.Sin(lat2)+
math.Cos(lat1)*math.Cos(lat2)*
math.Cos((lon2-lon1)*degtorad))
}
//
//
// Dijkstra's algorithm
//
//
type PathMap map[Place]pdata
type pdata struct {
visited bool
Dist float64
Hops int
parent Place
}
func (pm PathMap) Path(dest Place) (path []Place, sum float64) {
// prepare path if applicable
hops := pm[dest].Hops
if hops > 0 { // hops==0 -> no path found
path = make([]Place, hops+1, hops+1) // +1 to include origin in path
// build reverse path
// for n := dest; n != none; n = pm[n].parent {
// path = append(path, n)
// }
// // swap all into correct order
// for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 {
// path[i], path[j] = path[j], path[i]
// }
n := dest
for i := len(path) - 1; i >= 0; i-- {
path[i] = n
n = pm[n].parent
}
sum = pm[dest].Dist
}
return
}
// ShortestPath finds the shortest paths between orig and all other vertices
// using Dijkstra's algorithm.
//
// https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
func (g Graph) ShortestPath(orig Place, by Accessor) PathMap {
inf := math.Inf(1)
none := Place{} // zero val
var d pdata // temp var for data
// 1. mark all nodes unvisited. create a set of all unvisited nodes
// call the unvisited set
// 2. assign to every node a tentative distance value: zero for initial node
// and infinity ("unvisited") for all others. Set initial node as current.
nodes := make(PathMap, len(g))
for k := range g {
nodes[k] = pdata{Dist: inf}
}
current := orig
d = nodes[current]
d.Dist = 0
nodes[current] = d
found := false // aka done
for !found {
// fmt.Println("current", current, nodes[current])
if current == none {
return nil
}
// 3. for the current node, consider all its unvisited neighbors and
// calculate their tentative distances through the current node. Compare
// the newly calculated tentative distance to the currently assigned value
// and assign the smaller value.
for n, w := range g[current] {
if !nodes[n].visited { // n in unvisited set
tentative := nodes[current].Dist + by(w)
d = nodes[n]
if d.Dist > tentative {
d.Dist = tentative
d.parent = current
d.Hops = nodes[d.parent].Hops + 1
nodes[n] = d
}
}
}
// 4. when we are done considering all the unvisited neighbors of the
// current node, mark the current node as visited and remove it from the
// unvisited set. A visited node will never be checked again.
d = nodes[current]
d.visited = true
nodes[current] = d
// 5. A) if all nodes are marked visited (unvisited set is empty)
// OR B) if the smallest tentative distance among nodes in the unvisited set
// is infinity (no path possible)
// The algorithm is finished.
// TODO: termination case B
unvisitedcount := 0
for _, d := range nodes {
if !d.visited {
unvisitedcount++
}
}
found = unvisitedcount == 0
if found {
continue
}
// 6. Otherwise, select the unvisited node that is marked with the smallest
// tentative value, set it as the "current" and go back to step 3.
minDist := inf // pos infinity
minPlace := Place{}
for node, d := range nodes {
if !d.visited && d.Dist < minDist {
minDist = d.Dist
minPlace = node
}
}
current = minPlace
found = minDist == inf // termination case 5B above | }
return nodes
} | random_line_split |
# nrml04.py
NRML04_LOWER_SEISMO_DEPTH = etree.QName(NRML04_NS,'lowerSeismoDepth')
NRML04_MAG_SCALE_REL = etree.QName(NRML04_NS,'magScaleRel')
NRML04_RUPT_ASPECT_RATIO = etree.QName(NRML04_NS,'ruptAspectRatio')
NRML04_INCREMENTAL_MFD = etree.QName(NRML04_NS,'incrementalMFD')
NRML04_TRUNCATED_GR = etree.QName(NRML04_NS,'truncGutenbergRichterMFD')
NRML04_OCCUR_RATES = etree.QName(NRML04_NS,'occurRates')
NRML04_NODAL_PLANE_DIST = etree.QName(NRML04_NS,'nodalPlaneDist')
NRML04_NODAL_PLANE = etree.QName(NRML04_NS,'nodalPlane')
NRML04_HYPO_DEPTH_DIST = etree.QName(NRML04_NS,'hypoDepthDist')
NRML04_HYPO_DEPTH = etree.QName(NRML04_NS,'hypoDepth')
NRML04_COMPLEX_FAUL_GEOMETRY = etree.QName(NRML04_NS, 'complexFaultGeometry')
NRML04_FAULT_TOP_EDGE = etree.QName(NRML04_NS, 'faultTopEdge')
NRML04_FAULT_BOTTOM_EDGE = etree.QName(NRML04_NS, 'faultBottomEdge')
NRML04_RAKE = etree.QName(NRML04_NS, 'rake')
NRML04_SIMPLE_FAULT_GEOMETRY = etree.QName(NRML04_NS, 'simpleFaultGeometry')
NRML04_DIP = etree.QName(NRML04_NS, 'dip')
NSMAP = {None: NRML04_NS, "gml": gml.GML_NS}
def _parse_source_model_file(source_model_file):
"""
Parse source model file in NRML 0.4.
"""
parse_args = dict(source=source_model_file)
srcs = []
for _, element in etree.iterparse(**parse_args):
if element.tag == NRML04_POINT_SOURCE.text:
srcs.append(_parse_point_source(element))
if element.tag == NRML04_AREA_SOURCE.text:
srcs.append(_parse_area_source(element))
return srcs
def _parse_area_source(element):
"""
Parse NRML 0.4 area source element.
"""
ID, name, tect_reg = _get_id_name_tect_reg(element)
polygon = _get_polygon(element)
mfd = _get_mfd(element)
return AreaSourceNRML04(polygon, mfd)
def _get_polygon(element):
"""
Return polygon coordinates from area source element.
"""
polygon = element.find('%s/%s/%s/%s/%s' %
(NRML04_AREA_GEOMETRY, gml.GML_POLYGON,
gml.GML_EXTERIOR, gml.GML_LINEAR_RING,
gml.GML_POS_LIST)).text
polygon = gml._get_polygon_from_2DLinestring(polygon)
return polygon
def _parse_point_source(element):
"""
Parse NRML 0.4 point source element.
"""
ID, name, tect_reg = _get_id_name_tect_reg(element)
lon, lat = _get_point_source_location(element)
mfd = _get_mfd(element)
return PointSourceNRML04(lon, lat, mfd)
def _get_id_name_tect_reg(element):
"""
Return id, name, and tectonic region of a source element.
"""
ID = element.attrib['id']
name = element.attrib['name']
tect_reg = element.attrib['tectonicRegion']
return ID, name, tect_reg
def _get_point_source_location(element):
"""
Return point source location (lon, lat).
"""
pos = element.find('%s/%s/%s' %
(NRML04_POINT_GEOMETRY, gml.GML_POINT, gml.GML_POS))
pos = pos.text.split()
return float(pos[0]), float(pos[1])
def _get_mfd(element):
"""
Get mfd from source element.
"""
mfd = element.find(NRML04_TRUNCATED_GR)
if mfd is None:
mfd = element.find(NRML04_INCREMENTAL_MFD)
if mfd.tag == NRML04_TRUNCATED_GR:
return TruncatedGRMfdNRML04(float(mfd.attrib['aValue']),
float(mfd.attrib['bValue']),
float(mfd.attrib['minMag']),
float(mfd.attrib['maxMag']))
elif mfd.tag == NRML04_INCREMENTAL_MFD:
min_mag = float(mfd.attrib['minMag'])
bin_width = float(mfd.attrib['binWidth'])
occur_rates = numpy.array(mfd.find(NRML04_OCCUR_RATES.text).
text.split(), dtype=float)
return IncrementalMfdNRML04(min_mag, bin_width, occur_rates)
else:
raise ValueError('MFD element not recognized.')
def _create_nrml():
"""
Create and return NRML 0.4 root element.
"""
return etree.Element(NRML04_ROOT_TAG, nsmap=NSMAP)
def _append_source_model(element, name):
"""
Append and return NRML 0.4 source model element.
"""
attrib = {'name': name}
source_model = etree.Element(NRML04_SOURCE_MODEL,attrib=attrib)
element.append(source_model)
return source_model
def _append_id_name_tect_reg(element, NRML04_SOURCE, ID, name, tect_reg):
"""
Append id, name, tectonic region type for the given NRML 0.4 source
typology.
Returns the source element.
"""
attrib = {'id': ID, 'name': name, 'tectonicRegion': tect_reg}
source = etree.Element(NRML04_SOURCE, attrib=attrib)
element.append(source)
return source
def _append_geometry(element, NRML04_GEOMETRY):
"""
Append NRML 0.4 geometry to element and return the geometry element.
"""
geometry = etree.Element(NRML04_GEOMETRY)
element.append(geometry)
return geometry
def _append_dip(element, dip_value):
"""
Append NRML 0.4 dip element.
"""
dip = etree.Element(NRML04_DIP)
dip.text = str(dip_value)
element.append(dip)
def _append_upper_seismo_depth(element, upper_seismo_depth):
"""
Append NRML 0.4 upper seismogenic depth element.
"""
usd = etree.Element(NRML04_UPPER_SEISMO_DEPTH)
usd.text = str(upper_seismo_depth)
element.append(usd)
def _append_lower_seismo_depth(element, lower_seismo_depth):
"""
Append NRML 0.4 lower seismogenic depth element.
"""
lsd = etree.Element(NRML04_LOWER_SEISMO_DEPTH)
lsd.text = str(lower_seismo_depth)
element.append(lsd)
def _append_mag_scaling_rel(element, mag_scale_rel):
"""
Append NRML 0.4 magnitude scaling relationship element.
"""
msr = etree.Element(NRML04_MAG_SCALE_REL)
msr.text = mag_scale_rel
element.append(msr)
def _append_rupt_aspect_ratio(element, rupt_aspect_ratio):
"""
Append NRML 0.4 rupture aspect ratio.
"""
rar = etree.Element(NRML04_RUPT_ASPECT_RATIO)
rar.text = str(rupt_aspect_ratio)
element.append(rar)
def _append_incremental_mfd(element, mfd):
"""
Append NRML 0.4 incremental MFD.
mfd is an instance of class EvenlyDiscretizedIncrementalMfdNrml03.
"""
attrib = {'minMag': str(mfd.min_mag),
'binWidth': str(mfd.bin_size)}
incremental_mfd = etree.Element(NRML04_INCREMENTAL_MFD,
attrib=attrib)
occur_rates = etree.Element(NRML04_OCCUR_RATES)
occur_rates.text = ' '.join(str(v) for v in mfd.rates)
incremental_mfd.append(occur_rates)
element.append(incremental_mfd)
def _append_truncated_gr_mfd(element, mfd):
"""
Append NRML 0.4 truncated GR MFD.
mfd is an instance of TruncatedGutenbergRichterMfdNrml03.
"""
attrib = {'aValue': str(mfd.a_val),
'bValue': str(mfd.b_val),
'minMag': str(mfd.min_mag),
'maxMag': str(mfd.max_mag)}
truncated_gr = etree.Element(NRML04_TRUNCATED_GR,
attrib=attrib)
element.append(truncated_gr)
def _append_nodal_plane_dist(element, strikes_weights, dip, rake):
"""
Append NRML 0.4 nodal plane distribution for a set of strikes values (each
with its own weight). Dip and rake are the same for all values.
"""
nodal_plane_dist = etree.Element(NRML04_NODAL_PLANE | element.append(incremental_mfd) | random_line_split |
nrml04.py | ')
NRML04_LOWER_SEISMO_DEPTH = etree.QName(NRML04_NS,'lowerSeismoDepth')
NRML04_MAG_SCALE_REL = etree.QName(NRML04_NS,'magScaleRel')
NRML04_RUPT_ASPECT_RATIO = etree.QName(NRML04_NS,'ruptAspectRatio')
NRML04_INCREMENTAL_MFD = etree.QName(NRML04_NS,'incrementalMFD')
NRML04_TRUNCATED_GR = etree.QName(NRML04_NS,'truncGutenbergRichterMFD')
NRML04_OCCUR_RATES = etree.QName(NRML04_NS,'occurRates')
NRML04_NODAL_PLANE_DIST = etree.QName(NRML04_NS,'nodalPlaneDist')
NRML04_NODAL_PLANE = etree.QName(NRML04_NS,'nodalPlane')
NRML04_HYPO_DEPTH_DIST = etree.QName(NRML04_NS,'hypoDepthDist')
NRML04_HYPO_DEPTH = etree.QName(NRML04_NS,'hypoDepth')
NRML04_COMPLEX_FAUL_GEOMETRY = etree.QName(NRML04_NS, 'complexFaultGeometry')
NRML04_FAULT_TOP_EDGE = etree.QName(NRML04_NS, 'faultTopEdge')
NRML04_FAULT_BOTTOM_EDGE = etree.QName(NRML04_NS, 'faultBottomEdge')
NRML04_RAKE = etree.QName(NRML04_NS, 'rake')
NRML04_SIMPLE_FAULT_GEOMETRY = etree.QName(NRML04_NS, 'simpleFaultGeometry')
NRML04_DIP = etree.QName(NRML04_NS, 'dip')
NSMAP = {None: NRML04_NS, "gml": gml.GML_NS}
def _parse_source_model_file(source_model_file):
"""
Parse source model file in NRML 0.4.
"""
parse_args = dict(source=source_model_file)
srcs = []
for _, element in etree.iterparse(**parse_args):
if element.tag == NRML04_POINT_SOURCE.text:
srcs.append(_parse_point_source(element))
if element.tag == NRML04_AREA_SOURCE.text:
srcs.append(_parse_area_source(element))
return srcs
def _parse_area_source(element):
"""
Parse NRML 0.4 area source element.
"""
ID, name, tect_reg = _get_id_name_tect_reg(element)
polygon = _get_polygon(element)
mfd = _get_mfd(element)
return AreaSourceNRML04(polygon, mfd)
def _get_polygon(element):
"""
Return polygon coordinates from area source element.
"""
polygon = element.find('%s/%s/%s/%s/%s' %
(NRML04_AREA_GEOMETRY, gml.GML_POLYGON,
gml.GML_EXTERIOR, gml.GML_LINEAR_RING,
gml.GML_POS_LIST)).text
polygon = gml._get_polygon_from_2DLinestring(polygon)
return polygon
def _parse_point_source(element):
"""
Parse NRML 0.4 point source element.
"""
ID, name, tect_reg = _get_id_name_tect_reg(element)
lon, lat = _get_point_source_location(element)
mfd = _get_mfd(element)
return PointSourceNRML04(lon, lat, mfd)
def _get_id_name_tect_reg(element):
"""
Return id, name, and tectonic region of a source element.
"""
ID = element.attrib['id']
name = element.attrib['name']
tect_reg = element.attrib['tectonicRegion']
return ID, name, tect_reg
def _get_point_source_location(element):
"""
Return point source location (lon, lat).
"""
pos = element.find('%s/%s/%s' %
(NRML04_POINT_GEOMETRY, gml.GML_POINT, gml.GML_POS))
pos = pos.text.split()
return float(pos[0]), float(pos[1])
def _get_mfd(element):
"""
Get mfd from source element.
"""
mfd = element.find(NRML04_TRUNCATED_GR)
if mfd is None:
mfd = element.find(NRML04_INCREMENTAL_MFD)
if mfd.tag == NRML04_TRUNCATED_GR:
return TruncatedGRMfdNRML04(float(mfd.attrib['aValue']),
float(mfd.attrib['bValue']),
float(mfd.attrib['minMag']),
float(mfd.attrib['maxMag']))
elif mfd.tag == NRML04_INCREMENTAL_MFD:
min_mag = float(mfd.attrib['minMag'])
bin_width = float(mfd.attrib['binWidth'])
occur_rates = numpy.array(mfd.find(NRML04_OCCUR_RATES.text).
text.split(), dtype=float)
return IncrementalMfdNRML04(min_mag, bin_width, occur_rates)
else:
raise ValueError('MFD element not recognized.')
def _create_nrml():
"""
Create and return NRML 0.4 root element.
"""
return etree.Element(NRML04_ROOT_TAG, nsmap=NSMAP)
def _append_source_model(element, name):
"""
Append and return NRML 0.4 source model element.
"""
attrib = {'name': name}
source_model = etree.Element(NRML04_SOURCE_MODEL,attrib=attrib)
element.append(source_model)
return source_model
def _append_id_name_tect_reg(element, NRML04_SOURCE, ID, name, tect_reg):
"""
Append id, name, tectonic region type for the given NRML 0.4 source
typology.
Returns the source element.
"""
attrib = {'id': ID, 'name': name, 'tectonicRegion': tect_reg}
source = etree.Element(NRML04_SOURCE, attrib=attrib)
element.append(source)
return source
def _append_geometry(element, NRML04_GEOMETRY):
"""
Append NRML 0.4 geometry to element and return the geometry element.
"""
geometry = etree.Element(NRML04_GEOMETRY)
element.append(geometry)
return geometry
def _append_dip(element, dip_value):
"""
Append NRML 0.4 dip element.
"""
dip = etree.Element(NRML04_DIP)
dip.text = str(dip_value)
element.append(dip)
def _append_upper_seismo_depth(element, upper_seismo_depth):
"""
Append NRML 0.4 upper seismogenic depth element.
"""
usd = etree.Element(NRML04_UPPER_SEISMO_DEPTH)
usd.text = str(upper_seismo_depth)
element.append(usd)
def _append_lower_seismo_depth(element, lower_seismo_depth):
"""
Append NRML 0.4 lower seismogenic depth element.
"""
lsd = etree.Element(NRML04_LOWER_SEISMO_DEPTH)
lsd.text = str(lower_seismo_depth)
element.append(lsd)
def | (element, mag_scale_rel):
"""
Append NRML 0.4 magnitude scaling relationship element.
"""
msr = etree.Element(NRML04_MAG_SCALE_REL)
msr.text = mag_scale_rel
element.append(msr)
def _append_rupt_aspect_ratio(element, rupt_aspect_ratio):
"""
Append NRML 0.4 rupture aspect ratio.
"""
rar = etree.Element(NRML04_RUPT_ASPECT_RATIO)
rar.text = str(rupt_aspect_ratio)
element.append(rar)
def _append_incremental_mfd(element, mfd):
"""
Append NRML 0.4 incremental MFD.
mfd is an instance of class EvenlyDiscretizedIncrementalMfdNrml03.
"""
attrib = {'minMag': str(mfd.min_mag),
'binWidth': str(mfd.bin_size)}
incremental_mfd = etree.Element(NRML04_INCREMENTAL_MFD,
attrib=attrib)
occur_rates = etree.Element(NRML04_OCCUR_RATES)
occur_rates.text = ' '.join(str(v) for v in mfd.rates)
incremental_mfd.append(occur_rates)
element.append(incremental_mfd)
def _append_truncated_gr_mfd(element, mfd):
"""
Append NRML 0.4 truncated GR MFD.
mfd is an instance of TruncatedGutenbergRichterMfdNrml03.
"""
attrib = {'aValue': str(mfd.a_val),
'bValue': str(mfd.b_val),
'minMag': str(mfd.min_mag),
'maxMag': str(mfd.max_mag)}
truncated_gr = etree.Element(NRML04_TRUNCATED_GR,
attrib=attrib)
element.append(truncated_gr)
def _append_nodal_plane_dist(element, strikes_weights, dip, rake):
"""
Append NRML 0.4 nodal plane distribution for a set of strikes values (each
with its own weight). Dip and rake are the same for all values.
"""
nodal_plane_dist = etree.Element(NRML04_NODAL_PLANE | _append_mag_scaling_rel | identifier_name |
nrml04.py | ')
NRML04_LOWER_SEISMO_DEPTH = etree.QName(NRML04_NS,'lowerSeismoDepth')
NRML04_MAG_SCALE_REL = etree.QName(NRML04_NS,'magScaleRel')
NRML04_RUPT_ASPECT_RATIO = etree.QName(NRML04_NS,'ruptAspectRatio')
NRML04_INCREMENTAL_MFD = etree.QName(NRML04_NS,'incrementalMFD')
NRML04_TRUNCATED_GR = etree.QName(NRML04_NS,'truncGutenbergRichterMFD')
NRML04_OCCUR_RATES = etree.QName(NRML04_NS,'occurRates')
NRML04_NODAL_PLANE_DIST = etree.QName(NRML04_NS,'nodalPlaneDist')
NRML04_NODAL_PLANE = etree.QName(NRML04_NS,'nodalPlane')
NRML04_HYPO_DEPTH_DIST = etree.QName(NRML04_NS,'hypoDepthDist')
NRML04_HYPO_DEPTH = etree.QName(NRML04_NS,'hypoDepth')
NRML04_COMPLEX_FAUL_GEOMETRY = etree.QName(NRML04_NS, 'complexFaultGeometry')
NRML04_FAULT_TOP_EDGE = etree.QName(NRML04_NS, 'faultTopEdge')
NRML04_FAULT_BOTTOM_EDGE = etree.QName(NRML04_NS, 'faultBottomEdge')
NRML04_RAKE = etree.QName(NRML04_NS, 'rake')
NRML04_SIMPLE_FAULT_GEOMETRY = etree.QName(NRML04_NS, 'simpleFaultGeometry')
NRML04_DIP = etree.QName(NRML04_NS, 'dip')
NSMAP = {None: NRML04_NS, "gml": gml.GML_NS}
def _parse_source_model_file(source_model_file):
"""
Parse source model file in NRML 0.4.
"""
parse_args = dict(source=source_model_file)
srcs = []
for _, element in etree.iterparse(**parse_args):
if element.tag == NRML04_POINT_SOURCE.text:
srcs.append(_parse_point_source(element))
if element.tag == NRML04_AREA_SOURCE.text:
srcs.append(_parse_area_source(element))
return srcs
def _parse_area_source(element):
"""
Parse NRML 0.4 area source element.
"""
ID, name, tect_reg = _get_id_name_tect_reg(element)
polygon = _get_polygon(element)
mfd = _get_mfd(element)
return AreaSourceNRML04(polygon, mfd)
def _get_polygon(element):
"""
Return polygon coordinates from area source element.
"""
polygon = element.find('%s/%s/%s/%s/%s' %
(NRML04_AREA_GEOMETRY, gml.GML_POLYGON,
gml.GML_EXTERIOR, gml.GML_LINEAR_RING,
gml.GML_POS_LIST)).text
polygon = gml._get_polygon_from_2DLinestring(polygon)
return polygon
def _parse_point_source(element):
"""
Parse NRML 0.4 point source element.
"""
ID, name, tect_reg = _get_id_name_tect_reg(element)
lon, lat = _get_point_source_location(element)
mfd = _get_mfd(element)
return PointSourceNRML04(lon, lat, mfd)
def _get_id_name_tect_reg(element):
"""
Return id, name, and tectonic region of a source element.
"""
ID = element.attrib['id']
name = element.attrib['name']
tect_reg = element.attrib['tectonicRegion']
return ID, name, tect_reg
def _get_point_source_location(element):
"""
Return point source location (lon, lat).
"""
pos = element.find('%s/%s/%s' %
(NRML04_POINT_GEOMETRY, gml.GML_POINT, gml.GML_POS))
pos = pos.text.split()
return float(pos[0]), float(pos[1])
def _get_mfd(element):
"""
Get mfd from source element.
"""
mfd = element.find(NRML04_TRUNCATED_GR)
if mfd is None:
mfd = element.find(NRML04_INCREMENTAL_MFD)
if mfd.tag == NRML04_TRUNCATED_GR:
return TruncatedGRMfdNRML04(float(mfd.attrib['aValue']),
float(mfd.attrib['bValue']),
float(mfd.attrib['minMag']),
float(mfd.attrib['maxMag']))
elif mfd.tag == NRML04_INCREMENTAL_MFD:
min_mag = float(mfd.attrib['minMag'])
bin_width = float(mfd.attrib['binWidth'])
occur_rates = numpy.array(mfd.find(NRML04_OCCUR_RATES.text).
text.split(), dtype=float)
return IncrementalMfdNRML04(min_mag, bin_width, occur_rates)
else:
raise ValueError('MFD element not recognized.')
def _create_nrml():
"""
Create and return NRML 0.4 root element.
"""
return etree.Element(NRML04_ROOT_TAG, nsmap=NSMAP)
def _append_source_model(element, name):
|
def _append_id_name_tect_reg(element, NRML04_SOURCE, ID, name, tect_reg):
"""
Append id, name, tectonic region type for the given NRML 0.4 source
typology.
Returns the source element.
"""
attrib = {'id': ID, 'name': name, 'tectonicRegion': tect_reg}
source = etree.Element(NRML04_SOURCE, attrib=attrib)
element.append(source)
return source
def _append_geometry(element, NRML04_GEOMETRY):
"""
Append NRML 0.4 geometry to element and return the geometry element.
"""
geometry = etree.Element(NRML04_GEOMETRY)
element.append(geometry)
return geometry
def _append_dip(element, dip_value):
"""
Append NRML 0.4 dip element.
"""
dip = etree.Element(NRML04_DIP)
dip.text = str(dip_value)
element.append(dip)
def _append_upper_seismo_depth(element, upper_seismo_depth):
"""
Append NRML 0.4 upper seismogenic depth element.
"""
usd = etree.Element(NRML04_UPPER_SEISMO_DEPTH)
usd.text = str(upper_seismo_depth)
element.append(usd)
def _append_lower_seismo_depth(element, lower_seismo_depth):
"""
Append NRML 0.4 lower seismogenic depth element.
"""
lsd = etree.Element(NRML04_LOWER_SEISMO_DEPTH)
lsd.text = str(lower_seismo_depth)
element.append(lsd)
def _append_mag_scaling_rel(element, mag_scale_rel):
"""
Append NRML 0.4 magnitude scaling relationship element.
"""
msr = etree.Element(NRML04_MAG_SCALE_REL)
msr.text = mag_scale_rel
element.append(msr)
def _append_rupt_aspect_ratio(element, rupt_aspect_ratio):
"""
Append NRML 0.4 rupture aspect ratio.
"""
rar = etree.Element(NRML04_RUPT_ASPECT_RATIO)
rar.text = str(rupt_aspect_ratio)
element.append(rar)
def _append_incremental_mfd(element, mfd):
"""
Append NRML 0.4 incremental MFD.
mfd is an instance of class EvenlyDiscretizedIncrementalMfdNrml03.
"""
attrib = {'minMag': str(mfd.min_mag),
'binWidth': str(mfd.bin_size)}
incremental_mfd = etree.Element(NRML04_INCREMENTAL_MFD,
attrib=attrib)
occur_rates = etree.Element(NRML04_OCCUR_RATES)
occur_rates.text = ' '.join(str(v) for v in mfd.rates)
incremental_mfd.append(occur_rates)
element.append(incremental_mfd)
def _append_truncated_gr_mfd(element, mfd):
"""
Append NRML 0.4 truncated GR MFD.
mfd is an instance of TruncatedGutenbergRichterMfdNrml03.
"""
attrib = {'aValue': str(mfd.a_val),
'bValue': str(mfd.b_val),
'minMag': str(mfd.min_mag),
'maxMag': str(mfd.max_mag)}
truncated_gr = etree.Element(NRML04_TRUNCATED_GR,
attrib=attrib)
element.append(truncated_gr)
def _append_nodal_plane_dist(element, strikes_weights, dip, rake):
"""
Append NRML 0.4 nodal plane distribution for a set of strikes values (each
with its own weight). Dip and rake are the same for all values.
"""
nodal_plane_dist = etree.Element(NRML04_NODAL_PLANE | """
Append and return NRML 0.4 source model element.
"""
attrib = {'name': name}
source_model = etree.Element(NRML04_SOURCE_MODEL,attrib=attrib)
element.append(source_model)
return source_model | identifier_body |
nrml04.py | NRML04_LOWER_SEISMO_DEPTH = etree.QName(NRML04_NS,'lowerSeismoDepth')
NRML04_MAG_SCALE_REL = etree.QName(NRML04_NS,'magScaleRel')
NRML04_RUPT_ASPECT_RATIO = etree.QName(NRML04_NS,'ruptAspectRatio')
NRML04_INCREMENTAL_MFD = etree.QName(NRML04_NS,'incrementalMFD')
NRML04_TRUNCATED_GR = etree.QName(NRML04_NS,'truncGutenbergRichterMFD')
NRML04_OCCUR_RATES = etree.QName(NRML04_NS,'occurRates')
NRML04_NODAL_PLANE_DIST = etree.QName(NRML04_NS,'nodalPlaneDist')
NRML04_NODAL_PLANE = etree.QName(NRML04_NS,'nodalPlane')
NRML04_HYPO_DEPTH_DIST = etree.QName(NRML04_NS,'hypoDepthDist')
NRML04_HYPO_DEPTH = etree.QName(NRML04_NS,'hypoDepth')
NRML04_COMPLEX_FAUL_GEOMETRY = etree.QName(NRML04_NS, 'complexFaultGeometry')
NRML04_FAULT_TOP_EDGE = etree.QName(NRML04_NS, 'faultTopEdge')
NRML04_FAULT_BOTTOM_EDGE = etree.QName(NRML04_NS, 'faultBottomEdge')
NRML04_RAKE = etree.QName(NRML04_NS, 'rake')
NRML04_SIMPLE_FAULT_GEOMETRY = etree.QName(NRML04_NS, 'simpleFaultGeometry')
NRML04_DIP = etree.QName(NRML04_NS, 'dip')
NSMAP = {None: NRML04_NS, "gml": gml.GML_NS}
def _parse_source_model_file(source_model_file):
"""
Parse source model file in NRML 0.4.
"""
parse_args = dict(source=source_model_file)
srcs = []
for _, element in etree.iterparse(**parse_args):
if element.tag == NRML04_POINT_SOURCE.text:
srcs.append(_parse_point_source(element))
if element.tag == NRML04_AREA_SOURCE.text:
srcs.append(_parse_area_source(element))
return srcs
def _parse_area_source(element):
"""
Parse NRML 0.4 area source element.
"""
ID, name, tect_reg = _get_id_name_tect_reg(element)
polygon = _get_polygon(element)
mfd = _get_mfd(element)
return AreaSourceNRML04(polygon, mfd)
def _get_polygon(element):
"""
Return polygon coordinates from area source element.
"""
polygon = element.find('%s/%s/%s/%s/%s' %
(NRML04_AREA_GEOMETRY, gml.GML_POLYGON,
gml.GML_EXTERIOR, gml.GML_LINEAR_RING,
gml.GML_POS_LIST)).text
polygon = gml._get_polygon_from_2DLinestring(polygon)
return polygon
def _parse_point_source(element):
"""
Parse NRML 0.4 point source element.
"""
ID, name, tect_reg = _get_id_name_tect_reg(element)
lon, lat = _get_point_source_location(element)
mfd = _get_mfd(element)
return PointSourceNRML04(lon, lat, mfd)
def _get_id_name_tect_reg(element):
"""
Return id, name, and tectonic region of a source element.
"""
ID = element.attrib['id']
name = element.attrib['name']
tect_reg = element.attrib['tectonicRegion']
return ID, name, tect_reg
def _get_point_source_location(element):
"""
Return point source location (lon, lat).
"""
pos = element.find('%s/%s/%s' %
(NRML04_POINT_GEOMETRY, gml.GML_POINT, gml.GML_POS))
pos = pos.text.split()
return float(pos[0]), float(pos[1])
def _get_mfd(element):
"""
Get mfd from source element.
"""
mfd = element.find(NRML04_TRUNCATED_GR)
if mfd is None:
mfd = element.find(NRML04_INCREMENTAL_MFD)
if mfd.tag == NRML04_TRUNCATED_GR:
return TruncatedGRMfdNRML04(float(mfd.attrib['aValue']),
float(mfd.attrib['bValue']),
float(mfd.attrib['minMag']),
float(mfd.attrib['maxMag']))
elif mfd.tag == NRML04_INCREMENTAL_MFD:
|
else:
raise ValueError('MFD element not recognized.')
def _create_nrml():
"""
Create and return NRML 0.4 root element.
"""
return etree.Element(NRML04_ROOT_TAG, nsmap=NSMAP)
def _append_source_model(element, name):
"""
Append and return NRML 0.4 source model element.
"""
attrib = {'name': name}
source_model = etree.Element(NRML04_SOURCE_MODEL,attrib=attrib)
element.append(source_model)
return source_model
def _append_id_name_tect_reg(element, NRML04_SOURCE, ID, name, tect_reg):
"""
Append id, name, tectonic region type for the given NRML 0.4 source
typology.
Returns the source element.
"""
attrib = {'id': ID, 'name': name, 'tectonicRegion': tect_reg}
source = etree.Element(NRML04_SOURCE, attrib=attrib)
element.append(source)
return source
def _append_geometry(element, NRML04_GEOMETRY):
"""
Append NRML 0.4 geometry to element and return the geometry element.
"""
geometry = etree.Element(NRML04_GEOMETRY)
element.append(geometry)
return geometry
def _append_dip(element, dip_value):
"""
Append NRML 0.4 dip element.
"""
dip = etree.Element(NRML04_DIP)
dip.text = str(dip_value)
element.append(dip)
def _append_upper_seismo_depth(element, upper_seismo_depth):
"""
Append NRML 0.4 upper seismogenic depth element.
"""
usd = etree.Element(NRML04_UPPER_SEISMO_DEPTH)
usd.text = str(upper_seismo_depth)
element.append(usd)
def _append_lower_seismo_depth(element, lower_seismo_depth):
"""
Append NRML 0.4 lower seismogenic depth element.
"""
lsd = etree.Element(NRML04_LOWER_SEISMO_DEPTH)
lsd.text = str(lower_seismo_depth)
element.append(lsd)
def _append_mag_scaling_rel(element, mag_scale_rel):
"""
Append NRML 0.4 magnitude scaling relationship element.
"""
msr = etree.Element(NRML04_MAG_SCALE_REL)
msr.text = mag_scale_rel
element.append(msr)
def _append_rupt_aspect_ratio(element, rupt_aspect_ratio):
"""
Append NRML 0.4 rupture aspect ratio.
"""
rar = etree.Element(NRML04_RUPT_ASPECT_RATIO)
rar.text = str(rupt_aspect_ratio)
element.append(rar)
def _append_incremental_mfd(element, mfd):
"""
Append NRML 0.4 incremental MFD.
mfd is an instance of class EvenlyDiscretizedIncrementalMfdNrml03.
"""
attrib = {'minMag': str(mfd.min_mag),
'binWidth': str(mfd.bin_size)}
incremental_mfd = etree.Element(NRML04_INCREMENTAL_MFD,
attrib=attrib)
occur_rates = etree.Element(NRML04_OCCUR_RATES)
occur_rates.text = ' '.join(str(v) for v in mfd.rates)
incremental_mfd.append(occur_rates)
element.append(incremental_mfd)
def _append_truncated_gr_mfd(element, mfd):
"""
Append NRML 0.4 truncated GR MFD.
mfd is an instance of TruncatedGutenbergRichterMfdNrml03.
"""
attrib = {'aValue': str(mfd.a_val),
'bValue': str(mfd.b_val),
'minMag': str(mfd.min_mag),
'maxMag': str(mfd.max_mag)}
truncated_gr = etree.Element(NRML04_TRUNCATED_GR,
attrib=attrib)
element.append(truncated_gr)
def _append_nodal_plane_dist(element, strikes_weights, dip, rake):
"""
Append NRML 0.4 nodal plane distribution for a set of strikes values (each
with its own weight). Dip and rake are the same for all values.
"""
nodal_plane_dist = etree.Element(NRML04_NODAL_PLANE | min_mag = float(mfd.attrib['minMag'])
bin_width = float(mfd.attrib['binWidth'])
occur_rates = numpy.array(mfd.find(NRML04_OCCUR_RATES.text).
text.split(), dtype=float)
return IncrementalMfdNRML04(min_mag, bin_width, occur_rates) | conditional_block |
code.go | in " + quotient + "\n"
divcode += "\t\t\t; Safe remainder in " + remainder + "\n"
if _, err := strconv.Atoi(divisor); err == nil {
divcode += "\tmov rax, " + divisor + "\n"
} else {
divcode += "\tmov rax, [" + divisor + "]\n"
}
if _, err := strconv.Atoi(dividend); err == nil {
divcode += "\tmov rbx, " + dividend + "\n"
} else {
divcode += "\tmov rbx, [" + dividend + "]\n"
}
divcode += "\tdiv rbx\n"
if quotient != "" {
divcode += "\tmov [" + quotient + "], rax\n"
}
if remainder != "" {
divcode += "\tmov [" + remainder + "], rdx\n"
}
pc.appendCode(divcode)
}
// End of Math
//*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*//*//
// createParams the code to copy values into argument registers
// as defined in the *amd64 System V calling convention*
// Marginal note: MiniPython functions can't take any arguments; that is still not a supported feature.
// FIXME: Is this in use?
// As long as argSlice delivers a value, it is placed in one of the
// following registers, in the order given by registerSlice. In this way up
// to six 64-bit numbers can be passed to the called function, which can
// easily read them from the registers.
func (pc *programCode) createParams(argSlice []string) {
code := ""
registerSlice := []string{"rdi", "rsi", "rdx", "rcx", "r8", "r9"} // SysV ABI calling register for parameters
for i := 0; i < len(argSlice) && i < 6; i++ {
if _, err := strconv.Atoi(argSlice[i]); err == nil {
code += "\tmov " + registerSlice[i] + argSlice[i] + "\n"
} else {
code += "\tmov " + registerSlice[i] + "[" + argSlice[i] + "]\n"
}
}
pc.appendCode(code)
}
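// Editor's note: the sketch below is illustrative only and is not part of the
// original compiler. It shows the same System V AMD64 rule in isolation: the
// first six integer arguments travel in rdi, rsi, rdx, rcx, r8 and r9. The
// helper name sysVMovesExample and its literal arguments are invented here.
func sysVMovesExample() string {
	registers := []string{"rdi", "rsi", "rdx", "rcx", "r8", "r9"}
	args := []string{"42", "someVar"} // one literal and one variable name
	asm := ""
	for i, arg := range args {
		if i >= len(registers) {
			break // only six arguments fit into registers
		}
		if _, err := strconv.Atoi(arg); err == nil {
			asm += "\tmov " + registers[i] + ", " + arg + "\n" // immediate value
		} else {
			asm += "\tmov " + registers[i] + ", [" + arg + "]\n" // load from memory
		}
	}
	return asm // yields "\tmov rdi, 42\n\tmov rsi, [someVar]\n"
}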
// createCall allows the label passed as string argument to be
// called.
//"call" executes a function inside assembly. It cleanes used
// registers before and after the function did its job. -> amd64 Sys V
// abi
// FIXME: 'jmp vs. call'?
func (pc *programCode) createCall(name string) {
code := ""
code += "\n\tcall " + name + "\t; call label " + name + "\n"
pc.appendCode(code)
}
// createLabel marks a label inside the assembly source code. It also
// increments the indentLevel counter, in order to write the following
// code block into a separated buffer. Labels can be called or jumped
// to. createLabel accepts the label's name as an argument
func (pc *programCode) createLabel(name string) {
code := ""
code += "\n" + name + ":\n"
pc.funcSlice = append(pc.funcSlice, name)
pc.indentLevel += 1 // dive deeper -> next buffer.
// Please have a look to FIXME: Where can I find what?
pc.appendCode(code)
}
// createReturn leaves the innermost function/indent buffer by
// decrementing the pc.indentLevel. It is the last function appending
// information to a asm-Code block.
func (pc *programCode) createReturn() {
code := "\tret\n"
pc.appendCode(code)
pc.indentLevel-- // get back -> buffer before
}
// createJump allows the final program to jump to a label. This is
// used for functions. FIXME: Rest(for, if)?
func (pc *programCode) createJump(label string) {
code := ""
code += "\tjmp " + label + "\t; jmp to " + label + "\n"
pc.appendCode(code)
}
// createJumpBackLabel writes a label to the main code to which the
// final program can jump back after a functions, if-clause or
// for-loop was finished
// Interesting function call:
// pc.pushLastLabel(label) places the label (func, if, for, etc) onto
// a stack memory, in order to remind the program where it should jump
// next
func (pc *programCode) createJumpBackLabel(category string) {
code := ""
strlabelCounter := strconv.FormatInt(pc.labelCounter, 10)
label := category + strlabelCounter
pc.pushLastLabel(label)
code += "\t" + label + ":\t; return point\n"
pc.appendCode(code)
}
func (pc *programCode) createJumpBack() {
code := ""
label := pc.popLastLabel()
code += "\tjmp " + label + "\t; return to last place\n"
pc.appendCode(code)
pc.indentLevel--
}
// createResetLoopVar appends a code snippet to pc.code which resets a
// loopVarN to a given value.
// Is this function necessary? Why not use programCode.SetVar(int64, string)?
func (pc *programCode) createResetLoopVar(name string, val int) {
valStr := strconv.Itoa(val)
code := ""
code += "\tmov rax, " + valStr + "\t;reset LoopVar to" + valStr + "\n"
code += "\t mov [" + name + "], rax;\t done\n"
pc.appendCode(code)
}
// The compiler has a stack to manage nested functions, conditions and
// loops. It is still a so called Brechstangen-Methode due to the
// inflexibility of Go's slices compared to Python's lists. Slices
// refer to an underlying array of something. They are basically a pointer
// to the real chunk of data used, which has some dynamic aspects.
// pc.LastLabel[n] represents the position of a label in the hierarchy
// of a running program.
// A generic function to let the stack grow and shrink is indispensable
// for a MiniPython program which consists of a lot of branching, for example
// conditions, loops, functions. The sad truth is that a limited
// brechstangen-code sets the borders of a MiniPython system.
// Branching should work well enough within eight stack layers.
func (pc *programCode) pushLastLabel(name string) {
// errors happened often enough to place some debug logic here. The
// really ugly and terminal filling printed debug messages should
// mainly show the changes made to the stack.
if debug == 2 {
fmt.Println("Lastlabel stack before push")
for i, v := range pc.lastLabel { // iterate over the stack'n'print it.
fmt.Println("Number", i, ":", v)
}
}
// FIXME: Fix this!
// #Brechstangen Methode
pc.lastLabel[8] = pc.lastLabel[7]
pc.lastLabel[7] = pc.lastLabel[6]
pc.lastLabel[6] = pc.lastLabel[5]
pc.lastLabel[5] = pc.lastLabel[4]
pc.lastLabel[4] = pc.lastLabel[3]
pc.lastLabel[3] = pc.lastLabel[2]
pc.lastLabel[2] = pc.lastLabel[1]
pc.lastLabel[1] = pc.lastLabel[0]
pc.lastLabel[0] = name
if debug == 2 {
fmt.Println("Lastlabel stack after push:")
for i, v := range pc.lastLabel {
fmt.Println("Number", i, ":", v)
}
}
}
// popLastLabel() pops a label from the stack. The label is returned as a string.
func (pc *programCode) popLastLabel() string {
// These debug messages show how the stack was changed. See
// pushLastLabel(name string) for more information
if debug == 2 {
fmt.Println("Lastlabel stack before pop:")
for i, v := range pc.lastLabel {
fmt.Println("Number", i, ":", v)
}
}
// Popping labels off the stack just works fine. No one fears a
// Brechstangen-Methode to appear here anytime soon.
label := ""
if len(pc.lastLabel) != 0 {
label = pc.lastLabel[0]
}
if len(pc.lastLabel)-1 > 1 {
pc.lastLabel = pc.lastLabel[1 : len(pc.lastLabel)-1]
}
// These debug messages show how the stack was changed
if debug == 2 {
fmt.Println("Lastlabel stack after pop:")
for i, v := range pc.lastLabel {
fmt.Println("Number", i, ":", v)
}
}
return label
}
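// Editor's note: hypothetical alternative, not used by the compiler above. The
// fixed-size shifting in pushLastLabel emulates a plain stack; with a growing
// slice the same behaviour could look like the sketch below. The type name
// labelStack is an assumption made only for this illustration.
type labelStack []string

func (s *labelStack) push(name string) {
	*s = append([]string{name}, *s...) // newest label sits at index 0
}

func (s *labelStack) pop() string {
	if len(*s) == 0 {
		return "" // mirror the empty-stack behaviour of popLastLabel
	}
	top := (*s)[0]
	*s = (*s)[1:]
	return top
}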
// FIXME: DONE
// <s> The BAUSTELLE! : Solved on Monday July 27th
// For loops are working but still strange to use. The loopvariable
// can('t) be accessed by their predefined name and appended counter
// number e.g. loopVar0, loopVar1, loopVar3 counting is still
// necessary. Todo: Change loopVar32 to something more general like </s>
//
// for loops just work fine |
// This is the code snipped checking the condition inside an assembly loop. | random_line_split |
|
code.go | , [var2]
mov [var3], eax
*/
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+//
// Math snippets: the following code templates are appended to the
// output program to do the ugly number-crunching work. The following
// functions' parameters are names of variables inside the output
// program
// createAdd("AnzahlMurmeln", "MurmelSack3000", "AnzahlMurmeln") ?
// Addition
func (pc *programCode) createAdd(a, b, sum string) {
code := "\n\t\t\t; Add " + b + " to " +
a + " and save sum in " + sum + "\n"
if _, err := strconv.Atoi(a); err == nil {
code += "\tmov rax, " + a + "\n"
} else {
code += "\tmov rax, [" + a + "]\n"
}
if _, err := strconv.Atoi(b); err == nil {
code += "\tadd rax, " + b + "\n"
} else {
code += "\tadd rax, [" + b + "]\n"
}
code += "\tmov [" + sum + "], rax\n"
pc.appendCode(code)
}
// Subtraction
func (pc *programCode) createSub(m, s, dif string) {
code := "\n\t\t\t; Substract " + s + " from " +
m + " and save difference in " + dif + "\n"
if _, err := strconv.Atoi(m); err == nil {
code += "\tmov rax, " + m + "\n"
} else {
code += "\tmov rax, [" + m + "]\n"
}
if _, err := strconv.Atoi(s); err == nil {
code += "\tsub rax, " + s + "\n"
} else {
code += "\tsub rax, [" + s + "]\n"
}
code += "\tmov [" + dif + "], rax\n"
pc.appendCode(code)
}
// Multiplication
func (pc *programCode) createMul(a, b, prod string) {
code := "\n\t\t\t; Multiply " + a + " with " +
b + " and store product in " + prod + "\n"
if _, err := strconv.Atoi(a); err == nil {
code += "\tmov rax, " + a + "\n"
} else {
code += "\tmov rax, [" + a + "]\n"
}
if _, err := strconv.Atoi(b); err == nil {
code += "\timul rax, " + b + "\n"
} else {
code += "\timul rax, [" + b + "]\n"
}
code += "\tmov [" + prod + "], rax\n"
pc.appendCode(code)
}
// Division
/*
mov rax, [divisor] ;divides rax by rbx remainder is stored in rdx quotient is stored in rax
mov rbx, [dividend]
div rbx
mov [q], rax ;; quotient
mov [r], rdx ;; remainder
*/
// Make sure not to divide by zero. It'll cause a floating point error
// and program will crash. This feature is still buggy.
func (pc *programCode) createDiv(divisor, dividend, quotient, remainder string) {
divcode := "\n\t\t\t; Divide " + divisor + " by " +
dividend + " and safe quotient in " + quotient + "\n"
divcode += "\t\t\t; Safe remainder in " + remainder + "\n"
if _, err := strconv.Atoi(divisor); err == nil {
divcode += "\tmov rax, " + divisor + "\n"
} else {
divcode += "\tmov rax, [" + divisor + "]\n"
}
if _, err := strconv.Atoi(dividend); err == nil {
divcode += "\tmov rbx, " + dividend + "\n"
} else {
divcode += "\tmov rbx, [" + dividend + "]\n"
}
divcode += "\tdiv rbx\n"
if quotient != "" {
divcode += "\tmov [" + quotient + "], rax\n"
}
if remainder != "" {
divcode += "\tmov [" + remainder + "], rdx\n"
}
pc.appendCode(divcode)
}
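// Editor's note: illustrative sketch only. createAdd, createSub, createMul and
// createDiv all repeat the same "literal vs. variable" operand test; a shared
// helper could state that rule once. emitOperand is an invented name and is
// not referenced anywhere else in this file.
func emitOperand(mnemonic, register, operand string) string {
	if _, err := strconv.Atoi(operand); err == nil {
		return "\t" + mnemonic + " " + register + ", " + operand + "\n" // immediate
	}
	return "\t" + mnemonic + " " + register + ", [" + operand + "]\n" // memory operand
}

// For example, createAdd("count", "5", "count") above emits roughly:
//	mov rax, [count]
//	add rax, 5
//	mov [count], rax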
// End of Math
//*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*//*//
// createParams the code to copy values into argument registers
// as defined in the *amd64 System V calling convention*
// Marginal note: MiniPython functions can't take any arguments; that is still not a supported feature.
// FIXME: Is this in use?
// As long as argSlice delivers a value, it is placed in one of the
// following registers, in the order given by registerSlice. In this way up
// to six 64-bit numbers can be passed to the called function, which can
// easily read them from the registers.
func (pc *programCode) createParams(argSlice []string) {
code := ""
registerSlice := []string{"rdi", "rsi", "rdx", "rcx", "r8", "r9"} // SysV ABI calling register for parameters
for i := 0; i < len(argSlice) && i < 6; i++ |
pc.appendCode(code)
}
// createCall allows the label passed as string argument to be
// called.
//"call" executes a function inside assembly. It cleanes used
// registers before and after the function did its job. -> amd64 Sys V
// abi
// FIXME: 'jmp vs. call'?
func (pc *programCode) createCall(name string) {
code := ""
code += "\n\tcall " + name + "\t; call label " + name + "\n"
pc.appendCode(code)
}
// createLabel marks a label inside the assembly source code. It also
// increments the indentLevel counter, in order to write the following
// code block into a separated buffer. Labels can be called or jumped
// to. createLabel accepts the label's name as an argument
func (pc *programCode) createLabel(name string) {
code := ""
code += "\n" + name + ":\n"
pc.funcSlice = append(pc.funcSlice, name)
pc.indentLevel += 1 // dive deeper -> next buffer.
// Please have a look to FIXME: Where can I find what?
pc.appendCode(code)
}
// createReturn leaves the innermost function/indent buffer by
// decrementing the pc.indentLevel. It is the last function appending
// information to a asm-Code block.
func (pc *programCode) createReturn() {
code := "\tret\n"
pc.appendCode(code)
pc.indentLevel-- // get back -> buffer before
}
// createJump allows the final program to jump to a label. This is
// used for functions. FIXME: Rest(for, if)?
func (pc *programCode) createJump(label string) {
code := ""
code += "\tjmp " + label + "\t; jmp to " + label + "\n"
pc.appendCode(code)
}
// createJumpBackLabel writes a label to the main code to which the
// final program can jump back after a functions, if-clause or
// for-loop was finished
// Interesting function call:
// pc.pushLastLabel(label) places the label (func, if, for, etc) onto
// a stack memory, in order to remind the program where it should jump
// next
func (pc *programCode) createJumpBackLabel(category string) {
code := ""
strlabelCounter := strconv.FormatInt(pc.labelCounter, 10)
label := category + strlabelCounter
pc.pushLastLabel(label)
code += "\t" + label + ":\t; return point\n"
pc.appendCode(code)
}
func (pc *programCode) createJumpBack() {
code := ""
label := pc.popLastLabel()
code += "\tjmp " + label + "\t; return to last place\n"
pc.appendCode(code)
pc.indentLevel--
}
// createResetLoopVar appends a code snippet to pc.code which resets a
// loopVarN to a given value.
// Is this function necessary? Why not use programCode.SetVar(int64, string)?
func (pc *programCode) createResetLoopVar(name string, val int) {
valStr := strconv.Itoa(val)
code := ""
code += "\tmov rax, " + valStr + "\t;reset LoopVar to" + valStr + "\n"
code += "\t mov [" + name + "], rax;\t done\n"
pc.appendCode(code)
}
// The compiler has a stack to manage nested functions, conditions and
// loops. It is still a so called Brechstangen-Methode due to the
// inflexibility of Go's slices compared to Python's lists. Slices
// refer to an underlying array of something. They are basically a pointer | {
if _, err := strconv.Atoi(argSlice[i]); err == nil {
code += "\tmov " + registerSlice[i] + argSlice[i] + "\n"
} else {
code += "\tmov " + registerSlice[i] + "[" + argSlice[i] + "]\n"
}
} | conditional_block |
code.go | , [var2]
mov [var3], eax
*/
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+//
// Math snippets: the following code templates are appended to the
// output program to do the ugly number-crunching work. The following
// functions' parameters are names of variables inside the output
// program
// createAdd("AnzahlMurmeln", "MurmelSack3000", "AnzahlMurmeln") ?
// Addition
func (pc *programCode) createAdd(a, b, sum string) {
code := "\n\t\t\t; Add " + b + " to " +
a + " and save sum in " + sum + "\n"
if _, err := strconv.Atoi(a); err == nil {
code += "\tmov rax, " + a + "\n"
} else {
code += "\tmov rax, [" + a + "]\n"
}
if _, err := strconv.Atoi(b); err == nil {
code += "\tadd rax, " + b + "\n"
} else {
code += "\tadd rax, [" + b + "]\n"
}
code += "\tmov [" + sum + "], rax\n"
pc.appendCode(code)
}
// Subtraction
func (pc *programCode) createSub(m, s, dif string) {
code := "\n\t\t\t; Substract " + s + " from " +
m + " and save difference in " + dif + "\n"
if _, err := strconv.Atoi(m); err == nil {
code += "\tmov rax, " + m + "\n"
} else {
code += "\tmov rax, [" + m + "]\n"
}
if _, err := strconv.Atoi(s); err == nil {
code += "\tsub rax, " + s + "\n"
} else {
code += "\tsub rax, [" + s + "]\n"
}
code += "\tmov [" + dif + "], rax\n"
pc.appendCode(code)
}
// Multiplication
func (pc *programCode) createMul(a, b, prod string) {
code := "\n\t\t\t; Multiply " + a + " with " +
b + " and store product in " + prod + "\n"
if _, err := strconv.Atoi(a); err == nil {
code += "\tmov rax, " + a + "\n"
} else {
code += "\tmov rax, [" + a + "]\n"
}
if _, err := strconv.Atoi(b); err == nil {
code += "\timul rax, " + b + "\n"
} else {
code += "\timul rax, [" + b + "]\n"
}
code += "\tmov [" + prod + "], rax\n"
pc.appendCode(code)
}
// Division
/*
mov rax, [divisor] ;divides rax by rbx remainder is stored in rdx quotient is stored in rax
mov rbx, [dividend]
div rbx
mov [q], rax ;; quotient
mov [r], rdx ;; remainder
*/
// Make sure not to divide by zero. It'll cause a floating point error
// and program will crash. This feature is still buggy.
func (pc *programCode) createDiv(divisor, dividend, quotient, remainder string) {
divcode := "\n\t\t\t; Divide " + divisor + " by " +
dividend + " and safe quotient in " + quotient + "\n"
divcode += "\t\t\t; Safe remainder in " + remainder + "\n"
if _, err := strconv.Atoi(divisor); err == nil {
divcode += "\tmov rax, " + divisor + "\n"
} else {
divcode += "\tmov rax, [" + divisor + "]\n"
}
if _, err := strconv.Atoi(dividend); err == nil {
divcode += "\tmov rbx, " + dividend + "\n"
} else {
divcode += "\tmov rbx, [" + dividend + "]\n"
}
divcode += "\tdiv rbx\n"
if quotient != "" {
divcode += "\tmov [" + quotient + "], rax\n"
}
if remainder != "" {
divcode += "\tmov [" + remainder + "], rdx\n"
}
pc.appendCode(divcode)
}
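// Editor's note: hedged sketch of a possible hardening, not the original code.
// x86-64 `div rbx` divides the 128-bit value rdx:rax, so rdx should be cleared
// first, and a zero divisor raises a CPU exception at run time. An emitter
// guarding against both might produce something like the snippet below; the
// label name .division_by_zero is purely hypothetical.
func divisionGuardSketch() string {
	asm := ""
	asm += "\txor rdx, rdx\t; clear the upper half of the dividend\n"
	asm += "\tcmp rbx, 0\n"
	asm += "\tje .division_by_zero\t; bail out instead of faulting\n"
	asm += "\tdiv rbx\n"
	return asm
}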
// End of Math
//*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*//*//
// createParams the code to copy values into argument registers
// as defined in the *amd64 System V calling convention*
// Marginal note: MiniPython functions can't take any arguments; that is still not a supported feature.
// FIXME: Is this in use?
// As long as argSlice delivers a value, it is placed in one of the
// following registers, in the order given by registerSlice. In this way up
// to six 64-bit numbers can be passed to the called function, which can
// easily read them from the registers.
func (pc *programCode) createParams(argSlice []string) {
code := ""
registerSlice := []string{"rdi", "rsi", "rdx", "rcx", "r8", "r9"} // SysV ABI calling register for parameters
for i := 0; i < len(argSlice) && i < 6; i++ {
if _, err := strconv.Atoi(argSlice[i]); err == nil {
code += "\tmov " + registerSlice[i] + argSlice[i] + "\n"
} else {
code += "\tmov " + registerSlice[i] + "[" + argSlice[i] + "]\n"
}
}
pc.appendCode(code)
}
// createCall allows the label passed as string argument to be
// called.
//"call" executes a function inside assembly. It cleanes used
// registers before and after the function did its job. -> amd64 Sys V
// abi
// FIXME: 'jmp vs. call'?
func (pc *programCode) createCall(name string) {
code := ""
code += "\n\tcall " + name + "\t; call label " + name + "\n"
pc.appendCode(code)
}
// createLabel marks a label inside the assembly source code. It also
// increments the indentLevel counter, in order to write the following
// code block into a separated buffer. Labels can be called or jumped
// to. createLabel accepts the label's name as an argument
func (pc *programCode) | (name string) {
code := ""
code += "\n" + name + ":\n"
pc.funcSlice = append(pc.funcSlice, name)
pc.indentLevel += 1 // dive deeper -> next buffer.
// Please have a look to FIXME: Where can I find what?
pc.appendCode(code)
}
// createReturn leaves the innermost function/indent buffer by
// decrementing the pc.indentLevel. It is the last function appending
// information to a asm-Code block.
func (pc *programCode) createReturn() {
code := "\tret\n"
pc.appendCode(code)
pc.indentLevel-- // get back -> buffer before
}
// createJump allows the final program to jump to a label. This is
// used for functions. FIXME: Rest(for, if)?
func (pc *programCode) createJump(label string) {
code := ""
code += "\tjmp " + label + "\t; jmp to " + label + "\n"
pc.appendCode(code)
}
// createJumpBackLabel writes a label to the main code to which the
// final program can jump back after a functions, if-clause or
// for-loop was finished
// Interesting function call:
// pc.pushLastLabel(label) places the label (func, if, for, etc) onto
// a stack memory, in order to remind the program where it should jump
// next
func (pc *programCode) createJumpBackLabel(category string) {
code := ""
strlabelCounter := strconv.FormatInt(pc.labelCounter, 10)
label := category + strlabelCounter
pc.pushLastLabel(label)
code += "\t" + label + ":\t; return point\n"
pc.appendCode(code)
}
func (pc *programCode) createJumpBack() {
code := ""
label := pc.popLastLabel()
code += "\tjmp " + label + "\t; return to last place\n"
pc.appendCode(code)
pc.indentLevel--
}
// createResetLoopVar appends a code snippet to pc.code which resets a
// loopVarN to a given value.
// Is this function necessary? Why not use programCode.SetVar(int64, string)?
func (pc *programCode) createResetLoopVar(name string, val int) {
valStr := strconv.Itoa(val)
code := ""
code += "\tmov rax, " + valStr + "\t;reset LoopVar to" + valStr + "\n"
code += "\t mov [" + name + "], rax;\t done\n"
pc.appendCode(code)
}
// The compiler has a stack to manage nested functions, conditions and
// loops. It is still a so called Brechstangen-Methode due to the
// inflexibility of Go's slices compared to Python's lists. Slices
// refer to an underlying array of something. They are basically a pointer
| createLabel | identifier_name |
code.go |
/*
mov eax, [var1]
add eax, [var2]
mov [var3], eax
*/
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+//
// Math snippets: the following code templates are appended to the
// output program to do the ugly number-crunching work. The following
// functions' parameters are names of variables inside the output
// program
// createAdd("AnzahlMurmeln", "MurmelSack3000", "AnzahlMurmeln") ?
// Addition
func (pc *programCode) createAdd(a, b, sum string) {
code := "\n\t\t\t; Add " + b + " to " +
a + " and save sum in " + sum + "\n"
if _, err := strconv.Atoi(a); err == nil {
code += "\tmov rax, " + a + "\n"
} else {
code += "\tmov rax, [" + a + "]\n"
}
if _, err := strconv.Atoi(b); err == nil {
code += "\tadd rax, " + b + "\n"
} else {
code += "\tadd rax, [" + b + "]\n"
}
code += "\tmov [" + sum + "], rax\n"
pc.appendCode(code)
}
// Subtraction
func (pc *programCode) createSub(m, s, dif string) {
code := "\n\t\t\t; Substract " + s + " from " +
m + " and save difference in " + dif + "\n"
if _, err := strconv.Atoi(m); err == nil {
code += "\tmov rax, " + m + "\n"
} else {
code += "\tmov rax, [" + m + "]\n"
}
if _, err := strconv.Atoi(s); err == nil {
code += "\tsub rax, " + s + "\n"
} else {
code += "\tsub rax, [" + s + "]\n"
}
code += "\tmov [" + dif + "], rax\n"
pc.appendCode(code)
}
// Multiplication
func (pc *programCode) createMul(a, b, prod string) {
code := "\n\t\t\t; Multiply " + a + " with " +
b + " and store product in " + prod + "\n"
if _, err := strconv.Atoi(a); err == nil {
code += "\tmov rax, " + a + "\n"
} else {
code += "\tmov rax, [" + a + "]\n"
}
if _, err := strconv.Atoi(b); err == nil {
code += "\timul rax, " + b + "\n"
} else {
code += "\timul rax, [" + b + "]\n"
}
code += "\tmov [" + prod + "], rax\n"
pc.appendCode(code)
}
// Division
/*
mov rax, [divisor] ;divides rax by rbx remainder is stored in rdx quotient is stored in rax
mov rbx, [dividend]
div rbx
mov [q], rax ;; quotient
mov [r], rdx ;; remainder
*/
// Make sure not to divide by zero. It'll cause a floating point error
// and program will crash. This feature is still buggy.
func (pc *programCode) createDiv(divisor, dividend, quotient, remainder string) {
divcode := "\n\t\t\t; Divide " + divisor + " by " +
dividend + " and safe quotient in " + quotient + "\n"
divcode += "\t\t\t; Safe remainder in " + remainder + "\n"
if _, err := strconv.Atoi(divisor); err == nil {
divcode += "\tmov rax, " + divisor + "\n"
} else {
divcode += "\tmov rax, [" + divisor + "]\n"
}
if _, err := strconv.Atoi(dividend); err == nil {
divcode += "\tmov rbx, " + dividend + "\n"
} else {
divcode += "\tmov rbx, [" + dividend + "]\n"
}
divcode += "\tdiv rbx\n"
if quotient != "" {
divcode += "\tmov [" + quotient + "], rax\n"
}
if remainder != "" {
divcode += "\tmov [" + remainder + "], rdx\n"
}
pc.appendCode(divcode)
}
// End of Math
//*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*/*//*//
// createParams the code to copy values into argument registers
// as defined in the *amd64 System V calling convention*
// Marginal note: MiniPython functions can't take any arguments; that is still not a supported feature.
// FIXME: Is this in use?
// As long as argSlice delivers a value, it is placed in one of the
// following registers, in the order given by registerSlice. In this way up
// to six 64-bit numbers can be passed to the called function, which can
// easily read them from the registers.
func (pc *programCode) createParams(argSlice []string) {
code := ""
registerSlice := []string{"rdi", "rsi", "rdx", "rcx", "r8", "r9"} // SysV ABI calling register for parameters
for i := 0; i < len(argSlice) && i < 6; i++ {
if _, err := strconv.Atoi(argSlice[i]); err == nil {
code += "\tmov " + registerSlice[i] + argSlice[i] + "\n"
} else {
code += "\tmov " + registerSlice[i] + "[" + argSlice[i] + "]\n"
}
}
pc.appendCode(code)
}
// createCall allows the label passed as string argument to be
// called.
//"call" executes a function inside assembly. It cleanes used
// registers before and after the function did its job. -> amd64 Sys V
// abi
// FIXME: 'jmp vs. call'?
func (pc *programCode) createCall(name string) {
code := ""
code += "\n\tcall " + name + "\t; call label " + name + "\n"
pc.appendCode(code)
}
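// Editor's note: illustrative only. The FIXME above asks about `jmp` vs
// `call`: `call` pushes a return address so the callee can `ret`, while `jmp`
// is a one-way transfer that needs an explicit jump back. Neither helper below
// exists in the real compiler; they only contrast the two emitted forms.
func emitCallSketch(name string) string {
	return "\tcall " + name + "\t; pushes the return address, pairs with ret\n"
}

func emitJumpSketch(name string) string {
	return "\tjmp " + name + "\t; no return address, caller must jump back itself\n"
}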
// createLabel marks a label inside the assembly source code. It also
// increments the indentLevel counter, in order to write the following
// code block into a separated buffer. Labels can be called or jumped
// to. createLabel accepts the label's name as an argument
func (pc *programCode) createLabel(name string) {
code := ""
code += "\n" + name + ":\n"
pc.funcSlice = append(pc.funcSlice, name)
pc.indentLevel += 1 // dive deeper -> next buffer.
// Please have a look to FIXME: Where can I find what?
pc.appendCode(code)
}
// createReturn leaves the innermost function/indent buffer by
// decrementing the pc.indentLevel. It is the last function appending
// information to a asm-Code block.
func (pc *programCode) createReturn() {
code := "\tret\n"
pc.appendCode(code)
pc.indentLevel-- // get back -> buffer before
}
// createJump allows the final program to jump to a label. This is
// used for functions. FIXME: Rest(for, if)?
func (pc *programCode) createJump(label string) {
code := ""
code += "\tjmp " + label + "\t; jmp to " + label + "\n"
pc.appendCode(code)
}
// createJumpBackLabel writes a label to the main code to which the
// final program can jump back after a functions, if-clause or
// for-loop was finished
// Interesting function call:
// pc.pushLastLabel(label) places the label (func, if, for, etc) onto
// a stack memory, in order to remind the program where it should jump
// next
func (pc *programCode) createJumpBackLabel(category string) {
code := ""
strlabelCounter := strconv.FormatInt(pc.labelCounter, 10)
label := category + strlabelCounter
pc.pushLastLabel(label)
code += "\t" + label + ":\t; return point\n"
pc.appendCode(code)
}
func (pc *programCode) createJumpBack() {
code := ""
label := pc.popLastLabel()
code += "\tjmp " + label + "\t; return to last place\n"
pc.appendCode(code)
pc.indentLevel--
}
// createResetLoopVar appends a code snippet to pc.code which resets a
// loopVarN to a given value.
// Is this function necessary? Why not use programCode.SetVar(int64, string)?
func (pc *programCode) createResetLoopVar(name string | {
len := (int64)(len(pc.stringMap[sname])) - 2
// FIXME: WTF int64. Why not use int and strconv.Atoi(var string)
// and stringcon.Itoa(var int)
strlen := strconv.FormatInt(len, 10)
code := "\tmov rax, 1\t;print String" + sname + "\n\tmov rdi, 1\n\tmov rdx, " + strlen + "\n\tmov rsi, " + sname + "\n\tsyscall\n"
pc.appendCode(code)
} | identifier_body |
|
root.rs | pub hpre: u32,
pub prediv: u32,
}
#[allow(unused_labels)]
#[inline(never)]
pub fn handler(reg: Regs, thr_init: ThrsInit) {
let mut clock_mode = ClockMode::High64MHz;
let (thr, scb) = thr::init_extended(thr_init);
thr.hard_fault.add_once(|| panic!("Hard Fault"));
// Allocate the clock control resources.
let mut res = SystemRes {
sys_tick: periph_sys_tick!(reg),
thr_sys_tick: thr.sys_tick,
// ----------------------
// -- Clocks.
// The internal PLLs can be used to multiply the HSI or HSE
// output clock frequency.
pll: Pll::new(periph_pll!(reg)),
// The HSI clock signal is generated from an internal 8 MHz RC Oscillator.
hsi: Hsi::new(periph_hsi!(reg)),
// The LSE clock (32.768K oscillator, not used in this crate.)
lse: Lse::new(periph_lse!(reg)),
// The RCC component.
rcc: Rcc::new(periph_rcc!(reg)),
// The flash component,
flash: Flash::new(periph_flash!(reg)),
// ----------------------
// -- Factors and selectors.
// CAUTION: Setting wrong values may make your system unusable.
// Read the reference manual for detailed information.
//
// PLL multiplication factor.
// Possible values for pllmul:
// Caution: The PLL output frequency must not exceed 72 MHz.
// 0000: PLL input clock x 2
// 0001: PLL input clock x 3
// 0010: PLL input clock x 4
// 0011: PLL input clock x 5
// 0100: PLL input clock x 6
// 0101: PLL input clock x 7
// 0110: PLL input clock x 8
// 0111: PLL input clock x 9
// 1000: PLL input clock x 10
// 1001: PLL input clock x 11
// 1010: PLL input clock x 12
// 1011: PLL input clock x 13
// 1100: PLL input clock x 14
// 1101: PLL input clock x 15
// 1110: PLL input clock x 16
// 1111: Not applicable
pllmul: 0b1110, // Field RCC_CFGR PLLMUL in ref. manual RM0316.
// System clock switch.
// Possible values for clksrc:
// 00: HSI oscillator used as system clock.
// 01: HSE oscillator used as system clock.
// 10: PLL used as system clock
// 11: Not applicable.
clksrc: 0b10, // Field RCC_CFGR SW in ref. manual RM0316.
//
// Possible values for pllsrc:
// Caution: Different values for STM32F303xD/E and STM32F398xE!
// 00: HSI/2 selected as PLL input clock.
// 01: HSE/PREDIV selected as PLL input clock
// 10: Reserved.
// 11: Reserved.
pllsrc: 0b00, // Field RCC_CFGR PLLSRC in ref. manual RM0316.
// Division factor of the AHB clock (AHB prescaler).
// Possible values for hpre:
// 0xxx: SYSCLK not divided
// 1000: SYSCLK divided by 2
// 1001: SYSCLK divided by 4
// 1010: SYSCLK divided by 8
// 1011: SYSCLK divided by 16
// 1100: SYSCLK divided by 64
// 1101: SYSCLK divided by 128
// 1110: SYSCLK divided by 256
// 1111: SYSCLK divided by 512
hpre: 0b0000, // Field RCC_CFGR HPRE in ref. manual RM0316.
// PREDIV division factor.
prediv: 0b000, // Field RCC_CFGR2 PREDIV in ref. manual RM0316.
};
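// Editor's note: hedged illustration, not in the original source. With the
// selectors above - pllsrc = HSI/2, pllmul field 0b1110 (x16, i.e. field value
// plus 2 per RM0316), clksrc = PLL and hpre = no division - the expected AHB
// clock is 8 MHz / 2 * 16 = 64 MHz. The binding below only repeats that
// arithmetic; the authoritative value still comes from System::calculate_hclk.
let _expected_hclk_sketch: u32 = (8_000_000 / 2) * (0b1110 + 2);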
swo::flush();
swo::update_prescaler(HSI_CLK / log::baud_rate!() - 1);
System::delay(100, HSI_CLK, &res).root_wait();
// The on-board user LED is connected to GPIO bank B.
// Create register and pins mapping component.
let gpio_pins = drv_gpio_pins!(reg);
let mut gpio_b = GpioHead::new(periph_gpio_b_head!(reg));
// Enable and initialize.
let gpio_b_en = gpio_b.enable();
gpio_pins.init(gpio_b_en.inventory_token());
scb.scb_ccr_div_0_trp.set_bit();
unsafe {
fpu_init(true);
}
// Enable the system configuration controller clock.
reg.rcc_apb2enr.syscfgen.set_bit();
// Setup fault handlers.
thr.hard_fault.add_once(|| panic!("Hard Fault"));
// Exti configuration for the user button.
// There is no user button on the Nucleo-F303K8,
// but we use the PB5 pin to emulate it.
let exti5 = ExtiDrv::init(ExtiSetup {
exti: periph_exti5!(reg),
exti_int: thr.exti_9_5,
config: 0b001, // PB5 pin.
falling: false, // don't trigger the interrupt on a falling edge.
rising: true, // trigger the interrupt on a rising edge.
});
'user_button_pressed: loop {
// Reset the clock control registers to their default.
System::reset_rcc(&res);
// Apply the current clock tree configuration.
System::apply_clock_config(&res);
// Calculate the configured clock speed.
let hclk = System::calculate_hclk(&res);
swo::flush();
swo::update_prescaler(hclk / log::baud_rate!() - 1);
System::delay(50, hclk, &res).root_wait();
println!("Running at {} MHz", hclk);
listen(&res, &thr, &exti5, &gpio_pins, hclk).root_wait();
// Set different configuration for the clock tree
// depending on current configuration
match clock_mode {
ClockMode::Reset8MHz => {
clock_mode = ClockMode::Medium32MHz; // <- new mode.
res.pllsrc = 0b00; // HSI is PLL clock input.
res.clksrc = 0b10; // Use PLL output 32 MHz.
res.pllmul = 0b0110;
System::delay(50, 8_000_000, &res).root_wait();
}
ClockMode::Medium32MHz => {
clock_mode = ClockMode::High64MHz; // <- new mode.
res.pllsrc = 0b00; // HSI is PLL clock input.
res.clksrc = 0b10; // Use PLL output 64 MHz
res.pllmul = 0b1110;
System::delay(50, 32_000_000, &res).root_wait();
}
ClockMode::High64MHz => {
clock_mode = ClockMode::Reset8MHz; // <- new mode.
res.pllsrc = 0b00; // No PLL.
res.clksrc = 0b00; // Use HSI 8MHz.
res.pllmul = 0b0000;
System::delay(20, 64_000_000, &res).root_wait();
}
}
}
}
async fn | (
res: &SystemRes,
thr: &Thrs,
exti5: &ExtiDrv<Exti5, thr::Exti95>,
gpio_pins: &GpioPins,
hclk: u32,
) -> Event {
println!("Enter listen, hclk={}", hclk);
// Attach a listener that will notify us on user button pressed.
let mut button_stream = exti5.create_saturating_stream();
// Attach a listener that will notify us on each SYS_TICK interrupt trigger.
let mut tick_stream = res.thr_sys_tick.add_pulse_try_stream(
// This closure will be called when a receiver no longer can store the
// number of ticks since | listen | identifier_name |
root.rs | 4
// 1101: SYSCLK divided by 128
// 1110: SYSCLK divided by 256
// 1111: SYSCLK divided by 512
hpre: 0b0000, // Field RCC_CFGR HPRE in ref. manual RM0316.
// PREDIV division factor.
prediv: 0b000, // Field RCC_CFGR2 PREDIV in ref. manual RM0316.
};
swo::flush();
swo::update_prescaler(HSI_CLK / log::baud_rate!() - 1);
System::delay(100, HSI_CLK, &res).root_wait();
// The on-board user LED is connected to GPIO bank B.
// Create register and pins mapping component.
let gpio_pins = drv_gpio_pins!(reg);
let mut gpio_b = GpioHead::new(periph_gpio_b_head!(reg));
// Enable and initialize.
let gpio_b_en = gpio_b.enable();
gpio_pins.init(gpio_b_en.inventory_token());
scb.scb_ccr_div_0_trp.set_bit();
unsafe {
fpu_init(true);
}
// Enable the system configuration controller clock.
reg.rcc_apb2enr.syscfgen.set_bit();
// Setup fault handlers.
thr.hard_fault.add_once(|| panic!("Hard Fault"));
// Exti configuration for the user button.
// There is no user button on the Nucleo-F303K8,
// but we use the PB5 pin to emulate it.
let exti5 = ExtiDrv::init(ExtiSetup {
exti: periph_exti5!(reg),
exti_int: thr.exti_9_5,
config: 0b001, // PB5 pin.
falling: false, // don't trigger the interrupt on a falling edge.
rising: true, // trigger the interrupt on a rising edge.
});
'user_button_pressed: loop {
// Reset the clock control registers to their default.
System::reset_rcc(&res);
// Apply the current clock tree configuration.
System::apply_clock_config(&res);
// Calculate the configured clock speed.
let hclk = System::calculate_hclk(&res);
swo::flush();
swo::update_prescaler(hclk / log::baud_rate!() - 1);
System::delay(50, hclk, &res).root_wait();
println!("Running at {} MHz", hclk);
listen(&res, &thr, &exti5, &gpio_pins, hclk).root_wait();
// Set different configuration for the clock tree
// depending on current configuration
match clock_mode {
ClockMode::Reset8MHz => {
clock_mode = ClockMode::Medium32MHz; // <- new mode.
res.pllsrc = 0b00; // HSI is PLL clock input.
res.clksrc = 0b10; // Use PLL output 32 MHz.
res.pllmul = 0b0110;
System::delay(50, 8_000_000, &res).root_wait();
}
ClockMode::Medium32MHz => {
clock_mode = ClockMode::High64MHz; // <- new mode.
res.pllsrc = 0b00; // HSI is PLL clock input.
res.clksrc = 0b10; // Use PLL output 64 MHz
res.pllmul = 0b1110;
System::delay(50, 32_000_000, &res).root_wait();
}
ClockMode::High64MHz => {
clock_mode = ClockMode::Reset8MHz; // <- new mode.
res.pllsrc = 0b00; // No PLL.
res.clksrc = 0b00; // Use HSI 8MHz.
res.pllmul = 0b0000;
System::delay(20, 64_000_000, &res).root_wait();
}
}
}
}
async fn listen(
res: &SystemRes,
thr: &Thrs,
exti5: &ExtiDrv<Exti5, thr::Exti95>,
gpio_pins: &GpioPins,
hclk: u32,
) -> Event {
println!("Enter listen, hclk={}", hclk);
// Attach a listener that will notify us on user button pressed.
let mut button_stream = exti5.create_saturating_stream();
// Attach a listener that will notify us on each SYS_TICK interrupt trigger.
let mut tick_stream = res.thr_sys_tick.add_pulse_try_stream(
// This closure will be called when a receiver no longer can store the
// number of ticks since the last stream poll. If this happens, a
// `TickOverflow` error will be sent over the stream as its final value.
|| Err(TickOverflow),
// A fiber that will be called on each interrupt trigger. It sends a
// single tick over the stream.
fib::new_fn(|| fib::Yielded(Some(1))),
);
// Clear the current value of the timer.
res.sys_tick.stk_val.store(|r| r.write_current(0));
//
// The duration of setting the led ON is inversely proportional to the
// MCU clock speed. It shall be:
// 3.60 seconds when cpu clocks @ 4MHz
// 0.40 seconds when cpu clocks @ 36MHz
// 0.20 seconds when cpu clocks @ 72MHz
// The trigger is set so that it returns twice per interval
// at the highest speed, and proportionally more often per interval
// at lower speeds.
// That way, the SysTick interrupt will happen every 100ms at all speeds
// and it can be used for debouncing and doubleclick control.
let mut trigger = 4_000_000 / 8; // So many systick/sec at 4MHz.
trigger = trigger / 10; // So many in 100ms at 4MHz.
trigger = trigger * (hclk / 4_000_000); // More at higher speed
res.sys_tick.stk_load.store(|r| r.write_reload(trigger));
res.sys_tick.stk_ctrl.store(|r| {
r.set_tickint() // Counting down to 0 triggers the SysTick interrupt
.set_enable() // Start the counter in a multi-shot way
});
let mut green_led_on = true;
gpio_pins.output(Led::GreenLed as u8, true); // Start with the green LED ON.
// Enable the interrupt for the user button.
thr.exti_9_5.enable_int();
// Counters
let mut debounce_protection: i16 = 0;
let mut doubleclick_protection: i16 = 0;
let mut ticks_cnt: u32 = 0;
// Monitored interval lengths (accumulated ticks).
let debounce_ival = 2;
let doubleclick_ival = 4;
// This is dependent on mcu speed:
let ticks_ival: u32 = 40 / (hclk / 4_000_000);
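// For example: roughly 20 ticks (~2 s) at 8 MHz, 5 ticks (~0.5 s) at 32 MHz,
// and 2 ticks (~0.2 s) at 64 MHz.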
'blinky: loop {
let evt = select_biased! {
_p = button_stream.next().fuse() => Event::Push,
_t = tick_stream.next().fuse() => Event::Tick,
};
match evt {
Event::Tick => {
if debounce_protection > i16::MIN {
debounce_protection = debounce_protection - 1;
};
if doubleclick_protection < i16::MAX {
doubleclick_protection = doubleclick_protection + 1;
};
if debounce_protection == 0 && doubleclick_protection >= doubleclick_ival {
println!("Switch to new speed");
break 'blinky;
}
// The low and the high interval is 'ticks_ival' ticks.
ticks_cnt = ticks_cnt + 1;
if ticks_cnt >= ticks_ival {
ticks_cnt = 0;
match green_led_on {
true => {
println!("LED off");
green_led_on = false;
gpio_pins.output(Led::GreenLed as u8, false);
}
_ => {
println!("LED on");
green_led_on = true;
gpio_pins.output(Led::GreenLed as u8, true);
}
}
}
}
Event::Push => {
// After disabling the interrupt or after re-enabling
// the interrupt, the stream needs to be flushed to protect
// the logic during the switching period against mechanical
// contact bouncing and doubleclicks.
if doubleclick_protection > doubleclick_ival {
println!("--");
thr.exti_9_5.disable_int();
debounce_protection = debounce_ival;
} else {
doubleclick_protection = 0;
println!("++");
}
root.rs | pub hpre: u32,
pub prediv: u32,
}
#[allow(unused_labels)]
#[inline(never)]
pub fn handler(reg: Regs, thr_init: ThrsInit) {
let mut clock_mode = ClockMode::High64MHz;
let (thr, scb) = thr::init_extended(thr_init);
thr.hard_fault.add_once(|| panic!("Hard Fault"));
// Allocate the clock control resources.
let mut res = SystemRes {
sys_tick: periph_sys_tick!(reg),
thr_sys_tick: thr.sys_tick,
// ----------------------
// -- Clocks.
// The internal PLLs can be used to multiply the HSI or HSE
// output clock frequency.
pll: Pll::new(periph_pll!(reg)),
// The HSI clock signal is generated from an internal 8 MHz RC Oscillator.
hsi: Hsi::new(periph_hsi!(reg)),
// The LSE clock (32.768 kHz oscillator, not used in this crate).
lse: Lse::new(periph_lse!(reg)),
// The RCC component.
rcc: Rcc::new(periph_rcc!(reg)),
// The flash component,
flash: Flash::new(periph_flash!(reg)),
// ----------------------
// -- Factors and selectors.
// CAUTION: Setting wrong values may make your system unusable.
// Read the reference manual for detailed information.
//
// PLL multiplication factor.
// Possible values for pllmul:
// Caution: The PLL output frequency must not exceed 72 MHz.
// 0000: PLL input clock x 2
// 0001: PLL input clock x 3
// 0010: PLL input clock x 4
// 0011: PLL input clock x 5
// 0100: PLL input clock x 6
// 0101: PLL input clock x 7
// 0110: PLL input clock x 8
// 0111: PLL input clock x 9
// 1000: PLL input clock x 10
// 1001: PLL input clock x 11
// 1010: PLL input clock x 12
// 1011: PLL input clock x 13
// 1100: PLL input clock x 14
// 1101: PLL input clock x 15
// 1110: PLL input clock x 16
// 1111: Not applicable
pllmul: 0b1110, // Field RCC_CFGR PLLMUL in ref. manual RM0316.
// System clock switch.
// Possible values for clksrc:
// 00: HSI oscillator used as system clock.
// 01: HSE oscillator used as system clock.
// 10: PLL used as system clock
// 11: Not applicable.
clksrc: 0b10, // Field RCC_CFGR SW in ref. manual RM0316.
//
// Possible values for pllsrc:
// Caution: Different values for STM32F303xD/E and STM32F398xE!
// 00: HSI/2 selected as PLL input clock.
// 01: HSE/PREDIV selected as PLL input clock
// 10: Reserved.
// 11: Reserved.
pllsrc: 0b00, // Field RCC_CFGR PLLSRC in ref. manual RM0316.
// Division factor of the AHB clock (AHB prescaler).
// Possible values for hpre:
// 0xxx: SYSCLK not divided
// 1000: SYSCLK divided by 2
// 1001: SYSCLK divided by 4
// 1010: SYSCLK divided by 8
// 1011: SYSCLK divided by 16
// 1100: SYSCLK divided by 64
// 1101: SYSCLK divided by 128
// 1110: SYSCLK divided by 256
// 1111: SYSCLK divided by 512
hpre: 0b0000, // Field RCC_CFGR HPRE in ref. manual RM0316.
// PREDIV division factor.
prediv: 0b000, // Field RCC_CFGR2 PREDIV in ref. manual RM0316.
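// Taken together (HSI/2 = 4 MHz PLL input, x16 multiplier, PLL as SYSCLK,
// AHB undivided), this initial configuration targets 64 MHz, which matches
// ClockMode::High64MHz above.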
};
swo::flush();
swo::update_prescaler(HSI_CLK / log::baud_rate!() - 1);
System::delay(100, HSI_CLK, &res).root_wait();
// The on-board user LED is connected to GPIO bank B.
// Create register and pins mapping component.
let gpio_pins = drv_gpio_pins!(reg);
let mut gpio_b = GpioHead::new(periph_gpio_b_head!(reg));
// Enable and initialize.
let gpio_b_en = gpio_b.enable();
gpio_pins.init(gpio_b_en.inventory_token());
scb.scb_ccr_div_0_trp.set_bit();
unsafe {
fpu_init(true);
}
// Enable the system configuration controller clock.
reg.rcc_apb2enr.syscfgen.set_bit();
// Setup fault handlers.
thr.hard_fault.add_once(|| panic!("Hard Fault"));
// Exti configuration for the user button.
// There is no user button on the Nucleo-F303K8,
// but we use the PB5 pin to emulate it.
let exti5 = ExtiDrv::init(ExtiSetup {
exti: periph_exti5!(reg),
exti_int: thr.exti_9_5,
config: 0b001, // PB5 pin.
falling: false, // don't trigger the interrupt on a falling edge.
rising: true, // trigger the interrupt on a rising edge.
});
'user_button_pressed: loop {
// Reset the clock control registers to their default.
System::reset_rcc(&res);
// Apply the current clock tree configuration.
System::apply_clock_config(&res);
// Calculate the configured clock speed.
let hclk = System::calculate_hclk(&res);
swo::flush();
swo::update_prescaler(hclk / log::baud_rate!() - 1);
System::delay(50, hclk, &res).root_wait();
println!("Running at {} MHz", hclk);
listen(&res, &thr, &exti5, &gpio_pins, hclk).root_wait();
// Set different configuration for the clock tree
// depending on current configuration
match clock_mode {
ClockMode::Reset8MHz => {
clock_mode = ClockMode::Medium32MHz; // <- new mode.
res.pllsrc = 0b00; // HSI is PLL clock input.
res.clksrc = 0b10; // Use PLL output 32 MHz.
res.pllmul = 0b0110;
System::delay(50, 8_000_000, &res).root_wait();
}
ClockMode::Medium32MHz => {
clock_mode = ClockMode::High64MHz; // <- new mode.
res.pllsrc = 0b00; // HSI is PLL clock input.
res.clksrc = 0b10; // Use PLL output 64 MHz
res.pllmul = 0b1110;
System::delay(50, 32_000_000, &res).root_wait();
}
ClockMode::High64MHz => {
clock_mode = ClockMode::Reset8MHz; // <- new mode.
res.pllsrc = 0b00; // No PLL.
res.clksrc = 0b00; // Use HSI 8MHz.
res.pllmul = 0b0000;
System::delay(20, 64_000_000, &res).root_wait();
}
}
}
}
async fn listen(
res: &SystemRes,
thr: &Thrs,
exti5: &ExtiDrv<Exti5, thr::Exti95>,
gpio_pins: &GpioPins,
hclk: u32,
) -> Event {
println!("Enter listen, hclk={}", hclk);
// Attach a listener that will notify us on user button pressed.
let mut button_stream = exti5.create_saturating_stream();
// Attach a listener that will notify us on each SYS_TICK interrupt trigger.
let mut tick_stream = res.thr_sys_tick.add_pulse_try_stream(
// This closure will be called when a receiver no longer can store the
// number of ticks since the last stream poll. If this happens, a
// `TickOverflow` error will be sent over the stream as its final value.
wait_cell.rs | <'a> {
/// The [`WaitCell`] being waited on.
cell: &'a WaitCell,
}
#[derive(Eq, PartialEq, Copy, Clone)]
struct State(usize);
// === impl WaitCell ===
impl WaitCell {
loom_const_fn! {
/// Returns a new `WaitCell`, with no [`Waker`] stored in it.
#[must_use]
pub fn new() -> Self {
Self {
state: CachePadded::new(AtomicUsize::new(State::WAITING.0)),
waker: UnsafeCell::new(None),
}
}
}
}
impl WaitCell {
/// Poll to wait on this `WaitCell`, consuming a stored wakeup or
/// registering the [`Waker`] from the provided [`Context`] to be woken by
/// the next wakeup.
///
/// Once a [`Waker`] has been registered, a subsequent call to [`wake`] will
/// wake that [`Waker`].
///
/// # Returns
///
/// - [`Poll::Pending`] if the [`Waker`] was registered. If this method returns
/// [`Poll::Pending`], then the registered [`Waker`] will be woken by a
/// subsequent call to [`wake`].
/// - [`Poll::Ready`]`(`[`Ok`]`(()))` if the cell was woken by a call to
/// [`wake`] while the [`Waker`] was being registered.
/// - [`Poll::Ready`]`(`[`Err`]`(`[`PollWaitError::Closed`]`))` if the
/// [`WaitCell`] has been closed.
/// - [`Poll::Ready`]`(`[`Err`]`(`[`PollWaitError::Busy`]`))` if another
/// task was concurrently registering its [`Waker`] with this
/// [`WaitCell`].
///
/// [`wake`]: Self::wake
pub fn poll_wait(&self, cx: &mut Context<'_>) -> Poll<Result<(), PollWaitError>> {
enter_test_debug_span!("WaitCell::poll_wait", cell = ?fmt::ptr(self));
// this is based on tokio's AtomicWaker synchronization strategy
match test_dbg!(self.compare_exchange(State::WAITING, State::REGISTERING, Acquire)) {
Err(actual) if test_dbg!(actual.contains(State::CLOSED)) => {
return Poll::Ready(Err(PollWaitError::Closed));
}
Err(actual) if test_dbg!(actual.contains(State::WOKEN)) => {
// take the wakeup
self.fetch_and(!State::WOKEN, Release);
return Poll::Ready(Ok(()));
}
// someone else is notifying, so don't wait!
Err(actual) if test_dbg!(actual.contains(State::WAKING)) => {
return Poll::Ready(Ok(()));
}
Err(_) => return Poll::Ready(Err(PollWaitError::Busy)),
Ok(_) => {}
}
let waker = cx.waker();
trace!(wait_cell = ?fmt::ptr(self), ?waker, "registering waker");
let prev_waker = self.waker.with_mut(|old_waker| unsafe {
match &mut *old_waker {
Some(old_waker) if waker.will_wake(old_waker) => None,
old => old.replace(waker.clone()),
}
});
if let Some(prev_waker) = prev_waker {
test_debug!("Replaced an old waker in cell, waking");
prev_waker.wake();
}
if let Err(actual) =
test_dbg!(self.compare_exchange(State::REGISTERING, State::WAITING, AcqRel))
{
// If the `compare_exchange` fails above, this means that we were notified for one of
// two reasons: either the cell was awoken, or the cell was closed.
//
// Bail out of the parking state, and determine what to report to the caller.
test_trace!(state = ?actual, "was notified");
let waker = self.waker.with_mut(|waker| unsafe { (*waker).take() });
// Reset to the WAITING state by clearing everything *except*
// the closed bits (which must remain set). This `fetch_and`
// does *not* set the CLOSED bit if it is unset, it just doesn't
// clear it.
let state = test_dbg!(self.fetch_and(State::CLOSED, AcqRel));
// The only valid state transition while we were parking is to
// add the CLOSED bit.
debug_assert!(
state == actual || state == actual | State::CLOSED,
"state changed unexpectedly while parking!"
);
if let Some(waker) = waker {
waker.wake();
}
// Was the `CLOSED` bit set while we were clearing other bits?
// If so, the cell is closed. Otherwise, we must have been notified.
if state.contains(State::CLOSED) {
return Poll::Ready(Err(PollWaitError::Closed));
}
return Poll::Ready(Ok(()));
}
// Waker registered, time to yield!
Poll::Pending
}
/// Wait to be woken up by this cell.
///
/// # Returns
///
/// This future completes with the following values:
///
/// - [`Ok`]`(())` if the future was woken by a call to [`wake`] or another
/// task calling [`poll_wait`] or [`wait`] on this [`WaitCell`].
/// - [`Err`]`(`[`Closed`]`)` if the task was woken by a call to [`close`],
/// or the [`WaitCell`] was already closed.
///
/// **Note**: The calling task's [`Waker`] is not registered until AFTER the
/// first time the returned [`Wait`] future is polled. This means that if a
/// call to [`wake`] occurs between when [`wait`] is called and when the
/// future is first polled, the future will *not* complete. If the caller is
/// responsible for performing an operation which will result in an eventual
/// wakeup, prefer calling [`subscribe`] _before_ performing that operation
/// and `.await`ing the [`Wait`] future returned by [`subscribe`].
///
/// [`wake`]: Self::wake
/// [`poll_wait`]: Self::poll_wait
/// [`wait`]: Self::wait
/// [`close`]: Self::close
/// [`subscribe`]: Self::subscribe
pub fn wait(&self) -> Wait<'_> {
Wait {
cell: self,
presubscribe: Poll::Pending,
}
}
/// Eagerly subscribe to notifications from this `WaitCell`.
///
/// This method returns a [`Subscribe`] [`Future`], which outputs a [`Wait`]
/// [`Future`]. Awaiting the [`Subscribe`] future will eagerly register the
/// calling task to be woken by this [`WaitCell`], so that the returned
/// [`Wait`] future will be woken by any calls to [`wake`] (or [`close`])
/// that occur between when the [`Subscribe`] future completes and when the
/// returned [`Wait`] future is `.await`ed.
///
/// This is primarily intended for scenarios where the task that waits on a
/// [`WaitCell`] is responsible for performing some operation that
/// ultimately results in the [`WaitCell`] being woken. If the task were to
/// simply perform the operation and then call [`wait`] on the [`WaitCell`],
/// a potential race condition could occur where the operation completes and
/// wakes the [`WaitCell`] *before* the [`Wait`] future is first `.await`ed.
/// Using `subscribe`, the task can ensure that it is ready to be woken by
/// the cell *before* performing an operation that could result in it being
/// woken.
///
/// These scenarios occur when a wakeup is triggered by another thread/CPU
/// core in response to an operation performed in the task waiting on the
/// `WaitCell`, or when the wakeup is triggered by a hardware interrupt
/// resulting from operations performed in the task.
///
/// # Examples
///
/// ```
/// use maitake::sync::WaitCell;
///
/// // Perform an operation that results in a concurrent wakeup, such as
/// // unmasking an interrupt.
/// fn do_something_that_causes_a_wakeup() {
/// # WAIT_CELL.wake();
/// // ...
/// }
///
/// static WAIT_CELL: WaitCell = WaitCell::new();
///
/// # async fn dox() {
/// // Subscribe to notifications from the cell *before* calling
/// // `do_something_that_causes_a_wakeup()`, to ensure that we are
/// // ready to be woken when the interrupt is unmasked.
/// let wait = WAIT_CELL.subscribe().await;
///
/// // Actually perform the operation.
/// do_something_that_causes_a_wakeup();
///
/// // Wait for the wakeup. If the wakeup occurred *before* the first
/// // poll of the `wait` future had successfully subscribed to the
/// // `WaitCell`, we would still receive the wakeup, because the
///
wait_cell.rs | /// // Subscribe to notifications from the cell *before* calling
/// // `do_something_that_causes_a_wakeup()`, to ensure that we are
/// // ready to be woken when the interrupt is unmasked.
/// let wait = WAIT_CELL.subscribe().await;
///
/// // Actually perform the operation.
/// do_something_that_causes_a_wakeup();
///
/// // Wait for the wakeup. If the wakeup occurred *before* the first
/// // poll of the `wait` future had successfully subscribed to the
/// // `WaitCell`, we would still receive the wakeup, because the
/// // `subscribe` future ensured that our waker was registered to be
/// // woken.
/// wait.await.expect("WaitCell is not closed");
/// # }
/// ```
///
/// [`wait`]: Self::wait
/// [`wake`]: Self::wake
/// [`close`]: Self::close
pub fn subscribe(&self) -> Subscribe<'_> {
Subscribe { cell: self }
}
/// Wake the [`Waker`] stored in this cell.
///
/// # Returns
///
/// - `true` if a waiting task was woken.
/// - `false` if no task was woken (no [`Waker`] was stored in the cell)
pub fn wake(&self) -> bool {
enter_test_debug_span!("WaitCell::wake", cell = ?fmt::ptr(self));
if let Some(waker) = self.take_waker(false) {
waker.wake();
true
} else {
false
}
}
/// Close the [`WaitCell`].
///
/// This wakes any waiting task with an error indicating the `WaitCell` is
/// closed. Subsequent calls to [`wait`] or [`poll_wait`] will return an
/// error indicating that the cell has been closed.
///
/// [`wait`]: Self::wait
/// [`poll_wait`]: Self::poll_wait
pub fn close(&self) -> bool {
enter_test_debug_span!("WaitCell::close", cell = ?fmt::ptr(self));
if let Some(waker) = self.take_waker(true) {
waker.wake();
true
} else {
false
}
}
// TODO(eliza): is this an API we want to have?
/*
/// Returns `true` if this `WaitCell` is [closed](Self::close).
pub(crate) fn is_closed(&self) -> bool {
self.current_state() == State::CLOSED
}
*/
/// Takes this `WaitCell`'s waker.
// TODO(eliza): could probably be made a public API...
pub(crate) fn take_waker(&self, close: bool) -> Option<Waker> {
trace!(wait_cell = ?fmt::ptr(self), ?close, "notifying");
// Set the WAKING bit (to indicate that we're touching the waker) and
// the WOKEN bit (to indicate that we intend to wake it up).
let state = {
let mut bits = State::WAKING | State::WOKEN;
if close {
bits.0 |= State::CLOSED.0;
}
test_dbg!(self.fetch_or(bits, AcqRel))
};
// Is anyone else touching the waker?
if !test_dbg!(state.contains(State::WAKING | State::REGISTERING | State::CLOSED)) {
// Ladies and gentlemen...we got him (the lock)!
let waker = self.waker.with_mut(|thread| unsafe { (*thread).take() });
// Release the lock.
self.fetch_and(!State::WAKING, Release);
if let Some(waker) = test_dbg!(waker) {
trace!(wait_cell = ?fmt::ptr(self), ?close, ?waker, "notified");
return Some(waker);
}
}
None
}
}
impl WaitCell {
#[inline(always)]
fn compare_exchange(
&self,
State(curr): State,
State(new): State,
success: Ordering,
) -> Result<State, State> {
self.state
.compare_exchange(curr, new, success, Acquire)
.map(State)
.map_err(State)
}
#[inline(always)]
fn fetch_and(&self, State(state): State, order: Ordering) -> State {
State(self.state.fetch_and(state, order))
}
#[inline(always)]
fn fetch_or(&self, State(state): State, order: Ordering) -> State {
State(self.state.fetch_or(state, order))
}
#[inline(always)]
fn current_state(&self) -> State {
State(self.state.load(Acquire))
}
}
unsafe impl Send for WaitCell {}
unsafe impl Sync for WaitCell {}
impl fmt::Debug for WaitCell {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("WaitCell")
.field("state", &self.current_state())
.field("waker", &fmt::display(".."))
.finish()
}
}
impl Drop for WaitCell {
fn drop(&mut self) {
self.close();
}
}
// === impl Wait ===
impl Future for Wait<'_> {
type Output = Result<(), Closed>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
enter_test_debug_span!("Wait::poll");
// Did a wakeup occur while we were pre-registering the future?
if test_dbg!(self.presubscribe.is_ready()) {
return self.presubscribe;
}
// Okay, actually poll the cell, then.
match task::ready!(test_dbg!(self.cell.poll_wait(cx))) {
Ok(()) => Poll::Ready(Ok(())),
Err(PollWaitError::Closed) => Poll::Ready(Err(Closed(()))),
Err(PollWaitError::Busy) => {
// If some other task was registering, yield and try to re-register
// our waker when that task is done.
cx.waker().wake_by_ref();
Poll::Pending
}
}
}
}
// === impl Subscribe ===
impl<'cell> Future for Subscribe<'cell> {
type Output = Wait<'cell>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
enter_test_debug_span!("Subscribe::poll");
// Pre-register the waker in the cell.
let presubscribe = match test_dbg!(self.cell.poll_wait(cx)) {
Poll::Ready(Err(PollWaitError::Busy)) => {
// Someone else is in the process of registering. Yield now so we
// can wait until that task is done, and then try again.
cx.waker().wake_by_ref();
return Poll::Pending;
}
Poll::Ready(Err(PollWaitError::Closed)) => Poll::Ready(Err(Closed(()))),
Poll::Ready(Ok(())) => Poll::Ready(Ok(())),
Poll::Pending => Poll::Pending,
};
Poll::Ready(Wait {
cell: self.cell,
presubscribe,
})
}
}
// === impl State ===
impl State {
/// /!\ EXTREMELY SERIOUS WARNING! /!\
/// It is LOAD BEARING that the `WAITING` state is represented by zero!
/// This is because we return to the waiting state by `fetch_and`ing out all
/// other bits in a few places. If this state's bit representation is
/// changed to anything other than zero, that code will break! Don't do
/// that!
///
/// YES, FUTURE ELIZA, THIS DOES APPLY TO YOU. YOU ALREADY BROKE IT ONCE.
/// DON'T DO IT AGAIN.
const WAITING: Self = Self(0b0000);
const REGISTERING: Self = Self(0b0001);
const WAKING: Self = Self(0b0010);
const WOKEN: Self = Self(0b0100);
const CLOSED: Self = Self(0b1000);
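// For example, a cell that is closed while a task is registering its waker
// would read REGISTERING | CLOSED == 0b1001.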
fn contains(self, Self(state): Self) -> bool {
self.0 & state > 0
}
}
impl ops::BitOr for State {
type Output = Self;
fn bitor(self, Self(rhs): Self) -> Self::Output {
Self(self.0 | rhs)
}
}
impl ops::BitAnd for State {
type Output = Self;
fn bitand(self, Self(rhs): Self) -> Self::Output {
Self(self.0 & rhs)
}
}
impl ops::Not for State {
type Output = Self;
fn not(self) -> Self::Output {
Self(!self.0)
}
}
impl fmt::Debug for State {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut has_states = false;
fmt_bits!(self, f, has_states, REGISTERING, WAKING, CLOSED, WOKEN);
if !has_states {
if *self == Self::WAITING {
return f.write_str("WAITING");
}
f.debug_tuple("UnknownState")
.field(&format_args!("{:#b}", self.0))
.finish()?;
}
wait_cell.rs | and when the
/// future is first polled, the future will *not* complete. If the caller is
/// responsible for performing an operation which will result in an eventual
/// wakeup, prefer calling [`subscribe`] _before_ performing that operation
/// and `.await`ing the [`Wait`] future returned by [`subscribe`].
///
/// [`wake`]: Self::wake
/// [`poll_wait`]: Self::poll_wait
/// [`wait`]: Self::wait
/// [`close`]: Self::close
/// [`subscribe`]: Self::subscribe
pub fn wait(&self) -> Wait<'_> {
Wait {
cell: self,
presubscribe: Poll::Pending,
}
}
/// Eagerly subscribe to notifications from this `WaitCell`.
///
/// This method returns a [`Subscribe`] [`Future`], which outputs a [`Wait`]
/// [`Future`]. Awaiting the [`Subscribe`] future will eagerly register the
/// calling task to be woken by this [`WaitCell`], so that the returned
/// [`Wait`] future will be woken by any calls to [`wake`] (or [`close`])
/// that occur between when the [`Subscribe`] future completes and when the
/// returned [`Wait`] future is `.await`ed.
///
/// This is primarily intended for scenarios where the task that waits on a
/// [`WaitCell`] is responsible for performing some operation that
/// ultimately results in the [`WaitCell`] being woken. If the task were to
/// simply perform the operation and then call [`wait`] on the [`WaitCell`],
/// a potential race condition could occur where the operation completes and
/// wakes the [`WaitCell`] *before* the [`Wait`] future is first `.await`ed.
/// Using `subscribe`, the task can ensure that it is ready to be woken by
/// the cell *before* performing an operation that could result in it being
/// woken.
///
/// These scenarios occur when a wakeup is triggered by another thread/CPU
/// core in response to an operation performed in the task waiting on the
/// `WaitCell`, or when the wakeup is triggered by a hardware interrupt
/// resulting from operations performed in the task.
///
/// # Examples
///
/// ```
/// use maitake::sync::WaitCell;
///
/// // Perform an operation that results in a concurrent wakeup, such as
/// // unmasking an interrupt.
/// fn do_something_that_causes_a_wakeup() {
/// # WAIT_CELL.wake();
/// // ...
/// }
///
/// static WAIT_CELL: WaitCell = WaitCell::new();
///
/// # async fn dox() {
/// // Subscribe to notifications from the cell *before* calling
/// // `do_something_that_causes_a_wakeup()`, to ensure that we are
/// // ready to be woken when the interrupt is unmasked.
/// let wait = WAIT_CELL.subscribe().await;
///
/// // Actually perform the operation.
/// do_something_that_causes_a_wakeup();
///
/// // Wait for the wakeup. If the wakeup occurred *before* the first
/// // poll of the `wait` future had successfully subscribed to the
/// // `WaitCell`, we would still receive the wakeup, because the
/// // `subscribe` future ensured that our waker was registered to be
/// // woken.
/// wait.await.expect("WaitCell is not closed");
/// # }
/// ```
///
/// [`wait`]: Self::wait
/// [`wake`]: Self::wake
/// [`close`]: Self::close
pub fn subscribe(&self) -> Subscribe<'_> {
Subscribe { cell: self }
}
/// Wake the [`Waker`] stored in this cell.
///
/// # Returns
///
/// - `true` if a waiting task was woken.
/// - `false` if no task was woken (no [`Waker`] was stored in the cell)
pub fn wake(&self) -> bool {
enter_test_debug_span!("WaitCell::wake", cell = ?fmt::ptr(self));
if let Some(waker) = self.take_waker(false) {
waker.wake();
true
} else {
false
}
}
/// Close the [`WaitCell`].
///
/// This wakes any waiting task with an error indicating the `WaitCell` is
/// closed. Subsequent calls to [`wait`] or [`poll_wait`] will return an
/// error indicating that the cell has been closed.
///
/// [`wait`]: Self::wait
/// [`poll_wait`]: Self::poll_wait
pub fn close(&self) -> bool {
enter_test_debug_span!("WaitCell::close", cell = ?fmt::ptr(self));
if let Some(waker) = self.take_waker(true) {
waker.wake();
true
} else {
false
}
}
// TODO(eliza): is this an API we want to have?
/*
/// Returns `true` if this `WaitCell` is [closed](Self::close).
pub(crate) fn is_closed(&self) -> bool {
self.current_state() == State::CLOSED
}
*/
/// Takes this `WaitCell`'s waker.
// TODO(eliza): could probably be made a public API...
pub(crate) fn take_waker(&self, close: bool) -> Option<Waker> {
trace!(wait_cell = ?fmt::ptr(self), ?close, "notifying");
// Set the WAKING bit (to indicate that we're touching the waker) and
// the WOKEN bit (to indicate that we intend to wake it up).
let state = {
let mut bits = State::WAKING | State::WOKEN;
if close {
bits.0 |= State::CLOSED.0;
}
test_dbg!(self.fetch_or(bits, AcqRel))
};
// Is anyone else touching the waker?
if !test_dbg!(state.contains(State::WAKING | State::REGISTERING | State::CLOSED)) {
// Ladies and gentlemen...we got him (the lock)!
let waker = self.waker.with_mut(|thread| unsafe { (*thread).take() });
// Release the lock.
self.fetch_and(!State::WAKING, Release);
if let Some(waker) = test_dbg!(waker) {
trace!(wait_cell = ?fmt::ptr(self), ?close, ?waker, "notified");
return Some(waker);
}
}
None
}
}
impl WaitCell {
#[inline(always)]
fn compare_exchange(
&self,
State(curr): State,
State(new): State,
success: Ordering,
) -> Result<State, State> {
self.state
.compare_exchange(curr, new, success, Acquire)
.map(State)
.map_err(State)
}
#[inline(always)]
fn fetch_and(&self, State(state): State, order: Ordering) -> State {
State(self.state.fetch_and(state, order))
}
#[inline(always)]
fn fetch_or(&self, State(state): State, order: Ordering) -> State {
State(self.state.fetch_or(state, order))
}
#[inline(always)]
fn current_state(&self) -> State {
State(self.state.load(Acquire))
}
}
unsafe impl Send for WaitCell {}
unsafe impl Sync for WaitCell {}
impl fmt::Debug for WaitCell {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("WaitCell")
.field("state", &self.current_state())
.field("waker", &fmt::display(".."))
.finish()
}
}
impl Drop for WaitCell {
fn drop(&mut self) {
self.close();
}
}
// === impl Wait ===
impl Future for Wait<'_> {
type Output = Result<(), Closed>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
enter_test_debug_span!("Wait::poll");
// Did a wakeup occur while we were pre-registering the future?
if test_dbg!(self.presubscribe.is_ready()) {
return self.presubscribe;
}
// Okay, actually poll the cell, then.
match task::ready!(test_dbg!(self.cell.poll_wait(cx))) {
Ok(()) => Poll::Ready(Ok(())),
Err(PollWaitError::Closed) => Poll::Ready(Err(Closed(()))),
Err(PollWaitError::Busy) => {
// If some other task was registering, yield and try to re-register
// our waker when that task is done.
cx.waker().wake_by_ref();
Poll::Pending
}
}
}
}
// === impl Subscribe ===
impl<'cell> Future for Subscribe<'cell> {
type Output = Wait<'cell>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
enter_test_debug_span!("Subscribe::poll");
// Pre-register the waker in the cell.
let presubscribe = match test_dbg!(self.cell.poll_wait(cx)) {