| file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 values) |
|---|---|---|---|---|
instances.go | package instance
import (
"errors"
"fmt"
"runtime/debug"
"github.com/TIBCOSoftware/flogo-contrib/action/flow/definition"
"github.com/TIBCOSoftware/flogo-contrib/action/flow/model"
"github.com/TIBCOSoftware/flogo-contrib/action/flow/support"
"github.com/TIBCOSoftware/flogo-lib/core/data"
"github.com/TIBCOSoftware/flogo-lib/logger"
"github.com/TIBCOSoftware/flogo-lib/util"
)
type IndependentInstance struct {
*Instance
id string
stepID int
workItemQueue *util.SyncQueue //todo: change to faster non-threadsafe queue
wiCounter int
ChangeTracker *InstanceChangeTracker
subFlowCtr int
flowModel *model.FlowModel
patch *support.Patch
interceptor *support.Interceptor
subFlows map[int]*Instance
}
// NewIndependentInstance creates a new independent Flow Instance from the specified Flow definition
func NewIndependentInstance(instanceID string, flowURI string, flow *definition.Definition) *IndependentInstance {
inst := &IndependentInstance{}
inst.Instance = &Instance{}
inst.master = inst
inst.id = instanceID
inst.stepID = 0
inst.workItemQueue = util.NewSyncQueue()
inst.flowDef = flow
inst.flowURI = flowURI
inst.flowModel = getFlowModel(flow)
inst.status = model.FlowStatusNotStarted
inst.ChangeTracker = NewInstanceChangeTracker()
inst.taskInsts = make(map[string]*TaskInst)
inst.linkInsts = make(map[int]*LinkInst)
return inst
}
func (inst *IndependentInstance) newEmbeddedInstance(taskInst *TaskInst, flowURI string, flow *definition.Definition) *Instance {
inst.subFlowCtr++
embeddedInst := &Instance{}
embeddedInst.subFlowId = inst.subFlowCtr
embeddedInst.master = inst
embeddedInst.host = taskInst
embeddedInst.flowDef = flow
embeddedInst.status = model.FlowStatusNotStarted
embeddedInst.taskInsts = make(map[string]*TaskInst)
embeddedInst.linkInsts = make(map[int]*LinkInst)
embeddedInst.flowURI = flowURI
if inst.subFlows == nil {
inst.subFlows = make(map[int]*Instance)
}
inst.subFlows[embeddedInst.subFlowId] = embeddedInst
inst.ChangeTracker.SubFlowChange(taskInst.flowInst.subFlowId, CtAdd, embeddedInst.subFlowId, "")
return embeddedInst
}
func (inst *IndependentInstance) startEmbedded(embedded *Instance, startAttrs map[string]*data.Attribute) error {
embedded.attrs = startAttrs
if embedded.master != inst {
return errors.New("embedded instance is not from this independent instance")
}
//if inst.attrs == nil {
// inst.attrs = make(map[string]*data.Attribute)
//}
//
//for _, attr := range startAttrs {
// inst.attrs[attr.Name()] = attr
//}
inst.startInstance(embedded)
return nil
}
func (inst *IndependentInstance) Start(startAttrs map[string]*data.Attribute) bool {
inst.attrs = startAttrs
//if inst.attrs == nil {
// inst.attrs = make(map[string]*data.Attribute)
//}
//
//for _, attr := range startAttrs {
// inst.attrs[attr.Name()] = attr
//}
return inst.startInstance(inst.Instance)
}
func (inst *IndependentInstance) ApplyPatch(patch *support.Patch) {
if inst.patch == nil {
inst.patch = patch
inst.patch.Init()
}
}
func (inst *IndependentInstance) ApplyInterceptor(interceptor *support.Interceptor) {
if inst.interceptor == nil {
inst.interceptor = interceptor
inst.interceptor.Init()
}
}
// GetChanges returns the Change Tracker object
func (inst *IndependentInstance) GetChanges() *InstanceChangeTracker {
return inst.ChangeTracker
}
// ResetChanges resets any changes that were being tracked
func (inst *IndependentInstance) ResetChanges() {
if inst.ChangeTracker != nil {
inst.ChangeTracker.ResetChanges()
}
//todo: can we reuse this to avoid gc
inst.ChangeTracker = NewInstanceChangeTracker()
}
// StepID returns the current step ID of the Flow Instance
func (inst *IndependentInstance) StepID() int {
return inst.stepID
}
func (inst *IndependentInstance) DoStep() bool {
hasNext := false
inst.ResetChanges()
inst.stepID++
if inst.status == model.FlowStatusActive {
// get item to be worked on
item, ok := inst.workItemQueue.Pop()
if ok {
logger.Debug("Retrieved item from Flow Instance work queue")
workItem := item.(*WorkItem)
// get the corresponding behavior
behavior := inst.flowModel.GetDefaultTaskBehavior()
if typeID := workItem.taskInst.task.TypeID(); typeID != "" {
behavior = inst.flowModel.GetTaskBehavior(typeID)
}
// track the fact that the work item was removed from the queue
inst.ChangeTracker.trackWorkItem(&WorkItemQueueChange{ChgType: CtDel, ID: workItem.ID, WorkItem: workItem})
inst.execTask(behavior, workItem.taskInst)
hasNext = true
} else {
logger.Debug("Flow Instance work queue empty")
}
}
return hasNext
}
func (inst *IndependentInstance) scheduleEval(taskInst *TaskInst) {
inst.wiCounter++
workItem := NewWorkItem(inst.wiCounter, taskInst)
logger.Debugf("Scheduling task '%s'", taskInst.task.ID())
inst.workItemQueue.Push(workItem)
// track the fact that the work item was added to the queue
inst.ChangeTracker.trackWorkItem(&WorkItemQueueChange{ChgType: CtAdd, ID: workItem.ID, WorkItem: workItem})
}
// execTask executes the specified Work Item of the Flow Instance
func (inst *IndependentInstance) execTask(behavior model.TaskBehavior, taskInst *TaskInst) {
defer func() {
if r := recover(); r != nil {
err := fmt.Errorf("Unhandled Error executing task '%s' : %v", taskInst.task.Name(), r)
logger.Error(err)
// todo: useful for debugging
logger.Errorf("StackTrace: %s", debug.Stack())
if !taskInst.flowInst.isHandlingError {
taskInst.appendErrorData(NewActivityEvalError(taskInst.task.Name(), "unhandled", err.Error()))
inst.HandleGlobalError(taskInst.flowInst, err)
}
// else what should we do?
}
}()
var err error
var evalResult model.EvalResult
if taskInst.status == model.TaskStatusWaiting {
evalResult, err = behavior.PostEval(taskInst)
} else {
evalResult, err = behavior.Eval(taskInst)
}
if err != nil {
taskInst.returnError = err
inst.handleTaskError(behavior, taskInst, err)
return
}
switch evalResult {
case model.EVAL_DONE:
taskInst.SetStatus(model.TaskStatusDone)
inst.handleTaskDone(behavior, taskInst)
case model.EVAL_SKIP:
taskInst.SetStatus(model.TaskStatusSkipped)
inst.handleTaskDone(behavior, taskInst)
case model.EVAL_WAIT:
taskInst.SetStatus(model.TaskStatusWaiting)
case model.EVAL_FAIL:
taskInst.SetStatus(model.TaskStatusFailed)
case model.EVAL_REPEAT:
taskInst.SetStatus(model.TaskStatusReady)
//task needs to iterate or retry
inst.scheduleEval(taskInst)
}
}
// handleTaskDone handles the completion of a task in the Flow Instance
func (inst *IndependentInstance) handleTaskDone(taskBehavior model.TaskBehavior, taskInst *TaskInst) {
notifyFlow := false
var taskEntries []*model.TaskEntry
var err error
if taskInst.Status() == model.TaskStatusSkipped {
notifyFlow, taskEntries = taskBehavior.Skip(taskInst)
} else {
notifyFlow, taskEntries, err = taskBehavior.Done(taskInst)
}
containerInst := taskInst.flowInst
if err != nil {
taskInst.appendErrorData(err)
inst.HandleGlobalError(containerInst, err)
return
}
flowDone := false
task := taskInst.Task()
if notifyFlow {
flowBehavior := inst.flowModel.GetFlowBehavior()
flowDone = flowBehavior.TaskDone(containerInst)
}
if flowDone || containerInst.forceCompletion {
//flow completed or return was called explicitly, so lets complete the flow
flowBehavior := inst.flowModel.GetFlowBehavior()
flowBehavior.Done(containerInst)
flowDone = true
containerInst.SetStatus(model.FlowStatusCompleted)
if containerInst != inst.Instance {
//not top level flow so we have to schedule next step
// spawned from task instance
host, ok := containerInst.host.(*TaskInst)
if ok {
//if the flow failed, set the error
for _, value := range containerInst.returnData {
host.SetOutput(value.Name(), value.Value())
}
inst.scheduleEval(host)
}
//if containerInst.isHandlingError {
// //was the error handler, so directly under instance
// host,ok := containerInst.host.(*EmbeddedInstance)
// if ok {
// host.SetStatus(model.FlowStatusCompleted)
// host.returnData = containerInst.returnData
// host.returnError = containerInst.returnError
// }
// //todo if not a task inst, what should we do?
//} else {
// // spawned from task instance
//
// //todo if not a task inst, what should we do?
//}
// flow has completed so remove it
delete(inst.subFlows, containerInst.subFlowId)
}
} else {
// not done, so enter tasks specified by the Done behavior call
inst.enterTasks(containerInst, taskEntries)
}
// task is done, so we can release it
containerInst.releaseTask(task)
}
// handleTaskError handles an error returned during execution of a task in the Flow Instance
func (inst *IndependentInstance) handleTaskError(taskBehavior model.TaskBehavior, taskInst *TaskInst, err error) {
handled, taskEntries := taskBehavior.Error(taskInst, err)
containerInst := taskInst.flowInst
if !handled {
if containerInst.isHandlingError {
//fail
inst.SetStatus(model.FlowStatusFailed)
} else {
taskInst.appendErrorData(err)
inst.HandleGlobalError(containerInst, err)
}
return
}
if len(taskEntries) != 0 {
inst.enterTasks(containerInst, taskEntries)
}
containerInst.releaseTask(taskInst.Task())
}
// HandleGlobalError handles instance errors
func (inst *IndependentInstance) HandleGlobalError(containerInst *Instance, err error) {
if containerInst.isHandlingError {
//todo: log error information
containerInst.SetStatus(model.FlowStatusFailed)
return
}
containerInst.isHandlingError = true
flowBehavior := inst.flowModel.GetFlowBehavior()
//not currently handling error, so check if it has an error handler
if containerInst.flowDef.GetErrorHandler() != nil {
// todo: should we clear out the existing workitem queue for items from containerInst?
//clear existing instances
inst.taskInsts = make(map[string]*TaskInst)
taskEntries := flowBehavior.StartErrorHandler(containerInst)
inst.enterTasks(containerInst, taskEntries)
} else {
containerInst.SetStatus(model.FlowStatusFailed)
if containerInst != inst.Instance {
// spawned from task instance
host, ok := containerInst.host.(*TaskInst)
if ok {
behavior := inst.flowModel.GetDefaultTaskBehavior()
if typeID := host.task.TypeID(); typeID != "" {
behavior = inst.flowModel.GetTaskBehavior(typeID)
}
inst.handleTaskError(behavior, host, err)
//fail the task | }
}
}
func (inst *IndependentInstance) startInstance(toStart *Instance) bool {
toStart.SetStatus(model.FlowStatusActive)
//if pi.Attrs == nil {
// pi.Attrs = make(map[string]*data.Attribute)
//}
//
//for _, attr := range startAttrs {
// pi.Attrs[attr.Name()] = attr
//}
//logger.Infof("FlowInstance Flow: %v", pi.FlowModel)
//need input mappings
flowBehavior := inst.flowModel.GetFlowBehavior()
ok, taskEntries := flowBehavior.Start(toStart)
if ok {
inst.enterTasks(toStart, taskEntries)
}
return ok
}
func (inst *IndependentInstance) enterTasks(activeInst *Instance, taskEntries []*model.TaskEntry) {
for _, taskEntry := range taskEntries {
//logger.Debugf("EnterTask - TaskEntry: %v", taskEntry)
taskToEnterBehavior := inst.flowModel.GetTaskBehavior(taskEntry.Task.TypeID())
enterTaskData, _ := activeInst.FindOrCreateTaskData(taskEntry.Task)
enterResult := taskToEnterBehavior.Enter(enterTaskData)
if enterResult == model.ENTER_EVAL {
inst.scheduleEval(enterTaskData)
} else if enterResult == model.ENTER_SKIP {
//todo optimize skip, just keep skipping and don't schedule eval
inst.scheduleEval(enterTaskData)
}
}
}
//////////////////////////////////////////////////////////////////
// WorkItem describes an item of work (event for a Task) that should be executed on Step
type WorkItem struct {
ID int `json:"id"`
taskInst *TaskInst `json:"-"`
TaskID string `json:"taskID"`
SubFlowID int `json:"subFlowId"`
}
// NewWorkItem constructs a new WorkItem for the specified TaskInst
func NewWorkItem(id int, taskInst *TaskInst) *WorkItem {
var workItem WorkItem
workItem.ID = id
workItem.taskInst = taskInst
workItem.TaskID = taskInst.task.ID()
workItem.SubFlowID = taskInst.flowInst.subFlowId
return &workItem
}
func NewActivityEvalError(taskName string, errorType string, errorText string) *ActivityEvalError {
return &ActivityEvalError{taskName: taskName, errType: errorType, errText: errorText}
}
type ActivityEvalError struct {
taskName string
errType string
errText string
}
func (e *ActivityEvalError) TaskName() string {
return e.taskName
}
func (e *ActivityEvalError) Type() string {
return e.errType
}
func (e *ActivityEvalError) Error() string {
return e.errText
}
//////////////
// todo fix the following
func getFlowModel(flow *definition.Definition) *model.FlowModel {
if flow.ModelID() == "" {
return model.Default()
} else {
return model.Get(flow.ModelID())
//todo if model not found, should throw error
}
}
// Restart restarts the Flow Instance, re-resolving its definition from the FlowManager
func (inst *IndependentInstance) Restart(id string, manager *support.FlowManager) error {
inst.id = id
var err error
inst.flowDef, err = manager.GetFlow(inst.flowURI)
if err != nil {
return err
}
if inst.flowDef == nil {
return errors.New("unable to resolve flow: " + inst.flowURI)
}
inst.flowModel = getFlowModel(inst.flowDef)
inst.master = inst
inst.init(inst.Instance)
return nil
}
func (inst *IndependentInstance) init(flowInst *Instance) {
for _, v := range flowInst.taskInsts {
v.flowInst = flowInst
v.task = flowInst.flowDef.GetTask(v.taskID)
}
for _, v := range flowInst.linkInsts {
v.flowInst = flowInst
v.link = flowInst.flowDef.GetLink(v.linkID)
}
} |
//inst.scheduleEval(host)
}
} else {
inst.returnError = err | random_line_split |
instances.go | package instance
import (
"errors"
"fmt"
"runtime/debug"
"github.com/TIBCOSoftware/flogo-contrib/action/flow/definition"
"github.com/TIBCOSoftware/flogo-contrib/action/flow/model"
"github.com/TIBCOSoftware/flogo-contrib/action/flow/support"
"github.com/TIBCOSoftware/flogo-lib/core/data"
"github.com/TIBCOSoftware/flogo-lib/logger"
"github.com/TIBCOSoftware/flogo-lib/util"
)
type IndependentInstance struct {
*Instance
id string
stepID int
workItemQueue *util.SyncQueue //todo: change to faster non-threadsafe queue
wiCounter int
ChangeTracker *InstanceChangeTracker
subFlowCtr int
flowModel *model.FlowModel
patch *support.Patch
interceptor *support.Interceptor
subFlows map[int]*Instance
}
// NewIndependentInstance creates a new independent Flow Instance from the specified Flow definition
func NewIndependentInstance(instanceID string, flowURI string, flow *definition.Definition) *IndependentInstance {
inst := &IndependentInstance{}
inst.Instance = &Instance{}
inst.master = inst
inst.id = instanceID
inst.stepID = 0
inst.workItemQueue = util.NewSyncQueue()
inst.flowDef = flow
inst.flowURI = flowURI
inst.flowModel = getFlowModel(flow)
inst.status = model.FlowStatusNotStarted
inst.ChangeTracker = NewInstanceChangeTracker()
inst.taskInsts = make(map[string]*TaskInst)
inst.linkInsts = make(map[int]*LinkInst)
return inst
}
func (inst *IndependentInstance) newEmbeddedInstance(taskInst *TaskInst, flowURI string, flow *definition.Definition) *Instance {
inst.subFlowCtr++
embeddedInst := &Instance{}
embeddedInst.subFlowId = inst.subFlowCtr
embeddedInst.master = inst
embeddedInst.host = taskInst
embeddedInst.flowDef = flow
embeddedInst.status = model.FlowStatusNotStarted
embeddedInst.taskInsts = make(map[string]*TaskInst)
embeddedInst.linkInsts = make(map[int]*LinkInst)
embeddedInst.flowURI = flowURI
if inst.subFlows == nil {
inst.subFlows = make(map[int]*Instance)
}
inst.subFlows[embeddedInst.subFlowId] = embeddedInst
inst.ChangeTracker.SubFlowChange(taskInst.flowInst.subFlowId, CtAdd, embeddedInst.subFlowId, "")
return embeddedInst
}
func (inst *IndependentInstance) startEmbedded(embedded *Instance, startAttrs map[string]*data.Attribute) error {
embedded.attrs = startAttrs
if embedded.master != inst {
return errors.New("embedded instance is not from this independent instance")
}
//if inst.attrs == nil {
// inst.attrs = make(map[string]*data.Attribute)
//}
//
//for _, attr := range startAttrs {
// inst.attrs[attr.Name()] = attr
//}
inst.startInstance(embedded)
return nil
}
func (inst *IndependentInstance) Start(startAttrs map[string]*data.Attribute) bool {
inst.attrs = startAttrs
//if inst.attrs == nil {
// inst.attrs = make(map[string]*data.Attribute)
//}
//
//for _, attr := range startAttrs {
// inst.attrs[attr.Name()] = attr
//}
return inst.startInstance(inst.Instance)
}
func (inst *IndependentInstance) ApplyPatch(patch *support.Patch) {
if inst.patch == nil {
inst.patch = patch
inst.patch.Init()
}
}
func (inst *IndependentInstance) ApplyInterceptor(interceptor *support.Interceptor) {
if inst.interceptor == nil {
inst.interceptor = interceptor
inst.interceptor.Init()
}
}
// GetChanges returns the Change Tracker object
func (inst *IndependentInstance) GetChanges() *InstanceChangeTracker {
return inst.ChangeTracker
}
// ResetChanges resets any changes that were being tracked
func (inst *IndependentInstance) ResetChanges() {
if inst.ChangeTracker != nil {
inst.ChangeTracker.ResetChanges()
}
//todo: can we reuse this to avoid gc
inst.ChangeTracker = NewInstanceChangeTracker()
}
// StepID returns the current step ID of the Flow Instance
func (inst *IndependentInstance) StepID() int |
func (inst *IndependentInstance) DoStep() bool {
hasNext := false
inst.ResetChanges()
inst.stepID++
if inst.status == model.FlowStatusActive {
// get item to be worked on
item, ok := inst.workItemQueue.Pop()
if ok {
logger.Debug("Retrieved item from Flow Instance work queue")
workItem := item.(*WorkItem)
// get the corresponding behavior
behavior := inst.flowModel.GetDefaultTaskBehavior()
if typeID := workItem.taskInst.task.TypeID(); typeID != "" {
behavior = inst.flowModel.GetTaskBehavior(typeID)
}
// track the fact that the work item was removed from the queue
inst.ChangeTracker.trackWorkItem(&WorkItemQueueChange{ChgType: CtDel, ID: workItem.ID, WorkItem: workItem})
inst.execTask(behavior, workItem.taskInst)
hasNext = true
} else {
logger.Debug("Flow Instance work queue empty")
}
}
return hasNext
}
func (inst *IndependentInstance) scheduleEval(taskInst *TaskInst) {
inst.wiCounter++
workItem := NewWorkItem(inst.wiCounter, taskInst)
logger.Debugf("Scheduling task '%s'", taskInst.task.ID())
inst.workItemQueue.Push(workItem)
// track the fact that the work item was added to the queue
inst.ChangeTracker.trackWorkItem(&WorkItemQueueChange{ChgType: CtAdd, ID: workItem.ID, WorkItem: workItem})
}
// execTask executes the specified Work Item of the Flow Instance
func (inst *IndependentInstance) execTask(behavior model.TaskBehavior, taskInst *TaskInst) {
defer func() {
if r := recover(); r != nil {
err := fmt.Errorf("Unhandled Error executing task '%s' : %v", taskInst.task.Name(), r)
logger.Error(err)
// todo: useful for debugging
logger.Errorf("StackTrace: %s", debug.Stack())
if !taskInst.flowInst.isHandlingError {
taskInst.appendErrorData(NewActivityEvalError(taskInst.task.Name(), "unhandled", err.Error()))
inst.HandleGlobalError(taskInst.flowInst, err)
}
// else what should we do?
}
}()
var err error
var evalResult model.EvalResult
if taskInst.status == model.TaskStatusWaiting {
evalResult, err = behavior.PostEval(taskInst)
} else {
evalResult, err = behavior.Eval(taskInst)
}
if err != nil {
taskInst.returnError = err
inst.handleTaskError(behavior, taskInst, err)
return
}
switch evalResult {
case model.EVAL_DONE:
taskInst.SetStatus(model.TaskStatusDone)
inst.handleTaskDone(behavior, taskInst)
case model.EVAL_SKIP:
taskInst.SetStatus(model.TaskStatusSkipped)
inst.handleTaskDone(behavior, taskInst)
case model.EVAL_WAIT:
taskInst.SetStatus(model.TaskStatusWaiting)
case model.EVAL_FAIL:
taskInst.SetStatus(model.TaskStatusFailed)
case model.EVAL_REPEAT:
taskInst.SetStatus(model.TaskStatusReady)
//task needs to iterate or retry
inst.scheduleEval(taskInst)
}
}
// handleTaskDone handles the completion of a task in the Flow Instance
func (inst *IndependentInstance) handleTaskDone(taskBehavior model.TaskBehavior, taskInst *TaskInst) {
notifyFlow := false
var taskEntries []*model.TaskEntry
var err error
if taskInst.Status() == model.TaskStatusSkipped {
notifyFlow, taskEntries = taskBehavior.Skip(taskInst)
} else {
notifyFlow, taskEntries, err = taskBehavior.Done(taskInst)
}
containerInst := taskInst.flowInst
if err != nil {
taskInst.appendErrorData(err)
inst.HandleGlobalError(containerInst, err)
return
}
flowDone := false
task := taskInst.Task()
if notifyFlow {
flowBehavior := inst.flowModel.GetFlowBehavior()
flowDone = flowBehavior.TaskDone(containerInst)
}
if flowDone || containerInst.forceCompletion {
//flow completed or return was called explicitly, so lets complete the flow
flowBehavior := inst.flowModel.GetFlowBehavior()
flowBehavior.Done(containerInst)
flowDone = true
containerInst.SetStatus(model.FlowStatusCompleted)
if containerInst != inst.Instance {
//not top level flow so we have to schedule next step
// spawned from task instance
host, ok := containerInst.host.(*TaskInst)
if ok {
//if the flow failed, set the error
for _, value := range containerInst.returnData {
host.SetOutput(value.Name(), value.Value())
}
inst.scheduleEval(host)
}
//if containerInst.isHandlingError {
// //was the error handler, so directly under instance
// host,ok := containerInst.host.(*EmbeddedInstance)
// if ok {
// host.SetStatus(model.FlowStatusCompleted)
// host.returnData = containerInst.returnData
// host.returnError = containerInst.returnError
// }
// //todo if not a task inst, what should we do?
//} else {
// // spawned from task instance
//
// //todo if not a task inst, what should we do?
//}
// flow has completed so remove it
delete(inst.subFlows, containerInst.subFlowId)
}
} else {
// not done, so enter tasks specified by the Done behavior call
inst.enterTasks(containerInst, taskEntries)
}
// task is done, so we can release it
containerInst.releaseTask(task)
}
// handleTaskError handles an error returned during execution of a task in the Flow Instance
func (inst *IndependentInstance) handleTaskError(taskBehavior model.TaskBehavior, taskInst *TaskInst, err error) {
handled, taskEntries := taskBehavior.Error(taskInst, err)
containerInst := taskInst.flowInst
if !handled {
if containerInst.isHandlingError {
//fail
inst.SetStatus(model.FlowStatusFailed)
} else {
taskInst.appendErrorData(err)
inst.HandleGlobalError(containerInst, err)
}
return
}
if len(taskEntries) != 0 {
inst.enterTasks(containerInst, taskEntries)
}
containerInst.releaseTask(taskInst.Task())
}
// HandleGlobalError handles instance errors
func (inst *IndependentInstance) HandleGlobalError(containerInst *Instance, err error) {
if containerInst.isHandlingError {
//todo: log error information
containerInst.SetStatus(model.FlowStatusFailed)
return
}
containerInst.isHandlingError = true
flowBehavior := inst.flowModel.GetFlowBehavior()
//not currently handling error, so check if it has an error handler
if containerInst.flowDef.GetErrorHandler() != nil {
// todo: should we clear out the existing workitem queue for items from containerInst?
//clear existing instances
inst.taskInsts = make(map[string]*TaskInst)
taskEntries := flowBehavior.StartErrorHandler(containerInst)
inst.enterTasks(containerInst, taskEntries)
} else {
containerInst.SetStatus(model.FlowStatusFailed)
if containerInst != inst.Instance {
// spawned from task instance
host, ok := containerInst.host.(*TaskInst)
if ok {
behavior := inst.flowModel.GetDefaultTaskBehavior()
if typeID := host.task.TypeID(); typeID != "" {
behavior = inst.flowModel.GetTaskBehavior(typeID)
}
inst.handleTaskError(behavior, host, err)
//fail the task
//inst.scheduleEval(host)
}
} else {
inst.returnError = err
}
}
}
func (inst *IndependentInstance) startInstance(toStart *Instance) bool {
toStart.SetStatus(model.FlowStatusActive)
//if pi.Attrs == nil {
// pi.Attrs = make(map[string]*data.Attribute)
//}
//
//for _, attr := range startAttrs {
// pi.Attrs[attr.Name()] = attr
//}
//logger.Infof("FlowInstance Flow: %v", pi.FlowModel)
//need input mappings
flowBehavior := inst.flowModel.GetFlowBehavior()
ok, taskEntries := flowBehavior.Start(toStart)
if ok {
inst.enterTasks(toStart, taskEntries)
}
return ok
}
func (inst *IndependentInstance) enterTasks(activeInst *Instance, taskEntries []*model.TaskEntry) {
for _, taskEntry := range taskEntries {
//logger.Debugf("EnterTask - TaskEntry: %v", taskEntry)
taskToEnterBehavior := inst.flowModel.GetTaskBehavior(taskEntry.Task.TypeID())
enterTaskData, _ := activeInst.FindOrCreateTaskData(taskEntry.Task)
enterResult := taskToEnterBehavior.Enter(enterTaskData)
if enterResult == model.ENTER_EVAL {
inst.scheduleEval(enterTaskData)
} else if enterResult == model.ENTER_SKIP {
//todo optimize skip, just keep skipping and don't schedule eval
inst.scheduleEval(enterTaskData)
}
}
}
//////////////////////////////////////////////////////////////////
// WorkItem describes an item of work (event for a Task) that should be executed on Step
type WorkItem struct {
ID int `json:"id"`
taskInst *TaskInst `json:"-"`
TaskID string `json:"taskID"`
SubFlowID int `json:"subFlowId"`
}
// NewWorkItem constructs a new WorkItem for the specified TaskInst
func NewWorkItem(id int, taskInst *TaskInst) *WorkItem {
var workItem WorkItem
workItem.ID = id
workItem.taskInst = taskInst
workItem.TaskID = taskInst.task.ID()
workItem.SubFlowID = taskInst.flowInst.subFlowId
return &workItem
}
func NewActivityEvalError(taskName string, errorType string, errorText string) *ActivityEvalError {
return &ActivityEvalError{taskName: taskName, errType: errorType, errText: errorText}
}
type ActivityEvalError struct {
taskName string
errType string
errText string
}
func (e *ActivityEvalError) TaskName() string {
return e.taskName
}
func (e *ActivityEvalError) Type() string {
return e.errType
}
func (e *ActivityEvalError) Error() string {
return e.errText
}
//////////////
// todo fix the following
func getFlowModel(flow *definition.Definition) *model.FlowModel {
if flow.ModelID() == "" {
return model.Default()
} else {
return model.Get(flow.ModelID())
//todo if model not found, should throw error
}
}
// Restart restarts the Flow Instance, re-resolving its definition from the FlowManager
func (inst *IndependentInstance) Restart(id string, manager *support.FlowManager) error {
inst.id = id
var err error
inst.flowDef, err = manager.GetFlow(inst.flowURI)
if err != nil {
return err
}
if inst.flowDef == nil {
return errors.New("unable to resolve flow: " + inst.flowURI)
}
inst.flowModel = getFlowModel(inst.flowDef)
inst.master = inst
inst.init(inst.Instance)
return nil
}
func (inst *IndependentInstance) init(flowInst *Instance) {
for _, v := range flowInst.taskInsts {
v.flowInst = flowInst
v.task = flowInst.flowDef.GetTask(v.taskID)
}
for _, v := range flowInst.linkInsts {
v.flowInst = flowInst
v.link = flowInst.flowDef.GetLink(v.linkID)
}
}
| {
return inst.stepID
} | identifier_body |
instances.go | package instance
import (
"errors"
"fmt"
"runtime/debug"
"github.com/TIBCOSoftware/flogo-contrib/action/flow/definition"
"github.com/TIBCOSoftware/flogo-contrib/action/flow/model"
"github.com/TIBCOSoftware/flogo-contrib/action/flow/support"
"github.com/TIBCOSoftware/flogo-lib/core/data"
"github.com/TIBCOSoftware/flogo-lib/logger"
"github.com/TIBCOSoftware/flogo-lib/util"
)
type IndependentInstance struct {
*Instance
id string
stepID int
workItemQueue *util.SyncQueue //todo: change to faster non-threadsafe queue
wiCounter int
ChangeTracker *InstanceChangeTracker
subFlowCtr int
flowModel *model.FlowModel
patch *support.Patch
interceptor *support.Interceptor
subFlows map[int]*Instance
}
// NewIndependentInstance creates a new independent Flow Instance from the specified Flow definition
func NewIndependentInstance(instanceID string, flowURI string, flow *definition.Definition) *IndependentInstance {
inst := &IndependentInstance{}
inst.Instance = &Instance{}
inst.master = inst
inst.id = instanceID
inst.stepID = 0
inst.workItemQueue = util.NewSyncQueue()
inst.flowDef = flow
inst.flowURI = flowURI
inst.flowModel = getFlowModel(flow)
inst.status = model.FlowStatusNotStarted
inst.ChangeTracker = NewInstanceChangeTracker()
inst.taskInsts = make(map[string]*TaskInst)
inst.linkInsts = make(map[int]*LinkInst)
return inst
}
func (inst *IndependentInstance) newEmbeddedInstance(taskInst *TaskInst, flowURI string, flow *definition.Definition) *Instance {
inst.subFlowCtr++
embeddedInst := &Instance{}
embeddedInst.subFlowId = inst.subFlowCtr
embeddedInst.master = inst
embeddedInst.host = taskInst
embeddedInst.flowDef = flow
embeddedInst.status = model.FlowStatusNotStarted
embeddedInst.taskInsts = make(map[string]*TaskInst)
embeddedInst.linkInsts = make(map[int]*LinkInst)
embeddedInst.flowURI = flowURI
if inst.subFlows == nil {
inst.subFlows = make(map[int]*Instance)
}
inst.subFlows[embeddedInst.subFlowId] = embeddedInst
inst.ChangeTracker.SubFlowChange(taskInst.flowInst.subFlowId, CtAdd, embeddedInst.subFlowId, "")
return embeddedInst
}
func (inst *IndependentInstance) startEmbedded(embedded *Instance, startAttrs map[string]*data.Attribute) error {
embedded.attrs = startAttrs
if embedded.master != inst {
return errors.New("embedded instance is not from this independent instance")
}
//if inst.attrs == nil {
// inst.attrs = make(map[string]*data.Attribute)
//}
//
//for _, attr := range startAttrs {
// inst.attrs[attr.Name()] = attr
//}
inst.startInstance(embedded)
return nil
}
func (inst *IndependentInstance) Start(startAttrs map[string]*data.Attribute) bool {
inst.attrs = startAttrs
//if inst.attrs == nil {
// inst.attrs = make(map[string]*data.Attribute)
//}
//
//for _, attr := range startAttrs {
// inst.attrs[attr.Name()] = attr
//}
return inst.startInstance(inst.Instance)
}
func (inst *IndependentInstance) ApplyPatch(patch *support.Patch) {
if inst.patch == nil {
inst.patch = patch
inst.patch.Init()
}
}
func (inst *IndependentInstance) ApplyInterceptor(interceptor *support.Interceptor) {
if inst.interceptor == nil {
inst.interceptor = interceptor
inst.interceptor.Init()
}
}
// GetChanges returns the Change Tracker object
func (inst *IndependentInstance) GetChanges() *InstanceChangeTracker {
return inst.ChangeTracker
}
// ResetChanges resets any changes that were being tracked
func (inst *IndependentInstance) ResetChanges() {
if inst.ChangeTracker != nil {
inst.ChangeTracker.ResetChanges()
}
//todo: can we reuse this to avoid gc
inst.ChangeTracker = NewInstanceChangeTracker()
}
// StepID returns the current step ID of the Flow Instance
func (inst *IndependentInstance) StepID() int {
return inst.stepID
}
func (inst *IndependentInstance) DoStep() bool {
hasNext := false
inst.ResetChanges()
inst.stepID++
if inst.status == model.FlowStatusActive {
// get item to be worked on
item, ok := inst.workItemQueue.Pop()
if ok {
logger.Debug("Retrieved item from Flow Instance work queue")
workItem := item.(*WorkItem)
// get the corresponding behavior
behavior := inst.flowModel.GetDefaultTaskBehavior()
if typeID := workItem.taskInst.task.TypeID(); typeID != "" {
behavior = inst.flowModel.GetTaskBehavior(typeID)
}
// track the fact that the work item was removed from the queue
inst.ChangeTracker.trackWorkItem(&WorkItemQueueChange{ChgType: CtDel, ID: workItem.ID, WorkItem: workItem})
inst.execTask(behavior, workItem.taskInst)
hasNext = true
} else {
logger.Debug("Flow Instance work queue empty")
}
}
return hasNext
}
func (inst *IndependentInstance) scheduleEval(taskInst *TaskInst) {
inst.wiCounter++
workItem := NewWorkItem(inst.wiCounter, taskInst)
logger.Debugf("Scheduling task '%s'", taskInst.task.ID())
inst.workItemQueue.Push(workItem)
// track the fact that the work item was added to the queue
inst.ChangeTracker.trackWorkItem(&WorkItemQueueChange{ChgType: CtAdd, ID: workItem.ID, WorkItem: workItem})
}
// execTask executes the specified Work Item of the Flow Instance
func (inst *IndependentInstance) execTask(behavior model.TaskBehavior, taskInst *TaskInst) {
defer func() {
if r := recover(); r != nil {
err := fmt.Errorf("Unhandled Error executing task '%s' : %v", taskInst.task.Name(), r)
logger.Error(err)
// todo: useful for debugging
logger.Errorf("StackTrace: %s", debug.Stack())
if !taskInst.flowInst.isHandlingError {
taskInst.appendErrorData(NewActivityEvalError(taskInst.task.Name(), "unhandled", err.Error()))
inst.HandleGlobalError(taskInst.flowInst, err)
}
// else what should we do?
}
}()
var err error
var evalResult model.EvalResult
if taskInst.status == model.TaskStatusWaiting {
evalResult, err = behavior.PostEval(taskInst)
} else {
evalResult, err = behavior.Eval(taskInst)
}
if err != nil {
taskInst.returnError = err
inst.handleTaskError(behavior, taskInst, err)
return
}
switch evalResult {
case model.EVAL_DONE:
taskInst.SetStatus(model.TaskStatusDone)
inst.handleTaskDone(behavior, taskInst)
case model.EVAL_SKIP:
taskInst.SetStatus(model.TaskStatusSkipped)
inst.handleTaskDone(behavior, taskInst)
case model.EVAL_WAIT:
taskInst.SetStatus(model.TaskStatusWaiting)
case model.EVAL_FAIL:
taskInst.SetStatus(model.TaskStatusFailed)
case model.EVAL_REPEAT:
taskInst.SetStatus(model.TaskStatusReady)
//task needs to iterate or retry
inst.scheduleEval(taskInst)
}
}
// handleTaskDone handles the completion of a task in the Flow Instance
func (inst *IndependentInstance) handleTaskDone(taskBehavior model.TaskBehavior, taskInst *TaskInst) {
notifyFlow := false
var taskEntries []*model.TaskEntry
var err error
if taskInst.Status() == model.TaskStatusSkipped {
notifyFlow, taskEntries = taskBehavior.Skip(taskInst)
} else {
notifyFlow, taskEntries, err = taskBehavior.Done(taskInst)
}
containerInst := taskInst.flowInst
if err != nil {
taskInst.appendErrorData(err)
inst.HandleGlobalError(containerInst, err)
return
}
flowDone := false
task := taskInst.Task()
if notifyFlow {
flowBehavior := inst.flowModel.GetFlowBehavior()
flowDone = flowBehavior.TaskDone(containerInst)
}
if flowDone || containerInst.forceCompletion {
//flow completed or return was called explicitly, so lets complete the flow
flowBehavior := inst.flowModel.GetFlowBehavior()
flowBehavior.Done(containerInst)
flowDone = true
containerInst.SetStatus(model.FlowStatusCompleted)
if containerInst != inst.Instance {
//not top level flow so we have to schedule next step
// spawned from task instance
host, ok := containerInst.host.(*TaskInst)
if ok {
//if the flow failed, set the error
for _, value := range containerInst.returnData |
inst.scheduleEval(host)
}
//if containerInst.isHandlingError {
// //was the error handler, so directly under instance
// host,ok := containerInst.host.(*EmbeddedInstance)
// if ok {
// host.SetStatus(model.FlowStatusCompleted)
// host.returnData = containerInst.returnData
// host.returnError = containerInst.returnError
// }
// //todo if not a task inst, what should we do?
//} else {
// // spawned from task instance
//
// //todo if not a task inst, what should we do?
//}
// flow has completed so remove it
delete(inst.subFlows, containerInst.subFlowId)
}
} else {
// not done, so enter tasks specified by the Done behavior call
inst.enterTasks(containerInst, taskEntries)
}
// task is done, so we can release it
containerInst.releaseTask(task)
}
// handleTaskError handles an error returned during execution of a task in the Flow Instance
func (inst *IndependentInstance) handleTaskError(taskBehavior model.TaskBehavior, taskInst *TaskInst, err error) {
handled, taskEntries := taskBehavior.Error(taskInst, err)
containerInst := taskInst.flowInst
if !handled {
if containerInst.isHandlingError {
//fail
inst.SetStatus(model.FlowStatusFailed)
} else {
taskInst.appendErrorData(err)
inst.HandleGlobalError(containerInst, err)
}
return
}
if len(taskEntries) != 0 {
inst.enterTasks(containerInst, taskEntries)
}
containerInst.releaseTask(taskInst.Task())
}
// HandleGlobalError handles instance errors
func (inst *IndependentInstance) HandleGlobalError(containerInst *Instance, err error) {
if containerInst.isHandlingError {
//todo: log error information
containerInst.SetStatus(model.FlowStatusFailed)
return
}
containerInst.isHandlingError = true
flowBehavior := inst.flowModel.GetFlowBehavior()
//not currently handling error, so check if it has an error handler
if containerInst.flowDef.GetErrorHandler() != nil {
// todo: should we clear out the existing workitem queue for items from containerInst?
//clear existing instances
inst.taskInsts = make(map[string]*TaskInst)
taskEntries := flowBehavior.StartErrorHandler(containerInst)
inst.enterTasks(containerInst, taskEntries)
} else {
containerInst.SetStatus(model.FlowStatusFailed)
if containerInst != inst.Instance {
// spawned from task instance
host, ok := containerInst.host.(*TaskInst)
if ok {
behavior := inst.flowModel.GetDefaultTaskBehavior()
if typeID := host.task.TypeID(); typeID != "" {
behavior = inst.flowModel.GetTaskBehavior(typeID)
}
inst.handleTaskError(behavior, host, err)
//fail the task
//inst.scheduleEval(host)
}
} else {
inst.returnError = err
}
}
}
func (inst *IndependentInstance) startInstance(toStart *Instance) bool {
toStart.SetStatus(model.FlowStatusActive)
//if pi.Attrs == nil {
// pi.Attrs = make(map[string]*data.Attribute)
//}
//
//for _, attr := range startAttrs {
// pi.Attrs[attr.Name()] = attr
//}
//logger.Infof("FlowInstance Flow: %v", pi.FlowModel)
//need input mappings
flowBehavior := inst.flowModel.GetFlowBehavior()
ok, taskEntries := flowBehavior.Start(toStart)
if ok {
inst.enterTasks(toStart, taskEntries)
}
return ok
}
func (inst *IndependentInstance) enterTasks(activeInst *Instance, taskEntries []*model.TaskEntry) {
for _, taskEntry := range taskEntries {
//logger.Debugf("EnterTask - TaskEntry: %v", taskEntry)
taskToEnterBehavior := inst.flowModel.GetTaskBehavior(taskEntry.Task.TypeID())
enterTaskData, _ := activeInst.FindOrCreateTaskData(taskEntry.Task)
enterResult := taskToEnterBehavior.Enter(enterTaskData)
if enterResult == model.ENTER_EVAL {
inst.scheduleEval(enterTaskData)
} else if enterResult == model.ENTER_SKIP {
//todo optimize skip, just keep skipping and don't schedule eval
inst.scheduleEval(enterTaskData)
}
}
}
//////////////////////////////////////////////////////////////////
// WorkItem describes an item of work (event for a Task) that should be executed on Step
type WorkItem struct {
ID int `json:"id"`
taskInst *TaskInst `json:"-"`
TaskID string `json:"taskID"`
SubFlowID int `json:"subFlowId"`
}
// NewWorkItem constructs a new WorkItem for the specified TaskInst
func NewWorkItem(id int, taskInst *TaskInst) *WorkItem {
var workItem WorkItem
workItem.ID = id
workItem.taskInst = taskInst
workItem.TaskID = taskInst.task.ID()
workItem.SubFlowID = taskInst.flowInst.subFlowId
return &workItem
}
func NewActivityEvalError(taskName string, errorType string, errorText string) *ActivityEvalError {
return &ActivityEvalError{taskName: taskName, errType: errorType, errText: errorText}
}
type ActivityEvalError struct {
taskName string
errType string
errText string
}
func (e *ActivityEvalError) TaskName() string {
return e.taskName
}
func (e *ActivityEvalError) Type() string {
return e.errType
}
func (e *ActivityEvalError) Error() string {
return e.errText
}
//////////////
// todo fix the following
func getFlowModel(flow *definition.Definition) *model.FlowModel {
if flow.ModelID() == "" {
return model.Default()
} else {
return model.Get(flow.ModelID())
//todo if model not found, should throw error
}
}
// Restart restarts the Flow Instance, re-resolving its definition from the FlowManager
func (inst *IndependentInstance) Restart(id string, manager *support.FlowManager) error {
inst.id = id
var err error
inst.flowDef, err = manager.GetFlow(inst.flowURI)
if err != nil {
return err
}
if inst.flowDef == nil {
return errors.New("unable to resolve flow: " + inst.flowURI)
}
inst.flowModel = getFlowModel(inst.flowDef)
inst.master = inst
inst.init(inst.Instance)
return nil
}
func (inst *IndependentInstance) init(flowInst *Instance) {
for _, v := range flowInst.taskInsts {
v.flowInst = flowInst
v.task = flowInst.flowDef.GetTask(v.taskID)
}
for _, v := range flowInst.linkInsts {
v.flowInst = flowInst
v.link = flowInst.flowDef.GetLink(v.linkID)
}
}
| {
host.SetOutput(value.Name(), value.Value())
} | conditional_block |
instances.go | package instance
import (
"errors"
"fmt"
"runtime/debug"
"github.com/TIBCOSoftware/flogo-contrib/action/flow/definition"
"github.com/TIBCOSoftware/flogo-contrib/action/flow/model"
"github.com/TIBCOSoftware/flogo-contrib/action/flow/support"
"github.com/TIBCOSoftware/flogo-lib/core/data"
"github.com/TIBCOSoftware/flogo-lib/logger"
"github.com/TIBCOSoftware/flogo-lib/util"
)
type IndependentInstance struct {
*Instance
id string
stepID int
workItemQueue *util.SyncQueue //todo: change to faster non-threadsafe queue
wiCounter int
ChangeTracker *InstanceChangeTracker
subFlowCtr int
flowModel *model.FlowModel
patch *support.Patch
interceptor *support.Interceptor
subFlows map[int]*Instance
}
// NewIndependentInstance creates a new independent Flow Instance from the specified Flow definition
func NewIndependentInstance(instanceID string, flowURI string, flow *definition.Definition) *IndependentInstance {
inst := &IndependentInstance{}
inst.Instance = &Instance{}
inst.master = inst
inst.id = instanceID
inst.stepID = 0
inst.workItemQueue = util.NewSyncQueue()
inst.flowDef = flow
inst.flowURI = flowURI
inst.flowModel = getFlowModel(flow)
inst.status = model.FlowStatusNotStarted
inst.ChangeTracker = NewInstanceChangeTracker()
inst.taskInsts = make(map[string]*TaskInst)
inst.linkInsts = make(map[int]*LinkInst)
return inst
}
func (inst *IndependentInstance) newEmbeddedInstance(taskInst *TaskInst, flowURI string, flow *definition.Definition) *Instance {
inst.subFlowCtr++
embeddedInst := &Instance{}
embeddedInst.subFlowId = inst.subFlowCtr
embeddedInst.master = inst
embeddedInst.host = taskInst
embeddedInst.flowDef = flow
embeddedInst.status = model.FlowStatusNotStarted
embeddedInst.taskInsts = make(map[string]*TaskInst)
embeddedInst.linkInsts = make(map[int]*LinkInst)
embeddedInst.flowURI = flowURI
if inst.subFlows == nil {
inst.subFlows = make(map[int]*Instance)
}
inst.subFlows[embeddedInst.subFlowId] = embeddedInst
inst.ChangeTracker.SubFlowChange(taskInst.flowInst.subFlowId, CtAdd, embeddedInst.subFlowId, "")
return embeddedInst
}
func (inst *IndependentInstance) startEmbedded(embedded *Instance, startAttrs map[string]*data.Attribute) error {
embedded.attrs = startAttrs
if embedded.master != inst {
return errors.New("embedded instance is not from this independent instance")
}
//if inst.attrs == nil {
// inst.attrs = make(map[string]*data.Attribute)
//}
//
//for _, attr := range startAttrs {
// inst.attrs[attr.Name()] = attr
//}
inst.startInstance(embedded)
return nil
}
func (inst *IndependentInstance) Start(startAttrs map[string]*data.Attribute) bool {
inst.attrs = startAttrs
//if inst.attrs == nil {
// inst.attrs = make(map[string]*data.Attribute)
//}
//
//for _, attr := range startAttrs {
// inst.attrs[attr.Name()] = attr
//}
return inst.startInstance(inst.Instance)
}
func (inst *IndependentInstance) ApplyPatch(patch *support.Patch) {
if inst.patch == nil {
inst.patch = patch
inst.patch.Init()
}
}
func (inst *IndependentInstance) ApplyInterceptor(interceptor *support.Interceptor) {
if inst.interceptor == nil {
inst.interceptor = interceptor
inst.interceptor.Init()
}
}
// GetChanges returns the Change Tracker object
func (inst *IndependentInstance) GetChanges() *InstanceChangeTracker {
return inst.ChangeTracker
}
// ResetChanges resets any changes that were being tracked
func (inst *IndependentInstance) ResetChanges() {
if inst.ChangeTracker != nil {
inst.ChangeTracker.ResetChanges()
}
//todo: can we reuse this to avoid gc
inst.ChangeTracker = NewInstanceChangeTracker()
}
// StepID returns the current step ID of the Flow Instance
func (inst *IndependentInstance) StepID() int {
return inst.stepID
}
func (inst *IndependentInstance) DoStep() bool {
hasNext := false
inst.ResetChanges()
inst.stepID++
if inst.status == model.FlowStatusActive {
// get item to be worked on
item, ok := inst.workItemQueue.Pop()
if ok {
logger.Debug("Retrieved item from Flow Instance work queue")
workItem := item.(*WorkItem)
// get the corresponding behavior
behavior := inst.flowModel.GetDefaultTaskBehavior()
if typeID := workItem.taskInst.task.TypeID(); typeID != "" {
behavior = inst.flowModel.GetTaskBehavior(typeID)
}
// track the fact that the work item was removed from the queue
inst.ChangeTracker.trackWorkItem(&WorkItemQueueChange{ChgType: CtDel, ID: workItem.ID, WorkItem: workItem})
inst.execTask(behavior, workItem.taskInst)
hasNext = true
} else {
logger.Debug("Flow Instance work queue empty")
}
}
return hasNext
}
func (inst *IndependentInstance) scheduleEval(taskInst *TaskInst) {
inst.wiCounter++
workItem := NewWorkItem(inst.wiCounter, taskInst)
logger.Debugf("Scheduling task '%s'", taskInst.task.ID())
inst.workItemQueue.Push(workItem)
// track the fact that the work item was added to the queue
inst.ChangeTracker.trackWorkItem(&WorkItemQueueChange{ChgType: CtAdd, ID: workItem.ID, WorkItem: workItem})
}
// execTask executes the specified Work Item of the Flow Instance
func (inst *IndependentInstance) execTask(behavior model.TaskBehavior, taskInst *TaskInst) {
defer func() {
if r := recover(); r != nil {
err := fmt.Errorf("Unhandled Error executing task '%s' : %v", taskInst.task.Name(), r)
logger.Error(err)
// todo: useful for debugging
logger.Errorf("StackTrace: %s", debug.Stack())
if !taskInst.flowInst.isHandlingError {
taskInst.appendErrorData(NewActivityEvalError(taskInst.task.Name(), "unhandled", err.Error()))
inst.HandleGlobalError(taskInst.flowInst, err)
}
// else what should we do?
}
}()
var err error
var evalResult model.EvalResult
if taskInst.status == model.TaskStatusWaiting {
evalResult, err = behavior.PostEval(taskInst)
} else {
evalResult, err = behavior.Eval(taskInst)
}
if err != nil {
taskInst.returnError = err
inst.handleTaskError(behavior, taskInst, err)
return
}
switch evalResult {
case model.EVAL_DONE:
taskInst.SetStatus(model.TaskStatusDone)
inst.handleTaskDone(behavior, taskInst)
case model.EVAL_SKIP:
taskInst.SetStatus(model.TaskStatusSkipped)
inst.handleTaskDone(behavior, taskInst)
case model.EVAL_WAIT:
taskInst.SetStatus(model.TaskStatusWaiting)
case model.EVAL_FAIL:
taskInst.SetStatus(model.TaskStatusFailed)
case model.EVAL_REPEAT:
taskInst.SetStatus(model.TaskStatusReady)
//task needs to iterate or retry
inst.scheduleEval(taskInst)
}
}
// handleTaskDone handles the completion of a task in the Flow Instance
func (inst *IndependentInstance) handleTaskDone(taskBehavior model.TaskBehavior, taskInst *TaskInst) {
notifyFlow := false
var taskEntries []*model.TaskEntry
var err error
if taskInst.Status() == model.TaskStatusSkipped {
notifyFlow, taskEntries = taskBehavior.Skip(taskInst)
} else {
notifyFlow, taskEntries, err = taskBehavior.Done(taskInst)
}
containerInst := taskInst.flowInst
if err != nil {
taskInst.appendErrorData(err)
inst.HandleGlobalError(containerInst, err)
return
}
flowDone := false
task := taskInst.Task()
if notifyFlow {
flowBehavior := inst.flowModel.GetFlowBehavior()
flowDone = flowBehavior.TaskDone(containerInst)
}
if flowDone || containerInst.forceCompletion {
//flow completed or return was called explicitly, so lets complete the flow
flowBehavior := inst.flowModel.GetFlowBehavior()
flowBehavior.Done(containerInst)
flowDone = true
containerInst.SetStatus(model.FlowStatusCompleted)
if containerInst != inst.Instance {
//not top level flow so we have to schedule next step
// spawned from task instance
host, ok := containerInst.host.(*TaskInst)
if ok {
//if the flow failed, set the error
for _, value := range containerInst.returnData {
host.SetOutput(value.Name(), value.Value())
}
inst.scheduleEval(host)
}
//if containerInst.isHandlingError {
// //was the error handler, so directly under instance
// host,ok := containerInst.host.(*EmbeddedInstance)
// if ok {
// host.SetStatus(model.FlowStatusCompleted)
// host.returnData = containerInst.returnData
// host.returnError = containerInst.returnError
// }
// //todo if not a task inst, what should we do?
//} else {
// // spawned from task instance
//
// //todo if not a task inst, what should we do?
//}
// flow has completed so remove it
delete(inst.subFlows, containerInst.subFlowId)
}
} else {
// not done, so enter tasks specified by the Done behavior call
inst.enterTasks(containerInst, taskEntries)
}
// task is done, so we can release it
containerInst.releaseTask(task)
}
// handleTaskError handles an error returned during execution of a task in the Flow Instance
func (inst *IndependentInstance) handleTaskError(taskBehavior model.TaskBehavior, taskInst *TaskInst, err error) {
handled, taskEntries := taskBehavior.Error(taskInst, err)
containerInst := taskInst.flowInst
if !handled {
if containerInst.isHandlingError {
//fail
inst.SetStatus(model.FlowStatusFailed)
} else {
taskInst.appendErrorData(err)
inst.HandleGlobalError(containerInst, err)
}
return
}
if len(taskEntries) != 0 {
inst.enterTasks(containerInst, taskEntries)
}
containerInst.releaseTask(taskInst.Task())
}
// HandleGlobalError handles instance errors
func (inst *IndependentInstance) HandleGlobalError(containerInst *Instance, err error) {
if containerInst.isHandlingError {
//todo: log error information
containerInst.SetStatus(model.FlowStatusFailed)
return
}
containerInst.isHandlingError = true
flowBehavior := inst.flowModel.GetFlowBehavior()
//not currently handling error, so check if it has an error handler
if containerInst.flowDef.GetErrorHandler() != nil {
// todo: should we clear out the existing workitem queue for items from containerInst?
//clear existing instances
inst.taskInsts = make(map[string]*TaskInst)
taskEntries := flowBehavior.StartErrorHandler(containerInst)
inst.enterTasks(containerInst, taskEntries)
} else {
containerInst.SetStatus(model.FlowStatusFailed)
if containerInst != inst.Instance {
// spawned from task instance
host, ok := containerInst.host.(*TaskInst)
if ok {
behavior := inst.flowModel.GetDefaultTaskBehavior()
if typeID := host.task.TypeID(); typeID != "" {
behavior = inst.flowModel.GetTaskBehavior(typeID)
}
inst.handleTaskError(behavior, host, err)
//fail the task
//inst.scheduleEval(host)
}
} else {
inst.returnError = err
}
}
}
func (inst *IndependentInstance) startInstance(toStart *Instance) bool {
toStart.SetStatus(model.FlowStatusActive)
//if pi.Attrs == nil {
// pi.Attrs = make(map[string]*data.Attribute)
//}
//
//for _, attr := range startAttrs {
// pi.Attrs[attr.Name()] = attr
//}
//logger.Infof("FlowInstance Flow: %v", pi.FlowModel)
//need input mappings
flowBehavior := inst.flowModel.GetFlowBehavior()
ok, taskEntries := flowBehavior.Start(toStart)
if ok {
inst.enterTasks(toStart, taskEntries)
}
return ok
}
func (inst *IndependentInstance) enterTasks(activeInst *Instance, taskEntries []*model.TaskEntry) {
for _, taskEntry := range taskEntries {
//logger.Debugf("EnterTask - TaskEntry: %v", taskEntry)
taskToEnterBehavior := inst.flowModel.GetTaskBehavior(taskEntry.Task.TypeID())
enterTaskData, _ := activeInst.FindOrCreateTaskData(taskEntry.Task)
enterResult := taskToEnterBehavior.Enter(enterTaskData)
if enterResult == model.ENTER_EVAL {
inst.scheduleEval(enterTaskData)
} else if enterResult == model.ENTER_SKIP {
//todo optimize skip, just keep skipping and don't schedule eval
inst.scheduleEval(enterTaskData)
}
}
}
//////////////////////////////////////////////////////////////////
// WorkItem describes an item of work (event for a Task) that should be executed on Step
type WorkItem struct {
ID int `json:"id"`
taskInst *TaskInst `json:"-"`
TaskID string `json:"taskID"`
SubFlowID int `json:"subFlowId"`
}
// NewWorkItem constructs a new WorkItem for the specified TaskInst
func NewWorkItem(id int, taskInst *TaskInst) *WorkItem {
var workItem WorkItem
workItem.ID = id
workItem.taskInst = taskInst
workItem.TaskID = taskInst.task.ID()
workItem.SubFlowID = taskInst.flowInst.subFlowId
return &workItem
}
func | (taskName string, errorType string, errorText string) *ActivityEvalError {
return &ActivityEvalError{taskName: taskName, errType: errorType, errText: errorText}
}
type ActivityEvalError struct {
taskName string
errType string
errText string
}
func (e *ActivityEvalError) TaskName() string {
return e.taskName
}
func (e *ActivityEvalError) Type() string {
return e.errType
}
func (e *ActivityEvalError) Error() string {
return e.errText
}
//////////////
// todo fix the following
func getFlowModel(flow *definition.Definition) *model.FlowModel {
if flow.ModelID() == "" {
return model.Default()
} else {
return model.Get(flow.ModelID())
//todo if model not found, should throw error
}
}
// Restart restarts the Flow Instance, re-resolving its definition from the FlowManager
func (inst *IndependentInstance) Restart(id string, manager *support.FlowManager) error {
inst.id = id
var err error
inst.flowDef, err = manager.GetFlow(inst.flowURI)
if err != nil {
return err
}
if inst.flowDef == nil {
return errors.New("unable to resolve flow: " + inst.flowURI)
}
inst.flowModel = getFlowModel(inst.flowDef)
inst.master = inst
inst.init(inst.Instance)
return nil
}
func (inst *IndependentInstance) init(flowInst *Instance) {
for _, v := range flowInst.taskInsts {
v.flowInst = flowInst
v.task = flowInst.flowDef.GetTask(v.taskID)
}
for _, v := range flowInst.linkInsts {
v.flowInst = flowInst
v.link = flowInst.flowDef.GetLink(v.linkID)
}
}
| NewActivityEvalError | identifier_name |
ffi.rs | //! Internals of the `libsvm` FFI.
//!
//! Objects whose names start with `Libsvm` are for the most part
//! things that we pass or get directly from `libsvm`, and are highly
//! fragile.
//!
//! Their safe, memory-owning counterparts start with `Svm`.
use std::ffi::CStr;
use std::os::raw::c_char;
use std::slice;
use prelude::*;
/// SVM type.
#[repr(C)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum SvmType {
C_SVC,
NU_SVC,
ONE_CLASS,
EPSILON_SVR,
NU_SVR,
}
/// Type of the kernel used by the SVM.
#[repr(C)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum KernelType {
Linear,
Polynomial,
RBF,
Sigmoid,
/// Not implemented.
Precomputed,
}
/// Libsvm uses a sparse representation of data,
/// where every entry in the training matrix
/// is characterised by a column index and a value.
/// Because this struct is already a plain, safe Rust value,
/// it has no separate safe counterpart.
#[repr(C)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct LibsvmNode {
index: i32,
value: f64,
}
impl LibsvmNode {
fn new(index: i32, value: f64) -> LibsvmNode {
LibsvmNode {
index: index,
value: value,
}
}
}
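// Illustrative sketch (editorial addition, not part of the original bindings):
// the dense row [0.0, 1.5, 0.0, 2.0] is stored sparsely as the (index, value)
// pairs (1, 1.5) and (3, 2.0), terminated by the index = -1 sentinel node that
// libsvm expects at the end of every row.
#[allow(dead_code)]
fn example_sparse_row() -> Vec<LibsvmNode> {
    vec![
        LibsvmNode::new(1, 1.5),
        LibsvmNode::new(3, 2.0),
        // Sentinel node marking the end of the row.
        LibsvmNode::new(-1, 0.0),
    ]
}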
/// Libsvm structure representing training data.
#[repr(C)]
struct LibsvmProblem {
/// Number of rows in the training data.
l: i32,
y: *const f64,
    /// Rows of the X matrix. Because row lengths
/// are not stored anywhere, and do not need
/// to be equal, `libsvm` uses index = -1 as
/// a sentinel value.
svm_node: *const *const LibsvmNode,
}
/// Safe version of `LibsvmProblem`.
pub struct SvmProblem {
nodes: Vec<Vec<LibsvmNode>>,
node_ptrs: Vec<*const LibsvmNode>,
y: Vec<f64>,
}
/// Convert a row of the X matrix to its Libsvm representation.
fn row_to_nodes<T: NonzeroIterable>(row: T) -> Vec<LibsvmNode> {
let mut nodes = Vec::new();
for (index, value) in row.iter_nonzero() {
nodes.push(LibsvmNode::new(index as i32, value as f64));
}
// Sentinel value for end of row
nodes.push(LibsvmNode::new(-1, 0.0));
nodes
}
impl SvmProblem {
/// Create a new `SvmProblem` from training data.
pub fn new<'a, T>(X: &'a T, y: &Array) -> SvmProblem
where
T: IndexableMatrix,
&'a T: RowIterable,
{
let mut nodes = Vec::with_capacity(X.rows());
for row in X.iter_rows() {
let row_nodes = row_to_nodes(row);
nodes.push(row_nodes)
}
let node_ptrs = nodes.iter().map(|x| x.as_ptr()).collect::<Vec<_>>();
SvmProblem {
nodes: nodes,
node_ptrs: node_ptrs,
y: y.data().iter().map(|&x| x as f64).collect::<Vec<_>>(),
}
}
/// Returns the unsafe object that can be passed into `libsvm`.
fn build_problem(&self) -> LibsvmProblem {
LibsvmProblem {
l: self.nodes.len() as i32,
y: self.y.as_ptr(),
svm_node: self.node_ptrs.as_ptr(),
}
}
}
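// Note on ownership (editorial comment): `SvmProblem` keeps the node rows, the
// per-row pointers and the label vector alive in Rust-owned memory, while
// `build_problem` only hands out raw pointers into that storage. The resulting
// `LibsvmProblem` is therefore valid only while the `SvmProblem` it was built
// from is alive and its vectors are not mutated or reallocated.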
/// `libsvm` representation of training parameters.
#[repr(C)]
struct LibsvmParameter {
svm_type: SvmType,
kernel_type: KernelType,
degree: i32,
gamma: f64,
coef0: f64,
cache_size: f64,
eps: f64,
C: f64,
nr_weight: i32,
weight_label: *const i32,
weight: *const f64,
nu: f64,
p: f64,
shrinking: i32,
probability: i32,
}
/// Safe representation of `LibsvmParameter`.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SvmParameter {
svm_type: SvmType,
kernel_type: KernelType,
pub degree: i32,
pub gamma: f64,
pub coef0: f64,
pub cache_size: f64,
eps: f64,
pub C: f64,
nr_weight: i32,
weight_label: Vec<i32>,
weight: Vec<f64>,
nu: f64,
p: f64,
shrinking: i32,
probability: i32,
}
impl SvmParameter {
pub fn new(
svm_type: SvmType,
kernel_type: KernelType,
num_classes: usize,
dim: usize,
) -> SvmParameter {
SvmParameter {
svm_type: svm_type,
kernel_type: kernel_type,
degree: 3,
gamma: 1.0 / dim as f64,
C: 1.0,
coef0: 0.0,
cache_size: 100.0,
eps: 0.1,
nr_weight: num_classes as i32,
weight: vec![1.0; num_classes],
weight_label: (0..num_classes).map(|x| x as i32).collect::<Vec<_>>(),
nu: 0.5,
p: 0.1,
shrinking: 1,
probability: 0,
}
}
/// Returns the parameter object to be passed into
/// `libsvm` functions.
fn build_libsvm_parameter(&self) -> LibsvmParameter {
LibsvmParameter {
svm_type: self.svm_type.clone(),
kernel_type: self.kernel_type.clone(),
degree: self.degree,
gamma: self.gamma,
C: self.C,
coef0: self.coef0,
cache_size: self.cache_size,
eps: self.eps,
nr_weight: self.nr_weight,
weight: self.weight.as_ptr(),
weight_label: self.weight_label.as_ptr(),
nu: self.nu,
p: self.p,
shrinking: self.shrinking,
probability: self.probability,
}
}
}
/// The model object returned from and accepted by
/// `libsvm` functions.
#[repr(C)]
struct LibsvmModel {
svm_parameter: LibsvmParameter,
nr_class: i32,
l: i32,
SV: *const *const LibsvmNode,
sv_coef: *const *const f64,
rho: *const f64,
probA: *const f64,
probB: *const f64,
sv_indices: *const i32,
label: *const i32,
nSV: *const i32,
free_sv: i32,
}
/// Safe representation of `LibsvmModel`.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SvmModel {
svm_parameter: SvmParameter,
nr_class: i32,
l: i32,
SV: Vec<Vec<LibsvmNode>>,
sv_coef: Vec<Vec<f64>>,
rho: Vec<f64>,
probA: Vec<f64>,
probB: Vec<f64>,
sv_indices: Vec<i32>,
label: Vec<i32>,
nSV: Vec<i32>,
free_sv: i32,
}
impl SvmModel {
fn new(param: SvmParameter, model_ptr: *const LibsvmModel) -> SvmModel {
unsafe {
SvmModel {
svm_parameter: param,
nr_class: (*model_ptr).nr_class,
l: (*model_ptr).l,
SV: SvmModel::get_SV(model_ptr),
sv_coef: SvmModel::get_sv_coef(model_ptr),
rho: SvmModel::get_rho(model_ptr),
probA: vec![0.0],
probB: vec![0.0],
sv_indices: vec![0],
label: SvmModel::get_label(model_ptr),
nSV: SvmModel::get_nSV(model_ptr),
free_sv: 0,
}
}
}
fn get_libsvm_model(
&self,
SV_ptrs: &mut Vec<*const LibsvmNode>,
sv_coef_ptrs: &mut Vec<*const f64>,
) -> LibsvmModel {
SV_ptrs.clear();
sv_coef_ptrs.clear();
for x in &self.SV {
SV_ptrs.push(x.as_ptr());
}
for x in &self.sv_coef {
sv_coef_ptrs.push(x.as_ptr());
}
LibsvmModel {
svm_parameter: self.svm_parameter.build_libsvm_parameter(),
nr_class: self.nr_class,
l: self.l,
SV: SV_ptrs.as_ptr(),
sv_coef: sv_coef_ptrs.as_ptr(),
rho: self.rho.as_ptr(),
probA: self.probA.as_ptr(),
probB: self.probB.as_ptr(),
sv_indices: self.sv_indices.as_ptr(),
label: self.label.as_ptr(),
nSV: self.nSV.as_ptr(),
free_sv: self.free_sv,
}
}
unsafe fn get_nSV(model_ptr: *const LibsvmModel) -> Vec<i32> {
let nr_class = (*model_ptr).nr_class as usize;
slice::from_raw_parts((*model_ptr).nSV, nr_class).to_owned()
}
unsafe fn get_label(model_ptr: *const LibsvmModel) -> Vec<i32> {
let nr_class = (*model_ptr).nr_class as usize;
slice::from_raw_parts((*model_ptr).label, nr_class).to_owned()
}
unsafe fn get_SV(model_ptr: *const LibsvmModel) -> Vec<Vec<LibsvmNode>> {
let l = (*model_ptr).l;
let mut sv_rows = Vec::with_capacity(l as usize);
let sv_ptr = (*model_ptr).SV;
for row in 0..l {
let mut sv_row = Vec::new();
let sv_row_ptr = *sv_ptr.offset(row as isize);
let mut i = 0;
loop {
let node = (*sv_row_ptr.offset(i as isize)).clone();
sv_row.push(node.clone());
if node.index == -1 {
break;
}
i += 1;
}
sv_rows.push(sv_row);
}
sv_rows
}
unsafe fn get_rho(model_ptr: *const LibsvmModel) -> Vec<f64> {
let mut nr_class = (*model_ptr).nr_class as usize;
nr_class = nr_class * (nr_class - 1) / 2;
slice::from_raw_parts((*model_ptr).rho, nr_class).to_owned()
}
unsafe fn get_sv_coef(model_ptr: *const LibsvmModel) -> Vec<Vec<f64>> {
let nr_class = (*model_ptr).nr_class as usize;
let l = (*model_ptr).l as usize;
slice::from_raw_parts((*model_ptr).sv_coef, nr_class - 1)
.iter()
.map(|&x| slice::from_raw_parts(x, l).to_owned())
.collect::<Vec<_>>()
}
}
extern "C" {
fn svm_train(prob: *const LibsvmProblem, param: *const LibsvmParameter) -> *const LibsvmModel;
fn svm_predict_values(
svm_model: *mut LibsvmModel,
svm_nodes: *const LibsvmNode,
out: *const f64,
) -> f64;
fn svm_free_and_destroy_model(svm_model: *const *const LibsvmModel);
fn svm_check_parameter(
problem: *const LibsvmProblem,
param: *const LibsvmParameter,
) -> *const c_char;
}
fn check(problem: *const LibsvmProblem, param: *const LibsvmParameter) -> Result<(), String> {
unsafe {
let message = svm_check_parameter(problem, param);
if message.is_null() {
Ok(())
} else |
}
}
/// Fit a `libsvm` model.
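///
/// A rough usage sketch (an illustration only; `X`, `y`, `num_classes` and `dim` are
/// placeholders for whatever the caller already has):
/// let params = SvmParameter::new(SvmType::C_SVC, KernelType::RBF, num_classes, dim);
/// let model = fit(&X, &y, &params).unwrap();
/// let (decision_values, predicted_classes) = predict(&model, &X);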
pub fn fit<'a, T>(X: &'a T, y: &Array, parameters: &SvmParameter) -> Result<SvmModel, &'static str>
where
T: IndexableMatrix,
&'a T: RowIterable,
{
let problem = SvmProblem::new(X, y);
let libsvm_problem = problem.build_problem();
let libsvm_param = parameters.build_libsvm_parameter();
let model_ptr = unsafe {
match check(
&libsvm_problem as *const LibsvmProblem,
&libsvm_param as *const LibsvmParameter,
) {
Ok(_) => {}
Err(error_str) => {
// A bit of a horrible out-of-band error reporting,
// we should switch the model traits to String errors
println!("Libsvm check error: {}", error_str);
return Err("Invalid libsvm parameters.");
}
};
svm_train(
&libsvm_problem as *const LibsvmProblem,
&libsvm_param as *const LibsvmParameter,
)
};
let model = SvmModel::new(parameters.clone(), model_ptr);
unsafe {
// Free the model data allocated by libsvm,
// we've got our own, sane copy.
svm_free_and_destroy_model(&model_ptr);
}
Ok(model)
}
/// Call `libsvm` to get predictions (both predicted classes
/// and `OvO` decision function values).
pub fn predict<'a, T>(model: &SvmModel, X: &'a T) -> (Array, Array)
where
T: IndexableMatrix,
&'a T: RowIterable,
{
let x_rows = X.rows();
let num_classes = model.nr_class as usize;
let ovo_num_classes = num_classes * (num_classes - 1) / 2;
// We are actually mutating this in C, but convincing rustc that is
// safe is a bit of a pain
let df = vec![0.0; x_rows * ovo_num_classes];
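// Each call to svm_predict_values below writes ovo_num_classes decision values into the
// current df_slice window, which is then advanced past that sample's block.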
let mut df_slice = &df[..];
let mut predicted_class = Vec::with_capacity(x_rows);
// Allocate space for pointers to support vector components,
// we don't need them after we're finished here
// so they will be freed.
let mut sv_ptrs = Vec::new();
let mut sv_coef_ptrs = Vec::new();
let mut libsvm_model = model.get_libsvm_model(&mut sv_ptrs, &mut sv_coef_ptrs);
for (_, row) in X.iter_rows().enumerate() {
let nodes = row_to_nodes(row);
unsafe {
predicted_class.push(svm_predict_values(
&mut libsvm_model as *mut LibsvmModel,
nodes.as_ptr(),
df_slice.as_ptr(),
) as f32);
}
df_slice = &df_slice[ovo_num_classes..];
}
let df_data = df.iter().map(|&x| x as f32).collect::<Vec<_>>();
let mut df_array = Array::from(df_data);
df_array.reshape(x_rows, ovo_num_classes);
(df_array, Array::from(predicted_class))
}
| {
Err(CStr::from_ptr(message).to_str().unwrap().to_owned())
} | conditional_block |
ffi.rs | //! Internals of the `libsvm` FFI.
//!
//! Objects whose names start with `Libsvm` are for the most part
//! things that we pass or get directly from `libsvm`, and are highly
//! fragile.
//!
//! Their safe, memory-owning counterparts start with `Svm`.
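//!
//! Rough call order, as a sketch of the functions defined below: build an `SvmParameter`
//! with `SvmParameter::new`, train a model with `fit`, then score new rows with `predict`.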
use std::ffi::CStr;
use std::os::raw::c_char;
use std::slice;
use prelude::*;
/// SVM type.
#[repr(C)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum SvmType {
C_SVC,
NU_SVC,
ONE_CLASS,
EPSILON_SVR,
NU_SVR,
}
/// Type of the kernel used by the SVM.
#[repr(C)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum KernelType {
Linear,
Polynomial,
RBF,
Sigmoid,
/// Not implemented.
Precomputed,
}
/// Libsvm uses a sparse representation of data,
/// where every entry in the training matrix
/// is characterised by a column index and a value.
/// Because this is a safe Rust-like object in itself,
/// it does not have a safe counterpart.
#[repr(C)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct LibsvmNode {
index: i32,
value: f64,
}
impl LibsvmNode {
fn new(index: i32, value: f64) -> LibsvmNode {
LibsvmNode {
index: index,
value: value,
}
}
}
/// Libsvm structure representing training data.
#[repr(C)]
struct LibsvmProblem {
/// Number of rows in the training data.
l: i32,
y: *const f64,
/// Rows of the X matrix. Because row lengths
/// are not stored anywhere, and do not need
/// to be equal, `libsvm` uses index = -1 as
/// a sentinel value.
svm_node: *const *const LibsvmNode,
}
/// Safe version of `LibsvmProblem`.
pub struct SvmProblem {
nodes: Vec<Vec<LibsvmNode>>,
node_ptrs: Vec<*const LibsvmNode>,
y: Vec<f64>,
}
/// Convert a row of the X matrix to its Libsvm representation.
fn row_to_nodes<T: NonzeroIterable>(row: T) -> Vec<LibsvmNode> {
let mut nodes = Vec::new();
for (index, value) in row.iter_nonzero() {
nodes.push(LibsvmNode::new(index as i32, value as f64));
}
// Sentinel value for end of row
nodes.push(LibsvmNode::new(-1, 0.0));
nodes
}
impl SvmProblem {
/// Create a new `SvmProblem` from training data.
pub fn new<'a, T>(X: &'a T, y: &Array) -> SvmProblem
where
T: IndexableMatrix,
&'a T: RowIterable,
{
let mut nodes = Vec::with_capacity(X.rows());
for row in X.iter_rows() {
let row_nodes = row_to_nodes(row);
nodes.push(row_nodes)
}
let node_ptrs = nodes.iter().map(|x| x.as_ptr()).collect::<Vec<_>>();
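// `nodes` owns the row data while `node_ptrs` merely borrows it, so both are stored in the
// struct below to keep the raw pointers valid for as long as libsvm reads them.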
SvmProblem {
nodes: nodes,
node_ptrs: node_ptrs,
y: y.data().iter().map(|&x| x as f64).collect::<Vec<_>>(),
}
}
/// Returns the unsafe object that can be passed into `libsvm`.
fn build_problem(&self) -> LibsvmProblem {
LibsvmProblem {
l: self.nodes.len() as i32,
y: self.y.as_ptr(),
svm_node: self.node_ptrs.as_ptr(),
}
}
}
/// `libsvm` representation of training parameters.
#[repr(C)]
struct LibsvmParameter {
svm_type: SvmType, | cache_size: f64,
eps: f64,
C: f64,
nr_weight: i32,
weight_label: *const i32,
weight: *const f64,
nu: f64,
p: f64,
shrinking: i32,
probability: i32,
}
/// Safe representation of `LibsvmParameter`.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SvmParameter {
svm_type: SvmType,
kernel_type: KernelType,
pub degree: i32,
pub gamma: f64,
pub coef0: f64,
pub cache_size: f64,
eps: f64,
pub C: f64,
nr_weight: i32,
weight_label: Vec<i32>,
weight: Vec<f64>,
nu: f64,
p: f64,
shrinking: i32,
probability: i32,
}
impl SvmParameter {
pub fn new(
svm_type: SvmType,
kernel_type: KernelType,
num_classes: usize,
dim: usize,
) -> SvmParameter {
SvmParameter {
svm_type: svm_type,
kernel_type: kernel_type,
degree: 3,
gamma: 1.0 / dim as f64,
C: 1.0,
coef0: 0.0,
cache_size: 100.0,
eps: 0.1,
nr_weight: num_classes as i32,
weight: vec![1.0; num_classes],
weight_label: (0..num_classes).map(|x| x as i32).collect::<Vec<_>>(),
nu: 0.5,
p: 0.1,
shrinking: 1,
probability: 0,
}
}
/// Returns the parameter object to be passed into
/// `libsvm` functions.
fn build_libsvm_parameter(&self) -> LibsvmParameter {
LibsvmParameter {
svm_type: self.svm_type.clone(),
kernel_type: self.kernel_type.clone(),
degree: self.degree,
gamma: self.gamma,
C: self.C,
coef0: self.coef0,
cache_size: self.cache_size,
eps: self.eps,
nr_weight: self.nr_weight,
weight: self.weight.as_ptr(),
weight_label: self.weight_label.as_ptr(),
nu: self.nu,
p: self.p,
shrinking: self.shrinking,
probability: self.probability,
}
}
}
/// The model object returned from and accepted by
/// `libsvm` functions.
#[repr(C)]
struct LibsvmModel {
svm_parameter: LibsvmParameter,
nr_class: i32,
l: i32,
SV: *const *const LibsvmNode,
sv_coef: *const *const f64,
rho: *const f64,
probA: *const f64,
probB: *const f64,
sv_indices: *const i32,
label: *const i32,
nSV: *const i32,
free_sv: i32,
}
/// Safe representation of `LibsvmModel`.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SvmModel {
svm_parameter: SvmParameter,
nr_class: i32,
l: i32,
SV: Vec<Vec<LibsvmNode>>,
sv_coef: Vec<Vec<f64>>,
rho: Vec<f64>,
probA: Vec<f64>,
probB: Vec<f64>,
sv_indices: Vec<i32>,
label: Vec<i32>,
nSV: Vec<i32>,
free_sv: i32,
}
impl SvmModel {
fn new(param: SvmParameter, model_ptr: *const LibsvmModel) -> SvmModel {
unsafe {
SvmModel {
svm_parameter: param,
nr_class: (*model_ptr).nr_class,
l: (*model_ptr).l,
SV: SvmModel::get_SV(model_ptr),
sv_coef: SvmModel::get_sv_coef(model_ptr),
rho: SvmModel::get_rho(model_ptr),
probA: vec![0.0],
probB: vec![0.0],
sv_indices: vec![0],
label: SvmModel::get_label(model_ptr),
nSV: SvmModel::get_nSV(model_ptr),
free_sv: 0,
}
}
}
fn get_libsvm_model(
&self,
SV_ptrs: &mut Vec<*const LibsvmNode>,
sv_coef_ptrs: &mut Vec<*const f64>,
) -> LibsvmModel {
SV_ptrs.clear();
sv_coef_ptrs.clear();
for x in &self.SV {
SV_ptrs.push(x.as_ptr());
}
for x in &self.sv_coef {
sv_coef_ptrs.push(x.as_ptr());
}
LibsvmModel {
svm_parameter: self.svm_parameter.build_libsvm_parameter(),
nr_class: self.nr_class,
l: self.l,
SV: SV_ptrs.as_ptr(),
sv_coef: sv_coef_ptrs.as_ptr(),
rho: self.rho.as_ptr(),
probA: self.probA.as_ptr(),
probB: self.probB.as_ptr(),
sv_indices: self.sv_indices.as_ptr(),
label: self.label.as_ptr(),
nSV: self.nSV.as_ptr(),
free_sv: self.free_sv,
}
}
unsafe fn get_nSV(model_ptr: *const LibsvmModel) -> Vec<i32> {
let nr_class = (*model_ptr).nr_class as usize;
slice::from_raw_parts((*model_ptr).nSV, nr_class).to_owned()
}
unsafe fn get_label(model_ptr: *const LibsvmModel) -> Vec<i32> {
let nr_class = (*model_ptr).nr_class as usize;
slice::from_raw_parts((*model_ptr).label, nr_class).to_owned()
}
unsafe fn get_SV(model_ptr: *const LibsvmModel) -> Vec<Vec<LibsvmNode>> {
let l = (*model_ptr).l;
let mut sv_rows = Vec::with_capacity(l as usize);
let sv_ptr = (*model_ptr).SV;
for row in 0..l {
let mut sv_row = Vec::new();
let sv_row_ptr = *sv_ptr.offset(row as isize);
let mut i = 0;
loop {
let node = (*sv_row_ptr.offset(i as isize)).clone();
sv_row.push(node.clone());
if node.index == -1 {
break;
}
i += 1;
}
sv_rows.push(sv_row);
}
sv_rows
}
unsafe fn get_rho(model_ptr: *const LibsvmModel) -> Vec<f64> {
let mut nr_class = (*model_ptr).nr_class as usize;
nr_class = nr_class * (nr_class - 1) / 2;
slice::from_raw_parts((*model_ptr).rho, nr_class).to_owned()
}
unsafe fn get_sv_coef(model_ptr: *const LibsvmModel) -> Vec<Vec<f64>> {
let nr_class = (*model_ptr).nr_class as usize;
let l = (*model_ptr).l as usize;
slice::from_raw_parts((*model_ptr).sv_coef, nr_class - 1)
.iter()
.map(|&x| slice::from_raw_parts(x, l).to_owned())
.collect::<Vec<_>>()
}
}
extern "C" {
fn svm_train(prob: *const LibsvmProblem, param: *const LibsvmParameter) -> *const LibsvmModel;
fn svm_predict_values(
svm_model: *mut LibsvmModel,
svm_nodes: *const LibsvmNode,
out: *const f64,
) -> f64;
fn svm_free_and_destroy_model(svm_model: *const *const LibsvmModel);
fn svm_check_parameter(
problem: *const LibsvmProblem,
param: *const LibsvmParameter,
) -> *const c_char;
}
fn check(problem: *const LibsvmProblem, param: *const LibsvmParameter) -> Result<(), String> {
unsafe {
let message = svm_check_parameter(problem, param);
if message.is_null() {
Ok(())
} else {
Err(CStr::from_ptr(message).to_str().unwrap().to_owned())
}
}
}
/// Fit a `libsvm` model.
pub fn fit<'a, T>(X: &'a T, y: &Array, parameters: &SvmParameter) -> Result<SvmModel, &'static str>
where
T: IndexableMatrix,
&'a T: RowIterable,
{
let problem = SvmProblem::new(X, y);
let libsvm_problem = problem.build_problem();
let libsvm_param = parameters.build_libsvm_parameter();
let model_ptr = unsafe {
match check(
&libsvm_problem as *const LibsvmProblem,
&libsvm_param as *const LibsvmParameter,
) {
Ok(_) => {}
Err(error_str) => {
// A bit of a horrible out-of-band error reporting,
// we should switch the model traits to String errors
println!("Libsvm check error: {}", error_str);
return Err("Invalid libsvm parameters.");
}
};
svm_train(
&libsvm_problem as *const LibsvmProblem,
&libsvm_param as *const LibsvmParameter,
)
};
let model = SvmModel::new(parameters.clone(), model_ptr);
unsafe {
// Free the model data allocated by libsvm,
// we've got our own, sane copy.
svm_free_and_destroy_model(&model_ptr);
}
Ok(model)
}
/// Call `libsvm` to get predictions (both predicted classes
/// and `OvO` decision function values).
pub fn predict<'a, T>(model: &SvmModel, X: &'a T) -> (Array, Array)
where
T: IndexableMatrix,
&'a T: RowIterable,
{
let x_rows = X.rows();
let num_classes = model.nr_class as usize;
let ovo_num_classes = num_classes * (num_classes - 1) / 2;
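// libsvm's multiclass SVMs are one-vs-one, so there are n * (n - 1) / 2 decision values per sample.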
// We are actually mutating this in C, but convincing rustc that is
// safe is a bit of a pain
let df = vec![0.0; x_rows * ovo_num_classes];
let mut df_slice = &df[..];
let mut predicted_class = Vec::with_capacity(x_rows);
// Allocate space for pointers to support vector components,
// we don't need them after we're finished here
// so they will be freed.
let mut sv_ptrs = Vec::new();
let mut sv_coef_ptrs = Vec::new();
let mut libsvm_model = model.get_libsvm_model(&mut sv_ptrs, &mut sv_coef_ptrs);
for (_, row) in X.iter_rows().enumerate() {
let nodes = row_to_nodes(row);
unsafe {
predicted_class.push(svm_predict_values(
&mut libsvm_model as *mut LibsvmModel,
nodes.as_ptr(),
df_slice.as_ptr(),
) as f32);
}
df_slice = &df_slice[ovo_num_classes..];
}
let df_data = df.iter().map(|&x| x as f32).collect::<Vec<_>>();
let mut df_array = Array::from(df_data);
df_array.reshape(x_rows, ovo_num_classes);
(df_array, Array::from(predicted_class))
} | kernel_type: KernelType,
degree: i32,
gamma: f64,
coef0: f64, | random_line_split |
ffi.rs | //! Internals of the `libsvm` FFI.
//!
//! Objects whose names start with `Libsvm` are for the most part
//! things that we pass or get directly from `libsvm`, and are highly
//! fragile.
//!
//! Their safe, memory-owning counterparts start with `Svm`.
use std::ffi::CStr;
use std::os::raw::c_char;
use std::slice;
use prelude::*;
/// SVM type.
#[repr(C)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum SvmType {
C_SVC,
NU_SVC,
ONE_CLASS,
EPSILON_SVR,
NU_SVR,
}
/// Type of the kernel used by the SVM.
#[repr(C)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum KernelType {
Linear,
Polynomial,
RBF,
Sigmoid,
/// Not implemented.
Precomputed,
}
/// Libsvm uses a sparse representation of data,
/// where every entry in the training matrix
/// is characterised by a column index and a value.
/// Because this is a safe Rust-like object in itself,
/// it does not have a safe counterpart.
#[repr(C)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct LibsvmNode {
index: i32,
value: f64,
}
impl LibsvmNode {
fn new(index: i32, value: f64) -> LibsvmNode {
LibsvmNode {
index: index,
value: value,
}
}
}
/// Libsvm structure representing training data.
#[repr(C)]
struct LibsvmProblem {
/// Number of rows in the training data.
l: i32,
y: *const f64,
/// Rows of the X matrix. Because row lengths
/// are not stored anywhere, and do not need
/// to be equal, `libsvm` uses index = -1 as
/// a sentinel value.
svm_node: *const *const LibsvmNode,
}
/// Safe version of `LibsvmProblem`.
pub struct SvmProblem {
nodes: Vec<Vec<LibsvmNode>>,
node_ptrs: Vec<*const LibsvmNode>,
y: Vec<f64>,
}
/// Convert a row of the X matrix to its Libsvm representation.
fn | <T: NonzeroIterable>(row: T) -> Vec<LibsvmNode> {
let mut nodes = Vec::new();
for (index, value) in row.iter_nonzero() {
nodes.push(LibsvmNode::new(index as i32, value as f64));
}
// Sentinel value for end of row
nodes.push(LibsvmNode::new(-1, 0.0));
nodes
}
impl SvmProblem {
/// Create a new `SvmProblem` from training data.
pub fn new<'a, T>(X: &'a T, y: &Array) -> SvmProblem
where
T: IndexableMatrix,
&'a T: RowIterable,
{
let mut nodes = Vec::with_capacity(X.rows());
for row in X.iter_rows() {
let row_nodes = row_to_nodes(row);
nodes.push(row_nodes)
}
let node_ptrs = nodes.iter().map(|x| x.as_ptr()).collect::<Vec<_>>();
SvmProblem {
nodes: nodes,
node_ptrs: node_ptrs,
y: y.data().iter().map(|&x| x as f64).collect::<Vec<_>>(),
}
}
/// Returns the unsafe object that can be passed into `libsvm`.
fn build_problem(&self) -> LibsvmProblem {
LibsvmProblem {
l: self.nodes.len() as i32,
y: self.y.as_ptr(),
svm_node: self.node_ptrs.as_ptr(),
}
}
}
/// `libsvm` representation of training parameters.
#[repr(C)]
struct LibsvmParameter {
svm_type: SvmType,
kernel_type: KernelType,
degree: i32,
gamma: f64,
coef0: f64,
cache_size: f64,
eps: f64,
C: f64,
nr_weight: i32,
weight_label: *const i32,
weight: *const f64,
nu: f64,
p: f64,
shrinking: i32,
probability: i32,
}
/// Safe representation of `LibsvmParameter`.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SvmParameter {
svm_type: SvmType,
kernel_type: KernelType,
pub degree: i32,
pub gamma: f64,
pub coef0: f64,
pub cache_size: f64,
eps: f64,
pub C: f64,
nr_weight: i32,
weight_label: Vec<i32>,
weight: Vec<f64>,
nu: f64,
p: f64,
shrinking: i32,
probability: i32,
}
impl SvmParameter {
pub fn new(
svm_type: SvmType,
kernel_type: KernelType,
num_classes: usize,
dim: usize,
) -> SvmParameter {
SvmParameter {
svm_type: svm_type,
kernel_type: kernel_type,
degree: 3,
gamma: 1.0 / dim as f64,
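// Default gamma scales inversely with dim (presumably the feature dimension of the training data).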
C: 1.0,
coef0: 0.0,
cache_size: 100.0,
eps: 0.1,
nr_weight: num_classes as i32,
weight: vec![1.0; num_classes],
weight_label: (0..num_classes).map(|x| x as i32).collect::<Vec<_>>(),
nu: 0.5,
p: 0.1,
shrinking: 1,
probability: 0,
}
}
/// Returns the parameter object to be passed into
/// `libsvm` functions.
fn build_libsvm_parameter(&self) -> LibsvmParameter {
LibsvmParameter {
svm_type: self.svm_type.clone(),
kernel_type: self.kernel_type.clone(),
degree: self.degree,
gamma: self.gamma,
C: self.C,
coef0: self.coef0,
cache_size: self.cache_size,
eps: self.eps,
nr_weight: self.nr_weight,
weight: self.weight.as_ptr(),
weight_label: self.weight_label.as_ptr(),
nu: self.nu,
p: self.p,
shrinking: self.shrinking,
probability: self.probability,
}
}
}
/// The model object returned from and accepted by
/// `libsvm` functions.
#[repr(C)]
struct LibsvmModel {
svm_parameter: LibsvmParameter,
nr_class: i32,
l: i32,
SV: *const *const LibsvmNode,
sv_coef: *const *const f64,
rho: *const f64,
probA: *const f64,
probB: *const f64,
sv_indices: *const i32,
label: *const i32,
nSV: *const i32,
free_sv: i32,
}
/// Safe representation of `LibsvmModel`.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SvmModel {
svm_parameter: SvmParameter,
nr_class: i32,
l: i32,
SV: Vec<Vec<LibsvmNode>>,
sv_coef: Vec<Vec<f64>>,
rho: Vec<f64>,
probA: Vec<f64>,
probB: Vec<f64>,
sv_indices: Vec<i32>,
label: Vec<i32>,
nSV: Vec<i32>,
free_sv: i32,
}
impl SvmModel {
fn new(param: SvmParameter, model_ptr: *const LibsvmModel) -> SvmModel {
unsafe {
SvmModel {
svm_parameter: param,
nr_class: (*model_ptr).nr_class,
l: (*model_ptr).l,
SV: SvmModel::get_SV(model_ptr),
sv_coef: SvmModel::get_sv_coef(model_ptr),
rho: SvmModel::get_rho(model_ptr),
probA: vec![0.0],
probB: vec![0.0],
sv_indices: vec![0],
label: SvmModel::get_label(model_ptr),
nSV: SvmModel::get_nSV(model_ptr),
free_sv: 0,
}
}
}
fn get_libsvm_model(
&self,
SV_ptrs: &mut Vec<*const LibsvmNode>,
sv_coef_ptrs: &mut Vec<*const f64>,
) -> LibsvmModel {
SV_ptrs.clear();
sv_coef_ptrs.clear();
for x in &self.SV {
SV_ptrs.push(x.as_ptr());
}
for x in &self.sv_coef {
sv_coef_ptrs.push(x.as_ptr());
}
LibsvmModel {
svm_parameter: self.svm_parameter.build_libsvm_parameter(),
nr_class: self.nr_class,
l: self.l,
SV: SV_ptrs.as_ptr(),
sv_coef: sv_coef_ptrs.as_ptr(),
rho: self.rho.as_ptr(),
probA: self.probA.as_ptr(),
probB: self.probB.as_ptr(),
sv_indices: self.sv_indices.as_ptr(),
label: self.label.as_ptr(),
nSV: self.nSV.as_ptr(),
free_sv: self.free_sv,
}
}
unsafe fn get_nSV(model_ptr: *const LibsvmModel) -> Vec<i32> {
let nr_class = (*model_ptr).nr_class as usize;
slice::from_raw_parts((*model_ptr).nSV, nr_class).to_owned()
}
unsafe fn get_label(model_ptr: *const LibsvmModel) -> Vec<i32> {
let nr_class = (*model_ptr).nr_class as usize;
slice::from_raw_parts((*model_ptr).label, nr_class).to_owned()
}
unsafe fn get_SV(model_ptr: *const LibsvmModel) -> Vec<Vec<LibsvmNode>> {
let l = (*model_ptr).l;
let mut sv_rows = Vec::with_capacity(l as usize);
let sv_ptr = (*model_ptr).SV;
for row in 0..l {
let mut sv_row = Vec::new();
let sv_row_ptr = *sv_ptr.offset(row as isize);
let mut i = 0;
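// Walk the row until the index == -1 sentinel that libsvm uses to terminate rows.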
loop {
let node = (*sv_row_ptr.offset(i as isize)).clone();
sv_row.push(node.clone());
if node.index == -1 {
break;
}
i += 1;
}
sv_rows.push(sv_row);
}
sv_rows
}
unsafe fn get_rho(model_ptr: *const LibsvmModel) -> Vec<f64> {
let mut nr_class = (*model_ptr).nr_class as usize;
nr_class = nr_class * (nr_class - 1) / 2;
slice::from_raw_parts((*model_ptr).rho, nr_class).to_owned()
}
unsafe fn get_sv_coef(model_ptr: *const LibsvmModel) -> Vec<Vec<f64>> {
let nr_class = (*model_ptr).nr_class as usize;
let l = (*model_ptr).l as usize;
slice::from_raw_parts((*model_ptr).sv_coef, nr_class - 1)
.iter()
.map(|&x| slice::from_raw_parts(x, l).to_owned())
.collect::<Vec<_>>()
}
}
extern "C" {
fn svm_train(prob: *const LibsvmProblem, param: *const LibsvmParameter) -> *const LibsvmModel;
fn svm_predict_values(
svm_model: *mut LibsvmModel,
svm_nodes: *const LibsvmNode,
out: *const f64,
) -> f64;
fn svm_free_and_destroy_model(svm_model: *const *const LibsvmModel);
fn svm_check_parameter(
problem: *const LibsvmProblem,
param: *const LibsvmParameter,
) -> *const c_char;
}
fn check(problem: *const LibsvmProblem, param: *const LibsvmParameter) -> Result<(), String> {
unsafe {
let message = svm_check_parameter(problem, param);
if message.is_null() {
Ok(())
} else {
Err(CStr::from_ptr(message).to_str().unwrap().to_owned())
}
}
}
/// Fit a `libsvm` model.
pub fn fit<'a, T>(X: &'a T, y: &Array, parameters: &SvmParameter) -> Result<SvmModel, &'static str>
where
T: IndexableMatrix,
&'a T: RowIterable,
{
let problem = SvmProblem::new(X, y);
let libsvm_problem = problem.build_problem();
let libsvm_param = parameters.build_libsvm_parameter();
let model_ptr = unsafe {
match check(
&libsvm_problem as *const LibsvmProblem,
&libsvm_param as *const LibsvmParameter,
) {
Ok(_) => {}
Err(error_str) => {
// A bit of a horrible out-of-band error reporting,
// we should switch the model traits to String errors
println!("Libsvm check error: {}", error_str);
return Err("Invalid libsvm parameters.");
}
};
svm_train(
&libsvm_problem as *const LibsvmProblem,
&libsvm_param as *const LibsvmParameter,
)
};
let model = SvmModel::new(parameters.clone(), model_ptr);
unsafe {
// Free the model data allocated by libsvm,
// we've got our own, sane copy.
svm_free_and_destroy_model(&model_ptr);
}
Ok(model)
}
/// Call `libsvm` to get predictions (both predicted classes
/// and `OvO` decision function values).
pub fn predict<'a, T>(model: &SvmModel, X: &'a T) -> (Array, Array)
where
T: IndexableMatrix,
&'a T: RowIterable,
{
let x_rows = X.rows();
let num_classes = model.nr_class as usize;
let ovo_num_classes = num_classes * (num_classes - 1) / 2;
// We are actually mutating this in C, but convincing rustc that is
// safe is a bit of a pain
let df = vec![0.0; x_rows * ovo_num_classes];
let mut df_slice = &df[..];
let mut predicted_class = Vec::with_capacity(x_rows);
// Allocate space for pointers to support vector components,
// we don't need them after we're finished here
// so they will be freed.
let mut sv_ptrs = Vec::new();
let mut sv_coef_ptrs = Vec::new();
let mut libsvm_model = model.get_libsvm_model(&mut sv_ptrs, &mut sv_coef_ptrs);
for (_, row) in X.iter_rows().enumerate() {
let nodes = row_to_nodes(row);
unsafe {
predicted_class.push(svm_predict_values(
&mut libsvm_model as *mut LibsvmModel,
nodes.as_ptr(),
df_slice.as_ptr(),
) as f32);
}
df_slice = &df_slice[ovo_num_classes..];
}
let df_data = df.iter().map(|&x| x as f32).collect::<Vec<_>>();
let mut df_array = Array::from(df_data);
df_array.reshape(x_rows, ovo_num_classes);
(df_array, Array::from(predicted_class))
}
| row_to_nodes | identifier_name |
ffi.rs | //! Internals of the `libsvm` FFI.
//!
//! Objects whose names start with `Libsvm` are for the most part
//! things that we pass or get directly from `libsvm`, and are highly
//! fragile.
//!
//! Their safe, memory-owning counterparts start with `Svm`.
use std::ffi::CStr;
use std::os::raw::c_char;
use std::slice;
use prelude::*;
/// SVM type.
#[repr(C)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum SvmType {
C_SVC,
NU_SVC,
ONE_CLASS,
EPSILON_SVR,
NU_SVR,
}
/// Type of the kernel used by the SVM.
#[repr(C)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum KernelType {
Linear,
Polynomial,
RBF,
Sigmoid,
/// Not implemented.
Precomputed,
}
/// Libsvm uses a sparse representation of data,
/// where every entry in the training matrix
/// is characterised by a column index and a value.
/// Because this is a safe Rust-like object in itself,
/// it does not have a safe counterpart.
#[repr(C)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct LibsvmNode {
index: i32,
value: f64,
}
impl LibsvmNode {
fn new(index: i32, value: f64) -> LibsvmNode {
LibsvmNode {
index: index,
value: value,
}
}
}
/// Libsvm structure representing training data.
#[repr(C)]
struct LibsvmProblem {
/// Number of rows in the training data.
l: i32,
y: *const f64,
/// Rows of the X matrix. Because row lengths
/// are not stored anywhere, and do not need
/// to be equal, `libsvm` uses index = -1 as
/// a sentinel value.
svm_node: *const *const LibsvmNode,
}
/// Safe version of `LibsvmProblem`.
pub struct SvmProblem {
nodes: Vec<Vec<LibsvmNode>>,
node_ptrs: Vec<*const LibsvmNode>,
y: Vec<f64>,
}
/// Convert a row of the X matrix to its Libsvm representation.
fn row_to_nodes<T: NonzeroIterable>(row: T) -> Vec<LibsvmNode> {
let mut nodes = Vec::new();
for (index, value) in row.iter_nonzero() {
nodes.push(LibsvmNode::new(index as i32, value as f64));
}
// Sentinel value for end of row
nodes.push(LibsvmNode::new(-1, 0.0));
nodes
}
impl SvmProblem {
/// Create a new `SvmProblem` from training data.
pub fn new<'a, T>(X: &'a T, y: &Array) -> SvmProblem
where
T: IndexableMatrix,
&'a T: RowIterable,
|
/// Returns the unsafe object that can be passed into `libsvm`.
fn build_problem(&self) -> LibsvmProblem {
LibsvmProblem {
l: self.nodes.len() as i32,
y: self.y.as_ptr(),
svm_node: self.node_ptrs.as_ptr(),
}
}
}
/// `libsvm` representation of training parameters.
#[repr(C)]
struct LibsvmParameter {
svm_type: SvmType,
kernel_type: KernelType,
degree: i32,
gamma: f64,
coef0: f64,
cache_size: f64,
eps: f64,
C: f64,
nr_weight: i32,
weight_label: *const i32,
weight: *const f64,
nu: f64,
p: f64,
shrinking: i32,
probability: i32,
}
/// Safe representation of `LibsvmParameter`.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SvmParameter {
svm_type: SvmType,
kernel_type: KernelType,
pub degree: i32,
pub gamma: f64,
pub coef0: f64,
pub cache_size: f64,
eps: f64,
pub C: f64,
nr_weight: i32,
weight_label: Vec<i32>,
weight: Vec<f64>,
nu: f64,
p: f64,
shrinking: i32,
probability: i32,
}
impl SvmParameter {
pub fn new(
svm_type: SvmType,
kernel_type: KernelType,
num_classes: usize,
dim: usize,
) -> SvmParameter {
SvmParameter {
svm_type: svm_type,
kernel_type: kernel_type,
degree: 3,
gamma: 1.0 / dim as f64,
C: 1.0,
coef0: 0.0,
cache_size: 100.0,
eps: 0.1,
nr_weight: num_classes as i32,
weight: vec![1.0; num_classes],
weight_label: (0..num_classes).map(|x| x as i32).collect::<Vec<_>>(),
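// By default every class label in 0..num_classes gets a weight of 1.0.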
nu: 0.5,
p: 0.1,
shrinking: 1,
probability: 0,
}
}
/// Returns the parameter object to be passed into
/// `libsvm` functions.
fn build_libsvm_parameter(&self) -> LibsvmParameter {
LibsvmParameter {
svm_type: self.svm_type.clone(),
kernel_type: self.kernel_type.clone(),
degree: self.degree,
gamma: self.gamma,
C: self.C,
coef0: self.coef0,
cache_size: self.cache_size,
eps: self.eps,
nr_weight: self.nr_weight,
weight: self.weight.as_ptr(),
weight_label: self.weight_label.as_ptr(),
nu: self.nu,
p: self.p,
shrinking: self.shrinking,
probability: self.probability,
}
}
}
/// The model object returned from and accepted by
/// `libsvm` functions.
#[repr(C)]
struct LibsvmModel {
svm_parameter: LibsvmParameter,
nr_class: i32,
l: i32,
SV: *const *const LibsvmNode,
sv_coef: *const *const f64,
rho: *const f64,
probA: *const f64,
probB: *const f64,
sv_indices: *const i32,
label: *const i32,
nSV: *const i32,
free_sv: i32,
}
/// Safe representation of `LibsvmModel`.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SvmModel {
svm_parameter: SvmParameter,
nr_class: i32,
l: i32,
SV: Vec<Vec<LibsvmNode>>,
sv_coef: Vec<Vec<f64>>,
rho: Vec<f64>,
probA: Vec<f64>,
probB: Vec<f64>,
sv_indices: Vec<i32>,
label: Vec<i32>,
nSV: Vec<i32>,
free_sv: i32,
}
impl SvmModel {
fn new(param: SvmParameter, model_ptr: *const LibsvmModel) -> SvmModel {
unsafe {
SvmModel {
svm_parameter: param,
nr_class: (*model_ptr).nr_class,
l: (*model_ptr).l,
SV: SvmModel::get_SV(model_ptr),
sv_coef: SvmModel::get_sv_coef(model_ptr),
rho: SvmModel::get_rho(model_ptr),
probA: vec![0.0],
probB: vec![0.0],
sv_indices: vec![0],
label: SvmModel::get_label(model_ptr),
nSV: SvmModel::get_nSV(model_ptr),
free_sv: 0,
}
}
}
fn get_libsvm_model(
&self,
SV_ptrs: &mut Vec<*const LibsvmNode>,
sv_coef_ptrs: &mut Vec<*const f64>,
) -> LibsvmModel {
SV_ptrs.clear();
sv_coef_ptrs.clear();
for x in &self.SV {
SV_ptrs.push(x.as_ptr());
}
for x in &self.sv_coef {
sv_coef_ptrs.push(x.as_ptr());
}
LibsvmModel {
svm_parameter: self.svm_parameter.build_libsvm_parameter(),
nr_class: self.nr_class,
l: self.l,
SV: SV_ptrs.as_ptr(),
sv_coef: sv_coef_ptrs.as_ptr(),
rho: self.rho.as_ptr(),
probA: self.probA.as_ptr(),
probB: self.probB.as_ptr(),
sv_indices: self.sv_indices.as_ptr(),
label: self.label.as_ptr(),
nSV: self.nSV.as_ptr(),
free_sv: self.free_sv,
}
}
unsafe fn get_nSV(model_ptr: *const LibsvmModel) -> Vec<i32> {
let nr_class = (*model_ptr).nr_class as usize;
slice::from_raw_parts((*model_ptr).nSV, nr_class).to_owned()
}
unsafe fn get_label(model_ptr: *const LibsvmModel) -> Vec<i32> {
let nr_class = (*model_ptr).nr_class as usize;
slice::from_raw_parts((*model_ptr).label, nr_class).to_owned()
}
unsafe fn get_SV(model_ptr: *const LibsvmModel) -> Vec<Vec<LibsvmNode>> {
let l = (*model_ptr).l;
let mut sv_rows = Vec::with_capacity(l as usize);
let sv_ptr = (*model_ptr).SV;
for row in 0..l {
let mut sv_row = Vec::new();
let sv_row_ptr = *sv_ptr.offset(row as isize);
let mut i = 0;
loop {
let node = (*sv_row_ptr.offset(i as isize)).clone();
sv_row.push(node.clone());
if node.index == -1 {
break;
}
i += 1;
}
sv_rows.push(sv_row);
}
sv_rows
}
unsafe fn get_rho(model_ptr: *const LibsvmModel) -> Vec<f64> {
let mut nr_class = (*model_ptr).nr_class as usize;
nr_class = nr_class * (nr_class - 1) / 2;
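// One rho (intercept) per one-vs-one class pair.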
slice::from_raw_parts((*model_ptr).rho, nr_class).to_owned()
}
unsafe fn get_sv_coef(model_ptr: *const LibsvmModel) -> Vec<Vec<f64>> {
let nr_class = (*model_ptr).nr_class as usize;
let l = (*model_ptr).l as usize;
slice::from_raw_parts((*model_ptr).sv_coef, nr_class - 1)
.iter()
.map(|&x| slice::from_raw_parts(x, l).to_owned())
.collect::<Vec<_>>()
}
}
extern "C" {
fn svm_train(prob: *const LibsvmProblem, param: *const LibsvmParameter) -> *const LibsvmModel;
fn svm_predict_values(
svm_model: *mut LibsvmModel,
svm_nodes: *const LibsvmNode,
out: *const f64,
) -> f64;
fn svm_free_and_destroy_model(svm_model: *const *const LibsvmModel);
fn svm_check_parameter(
problem: *const LibsvmProblem,
param: *const LibsvmParameter,
) -> *const c_char;
}
fn check(problem: *const LibsvmProblem, param: *const LibsvmParameter) -> Result<(), String> {
unsafe {
let message = svm_check_parameter(problem, param);
if message.is_null() {
Ok(())
} else {
Err(CStr::from_ptr(message).to_str().unwrap().to_owned())
}
}
}
/// Fit a `libsvm` model.
pub fn fit<'a, T>(X: &'a T, y: &Array, parameters: &SvmParameter) -> Result<SvmModel, &'static str>
where
T: IndexableMatrix,
&'a T: RowIterable,
{
let problem = SvmProblem::new(X, y);
let libsvm_problem = problem.build_problem();
let libsvm_param = parameters.build_libsvm_parameter();
let model_ptr = unsafe {
match check(
&libsvm_problem as *const LibsvmProblem,
&libsvm_param as *const LibsvmParameter,
) {
Ok(_) => {}
Err(error_str) => {
// A bit of a horrible out-of-band error reporting,
// we should switch the model traits to String errors
println!("Libsvm check error: {}", error_str);
return Err("Invalid libsvm parameters.");
}
};
svm_train(
&libsvm_problem as *const LibsvmProblem,
&libsvm_param as *const LibsvmParameter,
)
};
let model = SvmModel::new(parameters.clone(), model_ptr);
unsafe {
// Free the model data allocated by libsvm,
// we've got our own, sane copy.
svm_free_and_destroy_model(&model_ptr);
}
Ok(model)
}
/// Call `libsvm` to get predictions (both predicted classes
/// and `OvO` decision function values).
pub fn predict<'a, T>(model: &SvmModel, X: &'a T) -> (Array, Array)
where
T: IndexableMatrix,
&'a T: RowIterable,
{
let x_rows = X.rows();
let num_classes = model.nr_class as usize;
let ovo_num_classes = num_classes * (num_classes - 1) / 2;
// We are actually mutating this in C, but convincing rustc that is
// safe is a bit of a pain
let df = vec![0.0; x_rows * ovo_num_classes];
let mut df_slice = &df[..];
let mut predicted_class = Vec::with_capacity(x_rows);
// Allocate space for pointers to support vector components,
// we don't need them after we're finished here
// so they will be freed.
let mut sv_ptrs = Vec::new();
let mut sv_coef_ptrs = Vec::new();
let mut libsvm_model = model.get_libsvm_model(&mut sv_ptrs, &mut sv_coef_ptrs);
for (_, row) in X.iter_rows().enumerate() {
let nodes = row_to_nodes(row);
unsafe {
predicted_class.push(svm_predict_values(
&mut libsvm_model as *mut LibsvmModel,
nodes.as_ptr(),
df_slice.as_ptr(),
) as f32);
}
df_slice = &df_slice[ovo_num_classes..];
}
let df_data = df.iter().map(|&x| x as f32).collect::<Vec<_>>();
let mut df_array = Array::from(df_data);
df_array.reshape(x_rows, ovo_num_classes);
(df_array, Array::from(predicted_class))
}
| {
let mut nodes = Vec::with_capacity(X.rows());
for row in X.iter_rows() {
let row_nodes = row_to_nodes(row);
nodes.push(row_nodes)
}
let node_ptrs = nodes.iter().map(|x| x.as_ptr()).collect::<Vec<_>>();
SvmProblem {
nodes: nodes,
node_ptrs: node_ptrs,
y: y.data().iter().map(|&x| x as f64).collect::<Vec<_>>(),
}
} | identifier_body |
auxiliary_plots.py | """This module contains auxiliary functions for plotting which are used in the main notebook."""
import numpy as np
import pandas as pd
import pandas.io.formats.style
import seaborn as sns
import statsmodels as sm
import statsmodels.formula.api as smf
import statsmodels.api as sm_api
import matplotlib.pyplot as plt
import geopandas
from IPython.display import HTML
def worldplot(data):
""" Function to plot a custom colored worldmap. Therefore we have to input a dataframe that contains the column on which
conditionally we want o color the worldmap
Args:
-------
data = pd.DataFrame which contains the column of interest
Returns:
---------
plot
"""
plt.rcParams['font.size'] = 18
world_df= geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'));
world_df = world_df[world_df["iso_a3"].isin(data["recipient_iso3"])];
#world_2df.["OFa_all_con"] = np.nan;
#world_2df.sort_values(by="iso_a3").head()
for i in world_df.index:
for j in data.index:
if world_df.loc[i,"iso_a3"] == data.loc[j,"recipient_iso3"]:
world_df.loc[i,"OFa_all_con"] = data.loc[j, "OFa_all_con"];
fig, ax = plt.subplots(1,1, figsize=(22,14))
ax.axis('off')
fig.suptitle('Chinese Development Finance', fontsize=25)
world_df.plot(column='OFa_all_con', ax = ax, legend=True, legend_kwds={"label":"\n Chinese Development Finance in $10 bln.",
"orientation": "horizontal"},
missing_kwds={"color": "lightgrey",
"edgecolor": "red",
"hatch": "///",
"label": "Missing values"});
###
def worldplot_2(data, cc, pc):
""" Function to plot a custom colored worldmap with help of a standart GeoPandas dataframe. I used the iso3 number of the countries
in order to clearly identify the countries and assign the choosen value (financial amount or project count) to the
specific country
For plotting, we have to input a dataframe that contains the iso3 code of countries (cc). Furthermore you need
to specify the column of the input data that you want to display on the worldmap (pc)
Args:
-------
data = pd.DataFrame which contains the column of interest
cc = name of the column holding the country iso3 codes in the input df
pc = name of the column that we want to plot
Returns:
---------
The return is a formatted plot
"""
# define the columns of input
# cc = data.columns[checkcol]
#pc = data.columns[plotcol]
plt.rcParams['font.size'] = 18
# generate standard GeoPandas dataframe
world_df = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'));
# check indices of the input dataframe and modify the standard GeoPandas df
world_df = world_df[world_df["iso_a3"].isin(data[cc])];
#world_2df.["OFa_all_con"] = np.nan;
#world_2df.sort_values(by="iso_a3").head()
for i in world_df.index:
for j in data.index:
if world_df.loc[i,"iso_a3"] == data.loc[j, cc]:
try:
world_df.loc[i,pc] = data.loc[j, pc];
except:
print("\nError! Invalid Input. Example for input: OFa_all_con")
return
fig, ax = plt.subplots(1,1, figsize=(22,12))
ax.axis('off')
if pc == "OFa_all_con":
fig.suptitle('Chinese Development Finance (financial amount)', fontsize=25)
world_df.plot(column=pc, ax = ax, legend=True, cmap='jet', legend_kwds={"label":"\n Chinese Development Finance in $10 bln (2000-2014)",
"orientation": "horizontal"},
missing_kwds={"color": "lightgrey",
"edgecolor": "red",
"hatch": "///",
"label": "Missing values"});
else:
fig.suptitle('Chinese Development Finance (probability)', fontsize=25)
world_df.plot(column=pc, ax = ax, legend=True, cmap='jet', legend_kwds={"label":"\n Probability of receiving Chinese Development Finance (2000-2014)",###ADDDDJUST!!!!!
"orientation": "horizontal"},
missing_kwds={"color": "lightgrey",
"edgecolor": "red",
"hatch": "///",
"label": "Missing values"});
###
def flow_class_plot(data):
sns.set_theme(style="whitegrid")
fig, ax = plt.subplots(1,2, figsize = (14,6))
plt.subplots_adjust(wspace=0.5)
plotting = data.flow_class.value_counts(1)
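# value_counts(1) sets normalize=True, so `plotting` holds shares of projects rather than raw counts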
plt.subplot(121)
ax = sns.barplot(x=plotting.index, y=plotting.values)
ax.set_ylabel("share")
ax.set_xlabel("Project type")
ax.set_title("Share of project type");
plotting2 = data.groupby("flow_class").usd_defl.sum()
plt.subplot(122)
ax = sns.barplot(x=plotting2.index, y=(plotting2.values/1e6))
ax.set_ylabel("Amount in million USD")
ax.set_xlabel("Project type")
ax.set_title("Financial amount per project type");
plt.plot()
df = pd.DataFrame([["ODA", plotting[0], round(plotting2[0]/1e6,2)],
["OOF", plotting[1], round(plotting2[1]/1e6,2)],
["Vague",plotting[2], round(plotting2[2]/1e6,2)]], columns = ["flow_class", "Share", "Amount in mio 2010 USD"])
#print(f"Number of projects:\nODA-like:{plotting[0]*100}%, \nOOF-like:{plotting[1]*100}%, \nVague OF:{plotting[2]*100}%")
# print(f"ODA-like:{plotting2[0]/1e6:.2f}, \nOOF-like:{plotting2[1]/1e6:.2f}, \nVague OF:{plotting2[2]/1e6:.2f}")
#print((plotting2[0]/1e6)/(plotting2.values.sum()/1e6))
return df
###
def year_plot(data):
sns.set_theme(style="whitegrid")
year = np.unique(data.year)
total_projects_year = data.year.value_counts().sort_index()
ax = sns.barplot(x=year, y=total_projects_year, color= "blue")
ax.set_ylabel("number of projects")
ax.set_xticklabels(["'00", "'01", "'02","'03","'04","'05","'06","'07","'08","'09","'10","'11","'12","'13","'14"])
ax.set_title("number of projects per year");
###
def sectoral_plot(data):
sns.set_theme(style="whitegrid")
sectoral_analysis_df = data.crs_sector_name.value_counts(1).sort_index().to_frame("project_share")
sectoral_analysis_df["in_USD"] = data.groupby("crs_sector_name").usd_defl.sum()
sectoral_analysis_df = sectoral_analysis_df.sort_values(by="in_USD", ascending=False)
# plotting
f, axs = plt.subplots(2,1,figsize=(14,18))
plt.subplot(211)
ax = sns.barplot(y=sectoral_analysis_df.index, x=sectoral_analysis_df.in_USD, color = "darkblue")
ax.set_title("Value per sector");
plt.subplot(212)
ax = sns.barplot(y=sectoral_analysis_df.index, x=sectoral_analysis_df.project_share, color = "lightblue")
ax.set_title("Sare of projects per sector");
# Share of health, education and governance
share_HEG = ((sectoral_analysis_df.loc["Health", "in_USD"] + sectoral_analysis_df.loc["Education", "in_USD"]
+ sectoral_analysis_df.loc["Government and Civil Society", "in_USD"]) / sectoral_analysis_df["in_USD"].sum())
# Share of energy, transport, industry
share_ETI = ((sectoral_analysis_df.loc["Energy Generation and Supply", "in_USD"]
+ sectoral_analysis_df.loc["Industry, Mining, Construction", "in_USD"]
+ sectoral_analysis_df.loc["Transport and Storage", "in_USD"]) / sectoral_analysis_df["in_USD"].sum())
print(f"All projects of the health-, education, and governance sector account for {share_HEG*100:.2f}% of the total financial value,\nwhereas the energy-, transportation, and industry/mining sector accounts for {share_ETI*100:.2f}%")
###
def financeform_plot(data):
|
###
def quali_descriptive_plots(data, liste):
sns.set_theme(style="whitegrid")
fig, axes = plt.subplots(3, 2, figsize=(14, 16))
# Use the axes for plotting
axes[0,0].set_title(liste[0])
sns.violinplot(x=liste[0], y="OFn_all", data=data, ax=axes[0,0], inner = "quartiles");
axes[0,1].set_title(liste[0])
ax = sns.violinplot(x=liste[0], y="OFa_all_con", data=data, ax=axes[0,1], inner = "quartiles")
axes[1,0].set_title(liste[1])
sns.violinplot(x=liste[1], y="OFn_all", data=data,ax=axes[1,0], inner = "quartiles");
axes[1,1].set_title(liste[1])
ax = sns.violinplot(x=liste[1], y="OFa_all_con", data=data,ax=axes[1,1], inner = "quartiles");
axes[2,0].set_title(liste[2])
sns.violinplot(x=liste[2], y="OFn_all", data=data,ax=axes[2,0], inner = "quartiles");
axes[2,1].set_title(liste[2])
ax = sns.violinplot(x=liste[2], y="OFa_all_con", data=data,ax=axes[2,1], inner = "quartiles");
plt.tight_layout(pad=2.5);
###
def quanti_descriptive_plots(data, liste, hue):
sns.set_theme(style="whitegrid")
fig, axes = plt.subplots(4, 2, figsize=(14, 16))
# Use the axes for plotting
axes[0,0].set_title(liste[0])
sns.scatterplot(x=liste[0], y="OFn_all", data=data, ax=axes[0,0], hue = hue)
axes[0,1].set_title(liste[0])
ax = sns.scatterplot(x=liste[0], y="OFa_all_con", data=data, ax=axes[0,1], hue = hue)
axes[1,0].set_title(liste[1])
sns.scatterplot(x=liste[1], y="OFn_all", data=data,ax=axes[1,0], hue = hue);
axes[1,1].set_title(liste[1])
ax = sns.scatterplot(x=liste[1], y="OFa_all_con", data=data,ax=axes[1,1], hue = hue);
axes[2,0].set_title(liste[2])
sns.scatterplot(x=liste[2], y="OFn_all", data=data,ax=axes[2,0], hue = hue);
axes[2,1].set_title(liste[2])
ax = sns.scatterplot(x=liste[2], y="OFa_all_con", data=data,ax=axes[2,1], hue = hue);
axes[3,0].set_title(liste[3])
sns.scatterplot(x=liste[3], y="OFn_all", data=data,ax=axes[3,0], hue = hue);
axes[3,1].set_title(liste[3])
ax = sns.scatterplot(x=liste[3], y="OFa_all_con", data=data,ax=axes[3,1], hue = hue);
plt.tight_layout(pad=2.5);
###
def plot_parallel_trends(results_df, data4):
# Since I hard-code the x tick labels, matplotlib raises a warning that I suppress here
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
input_data = pd.read_stata("data/test_instruments2.dta")
sns.set_theme(style="whitegrid")
fig, axes = plt.subplots(2,2, figsize=(15, 15))
plt.subplot(221)
fig.suptitle("Parallel trends assumption", fontsize = 20)
#fig.suptitle("Parallel trends assumption: Material and Reserves", fontsize = 20)
for plots in ["ln_steel", "ln_iron", "ln_timber", "ln_glass", "ln_aluminum"]:
ax = sns.lineplot(x = "year", y = plots, data = input_data, label = plots)
ax.set_ylabel("(log) production volume of respective input)")
ax.set_title("Panel A: Chinas (logged) production of input materials over time");
plt.legend(fontsize = 15)
plt.subplot(222)
ax3 = sns.lineplot(x="year",y= "lower_probGrowthpc", data = results_df, label = "below median prob", )
ax3 = sns.lineplot(x ="year",y= "upper_probGrowthpc", data = results_df, label = "above median prob")
ax3.set_xticklabels(["2002","2003","2004","2005","2006","2007","2008","2009","2010","2011","2012","2013","2014"]);
ax3.set_ylabel("Average growth p.c.")
ax3.set_title("Panel B: Average Growth along countries within groups")
plt.legend(fontsize=15)
plt.subplot(223)
ax = sns.lineplot(x= "year", y= "lower_probOFn_ln", data = results_df, label = "below median OFn")
ax = sns.lineplot(x= "year", y= "upper_probOFn_ln", data = results_df, label = "above median OFn")
ax = sns.lineplot(x="year", y="lower_probOFa_ln", data = results_df, label = "below median OFa")
ax = sns.lineplot(x="year", y="upper_probOFa_ln", data = results_df, label = "above median OFa")
ax.set_ylabel("(log) count and financial amount of projects in t-2")
ax.set_xticklabels(["2002","2003","2004","2005","2006","2007","2008","2009","2010","2011","2012","2013","2014"]);
ax.set_title("Panel C: Chinese (logged and lagged) project count and financial amounts for groups ")
plt.legend(fontsize = 15)
data_mod = data4[(data4.year >= "2002-01-01") & (data4.countryname == "Angola")]
plt.subplot(224)
ax2 = sns.lineplot(x = "year", y = "l3Reserves", data = data_mod, label = "Reserves (t-3)")
ax2.set_ylabel("Change in foreign exchange reserves")
ax2.set_title("Panel D: Chinas change in net foreign exchange reserves in trillion 2010 USD")
plt.legend(fontsize=15)
plt.tight_layout(pad=2.5);
| sns.set_theme(style="whitegrid")
f, axs = plt.subplots(2,1, figsize = (12,10))
plt.subplots_adjust(wspace=0.25)
plt.subplot(211)
ODA_like = data[data.flow_class == "ODA-like"].flow.value_counts()
ax = sns.barplot(y=ODA_like.index, x=ODA_like.values)
ax.set_xlabel("Number of projects")
ax.set_title("Financeform of ODA-like projects");
plt.subplot(212)
OOFv_like = data[data.flow_class == ("OOF-like" or "Vague (Official Finance)")].flow.value_counts()
ax = sns.barplot(y=OOFv_like.index, x=OOFv_like.values)
ax.set_xlabel("Number of projects")
ax.set_title("Financeform of OOFV-like projects");
plt.tight_layout(pad=2.5); | identifier_body |
auxiliary_plots.py | """This module contains auxiliary functions for plotting which are used in the main notebook."""
import numpy as np
import pandas as pd
import pandas.io.formats.style
import seaborn as sns
import statsmodels as sm
import statsmodels.formula.api as smf
import statsmodels.api as sm_api
import matplotlib.pyplot as plt
import geopandas
from IPython.display import HTML
def worldplot(data):
""" Function to plot a custom colored worldmap. Therefore we have to input a dataframe that contains the column on which
conditionally we want o color the worldmap
Args:
-------
data = pd.DataFrame which contains the column of interest
Returns:
---------
plot
"""
plt.rcParams['font.size'] = 18
world_df= geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'));
world_df = world_df[world_df["iso_a3"].isin(data["recipient_iso3"])];
#world_2df.["OFa_all_con"] = np.nan;
#world_2df.sort_values(by="iso_a3").head()
for i in world_df.index:
for j in data.index:
if world_df.loc[i,"iso_a3"] == data.loc[j,"recipient_iso3"]:
world_df.loc[i,"OFa_all_con"] = data.loc[j, "OFa_all_con"];
fig, ax = plt.subplots(1,1, figsize=(22,14))
ax.axis('off')
fig.suptitle('Chinese Development Finance', fontsize=25)
world_df.plot(column='OFa_all_con', ax = ax, legend=True, legend_kwds={"label":"\n Chinese Development Finance in $10 bln.",
"orientation": "horizontal"},
missing_kwds={"color": "lightgrey",
"edgecolor": "red",
"hatch": "///",
"label": "Missing values"});
###
def worldplot_2(data, cc, pc):
""" Function to plot a custom colored worldmap with help of a standart GeoPandas dataframe. I used the iso3 number of the countries
in order to clearly identify the countries and assign the choosen value (financial amount or project count) to the
specific country
For plotting, we have to input a dataframe that contains the iso3 code of countries (cc). Furthermore you need
to specify the column of the input data that you want to display on the worldmap (pc)
Args:
-------
data = pd.DataFrame which contains the column of interest
cc = name of the column holding the country iso3 codes in the input df
pc = name of the column that we want to plot
Returns:
---------
The return is a formatted plot
"""
# define the columns of input
# cc = data.columns[checkcol]
#pc = data.columns[plotcol]
plt.rcParams['font.size'] = 18
# generate standard GeoPandas dataframe
world_df = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'));
# check indices of the input dataframe and modify the standard GeoPandas df
world_df = world_df[world_df["iso_a3"].isin(data[cc])];
#world_2df.["OFa_all_con"] = np.nan;
#world_2df.sort_values(by="iso_a3").head()
for i in world_df.index:
for j in data.index:
if world_df.loc[i,"iso_a3"] == data.loc[j, cc]:
try:
world_df.loc[i,pc] = data.loc[j, pc];
except:
print("\nError! Invalid Input. Example for input: OFa_all_con")
return
fig, ax = plt.subplots(1,1, figsize=(22,12))
ax.axis('off')
if pc == "OFa_all_con":
fig.suptitle('Chinese Development Finance (financial amount)', fontsize=25)
world_df.plot(column=pc, ax = ax, legend=True, cmap='jet', legend_kwds={"label":"\n Chinese Development Finance in $10 bln (2000-2014)",
"orientation": "horizontal"},
missing_kwds={"color": "lightgrey",
"edgecolor": "red",
"hatch": "///",
"label": "Missing values"});
else:
|
###
def flow_class_plot(data):
sns.set_theme(style="whitegrid")
fig, ax = plt.subplots(1,2, figsize = (14,6))
plt.subplots_adjust(wspace=0.5)
plotting = data.flow_class.value_counts(1)
plt.subplot(121)
ax = sns.barplot(x=plotting.index, y=plotting.values)
ax.set_ylabel("share")
ax.set_xlabel("Project type")
ax.set_title("Share of project type");
plotting2 = data.groupby("flow_class").usd_defl.sum()
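# Total deflated USD per flow class; divided by 1e6 below so the bars are in millions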
plt.subplot(122)
ax = sns.barplot(x=plotting2.index, y=(plotting2.values/1e6))
ax.set_ylabel("Amount in million USD")
ax.set_xlabel("Project type")
ax.set_title("Financial amount per project type");
plt.plot()
df = pd.DataFrame([["ODA", plotting[0], round(plotting2[0]/1e6,2)],
["OOF", plotting[1], round(plotting2[1]/1e6,2)],
["Vague",plotting[2], round(plotting2[2]/1e6,2)]], columns = ["flow_class", "Share", "Amount in mio 2010 USD"])
#print(f"Number of projects:\nODA-like:{plotting[0]*100}%, \nOOF-like:{plotting[1]*100}%, \nVague OF:{plotting[2]*100}%")
# print(f"ODA-like:{plotting2[0]/1e6:.2f}, \nOOF-like:{plotting2[1]/1e6:.2f}, \nVague OF:{plotting2[2]/1e6:.2f}")
#print((plotting2[0]/1e6)/(plotting2.values.sum()/1e6))
return df
###
def year_plot(data):
sns.set_theme(style="whitegrid")
year = np.unique(data.year)
total_projects_year = data.year.value_counts().sort_index()
ax = sns.barplot(x=year, y=total_projects_year, color= "blue")
ax.set_ylabel("number of projects")
ax.set_xticklabels(["'00", "'01", "'02","'03","'04","'05","'06","'07","'08","'09","'10","'11","'12","'13","'14"])
ax.set_title("number of projects per year");
###
def sectoral_plot(data):
sns.set_theme(style="whitegrid")
sectoral_analysis_df = data.crs_sector_name.value_counts(1).sort_index().to_frame("project_share")
sectoral_analysis_df["in_USD"] = data.groupby("crs_sector_name").usd_defl.sum()
sectoral_analysis_df = sectoral_analysis_df.sort_values(by="in_USD", ascending=False)
# plotting
f, axs = plt.subplots(2,1,figsize=(14,18))
plt.subplot(211)
ax = sns.barplot(y=sectoral_analysis_df.index, x=sectoral_analysis_df.in_USD, color = "darkblue")
ax.set_title("Value per sector");
plt.subplot(212)
ax = sns.barplot(y=sectoral_analysis_df.index, x=sectoral_analysis_df.project_share, color = "lightblue")
ax.set_title("Sare of projects per sector");
# Share of health, education and governance
share_HEG = ((sectoral_analysis_df.loc["Health", "in_USD"] + sectoral_analysis_df.loc["Education", "in_USD"]
+ sectoral_analysis_df.loc["Government and Civil Society", "in_USD"]) / sectoral_analysis_df["in_USD"].sum())
# Share of energy, transport, industry
share_ETI = ((sectoral_analysis_df.loc["Energy Generation and Supply", "in_USD"]
+ sectoral_analysis_df.loc["Industry, Mining, Construction", "in_USD"]
+ sectoral_analysis_df.loc["Transport and Storage", "in_USD"]) / sectoral_analysis_df["in_USD"].sum())
print(f"All projects of the health-, education, and governance sector account for {share_HEG*100:.2f}% of the total financial value,\nwhereas the energy-, transportation, and industry/mining sector accounts for {share_ETI*100:.2f}%")
###
def financeform_plot(data):
sns.set_theme(style="whitegrid")
f, axs = plt.subplots(2,1, figsize = (12,10))
plt.subplots_adjust(wspace=0.25)
plt.subplot(211)
ODA_like = data[data.flow_class == "ODA-like"].flow.value_counts()
ax = sns.barplot(y=ODA_like.index, x=ODA_like.values)
ax.set_xlabel("Number of projects")
ax.set_title("Financeform of ODA-like projects");
plt.subplot(212)
OOFv_like = data[data.flow_class == ("OOF-like" or "Vague (Official Finance)")].flow.value_counts()
ax = sns.barplot(y=OOFv_like.index, x=OOFv_like.values)
ax.set_xlabel("Number of projects")
ax.set_title("Financeform of OOFV-like projects");
plt.tight_layout(pad=2.5);
###
def quali_descriptive_plots(data, liste):
sns.set_theme(style="whitegrid")
fig, axes = plt.subplots(3, 2, figsize=(14, 16))
# Use the axes for plotting
axes[0,0].set_title(liste[0])
sns.violinplot(x=liste[0], y="OFn_all", data=data, ax=axes[0,0], inner = "quartiles");
axes[0,1].set_title(liste[0])
ax = sns.violinplot(x=liste[0], y="OFa_all_con", data=data, ax=axes[0,1], inner = "quartiles")
axes[1,0].set_title(liste[1])
sns.violinplot(x=liste[1], y="OFn_all", data=data,ax=axes[1,0], inner = "quartiles");
axes[1,1].set_title(liste[1])
ax = sns.violinplot(x=liste[1], y="OFa_all_con", data=data,ax=axes[1,1], inner = "quartiles");
axes[2,0].set_title(liste[2])
sns.violinplot(x=liste[2], y="OFn_all", data=data,ax=axes[2,0], inner = "quartiles");
axes[2,1].set_title(liste[2])
ax = sns.violinplot(x=liste[2], y="OFa_all_con", data=data,ax=axes[2,1], inner = "quartiles");
plt.tight_layout(pad=2.5);
###
def quanti_descriptive_plots(data, liste, hue):
sns.set_theme(style="whitegrid")
fig, axes = plt.subplots(4, 2, figsize=(14, 16))
# Use the axes for plotting
axes[0,0].set_title(liste[0])
sns.scatterplot(x=liste[0], y="OFn_all", data=data, ax=axes[0,0], hue = hue)
axes[0,1].set_title(liste[0])
ax = sns.scatterplot(x=liste[0], y="OFa_all_con", data=data, ax=axes[0,1], hue = hue)
axes[1,0].set_title(liste[1])
sns.scatterplot(x=liste[1], y="OFn_all", data=data,ax=axes[1,0], hue = hue);
axes[1,1].set_title(liste[1])
ax = sns.scatterplot(x=liste[1], y="OFa_all_con", data=data,ax=axes[1,1], hue = hue);
axes[2,0].set_title(liste[2])
sns.scatterplot(x=liste[2], y="OFn_all", data=data,ax=axes[2,0], hue = hue);
axes[2,1].set_title(liste[2])
ax = sns.scatterplot(x=liste[2], y="OFa_all_con", data=data,ax=axes[2,1], hue = hue);
axes[3,0].set_title(liste[3])
sns.scatterplot(x=liste[3], y="OFn_all", data=data,ax=axes[3,0], hue = hue);
axes[3,1].set_title(liste[3])
ax = sns.scatterplot(x=liste[3], y="OFa_all_con", data=data,ax=axes[3,1], hue = hue);
plt.tight_layout(pad=2.5);
###
def plot_parallel_trends(results_df, data4):
# Since I hard-code the x tick labels, I get a warning that I suppress here
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
input_data = pd.read_stata("data/test_instruments2.dta")
sns.set_theme(style="whitegrid")
fig, axes = plt.subplots(2,2, figsize=(15, 15))
plt.subplot(221)
fig.suptitle("Parallel trends assumption", fontsize = 20)
#fig.suptitle("Parallel trends assumption: Material and Reserves", fontsize = 20)
for plots in ["ln_steel", "ln_iron", "ln_timber", "ln_glass", "ln_aluminum"]:
ax = sns.lineplot(x = "year", y = plots, data = input_data, label = plots)
ax.set_ylabel("(log) production volume of respective input)")
ax.set_title("Panel A: Chinas (logged) production of input materials over time");
plt.legend(fontsize = 15)
plt.subplot(222)
ax3 = sns.lineplot(x="year",y= "lower_probGrowthpc", data = results_df, label = "below median prob", )
ax3 = sns.lineplot(x ="year",y= "upper_probGrowthpc", data = results_df, label = "above median prob")
ax3.set_xticklabels(["2002","2003","2004","2005","2006","2007","2008","2009","2010","2011","2012","2013","2014"]);
ax3.set_ylabel("Average growth p.c.")
ax3.set_title("Panel B: Average Growth along countries within groups")
plt.legend(fontsize=15)
plt.subplot(223)
ax = sns.lineplot(x= "year", y= "lower_probOFn_ln", data = results_df, label = "below median OFn")
ax = sns.lineplot(x= "year", y= "upper_probOFn_ln", data = results_df, label = "above median OFn")
ax = sns.lineplot(x="year", y="lower_probOFa_ln", data = results_df, label = "below median OFa")
ax = sns.lineplot(x="year", y="upper_probOFa_ln", data = results_df, label = "above median OFa")
ax.set_ylabel("(log) count and financial amount of projects in t-2")
ax.set_xticklabels(["2002","2003","2004","2005","2006","2007","2008","2009","2010","2011","2012","2013","2014"]);
ax.set_title("Panel C: Chinese (logged and lagged) project count and financial amounts for groups ")
plt.legend(fontsize = 15)
data_mod = data4[(data4.year >= "2002-01-01") & (data4.countryname == "Angola")]
plt.subplot(224)
ax2 = sns.lineplot(x = "year", y = "l3Reserves", data = data_mod, label = "Reserves (t-3)")
ax2.set_ylabel("Change in foreign exchange reserves")
ax2.set_title("Panel D: Chinas change in net foreign exchange reserves in trillion 2010 USD")
plt.legend(fontsize=15)
plt.tight_layout(pad=2.5);
| fig.suptitle('Chinese Development Finance (probability)', fontsize=25)
world_df.plot(column=pc, ax = ax, legend=True, cmap='jet', legend_kwds={"label":"\n Probability of receiving Chinese Development Finance (2000-2014)",  # TODO: adjust label text
"orientation": "horizontal"},
missing_kwds={"color": "lightgrey",
"edgecolor": "red",
"hatch": "///",
"label": "Missing values"}); | conditional_block |
auxiliary_plots.py | """This module contains auxiliary functions for plotting which are used in the main notebook."""
import numpy as np
import pandas as pd
import pandas.io.formats.style
import seaborn as sns
import statsmodels as sm
import statsmodels.formula.api as smf
import statsmodels.api as sm_api
import matplotlib.pyplot as plt
import geopandas
from IPython.display import HTML
def worldplot(data):
""" Function to plot a custom colored worldmap. Therefore we have to input a dataframe that contains the column on which
conditionally we want o color the worldmap
Args:
-------
data = pd.dataframe wich contains column of interes
Returns:
---------
plot
"""
plt.rcParams['font.size'] = 18
world_df= geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'));
world_df = world_df[world_df["iso_a3"].isin(data["recipient_iso3"])];
#world_2df.["OFa_all_con"] = np.nan;
#world_2df.sort_values(by="iso_a3").head()
for i in world_df.index:
for j in data.index:
if world_df.loc[i,"iso_a3"] == data.loc[j,"recipient_iso3"]:
world_df.loc[i,"OFa_all_con"] = data.loc[j, "OFa_all_con"];
fig, ax = plt.subplots(1,1, figsize=(22,14))
ax.axis('off')
fig.suptitle('Chinese Development Finance', fontsize=25)
world_df.plot(column='OFa_all_con', ax = ax, legend=True, legend_kwds={"label":"\n Chinese Development Finance in $10 bln.",
"orientation": "horizontal"},
missing_kwds={"color": "lightgrey",
"edgecolor": "red",
"hatch": "///",
"label": "Missing values"});
###
def worldplot_2(data, cc, pc):
""" Function to plot a custom colored worldmap with help of a standart GeoPandas dataframe. I used the iso3 number of the countries
in order to clearly identify the countries and assign the choosen value (financial amount or project count) to the
specific country
For plotting, we have to input a dataframe that contains the iso3 code of countries (cc). Furthermore you need
to specify the column of the input data that you want to display on the worldmap (pc)
Args:
-------
data = pd.dataframe wich contains column of interest
cc = columnnumber of country of input df
pc = the columnnumber of the column that we want to plot
Returns:
---------
The return is a formated plot
"""
# define the columns of input
# cc = data.columns[checkcol]
#pc = data.columns[plotcol]
plt.rcParams['font.size'] = 18
# generate standard geopandas dataframe
world_df = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'));
# check indices of the input dataframe and modify the standard geopandas df
world_df = world_df[world_df["iso_a3"].isin(data[cc])];
#world_2df.["OFa_all_con"] = np.nan;
#world_2df.sort_values(by="iso_a3").head()
for i in world_df.index:
for j in data.index:
if world_df.loc[i,"iso_a3"] == data.loc[j, cc]:
try:
world_df.loc[i,pc] = data.loc[j, pc];
except:
print("\nError! Invalid Input. Example for input: OFa_all_con")
return
fig, ax = plt.subplots(1,1, figsize=(22,12))
ax.axis('off')
if pc == "OFa_all_con":
fig.suptitle('Chinese Development Finance (financial amount)', fontsize=25)
world_df.plot(column=pc, ax = ax, legend=True, cmap='jet', legend_kwds={"label":"\n Chinese Development Finance in $10 bln (2000-2014)",
"orientation": "horizontal"},
missing_kwds={"color": "lightgrey",
"edgecolor": "red",
"hatch": "///",
"label": "Missing values"});
else:
fig.suptitle('Chinese Development Finance (probability)', fontsize=25)
world_df.plot(column=pc, ax = ax, legend=True, cmap='jet', legend_kwds={"label":"\n Probability of receiving Chinese Development Finance (2000-2014)",  # TODO: adjust label text
"orientation": "horizontal"},
missing_kwds={"color": "lightgrey",
"edgecolor": "red",
"hatch": "///",
"label": "Missing values"});
###
def flow_class_plot(data):
sns.set_theme(style="whitegrid")
fig, ax = plt.subplots(1,2, figsize = (14,6))
plt.subplots_adjust(wspace=0.5)
plotting = data.flow_class.value_counts(1)
plt.subplot(121)
ax = sns.barplot(x=plotting.index, y=plotting.values)
ax.set_ylabel("share")
ax.set_xlabel("Project type")
ax.set_title("Share of project type");
plotting2 = data.groupby("flow_class").usd_defl.sum()
plt.subplot(122)
ax = sns.barplot(x=plotting2.index, y=(plotting2.values/1e6))
ax.set_ylabel("Amount in million USD")
ax.set_xlabel("Project type")
ax.set_title("Financial amount per project type");
plt.plot()
df = pd.DataFrame([["ODA", plotting[0], round(plotting2[0]/1e6,2)],
["OOF", plotting[1], round(plotting2[1]/1e6,2)],
["Vague",plotting[2], round(plotting2[2]/1e6,2)]], columns = ["flow_class", "Share", "Amount in mio 2010 USD"])
#print(f"Number of projects:\nODA-like:{plotting[0]*100}%, \nOOF-like:{plotting[1]*100}%, \nVague OF:{plotting[2]*100}%")
# print(f"ODA-like:{plotting2[0]/1e6:.2f}, \nOOF-like:{plotting2[1]/1e6:.2f}, \nVague OF:{plotting2[2]/1e6:.2f}")
#print((plotting2[0]/1e6)/(plotting2.values.sum()/1e6))
return df
###
def year_plot(data):
sns.set_theme(style="whitegrid")
year = np.unique(data.year)
total_projects_year = data.year.value_counts().sort_index()
ax = sns.barplot(x=year, y=total_projects_year, color= "blue")
ax.set_ylabel("number of projects")
ax.set_xticklabels(["'00", "'01", "'02","'03","'04","'05","'06","'07","'08","'09","'10","'11","'12","'13","'14"])
ax.set_title("number of projects per year");
###
def sectoral_plot(data):
sns.set_theme(style="whitegrid")
sectoral_analysis_df = data.crs_sector_name.value_counts(1).sort_index().to_frame("project_share")
sectoral_analysis_df["in_USD"] = data.groupby("crs_sector_name").usd_defl.sum()
sectoral_analysis_df = sectoral_analysis_df.sort_values(by="in_USD", ascending=False)
# plotting
f, axs = plt.subplots(2,1,figsize=(14,18))
plt.subplot(211)
ax = sns.barplot(y=sectoral_analysis_df.index, x=sectoral_analysis_df.in_USD, color = "darkblue")
ax.set_title("Value per sector");
plt.subplot(212)
ax = sns.barplot(y=sectoral_analysis_df.index, x=sectoral_analysis_df.project_share, color = "lightblue")
ax.set_title("Sare of projects per sector");
# Share of health, education and governance
share_HEG = ((sectoral_analysis_df.loc["Health", "in_USD"] + sectoral_analysis_df.loc["Education", "in_USD"]
+ sectoral_analysis_df.loc["Government and Civil Society", "in_USD"]) / sectoral_analysis_df["in_USD"].sum())
# Share of energy, transport, industry
share_ETI = ((sectoral_analysis_df.loc["Energy Generation and Supply", "in_USD"]
+ sectoral_analysis_df.loc["Industry, Mining, Construction", "in_USD"]
+ sectoral_analysis_df.loc["Transport and Storage", "in_USD"]) / sectoral_analysis_df["in_USD"].sum())
print(f"All projects of the health-, education, and governance sector account for {share_HEG*100:.2f}% of the total financial value,\nwhereas the energy-, transportation, and industry/mining sector accounts for {share_ETI*100:.2f}%")
###
def financeform_plot(data):
sns.set_theme(style="whitegrid")
f, axs = plt.subplots(2,1, figsize = (12,10))
plt.subplots_adjust(wspace=0.25)
plt.subplot(211)
ODA_like = data[data.flow_class == "ODA-like"].flow.value_counts()
ax = sns.barplot(y=ODA_like.index, x=ODA_like.values)
ax.set_xlabel("Number of projects")
ax.set_title("Financeform of ODA-like projects");
plt.subplot(212)
OOFv_like = data[data.flow_class.isin(["OOF-like", "Vague (Official Finance)"])].flow.value_counts()
ax = sns.barplot(y=OOFv_like.index, x=OOFv_like.values)
ax.set_xlabel("Number of projects")
ax.set_title("Financeform of OOFV-like projects");
plt.tight_layout(pad=2.5);
###
def quali_descriptive_plots(data, liste):
sns.set_theme(style="whitegrid")
fig, axes = plt.subplots(3, 2, figsize=(14, 16))
# Use the axes for plotting
axes[0,0].set_title(liste[0])
sns.violinplot(x=liste[0], y="OFn_all", data=data, ax=axes[0,0], inner = "quartiles");
axes[0,1].set_title(liste[0])
ax = sns.violinplot(x=liste[0], y="OFa_all_con", data=data, ax=axes[0,1], inner = "quartiles")
axes[1,0].set_title(liste[1])
sns.violinplot(x=liste[1], y="OFn_all", data=data,ax=axes[1,0], inner = "quartiles");
axes[1,1].set_title(liste[1])
ax = sns.violinplot(x=liste[1], y="OFa_all_con", data=data,ax=axes[1,1], inner = "quartiles");
axes[2,0].set_title(liste[2])
sns.violinplot(x=liste[2], y="OFn_all", data=data,ax=axes[2,0], inner = "quartiles");
axes[2,1].set_title(liste[2])
ax = sns.violinplot(x=liste[2], y="OFa_all_con", data=data,ax=axes[2,1], inner = "quartiles");
plt.tight_layout(pad=2.5);
###
def quanti_descriptive_plots(data, liste, hue):
sns.set_theme(style="whitegrid")
fig, axes = plt.subplots(4, 2, figsize=(14, 16))
# Use the axes for plotting
axes[0,0].set_title(liste[0])
sns.scatterplot(x=liste[0], y="OFn_all", data=data, ax=axes[0,0], hue = hue)
axes[0,1].set_title(liste[0])
ax = sns.scatterplot(x=liste[0], y="OFa_all_con", data=data, ax=axes[0,1], hue = hue)
axes[1,0].set_title(liste[1])
sns.scatterplot(x=liste[1], y="OFn_all", data=data,ax=axes[1,0], hue = hue);
axes[1,1].set_title(liste[1])
ax = sns.scatterplot(x=liste[1], y="OFa_all_con", data=data,ax=axes[1,1], hue = hue);
axes[2,0].set_title(liste[2])
sns.scatterplot(x=liste[2], y="OFn_all", data=data,ax=axes[2,0], hue = hue);
axes[2,1].set_title(liste[2])
ax = sns.scatterplot(x=liste[2], y="OFa_all_con", data=data,ax=axes[2,1], hue = hue);
axes[3,0].set_title(liste[3])
sns.scatterplot(x=liste[3], y="OFn_all", data=data,ax=axes[3,0], hue = hue);
axes[3,1].set_title(liste[3])
ax = sns.scatterplot(x=liste[3], y="OFa_all_con", data=data,ax=axes[3,1], hue = hue);
plt.tight_layout(pad=2.5);
###
def plot_parallel_trends(results_df, data4):
# Since I hard-code the x tick labels, I get a warning that I suppress here
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
input_data = pd.read_stata("data/test_instruments2.dta")
sns.set_theme(style="whitegrid")
fig, axes = plt.subplots(2,2, figsize=(15, 15))
plt.subplot(221)
fig.suptitle("Parallel trends assumption", fontsize = 20)
#fig.suptitle("Parallel trends assumption: Material and Reserves", fontsize = 20)
for plots in ["ln_steel", "ln_iron", "ln_timber", "ln_glass", "ln_aluminum"]:
ax = sns.lineplot(x = "year", y = plots, data = input_data, label = plots)
ax.set_ylabel("(log) production volume of respective input)")
ax.set_title("Panel A: Chinas (logged) production of input materials over time");
plt.legend(fontsize = 15)
plt.subplot(222)
ax3 = sns.lineplot(x="year",y= "lower_probGrowthpc", data = results_df, label = "below median prob", )
ax3 = sns.lineplot(x ="year",y= "upper_probGrowthpc", data = results_df, label = "above median prob") | ax3.set_ylabel("Average growth p.c.")
ax3.set_title("Panel B: Average Growth along countries within groups")
plt.legend(fontsize=15)
plt.subplot(223)
ax = sns.lineplot(x= "year", y= "lower_probOFn_ln", data = results_df, label = "below median OFn")
ax = sns.lineplot(x= "year", y= "upper_probOFn_ln", data = results_df, label = "above median OFn")
ax = sns.lineplot(x="year", y="lower_probOFa_ln", data = results_df, label = "below median OFa")
ax = sns.lineplot(x="year", y="upper_probOFa_ln", data = results_df, label = "above median OFa")
ax.set_ylabel("(log) count and financial amount of projects in t-2")
ax.set_xticklabels(["2002","2003","2004","2005","2006","2007","2008","2009","2010","2011","2012","2013","2014"]);
ax.set_title("Panel C: Chinese (logged and lagged) project count and financial amounts for groups ")
plt.legend(fontsize = 15)
data_mod = data4[(data4.year >= "2002-01-01") & (data4.countryname == "Angola")]
plt.subplot(224)
ax2 = sns.lineplot(x = "year", y = "l3Reserves", data = data_mod, label = "Reserves (t-3)")
ax2.set_ylabel("Change in foreign exchange reserves")
ax2.set_title("Panel D: Chinas change in net foreign exchange reserves in trillion 2010 USD")
plt.legend(fontsize=15)
plt.tight_layout(pad=2.5); | ax3.set_xticklabels(["2002","2003","2004","2005","2006","2007","2008","2009","2010","2011","2012","2013","2014"]); | random_line_split |
auxiliary_plots.py | """This module contains auxiliary functions for plotting which are used in the main notebook."""
import numpy as np
import pandas as pd
import pandas.io.formats.style
import seaborn as sns
import statsmodels as sm
import statsmodels.formula.api as smf
import statsmodels.api as sm_api
import matplotlib.pyplot as plt
import geopandas
from IPython.display import HTML
def worldplot(data):
""" Function to plot a custom colored worldmap. Therefore we have to input a dataframe that contains the column on which
conditionally we want o color the worldmap
Args:
-------
data = pd.dataframe wich contains column of interes
Returns:
---------
plot
"""
plt.rcParams['font.size'] = 18
world_df= geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'));
world_df = world_df[world_df["iso_a3"].isin(data["recipient_iso3"])];
#world_2df.["OFa_all_con"] = np.nan;
#world_2df.sort_values(by="iso_a3").head()
for i in world_df.index:
for j in data.index:
if world_df.loc[i,"iso_a3"] == data.loc[j,"recipient_iso3"]:
world_df.loc[i,"OFa_all_con"] = data.loc[j, "OFa_all_con"];
fig, ax = plt.subplots(1,1, figsize=(22,14))
ax.axis('off')
fig.suptitle('Chinese Development Finance', fontsize=25)
world_df.plot(column='OFa_all_con', ax = ax, legend=True, legend_kwds={"label":"\n Chinese Development Finance in $10 bln.",
"orientation": "horizontal"},
missing_kwds={"color": "lightgrey",
"edgecolor": "red",
"hatch": "///",
"label": "Missing values"});
###
def | (data, cc, pc):
""" Function to plot a custom colored worldmap with help of a standart GeoPandas dataframe. I used the iso3 number of the countries
in order to clearly identify the countries and assign the choosen value (financial amount or project count) to the
specific country
For plotting, we have to input a dataframe that contains the iso3 code of countries (cc). Furthermore you need
to specify the column of the input data that you want to display on the worldmap (pc)
Args:
-------
data = pd.dataframe wich contains column of interest
cc = columnnumber of country of input df
pc = the columnnumber of the column that we want to plot
Returns:
---------
The return is a formated plot
"""
# define the columns of input
# cc = data.columns[checkcol]
#pc = data.columns[plotcol]
plt.rcParams['font.size'] = 18
# generate standard geopandas dataframe
world_df = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'));
# check indices of the input dataframe and modify the standard geopandas df
world_df = world_df[world_df["iso_a3"].isin(data[cc])];
#world_2df.["OFa_all_con"] = np.nan;
#world_2df.sort_values(by="iso_a3").head()
for i in world_df.index:
for j in data.index:
if world_df.loc[i,"iso_a3"] == data.loc[j, cc]:
try:
world_df.loc[i,pc] = data.loc[j, pc];
except:
print("\nError! Invalid Input. Example for input: OFa_all_con")
return
fig, ax = plt.subplots(1,1, figsize=(22,12))
ax.axis('off')
if pc == "OFa_all_con":
fig.suptitle('Chinese Development Finance (financial amount)', fontsize=25)
world_df.plot(column=pc, ax = ax, legend=True, cmap='jet', legend_kwds={"label":"\n Chinese Development Finance in $10 bln (2000-2014)",
"orientation": "horizontal"},
missing_kwds={"color": "lightgrey",
"edgecolor": "red",
"hatch": "///",
"label": "Missing values"});
else:
fig.suptitle('Chinese Development Finance (probability)', fontsize=25)
world_df.plot(column=pc, ax = ax, legend=True, cmap='jet', legend_kwds={"label":"\n Probability of receiving Chinese Development Finance (2000-2014)",  # TODO: adjust label text
"orientation": "horizontal"},
missing_kwds={"color": "lightgrey",
"edgecolor": "red",
"hatch": "///",
"label": "Missing values"});
###
def flow_class_plot(data):
sns.set_theme(style="whitegrid")
fig, ax = plt.subplots(1,2, figsize = (14,6))
plt.subplots_adjust(wspace=0.5)
plotting = data.flow_class.value_counts(1)
plt.subplot(121)
ax = sns.barplot(x=plotting.index, y=plotting.values)
ax.set_ylabel("share")
ax.set_xlabel("Project type")
ax.set_title("Share of project type");
plotting2 = data.groupby("flow_class").usd_defl.sum()
plt.subplot(122)
ax = sns.barplot(x=plotting2.index, y=(plotting2.values/1e6))
ax.set_ylabel("Amount in million USD")
ax.set_xlabel("Project type")
ax.set_title("Financial amount per project type");
plt.plot()
df = pd.DataFrame([["ODA", plotting[0], round(plotting2[0]/1e6,2)],
["OOF", plotting[1], round(plotting2[1]/1e6,2)],
["Vague",plotting[2], round(plotting2[2]/1e6,2)]], columns = ["flow_class", "Share", "Amount in mio 2010 USD"])
#print(f"Number of projects:\nODA-like:{plotting[0]*100}%, \nOOF-like:{plotting[1]*100}%, \nVague OF:{plotting[2]*100}%")
# print(f"ODA-like:{plotting2[0]/1e6:.2f}, \nOOF-like:{plotting2[1]/1e6:.2f}, \nVague OF:{plotting2[2]/1e6:.2f}")
#print((plotting2[0]/1e6)/(plotting2.values.sum()/1e6))
return df
###
def year_plot(data):
sns.set_theme(style="whitegrid")
year = np.unique(data.year)
total_projects_year = data.year.value_counts().sort_index()
ax = sns.barplot(x=year, y=total_projects_year, color= "blue")
ax.set_ylabel("number of projects")
ax.set_xticklabels(["'00", "'01", "'02","'03","'04","'05","'06","'07","'08","'09","'10","'11","'12","'13","'14"])
ax.set_title("number of projects per year");
###
def sectoral_plot(data):
sns.set_theme(style="whitegrid")
sectoral_analysis_df = data.crs_sector_name.value_counts(1).sort_index().to_frame("project_share")
sectoral_analysis_df["in_USD"] = data.groupby("crs_sector_name").usd_defl.sum()
sectoral_analysis_df = sectoral_analysis_df.sort_values(by="in_USD", ascending=False)
# plotting
f, axs = plt.subplots(2,1,figsize=(14,18))
plt.subplot(211)
ax = sns.barplot(y=sectoral_analysis_df.index, x=sectoral_analysis_df.in_USD, color = "darkblue")
ax.set_title("Value per sector");
plt.subplot(212)
ax = sns.barplot(y=sectoral_analysis_df.index, x=sectoral_analysis_df.project_share, color = "lightblue")
ax.set_title("Sare of projects per sector");
# Share of health, education and governance
share_HEG = ((sectoral_analysis_df.loc["Health", "in_USD"] + sectoral_analysis_df.loc["Education", "in_USD"]
+ sectoral_analysis_df.loc["Government and Civil Society", "in_USD"]) / sectoral_analysis_df["in_USD"].sum())
# Share of energy, transport, industry
share_ETI = ((sectoral_analysis_df.loc["Energy Generation and Supply", "in_USD"]
+ sectoral_analysis_df.loc["Industry, Mining, Construction", "in_USD"]
+ sectoral_analysis_df.loc["Transport and Storage", "in_USD"]) / sectoral_analysis_df["in_USD"].sum())
print(f"All projects of the health-, education, and governance sector account for {share_HEG*100:.2f}% of the total financial value,\nwhereas the energy-, transportation, and industry/mining sector accounts for {share_ETI*100:.2f}%")
###
def financeform_plot(data):
sns.set_theme(style="whitegrid")
f, axs = plt.subplots(2,1, figsize = (12,10))
plt.subplots_adjust(wspace=0.25)
plt.subplot(211)
ODA_like = data[data.flow_class == "ODA-like"].flow.value_counts()
ax = sns.barplot(y=ODA_like.index, x=ODA_like.values)
ax.set_xlabel("Number of projects")
ax.set_title("Financeform of ODA-like projects");
plt.subplot(212)
OOFv_like = data[data.flow_class.isin(["OOF-like", "Vague (Official Finance)"])].flow.value_counts()
ax = sns.barplot(y=OOFv_like.index, x=OOFv_like.values)
ax.set_xlabel("Number of projects")
ax.set_title("Financeform of OOFV-like projects");
plt.tight_layout(pad=2.5);
###
def quali_descriptive_plots(data, liste):
sns.set_theme(style="whitegrid")
fig, axes = plt.subplots(3, 2, figsize=(14, 16))
# Use the axes for plotting
axes[0,0].set_title(liste[0])
sns.violinplot(x=liste[0], y="OFn_all", data=data, ax=axes[0,0], inner = "quartiles");
axes[0,1].set_title(liste[0])
ax = sns.violinplot(x=liste[0], y="OFa_all_con", data=data, ax=axes[0,1], inner = "quartiles")
axes[1,0].set_title(liste[1])
sns.violinplot(x=liste[1], y="OFn_all", data=data,ax=axes[1,0], inner = "quartiles");
axes[1,1].set_title(liste[1])
ax = sns.violinplot(x=liste[1], y="OFa_all_con", data=data,ax=axes[1,1], inner = "quartiles");
axes[2,0].set_title(liste[2])
sns.violinplot(x=liste[2], y="OFn_all", data=data,ax=axes[2,0], inner = "quartiles");
axes[2,1].set_title(liste[2])
ax = sns.violinplot(x=liste[2], y="OFa_all_con", data=data,ax=axes[2,1], inner = "quartiles");
plt.tight_layout(pad=2.5);
###
def quanti_descriptive_plots(data, liste, hue):
sns.set_theme(style="whitegrid")
fig, axes = plt.subplots(4, 2, figsize=(14, 16))
# Use the axes for plotting
axes[0,0].set_title(liste[0])
sns.scatterplot(x=liste[0], y="OFn_all", data=data, ax=axes[0,0], hue = hue)
axes[0,1].set_title(liste[0])
ax = sns.scatterplot(x=liste[0], y="OFa_all_con", data=data, ax=axes[0,1], hue = hue)
axes[1,0].set_title(liste[1])
sns.scatterplot(x=liste[1], y="OFn_all", data=data,ax=axes[1,0], hue = hue);
axes[1,1].set_title(liste[1])
ax = sns.scatterplot(x=liste[1], y="OFa_all_con", data=data,ax=axes[1,1], hue = hue);
axes[2,0].set_title(liste[2])
sns.scatterplot(x=liste[2], y="OFn_all", data=data,ax=axes[2,0], hue = hue);
axes[2,1].set_title(liste[2])
ax = sns.scatterplot(x=liste[2], y="OFa_all_con", data=data,ax=axes[2,1], hue = hue);
axes[3,0].set_title(liste[3])
sns.scatterplot(x=liste[3], y="OFn_all", data=data,ax=axes[3,0], hue = hue);
axes[3,1].set_title(liste[3])
ax = sns.scatterplot(x=liste[3], y="OFa_all_con", data=data,ax=axes[3,1], hue = hue);
plt.tight_layout(pad=2.5);
###
def plot_parallel_trends(results_df, data4):
# Since I hard-code the x tick labels, I get a warning that I suppress here
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
input_data = pd.read_stata("data/test_instruments2.dta")
sns.set_theme(style="whitegrid")
fig, axes = plt.subplots(2,2, figsize=(15, 15))
plt.subplot(221)
fig.suptitle("Parallel trends assumption", fontsize = 20)
#fig.suptitle("Parallel trends assumption: Material and Reserves", fontsize = 20)
for plots in ["ln_steel", "ln_iron", "ln_timber", "ln_glass", "ln_aluminum"]:
ax = sns.lineplot(x = "year", y = plots, data = input_data, label = plots)
ax.set_ylabel("(log) production volume of respective input)")
ax.set_title("Panel A: Chinas (logged) production of input materials over time");
plt.legend(fontsize = 15)
plt.subplot(222)
ax3 = sns.lineplot(x="year",y= "lower_probGrowthpc", data = results_df, label = "below median prob", )
ax3 = sns.lineplot(x ="year",y= "upper_probGrowthpc", data = results_df, label = "above median prob")
ax3.set_xticklabels(["2002","2003","2004","2005","2006","2007","2008","2009","2010","2011","2012","2013","2014"]);
ax3.set_ylabel("Average growth p.c.")
ax3.set_title("Panel B: Average Growth along countries within groups")
plt.legend(fontsize=15)
plt.subplot(223)
ax = sns.lineplot(x= "year", y= "lower_probOFn_ln", data = results_df, label = "below median OFn")
ax = sns.lineplot(x= "year", y= "upper_probOFn_ln", data = results_df, label = "above median OFn")
ax = sns.lineplot(x="year", y="lower_probOFa_ln", data = results_df, label = "below median OFa")
ax = sns.lineplot(x="year", y="upper_probOFa_ln", data = results_df, label = "above median OFa")
ax.set_ylabel("(log) count and financial amount of projects in t-2")
ax.set_xticklabels(["2002","2003","2004","2005","2006","2007","2008","2009","2010","2011","2012","2013","2014"]);
ax.set_title("Panel C: Chinese (logged and lagged) project count and financial amounts for groups ")
plt.legend(fontsize = 15)
data_mod = data4[(data4.year >= "2002-01-01") & (data4.countryname == "Angola")]
plt.subplot(224)
ax2 = sns.lineplot(x = "year", y = "l3Reserves", data = data_mod, label = "Reserves (t-3)")
ax2.set_ylabel("Change in foreign exchange reserves")
ax2.set_title("Panel D: Chinas change in net foreign exchange reserves in trillion 2010 USD")
plt.legend(fontsize=15)
plt.tight_layout(pad=2.5);
| worldplot_2 | identifier_name |
binge-watch.mock.ts | import { Movie } from "../models/movie";
import { TvSeries } from "../models/tvSeries";
export const Genres = [
"Action",
"Animation",
"Comedy",
"Crime",
"Drama",
"Experimental",
"Fantasy",
"Historical",
"Horror",
"Romance",
"Science Fiction",
"Thriller",
"Western",
"Other"
];
export const MOVIE_LIST: Movie[] = [
{
name: "Hotel Terminus: The Life and Times of Klaus Barbie",
duration: 120,
genres: ["romantic", "action"],
releaseYear: "1988",
director: "Marcel Ophüls",
description:
"A documentary about Klaus Barbie, the Gestapo chief of Lyon, and his life after the war.",
image:
"http://snagfilms-a.akamaihd.net/a2/f5/ba7c86794ad38255d4879e47109a/hotelterminus-214x317.jpg?impolicy=resize&w=300&h=444"
},
{
name: "Alive Inside",
duration: 78,
genres: ["romantic"],
releaseYear: "1998",
director: "Marcel Ophüls",
description:
"Dan Cohen, founder of the nonprofit organization Music & Memory, fights against a broken healthcare system to demonstrate music's ability to combat memory loss and restore a deep sense of self to those suffering from it.",
image:
"http://snagfilms-a.akamaihd.net/7e/97/ae2ac5fe459b848775ecf047e86b/aliveinside-214x317.jpg?impolicy=resize&w=300&h=444"
},
{
name: "Bohemian Rapsody",
duration: 120,
genres: ["romantic"],
releaseYear: "1989",
director: "Marcel Ophüls",
description:
"The story of the legendary British rock band Queen and lead singer Freddie Mercury, leading up to their famous performance at Live Aid (1985).",
image:
"https://m.media-amazon.com/images/M/MV5BMTA2NDc3Njg5NDVeQTJeQWpwZ15BbWU4MDc1NDcxNTUz._V1_SY1000_CR0,0,674,1000_AL_.jpg"
},
{
name: "Once Upon a Time ... in Hollywood",
duration: 161,
genres: ["romantic"],
releaseYear: "1980",
director: "Marcel Ophüls",
description:
"A faded television actor and his stunt double strive to achieve fame and success in the film industry during the final years of Hollywood's Golden Age in 1969 Los Angeles.",
image:
"https://m.media-amazon.com/images/M/MV5BOTg4ZTNkZmUtMzNlZi00YmFjLTk1MmUtNWQwNTM0YjcyNTNkXkEyXkFqcGdeQXVyNjg2NjQwMDQ@._V1_SY1000_CR0,0,674,1000_AL_.jpg"
},
{
name: "Inception",
duration: 120,
genres: ["romantic"],
releaseYear: "1988",
director: "Marcel Ophüls",
description:
"A thief who steals corporate secrets through the use of dream-sharing technology is given the inverse task of planting an idea into the mind of a C.E.O.",
image:
"https://m.media-amazon.com/images/M/MV5BMjAxMzY3NjcxNF5BMl5BanBnXkFtZTcwNTI5OTM0Mw@@._V1_SY1000_CR0,0,675,1000_AL_.jpg"
},
{
name: "The Great Gatsby",
duration: 143,
genres: ["romantic"],
releaseYear: "1988",
director: "Marcel Ophüls",
description:
"A writer and wall street trader, Nick, finds himself drawn to the past and lifestyle of his millionaire neighbor, Jay Gatsby.",
image:
"https://m.media-amazon.com/images/M/MV5BMTkxNTk1ODcxNl5BMl5BanBnXkFtZTcwMDI1OTMzOQ@@._V1_SY1000_SX666_AL_.jpg"
},
{
name: "Shakespeare in Love",
duration: 123,
genres: ["romantic"],
releaseYear: "1988",
director: "Marcel Ophüls",
description:
"A young Shakespeare, out of ideas and short of cash, meets his ideal woman and is inspired to write one of his most famous plays.",
image:
"https://m.media-amazon.com/images/M/MV5BM2ZkNjM5MjEtNTBlMC00OTI5LTgyYmEtZDljMzNmNzhiNzY0XkEyXkFqcGdeQXVyNDYyMDk5MTU@._V1_SY1000_CR0,0,703,1000_AL_.jpg"
},
{
name: "The Hobbit: An Unexpected Journey",
duration: 169,
genres: ["romantic"],
releaseYear: "1988",
director: "Marcel Ophüls",
description:
"A reluctant Hobbit, Bilbo Baggins, sets out to the Lonely Mountain with a spirited group of dwarves to reclaim their mountain home, and the gold within it from the dragon Smaug.",
image:
"https://m.media-amazon.com/images/M/MV5BMTcwNTE4MTUxMl5BMl5BanBnXkFtZTcwMDIyODM4OA@@._V1_SY1000_CR0,0,674,1000_AL_.jpg"
},
{
name: "Fantastic Beasts and Where to Find Them",
duration: 132,
genres: ["romantic"],
releaseYear: "1988",
director: "Marcel Ophüls",
description:
"The adventures of writer Newt Scamander in New York's secret community of witches and wizards seventy years before Harry Potter reads his book in school.",
image:
"https://m.media-amazon.com/images/M/MV5BMjMxOTM1OTI4MV5BMl5BanBnXkFtZTgwODE5OTYxMDI@._V1_SY1000_CR0,0,674,1000_AL_.jpg"
},
{
name: "Harry Potter and the Deathly Hallows: Part 1",
duration: 146,
genres: ["romantic"],
releaseYear: "1988",
director: "Marcel Ophüls",
description:
"As Harry (Daniel Radcliffe), Ron (Rupert Grint), and Hermione (Emma Watson) race against time and evil to destroy the Horcruxes, they uncover the existence of the three most powerful objects in the wizarding world: the Deathly Hallows.",
image:
"https://m.media-amazon.com/images/M/MV5BMTQ2OTE1Mjk0N15BMl5BanBnXkFtZTcwODE3MDAwNA@@._V1_SY1000_CR0,0,675,1000_AL_.jpg"
},
{
name: "The Chronicles of Narnia: The Lion, the Witch and the Wardrobe",
duration: 143,
genres: ["romantic"],
releaseYear: "1988",
director: "Marcel Ophüls",
description:
"Four kids travel through a wardrobe to the land of Narnia and learn of their destiny to free it with the guidance of a mystical lion.",
image:
"https://m.media-amazon.com/images/M/MV5BMTc0NTUwMTU5OV5BMl5BanBnXkFtZTcwNjAwNzQzMw@@._V1_SY1000_CR0,0,676,1000_AL_.jpg"
},
{
name: "Casablanca",
duration: 102,
genres: ["romantic"],
releaseYear: "1988",
director: "Marcel Ophüls",
description:
"A cynical American expatriate struggles to decide whether or not he should help his former lover and her fugitive husband escape French Morocco.",
image:
"https://m.media-amazon.com/images/M/MV5BY2IzZGY2YmEtYzljNS00NTM5LTgwMzUtMzM1NjQ4NGI0OTk0XkEyXkFqcGdeQXVyNDYyMDk5MTU@._V1_.jpg"
},
{
name: "Life Is Beautiful",
duration: 116,
genres: ["romantic"],
releaseYear: "1988",
director: "Marcel Ophüls",
description:
"When an open-minded Jewish librarian and his son become victims of the Holocaust, he uses a perfect mixture of will, humor, and imagination to protect his son from the dangers around their camp.",
image:
"https://m.media-amazon.com/images/M/MV5BYmJmM2Q4NmMtYThmNC00ZjRlLWEyZmItZTIwOTBlZDQ3NTQ1XkEyXkFqcGdeQXVyMTQxNzMzNDI@._V1_SY1000_SX670_AL_.jpg"
},
{
name: "The Breakfast Club",
duration: 97,
genres: ["romantic"],
releaseYear: "1988",
director: "Marcel Ophüls",
description:
"Five high school students meet in Saturday detention and discover how they have a lot more in common than they thought.",
image:
"https://m.media-amazon.com/images/M/MV5BOTM5N2ZmZTMtNjlmOS00YzlkLTk3YjEtNTU1ZmQ5OTdhODZhXkEyXkFqcGdeQXVyMTQxNzMzNDI@._V1_SY1000_CR0,0,639,1000_AL_.jpg"
},
{
name: "Green Book",
duration: 130,
genres: ["romantic"],
releaseYear: "1988",
director: "Marcel Ophüls",
description:
"A working-class Italian-American bouncer becomes the driver of an African-American classical pianist on a tour of venues through the 1960s American South.",
image:
"https://m.media-amazon.com/images/M/MV5BYzIzYmJlYTYtNGNiYy00N2EwLTk4ZjItMGYyZTJiOTVkM2RlXkEyXkFqcGdeQXVyODY1NDk1NjE@._V1_SY1000_CR0,0,666,1000_AL_.jpg"
}
];
export const TV_SERIES: TvSeries[] = [
{
id: 1,
name: "Friends",
genres: ["romantic"],
releaseYear: "1988",
noEpisodes: 10,
noSeasons: 10,
director: "Marcel Ophüls",
description:
"Follows the personal and professional lives of six twenty to thirty-something-year-old friends living in Manhattan.",
image:
"https://m.media-amazon.com/images/M/MV5BNDVkYjU0MzctMWRmZi00NTkxLTgwZWEtOWVhYjZlYjllYmU4XkEyXkFqcGdeQXVyNTA4NzY1MzY@._V1_.jpg"
},
{
id: 2,
name: "The Big Bang Theory",
genres: ["romantic"],
releaseYear: "1988",
noEpisodes: 24,
noSeasons: 12,
director: "Marcel Ophüls",
description:
"A woman who moves into an apartment across the hall from two brilliant but socially awkward physicists shows them how little they know about life outside of the laboratory.",
image:
"https://m.media-amazon.com/images/M/MV5BY2FmZTY5YTktOWRlYy00NmIyLWE0ZmQtZDg2YjlmMzczZDZiXkEyXkFqcGdeQXVyNjg4NzAyOTA@._V1_SY1000_CR0,0,666,1000_AL_.jpg"
},
{
id: 3,
name: "How I Met Your Mother",
genres: ["romantic"],
releaseYear: "1988",
noEpisodes: 22,
noSeasons: 9,
director: "Marcel Ophüls",
description:
"A father recounts to his children, through a series of flashbacks, the journey he and his four best friends took leading up to him meeting their mother.",
image:
"https://m.media-amazon.com/images/M/MV5BZWJjMDEzZjUtYWE1Yy00M2ZiLThlMmItODljNTAzODFiMzc2XkEyXkFqcGdeQXVyNTA4NzY1MzY@._V1_SY1000_CR0,0,666,1000_AL_.jpg"
},
{
id: 4,
name: "Family Guy",
genres: ["romantic"],
releaseYear: "1988",
noEpisodes: 23,
| "https://m.media-amazon.com/images/M/MV5BODEwZjEzMjAtNjQxMy00Yjc4LWFlMDAtYjhjZTAxNDU3OTg3XkEyXkFqcGdeQXVyOTM2NTM4MjA@._V1_SY1000_SX750_AL_.jpg"
},
{
id: 5,
name: "Peaky Blinders",
genres: ["romantic"],
releaseYear: "1988",
noEpisodes: 6,
noSeasons: 7,
director: "Marcel Ophüls",
description:
"A gangster family epic set in 1919 Birmingham, England; centered on a gang who sew razor blades in the peaks of their caps, and their fierce boss Tommy Shelby.",
image:
"https://m.media-amazon.com/images/M/MV5BMTkzNjEzMDEzMF5BMl5BanBnXkFtZTgwMDI0MjE4MjE@._V1_SY1000_CR0,0,674,1000_AL_.jpg"
},
{
id: 6,
name: "Game of Thrones",
genres: ["romantic"],
releaseYear: "1988",
noEpisodes: 10,
noSeasons: 8,
director: "Marcel Ophüls",
description:
"Nine noble families fight for control over the mythical lands of Westeros, while an ancient enemy returns after being dormant for thousands of years.",
image:
"https://m.media-amazon.com/images/M/MV5BMjA5NzA5NjMwNl5BMl5BanBnXkFtZTgwNjg2OTk2NzM@._V1_SY1000_CR0,0,674,1000_AL_.jpg"
},
{
id: 7,
name: "Vikings",
genres: ["romantic"],
releaseYear: "1908",
noEpisodes: 9,
noSeasons: 6,
director: "Marcel Ophüls",
description:
"Vikings transports us to the brutal and mysterious world of Ragnar Lothbrok, a Viking warrior and farmer who yearns to explore - and raid - the distant shores across the ocean.",
image:
"https://m.media-amazon.com/images/M/MV5BNjIzZjljZmQtOGNiYi00YmY2LWE1MGYtN2VlMmEyZDBlMzRmXkEyXkFqcGdeQXVyMTkxNjUyNQ@@._V1_SY1000_SX666_AL_.jpg"
},
{
id: 8,
name: "South Park",
genres: ["romantic"],
releaseYear: "1998",
noEpisodes: 9,
director: "Marcel Ophüls",
noSeasons: 24,
description:
"Follows the misadventures of four irreverent grade-schoolers in the quiet, dysfunctional town of South Park, Colorado.",
image:
"https://m.media-amazon.com/images/M/MV5BOGE2YWUzMDItNTg2Ny00NTUzLTlmZGYtNWMyNzVjMjQ3MThkXkEyXkFqcGdeQXVyNTA4NzY1MzY@._V1_.jpg"
},
{
id: 9,
name: "Sherlock",
genres: ["romantic"],
releaseYear: "1988",
noEpisodes: 4,
noSeasons: 4,
director: "Marcel Ophüls",
description:
"A modern update finds the famous sleuth and his doctor partner solving crime in 21st century London.",
image:
"https://m.media-amazon.com/images/M/MV5BMWY3NTljMjEtYzRiMi00NWM2LTkzNjItZTVmZjE0MTdjMjJhL2ltYWdlL2ltYWdlXkEyXkFqcGdeQXVyNTQ4NTc5OTU@._V1_.jpg"
},
{
id: 10,
name: "Chernobyl",
genres: ["romantic"],
releaseYear: "1988",
noEpisodes: 5,
noSeasons: 1,
director: "Marcel Ophüls",
description:
"In April 1986, an explosion at the Chernobyl nuclear power plant in the Union of Soviet Socialist Republics becomes one of the world's worst man-made catastrophes.",
image:
"https://m.media-amazon.com/images/M/MV5BNTEyYmIzMDUtNWMwNC00Y2Q1LWIyZTgtMGY1YzUxOTAwYTAwXkEyXkFqcGdeQXVyMjIyMTc0ODQ@._V1_SY1000_CR0,0,674,1000_AL_.jpg"
},
{
id: 11,
name: "Money Heist",
genres: ["romantic"],
releaseYear: "1988",
noEpisodes: 9,
noSeasons: 4,
director: "Marcel Ophüls",
description:
"A group of unique robbers assault the Factory of Moneda and Timbre to carry out the most perfect robbery in the history of Spain and take home 2.4 billion euros.",
image:
"https://m.media-amazon.com/images/M/MV5BNmYxZWNlMDAtYzczZC00M2ViLWIwYjctMDg3M2QyN2E1MzlmXkEyXkFqcGdeQXVyNjE4ODA3NTY@._V1_SY1000_CR0,0,675,1000_AL_.jpg"
},
{
id: 12,
name: "Riverdale",
genres: ["romantic"],
releaseYear: "1988",
noEpisodes: 13,
noSeasons: 4,
director: "Marcel Ophüls",
description:
"While navigating the troubled waters of romance, school and family, Archie and his gang become entangled in dark Riverdale mysteries.",
image:
"https://m.media-amazon.com/images/M/MV5BNDYxOTU0NDYtYzg2MC00YzgyLTg1YzctMDc1MGJmOGIzMTc3XkEyXkFqcGdeQXVyMzQ2MDI5NjU@._V1_SY1000_CR0,0,666,1000_AL_.jpg"
},
{
id: 13,
name: "The Crown",
genres: ["romantic"],
releaseYear: "1988",
noEpisodes: 10,
noSeasons: 4,
director: "Marcel Ophüls",
description:
"Follows the political rivalries and romance of Queen Elizabeth II's reign and the events that shaped the second half of the twentieth century.",
image:
"https://m.media-amazon.com/images/M/MV5BNGI1ODkzZDQtZTYxYS00MTg1LWFlY2QtMTM5MGNhNWRhYmVmXkEyXkFqcGdeQXVyNjU2ODM5MjU@._V1_SY1000_SX800_AL_.jpg"
},
{
id: 14,
name: "House",
genres: ["romantic"],
releaseYear: "1988",
noEpisodes: 22,
noSeasons: 8,
director: "Marcel Ophüls",
description:
"An antisocial maverick doctor who specializes in diagnostic medicine does whatever it takes to solve puzzling cases that come his way using his crack team of doctors and his wits.",
image:
"https://m.media-amazon.com/images/M/MV5BMDA4NjQzN2ItZDhhNC00ZjVlLWFjNTgtMTEyNDQyOGNjMDE1XkEyXkFqcGdeQXVyNTA4NzY1MzY@._V1_SY1000_CR0,0,679,1000_AL_.jpg"
},
{
id: 15,
name: "Lie to Me",
genres: ["romantic"],
releaseYear: "1988",
noEpisodes: 13,
noSeasons: 3,
director: "Marcel Ophüls",
description:
"About Cal Lightman, the world's leading deception expert who studies facial expressions and involuntary body language to expose the truth behind the lies.",
image:
"https://m.media-amazon.com/images/M/MV5BMTc2MjA4MTM2OV5BMl5BanBnXkFtZTcwMTYzMzA1Mg@@._V1_.jpg"
}
]; | noSeasons: 18,
director: "Marcel Ophüls",
description:
"In a wacky Rhode Island town, a dysfunctional family strive to cope with everyday life as they are thrown from one crazy scenario to another.",
image:
| random_line_split |
lib.rs | use encoding_rs::{EncoderResult, EUC_KR, SHIFT_JIS, UTF_16LE};
use eztrans_rs::{Container, EzTransLib};
use fxhash::FxHashMap;
use serde_derive::{Deserialize, Serialize};
use std::ffi::CStr;
use std::fs;
use std::path::Path;
use std::ptr::null_mut;
pub struct EzDictItem {
key: String,
value: String,
}
impl EzDictItem {
pub fn new(key: String, value: String) -> Self {
assert!(!key.is_empty());
Self { key, value }
}
pub fn apply(&self, text: &mut String) {
let mut prev_pos = 0;
while let Some(pos) = twoway::find_str(&text[prev_pos..], &self.key) {
text.replace_range(prev_pos + pos..prev_pos + pos + self.key.len(), &self.value);
prev_pos += pos + self.value.len(); // resume the search just after the inserted value (absolute index)
}
}
#[inline]
pub fn key(&self) -> &str {
&self.key
}
#[inline]
pub fn value(&self) -> &str {
&self.value
}
}
#[test]
fn dict_item_test() {
let item = EzDictItem::new("あなた".into(), "아나타".into());
let mut foo = "あなた당신あなた".into();
item.apply(&mut foo);
assert_eq!(foo, "아나타당신아나타");
}
#[test]
#[should_panic]
fn dict_item_empty_key_test() {
let _item = EzDictItem::new("".into(), "123".into());
}
#[test]
fn dict_item_empty_value_test() {
let item = EzDictItem::new("123".into(), "".into());
let mut foo = "123def".into();
item.apply(&mut foo);
assert_eq!(foo, "def");
}
#[test]
fn dict_item_eq_kv_test() {
let item = EzDictItem::new("123".into(), "123".into());
let mut foo = "123def".into();
item.apply(&mut foo);
assert_eq!(foo, "123def");
}
#[derive(Serialize, Deserialize, Default)]
struct EzDict {
#[serde(default)]
sort: bool,
#[serde(alias = "BeforeDic")]
#[serde(with = "dict_items")]
#[serde(default)]
before_dict: Vec<EzDictItem>,
#[serde(alias = "AfterDic")]
#[serde(with = "dict_items")]
#[serde(default)]
after_dict: Vec<EzDictItem>,
}
impl EzDict {
pub fn sort_before_dict(&mut self) {
if !self.sort {
return | if !self.sort {
return;
}
self.after_dict
.sort_unstable_by(|l, r| l.key().cmp(r.key()));
}
pub fn sort(&mut self) {
self.sort_after_dict();
self.sort_before_dict();
}
}
mod dict_items {
use super::EzDictItem;
use serde::de::{MapAccess, Visitor};
use serde::ser::SerializeMap;
use serde::{Deserializer, Serializer};
use std::fmt;
pub fn serialize<S: Serializer>(items: &Vec<EzDictItem>, s: S) -> Result<S::Ok, S::Error> {
let mut map = s.serialize_map(Some(items.len()))?;
for item in items {
map.serialize_entry(item.key(), item.value())?;
}
map.end()
}
pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Vec<EzDictItem>, D::Error> {
struct ItemVisitor;
impl<'de> Visitor<'de> for ItemVisitor {
type Value = Vec<EzDictItem>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("key and value")
}
fn visit_map<M: MapAccess<'de>>(self, mut access: M) -> Result<Self::Value, M::Error> {
let mut ret = Vec::with_capacity(access.size_hint().unwrap_or(10));
while let Some((key, value)) = access.next_entry()? {
ret.push(EzDictItem::new(key, value));
}
Ok(ret)
}
}
d.deserialize_map(ItemVisitor)
}
}
pub struct EzContext {
lib: Container<EzTransLib<'static>>,
cache: FxHashMap<String, String>,
dict: EzDict,
encode_buffer: Vec<u8>,
string_buffer: String,
}
impl EzContext {
pub fn from_path(
lib: Container<EzTransLib<'static>>,
path: &Path,
) -> Result<Self, Box<dyn std::error::Error>> {
let cache_path = path.join("cache.msgpack");
let dict_path = path.join("userdic.yml");
let json_dict_path = path.join("userdic.json");
let mut cache = if cache_path.exists() {
rmp_serde::from_read(fs::File::open(cache_path)?)?
} else {
FxHashMap::default()
};
cache.insert(String::new(), String::new());
let mut dict = if dict_path.exists() {
serde_yaml::from_reader(fs::File::open(dict_path)?)?
} else if json_dict_path.exists() {
serde_json::from_reader(fs::File::open(json_dict_path)?)?
} else {
EzDict::default()
};
dict.sort();
Ok(Self {
lib,
cache,
dict,
encode_buffer: Vec::with_capacity(8192),
string_buffer: String::new(),
})
}
pub fn save_to(&self, path: &Path) -> Result<(), Box<dyn std::error::Error>> {
let cache_path = path.join("cache.msgpack");
let dict_path = path.join("userdic.yml");
use std::fs::write;
write(cache_path, rmp_serde::to_vec(&self.cache)?)?;
write(dict_path, serde_yaml::to_vec(&self.dict)?)?;
Ok(())
}
fn translate_impl(&mut self, text: &str) -> &str {
let dict = &mut self.dict;
let lib = &self.lib;
let buf = &mut self.encode_buffer;
let str_buf = &mut self.string_buffer;
self.cache.entry(text.into()).or_insert_with(move || {
str_buf.push_str(text);
let mut encoder = SHIFT_JIS.new_encoder();
let mut decoder = EUC_KR.new_decoder_without_bom_handling();
let max_buf_len = encoder
.max_buffer_length_from_utf8_without_replacement(str_buf.len())
.unwrap_or(0);
buf.reserve(max_buf_len + 1);
let (encoder_ret, _) =
encoder.encode_from_utf8_to_vec_without_replacement(&str_buf, buf, true);
buf.push(0);
assert_eq!(encoder_ret, EncoderResult::InputEmpty);
let translated =
unsafe { lib.translate(CStr::from_bytes_with_nul_unchecked(&buf[..])) };
let translated = translated.as_bytes();
buf.clear();
str_buf.clear();
let mut ret = String::with_capacity(
decoder
.max_utf8_buffer_length_without_replacement(translated.len())
.unwrap_or(0),
);
let (_decoder_ret, _) =
decoder.decode_to_string_without_replacement(translated, &mut ret, true);
for after in dict.after_dict.iter() {
after.apply(&mut ret);
}
ret
})
}
pub fn translate(&mut self, text: &str) -> &str {
if !self.cache.contains_key(text) {
let max_len = UTF_16LE
.new_decoder_without_bom_handling()
.max_utf8_buffer_length_without_replacement(text.len() * 2);
let mut ret = String::with_capacity(max_len.unwrap_or(text.len() * 3));
{
let mut text = text.into();
for before in self.dict.before_dict.iter() {
before.apply(&mut text);
}
let mut prev_pos = 0;
let mut is_in_japanese = is_japanese(text.chars().next().unwrap());
for (pos, ch) in text.char_indices() {
if is_japanese(ch) {
if !is_in_japanese {
ret.push_str(&text[prev_pos..pos]);
prev_pos = pos;
is_in_japanese = true;
}
} else {
if is_in_japanese {
let translated = self.translate_impl(&text[prev_pos..pos]);
ret.push_str(translated);
prev_pos = pos;
is_in_japanese = false;
}
}
}
if !is_in_japanese {
ret.push_str(&text[prev_pos..]);
} else {
let translated = self.translate_impl(&text[prev_pos..]);
ret.push_str(translated);
}
}
self.cache.insert(text.into(), ret);
}
self.cache.get(text).unwrap()
}
}
#[no_mangle]
pub unsafe extern "cdecl" fn ez_init(
ez_path: *const u16,
ez_path_len: usize,
ctx_path: *const u16,
ctx_path_len: usize,
) -> *mut EzContext {
let path = utf16_to_string(ez_path, ez_path_len);
let ctx_path = utf16_to_string(ctx_path, ctx_path_len);
let path = Path::new(&path);
let ctx_path = Path::new(&ctx_path);
eprintln!("Loading lib from {}", path.display());
let lib = match eztrans_rs::load_library(path.join("J2KEngine.dll")) {
Ok(lib) => lib,
Err(err) => {
eprintln!("EzTrans library loading failed: {:?}", err);
return null_mut();
}
};
let mut dat_dir = path.join("Dat").to_str().unwrap().to_string().into_bytes();
dat_dir.push(0);
lib.initialize(
CStr::from_bytes_with_nul_unchecked(b"CSUSER123455\0"),
CStr::from_bytes_with_nul_unchecked(&dat_dir[..]),
);
let ctx = match EzContext::from_path(lib, ctx_path) {
Ok(ctx) => ctx,
Err(err) => {
eprintln!("Loading context failed: {:?}", err);
return null_mut();
}
};
Box::into_raw(Box::new(ctx))
}
#[no_mangle]
pub unsafe extern "cdecl" fn ez_save(ctx: *mut EzContext, path: *const u16, path_len: usize) {
let path = utf16_to_string(path, path_len);
let path = Path::new(&path);
if let Err(err) = (*ctx).save_to(path) {
eprintln!("Save err: {:?}", err);
}
}
#[no_mangle]
pub unsafe extern "cdecl" fn ez_delete(ctx: *mut EzContext) {
(*ctx).lib.terminate();
let _ = Box::from_raw(ctx);
}
#[no_mangle]
pub unsafe extern "cdecl" fn ez_add_before_dict(
ctx: *mut EzContext,
key: *const u16,
key_len: usize,
value: *const u16,
value_len: usize,
) {
let key = utf16_to_string(key, key_len);
let value = utf16_to_string(value, value_len);
(*ctx).dict.before_dict.push(EzDictItem::new(key, value));
(*ctx).dict.sort_before_dict();
}
#[no_mangle]
pub unsafe extern "cdecl" fn ez_add_after_dict(
ctx: *mut EzContext,
key: *const u16,
key_len: usize,
value: *const u16,
value_len: usize,
) {
let key = utf16_to_string(key, key_len);
let value = utf16_to_string(value, value_len);
(*ctx).dict.after_dict.push(EzDictItem::new(key, value));
(*ctx).dict.sort_after_dict();
}
#[no_mangle]
pub unsafe extern "cdecl" fn ez_translate(
ctx: *mut EzContext,
text: *const u16,
text_len: usize,
out_text: *mut *const u8,
out_text_len: *mut usize,
) -> i32 {
let text = utf16_to_string(text, text_len);
let translated = (*ctx).translate(text.as_ref());
*out_text = translated.as_ptr();
*out_text_len = translated.len();
0
}
fn u16_slice_to_u8_slice(slice: &[u16]) -> &[u8] {
unsafe { std::slice::from_raw_parts(slice.as_ptr() as *const u8, slice.len() * 2) }
}
unsafe fn utf16_to_string<'a>(text: *const u16, len: usize) -> String {
let (text, _) = UTF_16LE
.decode_without_bom_handling(u16_slice_to_u8_slice(std::slice::from_raw_parts(text, len)));
text.into()
}
fn is_japanese(ch: char) -> bool {
let ch = ch as u32;
(ch >= 0x3000 && ch <= 0x30FF) || (ch >= 0x4E00 && ch <= 0x9FAF)
}
| ;
}
self.before_dict
.sort_unstable_by(|l, r| l.key().cmp(r.key()));
}
pub fn sort_after_dict(&mut self) {
| identifier_body |
lib.rs | use encoding_rs::{EncoderResult, EUC_KR, SHIFT_JIS, UTF_16LE};
use eztrans_rs::{Container, EzTransLib};
use fxhash::FxHashMap;
use serde_derive::{Deserialize, Serialize};
use std::ffi::CStr;
use std::fs;
use std::path::Path;
use std::ptr::null_mut;
pub struct EzDictItem {
key: String,
value: String,
}
impl EzDictItem {
pub fn new(key: String, value: String) -> Self {
assert!(!key.is_empty());
Self { key, value }
}
pub fn apply(&self, text: &mut String) {
let mut prev_pos = 0;
while let Some(pos) = twoway::find_str(&text[prev_pos..], &self.key) {
text.replace_range(prev_pos + pos..prev_pos + pos + self.key.len(), &self.value);
prev_pos += pos + self.value.len(); // resume the search just after the inserted value (absolute index)
}
}
#[inline]
pub fn key(&self) -> &str {
&self.key
}
#[inline]
pub fn value(&self) -> &str {
&self.value
}
}
#[test]
fn dict_item_test() {
let item = EzDictItem::new("あなた".into(), "아나타".into());
let mut foo = "あなた당신あなた".into();
item.apply(&mut foo);
assert_eq!(foo, "아나타당신아나타");
}
#[test]
#[should_panic]
fn dict_item_empty_key_test() {
let _item = EzDictItem::new("".into(), "123".into());
}
#[test]
fn dict_item_empty_value_test() {
let item = EzDictItem::new("123".into(), "".into());
let mut foo = "123def".into();
item.apply(&mut foo);
assert_eq!(foo, "def");
}
#[test]
fn dict_item_eq_kv_test() {
let item = EzDictItem::new("123".into(), "123".into());
let mut foo = "123def".into();
item.apply(&mut foo);
assert_eq!(foo, "123def");
}
#[derive(Serialize, Deserialize, Default)]
struct EzDict {
#[serde(default)]
sort: bool,
#[serde(alias = "BeforeDic")]
#[serde(with = "dict_items")]
#[serde(default)]
before_dict: Vec<EzDictItem>,
#[serde(alias = "AfterDic")]
#[serde(with = "dict_items")]
#[serde(default)]
after_dict: Vec<EzDictItem>,
}
| impl EzDict {
pub fn sort_before_dict(&mut self) {
if !self.sort {
return;
}
self.before_dict
.sort_unstable_by(|l, r| l.key().cmp(r.key()));
}
pub fn sort_after_dict(&mut self) {
if !self.sort {
return;
}
self.after_dict
.sort_unstable_by(|l, r| l.key().cmp(r.key()));
}
pub fn sort(&mut self) {
self.sort_after_dict();
self.sort_before_dict();
}
}
mod dict_items {
use super::EzDictItem;
use serde::de::{MapAccess, Visitor};
use serde::ser::SerializeMap;
use serde::{Deserializer, Serializer};
use std::fmt;
pub fn serialize<S: Serializer>(items: &Vec<EzDictItem>, s: S) -> Result<S::Ok, S::Error> {
let mut map = s.serialize_map(Some(items.len()))?;
for item in items {
map.serialize_entry(item.key(), item.value())?;
}
map.end()
}
pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Vec<EzDictItem>, D::Error> {
struct ItemVisitor;
impl<'de> Visitor<'de> for ItemVisitor {
type Value = Vec<EzDictItem>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("key and value")
}
fn visit_map<M: MapAccess<'de>>(self, mut access: M) -> Result<Self::Value, M::Error> {
let mut ret = Vec::with_capacity(access.size_hint().unwrap_or(10));
while let Some((key, value)) = access.next_entry()? {
ret.push(EzDictItem::new(key, value));
}
Ok(ret)
}
}
d.deserialize_map(ItemVisitor)
}
}
pub struct EzContext {
lib: Container<EzTransLib<'static>>,
cache: FxHashMap<String, String>,
dict: EzDict,
encode_buffer: Vec<u8>,
string_buffer: String,
}
impl EzContext {
pub fn from_path(
lib: Container<EzTransLib<'static>>,
path: &Path,
) -> Result<Self, Box<dyn std::error::Error>> {
let cache_path = path.join("cache.msgpack");
let dict_path = path.join("userdic.yml");
let json_dict_path = path.join("userdic.json");
let mut cache = if cache_path.exists() {
rmp_serde::from_read(fs::File::open(cache_path)?)?
} else {
FxHashMap::default()
};
cache.insert(String::new(), String::new());
let mut dict = if dict_path.exists() {
serde_yaml::from_reader(fs::File::open(dict_path)?)?
} else if json_dict_path.exists() {
serde_json::from_reader(fs::File::open(json_dict_path)?)?
} else {
EzDict::default()
};
dict.sort();
Ok(Self {
lib,
cache,
dict,
encode_buffer: Vec::with_capacity(8192),
string_buffer: String::new(),
})
}
pub fn save_to(&self, path: &Path) -> Result<(), Box<dyn std::error::Error>> {
let cache_path = path.join("cache.msgpack");
let dict_path = path.join("userdic.yml");
use std::fs::write;
write(cache_path, rmp_serde::to_vec(&self.cache)?)?;
write(dict_path, serde_yaml::to_vec(&self.dict)?)?;
Ok(())
}
fn translate_impl(&mut self, text: &str) -> &str {
let dict = &mut self.dict;
let lib = &self.lib;
let buf = &mut self.encode_buffer;
let str_buf = &mut self.string_buffer;
self.cache.entry(text.into()).or_insert_with(move || {
str_buf.push_str(text);
let mut encoder = SHIFT_JIS.new_encoder();
let mut decoder = EUC_KR.new_decoder_without_bom_handling();
let max_buf_len = encoder
.max_buffer_length_from_utf8_without_replacement(str_buf.len())
.unwrap_or(0);
buf.reserve(max_buf_len + 1);
let (encoder_ret, _) =
encoder.encode_from_utf8_to_vec_without_replacement(&str_buf, buf, true);
buf.push(0);
assert_eq!(encoder_ret, EncoderResult::InputEmpty);
let translated =
unsafe { lib.translate(CStr::from_bytes_with_nul_unchecked(&buf[..])) };
let translated = translated.as_bytes();
buf.clear();
str_buf.clear();
let mut ret = String::with_capacity(
decoder
.max_utf8_buffer_length_without_replacement(translated.len())
.unwrap_or(0),
);
let (_decoder_ret, _) =
decoder.decode_to_string_without_replacement(translated, &mut ret, true);
for after in dict.after_dict.iter() {
after.apply(&mut ret);
}
ret
})
}
pub fn translate(&mut self, text: &str) -> &str {
if !self.cache.contains_key(text) {
let max_len = UTF_16LE
.new_decoder_without_bom_handling()
.max_utf8_buffer_length_without_replacement(text.len() * 2);
let mut ret = String::with_capacity(max_len.unwrap_or(text.len() * 3));
{
let mut text = text.into();
for before in self.dict.before_dict.iter() {
before.apply(&mut text);
}
let mut prev_pos = 0;
let mut is_in_japanese = is_japanese(text.chars().next().unwrap());
for (pos, ch) in text.char_indices() {
if is_japanese(ch) {
if !is_in_japanese {
ret.push_str(&text[prev_pos..pos]);
prev_pos = pos;
is_in_japanese = true;
}
} else {
if is_in_japanese {
let translated = self.translate_impl(&text[prev_pos..pos]);
ret.push_str(translated);
prev_pos = pos;
is_in_japanese = false;
}
}
}
if !is_in_japanese {
ret.push_str(&text[prev_pos..]);
} else {
let translated = self.translate_impl(&text[prev_pos..]);
ret.push_str(translated);
}
}
self.cache.insert(text.into(), ret);
}
self.cache.get(text).unwrap()
}
}
#[no_mangle]
pub unsafe extern "cdecl" fn ez_init(
ez_path: *const u16,
ez_path_len: usize,
ctx_path: *const u16,
ctx_path_len: usize,
) -> *mut EzContext {
let path = utf16_to_string(ez_path, ez_path_len);
let ctx_path = utf16_to_string(ctx_path, ctx_path_len);
let path = Path::new(&path);
let ctx_path = Path::new(&ctx_path);
eprintln!("Loading lib from {}", path.display());
let lib = match eztrans_rs::load_library(path.join("J2KEngine.dll")) {
Ok(lib) => lib,
Err(err) => {
eprintln!("EzTrans library loading failed: {:?}", err);
return null_mut();
}
};
let mut dat_dir = path.join("Dat").to_str().unwrap().to_string().into_bytes();
dat_dir.push(0);
lib.initialize(
CStr::from_bytes_with_nul_unchecked(b"CSUSER123455\0"),
CStr::from_bytes_with_nul_unchecked(&dat_dir[..]),
);
let ctx = match EzContext::from_path(lib, ctx_path) {
Ok(ctx) => ctx,
Err(err) => {
eprintln!("Loading context failed: {:?}", err);
return null_mut();
}
};
Box::into_raw(Box::new(ctx))
}
#[no_mangle]
pub unsafe extern "cdecl" fn ez_save(ctx: *mut EzContext, path: *const u16, path_len: usize) {
let path = utf16_to_string(path, path_len);
let path = Path::new(&path);
if let Err(err) = (*ctx).save_to(path) {
eprintln!("Save err: {:?}", err);
}
}
#[no_mangle]
pub unsafe extern "cdecl" fn ez_delete(ctx: *mut EzContext) {
(*ctx).lib.terminate();
let _ = Box::from_raw(ctx);
}
#[no_mangle]
pub unsafe extern "cdecl" fn ez_add_before_dict(
ctx: *mut EzContext,
key: *const u16,
key_len: usize,
value: *const u16,
value_len: usize,
) {
let key = utf16_to_string(key, key_len);
let value = utf16_to_string(value, value_len);
(*ctx).dict.before_dict.push(EzDictItem::new(key, value));
(*ctx).dict.sort_before_dict();
}
#[no_mangle]
pub unsafe extern "cdecl" fn ez_add_after_dict(
ctx: *mut EzContext,
key: *const u16,
key_len: usize,
value: *const u16,
value_len: usize,
) {
let key = utf16_to_string(key, key_len);
let value = utf16_to_string(value, value_len);
(*ctx).dict.after_dict.push(EzDictItem::new(key, value));
(*ctx).dict.sort_after_dict();
}
#[no_mangle]
pub unsafe extern "cdecl" fn ez_translate(
ctx: *mut EzContext,
text: *const u16,
text_len: usize,
out_text: *mut *const u8,
out_text_len: *mut usize,
) -> i32 {
let text = utf16_to_string(text, text_len);
let translated = (*ctx).translate(text.as_ref());
*out_text = translated.as_ptr();
*out_text_len = translated.len();
0
}
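// Illustrative sketch only (not part of the original crate): the expected call
// sequence from the host application is ez_init -> ez_translate -> ez_save ->
// ez_delete. The directory paths below are made up, and the UTF-16 buffers are
// built with str::encode_utf16 to match the *const u16 / len calling convention.
#[allow(dead_code)]
unsafe fn example_roundtrip() {
    let ez_dir: Vec<u16> = "C:\\eztrans".encode_utf16().collect();
    let ctx_dir: Vec<u16> = "C:\\eztrans-ctx".encode_utf16().collect();
    let ctx = ez_init(ez_dir.as_ptr(), ez_dir.len(), ctx_dir.as_ptr(), ctx_dir.len());
    if ctx.is_null() {
        return;
    }
    let text: Vec<u16> = "こんにちは".encode_utf16().collect();
    let mut out_ptr: *const u8 = std::ptr::null();
    let mut out_len: usize = 0;
    ez_translate(ctx, text.as_ptr(), text.len(), &mut out_ptr, &mut out_len);
    // The returned bytes point into the context's cache and stay valid until the
    // next call that mutates it.
    let translated = std::slice::from_raw_parts(out_ptr, out_len);
    println!("{}", String::from_utf8_lossy(translated));
    ez_save(ctx, ctx_dir.as_ptr(), ctx_dir.len());
    ez_delete(ctx);
}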
fn u16_slice_to_u8_slice(slice: &[u16]) -> &[u8] {
unsafe { std::slice::from_raw_parts(slice.as_ptr() as *const u8, slice.len() * 2) }
}
unsafe fn utf16_to_string<'a>(text: *const u16, len: usize) -> String {
let (text, _) = UTF_16LE
.decode_without_bom_handling(u16_slice_to_u8_slice(std::slice::from_raw_parts(text, len)));
text.into()
}
fn is_japanese(ch: char) -> bool {
let ch = ch as u32;
(ch >= 0x3000 && ch <= 0x30FF) || (ch >= 0x4E00 && ch <= 0x9FAF)
}
lib.rs | use encoding_rs::{EncoderResult, EUC_KR, SHIFT_JIS, UTF_16LE};
use eztrans_rs::{Container, EzTransLib};
use fxhash::FxHashMap;
use serde_derive::{Deserialize, Serialize};
use std::ffi::CStr;
use std::fs;
use std::path::Path;
use std::ptr::null_mut;
pub struct EzDictItem {
key: String,
value: String,
}
impl EzDictItem {
pub fn new(key: String, value: String) -> Self {
assert!(!key.is_empty());
Self { key, value }
}
pub fn apply(&self, text: &mut String) {
let mut prev_pos = 0;
while let Some(pos) = twoway::find_str(&text[prev_pos..], &self.key) {
text.replace_range(prev_pos + pos..prev_pos + pos + self.key.len(), &self.value);
prev_pos += pos + self.value.len(); // pos is relative to prev_pos; skip past the inserted value
}
}
#[inline]
pub fn key(&self) -> &str {
&self.key
}
#[inline]
pub fn value(&self) -> &str {
&self.value
}
}
#[test]
fn dict_item_test() {
let item = EzDictItem::new("あなた".into(), "아나타".into());
let mut foo = "あなた당신あなた".into();
item.apply(&mut foo);
assert_eq!(foo, "아나타당신아나타");
}
#[test]
#[should_panic]
fn dict_item_empty_key_test() {
let _item = EzDictItem::new("".into(), "123".into());
}
#[test]
fn dict_item_empty_value_test() {
let item = EzDictItem::new("123".into(), "".into());
let mut foo = "123def".into();
item.apply(&mut foo);
assert_eq!(foo, "def");
}
#[test]
fn dict_item_eq_kv_test() {
let item = EzDictItem::new("123".into(), "123".into());
let mut foo = "123def".into();
item.apply(&mut foo);
assert_eq!(foo, "123def");
}
#[derive(Serialize, Deserialize, Default)]
struct EzDict {
#[serde(default)]
sort: bool,
#[serde(alias = "BeforeDic")]
#[serde(with = "dict_items")]
#[serde(default)]
before_dict: Vec<EzDictItem>,
#[serde(alias = "AfterDic")]
#[serde(with = "dict_items")]
#[serde(default)]
after_dict: Vec<EzDictItem>,
}
impl EzDict {
pub fn sort_before_dict(&mut self) {
if !self.sort {
return;
}
self.before_dict
.sort_unstable_by(|l, r| l.key().cmp(r.key()));
}
pub fn sort_after_dict(&mut self) {
if !self.sort {
return;
}
self.after_dict
.sort_unstable_by(|l, r| l.key().cmp(r.key()));
}
pub fn sort(&mut self) {
self.sort_after_dict();
self.sort_before_dict();
}
}
mod dict_items {
use super::EzDictItem;
use serde::de::{MapAccess, Visitor};
use serde::ser::SerializeMap;
use serde::{Deserializer, Serializer};
use std::fmt;
pub fn serialize<S: Serializer>(items: &Vec<EzDictItem>, s: S) -> Result<S::Ok, S::Error> {
let mut map = s.serialize_map(Some(items.len()))?;
for item in items {
map.serialize_entry(item.key(), item.value())?;
}
map.end()
}
pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Vec<EzDictItem>, D::Error> {
struct ItemVisitor;
impl<'de> Visitor<'de> for ItemVisitor {
type Value = Vec<EzDictItem>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("key and value")
}
fn visit_map<M: MapAccess<'de>>(self, mut access: M) -> Result<Self::Value, M::Error> {
let mut ret = Vec::with_capacity(access.size_hint().unwrap_or(10));
while let Some((key, value)) = access.next_entry()? {
ret.push(EzDictItem::new(key, value));
}
Ok(ret)
}
}
d.deserialize_map(ItemVisitor)
}
}
pub struct EzContext {
lib: Container<EzTransLib<'static>>,
cache: FxHashMap<String, String>,
dict: EzDict,
encode_buffer: Vec<u8>,
string_buffer: String,
}
impl EzContext {
pub fn from_path(
lib: Container<EzTransLib<'static>>,
path: &Path,
) -> Result<Self, Box<dyn std::error::Error>> {
let cache_path = path.join("cache.msgpack");
let dict_path = path.join("userdic.yml");
let json_dict_path = path.join("userdic.json");
let mut cache = if cache_path.exists() {
rmp_serde::from_read(fs::File::open(cache_path)?)?
} else {
FxHashMap::default()
};
cache.insert(String::new(), String::new());
let mut dict = if dict_path.exists() {
serde_yaml::from_reader(fs::File::open(dict_path)?)?
} else if json_dict_path.exists() {
serde_json::from_reader(fs::File::open(json_dict_path)?)?
} else {
EzDict::default()
};
dict.sort();
Ok(Self {
lib,
cache,
dict,
encode_buffer: Vec::with_capacity(8192),
string_buffer: String::new(),
})
}
pub fn save_to(&self, path: &Path) -> Result<(), Box<dyn std::error::Error>> {
let cache_path = path.join("cache.msgpack");
let dict_path = path.join("userdic.yml");
use std::fs::write;
write(cache_path, rmp_serde::to_vec(&self.cache)?)?;
write(dict_path, serde_yaml::to_vec(&self.dict)?)?;
Ok(())
}
fn translate_impl(&mut self, text: &str) -> &str {
let dict = &mut self.dict;
let lib = &self.lib;
let buf = &mut self.encode_buffer;
let str_buf = &mut self.string_buffer;
self.cache.entry(text.into()).or_insert_with(move || {
str_buf.push_str(text);
let mut encoder = SHIFT_JIS.new_encoder();
let mut decoder = EUC_KR.new_decoder_without_bom_handling();
let max_buf_len = encoder
.max_buffer_length_from_utf8_without_replacement(str_buf.len())
.unwrap_or(0);
buf.reserve(max_buf_len + 1);
let (encoder_ret, _) =
encoder.encode_from_utf8_to_vec_without_replacement(&str_buf, buf, true);
buf.push(0);
assert_eq!(encoder_ret, EncoderResult::InputEmpty);
let translated =
unsafe { lib.translate(CStr::from_bytes_with_nul_unchecked(&buf[..])) };
let translated = translated.as_bytes();
buf.clear();
str_buf.clear();
let mut ret = String::with_capacity(
decoder
.max_utf8_buffer_length_without_replacement(translated.len())
.unwrap_or(0),
);
let (_decoder_ret, _) =
decoder.decode_to_string_without_replacement(translated, &mut ret, true);
for after in dict.after_dict.iter() {
after.apply(&mut ret);
}
ret
})
}
pub fn translate(&mut self, text: &str) -> &str {
if !self.cache.contains_key(text) {
let max_len = UTF_16LE
.new_decoder_without_bom_handling()
.max_utf8_buffer_length_without_replacement(text.len() * 2);
let mut ret = String::with_capacity(max_len.unwrap_or(text.len() * 3));
{
let mut text = text.into();
for before in self.dict.before_dict.iter() {
before.apply(&mut text);
}
let mut prev_pos = 0;
let mut is_in_japanese = is_japanese(text.chars().next().unwrap());
for (pos, ch) in text.char_indices() {
if is_japanese(ch) {
if !is_in_japanese {
ret.push_str(&text[prev_pos..pos]);
prev_pos = pos;
is_in_japanese = true;
}
} else {
if is_in_japanese {
let translated = self.translate_impl(&text[prev_pos..pos]);
ret.push_str(translated);
prev_pos = pos;
is_in_japanese = false;
}
}
}
if !is_in_japanese {
ret.push_str(&text[prev_pos..]);
} else {
let translated = self.translate_impl(&text[prev_pos..]);
ret.push_str(translated);
}
}
self.cache.insert(text.into(), ret);
}
self.cache.get(text).unwrap()
}
}
#[no_mangle]
pub unsafe extern "cdecl" fn ez_init(
ez_path: *const u16,
ez_path_len: usize,
ctx_path: *const u16,
ctx_path_len: usize,
) -> *mut EzContext {
let path = utf16_to_string(ez_path, ez_path_len);
let ctx_path = utf16_to_string(ctx_path, ctx_path_len);
let path = Path::new(&path);
let ctx_path = Path::new(&ctx_path);
eprintln!("Loading lib from {}", path.display());
let lib = match eztrans_rs::load_library(path.join("J2KEngine.dll")) {
Ok(lib) => lib,
Err(err) => {
eprintln!("EzTrans library loading failed: {:?}", err);
return null_mut();
}
};
let mut dat_dir = path.join("Dat").to_str().unwrap().to_string().into_bytes();
dat_dir.push(0);
lib.initialize(
CStr::from_bytes_with_nul_unchecked(b"CSUSER123455\0"),
CStr::from_bytes_with_nul_unchecked(&dat_dir[..]),
);
let ctx = match EzContext::from_path(lib, ctx_path) {
Ok(ctx) => ctx,
Err(err) => {
eprintln!("Loading context failed: {:?}", err);
return null_mut();
}
};
Box::into_raw(Box::new(ctx))
}
#[no_mangle]
pub unsafe extern "cdecl" fn ez_save(ctx: *mut EzContext, path: *const u16, path_len: usize) {
let path = utf16_to_string(path, path_len);
let path = Path::new(&path);
if let Err(err) = (*ctx).save_to(path) {
eprintln!("Save err: {:?}", err);
}
}
#[no_mangle]
pub unsafe extern "cdecl" fn ez_delete(ctx: *mut EzContext) {
(*ctx).lib.terminate();
let _ = Box::from_raw(ctx);
}
#[no_mangle]
pub unsafe extern "cdecl" fn ez_add_before_dict(
ctx: *mut EzContext,
key: *const u16,
key_len: usize,
value: *const u16,
value_len: usize,
) {
let key = utf16_to_string(key, key_len);
let value = utf16_to_string(value, value_len);
(*ctx).dict.before_dict.push(EzDictItem::new(key, value));
(*ctx).dict.sort_before_dict();
}
#[no_mangle]
pub unsafe extern "cdecl" fn ez_add_after_dict(
ctx: *mut EzContext,
key: *const u16,
key_len: usize,
value: *const u16,
value_len: usize,
) {
let key = utf16_to_string(key, key_len);
let value = utf16_to_string(value, value_len);
(*ctx).dict.after_dict.push(EzDictItem::new(key, value));
(*ctx).dict.sort_after_dict();
}
#[no_mangle]
pub unsafe extern "cdecl" fn ez_translate(
ctx: *mut EzContext,
text: *const u16,
text_len: usize,
out_text: *mut *const u8,
out_text_len: *mut usize,
) -> i32 {
let text = utf16_to_string(text, text_len);
let translated = (*ctx).translate(text.as_ref());
*out_text = translated.as_ptr();
*out_text_len = translated.len();
0
}
fn u16_slice_to_u8_slice(slice: &[u16]) -> &[u8] {
unsafe { std::slice::from_raw_parts(slice.as_ptr() as *const u8, slice.len() * 2) }
}
unsafe fn utf16_to_string<'a>(text: *const u16, len: usize) -> String {
let (text, _) = UTF_16LE
.decode_without_bom_handling(u16_slice_to_u8_slice(std::slice::from_raw_parts(text, len)));
text.into()
}
fn is_japanese(ch: char) -> bool {
let ch = ch as u32;
(ch >= 0x3000 && ch <= 0x30FF) || (ch >= 0x4E00 && ch <= 0x9FAF)
}
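// Sketch of an extra test (not in the original source) that pins down the two
// ranges is_japanese() accepts: U+3000..=U+30FF (CJK punctuation, hiragana,
// katakana) and U+4E00..=U+9FAF (common CJK ideographs).
#[test]
fn is_japanese_range_test() {
    assert!(is_japanese('あ'));
    assert!(is_japanese('ア'));
    assert!(is_japanese('漢'));
    assert!(!is_japanese('a'));
    assert!(!is_japanese('한'));
}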
bksv.go | package bksv
// Package for posting a {ComplainerProfile,Complaint} to BKSV's web form
// Bug 1: Edits to profile should call to maps to reparse the address; ignore what's in the form fields.
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httputil"
"net/url"
"strings"
"time"
"github.com/skypies/util/date"
"github.com/skypies/complaints/pkg/complaintdb"
"github.com/skypies/complaints/pkg/config"
)
// "Real" one : https://viewpoint.emsbk.com/sfo5
// Temp one : https://viewpoint.emsbk.com/sfo12
// Other Temp one : https://viewpoint.emsbk.com/sfo50
//https://viewpoint.emsbk.com/<sitename>?response=json
//const bksvHost = "complaints-us.emsbk.com"
const bksvHost = "viewpoint.emsbk.com"
const bksvPath = "/sfo5" + "?response=json" // response *must* be a GET param, not POST
// {{{ PopulateForm
func PopulateForm(c complaintdb.Complaint, submitkey string) url.Values {
first,last := c.Profile.SplitName()
if c.Activity == "" { c.Activity = "Loud noise" }
address1 := ""
addr := c.Profile.GetStructuredAddress()
if addr.Street == "" {
address1 = c.Profile.Address // default to the raw string, if we don't have a structured one
} else if addr.Number == "" {
address1 = addr.Street
} else {
address1 = addr.Number + " " + addr.Street
}
browser_version := c.Browser.Version
if (len(browser_version) > 50) {
browser_version = browser_version[0:49]
}
getLoudVal := func(in int) string {
loudVals := map[int]string{1: "Loud", 2:"Very Loud", 3:"Excessively Loud"}
if val, exists := loudVals[in]; exists {
return val
}
return "Loud"
}
vals := url.Values{
"response": {"json"}, // Must always set this as a GET param
"contactmethod": {"App"},
"apiKey": {config.Get("bksv.apiKey")},
"accept_privacy": {"Y"},
"caller_code": {c.Profile.CallerCode},
"name": {first},
"surname": {last},
"address1": {address1},
"address2": {""},
"zipcode": {addr.Zip},
"city": {addr.City},
"state": {addr.State},
"email": {c.Profile.EmailAddress},
"airports": {"KSFO"}, // KOAK, KSJC, KSAN
"month": {date.InPdt(c.Timestamp).Format("1")},
"day": {date.InPdt(c.Timestamp).Format("2")},
"year": {date.InPdt(c.Timestamp).Format("2006")},
"hour": {date.InPdt(c.Timestamp).Format("15")},
"min": {date.InPdt(c.Timestamp).Format("4")},
"sec": {date.InPdt(c.Timestamp).Format("5")},
"aircrafttype": {"J"},
"aircraftcategory": {"J"},
"activity_type": {"Other"}, // perhaps map c.Activity to something ?
"event_type": {getLoudVal(c.Loudness)}, // as per 2023.03.16
"adflag": {"U"},
"comments": {c.Description},
"responserequired": {"N"},
"enquirytype": {"C"},
"submit": {"Submit complaint"},
//"submitkey": {submitkey},
"nowebtrak": {"1"},
"defaulttime": {"0"},
"webtraklinkback": {""},
"title": {""},
"homephone": {""},
"workphone": {""},
"cellphone": {""},
"browser_name": {c.Browser.Name},
"browser_version": {browser_version},
"browser_vendor": {c.Browser.Vendor},
"browser_uuid": {c.Browser.UUID},
"browser_platform": {c.Browser.Platform},
}
if c.AircraftOverhead.FlightNumber != "" {
vals.Add("acid", c.AircraftOverhead.Callsign)
vals.Add("aacode", c.AircraftOverhead.Id2)
vals.Add("tailnumber", c.AircraftOverhead.Registration)
//vals.Add("adflag", "??") // Operation type (A, D or O for Arr, Dept or Overflight)
//vals.Add("beacon", "??") // Squawk SSR code (eg 2100)
}
return vals
}
// }}}
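// Illustrative sketch, not part of the original file: a quick way to eyeball
// what PopulateForm would actually POST for a given complaint. The helper name
// and the printed fields are arbitrary choices for the example.
func debugDumpForm(c complaintdb.Complaint) {
	vals := PopulateForm(c, "")
	// url.Values.Encode() sorts keys and percent-escapes values, i.e. the same
	// body that PostComplaint below sends.
	fmt.Printf("encoded form body:\n%s\n", vals.Encode())
	for _, k := range []string{"airports", "event_type", "year", "month", "day", "hour", "min"} {
		fmt.Printf("  %-12s = %q\n", k, vals.Get(k))
	}
}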
// {{{ PostComplaint
// https://complaints-staging.bksv.com/sfo2?json=1&resp=json
// {"result":"1",
// "title":"Complaint Received",
// "body":"Thank you. We have received your complaint."}
func PostComplaint(client *http.Client, c complaintdb.Complaint) (*complaintdb.Submission, error) {
// Initialize a new submission object, inheriting from previous
s := complaintdb.Submission{
Attempts: c.Submission.Attempts + 1,
Log: c.Submission.Log+fmt.Sprintf("\n--------=={ PostComplaint @ %s }==-\n", time.Now()),
Key: c.Submission.Key, // We're now keyless, should prob strip this out
T: time.Now().UTC(),
Outcome: complaintdb.SubmissionFailed, // Be pessimistic right up until the end
}
// We used to have to fetch a unique key (which lived in the form),
// that we'd need to submit with the rest of the complaint; that
// prevented dupes on their end. But the new version skips that
// requirement for API based submissions like ours, so we're
// keyless now.
s.Log += fmt.Sprintf("----{ time: %s }----\n --{ keyless submission }--\n", s.T)
vals := PopulateForm(c, "")
s.Log += "Submitting these vals:-\n"
for k,v := range vals { s.Log += fmt.Sprintf(" * %-20.20s: %v\n", k, v) }
s.Log += "\n"
// resp,err := client.PostForm("https://"+bksvHost+bksvPath, vals)
req,_ := http.NewRequest("POST", "https://"+bksvHost+bksvPath, strings.NewReader(vals.Encode()))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded") // This is important
reqBytes,_ := httputil.DumpRequestOut(req,true)
s.Log += "Full req to ["+bksvHost+"]:-\n--\n"+string(reqBytes)+"\n--\n\n"
resp,err := client.Do(req)
s.D = time.Since(s.T)
if err != nil {
if strings.Contains(err.Error(), "DEADLINE_EXCEEDED") {
s.Outcome = complaintdb.SubmissionTimeout
}
s.Log += fmt.Sprintf("ComplaintPOST: Posting error (dur=%s): %v\n", s.D, err)
return &s,err
}
respBytes,_ := httputil.DumpResponse(resp,true)
s.Log += "Full resp:-\n--\n"+string(respBytes)+"\n--\n\n"
defer resp.Body.Close()
body,_ := ioutil.ReadAll(resp.Body)
s.Log += fmt.Sprintf("ComplaintPOST: HTTP response '%s'\n", resp.Status)
s.Response = []byte(body)
if resp.StatusCode >= 400 {
s.Log += fmt.Sprintf("ComplaintPOST: HTTP Body:-\n%s\n--\n", body)
return &s,fmt.Errorf("ComplaintPOST: HTTP err %s", resp.Status)
}
var jsonMap map[string]interface{}
if err := json.Unmarshal([]byte(body), &jsonMap); err != nil {
s.Log += fmt.Sprintf("ComplaintPOST: JSON unmarshal '%v'\nBody:-\n%s\n--\n", err, body)
return &s,fmt.Errorf("ComplaintPOST: JSON unmarshal %v", err)
/* A few times, the remote site failed to send JSON responses, and sent HTML instead. This
* will work in that case.
if !regexp.MustCompile(`(?i:received your complaint)`).MatchString(string(body)) {
debug += fmt.Sprintf("BKSV body ...\n%s\n------\n", string(body))
return debug,fmt.Errorf("Returned response did not say 'received your complaint'")
} else {
debug += "Success !\n"+string(body)
}
*/
}
indentedBytes,_ := json.MarshalIndent(jsonMap, "", " ")
s.Log += "\n-- JsonMap:-\n"+string(indentedBytes)+"\n--\n"
/* on success ...
-- JsonMap:-
{
"body": "Thank you, your submission has been received. Would you like to save these details for next time?",
"receipt_key": "adasdsdadsdasds786dsa87d6as87d6as",
"result": "1",
"title": "Submission Received"
}
Or more recently,
-- JsonMap:-
{
"body": "Thank you, your submission has been received. Would you like to save these details for next time?",
"complaint_receipt_keys": [
"b85409b1152840d6d149e721cfda6eb639b05979"
],
"receipt_key": null,
"result": "1",
"title": "Submission Received"
}
*/
v := jsonMap["result"];
if v == nil {
s.Log += fmt.Sprintf("ComplaintPOST: json no 'result'\n")
return &s,fmt.Errorf("ComplaintPOST: jsonmap had no 'result'")
}
result := v.(string)
if result != "1" {
s.Outcome = complaintdb.SubmissionRejected
s.Log += fmt.Sprintf("Json result not '1'\n")
return &s,fmt.Errorf("ComplaintPOST: result='%s'", result)
}
// Extract the foreign key for this complaint
found := false
if v = jsonMap["receipt_key"]; v != nil {
s.Key = fmt.Sprintf("%s", jsonMap["receipt_key"].(string))
s.Log += "Json Success !\n"
s.Outcome = complaintdb.SubmissionAccepted
found = true
} else if r := jsonMap["complaint_receipt_keys"]; r != nil {
if v, isSlice := r.([]interface{}); isSlice {
if len(v) > 0 {
s.Key = fmt.Sprintf("%s", v[0].(string))
s.Log += "Json Success !\n"
s.Outcome = complaintdb.SubmissionAccepted
found = true
}
}
}
if ! found {
s.Log += fmt.Sprintf("ComplaintPOST: json no 'receipt_key', or 'complaint_receipt_keys[]'\n")
return &s,fmt.Errorf("ComplaintPOST: jsonmap had no 'receipt_key'")
}
return &s,nil
}
// }}}
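// Sketch only (not in the original source): roughly how a caller might drive
// PostComplaint and branch on the Submission it returns. Where the complaint
// and the *http.Client come from is outside the scope of this example.
func submitOne(client *http.Client, c complaintdb.Complaint) {
	sub, err := PostComplaint(client, c)
	if err != nil {
		fmt.Printf("submission failed (outcome=%v, attempt %d): %v\n", sub.Outcome, sub.Attempts, err)
		return
	}
	if sub.Outcome == complaintdb.SubmissionAccepted {
		fmt.Printf("accepted: BKSV receipt key %q after %v\n", sub.Key, sub.D)
	}
}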
// {{{ Notes
/* These POST fields were sent by browser, for happy success
nowebtrak:1
submitkey:4aef9c8831919524ec35ae8af8ff25ba
defaulttime:0
webtraklinkback:
title:
name:Adam
surname:Worrall
address1:1 Some Drive
address2:
city:Scotts Valley
state:CA
homephone:
workphone:
cellphone:
email:[email protected]
airports:KSFO
month:10
day:2
year:2015
hour:20
min:16
aircrafttype:Jet
eventtype:Loud noise
comments:Blah
responserequired:N
enquirytype:C
submit:Submit complaint
*/
/* BKSV:
You can call it this way
https://complaints-staging.bksv.com/sfo2?json=1
to get a json object of all the form field definitions we accept. That
will tell you what fields we require to be set and also gives you
handy things like the set of allowed disturbance types, event types
etc. NB: I haven't yet configured it to match the SFO system values
for these but that is a simple config change I can do as soon as I
have the information.
{
"airports": "{ \"KSFO\": \"San Francisco International Airport (SFO)\" , \"KSAN\": \"San Diego International Airport (SAN)\", \"KOAK\": \"Oakland International Airport (OAK)\", \"KSJC\": \"Mineta San José International Airport (SJC)\" }",
"locale": "en_AU",
"displayAreaCodes": "0",
"submitKey": "797eaa0e960b5e8848ce6785950dfd3c",
"hours": [
"12 AM",
"1 AM",
"2 AM",
"3 AM",
"4 AM",
"5 AM",
"6 AM",
"7 AM",
"8 AM",
"9 AM",
"10 AM",
"11 AM",
"12 PM",
"1 PM",
"2 PM",
"3 PM",
"4 PM",
"5 PM",
"6 PM",
"7 PM",
"8 PM",
"9 PM",
"10 PM",
"11 PM"
],
"atLeastOneContact": true,
"field_defs": {
"address2": {
"maxlength": 124,
"required": false,
"scope": "profile",
"type": "text",
"label": "Address (line 2)"
},
"webtrak": {
"maxlength": 0,
"required": false,
"scope": "ignore",
"type": "ignore",
"label": "Information from WebTrak"
},
"email": {
"maxlength": 64,
"required": false,
"scope": "profile",
"type": "email",
"label": "Email"
},
"text2": {
"maxlength": 0,
"required": false,
"scope": "about",
"type": "content",
"label": ""
},
"state": {
"maxlength": 100,
"required": true,
"scope": "profile",
"type": "list",
"label": "State"
},
"responserequired": {
"maxlength": 0,
"required": true,
"scope": "profile",
"type": "boolean",
"label": "Would you like to be contacted by one of our staff?"
},
"enquirytype": {
"maxlength": 0,
"required": true,
"scope": "complaint",
"type": "list",
"label": "Enquiry type"
},
"time": {
"maxlength": 0,
"required": true,
"scope": "complaint",
"type": "datetime",
"label": "Disturbance time"
},
"workphone": {
"maxlength": 62,
"required": false,
"scope": "profile",
"type": "tel",
"label": "Work phone"
},
"airports": {
"maxlength": 0,
"required": true,
"scope": "complaint",
"type": "list",
"label": "Airport"
},
"contact": {
"maxlength": 0,
"required": false,
"scope": "ignore",
"type": "ignore",
"label": "Contact number"
},
"date": {
"maxlength": 0,
"required": true,
"scope": "complaint",
"type": "datetime",
"label": "Disturbance date"
},
"text1": {
"maxlength": 0,
"required": false,
"scope": "about",
"type": "content",
"label": ""
},
"eventtype": {
"maxlength": 0,
"required": false,
"scope": "complaint",
"type": "list",
"label": "Disturbance type"
},
"name": {
"maxlength": 62,
"required": true,
"scope": "profile",
"type": "text",
"label": "First name"
},
"city": {
"maxlength": 46,
"required": true,
"scope": "profile",
"type": "text",
"label": "City"
},
"address1": {
"maxlength": 124,
"required": true,
"scope": "profile",
"type": "text",
"label": "Address"
},
"cellphone": {
"maxlength": 62,
"required": false,
"scope": "profile",
"type": "tel",
"label": "Mobile phone"
},
"aircrafttype": {
"maxlength": 0,
"required": false,
"scope": "complaint",
"type": "list",
"label": "Aircraft type"
},
"comments": {
"maxlength": 10000,
"required": false,
"scope": "complaint",
"type": "textarea",
"label": "Please give details"
},
"title": {
"maxlength": 30,
"required": false,
"scope": "profile",
"type": "list",
"label": "Title"
},
"surname": {
"maxlength": 62,
"required": true,
"scope": "profile",
"type": "text",
"label": "Last name"
},
"homephone": {
"maxlength": 62,
"required": false,
"scope": "profile",
"type": "tel",
"label": "Home phone"
}
},
"years": {
"2015": "2015",
"2014": 2014
},
"dateFormat": [
"month",
"day",
"year"
],
"strings": {
"months/short/5": "Jun",
"labels/month": "Month",
"complaintsform/lists/acTypes": "Jet,Propeller,Helicopter,Various,Unknown",
"months/short/3": "Apr",
"complaintsform/lists/activity_types": "Indoors,Outdoors,Watching TV,Sleeping,Working,Other",
"labels/hour": "Hour",
"labels/year": "Year",
"months/short/4": "May",
"months/short/9": "Oct",
"months/short/2": "Mar",
"complaintsform/app/complaintReceived": "Complaint received!",
"complaintsform/lists/event_types": "Loud noise,Overflight,Low flying,Early turn,Go-around,Too frequent,Helicopter operations,Engine run-up,Ground noise,Other",
"complaintsform/blocks/submitComplaint": "Submit complaint",
"months/short/7": "Aug",
"complaintsform/blocks/pleaseFillIn": "Please fill in",
"timeOfDay/1": "PM",
"complaintsform/blocks/tooShort": "Value is too short",
"complaintsform/lists/acModes_internal": "",
"complaintsform/blocks/required": "(required)",
"months/short/8": "Sep",
"complaintsform/lists/acModes": "Arrival,Departure,Overflight,Unknown",
"labels/minute": "Min",
"timeOfDay/0": "AM",
"months/short/6": "Jul",
"complaintsform/lists/acTypes_internal": "",
"labels/yes": "Yes",
"months/short/10": "Nov",
"months/short/1": "Feb",
"complaintsform/lists/titles": "Mr,Mrs,Miss,Ms,Dr",
"complaintsform/lists/contact_method": "Letter,Email,Telephone",
"labels/no": "No",
"complaintsform/blocks/errors": "There are some problems. Please correct the mistakes and submit the form again.",
"labels/day": "Day",
"months/short/0": "Jan",
"lists/state": "CA,AZ",
"months/short/11": "Dec"
},
"fields": [
"text1",
"title",
"name",
"surname",
"address1",
"address2",
"city",
"state",
"contact",
"airports",
"text2",
"date",
"time",
"webtrak",
"aircrafttype",
"eventtype",
"comments",
"responserequired",
"enquirytype",
"homephone",
"workphone",
"cellphone",
"email"
]
}
*/
// }}}
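// Sketch only: the notes above mention that fetching <site>?json=1 returns the
// form field definitions as JSON. Something along these lines could retrieve
// them; the staging URL is copied from the comment and may well be stale.
func fetchFieldDefs(client *http.Client) (map[string]interface{}, error) {
	resp, err := client.Get("https://complaints-staging.bksv.com/sfo2?json=1")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	var defs map[string]interface{}
	if err := json.Unmarshal(body, &defs); err != nil {
		return nil, err
	}
	return defs, nil
}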
// {{{ -------------------------={ E N D }=----------------------------------
// Local variables:
// folded-file: t
// end:
// }}}
bksv.go | package bksv
// Package for posting a {ComplainerProfile,Complaint} to BKSV's web form
// Bug 1: Edits to profile should call to maps to reparse the address; ignore what's in the form fields.
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httputil"
"net/url"
"strings"
"time"
"github.com/skypies/util/date"
"github.com/skypies/complaints/pkg/complaintdb"
"github.com/skypies/complaints/pkg/config"
)
// "Real" one : https://viewpoint.emsbk.com/sfo5
// Temp one : https://viewpoint.emsbk.com/sfo12
// Other Temp one : https://viewpoint.emsbk.com/sfo50
//https://viewpoint.emsbk.com/<sitename>?response=json
//const bksvHost = "complaints-us.emsbk.com"
const bksvHost = "viewpoint.emsbk.com"
const bksvPath = "/sfo5" + "?response=json" // response *must* be a GET param, not POST
// {{{ PopulateForm
func PopulateForm(c complaintdb.Complaint, submitkey string) url.Values {
first,last := c.Profile.SplitName()
if c.Activity == "" { c.Activity = "Loud noise" }
address1 := ""
addr := c.Profile.GetStructuredAddress()
if addr.Street == "" {
address1 = c.Profile.Address // default to the raw string, if we don't have a structured one
} else if addr.Number == "" {
address1 = addr.Street
} else {
address1 = addr.Number + " " + addr.Street
}
browser_version := c.Browser.Version
if (len(browser_version) > 50) {
browser_version = browser_version[0:49]
}
getLoudVal := func(in int) string {
loudVals := map[int]string{1: "Loud", 2:"Very Loud", 3:"Excessively Loud"}
if val, exists := loudVals[in]; exists {
return val
}
return "Loud"
}
vals := url.Values{
"response": {"json"}, // Must always set this as a GET param
"contactmethod": {"App"},
"apiKey": {config.Get("bksv.apiKey")},
"accept_privacy": {"Y"},
"caller_code": {c.Profile.CallerCode},
"name": {first},
"surname": {last},
"address1": {address1},
"address2": {""},
"zipcode": {addr.Zip},
"city": {addr.City},
"state": {addr.State},
"email": {c.Profile.EmailAddress},
"airports": {"KSFO"}, // KOAK, KSJC, KSAN
"month": {date.InPdt(c.Timestamp).Format("1")},
"day": {date.InPdt(c.Timestamp).Format("2")},
"year": {date.InPdt(c.Timestamp).Format("2006")},
"hour": {date.InPdt(c.Timestamp).Format("15")},
"min": {date.InPdt(c.Timestamp).Format("4")},
"sec": {date.InPdt(c.Timestamp).Format("5")},
"aircrafttype": {"J"},
"aircraftcategory": {"J"},
"activity_type": {"Other"}, // perhaps map c.Activity to something ?
"event_type": {getLoudVal(c.Loudness)}, // as per 2023.03.16
"adflag": {"U"},
"comments": {c.Description},
"responserequired": {"N"},
"enquirytype": {"C"},
"submit": {"Submit complaint"},
//"submitkey": {submitkey},
"nowebtrak": {"1"},
"defaulttime": {"0"},
"webtraklinkback": {""},
"title": {""},
"homephone": {""},
"workphone": {""},
"cellphone": {""},
"browser_name": {c.Browser.Name},
"browser_version": {browser_version},
"browser_vendor": {c.Browser.Vendor},
"browser_uuid": {c.Browser.UUID},
"browser_platform": {c.Browser.Platform},
}
if c.AircraftOverhead.FlightNumber != "" {
vals.Add("acid", c.AircraftOverhead.Callsign)
vals.Add("aacode", c.AircraftOverhead.Id2)
vals.Add("tailnumber", c.AircraftOverhead.Registration)
//vals.Add("adflag", "??") // Operation type (A, D or O for Arr, Dept or Overflight)
//vals.Add("beacon", "??") // Squawk SSR code (eg 2100)
}
return vals
}
// }}}
// {{{ PostComplaint
// https://complaints-staging.bksv.com/sfo2?json=1&resp=json
// {"result":"1",
// "title":"Complaint Received",
// "body":"Thank you. We have received your complaint."}
func PostComplaint(client *http.Client, c complaintdb.Complaint) (*complaintdb.Submission, error) {
// Initialize a new submission object, inheriting from previous
s := complaintdb.Submission{
Attempts: c.Submission.Attempts + 1,
Log: c.Submission.Log+fmt.Sprintf("\n--------=={ PostComplaint @ %s }==-\n", time.Now()),
Key: c.Submission.Key, // We're now keyless, should prob strip this out
T: time.Now().UTC(),
Outcome: complaintdb.SubmissionFailed, // Be pessimistic right up until the end
}
// We used to have to fetch a unique key (which lived in the form),
// that we'd need to submit with the rest of the complaint; that
// prevented dupes on their end. But the new version skips that
// requirement for API based submissions like ours, so we're
// keyless now.
s.Log += fmt.Sprintf("----{ time: %s }----\n --{ keyless submission }--\n", s.T)
vals := PopulateForm(c, "")
s.Log += "Submitting these vals:-\n"
for k,v := range vals { s.Log += fmt.Sprintf(" * %-20.20s: %v\n", k, v) }
s.Log += "\n"
// resp,err := client.PostForm("https://"+bksvHost+bksvPath, vals)
req,_ := http.NewRequest("POST", "https://"+bksvHost+bksvPath, strings.NewReader(vals.Encode()))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded") // This is important
reqBytes,_ := httputil.DumpRequestOut(req,true)
s.Log += "Full req to ["+bksvHost+"]:-\n--\n"+string(reqBytes)+"\n--\n\n"
resp,err := client.Do(req)
s.D = time.Since(s.T)
if err != nil {
if strings.Contains(err.Error(), "DEADLINE_EXCEEDED") {
s.Outcome = complaintdb.SubmissionTimeout
}
s.Log += fmt.Sprintf("ComplaintPOST: Posting error (dur=%s): %v\n", s.D, err)
return &s,err
}
respBytes,_ := httputil.DumpResponse(resp,true)
s.Log += "Full resp:-\n--\n"+string(respBytes)+"\n--\n\n"
defer resp.Body.Close()
body,_ := ioutil.ReadAll(resp.Body)
s.Log += fmt.Sprintf("ComplaintPOST: HTTP response '%s'\n", resp.Status)
s.Response = []byte(body)
if resp.StatusCode >= 400 {
s.Log += fmt.Sprintf("ComplaintPOST: HTTP Body:-\n%s\n--\n", body)
return &s,fmt.Errorf("ComplaintPOST: HTTP err %s", resp.Status)
}
var jsonMap map[string]interface{}
if err := json.Unmarshal([]byte(body), &jsonMap); err != nil {
s.Log += fmt.Sprintf("ComplaintPOST: JSON unmarshal '%v'\nBody:-\n%s\n--\n", err, body)
return &s,fmt.Errorf("ComplaintPOST: JSON unmarshal %v", err)
/* A few times, the remote site failed to send JSON responses, and sent HTML instead. This
* will work in that case.
if !regexp.MustCompile(`(?i:received your complaint)`).MatchString(string(body)) {
debug += fmt.Sprintf("BKSV body ...\n%s\n------\n", string(body))
return debug,fmt.Errorf("Returned response did not say 'received your complaint'")
} else {
debug += "Success !\n"+string(body)
}
*/
}
indentedBytes,_ := json.MarshalIndent(jsonMap, "", " ")
s.Log += "\n-- JsonMap:-\n"+string(indentedBytes)+"\n--\n"
/* on success ...
-- JsonMap:-
{
"body": "Thank you, your submission has been received. Would you like to save these details for next time?",
"receipt_key": "adasdsdadsdasds786dsa87d6as87d6as",
"result": "1",
"title": "Submission Received"
}
Or more recently,
-- JsonMap:-
{
"body": "Thank you, your submission has been received. Would you like to save these details for next time?",
"complaint_receipt_keys": [
"b85409b1152840d6d149e721cfda6eb639b05979"
],
"receipt_key": null,
"result": "1",
"title": "Submission Received"
}
*/
v := jsonMap["result"];
if v == nil {
s.Log += fmt.Sprintf("ComplaintPOST: json no 'result'\n")
return &s,fmt.Errorf("ComplaintPOST: jsonmap had no 'result'")
}
result := v.(string)
if result != "1" {
s.Outcome = complaintdb.SubmissionRejected
s.Log += fmt.Sprintf("Json result not '1'\n")
return &s,fmt.Errorf("ComplaintPOST: result='%s'", result)
}
// Extract the foreign key for this complaint
found := false
if v = jsonMap["receipt_key"]; v != nil {
s.Key = fmt.Sprintf("%s", jsonMap["receipt_key"].(string))
s.Log += "Json Success !\n"
s.Outcome = complaintdb.SubmissionAccepted
found = true
} else if r := jsonMap["complaint_receipt_keys"]; r != nil {
if v, isSlice := r.([]interface{}); isSlice {
if len(v) > 0 {
s.Key = fmt.Sprintf("%s", v[0].(string))
s.Log += "Json Success !\n"
s.Outcome = complaintdb.SubmissionAccepted
found = true
}
}
}
if ! found {
s.Log += fmt.Sprintf("ComplaintPOST: json no 'receipt_key', or 'complaint_receipt_keys[]'\n")
return &s,fmt.Errorf("ComplaintPOST: jsonmap had no 'receipt_key'")
}
return &s,nil
}
// }}}
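// Sketch only (not in the original file): one plausible retry wrapper around
// PostComplaint, keyed off the SubmissionTimeout outcome set above. The retry
// count and backoff are made-up numbers, not anything BKSV prescribes.
func postWithRetry(client *http.Client, c complaintdb.Complaint) (*complaintdb.Submission, error) {
	var sub *complaintdb.Submission
	var err error
	for attempt := 0; attempt < 3; attempt++ {
		sub, err = PostComplaint(client, c)
		if err == nil || sub.Outcome != complaintdb.SubmissionTimeout {
			return sub, err
		}
		// Carry the submission log forward so the next attempt increments Attempts.
		c.Submission = *sub
		time.Sleep(time.Duration(attempt+1) * 5 * time.Second)
	}
	return sub, err
}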
// {{{ Notes
/* These POST fields were sent by browser, for happy success
nowebtrak:1
submitkey:4aef9c8831919524ec35ae8af8ff25ba
defaulttime:0
webtraklinkback:
title:
name:Adam
surname:Worrall
address1:1 Some Drive
address2:
city:Scotts Valley
state:CA
homephone:
workphone:
cellphone:
email:[email protected]
airports:KSFO
month:10
day:2
year:2015
hour:20
min:16
aircrafttype:Jet
eventtype:Loud noise
comments:Blah
responserequired:N
enquirytype:C
submit:Submit complaint
*/
/* BKSV:
You can call it this way
https://complaints-staging.bksv.com/sfo2?json=1
to get a json object of all the form field definitions we accept. That
will tell you what fields we require to be set and also gives you
handy things like the set of allowed disturbance types, event types
etc. NB: I haven't yet configured it to match the SFO system values
for these but that is a simple config change I can do as soon as I
have the information.
{
"airports": "{ \"KSFO\": \"San Francisco International Airport (SFO)\" , \"KSAN\": \"San Diego International Airport (SAN)\", \"KOAK\": \"Oakland International Airport (OAK)\", \"KSJC\": \"Mineta San José International Airport (SJC)\" }",
"locale": "en_AU",
"displayAreaCodes": "0",
"submitKey": "797eaa0e960b5e8848ce6785950dfd3c",
"hours": [
"12 AM",
"1 AM",
"2 AM",
"3 AM",
"4 AM",
"5 AM",
"6 AM",
"7 AM",
"8 AM",
"9 AM",
"10 AM",
"11 AM",
"12 PM",
"1 PM",
"2 PM",
"3 PM",
"4 PM",
"5 PM",
"6 PM",
"7 PM",
"8 PM",
"9 PM",
"10 PM",
"11 PM"
],
"atLeastOneContact": true,
"field_defs": {
"address2": {
"maxlength": 124,
"required": false,
"scope": "profile",
"type": "text",
"label": "Address (line 2)"
},
"webtrak": {
"maxlength": 0,
"required": false,
"scope": "ignore",
"type": "ignore",
"label": "Information from WebTrak"
},
"email": {
"maxlength": 64,
"required": false,
"scope": "profile",
"type": "email",
"label": "Email"
},
"text2": {
"maxlength": 0,
"required": false,
"scope": "about",
"type": "content",
"label": ""
},
"state": {
"maxlength": 100,
"required": true,
"scope": "profile",
"type": "list",
"label": "State"
},
"responserequired": {
"maxlength": 0,
"required": true,
"scope": "profile",
"type": "boolean",
"label": "Would you like to be contacted by one of our staff?"
},
"enquirytype": {
"maxlength": 0,
"required": true,
"scope": "complaint",
"type": "list",
"label": "Enquiry type"
},
"time": {
"maxlength": 0,
"required": true,
"scope": "complaint",
"type": "datetime",
"label": "Disturbance time"
},
"workphone": {
"maxlength": 62,
"required": false,
"scope": "profile",
"type": "tel",
"label": "Work phone"
},
"airports": {
"maxlength": 0,
"required": true,
"scope": "complaint",
"type": "list",
"label": "Airport"
},
"contact": {
"maxlength": 0,
"required": false,
"scope": "ignore",
"type": "ignore",
"label": "Contact number"
},
"date": {
"maxlength": 0,
"required": true,
"scope": "complaint",
"type": "datetime",
"label": "Disturbance date"
},
"text1": {
"maxlength": 0,
"required": false,
"scope": "about",
"type": "content",
"label": ""
},
"eventtype": {
"maxlength": 0,
"required": false,
"scope": "complaint",
"type": "list",
"label": "Disturbance type"
},
"name": {
"maxlength": 62,
"required": true,
"scope": "profile",
"type": "text",
"label": "First name"
},
"city": {
"maxlength": 46,
"required": true,
"scope": "profile",
"type": "text",
"label": "City"
},
"address1": {
"maxlength": 124,
"required": true,
"scope": "profile",
"type": "text",
"label": "Address"
},
"cellphone": {
"maxlength": 62,
"required": false,
"scope": "profile",
"type": "tel",
"label": "Mobile phone"
},
"aircrafttype": {
"maxlength": 0,
"required": false,
"scope": "complaint",
"type": "list",
"label": "Aircraft type"
},
"comments": {
"maxlength": 10000,
"required": false,
"scope": "complaint",
"type": "textarea",
"label": "Please give details"
},
"title": {
"maxlength": 30,
"required": false,
"scope": "profile",
"type": "list",
"label": "Title"
},
"surname": {
"maxlength": 62,
"required": true,
"scope": "profile",
"type": "text",
"label": "Last name"
},
"homephone": {
"maxlength": 62,
"required": false,
"scope": "profile",
"type": "tel",
"label": "Home phone"
}
},
"years": {
"2015": "2015",
"2014": 2014
},
"dateFormat": [
"month",
"day",
"year"
],
"strings": {
"months/short/5": "Jun",
"labels/month": "Month",
"complaintsform/lists/acTypes": "Jet,Propeller,Helicopter,Various,Unknown",
"months/short/3": "Apr",
"complaintsform/lists/activity_types": "Indoors,Outdoors,Watching TV,Sleeping,Working,Other",
"labels/hour": "Hour",
"labels/year": "Year",
"months/short/4": "May",
"months/short/9": "Oct",
"months/short/2": "Mar",
"complaintsform/app/complaintReceived": "Complaint received!",
"complaintsform/lists/event_types": "Loud noise,Overflight,Low flying,Early turn,Go-around,Too frequent,Helicopter operations,Engine run-up,Ground noise,Other",
"complaintsform/blocks/submitComplaint": "Submit complaint",
"months/short/7": "Aug",
"complaintsform/blocks/pleaseFillIn": "Please fill in",
"timeOfDay/1": "PM",
"complaintsform/blocks/tooShort": "Value is too short",
"complaintsform/lists/acModes_internal": "",
"complaintsform/blocks/required": "(required)",
"months/short/8": "Sep",
"complaintsform/lists/acModes": "Arrival,Departure,Overflight,Unknown",
"labels/minute": "Min",
"timeOfDay/0": "AM",
"months/short/6": "Jul",
"complaintsform/lists/acTypes_internal": "",
"labels/yes": "Yes",
"months/short/10": "Nov",
"months/short/1": "Feb",
"complaintsform/lists/titles": "Mr,Mrs,Miss,Ms,Dr",
"complaintsform/lists/contact_method": "Letter,Email,Telephone",
"labels/no": "No",
"complaintsform/blocks/errors": "There are some problems. Please correct the mistakes and submit the form again.",
"labels/day": "Day",
"months/short/0": "Jan",
"lists/state": "CA,AZ",
"months/short/11": "Dec"
},
"fields": [
"text1",
"title",
"name",
"surname",
"address1",
"address2",
"city",
"state",
"contact",
"airports",
"text2",
"date",
"time",
"webtrak",
"aircrafttype",
"eventtype",
"comments",
"responserequired",
"enquirytype",
"homephone",
"workphone",
"cellphone",
"email"
]
}
*/
// }}}
// {{{ -------------------------={ E N D }=----------------------------------
// Local variables:
// folded-file: t
// end:
// }}}
bksv.go | package bksv
// Package for posting a {ComplainerProfile,Complaint} to BKSV's web form
// Bug 1: Edits to profile should call to maps to reparse the address; ignore what's in the form fields.
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httputil"
"net/url"
"strings"
"time"
"github.com/skypies/util/date"
"github.com/skypies/complaints/pkg/complaintdb"
"github.com/skypies/complaints/pkg/config"
)
// "Real" one : https://viewpoint.emsbk.com/sfo5
// Temp one : https://viewpoint.emsbk.com/sfo12
// Other Temp one : https://viewpoint.emsbk.com/sfo50
//https://viewpoint.emsbk.com/<sitename>?response=json
//const bksvHost = "complaints-us.emsbk.com"
const bksvHost = "viewpoint.emsbk.com"
const bksvPath = "/sfo5" + "?response=json" // response *must* be a GET param, not POST
// {{{ PopulateForm
func PopulateForm(c complaintdb.Complaint, submitkey string) url.Values {
first,last := c.Profile.SplitName()
if c.Activity == "" { c.Activity = "Loud noise" }
address1 := ""
addr := c.Profile.GetStructuredAddress()
if addr.Street == "" {
address1 = c.Profile.Address // default to the raw string, if we don't have a structured one
} else if addr.Number == "" {
address1 = addr.Street
} else {
address1 = addr.Number + " " + addr.Street
}
browser_version := c.Browser.Version
if (len(browser_version) > 50) {
browser_version = browser_version[0:49]
}
getLoudVal := func(in int) string {
loudVals := map[int]string{1: "Loud", 2:"Very Loud", 3:"Excessively Loud"}
if val, exists := loudVals[in]; exists {
return val
}
return "Loud"
}
vals := url.Values{
"response": {"json"}, // Must always set this as a GET param
"contactmethod": {"App"},
"apiKey": {config.Get("bksv.apiKey")},
"accept_privacy": {"Y"},
"caller_code": {c.Profile.CallerCode},
"name": {first},
"surname": {last},
"address1": {address1},
"address2": {""},
"zipcode": {addr.Zip},
"city": {addr.City},
"state": {addr.State},
"email": {c.Profile.EmailAddress},
"airports": {"KSFO"}, // KOAK, KSJC, KSAN
"month": {date.InPdt(c.Timestamp).Format("1")},
"day": {date.InPdt(c.Timestamp).Format("2")},
"year": {date.InPdt(c.Timestamp).Format("2006")},
"hour": {date.InPdt(c.Timestamp).Format("15")},
"min": {date.InPdt(c.Timestamp).Format("4")},
"sec": {date.InPdt(c.Timestamp).Format("5")},
"aircrafttype": {"J"},
"aircraftcategory": {"J"},
"activity_type": {"Other"}, // perhaps map c.Activity to something ?
"event_type": {getLoudVal(c.Loudness)}, // as per 2023.03.16
"adflag": {"U"},
"comments": {c.Description},
"responserequired": {"N"},
"enquirytype": {"C"},
"submit": {"Submit complaint"},
//"submitkey": {submitkey},
"nowebtrak": {"1"},
"defaulttime": {"0"},
"webtraklinkback": {""},
"title": {""},
"homephone": {""},
"workphone": {""},
"cellphone": {""},
"browser_name": {c.Browser.Name},
"browser_version": {browser_version},
"browser_vendor": {c.Browser.Vendor},
"browser_uuid": {c.Browser.UUID},
"browser_platform": {c.Browser.Platform},
}
if c.AircraftOverhead.FlightNumber != "" {
vals.Add("acid", c.AircraftOverhead.Callsign)
vals.Add("aacode", c.AircraftOverhead.Id2)
vals.Add("tailnumber", c.AircraftOverhead.Registration)
//vals.Add("adflag", "??") // Operation type (A, D or O for Arr, Dept or Overflight)
//vals.Add("beacon", "??") // Squawk SSR code (eg 2100)
}
return vals
}
// }}}
// {{{ PostComplaint
// https://complaints-staging.bksv.com/sfo2?json=1&resp=json
// {"result":"1",
// "title":"Complaint Received",
// "body":"Thank you. We have received your complaint."}
func PostComplaint(client *http.Client, c complaintdb.Complaint) (*complaintdb.Submission, error) {
// Initialize a new submission object, inheriting from previous
s := complaintdb.Submission{
Attempts: c.Submission.Attempts + 1,
Log: c.Submission.Log+fmt.Sprintf("\n--------=={ PostComplaint @ %s }==-\n", time.Now()),
Key: c.Submission.Key, // We're now keyless, should prob strip this out
T: time.Now().UTC(),
Outcome: complaintdb.SubmissionFailed, // Be pessimistic right up until the end
}
// We used to have to fetch a unique key (which lived in the form),
// that we'd need to submit with the rest of the complaint; that
// prevented dupes on their end. But the new version skips that
// requirement for API based submissions like ours, so we're
// keyless now.
s.Log += fmt.Sprintf("----{ time: %s }----\n --{ keyless submission }--\n", s.T)
vals := PopulateForm(c, "")
s.Log += "Submitting these vals:-\n"
for k,v := range vals { s.Log += fmt.Sprintf(" * %-20.20s: %v\n", k, v) }
s.Log += "\n"
// resp,err := client.PostForm("https://"+bksvHost+bksvPath, vals)
req,_ := http.NewRequest("POST", "https://"+bksvHost+bksvPath, strings.NewReader(vals.Encode()))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded") // This is important
reqBytes,_ := httputil.DumpRequestOut(req,true)
s.Log += "Full req to ["+bksvHost+"]:-\n--\n"+string(reqBytes)+"\n--\n\n"
resp,err := client.Do(req)
s.D = time.Since(s.T)
if err != nil {
if strings.Contains(err.Error(), "DEADLINE_EXCEEDED") {
s.Outcome = complaintdb.SubmissionTimeout
}
s.Log += fmt.Sprintf("ComplaintPOST: Posting error (dur=%s): %v\n", s.D, err)
return &s,err
}
respBytes,_ := httputil.DumpResponse(resp,true)
s.Log += "Full resp:-\n--\n"+string(respBytes)+"\n--\n\n"
defer resp.Body.Close()
body,_ := ioutil.ReadAll(resp.Body)
s.Log += fmt.Sprintf("ComplaintPOST: HTTP response '%s'\n", resp.Status)
s.Response = []byte(body)
if resp.StatusCode >= 400 {
s.Log += fmt.Sprintf("ComplaintPOST: HTTP Body:-\n%s\n--\n", body)
return &s,fmt.Errorf("ComplaintPOST: HTTP err %s", resp.Status)
}
var jsonMap map[string]interface{}
if err := json.Unmarshal([]byte(body), &jsonMap); err != nil {
s.Log += fmt.Sprintf("ComplaintPOST: JSON unmarshal '%v'\nBody:-\n%s\n--\n", err, body)
return &s,fmt.Errorf("ComplaintPOST: JSON unmarshal %v", err)
/* A few times, the remote site failed to send JSON responses, and sent HTML instead. This
* will work in that case.
if !regexp.MustCompile(`(?i:received your complaint)`).MatchString(string(body)) {
debug += fmt.Sprintf("BKSV body ...\n%s\n------\n", string(body))
return debug,fmt.Errorf("Returned response did not say 'received your complaint'")
} else {
debug += "Success !\n"+string(body)
}
*/
indentedBytes,_ := json.MarshalIndent(jsonMap, "", " ")
s.Log += "\n-- JsonMap:-\n"+string(indentedBytes)+"\n--\n"
/* on success ...
-- JsonMap:-
{
"body": "Thank you, your submission has been received. Would you like to save these details for next time?",
"receipt_key": "adasdsdadsdasds786dsa87d6as87d6as",
"result": "1",
"title": "Submission Received"
}
Or more recently,
-- JsonMap:-
{
"body": "Thank you, your submission has been received. Would you like to save these details for next time?",
"complaint_receipt_keys": [
"b85409b1152840d6d149e721cfda6eb639b05979"
],
"receipt_key": null,
"result": "1",
"title": "Submission Received"
}
*/
v := jsonMap["result"];
if v == nil {
s.Log += fmt.Sprintf("ComplaintPOST: json no 'result'\n")
return &s,fmt.Errorf("ComplaintPOST: jsonmap had no 'result'")
}
result := v.(string)
if result != "1" {
s.Outcome = complaintdb.SubmissionRejected
s.Log += fmt.Sprintf("Json result not '1'\n")
return &s,fmt.Errorf("ComplaintPOST: result='%s'", result)
}
// Extract the foreign key for this complaint
found := false
if v = jsonMap["receipt_key"]; v != nil {
s.Key = fmt.Sprintf("%s", jsonMap["receipt_key"].(string))
s.Log += "Json Success !\n"
s.Outcome = complaintdb.SubmissionAccepted
found = true
} else if r := jsonMap["complaint_receipt_keys"]; r != nil {
if v, isSlice := r.([]interface{}); isSlice {
if len(v) > 0 {
s.Key = fmt.Sprintf("%s", v[0].(string))
s.Log += "Json Success !\n"
s.Outcome = complaintdb.SubmissionAccepted
found = true
}
}
}
if ! found {
s.Log += fmt.Sprintf("ComplaintPOST: json no 'receipt_key', or 'complaint_receipt_keys[]'\n")
return &s,fmt.Errorf("ComplaintPOST: jsonmap had no 'receipt_key'")
}
return &s,nil
}
// }}}
// {{{ Notes
/* These POST fields were sent by browser, for happy success
nowebtrak:1
submitkey:4aef9c8831919524ec35ae8af8ff25ba
defaulttime:0
webtraklinkback:
title:
name:Adam
surname:Worrall
address1:1 Some Drive
address2:
city:Scotts Valley
state:CA
homephone:
workphone:
cellphone:
email:[email protected]
airports:KSFO
month:10
day:2
year:2015
hour:20
min:16
aircrafttype:Jet
eventtype:Loud noise
comments:Blah
responserequired:N
enquirytype:C
submit:Submit complaint
*/
/* BKSV:
You can call it this way
https://complaints-staging.bksv.com/sfo2?json=1
to get a json object of all the form field definitions we accept. That
will tell you what fields we require to be set and also gives you
handy things like the set of allowed disturbance types, event types
etc. NB: I haven't yet configured it to match the SFO system values
for these but that is a simple config change I can do as soon as I
have the information.
{
"airports": "{ \"KSFO\": \"San Francisco International Airport (SFO)\" , \"KSAN\": \"San Diego International Airport (SAN)\", \"KOAK\": \"Oakland International Airport (OAK)\", \"KSJC\": \"Mineta San José International Airport (SJC)\" }",
"locale": "en_AU",
"displayAreaCodes": "0",
"submitKey": "797eaa0e960b5e8848ce6785950dfd3c",
"hours": [
"12 AM",
"1 AM",
"2 AM",
"3 AM",
"4 AM",
"5 AM",
"6 AM",
"7 AM",
"8 AM",
"9 AM",
"10 AM",
"11 AM",
"12 PM",
"1 PM",
"2 PM",
"3 PM",
"4 PM",
"5 PM",
"6 PM",
"7 PM",
"8 PM",
"9 PM",
"10 PM",
"11 PM"
],
"atLeastOneContact": true,
"field_defs": {
"address2": {
"maxlength": 124,
"required": false,
"scope": "profile",
"type": "text",
"label": "Address (line 2)"
},
"webtrak": {
"maxlength": 0,
"required": false,
"scope": "ignore",
"type": "ignore",
"label": "Information from WebTrak"
},
"email": {
"maxlength": 64,
"required": false,
"scope": "profile",
"type": "email",
"label": "Email"
},
"text2": {
"maxlength": 0,
"required": false,
"scope": "about",
"type": "content",
"label": ""
},
"state": {
"maxlength": 100,
"required": true,
"scope": "profile",
"type": "list",
"label": "State"
},
"responserequired": {
"maxlength": 0,
"required": true,
"scope": "profile",
"type": "boolean",
"label": "Would you like to be contacted by one of our staff?"
},
"enquirytype": {
"maxlength": 0,
"required": true,
"scope": "complaint",
"type": "list",
"label": "Enquiry type"
},
"time": {
"maxlength": 0,
"required": true,
"scope": "complaint",
"type": "datetime",
"label": "Disturbance time"
},
"workphone": {
"maxlength": 62,
"required": false,
"scope": "profile",
"type": "tel",
"label": "Work phone"
},
"airports": {
"maxlength": 0,
"required": true,
"scope": "complaint",
"type": "list",
"label": "Airport"
},
"contact": {
"maxlength": 0,
"required": false,
"scope": "ignore",
"type": "ignore",
"label": "Contact number"
},
"date": {
"maxlength": 0,
"required": true,
"scope": "complaint",
"type": "datetime",
"label": "Disturbance date"
},
"text1": {
"maxlength": 0,
"required": false,
"scope": "about",
"type": "content",
"label": ""
},
"eventtype": {
"maxlength": 0,
"required": false,
"scope": "complaint",
"type": "list",
"label": "Disturbance type"
},
"name": {
"maxlength": 62,
"required": true,
"scope": "profile",
"type": "text",
"label": "First name"
},
"city": {
"maxlength": 46,
"required": true,
"scope": "profile",
"type": "text",
"label": "City"
},
"address1": {
"maxlength": 124,
"required": true,
"scope": "profile",
"type": "text",
"label": "Address"
},
"cellphone": {
"maxlength": 62,
"required": false,
"scope": "profile",
"type": "tel",
"label": "Mobile phone"
},
"aircrafttype": {
"maxlength": 0,
"required": false,
"scope": "complaint",
"type": "list",
"label": "Aircraft type"
},
"comments": {
"maxlength": 10000,
"required": false,
"scope": "complaint",
"type": "textarea",
"label": "Please give details"
},
"title": {
"maxlength": 30,
"required": false,
"scope": "profile",
"type": "list",
"label": "Title"
},
"surname": {
"maxlength": 62,
"required": true,
"scope": "profile",
"type": "text",
"label": "Last name"
},
"homephone": {
"maxlength": 62,
"required": false,
"scope": "profile",
"type": "tel",
"label": "Home phone"
}
},
"years": {
"2015": "2015",
"2014": 2014
},
"dateFormat": [
"month",
"day",
"year"
],
"strings": {
"months/short/5": "Jun",
"labels/month": "Month",
"complaintsform/lists/acTypes": "Jet,Propeller,Helicopter,Various,Unknown",
"months/short/3": "Apr",
"complaintsform/lists/activity_types": "Indoors,Outdoors,Watching TV,Sleeping,Working,Other",
"labels/hour": "Hour",
"labels/year": "Year",
"months/short/4": "May",
"months/short/9": "Oct",
"months/short/2": "Mar",
"complaintsform/app/complaintReceived": "Complaint received!",
"complaintsform/lists/event_types": "Loud noise,Overflight,Low flying,Early turn,Go-around,Too frequent,Helicopter operations,Engine run-up,Ground noise,Other",
"complaintsform/blocks/submitComplaint": "Submit complaint",
"months/short/7": "Aug",
"complaintsform/blocks/pleaseFillIn": "Please fill in",
"timeOfDay/1": "PM",
"complaintsform/blocks/tooShort": "Value is too short",
"complaintsform/lists/acModes_internal": "",
"complaintsform/blocks/required": "(required)",
"months/short/8": "Sep",
"complaintsform/lists/acModes": "Arrival,Departure,Overflight,Unknown",
"labels/minute": "Min",
"timeOfDay/0": "AM",
"months/short/6": "Jul",
"complaintsform/lists/acTypes_internal": "",
"labels/yes": "Yes",
"months/short/10": "Nov",
"months/short/1": "Feb",
"complaintsform/lists/titles": "Mr,Mrs,Miss,Ms,Dr",
"complaintsform/lists/contact_method": "Letter,Email,Telephone",
"labels/no": "No",
"complaintsform/blocks/errors": "There are some problems. Please correct the mistakes and submit the form again.",
"labels/day": "Day",
"months/short/0": "Jan",
"lists/state": "CA,AZ",
"months/short/11": "Dec"
},
"fields": [
"text1",
"title",
"name",
"surname",
"address1",
"address2",
"city",
"state",
"contact",
"airports",
"text2",
"date",
"time",
"webtrak",
"aircrafttype",
"eventtype",
"comments",
"responserequired",
"enquirytype",
"homephone",
"workphone",
"cellphone",
"email"
]
}
*/
// }}}
// {{{ -------------------------={ E N D }=----------------------------------
// Local variables:
// folded-file: t
// end:
// }}}
bksv.go | package bksv
// Package for posting a {ComplainerProfile,Complaint} to BKSV's web form
// Bug 1: Edits to profile should call to maps to reparse the address; ignore what's in the form fields.
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httputil"
"net/url"
"strings"
"time"
"github.com/skypies/util/date"
"github.com/skypies/complaints/pkg/complaintdb"
"github.com/skypies/complaints/pkg/config"
)
// "Real" one : https://viewpoint.emsbk.com/sfo5
// Temp one : https://viewpoint.emsbk.com/sfo12
// Other Temp one : https://viewpoint.emsbk.com/sfo50
//https://viewpoint.emsbk.com/<sitename>?response=json
//const bksvHost = "complaints-us.emsbk.com"
const bksvHost = "viewpoint.emsbk.com"
const bksvPath = "/sfo5" + "?response=json" // response *must* be a GET param, not POST
// {{{ PopulateForm
func PopulateForm(c complaintdb.Complaint, submitkey string) url.Values {
first,last := c.Profile.SplitName()
if c.Activity == "" { c.Activity = "Loud noise" }
address1 := ""
addr := c.Profile.GetStructuredAddress()
if addr.Street == "" {
address1 = c.Profile.Address // default to the raw string, if we don't have a structured one
} else if addr.Number == "" {
address1 = addr.Street
} else {
address1 = addr.Number + " " + addr.Street
}
browser_version := c.Browser.Version
if (len(browser_version) > 50) {
browser_version = browser_version[0:49]
}
getLoudVal := func(in int) string {
loudVals := map[int]string{1: "Loud", 2:"Very Loud", 3:"Excessively Loud"}
if val, exists := loudVals[in]; exists {
return val
}
return "Loud"
}
vals := url.Values{
"response": {"json"}, // Must always set this as a GET param
"contactmethod": {"App"},
"apiKey": {config.Get("bksv.apiKey")},
"accept_privacy": {"Y"},
"caller_code": {c.Profile.CallerCode},
"name": {first},
"surname": {last},
"address1": {address1},
"address2": {""},
"zipcode": {addr.Zip},
"city": {addr.City},
"state": {addr.State},
"email": {c.Profile.EmailAddress},
"airports": {"KSFO"}, // KOAK, KSJC, KSAN
"month": {date.InPdt(c.Timestamp).Format("1")},
"day": {date.InPdt(c.Timestamp).Format("2")},
"year": {date.InPdt(c.Timestamp).Format("2006")},
"hour": {date.InPdt(c.Timestamp).Format("15")},
"min": {date.InPdt(c.Timestamp).Format("4")},
"sec": {date.InPdt(c.Timestamp).Format("5")},
"aircrafttype": {"J"},
"aircraftcategory": {"J"},
"activity_type": {"Other"}, // perhaps map c.Activity to something ?
"event_type": {getLoudVal(c.Loudness)}, // as per 2023.03.16
"adflag": {"U"},
"comments": {c.Description},
"responserequired": {"N"},
"enquirytype": {"C"},
"submit": {"Submit complaint"},
//"submitkey": {submitkey},
"nowebtrak": {"1"},
"defaulttime": {"0"},
"webtraklinkback": {""},
"title": {""},
"homephone": {""},
"workphone": {""},
"cellphone": {""},
"browser_name": {c.Browser.Name},
"browser_version": {browser_version},
"browser_vendor": {c.Browser.Vendor},
"browser_uuid": {c.Browser.UUID},
"browser_platform": {c.Browser.Platform},
}
if c.AircraftOverhead.FlightNumber != "" {
vals.Add("acid", c.AircraftOverhead.Callsign)
vals.Add("aacode", c.AircraftOverhead.Id2)
vals.Add("tailnumber", c.AircraftOverhead.Registration)
//vals.Add("adflag", "??") // Operation type (A, D or O for Arr, Dept or Overflight)
//vals.Add("beacon", "??") // Squawk SSR code (eg 2100)
}
return vals
}
// }}}
// {{{ PostComplaint
// https://complaints-staging.bksv.com/sfo2?json=1&resp=json
// {"result":"1",
// "title":"Complaint Received",
// "body":"Thank you. We have received your complaint."}
func PostComplaint(client *http.Client, c complaintdb.Complaint) (*complaintdb.Submission, error) |
// }}}
// {{{ Notes
/* These POST fields were sent by browser, for happy success
nowebtrak:1
submitkey:4aef9c8831919524ec35ae8af8ff25ba
defaulttime:0
webtraklinkback:
title:
name:Adam
surname:Worrall
address1:1 Some Drive
address2:
city:Scotts Valley
state:CA
homephone:
workphone:
cellphone:
email:[email protected]
airports:KSFO
month:10
day:2
year:2015
hour:20
min:16
aircrafttype:Jet
eventtype:Loud noise
comments:Blah
responserequired:N
enquirytype:C
submit:Submit complaint
*/
/* BKSV:
You can call it this way
https://complaints-staging.bksv.com/sfo2?json=1
to get a json object of all the form field definitions we accept. That
will tell you what fields we require to be set and also gives you
handy things like the set of allowed disturbance types, event types
etc. NB: I haven't yet configured it to match the SFO system values
for these but that is a simple config change I can do as soon as I
have the information.
{
"airports": "{ \"KSFO\": \"San Francisco International Airport (SFO)\" , \"KSAN\": \"San Diego International Airport (SAN)\", \"KOAK\": \"Oakland International Airport (OAK)\", \"KSJC\": \"Mineta San José International Airport (SJC)\" }",
"locale": "en_AU",
"displayAreaCodes": "0",
"submitKey": "797eaa0e960b5e8848ce6785950dfd3c",
"hours": [
"12 AM",
"1 AM",
"2 AM",
"3 AM",
"4 AM",
"5 AM",
"6 AM",
"7 AM",
"8 AM",
"9 AM",
"10 AM",
"11 AM",
"12 PM",
"1 PM",
"2 PM",
"3 PM",
"4 PM",
"5 PM",
"6 PM",
"7 PM",
"8 PM",
"9 PM",
"10 PM",
"11 PM"
],
"atLeastOneContact": true,
"field_defs": {
"address2": {
"maxlength": 124,
"required": false,
"scope": "profile",
"type": "text",
"label": "Address (line 2)"
},
"webtrak": {
"maxlength": 0,
"required": false,
"scope": "ignore",
"type": "ignore",
"label": "Information from WebTrak"
},
"email": {
"maxlength": 64,
"required": false,
"scope": "profile",
"type": "email",
"label": "Email"
},
"text2": {
"maxlength": 0,
"required": false,
"scope": "about",
"type": "content",
"label": ""
},
"state": {
"maxlength": 100,
"required": true,
"scope": "profile",
"type": "list",
"label": "State"
},
"responserequired": {
"maxlength": 0,
"required": true,
"scope": "profile",
"type": "boolean",
"label": "Would you like to be contacted by one of our staff?"
},
"enquirytype": {
"maxlength": 0,
"required": true,
"scope": "complaint",
"type": "list",
"label": "Enquiry type"
},
"time": {
"maxlength": 0,
"required": true,
"scope": "complaint",
"type": "datetime",
"label": "Disturbance time"
},
"workphone": {
"maxlength": 62,
"required": false,
"scope": "profile",
"type": "tel",
"label": "Work phone"
},
"airports": {
"maxlength": 0,
"required": true,
"scope": "complaint",
"type": "list",
"label": "Airport"
},
"contact": {
"maxlength": 0,
"required": false,
"scope": "ignore",
"type": "ignore",
"label": "Contact number"
},
"date": {
"maxlength": 0,
"required": true,
"scope": "complaint",
"type": "datetime",
"label": "Disturbance date"
},
"text1": {
"maxlength": 0,
"required": false,
"scope": "about",
"type": "content",
"label": ""
},
"eventtype": {
"maxlength": 0,
"required": false,
"scope": "complaint",
"type": "list",
"label": "Disturbance type"
},
"name": {
"maxlength": 62,
"required": true,
"scope": "profile",
"type": "text",
"label": "First name"
},
"city": {
"maxlength": 46,
"required": true,
"scope": "profile",
"type": "text",
"label": "City"
},
"address1": {
"maxlength": 124,
"required": true,
"scope": "profile",
"type": "text",
"label": "Address"
},
"cellphone": {
"maxlength": 62,
"required": false,
"scope": "profile",
"type": "tel",
"label": "Mobile phone"
},
"aircrafttype": {
"maxlength": 0,
"required": false,
"scope": "complaint",
"type": "list",
"label": "Aircraft type"
},
"comments": {
"maxlength": 10000,
"required": false,
"scope": "complaint",
"type": "textarea",
"label": "Please give details"
},
"title": {
"maxlength": 30,
"required": false,
"scope": "profile",
"type": "list",
"label": "Title"
},
"surname": {
"maxlength": 62,
"required": true,
"scope": "profile",
"type": "text",
"label": "Last name"
},
"homephone": {
"maxlength": 62,
"required": false,
"scope": "profile",
"type": "tel",
"label": "Home phone"
}
},
"years": {
"2015": "2015",
"2014": 2014
},
"dateFormat": [
"month",
"day",
"year"
],
"strings": {
"months/short/5": "Jun",
"labels/month": "Month",
"complaintsform/lists/acTypes": "Jet,Propeller,Helicopter,Various,Unknown",
"months/short/3": "Apr",
"complaintsform/lists/activity_types": "Indoors,Outdoors,Watching TV,Sleeping,Working,Other",
"labels/hour": "Hour",
"labels/year": "Year",
"months/short/4": "May",
"months/short/9": "Oct",
"months/short/2": "Mar",
"complaintsform/app/complaintReceived": "Complaint received!",
"complaintsform/lists/event_types": "Loud noise,Overflight,Low flying,Early turn,Go-around,Too frequent,Helicopter operations,Engine run-up,Ground noise,Other",
"complaintsform/blocks/submitComplaint": "Submit complaint",
"months/short/7": "Aug",
"complaintsform/blocks/pleaseFillIn": "Please fill in",
"timeOfDay/1": "PM",
"complaintsform/blocks/tooShort": "Value is too short",
"complaintsform/lists/acModes_internal": "",
"complaintsform/blocks/required": "(required)",
"months/short/8": "Sep",
"complaintsform/lists/acModes": "Arrival,Departure,Overflight,Unknown",
"labels/minute": "Min",
"timeOfDay/0": "AM",
"months/short/6": "Jul",
"complaintsform/lists/acTypes_internal": "",
"labels/yes": "Yes",
"months/short/10": "Nov",
"months/short/1": "Feb",
"complaintsform/lists/titles": "Mr,Mrs,Miss,Ms,Dr",
"complaintsform/lists/contact_method": "Letter,Email,Telephone",
"labels/no": "No",
"complaintsform/blocks/errors": "There are some problems. Please correct the mistakes and submit the form again.",
"labels/day": "Day",
"months/short/0": "Jan",
"lists/state": "CA,AZ",
"months/short/11": "Dec"
},
"fields": [
"text1",
"title",
"name",
"surname",
"address1",
"address2",
"city",
"state",
"contact",
"airports",
"text2",
"date",
"time",
"webtrak",
"aircrafttype",
"eventtype",
"comments",
"responserequired",
"enquirytype",
"homephone",
"workphone",
"cellphone",
"email"
]
}
*/
// }}}
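// {{{ Field definitions probe (sketch)
// The BKSV note above says that fetching the site with ?json=1 returns a JSON
// object describing every form field it accepts (required flags, allowed list
// values, etc). A minimal probe could look like the sketch below; it is
// illustrative only: the query parameter and response shape are assumptions
// based on the sample pasted above, not a confirmed API.
//
//	func fetchFieldDefs(client *http.Client) (map[string]interface{}, error) {
//		resp, err := client.Get("https://" + bksvHost + "/sfo5?json=1")
//		if err != nil {
//			return nil, err
//		}
//		defer resp.Body.Close()
//		body, err := ioutil.ReadAll(resp.Body)
//		if err != nil {
//			return nil, err
//		}
//		defs := map[string]interface{}{}
//		if err := json.Unmarshal(body, &defs); err != nil {
//			return nil, err
//		}
//		return defs, nil // e.g. defs["field_defs"], defs["strings"], defs["fields"]
//	}
// }}}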
// {{{ -------------------------={ E N D }=----------------------------------
// Local variables:
// folded-file: t
// end:
// }}}
| {
// Initialize a new submission object, inheriting from previous
s := complaintdb.Submission{
Attempts: c.Submission.Attempts + 1,
Log: c.Submission.Log+fmt.Sprintf("\n--------=={ PostComplaint @ %s }==-\n", time.Now()),
Key: c.Submission.Key, // We're now keyless, should prob strip this out
T: time.Now().UTC(),
Outcome: complaintdb.SubmissionFailed, // Be pessimistic right up until the end
}
// We used to have to fetch a unique key (which lived in the form),
// that we'd need to submit with the rest of the complaint; that
// prevented dupes on their end. But the new version skips that
// requirement for API based submissions like ours, so we're
// keyless now.
s.Log += fmt.Sprintf("----{ time: %s }----\n --{ keyless submission }--\n", s.T)
vals := PopulateForm(c, "")
s.Log += "Submitting these vals:-\n"
for k,v := range vals { s.Log += fmt.Sprintf(" * %-20.20s: %v\n", k, v) }
s.Log += "\n"
// resp,err := client.PostForm("https://"+bksvHost+bksvPath, vals)
req,_ := http.NewRequest("POST", "https://"+bksvHost+bksvPath, strings.NewReader(vals.Encode()))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded") // This is important
reqBytes,_ := httputil.DumpRequestOut(req,true)
s.Log += "Full req to ["+bksvHost+"]:-\n--\n"+string(reqBytes)+"\n--\n\n"
resp,err := client.Do(req)
s.D = time.Since(s.T)
if err != nil {
if strings.Contains(err.Error(), "DEADLINE_EXCEEDED") {
s.Outcome = complaintdb.SubmissionTimeout
}
s.Log += fmt.Sprintf("ComplaintPOST: Posting error (dur=%s): %v\n", s.D, err)
return &s,err
}
respBytes,_ := httputil.DumpResponse(resp,true)
s.Log += "Full resp:-\n--\n"+string(respBytes)+"\n--\n\n"
defer resp.Body.Close()
body,_ := ioutil.ReadAll(resp.Body)
s.Log += fmt.Sprintf("ComplaintPOST: HTTP response '%s'\n", resp.Status)
s.Response = []byte(body)
if resp.StatusCode >= 400 {
s.Log += fmt.Sprintf("ComplaintPOST: HTTP Body:-\n%s\n--\n", body)
return &s,fmt.Errorf("ComplaintPOST: HTTP err %s", resp.Status)
}
var jsonMap map[string]interface{}
if err := json.Unmarshal([]byte(body), &jsonMap); err != nil {
s.Log += fmt.Sprintf("ComplaintPOST: JSON unmarshal '%v'\nBody:-\n%s\n--\n", err, body)
return &s,fmt.Errorf("ComplaintPOST: JSON unmarshal %v", err)
/* A few times, the remote site failed to send JSON responses, and sent HTML instead. This
* will work in that case.
if !regexp.MustCompile(`(?i:received your complaint)`).MatchString(string(body)) {
debug += fmt.Sprintf("BKSV body ...\n%s\n------\n", string(body))
return debug,fmt.Errorf("Returned response did not say 'received your complaint'")
} else {
debug += "Success !\n"+string(body)
}
*/
}
indentedBytes,_ := json.MarshalIndent(jsonMap, "", " ")
s.Log += "\n-- JsonMap:-\n"+string(indentedBytes)+"\n--\n"
/* on success ...
-- JsonMap:-
{
"body": "Thank you, your submission has been received. Would you like to save these details for next time?",
"receipt_key": "adasdsdadsdasds786dsa87d6as87d6as",
"result": "1",
"title": "Submission Received"
}
Or more recently,
-- JsonMap:-
{
"body": "Thank you, your submission has been received. Would you like to save these details for next time?",
"complaint_receipt_keys": [
"b85409b1152840d6d149e721cfda6eb639b05979"
],
"receipt_key": null,
"result": "1",
"title": "Submission Received"
}
*/
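// The type assertion just below assumes the endpoint always encodes "result"
// as a JSON string ("1"), which matches the samples above. If it were ever
// returned as a JSON number instead, a more defensive read would be (sketch only):
//
//	result, ok := v.(string)
//	if !ok {
//		result = fmt.Sprintf("%v", v)
//	}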
v := jsonMap["result"]
if v == nil {
s.Log += fmt.Sprintf("ComplaintPOST: json no 'result'\n")
return &s,fmt.Errorf("ComplaintPOST: jsonmap had no 'result'")
}
result := v.(string)
if result != "1" {
s.Outcome = complaintdb.SubmissionRejected
s.Log += fmt.Sprintf("Json result not '1'\n")
return &s,fmt.Errorf("ComplaintPOST: result='%s'", result)
}
// Extract the foreign key for this complaint
found := false
if v = jsonMap["receipt_key"]; v != nil {
s.Key = fmt.Sprintf("%.0f", jsonMap["receipt_key"].(string))
s.Log += "Json Success !\n"
s.Outcome = complaintdb.SubmissionAccepted
found = true
} else if r := jsonMap["complaint_receipt_keys"]; r != nil {
if v, isSlice := r.([]interface{}); isSlice {
if len(v) > 0 {
s.Key = fmt.Sprintf("%.0f", v[0].(string))
s.Log += "Json Success !\n"
s.Outcome = complaintdb.SubmissionAccepted
found = true
}
}
}
if !found {
s.Log += fmt.Sprintf("ComplaintPOST: json no 'receipt_key', or 'complaint_receipt_keys[]'\n")
return &s,fmt.Errorf("ComplaintPOST: jsonmap had no 'receipt_key'")
}
return &s,nil
} | identifier_body |
write.go | package packer
import (
"bufio"
"bytes"
"crypto/sha256"
"fmt"
"io"
"io/fs"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"sort"
"strings"
"time"
"github.com/gokrazy/internal/config"
"github.com/gokrazy/internal/deviceconfig"
"github.com/gokrazy/internal/fat"
"github.com/gokrazy/internal/humanize"
"github.com/gokrazy/internal/mbr"
"github.com/gokrazy/internal/squashfs"
"github.com/gokrazy/tools/internal/measure"
"github.com/gokrazy/tools/packer"
"github.com/gokrazy/tools/third_party/systemd-250.5-1"
)
func copyFile(fw *fat.Writer, dest string, src fs.File) error {
st, err := src.Stat()
if err != nil {
return err
}
w, err := fw.File(dest, st.ModTime())
if err != nil {
return err
}
if _, err := io.Copy(w, src); err != nil {
return err
}
return src.Close()
}
func copyFileSquash(d *squashfs.Directory, dest, src string) error {
f, err := os.Open(src)
if err != nil {
return err
}
defer f.Close()
st, err := f.Stat()
if err != nil {
return err
}
w, err := d.File(filepath.Base(dest), st.ModTime(), st.Mode()&os.ModePerm)
if err != nil {
return err
}
if _, err := io.Copy(w, f); err != nil {
return err
}
return w.Close()
}
func (p *Pack) writeCmdline(fw *fat.Writer, src string) error {
b, err := ioutil.ReadFile(src)
if err != nil {
return err
}
cmdline := "console=tty1 "
serialConsole := p.Cfg.SerialConsoleOrDefault()
if serialConsole != "disabled" && serialConsole != "off" {
if serialConsole == "UART0" {
// For backwards compatibility, treat the special value UART0 as
// serial0,115200:
cmdline += "console=serial0,115200 "
} else {
cmdline += "console=" + serialConsole + " "
}
}
cmdline += string(b)
// TODO: change {gokrazy,rtr7}/kernel/cmdline.txt to contain a dummy PARTUUID=
if p.ModifyCmdlineRoot() {
root := "root=" + p.Root()
cmdline = strings.ReplaceAll(cmdline, "root=/dev/mmcblk0p2", root)
cmdline = strings.ReplaceAll(cmdline, "root=/dev/sda2", root)
} else {
log.Printf("(not using PARTUUID= in cmdline.txt yet)")
}
// Pad the kernel command line with enough whitespace that can be used for
// in-place file overwrites to add additional command line flags for the
// gokrazy update process:
const pad = 64
padded := append([]byte(cmdline), bytes.Repeat([]byte{' '}, pad)...)
w, err := fw.File("/cmdline.txt", time.Now())
if err != nil {
return err
}
if _, err := w.Write(padded); err != nil {
return err
}
if p.UseGPTPartuuid {
// In addition to the cmdline.txt for the Raspberry Pi bootloader, also
// write a systemd-boot entries configuration file as per
// https://systemd.io/BOOT_LOADER_SPECIFICATION/
w, err = fw.File("/loader/entries/gokrazy.conf", time.Now())
if err != nil {
return err
}
fmt.Fprintf(w, `title gokrazy
linux /vmlinuz
`)
if _, err := w.Write(append([]byte("options "), padded...)); err != nil {
return err
}
}
return nil
}
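// The padding above is what makes later in-place edits of cmdline.txt safe:
// the file's size (and therefore its FAT extents) must not change, so extra
// flags can only replace trailing spaces. A rough sketch of such an overwrite
// (illustrative only; the flag name is made up):
//
//	buf, _ := os.ReadFile("/boot/cmdline.txt")
//	updated := strings.TrimRight(string(buf), " ") + " gokrazy.example_flag=1"
//	if len(updated) <= len(buf) {
//		// re-pad to the original size so the file does not grow
//		_ = os.WriteFile("/boot/cmdline.txt", []byte(fmt.Sprintf("%-*s", len(buf), updated)), 0600)
//	}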
func (p *Pack) writeConfig(fw *fat.Writer, src string) error {
b, err := ioutil.ReadFile(src)
if err != nil {
return err
}
config := string(b)
if p.Cfg.SerialConsoleOrDefault() != "off" {
config = strings.ReplaceAll(config, "enable_uart=0", "enable_uart=1")
}
w, err := fw.File("/config.txt", time.Now())
if err != nil {
return err
}
_, err = w.Write([]byte(config))
return err
}
func shortenSHA256(sum []byte) string {
hash := fmt.Sprintf("%x", sum)
if len(hash) > 10 {
return hash[:10]
}
return hash
}
var (
firmwareGlobs = []string{
"*.bin",
"*.dat",
"*.elf",
"*.upd",
"*.sig",
}
kernelGlobs = []string{
"boot.scr", // u-boot script file
"vmlinuz",
"*.dtb",
}
)
func (p *Pack) writeBoot(f io.Writer, mbrfilename string) error {
fmt.Printf("\n")
fmt.Printf("Creating boot file system\n")
done := measure.Interactively("creating boot file system")
fragment := ""
defer func() {
done(fragment)
}()
globs := make([]string, 0, len(firmwareGlobs)+len(kernelGlobs))
if fw := p.Cfg.FirmwarePackageOrDefault(); fw != "" {
firmwareDir, err := packer.PackageDir(fw)
if err != nil {
return err
}
for _, glob := range firmwareGlobs {
globs = append(globs, filepath.Join(firmwareDir, glob))
}
}
var eepromDir string
if eeprom := p.Cfg.EEPROMPackageOrDefault(); eeprom != "" {
var err error
eepromDir, err = packer.PackageDir(eeprom)
if err != nil {
return err
}
}
kernelDir, err := packer.PackageDir(p.Cfg.KernelPackageOrDefault())
if err != nil {
return err
}
fmt.Printf("\nKernel directory: %s\n", kernelDir)
for _, glob := range kernelGlobs {
globs = append(globs, filepath.Join(kernelDir, glob))
}
bufw := bufio.NewWriter(f)
fw, err := fat.NewWriter(bufw)
if err != nil {
return err
}
for _, pattern := range globs {
matches, err := filepath.Glob(pattern)
if err != nil {
return err
}
for _, m := range matches {
src, err := os.Open(m)
if err != nil {
return err
}
if err := copyFile(fw, "/"+filepath.Base(m), src); err != nil {
return err
}
}
}
// EEPROM update procedure. See also:
// https://news.ycombinator.com/item?id=21674550
writeEepromUpdateFile := func(globPattern, target string) (sig string, _ error) {
matches, err := filepath.Glob(globPattern)
if err != nil {
return "", err
}
if len(matches) == 0 {
return "", fmt.Errorf("invalid -eeprom_package: no files matching %s", filepath.Base(globPattern))
}
// Select the EEPROM file that sorts last.
// This corresponds to most recent for the pieeprom-*.bin files,
// which contain the date in yyyy-mm-dd format.
sort.Sort(sort.Reverse(sort.StringSlice(matches)))
f, err := os.Open(matches[0])
if err != nil {
return "", err
}
defer f.Close()
st, err := f.Stat()
if err != nil {
return "", err
}
// Copy the EEPROM file into the image and calculate its SHA256 hash
// while doing so:
w, err := fw.File(target, st.ModTime())
if err != nil {
return "", err
}
h := sha256.New()
if _, err := io.Copy(w, io.TeeReader(f, h)); err != nil {
return "", err
}
if base := filepath.Base(target); base == "recovery.bin" || base == "RECOVERY.000" {
fmt.Printf(" %s\n", base)
// No signature required for recovery.bin itself.
return "", nil
}
fmt.Printf(" %s (sig %s)\n", filepath.Base(target), shortenSHA256(h.Sum(nil)))
// Include the SHA256 hash in the image in an accompanying .sig file:
sigFn := target
ext := filepath.Ext(sigFn)
if ext == "" {
return "", fmt.Errorf("BUG: cannot derive signature file name from matches[0]=%q", matches[0])
}
sigFn = strings.TrimSuffix(sigFn, ext) + ".sig"
w, err = fw.File(sigFn, st.ModTime())
if err != nil {
return "", err
}
_, err = fmt.Fprintf(w, "%x\n", h.Sum(nil))
return fmt.Sprintf("%x", h.Sum(nil)), err
}
if eepromDir != "" {
fmt.Printf("EEPROM update summary:\n")
pieSig, err := writeEepromUpdateFile(filepath.Join(eepromDir, "pieeprom-*.bin"), "/pieeprom.upd")
if err != nil {
return err
}
vlSig, err := writeEepromUpdateFile(filepath.Join(eepromDir, "vl805-*.bin"), "/vl805.bin")
if err != nil {
return err
}
targetFilename := "/recovery.bin"
if pieSig == p.ExistingEEPROM.PieepromSHA256 &&
vlSig == p.ExistingEEPROM.VL805SHA256 {
fmt.Printf(" installing recovery.bin as RECOVERY.000 (EEPROM already up-to-date)\n")
targetFilename = "/RECOVERY.000"
}
if _, err := writeEepromUpdateFile(filepath.Join(eepromDir, "recovery.bin"), targetFilename); err != nil {
return err
}
}
if err := p.writeCmdline(fw, filepath.Join(kernelDir, "cmdline.txt")); err != nil {
return err
}
if err := p.writeConfig(fw, filepath.Join(kernelDir, "config.txt")); err != nil {
return err
}
if p.UseGPTPartuuid {
srcX86, err := systemd.SystemdBootX64.Open("systemd-bootx64.efi")
if err != nil {
return err
}
if err := copyFile(fw, "/EFI/BOOT/BOOTX64.EFI", srcX86); err != nil {
return err
}
srcAA86, err := systemd.SystemdBootAA64.Open("systemd-bootaa64.efi")
if err != nil {
return err
}
if err := copyFile(fw, "/EFI/BOOT/BOOTAA64.EFI", srcAA86); err != nil {
return err
}
}
if err := fw.Flush(); err != nil {
return err
}
if err := bufw.Flush(); err != nil {
return err
}
if seeker, ok := f.(io.Seeker); ok {
off, err := seeker.Seek(0, io.SeekCurrent)
if err != nil {
return err
}
fragment = ", " + humanize.Bytes(uint64(off))
}
if mbrfilename != "" {
if _, ok := f.(io.ReadSeeker); !ok {
return fmt.Errorf("BUG: f does not implement io.ReadSeeker")
}
fmbr, err := os.OpenFile(mbrfilename, os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
return err
}
defer fmbr.Close()
if err := writeMBR(f.(io.ReadSeeker), fmbr, p.Partuuid); err != nil {
return err
}
if err := fmbr.Close(); err != nil {
return err
}
}
return nil
}
type FileInfo struct {
Filename string
Mode os.FileMode
FromHost string
FromLiteral string
SymlinkDest string
Dirents []*FileInfo
}
func (fi *FileInfo) isFile() bool {
return fi.FromHost != "" || fi.FromLiteral != ""
}
func (fi *FileInfo) pathList() (paths []string) {
for _, ent := range fi.Dirents {
if ent.isFile() {
paths = append(paths, ent.Filename)
continue
}
for _, e := range ent.pathList() {
paths = append(paths, path.Join(ent.Filename, e))
}
}
return paths
}
func (fi *FileInfo) combine(fi2 *FileInfo) error {
for _, ent2 := range fi2.Dirents {
// get existing file info
var f *FileInfo
for _, ent := range fi.Dirents {
if ent.Filename == ent2.Filename {
f = ent
break
}
}
// if not found add complete subtree directly
if f == nil {
fi.Dirents = append(fi.Dirents, ent2)
continue
}
// file overwrite is not supported -> return error
if f.isFile() || ent2.isFile() {
return fmt.Errorf("file already exist: %s", ent2.Filename)
}
if err := f.combine(ent2); err != nil {
return err
}
}
return nil
}
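// A small illustration of how combine merges two FileInfo trees (sketch; the
// literal file names are made up):
//
//	a := &FileInfo{Dirents: []*FileInfo{{Filename: "etc", Dirents: []*FileInfo{{Filename: "hosts", FromLiteral: "127.0.0.1 localhost"}}}}}
//	b := &FileInfo{Dirents: []*FileInfo{{Filename: "etc", Dirents: []*FileInfo{{Filename: "resolv.conf", FromLiteral: "nameserver 8.8.8.8"}}}}}
//	err := a.combine(b) // a now provides both etc/hosts and etc/resolv.conf
//	// combining another tree that also provides etc/hosts would return an error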
func (fi *FileInfo) mustFindDirent(path string) *FileInfo {
for _, ent := range fi.Dirents {
// TODO: split path into components and compare piecemeal
if ent.Filename == path {
return ent
}
}
log.Panicf("mustFindDirent(%q) did not find directory entry", path)
return nil
}
func findBins(cfg *config.Struct, buildEnv *packer.BuildEnv, bindir string) (*FileInfo, error) {
result := FileInfo{Filename: ""}
// TODO: doing all three packer.MainPackages calls concurrently hides go
// module proxy latency
gokrazyMainPkgs, err := buildEnv.MainPackages(cfg.GokrazyPackagesOrDefault())
if err != nil {
return nil, err
}
gokrazy := FileInfo{Filename: "gokrazy"}
for _, pkg := range gokrazyMainPkgs {
binPath := filepath.Join(bindir, pkg.Basename())
fileIsELFOrFatal(binPath)
gokrazy.Dirents = append(gokrazy.Dirents, &FileInfo{
Filename: pkg.Basename(),
FromHost: binPath,
})
}
if cfg.InternalCompatibilityFlags.InitPkg != "" {
initMainPkgs, err := buildEnv.MainPackages([]string{cfg.InternalCompatibilityFlags.InitPkg})
if err != nil {
return nil, err
}
for _, pkg := range initMainPkgs {
if got, want := pkg.Basename(), "init"; got != want {
log.Printf("Error: -init_pkg=%q produced unexpected binary name: got %q, want %q", cfg.InternalCompatibilityFlags.InitPkg, got, want)
continue
}
binPath := filepath.Join(bindir, pkg.Basename())
fileIsELFOrFatal(binPath)
gokrazy.Dirents = append(gokrazy.Dirents, &FileInfo{
Filename: pkg.Basename(),
FromHost: binPath,
})
}
}
result.Dirents = append(result.Dirents, &gokrazy)
mainPkgs, err := buildEnv.MainPackages(cfg.Packages)
if err != nil {
return nil, err
}
user := FileInfo{Filename: "user"}
for _, pkg := range mainPkgs {
binPath := filepath.Join(bindir, pkg.Basename())
fileIsELFOrFatal(binPath)
user.Dirents = append(user.Dirents, &FileInfo{
Filename: pkg.Basename(),
FromHost: binPath,
})
}
result.Dirents = append(result.Dirents, &user)
return &result, nil
}
func writeFileInfo(dir *squashfs.Directory, fi *FileInfo) error {
if fi.FromHost != "" { // copy a regular file
return copyFileSquash(dir, fi.Filename, fi.FromHost)
}
if fi.FromLiteral != "" { // write a regular file
mode := fi.Mode
if mode == 0 {
mode = 0444
}
w, err := dir.File(fi.Filename, time.Now(), mode)
if err != nil {
return err
}
if _, err := w.Write([]byte(fi.FromLiteral)); err != nil {
return err
}
return w.Close()
}
if fi.SymlinkDest != "" { // create a symlink
return dir.Symlink(fi.SymlinkDest, fi.Filename, time.Now(), 0444)
}
// subdir
var d *squashfs.Directory
if fi.Filename == "" { // root
d = dir
} else {
d = dir.Directory(fi.Filename, time.Now())
}
sort.Slice(fi.Dirents, func(i, j int) bool {
return fi.Dirents[i].Filename < fi.Dirents[j].Filename
})
for _, ent := range fi.Dirents {
if err := writeFileInfo(d, ent); err != nil {
return err
}
}
return d.Flush()
}
func | (f io.WriteSeeker, root *FileInfo) error {
fmt.Printf("\n")
fmt.Printf("Creating root file system\n")
done := measure.Interactively("creating root file system")
defer func() {
done("")
}()
// TODO: make fw.Flush() report the size of the root fs
fw, err := squashfs.NewWriter(f, time.Now())
if err != nil {
return err
}
if err := writeFileInfo(fw.Root, root); err != nil {
return err
}
return fw.Flush()
}
func (p *Pack) writeRootDeviceFiles(f io.WriteSeeker, rootDeviceFiles []deviceconfig.RootFile) error {
kernelDir, err := packer.PackageDir(p.Cfg.KernelPackageOrDefault())
if err != nil {
return err
}
for _, rootFile := range rootDeviceFiles {
if _, err := f.Seek(rootFile.Offset, io.SeekStart); err != nil {
return err
}
source, err := os.Open(filepath.Join(kernelDir, rootFile.Name))
if err != nil {
return err
}
if _, err := io.Copy(f, source); err != nil {
return err
}
_ = source.Close()
}
return nil
}
func writeMBR(f io.ReadSeeker, fw io.WriteSeeker, partuuid uint32) error {
rd, err := fat.NewReader(f)
if err != nil {
return err
}
vmlinuzOffset, _, err := rd.Extents("/vmlinuz")
if err != nil {
return err
}
cmdlineOffset, _, err := rd.Extents("/cmdline.txt")
if err != nil {
return err
}
if _, err := fw.Seek(0, io.SeekStart); err != nil {
return err
}
vmlinuzLba := uint32((vmlinuzOffset / 512) + 8192)
cmdlineTxtLba := uint32((cmdlineOffset / 512) + 8192)
fmt.Printf("MBR summary:\n")
fmt.Printf(" LBAs: vmlinuz=%d cmdline.txt=%d\n", vmlinuzLba, cmdlineTxtLba)
fmt.Printf(" PARTUUID: %08x\n", partuuid)
mbr := mbr.Configure(vmlinuzLba, cmdlineTxtLba, partuuid)
if _, err := fw.Write(mbr[:]); err != nil {
return err
}
return nil
}
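// The "+ 8192" above encodes the fixed image layout assumed here: the FAT boot
// partition begins at sector 8192, so a byte offset inside the boot file
// system maps to an absolute LBA of offset/512 + 8192. Worked example: a
// vmlinuz placed 1 MiB into the partition starts at LBA 1048576/512 + 8192 =
// 10240. (The real offsets come from fat.Reader.Extents at image build time.)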
// getDuplication returns the paths that are present in both of the given filesystems
func getDuplication(fiA, fiB *FileInfo) (paths []string) {
allPaths := append(fiA.pathList(), fiB.pathList()...)
checkMap := make(map[string]bool, len(allPaths))
for _, p := range allPaths {
if _, ok := checkMap[p]; ok {
paths = append(paths, p)
}
checkMap[p] = true
}
return paths
}
| writeRoot | identifier_name |
write.go | package packer
import (
"bufio"
"bytes"
"crypto/sha256"
"fmt"
"io"
"io/fs"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"sort"
"strings"
"time"
"github.com/gokrazy/internal/config"
"github.com/gokrazy/internal/deviceconfig"
"github.com/gokrazy/internal/fat"
"github.com/gokrazy/internal/humanize"
"github.com/gokrazy/internal/mbr"
"github.com/gokrazy/internal/squashfs"
"github.com/gokrazy/tools/internal/measure"
"github.com/gokrazy/tools/packer"
"github.com/gokrazy/tools/third_party/systemd-250.5-1"
)
func copyFile(fw *fat.Writer, dest string, src fs.File) error {
st, err := src.Stat()
if err != nil {
return err
}
w, err := fw.File(dest, st.ModTime())
if err != nil {
return err
}
if _, err := io.Copy(w, src); err != nil {
return err
}
return src.Close()
}
func copyFileSquash(d *squashfs.Directory, dest, src string) error {
f, err := os.Open(src)
if err != nil {
return err
}
defer f.Close()
st, err := f.Stat()
if err != nil {
return err
}
w, err := d.File(filepath.Base(dest), st.ModTime(), st.Mode()&os.ModePerm)
if err != nil {
return err
}
if _, err := io.Copy(w, f); err != nil {
return err
}
return w.Close()
}
func (p *Pack) writeCmdline(fw *fat.Writer, src string) error {
b, err := ioutil.ReadFile(src)
if err != nil {
return err
}
cmdline := "console=tty1 "
serialConsole := p.Cfg.SerialConsoleOrDefault()
if serialConsole != "disabled" && serialConsole != "off" {
if serialConsole == "UART0" {
// For backwards compatibility, treat the special value UART0 as
// serial0,115200:
cmdline += "console=serial0,115200 "
} else {
cmdline += "console=" + serialConsole + " "
}
}
cmdline += string(b)
// TODO: change {gokrazy,rtr7}/kernel/cmdline.txt to contain a dummy PARTUUID=
if p.ModifyCmdlineRoot() {
root := "root=" + p.Root()
cmdline = strings.ReplaceAll(cmdline, "root=/dev/mmcblk0p2", root)
cmdline = strings.ReplaceAll(cmdline, "root=/dev/sda2", root)
} else {
log.Printf("(not using PARTUUID= in cmdline.txt yet)")
}
// Pad the kernel command line with enough whitespace that can be used for
// in-place file overwrites to add additional command line flags for the
// gokrazy update process:
const pad = 64
padded := append([]byte(cmdline), bytes.Repeat([]byte{' '}, pad)...)
w, err := fw.File("/cmdline.txt", time.Now())
if err != nil {
return err
}
if _, err := w.Write(padded); err != nil {
return err
}
if p.UseGPTPartuuid {
// In addition to the cmdline.txt for the Raspberry Pi bootloader, also
// write a systemd-boot entries configuration file as per
// https://systemd.io/BOOT_LOADER_SPECIFICATION/
w, err = fw.File("/loader/entries/gokrazy.conf", time.Now())
if err != nil {
return err
}
fmt.Fprintf(w, `title gokrazy
linux /vmlinuz
`)
if _, err := w.Write(append([]byte("options "), padded...)); err != nil {
return err
}
}
return nil
}
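// With UseGPTPartuuid set, the boot partition ends up carrying a systemd-boot
// style entry that looks roughly like this (illustrative values only; the real
// options line is the padded kernel command line built above):
//
//	# /loader/entries/gokrazy.conf
//	title gokrazy
//	linux /vmlinuz
//	options console=tty1 console=serial0,115200 <cmdline.txt contents> root=<p.Root()>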
func (p *Pack) writeConfig(fw *fat.Writer, src string) error {
b, err := ioutil.ReadFile(src)
if err != nil {
return err
}
config := string(b)
if p.Cfg.SerialConsoleOrDefault() != "off" {
config = strings.ReplaceAll(config, "enable_uart=0", "enable_uart=1")
}
w, err := fw.File("/config.txt", time.Now())
if err != nil {
return err
}
_, err = w.Write([]byte(config))
return err
}
func shortenSHA256(sum []byte) string {
hash := fmt.Sprintf("%x", sum)
if len(hash) > 10 {
return hash[:10]
}
return hash
}
var (
firmwareGlobs = []string{
"*.bin",
"*.dat",
"*.elf",
"*.upd",
"*.sig",
}
kernelGlobs = []string{
"boot.scr", // u-boot script file
"vmlinuz",
"*.dtb",
}
)
func (p *Pack) writeBoot(f io.Writer, mbrfilename string) error {
fmt.Printf("\n")
fmt.Printf("Creating boot file system\n")
done := measure.Interactively("creating boot file system")
fragment := ""
defer func() {
done(fragment)
}()
globs := make([]string, 0, len(firmwareGlobs)+len(kernelGlobs))
if fw := p.Cfg.FirmwarePackageOrDefault(); fw != "" {
firmwareDir, err := packer.PackageDir(fw)
if err != nil {
return err
}
for _, glob := range firmwareGlobs {
globs = append(globs, filepath.Join(firmwareDir, glob))
}
}
var eepromDir string
if eeprom := p.Cfg.EEPROMPackageOrDefault(); eeprom != "" {
var err error
eepromDir, err = packer.PackageDir(eeprom)
if err != nil {
return err
}
}
kernelDir, err := packer.PackageDir(p.Cfg.KernelPackageOrDefault())
if err != nil {
return err
}
fmt.Printf("\nKernel directory: %s\n", kernelDir)
for _, glob := range kernelGlobs {
globs = append(globs, filepath.Join(kernelDir, glob))
}
bufw := bufio.NewWriter(f)
fw, err := fat.NewWriter(bufw)
if err != nil {
return err
}
for _, pattern := range globs {
matches, err := filepath.Glob(pattern)
if err != nil {
return err
}
for _, m := range matches {
src, err := os.Open(m)
if err != nil {
return err
}
if err := copyFile(fw, "/"+filepath.Base(m), src); err != nil {
return err
}
}
}
// EEPROM update procedure. See also:
// https://news.ycombinator.com/item?id=21674550
writeEepromUpdateFile := func(globPattern, target string) (sig string, _ error) {
matches, err := filepath.Glob(globPattern)
if err != nil {
return "", err
}
if len(matches) == 0 {
return "", fmt.Errorf("invalid -eeprom_package: no files matching %s", filepath.Base(globPattern))
}
// Select the EEPROM file that sorts last.
// This corresponds to most recent for the pieeprom-*.bin files,
// which contain the date in yyyy-mm-dd format.
sort.Sort(sort.Reverse(sort.StringSlice(matches)))
f, err := os.Open(matches[0])
if err != nil {
return "", err
}
defer f.Close()
st, err := f.Stat()
if err != nil {
return "", err
}
// Copy the EEPROM file into the image and calculate its SHA256 hash
// while doing so:
w, err := fw.File(target, st.ModTime())
if err != nil {
return "", err
}
h := sha256.New()
if _, err := io.Copy(w, io.TeeReader(f, h)); err != nil {
return "", err
}
if base := filepath.Base(target); base == "recovery.bin" || base == "RECOVERY.000" {
fmt.Printf(" %s\n", base)
// No signature required for recovery.bin itself.
return "", nil
}
fmt.Printf(" %s (sig %s)\n", filepath.Base(target), shortenSHA256(h.Sum(nil)))
// Include the SHA256 hash in the image in an accompanying .sig file:
sigFn := target
ext := filepath.Ext(sigFn)
if ext == "" {
return "", fmt.Errorf("BUG: cannot derive signature file name from matches[0]=%q", matches[0])
}
sigFn = strings.TrimSuffix(sigFn, ext) + ".sig"
w, err = fw.File(sigFn, st.ModTime())
if err != nil {
return "", err
}
_, err = fmt.Fprintf(w, "%x\n", h.Sum(nil))
return fmt.Sprintf("%x", h.Sum(nil)), err
}
if eepromDir != "" {
fmt.Printf("EEPROM update summary:\n")
pieSig, err := writeEepromUpdateFile(filepath.Join(eepromDir, "pieeprom-*.bin"), "/pieeprom.upd")
if err != nil {
return err
}
vlSig, err := writeEepromUpdateFile(filepath.Join(eepromDir, "vl805-*.bin"), "/vl805.bin")
if err != nil {
return err
}
targetFilename := "/recovery.bin"
if pieSig == p.ExistingEEPROM.PieepromSHA256 &&
vlSig == p.ExistingEEPROM.VL805SHA256 {
fmt.Printf(" installing recovery.bin as RECOVERY.000 (EEPROM already up-to-date)\n")
targetFilename = "/RECOVERY.000"
}
if _, err := writeEepromUpdateFile(filepath.Join(eepromDir, "recovery.bin"), targetFilename); err != nil {
return err
}
}
if err := p.writeCmdline(fw, filepath.Join(kernelDir, "cmdline.txt")); err != nil {
return err
}
if err := p.writeConfig(fw, filepath.Join(kernelDir, "config.txt")); err != nil {
return err
}
if p.UseGPTPartuuid {
srcX86, err := systemd.SystemdBootX64.Open("systemd-bootx64.efi")
if err != nil {
return err
}
if err := copyFile(fw, "/EFI/BOOT/BOOTX64.EFI", srcX86); err != nil {
return err
}
srcAA86, err := systemd.SystemdBootAA64.Open("systemd-bootaa64.efi")
if err != nil {
return err
}
if err := copyFile(fw, "/EFI/BOOT/BOOTAA64.EFI", srcAA86); err != nil {
return err
}
}
if err := fw.Flush(); err != nil {
return err
}
if err := bufw.Flush(); err != nil {
return err
}
if seeker, ok := f.(io.Seeker); ok {
off, err := seeker.Seek(0, io.SeekCurrent)
if err != nil {
return err
}
fragment = ", " + humanize.Bytes(uint64(off))
}
if mbrfilename != "" {
if _, ok := f.(io.ReadSeeker); !ok {
return fmt.Errorf("BUG: f does not implement io.ReadSeeker")
}
fmbr, err := os.OpenFile(mbrfilename, os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
return err
}
defer fmbr.Close()
if err := writeMBR(f.(io.ReadSeeker), fmbr, p.Partuuid); err != nil {
return err
}
if err := fmbr.Close(); err != nil {
return err
}
}
return nil
}
type FileInfo struct {
Filename string
Mode os.FileMode
FromHost string
FromLiteral string
SymlinkDest string
Dirents []*FileInfo
}
func (fi *FileInfo) isFile() bool {
return fi.FromHost != "" || fi.FromLiteral != ""
}
func (fi *FileInfo) pathList() (paths []string) {
for _, ent := range fi.Dirents {
if ent.isFile() {
paths = append(paths, ent.Filename)
continue
}
for _, e := range ent.pathList() {
paths = append(paths, path.Join(ent.Filename, e))
}
}
return paths
}
func (fi *FileInfo) combine(fi2 *FileInfo) error {
for _, ent2 := range fi2.Dirents {
// get existing file info
var f *FileInfo
for _, ent := range fi.Dirents { | }
// if not found add complete subtree directly
if f == nil {
fi.Dirents = append(fi.Dirents, ent2)
continue
}
// file overwrite is not supported -> return error
if f.isFile() || ent2.isFile() {
return fmt.Errorf("file already exist: %s", ent2.Filename)
}
if err := f.combine(ent2); err != nil {
return err
}
}
return nil
}
func (fi *FileInfo) mustFindDirent(path string) *FileInfo {
for _, ent := range fi.Dirents {
// TODO: split path into components and compare piecemeal
if ent.Filename == path {
return ent
}
}
log.Panicf("mustFindDirent(%q) did not find directory entry", path)
return nil
}
func findBins(cfg *config.Struct, buildEnv *packer.BuildEnv, bindir string) (*FileInfo, error) {
result := FileInfo{Filename: ""}
// TODO: doing all three packer.MainPackages calls concurrently hides go
// module proxy latency
gokrazyMainPkgs, err := buildEnv.MainPackages(cfg.GokrazyPackagesOrDefault())
if err != nil {
return nil, err
}
gokrazy := FileInfo{Filename: "gokrazy"}
for _, pkg := range gokrazyMainPkgs {
binPath := filepath.Join(bindir, pkg.Basename())
fileIsELFOrFatal(binPath)
gokrazy.Dirents = append(gokrazy.Dirents, &FileInfo{
Filename: pkg.Basename(),
FromHost: binPath,
})
}
if cfg.InternalCompatibilityFlags.InitPkg != "" {
initMainPkgs, err := buildEnv.MainPackages([]string{cfg.InternalCompatibilityFlags.InitPkg})
if err != nil {
return nil, err
}
for _, pkg := range initMainPkgs {
if got, want := pkg.Basename(), "init"; got != want {
log.Printf("Error: -init_pkg=%q produced unexpected binary name: got %q, want %q", cfg.InternalCompatibilityFlags.InitPkg, got, want)
continue
}
binPath := filepath.Join(bindir, pkg.Basename())
fileIsELFOrFatal(binPath)
gokrazy.Dirents = append(gokrazy.Dirents, &FileInfo{
Filename: pkg.Basename(),
FromHost: binPath,
})
}
}
result.Dirents = append(result.Dirents, &gokrazy)
mainPkgs, err := buildEnv.MainPackages(cfg.Packages)
if err != nil {
return nil, err
}
user := FileInfo{Filename: "user"}
for _, pkg := range mainPkgs {
binPath := filepath.Join(bindir, pkg.Basename())
fileIsELFOrFatal(binPath)
user.Dirents = append(user.Dirents, &FileInfo{
Filename: pkg.Basename(),
FromHost: binPath,
})
}
result.Dirents = append(result.Dirents, &user)
return &result, nil
}
func writeFileInfo(dir *squashfs.Directory, fi *FileInfo) error {
if fi.FromHost != "" { // copy a regular file
return copyFileSquash(dir, fi.Filename, fi.FromHost)
}
if fi.FromLiteral != "" { // write a regular file
mode := fi.Mode
if mode == 0 {
mode = 0444
}
w, err := dir.File(fi.Filename, time.Now(), mode)
if err != nil {
return err
}
if _, err := w.Write([]byte(fi.FromLiteral)); err != nil {
return err
}
return w.Close()
}
if fi.SymlinkDest != "" { // create a symlink
return dir.Symlink(fi.SymlinkDest, fi.Filename, time.Now(), 0444)
}
// subdir
var d *squashfs.Directory
if fi.Filename == "" { // root
d = dir
} else {
d = dir.Directory(fi.Filename, time.Now())
}
sort.Slice(fi.Dirents, func(i, j int) bool {
return fi.Dirents[i].Filename < fi.Dirents[j].Filename
})
for _, ent := range fi.Dirents {
if err := writeFileInfo(d, ent); err != nil {
return err
}
}
return d.Flush()
}
func writeRoot(f io.WriteSeeker, root *FileInfo) error {
fmt.Printf("\n")
fmt.Printf("Creating root file system\n")
done := measure.Interactively("creating root file system")
defer func() {
done("")
}()
// TODO: make fw.Flush() report the size of the root fs
fw, err := squashfs.NewWriter(f, time.Now())
if err != nil {
return err
}
if err := writeFileInfo(fw.Root, root); err != nil {
return err
}
return fw.Flush()
}
func (p *Pack) writeRootDeviceFiles(f io.WriteSeeker, rootDeviceFiles []deviceconfig.RootFile) error {
kernelDir, err := packer.PackageDir(p.Cfg.KernelPackageOrDefault())
if err != nil {
return err
}
for _, rootFile := range rootDeviceFiles {
if _, err := f.Seek(rootFile.Offset, io.SeekStart); err != nil {
return err
}
source, err := os.Open(filepath.Join(kernelDir, rootFile.Name))
if err != nil {
return err
}
if _, err := io.Copy(f, source); err != nil {
return err
}
_ = source.Close()
}
return nil
}
func writeMBR(f io.ReadSeeker, fw io.WriteSeeker, partuuid uint32) error {
rd, err := fat.NewReader(f)
if err != nil {
return err
}
vmlinuzOffset, _, err := rd.Extents("/vmlinuz")
if err != nil {
return err
}
cmdlineOffset, _, err := rd.Extents("/cmdline.txt")
if err != nil {
return err
}
if _, err := fw.Seek(0, io.SeekStart); err != nil {
return err
}
vmlinuzLba := uint32((vmlinuzOffset / 512) + 8192)
cmdlineTxtLba := uint32((cmdlineOffset / 512) + 8192)
fmt.Printf("MBR summary:\n")
fmt.Printf(" LBAs: vmlinuz=%d cmdline.txt=%d\n", vmlinuzLba, cmdlineTxtLba)
fmt.Printf(" PARTUUID: %08x\n", partuuid)
mbr := mbr.Configure(vmlinuzLba, cmdlineTxtLba, partuuid)
if _, err := fw.Write(mbr[:]); err != nil {
return err
}
return nil
}
// getDuplication returns the paths that are present in both of the given filesystems
func getDuplication(fiA, fiB *FileInfo) (paths []string) {
allPaths := append(fiA.pathList(), fiB.pathList()...)
checkMap := make(map[string]bool, len(allPaths))
for _, p := range allPaths {
if _, ok := checkMap[p]; ok {
paths = append(paths, p)
}
checkMap[p] = true
}
return paths
} | if ent.Filename == ent2.Filename {
f = ent
break
} | random_line_split |
write.go | package packer
import (
"bufio"
"bytes"
"crypto/sha256"
"fmt"
"io"
"io/fs"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"sort"
"strings"
"time"
"github.com/gokrazy/internal/config"
"github.com/gokrazy/internal/deviceconfig"
"github.com/gokrazy/internal/fat"
"github.com/gokrazy/internal/humanize"
"github.com/gokrazy/internal/mbr"
"github.com/gokrazy/internal/squashfs"
"github.com/gokrazy/tools/internal/measure"
"github.com/gokrazy/tools/packer"
"github.com/gokrazy/tools/third_party/systemd-250.5-1"
)
func copyFile(fw *fat.Writer, dest string, src fs.File) error {
st, err := src.Stat()
if err != nil {
return err
}
w, err := fw.File(dest, st.ModTime())
if err != nil {
return err
}
if _, err := io.Copy(w, src); err != nil {
return err
}
return src.Close()
}
func copyFileSquash(d *squashfs.Directory, dest, src string) error {
f, err := os.Open(src)
if err != nil {
return err
}
defer f.Close()
st, err := f.Stat()
if err != nil {
return err
}
w, err := d.File(filepath.Base(dest), st.ModTime(), st.Mode()&os.ModePerm)
if err != nil {
return err
}
if _, err := io.Copy(w, f); err != nil {
return err
}
return w.Close()
}
func (p *Pack) writeCmdline(fw *fat.Writer, src string) error {
b, err := ioutil.ReadFile(src)
if err != nil {
return err
}
cmdline := "console=tty1 "
serialConsole := p.Cfg.SerialConsoleOrDefault()
if serialConsole != "disabled" && serialConsole != "off" {
if serialConsole == "UART0" {
// For backwards compatibility, treat the special value UART0 as
// serial0,115200:
cmdline += "console=serial0,115200 "
} else {
cmdline += "console=" + serialConsole + " "
}
}
cmdline += string(b)
// TODO: change {gokrazy,rtr7}/kernel/cmdline.txt to contain a dummy PARTUUID=
if p.ModifyCmdlineRoot() {
root := "root=" + p.Root()
cmdline = strings.ReplaceAll(cmdline, "root=/dev/mmcblk0p2", root)
cmdline = strings.ReplaceAll(cmdline, "root=/dev/sda2", root)
} else {
log.Printf("(not using PARTUUID= in cmdline.txt yet)")
}
// Pad the kernel command line with enough whitespace that can be used for
// in-place file overwrites to add additional command line flags for the
// gokrazy update process:
const pad = 64
padded := append([]byte(cmdline), bytes.Repeat([]byte{' '}, pad)...)
w, err := fw.File("/cmdline.txt", time.Now())
if err != nil {
return err
}
if _, err := w.Write(padded); err != nil {
return err
}
if p.UseGPTPartuuid {
// In addition to the cmdline.txt for the Raspberry Pi bootloader, also
// write a systemd-boot entries configuration file as per
// https://systemd.io/BOOT_LOADER_SPECIFICATION/
w, err = fw.File("/loader/entries/gokrazy.conf", time.Now())
if err != nil {
return err
}
fmt.Fprintf(w, `title gokrazy
linux /vmlinuz
`)
if _, err := w.Write(append([]byte("options "), padded...)); err != nil {
return err
}
}
return nil
}
func (p *Pack) writeConfig(fw *fat.Writer, src string) error {
b, err := ioutil.ReadFile(src)
if err != nil {
return err
}
config := string(b)
if p.Cfg.SerialConsoleOrDefault() != "off" {
config = strings.ReplaceAll(config, "enable_uart=0", "enable_uart=1")
}
w, err := fw.File("/config.txt", time.Now())
if err != nil {
return err
}
_, err = w.Write([]byte(config))
return err
}
func shortenSHA256(sum []byte) string {
hash := fmt.Sprintf("%x", sum)
if len(hash) > 10 {
return hash[:10]
}
return hash
}
var (
firmwareGlobs = []string{
"*.bin",
"*.dat",
"*.elf",
"*.upd",
"*.sig",
}
kernelGlobs = []string{
"boot.scr", // u-boot script file
"vmlinuz",
"*.dtb",
}
)
func (p *Pack) writeBoot(f io.Writer, mbrfilename string) error {
fmt.Printf("\n")
fmt.Printf("Creating boot file system\n")
done := measure.Interactively("creating boot file system")
fragment := ""
defer func() {
done(fragment)
}()
globs := make([]string, 0, len(firmwareGlobs)+len(kernelGlobs))
if fw := p.Cfg.FirmwarePackageOrDefault(); fw != "" {
firmwareDir, err := packer.PackageDir(fw)
if err != nil {
return err
}
for _, glob := range firmwareGlobs {
globs = append(globs, filepath.Join(firmwareDir, glob))
}
}
var eepromDir string
if eeprom := p.Cfg.EEPROMPackageOrDefault(); eeprom != "" {
var err error
eepromDir, err = packer.PackageDir(eeprom)
if err != nil {
return err
}
}
kernelDir, err := packer.PackageDir(p.Cfg.KernelPackageOrDefault())
if err != nil {
return err
}
fmt.Printf("\nKernel directory: %s\n", kernelDir)
for _, glob := range kernelGlobs {
globs = append(globs, filepath.Join(kernelDir, glob))
}
bufw := bufio.NewWriter(f)
fw, err := fat.NewWriter(bufw)
if err != nil {
return err
}
for _, pattern := range globs {
matches, err := filepath.Glob(pattern)
if err != nil {
return err
}
for _, m := range matches {
src, err := os.Open(m)
if err != nil {
return err
}
if err := copyFile(fw, "/"+filepath.Base(m), src); err != nil {
return err
}
}
}
// EEPROM update procedure. See also:
// https://news.ycombinator.com/item?id=21674550
writeEepromUpdateFile := func(globPattern, target string) (sig string, _ error) {
matches, err := filepath.Glob(globPattern)
if err != nil {
return "", err
}
if len(matches) == 0 {
return "", fmt.Errorf("invalid -eeprom_package: no files matching %s", filepath.Base(globPattern))
}
// Select the EEPROM file that sorts last.
// This corresponds to most recent for the pieeprom-*.bin files,
// which contain the date in yyyy-mm-dd format.
sort.Sort(sort.Reverse(sort.StringSlice(matches)))
f, err := os.Open(matches[0])
if err != nil {
return "", err
}
defer f.Close()
st, err := f.Stat()
if err != nil {
return "", err
}
// Copy the EEPROM file into the image and calculate its SHA256 hash
// while doing so:
w, err := fw.File(target, st.ModTime())
if err != nil {
return "", err
}
h := sha256.New()
if _, err := io.Copy(w, io.TeeReader(f, h)); err != nil {
return "", err
}
if base := filepath.Base(target); base == "recovery.bin" || base == "RECOVERY.000" {
fmt.Printf(" %s\n", base)
// No signature required for recovery.bin itself.
return "", nil
}
fmt.Printf(" %s (sig %s)\n", filepath.Base(target), shortenSHA256(h.Sum(nil)))
// Include the SHA256 hash in the image in an accompanying .sig file:
sigFn := target
ext := filepath.Ext(sigFn)
if ext == "" {
return "", fmt.Errorf("BUG: cannot derive signature file name from matches[0]=%q", matches[0])
}
sigFn = strings.TrimSuffix(sigFn, ext) + ".sig"
w, err = fw.File(sigFn, st.ModTime())
if err != nil {
return "", err
}
_, err = fmt.Fprintf(w, "%x\n", h.Sum(nil))
return fmt.Sprintf("%x", h.Sum(nil)), err
}
if eepromDir != "" {
fmt.Printf("EEPROM update summary:\n")
pieSig, err := writeEepromUpdateFile(filepath.Join(eepromDir, "pieeprom-*.bin"), "/pieeprom.upd")
if err != nil {
return err
}
vlSig, err := writeEepromUpdateFile(filepath.Join(eepromDir, "vl805-*.bin"), "/vl805.bin")
if err != nil {
return err
}
targetFilename := "/recovery.bin"
if pieSig == p.ExistingEEPROM.PieepromSHA256 &&
vlSig == p.ExistingEEPROM.VL805SHA256 {
fmt.Printf(" installing recovery.bin as RECOVERY.000 (EEPROM already up-to-date)\n")
targetFilename = "/RECOVERY.000"
}
if _, err := writeEepromUpdateFile(filepath.Join(eepromDir, "recovery.bin"), targetFilename); err != nil {
return err
}
}
if err := p.writeCmdline(fw, filepath.Join(kernelDir, "cmdline.txt")); err != nil {
return err
}
if err := p.writeConfig(fw, filepath.Join(kernelDir, "config.txt")); err != nil {
return err
}
if p.UseGPTPartuuid {
srcX86, err := systemd.SystemdBootX64.Open("systemd-bootx64.efi")
if err != nil {
return err
}
if err := copyFile(fw, "/EFI/BOOT/BOOTX64.EFI", srcX86); err != nil {
return err
}
srcAA86, err := systemd.SystemdBootAA64.Open("systemd-bootaa64.efi")
if err != nil {
return err
}
if err := copyFile(fw, "/EFI/BOOT/BOOTAA64.EFI", srcAA86); err != nil {
return err
}
}
if err := fw.Flush(); err != nil {
return err
}
if err := bufw.Flush(); err != nil {
return err
}
if seeker, ok := f.(io.Seeker); ok {
off, err := seeker.Seek(0, io.SeekCurrent)
if err != nil {
return err
}
fragment = ", " + humanize.Bytes(uint64(off))
}
if mbrfilename != "" {
if _, ok := f.(io.ReadSeeker); !ok {
return fmt.Errorf("BUG: f does not implement io.ReadSeeker")
}
fmbr, err := os.OpenFile(mbrfilename, os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
return err
}
defer fmbr.Close()
if err := writeMBR(f.(io.ReadSeeker), fmbr, p.Partuuid); err != nil {
return err
}
if err := fmbr.Close(); err != nil {
return err
}
}
return nil
}
type FileInfo struct {
Filename string
Mode os.FileMode
FromHost string
FromLiteral string
SymlinkDest string
Dirents []*FileInfo
}
func (fi *FileInfo) isFile() bool {
return fi.FromHost != "" || fi.FromLiteral != ""
}
func (fi *FileInfo) pathList() (paths []string) {
for _, ent := range fi.Dirents {
if ent.isFile() {
paths = append(paths, ent.Filename)
continue
}
for _, e := range ent.pathList() {
paths = append(paths, path.Join(ent.Filename, e))
}
}
return paths
}
func (fi *FileInfo) combine(fi2 *FileInfo) error {
for _, ent2 := range fi2.Dirents {
// get existing file info
var f *FileInfo
for _, ent := range fi.Dirents {
if ent.Filename == ent2.Filename {
f = ent
break
}
}
// if not found add complete subtree directly
if f == nil {
fi.Dirents = append(fi.Dirents, ent2)
continue
}
// file overwrite is not supported -> return error
if f.isFile() || ent2.isFile() {
return fmt.Errorf("file already exist: %s", ent2.Filename)
}
if err := f.combine(ent2); err != nil {
return err
}
}
return nil
}
func (fi *FileInfo) mustFindDirent(path string) *FileInfo |
func findBins(cfg *config.Struct, buildEnv *packer.BuildEnv, bindir string) (*FileInfo, error) {
result := FileInfo{Filename: ""}
// TODO: doing all three packer.MainPackages calls concurrently hides go
// module proxy latency
gokrazyMainPkgs, err := buildEnv.MainPackages(cfg.GokrazyPackagesOrDefault())
if err != nil {
return nil, err
}
gokrazy := FileInfo{Filename: "gokrazy"}
for _, pkg := range gokrazyMainPkgs {
binPath := filepath.Join(bindir, pkg.Basename())
fileIsELFOrFatal(binPath)
gokrazy.Dirents = append(gokrazy.Dirents, &FileInfo{
Filename: pkg.Basename(),
FromHost: binPath,
})
}
if cfg.InternalCompatibilityFlags.InitPkg != "" {
initMainPkgs, err := buildEnv.MainPackages([]string{cfg.InternalCompatibilityFlags.InitPkg})
if err != nil {
return nil, err
}
for _, pkg := range initMainPkgs {
if got, want := pkg.Basename(), "init"; got != want {
log.Printf("Error: -init_pkg=%q produced unexpected binary name: got %q, want %q", cfg.InternalCompatibilityFlags.InitPkg, got, want)
continue
}
binPath := filepath.Join(bindir, pkg.Basename())
fileIsELFOrFatal(binPath)
gokrazy.Dirents = append(gokrazy.Dirents, &FileInfo{
Filename: pkg.Basename(),
FromHost: binPath,
})
}
}
result.Dirents = append(result.Dirents, &gokrazy)
mainPkgs, err := buildEnv.MainPackages(cfg.Packages)
if err != nil {
return nil, err
}
user := FileInfo{Filename: "user"}
for _, pkg := range mainPkgs {
binPath := filepath.Join(bindir, pkg.Basename())
fileIsELFOrFatal(binPath)
user.Dirents = append(user.Dirents, &FileInfo{
Filename: pkg.Basename(),
FromHost: binPath,
})
}
result.Dirents = append(result.Dirents, &user)
return &result, nil
}
func writeFileInfo(dir *squashfs.Directory, fi *FileInfo) error {
if fi.FromHost != "" { // copy a regular file
return copyFileSquash(dir, fi.Filename, fi.FromHost)
}
if fi.FromLiteral != "" { // write a regular file
mode := fi.Mode
if mode == 0 {
mode = 0444
}
w, err := dir.File(fi.Filename, time.Now(), mode)
if err != nil {
return err
}
if _, err := w.Write([]byte(fi.FromLiteral)); err != nil {
return err
}
return w.Close()
}
if fi.SymlinkDest != "" { // create a symlink
return dir.Symlink(fi.SymlinkDest, fi.Filename, time.Now(), 0444)
}
// subdir
var d *squashfs.Directory
if fi.Filename == "" { // root
d = dir
} else {
d = dir.Directory(fi.Filename, time.Now())
}
sort.Slice(fi.Dirents, func(i, j int) bool {
return fi.Dirents[i].Filename < fi.Dirents[j].Filename
})
for _, ent := range fi.Dirents {
if err := writeFileInfo(d, ent); err != nil {
return err
}
}
return d.Flush()
}
func writeRoot(f io.WriteSeeker, root *FileInfo) error {
fmt.Printf("\n")
fmt.Printf("Creating root file system\n")
done := measure.Interactively("creating root file system")
defer func() {
done("")
}()
// TODO: make fw.Flush() report the size of the root fs
fw, err := squashfs.NewWriter(f, time.Now())
if err != nil {
return err
}
if err := writeFileInfo(fw.Root, root); err != nil {
return err
}
return fw.Flush()
}
func (p *Pack) writeRootDeviceFiles(f io.WriteSeeker, rootDeviceFiles []deviceconfig.RootFile) error {
kernelDir, err := packer.PackageDir(p.Cfg.KernelPackageOrDefault())
if err != nil {
return err
}
for _, rootFile := range rootDeviceFiles {
if _, err := f.Seek(rootFile.Offset, io.SeekStart); err != nil {
return err
}
source, err := os.Open(filepath.Join(kernelDir, rootFile.Name))
if err != nil {
return err
}
if _, err := io.Copy(f, source); err != nil {
return err
}
_ = source.Close()
}
return nil
}
func writeMBR(f io.ReadSeeker, fw io.WriteSeeker, partuuid uint32) error {
rd, err := fat.NewReader(f)
if err != nil {
return err
}
vmlinuzOffset, _, err := rd.Extents("/vmlinuz")
if err != nil {
return err
}
cmdlineOffset, _, err := rd.Extents("/cmdline.txt")
if err != nil {
return err
}
if _, err := fw.Seek(0, io.SeekStart); err != nil {
return err
}
vmlinuzLba := uint32((vmlinuzOffset / 512) + 8192)
cmdlineTxtLba := uint32((cmdlineOffset / 512) + 8192)
fmt.Printf("MBR summary:\n")
fmt.Printf(" LBAs: vmlinuz=%d cmdline.txt=%d\n", vmlinuzLba, cmdlineTxtLba)
fmt.Printf(" PARTUUID: %08x\n", partuuid)
mbr := mbr.Configure(vmlinuzLba, cmdlineTxtLba, partuuid)
if _, err := fw.Write(mbr[:]); err != nil {
return err
}
return nil
}
// getDuplication returns the paths that are present in both of the given filesystems
func getDuplication(fiA, fiB *FileInfo) (paths []string) {
allPaths := append(fiA.pathList(), fiB.pathList()...)
checkMap := make(map[string]bool, len(allPaths))
for _, p := range allPaths {
if _, ok := checkMap[p]; ok {
paths = append(paths, p)
}
checkMap[p] = true
}
return paths
}
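// getDuplication is useful for detecting two trees that try to provide the
// same path before they are combined, e.g. built binaries vs. extra files.
// Sketch only; the surrounding calls are assumptions, not this package's API:
//
//	root, _ := findBins(cfg, buildEnv, bindir)
//	extra := &FileInfo{ /* parsed from an extra-files directory */ }
//	if dupes := getDuplication(root, extra); len(dupes) > 0 {
//		log.Fatalf("duplicate paths: %v", dupes)
//	}
//	_ = root.combine(extra)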
| {
for _, ent := range fi.Dirents {
// TODO: split path into components and compare piecemeal
if ent.Filename == path {
return ent
}
}
log.Panicf("mustFindDirent(%q) did not find directory entry", path)
return nil
} | identifier_body |
write.go | package packer
import (
"bufio"
"bytes"
"crypto/sha256"
"fmt"
"io"
"io/fs"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"sort"
"strings"
"time"
"github.com/gokrazy/internal/config"
"github.com/gokrazy/internal/deviceconfig"
"github.com/gokrazy/internal/fat"
"github.com/gokrazy/internal/humanize"
"github.com/gokrazy/internal/mbr"
"github.com/gokrazy/internal/squashfs"
"github.com/gokrazy/tools/internal/measure"
"github.com/gokrazy/tools/packer"
"github.com/gokrazy/tools/third_party/systemd-250.5-1"
)
func copyFile(fw *fat.Writer, dest string, src fs.File) error {
st, err := src.Stat()
if err != nil {
return err
}
w, err := fw.File(dest, st.ModTime())
if err != nil {
return err
}
if _, err := io.Copy(w, src); err != nil {
return err
}
return src.Close()
}
func copyFileSquash(d *squashfs.Directory, dest, src string) error {
f, err := os.Open(src)
if err != nil {
return err
}
defer f.Close()
st, err := f.Stat()
if err != nil {
return err
}
w, err := d.File(filepath.Base(dest), st.ModTime(), st.Mode()&os.ModePerm)
if err != nil {
return err
}
if _, err := io.Copy(w, f); err != nil {
return err
}
return w.Close()
}
func (p *Pack) writeCmdline(fw *fat.Writer, src string) error {
b, err := ioutil.ReadFile(src)
if err != nil {
return err
}
cmdline := "console=tty1 "
serialConsole := p.Cfg.SerialConsoleOrDefault()
if serialConsole != "disabled" && serialConsole != "off" {
if serialConsole == "UART0" {
// For backwards compatibility, treat the special value UART0 as
// serial0,115200:
cmdline += "console=serial0,115200 "
} else {
cmdline += "console=" + serialConsole + " "
}
}
cmdline += string(b)
// TODO: change {gokrazy,rtr7}/kernel/cmdline.txt to contain a dummy PARTUUID=
if p.ModifyCmdlineRoot() {
root := "root=" + p.Root()
cmdline = strings.ReplaceAll(cmdline, "root=/dev/mmcblk0p2", root)
cmdline = strings.ReplaceAll(cmdline, "root=/dev/sda2", root)
} else {
log.Printf("(not using PARTUUID= in cmdline.txt yet)")
}
// Pad the kernel command line with enough whitespace that can be used for
// in-place file overwrites to add additional command line flags for the
// gokrazy update process:
const pad = 64
padded := append([]byte(cmdline), bytes.Repeat([]byte{' '}, pad)...)
w, err := fw.File("/cmdline.txt", time.Now())
if err != nil {
return err
}
if _, err := w.Write(padded); err != nil {
return err
}
if p.UseGPTPartuuid {
// In addition to the cmdline.txt for the Raspberry Pi bootloader, also
// write a systemd-boot entries configuration file as per
// https://systemd.io/BOOT_LOADER_SPECIFICATION/
w, err = fw.File("/loader/entries/gokrazy.conf", time.Now())
if err != nil {
return err
}
fmt.Fprintf(w, `title gokrazy
linux /vmlinuz
`)
if _, err := w.Write(append([]byte("options "), padded...)); err != nil {
return err
}
}
return nil
}
func (p *Pack) writeConfig(fw *fat.Writer, src string) error {
b, err := ioutil.ReadFile(src)
if err != nil {
return err
}
config := string(b)
if p.Cfg.SerialConsoleOrDefault() != "off" {
config = strings.ReplaceAll(config, "enable_uart=0", "enable_uart=1")
}
w, err := fw.File("/config.txt", time.Now())
if err != nil {
return err
}
_, err = w.Write([]byte(config))
return err
}
func shortenSHA256(sum []byte) string {
hash := fmt.Sprintf("%x", sum)
if len(hash) > 10 {
return hash[:10]
}
return hash
}
var (
firmwareGlobs = []string{
"*.bin",
"*.dat",
"*.elf",
"*.upd",
"*.sig",
}
kernelGlobs = []string{
"boot.scr", // u-boot script file
"vmlinuz",
"*.dtb",
}
)
func (p *Pack) writeBoot(f io.Writer, mbrfilename string) error {
fmt.Printf("\n")
fmt.Printf("Creating boot file system\n")
done := measure.Interactively("creating boot file system")
fragment := ""
defer func() {
done(fragment)
}()
globs := make([]string, 0, len(firmwareGlobs)+len(kernelGlobs))
if fw := p.Cfg.FirmwarePackageOrDefault(); fw != "" {
firmwareDir, err := packer.PackageDir(fw)
if err != nil {
return err
}
for _, glob := range firmwareGlobs {
globs = append(globs, filepath.Join(firmwareDir, glob))
}
}
var eepromDir string
if eeprom := p.Cfg.EEPROMPackageOrDefault(); eeprom != "" {
var err error
eepromDir, err = packer.PackageDir(eeprom)
if err != nil {
return err
}
}
kernelDir, err := packer.PackageDir(p.Cfg.KernelPackageOrDefault())
if err != nil {
return err
}
fmt.Printf("\nKernel directory: %s\n", kernelDir)
for _, glob := range kernelGlobs {
globs = append(globs, filepath.Join(kernelDir, glob))
}
bufw := bufio.NewWriter(f)
fw, err := fat.NewWriter(bufw)
if err != nil {
return err
}
for _, pattern := range globs {
matches, err := filepath.Glob(pattern)
if err != nil {
return err
}
for _, m := range matches {
src, err := os.Open(m)
if err != nil {
return err
}
if err := copyFile(fw, "/"+filepath.Base(m), src); err != nil {
return err
}
}
}
// EEPROM update procedure. See also:
// https://news.ycombinator.com/item?id=21674550
writeEepromUpdateFile := func(globPattern, target string) (sig string, _ error) {
matches, err := filepath.Glob(globPattern)
if err != nil {
return "", err
}
if len(matches) == 0 {
return "", fmt.Errorf("invalid -eeprom_package: no files matching %s", filepath.Base(globPattern))
}
// Select the EEPROM file that sorts last.
// This corresponds to the most recent one for the pieeprom-*.bin files,
// which contain the date in yyyy-mm-dd format.
sort.Sort(sort.Reverse(sort.StringSlice(matches)))
f, err := os.Open(matches[0])
if err != nil {
return "", err
}
defer f.Close()
st, err := f.Stat()
if err != nil {
return "", err
}
// Copy the EEPROM file into the image and calculate its SHA256 hash
// while doing so:
w, err := fw.File(target, st.ModTime())
if err != nil {
return "", err
}
h := sha256.New()
if _, err := io.Copy(w, io.TeeReader(f, h)); err != nil {
return "", err
}
if base := filepath.Base(target); base == "recovery.bin" || base == "RECOVERY.000" {
fmt.Printf(" %s\n", base)
// No signature required for recovery.bin itself.
return "", nil
}
fmt.Printf(" %s (sig %s)\n", filepath.Base(target), shortenSHA256(h.Sum(nil)))
// Include the SHA256 hash in the image in an accompanying .sig file:
sigFn := target
ext := filepath.Ext(sigFn)
if ext == "" {
return "", fmt.Errorf("BUG: cannot derive signature file name from matches[0]=%q", matches[0])
}
sigFn = strings.TrimSuffix(sigFn, ext) + ".sig"
w, err = fw.File(sigFn, st.ModTime())
if err != nil {
return "", err
}
_, err = fmt.Fprintf(w, "%x\n", h.Sum(nil))
return fmt.Sprintf("%x", h.Sum(nil)), err
}
if eepromDir != "" {
fmt.Printf("EEPROM update summary:\n")
pieSig, err := writeEepromUpdateFile(filepath.Join(eepromDir, "pieeprom-*.bin"), "/pieeprom.upd")
if err != nil {
return err
}
vlSig, err := writeEepromUpdateFile(filepath.Join(eepromDir, "vl805-*.bin"), "/vl805.bin")
if err != nil {
return err
}
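// The recovery image normally ships as recovery.bin, which makes the
// bootloader apply the EEPROM update on next boot. If the EEPROM already
// matches the files we just wrote, install it as RECOVERY.000 instead so
// that no update is triggered.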
targetFilename := "/recovery.bin"
if pieSig == p.ExistingEEPROM.PieepromSHA256 &&
vlSig == p.ExistingEEPROM.VL805SHA256 {
fmt.Printf(" installing recovery.bin as RECOVERY.000 (EEPROM already up-to-date)\n")
targetFilename = "/RECOVERY.000"
}
if _, err := writeEepromUpdateFile(filepath.Join(eepromDir, "recovery.bin"), targetFilename); err != nil {
return err
}
}
if err := p.writeCmdline(fw, filepath.Join(kernelDir, "cmdline.txt")); err != nil {
return err
}
if err := p.writeConfig(fw, filepath.Join(kernelDir, "config.txt")); err != nil {
return err
}
if p.UseGPTPartuuid {
srcX86, err := systemd.SystemdBootX64.Open("systemd-bootx64.efi")
if err != nil {
return err
}
if err := copyFile(fw, "/EFI/BOOT/BOOTX64.EFI", srcX86); err != nil {
return err
}
srcAA86, err := systemd.SystemdBootAA64.Open("systemd-bootaa64.efi")
if err != nil {
return err
}
if err := copyFile(fw, "/EFI/BOOT/BOOTAA64.EFI", srcAA86); err != nil {
return err
}
}
if err := fw.Flush(); err != nil {
return err
}
if err := bufw.Flush(); err != nil {
return err
}
if seeker, ok := f.(io.Seeker); ok {
off, err := seeker.Seek(0, io.SeekCurrent)
if err != nil {
return err
}
fragment = ", " + humanize.Bytes(uint64(off))
}
if mbrfilename != "" {
if _, ok := f.(io.ReadSeeker); !ok {
return fmt.Errorf("BUG: f does not implement io.ReadSeeker")
}
fmbr, err := os.OpenFile(mbrfilename, os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
return err
}
defer fmbr.Close()
if err := writeMBR(f.(io.ReadSeeker), fmbr, p.Partuuid); err != nil {
return err
}
if err := fmbr.Close(); err != nil {
return err
}
}
return nil
}
type FileInfo struct {
Filename string
Mode os.FileMode
FromHost string
FromLiteral string
SymlinkDest string
Dirents []*FileInfo
}
func (fi *FileInfo) isFile() bool {
return fi.FromHost != "" || fi.FromLiteral != ""
}
func (fi *FileInfo) pathList() (paths []string) {
for _, ent := range fi.Dirents {
if ent.isFile() {
paths = append(paths, ent.Filename)
continue
}
for _, e := range ent.pathList() {
paths = append(paths, path.Join(ent.Filename, e))
}
}
return paths
}
func (fi *FileInfo) combine(fi2 *FileInfo) error {
for _, ent2 := range fi2.Dirents {
// get existing file info
var f *FileInfo
for _, ent := range fi.Dirents {
if ent.Filename == ent2.Filename {
f = ent
break
}
}
// if not found add complete subtree directly
if f == nil {
fi.Dirents = append(fi.Dirents, ent2)
continue
}
// file overwrite is not supported -> return error
if f.isFile() || ent2.isFile() {
return fmt.Errorf("file already exists: %s", ent2.Filename)
}
if err := f.combine(ent2); err != nil {
return err
}
}
return nil
}
func (fi *FileInfo) mustFindDirent(path string) *FileInfo {
for _, ent := range fi.Dirents {
// TODO: split path into components and compare piecemeal
if ent.Filename == path {
return ent
}
}
log.Panicf("mustFindDirent(%q) did not find directory entry", path)
return nil
}
func findBins(cfg *config.Struct, buildEnv *packer.BuildEnv, bindir string) (*FileInfo, error) {
result := FileInfo{Filename: ""}
// TODO: doing all three packer.MainPackages calls concurrently hides go
// module proxy latency
gokrazyMainPkgs, err := buildEnv.MainPackages(cfg.GokrazyPackagesOrDefault())
if err != nil {
return nil, err
}
gokrazy := FileInfo{Filename: "gokrazy"}
for _, pkg := range gokrazyMainPkgs {
binPath := filepath.Join(bindir, pkg.Basename())
fileIsELFOrFatal(binPath)
gokrazy.Dirents = append(gokrazy.Dirents, &FileInfo{
Filename: pkg.Basename(),
FromHost: binPath,
})
}
if cfg.InternalCompatibilityFlags.InitPkg != "" |
result.Dirents = append(result.Dirents, &gokrazy)
mainPkgs, err := buildEnv.MainPackages(cfg.Packages)
if err != nil {
return nil, err
}
user := FileInfo{Filename: "user"}
for _, pkg := range mainPkgs {
binPath := filepath.Join(bindir, pkg.Basename())
fileIsELFOrFatal(binPath)
user.Dirents = append(user.Dirents, &FileInfo{
Filename: pkg.Basename(),
FromHost: binPath,
})
}
result.Dirents = append(result.Dirents, &user)
return &result, nil
}
func writeFileInfo(dir *squashfs.Directory, fi *FileInfo) error {
if fi.FromHost != "" { // copy a regular file
return copyFileSquash(dir, fi.Filename, fi.FromHost)
}
if fi.FromLiteral != "" { // write a regular file
mode := fi.Mode
if mode == 0 {
mode = 0444
}
w, err := dir.File(fi.Filename, time.Now(), mode)
if err != nil {
return err
}
if _, err := w.Write([]byte(fi.FromLiteral)); err != nil {
return err
}
return w.Close()
}
if fi.SymlinkDest != "" { // create a symlink
return dir.Symlink(fi.SymlinkDest, fi.Filename, time.Now(), 0444)
}
// subdir
var d *squashfs.Directory
if fi.Filename == "" { // root
d = dir
} else {
d = dir.Directory(fi.Filename, time.Now())
}
sort.Slice(fi.Dirents, func(i, j int) bool {
return fi.Dirents[i].Filename < fi.Dirents[j].Filename
})
for _, ent := range fi.Dirents {
if err := writeFileInfo(d, ent); err != nil {
return err
}
}
return d.Flush()
}
func writeRoot(f io.WriteSeeker, root *FileInfo) error {
fmt.Printf("\n")
fmt.Printf("Creating root file system\n")
done := measure.Interactively("creating root file system")
defer func() {
done("")
}()
// TODO: make fw.Flush() report the size of the root fs
fw, err := squashfs.NewWriter(f, time.Now())
if err != nil {
return err
}
if err := writeFileInfo(fw.Root, root); err != nil {
return err
}
return fw.Flush()
}
func (p *Pack) writeRootDeviceFiles(f io.WriteSeeker, rootDeviceFiles []deviceconfig.RootFile) error {
kernelDir, err := packer.PackageDir(p.Cfg.KernelPackageOrDefault())
if err != nil {
return err
}
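// Each root device file is copied from the kernel package to a fixed
// byte offset on the target device, as declared by the device config
// (typically board-specific boot blobs that must live at known offsets).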
for _, rootFile := range rootDeviceFiles {
if _, err := f.Seek(rootFile.Offset, io.SeekStart); err != nil {
return err
}
source, err := os.Open(filepath.Join(kernelDir, rootFile.Name))
if err != nil {
return err
}
if _, err := io.Copy(f, source); err != nil {
return err
}
_ = source.Close()
}
return nil
}
func writeMBR(f io.ReadSeeker, fw io.WriteSeeker, partuuid uint32) error {
rd, err := fat.NewReader(f)
if err != nil {
return err
}
vmlinuzOffset, _, err := rd.Extents("/vmlinuz")
if err != nil {
return err
}
cmdlineOffset, _, err := rd.Extents("/cmdline.txt")
if err != nil {
return err
}
if _, err := fw.Seek(0, io.SeekStart); err != nil {
return err
}
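// Translate byte offsets within the FAT file system into absolute LBAs:
// divide by the 512-byte sector size and add 8192, the sector at which
// the boot partition starts (8192 * 512 = 4 MiB into the disk).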
vmlinuzLba := uint32((vmlinuzOffset / 512) + 8192)
cmdlineTxtLba := uint32((cmdlineOffset / 512) + 8192)
fmt.Printf("MBR summary:\n")
fmt.Printf(" LBAs: vmlinuz=%d cmdline.txt=%d\n", vmlinuzLba, cmdlineTxtLba)
fmt.Printf(" PARTUUID: %08x\n", partuuid)
mbr := mbr.Configure(vmlinuzLba, cmdlineTxtLba, partuuid)
if _, err := fw.Write(mbr[:]); err != nil {
return err
}
return nil
}
// getDuplication returns the paths that appear in both of the given file system trees
func getDuplication(fiA, fiB *FileInfo) (paths []string) {
allPaths := append(fiA.pathList(), fiB.pathList()...)
checkMap := make(map[string]bool, len(allPaths))
for _, p := range allPaths {
if _, ok := checkMap[p]; ok {
paths = append(paths, p)
}
checkMap[p] = true
}
return paths
}
| {
initMainPkgs, err := buildEnv.MainPackages([]string{cfg.InternalCompatibilityFlags.InitPkg})
if err != nil {
return nil, err
}
for _, pkg := range initMainPkgs {
if got, want := pkg.Basename(), "init"; got != want {
log.Printf("Error: -init_pkg=%q produced unexpected binary name: got %q, want %q", cfg.InternalCompatibilityFlags.InitPkg, got, want)
continue
}
binPath := filepath.Join(bindir, pkg.Basename())
fileIsELFOrFatal(binPath)
gokrazy.Dirents = append(gokrazy.Dirents, &FileInfo{
Filename: pkg.Basename(),
FromHost: binPath,
})
}
} | conditional_block |
circular.menu.helpers.ts |
/*****************************************************************************************
*
* Utility functions
*
*****************************************************************************************/
/**
* Distance between two points p1 and p2
*/
const getDistance = (p1, p2 = {x:0,y:0}) => Math.sqrt((p2.x - p1.x)*(p2.x - p1.x) + (p2.y - p1.y)*(p2.y - p1.y));
/**
* Compute vector v such that OP1 + v = OP2
*/
const getTranslator = (p1, p2 = {x:0,y:0}) => ({deltaX: p2.x - p1.x, deltaY: p2.y - p1.y});
/**
* Compute vector v such that OP1 + OP2 = v
*/
const sumCoords = (p1, p2) => ({x: p1.x + p2.x, y: p1.y + p2.y})
/**
* Compute vector v such that k * OP1 = v where k is a scalar (aka scalar multiplication)
*/
const scalarByCoords = (p1, k) => ({x: k * p1.x, y: k * p1.y})
/**
* Compute new rectangle with same dimensions but translated, i.e. its upper left corner coordinates are translated by {deltaX, deltaY}
*/
const applyTranslatorToRect = ({x, y, width, height}, { deltaX, deltaY }) => ({x: x + deltaX, y: y + deltaY, width, height});
/**
* Set left and top style properties of node to x and y, effectively moving the node to the coordinates {x,y}
*/
const moveNodeToCoords = (node, { x, y }) => ((node.style.left = `${x}px`) && (node.style.top = `${y}px`));
/**
* Instead of moving node to the coordinates {x,y}, set css variables to hold these values.
* The node can then be moved at some later point by dynamically manipulating css classes.
*/
const putCoordsOnNode = (node, { x, y }) => { node.style.setProperty('--to-x', `${x}px`); node.style.setProperty('--to-y', `${y}px`);};
/**
* Convert cartesian coordinates {x,y} to polar coordinates {r, theta}
*/
const cartesian2Polar = ({x, y}) => ({ distance: getDistance({x, y}), radians: Math.atan2(y, x) })
/**
* Convert polar coordinates {r, theta} to cartesian coordinates {x,y}
*/
const polar2Cartesian = ({distance, radians}) => ({ x: distance * Math.cos(radians), y: distance * Math.sin(radians) });
/**
* Given a vector OP in cartesian coordinates {x,y}, rotate it by an angle 'radiansAngle'
* Where P is the "point" input argument
*/
const rotatePoint = (point, radiansAngle) => {
const {distance, radians} = cartesian2Polar(point);
return polar2Cartesian({distance, radians: radians + radiansAngle});
}
/**
* A point "P" is looking at a circle with center "C". How much angle does the circle occupy at this distance?
* ```
* - OT1 = OC + a/d CP + b/d x CPperpendicular
* - OT2 = OC + a/d CP - b/d x CPperpendicular
* ```
* - where
* - T1 and T2 are tangent points on the circle
* - CP (center of circle to P) has a length d
* - CPperpendicular is CP rotated by 90 degrees clockwise
* - The equations express CT (circle center to tangent point) using its components along CP (length a) and CPperpendicular (length b)
*
* https://math.stackexchange.com/questions/543496/how-to-find-the-equation-of-a-line-tangent-to-a-circle-that-passes-through-a-g
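*
* Sanity check: for a circle of radius r whose center lies at a distance d
* from the viewpoint, the two tangents enclose an angle of 2 * asin(r / d);
* e.g. r = 1 and d = 2 give 2 * asin(0.5) = PI / 3 (60 degrees).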
*/
const getViewingAngle = (viewPoint, targetCircle) => {
const { center: targetCenter, radius: targetRadius } = targetCircle;
const distance = getDistance(viewPoint, targetCenter);
if(distance < targetRadius) throw new Error(`Point with coordinates (${viewPoint.x}, ${viewPoint.y}) is within the circle and cannot be used to generate tangent lines`);
// Compute vector CP and CP rotated by 90 degrees
const {deltaX: deltaXParallel, deltaY: deltaYParallel} = getTranslator(targetCenter, viewPoint); // CP x,y components
const deltaXPerpendicular = -1 * deltaYParallel; // CP rotated x,y components
const deltaYPerpendicular = deltaXParallel;
// Compute intermediary values for final calculations
const { x: cx, y: cy } = targetCenter;
const rho = targetRadius / distance;
const a_d = rho * rho;
const b_d = rho * Math.sqrt(1 - rho * rho);
// Compute x,y components of OT1 and OT2 where O is the origin of the coordinate system (0,0)
// a_d = a/d where d is the distance of CP: CT1x = a_d x CP
// b_d = b/d where d is the distance of CP: CT1y = b_d x CProtated
// a is therefore the projection of CT along CP
// b is therefore the projection of CT along a direction perpendicular to CP
const T1x = cx + a_d * deltaXParallel + b_d * deltaXPerpendicular;
const T1y = cy + a_d * deltaYParallel + b_d * deltaYPerpendicular;
const T2x = cx + a_d * deltaXParallel - b_d * deltaXPerpendicular;
const T2y = cy + a_d * deltaYParallel - b_d * deltaYPerpendicular;
// Assert that the tangents points computed are actually on the circle's circumference
if(Math.round(getDistance(targetCenter, {x: T1x, y: T1y}) - targetRadius) !== 0) throw new Error("Tangent point does not lie on circumference");
if(Math.round(getDistance(targetCenter, {x: T2x, y: T2y}) - targetRadius) !== 0) throw new Error("Tangent point does not lie on circumference");
// Compute the angle theta = (T1, P, T2) which is the viewing angle of the circle observed from P the viewpoint
// a.b = |a| . |b| . cos(theta) (dot product)
const angle = Math.acos((
(T1x - viewPoint.x)*(T2x - viewPoint.x) + (T1y - viewPoint.y)*(T2y - viewPoint.y)
)/(
getDistance(viewPoint, {x:T1x, y:T1y}) * getDistance(viewPoint, {x:T2x, y:T2y})
)
);
return {
angle: angle,
tangents: [ { x: T1x, y: T1y }, { x: T2x, y: T2y } ]
}
}
/**
* Get the circle centered on the rectangle. The returned radius is the inscribed one (min(width, height) / 2); the circumscribed radius is only computed for the corner sanity checks.
*/
const getCircleFromRect = ({x, y, width, height})=>{
const center = { x: x + width / 2, y: y + height / 2 };
const radius = getDistance(center, {x, y});
if(Math.round(getDistance(center, {x, y}) - radius) !== 0) throw new Error("Circle around rectangle fails to be equidistant from its corners");
if(Math.round(getDistance(center, {x:x+width, y:y}) - radius) !== 0) throw new Error("Circle around rectangle fails to be equidistant from its corners");
if(Math.round(getDistance(center, {x:x+width, y:y+height}) - radius) !== 0) throw new Error("Circle around rectangle fails to be equidistant from its corners");
if(Math.round(getDistance(center, {x, y:y+height}) - radius) !== 0) throw new Error("Circle around rectangle fails to be equidistant from its corners");
return {center, radius: Math.min(width / 2, height / 2)}
}
/**
* A point is looking at a bunch of circles. How much cumulative angle do they occupy?
*/
const getAngleFromCircles = (viewPoint, circles, maxAngle = Math.PI) => {
const angle = circles.reduce((acc, circle) => acc + getViewingAngle(viewPoint, circle).angle, 0);
if(angle >= maxAngle) throw new Error("Circles do not fit at current distance from viewpoint");
return angle;
}
/**
* The menu tries to arrange the nav items around itself so that they all fit within the angle "maxAngle".
* If they don't, push them away from the menu center by "step" pixels, and try again recursively.
*/
function expandMenuCircle(menuCenter, navRects, step = 100, maxAngle = Math.PI){
try {
return {
// getAngleFromCircles throws an error if the accumulated angle for all these circles is greater than maxAngle
angle: getAngleFromCircles(menuCenter, navRects.map(rect => getCircleFromRect(rect)), maxAngle),
rects: navRects
}
}catch(e){
// On error thrown by getAngleFromCircles, push the circles a little bit further, and try again recursively
return expandMenuCircle(menuCenter, navRects.map(rect => applyTranslatorToRect(rect, {deltaX: 0, deltaY: -1 * step})), step, maxAngle);
}
}
/*****************************************************************************************
*
* Main Function
*
*****************************************************************************************/
/**
* Position nav items around a menu so that they fit within some angle.
*/
export default function | (){
// Constants to regulate the positioning algorithm
const angularSpace = Math.PI / 2;
const angularAnchor = Math.PI;
const menuExpansionSteps = 5;
// Node items involved
const navs = Array.from(document.querySelectorAll(".nav__item"));
const menu = document.querySelector(".hamburger-menu");
// Get center point of menu
const {center: viewPoint, radius: viewPointSize} = getCircleFromRect(menu.getBoundingClientRect())
// Try to compute a distance at which the nav items can nicely fit around the menu within an angle "angularSpace"
const {angle, rects} = expandMenuCircle(viewPoint, navs.map(nav => nav.getBoundingClientRect()), menuExpansionSteps, angularSpace);
// Compute the space between the nav items, so that they are nicely spread out to occupy the "angularSpace"
const angleGap = angularSpace/(navs.length - 1);
// Collect important points for display purposes
const coordsArr = [{item: 'menu', radius: viewPointSize, ...viewPoint}];
// Attempt to actually move the nav items
// rects contain the distance information for where these items will sit
rects.reduce((acc, rect, i)=> {
// First get the circle related to this nav item's rectangle
const circle = getCircleFromRect(rect);
// Extract the distance
const distance = getDistance(viewPoint, circle.center);
// Compute the translation vector to move a nav item from the menu position to
// where it needs to sit so that they form a circle around the menu. Then put this information
// on the node itself via css variables. Lastly collect this data for display purposes
const coordsChange = polar2Cartesian({distance, radians: acc});
putCoordsOnNode(navs[i], coordsChange);
coordsArr.push({item: `nav${i}`, radius: circle.radius, ...coordsChange })
// Prepare to position next nav item
return acc + angleGap;
// Initial angular position for first nav item
}, angularAnchor);
console.log(coordsArr);
}
| positionMenuItem | identifier_name |
circular.menu.helpers.ts |
/*****************************************************************************************
*
* Utility functions
*
*****************************************************************************************/
/**
* Distance between two points p1 and p2
*/
const getDistance = (p1, p2 = {x:0,y:0}) => Math.sqrt((p2.x - p1.x)*(p2.x - p1.x) + (p2.y - p1.y)*(p2.y - p1.y));
/**
* Compute vector v such that OP1 + v = OP2
*/
const getTranslator = (p1, p2 = {x:0,y:0}) => ({deltaX: p2.x - p1.x, deltaY: p2.y - p1.y});
/**
* Compute vector v such that OP1 + OP2 = v
*/
const sumCoords = (p1, p2) => ({x: p1.x + p2.x, y: p1.y + p2.y})
/**
* Compute vector v such that k * OP1 = v where k is a scalar (aka scalar multiplication)
*/
const scalarByCoords = (p1, k) => ({x: k * p1.x, y: k * p1.y})
/**
* Compute new rectangle with same dimensions but translated, i.e. its upper left corner coordinates are translated by {deltaX, deltaY}
*/
const applyTranslatorToRect = ({x, y, width, height}, { deltaX, deltaY }) => ({x: x + deltaX, y: y + deltaY, width, height});
/**
* Set left and top style properties of node to x and y, effectively moving the node to the coordinates {x,y}
*/
const moveNodeToCoords = (node, { x, y }) => ((node.style.left = `${x}px`) && (node.style.top = `${y}px`));
/**
* Instead of moving node to the coordinates {x,y}, set css variables to hold these values.
* The node can then be moved at some later point by dynamically manipulating css classes.
*/
const putCoordsOnNode = (node, { x, y }) => { node.style.setProperty('--to-x', `${x}px`); node.style.setProperty('--to-y', `${y}px`);};
/**
* Convert cartesian coordinates {x,y} to polar coordinates {r, theta}
*/
const cartesian2Polar = ({x, y}) => ({ distance: getDistance({x, y}), radians: Math.atan2(y, x) })
/**
* Convert polar coordinates {r, theta} to cartesian coordinates {x,y}
*/
const polar2Cartesian = ({distance, radians}) => ({ x: distance * Math.cos(radians), y: distance * Math.sin(radians) });
/**
* Given a vector OP in cartesian coordinates {x,y}, rotate it by an angle 'radiansAngle'
* Where P is the "point" input argument
*/
const rotatePoint = (point, radiansAngle) => {
const {distance, radians} = cartesian2Polar(point);
return polar2Cartesian({distance, radians: radians + radiansAngle});
}
/**
* A point "P" is looking at a circle with center "C". How much angle does the circle occupy at this distance?
* ```
* - OT1 = OC + a/d CP + b/d x CPperpendicular
* - OT2 = OC + a/d CP - b/d x CPperpendicular
* ```
* - where
* - T1 and T2 are tangent points on the circle
* - CP (center of circle to P) has a length d
* - CPperpendicular is CP rotated by 90 degrees clockwise
* - The equations express CT (circle center to tangent point) using its components along CP (length a) and CPperpendicular (length b)
*
* https://math.stackexchange.com/questions/543496/how-to-find-the-equation-of-a-line-tangent-to-a-circle-that-passes-through-a-g
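*
* Sanity check: for a circle of radius r whose center lies at a distance d
* from the viewpoint, the two tangents enclose an angle of 2 * asin(r / d);
* e.g. r = 1 and d = 2 give 2 * asin(0.5) = PI / 3 (60 degrees).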
*/
const getViewingAngle = (viewPoint, targetCircle) => {
const { center: targetCenter, radius: targetRadius } = targetCircle;
const distance = getDistance(viewPoint, targetCenter);
if(distance < targetRadius) throw new Error(`Point with coordinates (${viewPoint.x}, ${viewPoint.y}) is within the circle and cannot be used to generate tangent lines`);
// Compute vector CP and CP rotated by 90 degrees
const {deltaX: deltaXParallel, deltaY: deltaYParallel} = getTranslator(targetCenter, viewPoint); // CP x,y components
const deltaXPerpendicular = -1 * deltaYParallel; // CP rotated x,y components
const deltaYPerpendicular = deltaXParallel;
// Compute intermediary values for final calculations
const { x: cx, y: cy } = targetCenter;
const rho = targetRadius / distance;
const a_d = rho * rho;
const b_d = rho * Math.sqrt(1 - rho * rho);
// Compute x,y components of OT1 and OT2 where O is the origin of the coordinate system (0,0)
// a_d = a/d where d is the distance of CP: CT1x = a_d x CP
// b_d = b/d where d is the distance of CP: CT1y = b_d x CProtated
// a is therefore the projection of CT along CP
// b is therefore the projection of CT along a direction perpendicular to CP
const T1x = cx + a_d * deltaXParallel + b_d * deltaXPerpendicular;
const T1y = cy + a_d * deltaYParallel + b_d * deltaYPerpendicular;
const T2x = cx + a_d * deltaXParallel - b_d * deltaXPerpendicular;
const T2y = cy + a_d * deltaYParallel - b_d * deltaYPerpendicular;
// Assert that the tangents points computed are actually on the circle's circumference
if(Math.round(getDistance(targetCenter, {x: T1x, y: T1y}) - targetRadius) !== 0) throw new Error("Tangent point does not lie on circumference");
if(Math.round(getDistance(targetCenter, {x: T2x, y: T2y}) - targetRadius) !== 0) throw new Error("Tangent point does not lie on circumference");
// Compute the angle theta = (T1, P, T2) which is the viewing angle of the circle observed from P the viewpoint
// a.b = |a| . |b| . cos(theta) (dot product)
const angle = Math.acos((
(T1x - viewPoint.x)*(T2x - viewPoint.x) + (T1y - viewPoint.y)*(T2y - viewPoint.y)
)/(
getDistance(viewPoint, {x:T1x, y:T1y}) * getDistance(viewPoint, {x:T2x, y:T2y})
)
);
return {
angle: angle,
tangents: [ { x: T1x, y: T1y }, { x: T2x, y: T2y } ]
}
}
/**
* Get the circle centered on the rectangle. The returned radius is the inscribed one (min(width, height) / 2); the circumscribed radius is only computed for the corner sanity checks.
*/
const getCircleFromRect = ({x, y, width, height})=>{
const center = { x: x + width / 2, y: y + height / 2 };
const radius = getDistance(center, {x, y});
if(Math.round(getDistance(center, {x, y}) - radius) !== 0) throw new Error("Circle around rectangle fails to be equidistant from its corners");
if(Math.round(getDistance(center, {x:x+width, y:y}) - radius) !== 0) throw new Error("Circle around rectangle fails to be equidistant from its corners");
if(Math.round(getDistance(center, {x:x+width, y:y+height}) - radius) !== 0) throw new Error("Circle around rectangle fails to be equidistant from its corners");
if(Math.round(getDistance(center, {x, y:y+height}) - radius) !== 0) throw new Error("Circle around rectangle fails to be equidistant from its corners");
return {center, radius: Math.min(width / 2, height / 2)}
}
/**
* A point is looking at a bunch of circles. How much cumulative angle do they occupy?
*/
const getAngleFromCircles = (viewPoint, circles, maxAngle = Math.PI) => {
const angle = circles.reduce((acc, circle) => acc + getViewingAngle(viewPoint, circle).angle, 0);
if(angle >= maxAngle) throw new Error("Circles do not fit at current distance from viewpoint");
return angle;
}
/**
* The menu tries to arrange the nav items around itself so that they all fit within the angle "maxAngle".
* If they don't, push them away from the menu center by "step" pixels, and try again recursively.
*/
function expandMenuCircle(menuCenter, navRects, step = 100, maxAngle = Math.PI){
try {
return {
// getAngleFromCircles throws an error if the accumulated angle for all these circles is greater than maxAngle
angle: getAngleFromCircles(menuCenter, navRects.map(rect => getCircleFromRect(rect)), maxAngle),
rects: navRects
}
}catch(e){
// On error thrown by getAngleFromCircles, push the circles a little bit further, and try again recursively
return expandMenuCircle(menuCenter, navRects.map(rect => applyTranslatorToRect(rect, {deltaX: 0, deltaY: -1 * step})), step, maxAngle);
}
}
/*****************************************************************************************
*
* Main Function
*
*****************************************************************************************/
/**
* Position nav items around a menu so that they fit within some angle.
*/
export default function positionMenuItem() | {
// Constants to regulate the positioning algorithm
const angularSpace = Math.PI / 2;
const angularAnchor = Math.PI;
const menuExpansionSteps = 5;
// Node items involved
const navs = Array.from(document.querySelectorAll(".nav__item"));
const menu = document.querySelector(".hamburger-menu");
// Get center point of menu
const {center: viewPoint, radius: viewPointSize} = getCircleFromRect(menu.getBoundingClientRect())
// Try to compute a distance at which the nav items can nicely fit around the menu within an angle "angularSpace"
const {angle, rects} = expandMenuCircle(viewPoint, navs.map(nav => nav.getBoundingClientRect()), menuExpansionSteps, angularSpace);
// Compute the space between the nav items, so that they are nicely spread out to occupy the "angularSpace"
const angleGap = angularSpace/(navs.length - 1);
// Collect important points for display purposes
const coordsArr = [{item: 'menu', radius: viewPointSize, ...viewPoint}];
// Attempt to actually move the nav items
// rects contain the distance information for where these items will sit
rects.reduce((acc, rect, i)=> {
// First get the circle related to this nav item's rectangle
const circle = getCircleFromRect(rect);
// Extract the distance
const distance = getDistance(viewPoint, circle.center);
// Compute the translation vector to move a nav item from the menu position to
// where it needs to sit so that they form a circle around the menu. Then put this information
// on the node itself via css variables. Lastly collect this data for display purposes
const coordsChange = polar2Cartesian({distance, radians: acc});
putCoordsOnNode(navs[i], coordsChange);
coordsArr.push({item: `nav${i}`, radius: circle.radius, ...coordsChange })
// Prepare to position next nav item
return acc + angleGap;
// Initial angular position for first nav item
}, angularAnchor);
console.log(coordsArr);
} | identifier_body |
|
circular.menu.helpers.ts | /*****************************************************************************************
*
* Utility functions
*
*****************************************************************************************/
/** | */
const getDistance = (p1, p2 = {x:0,y:0}) => Math.sqrt((p2.x - p1.x)*(p2.x - p1.x) + (p2.y - p1.y)*(p2.y - p1.y));
/**
* Compute vector v such that OP1 + v = OP2
*/
const getTranslator = (p1, p2 = {x:0,y:0}) => ({deltaX: p2.x - p1.x, deltaY: p2.y - p1.y});
/**
* Compute vector v such that OP1 + OP2 = v
*/
const sumCoords = (p1, p2) => ({x: p1.x + p2.x, y: p1.y + p2.y})
/**
* Compute vector v such that k * OP1 = v where k is a scalar (aka scalar multiplication)
*/
const scalarByCoords = (p1, k) => ({x: k * p1.x, y: k * p1.y})
/**
* Compute new rectangle with same dimensions but translated, i.e. its upper left corner coordinates are translated by {deltaX, deltaY}
*/
const applyTranslatorToRect = ({x, y, width, height}, { deltaX, deltaY }) => ({x: x + deltaX, y: y + deltaY, width, height});
/**
* Set left and top style properties of node to x and y, effectively moving the node to the coordinates {x,y}
*/
const moveNodeToCoords = (node, { x, y }) => ((node.style.left = `${x}px`) && (node.style.top = `${y}px`));
/**
* Instead of moving node to the coordinates {x,y}, set css variables to hold these values.
* The node can then be moved at some later point by dynamically manipulating css classes.
*/
const putCoordsOnNode = (node, { x, y }) => { node.style.setProperty('--to-x', `${x}px`); node.style.setProperty('--to-y', `${y}px`);};
/**
* Convert cartesian coordinates {x,y} to polar coordinates {r, theta}
*/
const cartesian2Polar = ({x, y}) => ({ distance: getDistance({x, y}), radians: Math.atan2(y, x) })
/**
* Convert polar coordinates {r, theta} to cartesian coordinates {x,y}
*/
const polar2Cartesian = ({distance, radians}) => ({ x: distance * Math.cos(radians), y: distance * Math.sin(radians) });
/**
* Given a vector OP in cartesian coordinates {x,y}, rotate it by an angle 'radiansAngle'
* Where P is the "point" input argument
*/
const rotatePoint = (point, radiansAngle) => {
const {distance, radians} = cartesian2Polar(point);
return polar2Cartesian({distance, radians: radians + radiansAngle});
}
/**
* A point "P" is looking at a circle with center "C". How much angle does the circle occupy at this distance?
* ```
* - OT1 = OC + a/d CP + b/d x CPperpendicular
* - OT2 = OC + a/d CP - b/d x CPperpendicular
* ```
* - where
* - T1 and T2 are tangent points on the circle
* - CP (center of circle to P) has a length d
* - CPperpendicular is CP rotated by 90 degrees clockwise
* - The equations express CT (circle center to tangent point) using its components along CP (length a) and CPperpendicular (length b)
*
* https://math.stackexchange.com/questions/543496/how-to-find-the-equation-of-a-line-tangent-to-a-circle-that-passes-through-a-g
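*
* Sanity check: for a circle of radius r whose center lies at a distance d
* from the viewpoint, the two tangents enclose an angle of 2 * asin(r / d);
* e.g. r = 1 and d = 2 give 2 * asin(0.5) = PI / 3 (60 degrees).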
*/
const getViewingAngle = (viewPoint, targetCircle) => {
const { center: targetCenter, radius: targetRadius } = targetCircle;
const distance = getDistance(viewPoint, targetCenter);
if(distance < targetRadius) throw new Error(`Point with coordinates (${viewPoint.x}, ${viewPoint.y}) is within the circle and cannot be used to generate tangent lines`);
// Compute vector CP and CP rotated by 90 degrees
const {deltaX: deltaXParallel, deltaY: deltaYParallel} = getTranslator(targetCenter, viewPoint); // CP x,y components
const deltaXPerpendicular = -1 * deltaYParallel; // CP rotated x,y components
const deltaYPerpendicular = deltaXParallel;
// Compute intermediary values for final calculations
const { x: cx, y: cy } = targetCenter;
const rho = targetRadius / distance;
const a_d = rho * rho;
const b_d = rho * Math.sqrt(1 - rho * rho);
// Compute x,y components of OT1 and OT2 where O is the origin of the coordinate system (0,0)
// a_d = a/d where d is the distance of CP: CT1x = a_d x CP
// b_d = b/d where d is the distance of CP: CT1y = b_d x CProtated
// a is therefore the projection of CT along CP
// b is therefore the projection of CT along a direction perpendicular to CP
const T1x = cx + a_d * deltaXParallel + b_d * deltaXPerpendicular;
const T1y = cy + a_d * deltaYParallel + b_d * deltaYPerpendicular;
const T2x = cx + a_d * deltaXParallel - b_d * deltaXPerpendicular;
const T2y = cy + a_d * deltaYParallel - b_d * deltaYPerpendicular;
// Assert that the tangents points computed are actually on the circle's circumference
if(Math.round(getDistance(targetCenter, {x: T1x, y: T1y}) - targetRadius) !== 0) throw new Error("Tangent point does not lie on circumference");
if(Math.round(getDistance(targetCenter, {x: T2x, y: T2y}) - targetRadius) !== 0) throw new Error("Tangent point does not lie on circumference");
// Compute the angle theta = (T1, P, T2) which is the viewing angle of the circle observed from P the viewpoint
// a.b = |a| . |b| . cos(theta) (dot product)
const angle = Math.acos((
(T1x - viewPoint.x)*(T2x - viewPoint.x) + (T1y - viewPoint.y)*(T2y - viewPoint.y)
)/(
getDistance(viewPoint, {x:T1x, y:T1y}) * getDistance(viewPoint, {x:T2x, y:T2y})
)
);
return {
angle: angle,
tangents: [ { x: T1x, y: T1y }, { x: T2x, y: T2y } ]
}
}
/**
* Get the circle centered on the rectangle. The returned radius is the inscribed one (min(width, height) / 2); the circumscribed radius is only computed for the corner sanity checks.
*/
const getCircleFromRect = ({x, y, width, height})=>{
const center = { x: x + width / 2, y: y + height / 2 };
const radius = getDistance(center, {x, y});
if(Math.round(getDistance(center, {x, y}) - radius) !== 0) throw new Error("Circle around rectangle fails to be equidistant from its corners");
if(Math.round(getDistance(center, {x:x+width, y:y}) - radius) !== 0) throw new Error("Circle around rectangle fails to be equidistant from its corners");
if(Math.round(getDistance(center, {x:x+width, y:y+height}) - radius) !== 0) throw new Error("Circle around rectangle fails to be equidistant from its corners");
if(Math.round(getDistance(center, {x, y:y+height}) - radius) !== 0) throw new Error("Circle around rectangle fails to be equidistant from its corners");
return {center, radius: Math.min(width / 2, height / 2)}
}
/**
* A point is looking at a bunch of circles. How much cumulative angle do they occupy?
*/
const getAngleFromCircles = (viewPoint, circles, maxAngle = Math.PI) => {
const angle = circles.reduce((acc, circle) => acc + getViewingAngle(viewPoint, circle).angle, 0);
if(angle >= maxAngle) throw new Error("Circles do not fit at current distance from viewpoint");
return angle;
}
/**
* The menu tries to arrange the nav items around itself so that they all fit within the angle "maxAngle".
* If they don't, push them away from the menu center by "step" pixels, and try again recursively.
*/
function expandMenuCircle(menuCenter, navRects, step = 100, maxAngle = Math.PI){
try {
return {
// getAngleFromCircles throws an error if the accumulated angle for all these circles is greater than maxAngle
angle: getAngleFromCircles(menuCenter, navRects.map(rect => getCircleFromRect(rect)), maxAngle),
rects: navRects
}
}catch(e){
// On error thrown by getAngleFromCircles, push the circles a little bit further, and try again recursively
return expandMenuCircle(menuCenter, navRects.map(rect => applyTranslatorToRect(rect, {deltaX: 0, deltaY: -1 * step})), step, maxAngle);
}
}
/*****************************************************************************************
*
* Main Function
*
*****************************************************************************************/
/**
* Position nav items around a menu so that they fit within some angle.
*/
export default function positionMenuItem(){
// Constants to regulate the positioning algorithm
const angularSpace = Math.PI / 2;
const angularAnchor = Math.PI;
const menuExpansionSteps = 5;
// Node items involved
const navs = Array.from(document.querySelectorAll(".nav__item"));
const menu = document.querySelector(".hamburger-menu");
// Get center point of menu
const {center: viewPoint, radius: viewPointSize} = getCircleFromRect(menu.getBoundingClientRect())
// Try to compute a distance at which the nav items can nicely fit around the menu within an angle "angularSpace"
const {angle, rects} = expandMenuCircle(viewPoint, navs.map(nav => nav.getBoundingClientRect()), menuExpansionSteps, angularSpace);
// Compute the space between the nav items, so that they are nicely spread out to occupy the "angularSpace"
const angleGap = angularSpace/(navs.length - 1);
// Collect important points for display purposes
const coordsArr = [{item: 'menu', radius: viewPointSize, ...viewPoint}];
// Attempt to actually move the nav items
// rects contain the distance information for where these items will sit
rects.reduce((acc, rect, i)=> {
// First get the circle related to this nav item's rectangle
const circle = getCircleFromRect(rect);
// Extract the distance
const distance = getDistance(viewPoint, circle.center);
// Compute the translation vector to move a nav item from the menu position to
// where it needs to sit so that they form a circle around the menu. Then put this information
// on the node itself via css variables. Lastly collect this data for display purposes
const coordsChange = polar2Cartesian({distance, radians: acc});
putCoordsOnNode(navs[i], coordsChange);
coordsArr.push({item: `nav${i}`, radius: circle.radius, ...coordsChange })
// Prepare to position next nav item
return acc + angleGap;
// Initial angular position for first nav item
}, angularAnchor);
console.log(coordsArr);
} | * Distance between two points p1 and p2 | random_line_split |
DiffStreamOplogFilter.js | const http = require('http');
const stream = require('stream');
const { shuffle } = require('arsenal');
const DiffStreamOplogFilter = require('../../../CompareRaftMembers/DiffStreamOplogFilter');
const HTTP_TEST_PORT = 9090;
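// Readable mock of a raft oplog tail: it pushes the provided entries one by
// one with a small delay and, once they are exhausted (or when given none),
// keeps pushing { entry: null } every refreshPeriodMs to signal an idle oplog.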
class MockRaftOplogStream extends stream.Readable {
constructor(entriesToEmit, refreshPeriodMs) {
super({ objectMode: true });
this.entriesToEmit = entriesToEmit;
this.refreshPeriodMs = refreshPeriodMs;
}
_read() {
if (this.entriesToEmit.length > 0) {
// introduce a little delay between events to make sure
// the filter waits for oplog events before emitting
// output entries
setTimeout(() => {
this.push(this.entriesToEmit.shift()); | }
}, 10);
} else {
setTimeout(() => {
this.push({ entry: null });
}, this.refreshPeriodMs);
}
}
}
describe('DiffStreamOplogFilter', () => {
let httpServer;
let reqCount = 0;
beforeAll(done => {
const handleGetBucketRSRequest = (res, bucketName) => {
// simple mock matching bucket names "foo-on-rsX" to raft
// session X as long as 1 <= X <= 5
const bucketMatch = /-on-rs([1-5])$/.exec(bucketName);
if (bucketMatch) {
const rsIdStr = bucketMatch[1];
res.writeHead(200, {
'Content-Length': 1,
});
return res.end(rsIdStr);
}
if (bucketName === 'bucket-with-error') {
res.writeHead(500);
} else {
res.writeHead(404);
}
return res.end();
};
httpServer = http.createServer((req, res) => {
req.resume();
reqCount += 1;
// fail 1/3 requests to check retry behavior
if (reqCount % 3 === 0) {
res.writeHead(500);
return res.end('OOPS');
}
const url = new URL(req.url, `http://${req.headers.host}`);
const bucketRsMatch = /^\/_\/buckets\/([a-z0-9-]+)\/id$/.exec(url.pathname);
if (bucketRsMatch) {
const bucketName = bucketRsMatch[1];
return handleGetBucketRSRequest(res, bucketName);
}
throw new Error(`unexpected request path ${url.pathname}`);
});
httpServer.listen(HTTP_TEST_PORT);
httpServer.once('listening', done);
});
afterAll(done => {
httpServer.close(done);
});
test('filtering test with mocks', done => {
const oplogFilter = new DiffStreamOplogFilter({
bucketdHost: 'localhost',
bucketdPort: HTTP_TEST_PORT,
maxBufferedEntries: 5,
excludeFromCseqs: {
1: 10,
2: 20,
3: 30,
4: 40,
},
retryDelayMs: 10,
maxRetryDelayMs: 100,
});
// Prepare some test diff entries and oplog entries with a
// nested loop over raft sessions, buckets and keys
//
// Note: we loop over raft sessions 0 to 5 but only 1-4 are monitored:
//
// - use the first loop iteration (phony raft session 0) to
//   create buckets with no raft session
//
// - use raft session 5 to create buckets attached to a raft
// session not monitored by the filter
//
// In both cases, all entries belonging to those buckets
// should be filtered hence not appear in the output. This is
// consistent with cases where buckets would have been deleted
// during the scan, and possibly recreated to another raft
// session: in such case, all bucket keys were necessarily
// updated, so should be ignored.
const inputDiffEntries = [];
const latestOplogs = [];
for (let rs = 0; rs <= 5; ++rs) {
const latestRsOplog = [];
latestOplogs[rs] = latestRsOplog;
for (let b = 1; b <= 5; ++b) {
for (let k = 1; k <= 5; ++k) {
const bucketName = `bucket${b}-on-rs${rs}`;
const key = `key${k}`;
// insert a diff entry that should pass through if on RS 1-4
inputDiffEntries.push([{
key: `${bucketName}/${key}`,
value: 'foobar',
}, null]);
// insert a diff entry that should be filtered out
// because the key appears somewhere in the latest
// oplog
inputDiffEntries.push([{
key: `${bucketName}/${key}-updated`,
value: 'oldfoobar',
}, null]);
// insert the updated key's oplog entry
latestRsOplog.push({
entry: {
method: 'BATCH',
bucket: bucketName,
key: `${key}-updated`,
value: 'newfoobar',
},
});
}
}
}
// shuffle both the input diff entries and each oplog to
// increase fuzziness in the test, and replace actual oplog
// streams by mocks
shuffle(inputDiffEntries);
for (let rs = 1; rs <= 4; ++rs) {
shuffle(latestOplogs[rs]);
oplogFilter.raftSessionStates[rs].oplogStream.destroy();
const mockOplogStream = new MockRaftOplogStream(latestOplogs[rs], 100);
oplogFilter.raftSessionStates[rs].oplogStream = mockOplogStream;
oplogFilter._setupOplogStream(oplogFilter.raftSessionStates[rs], mockOplogStream);
}
// ingest all diff entries in the shuffled order in the filter
// stream
for (const diffEntry of inputDiffEntries) {
oplogFilter.write(diffEntry);
}
oplogFilter.end();
// listen for output diff events and store them in a set for
// checking that they correspond to what should have been sent
const filteredDiffEntries = new Set();
oplogFilter
.on('data', data => {
// stringify in JSON to support quick lookup from the set
// note: diff entries are forwarded as is, the
// javascript objects embedded have their fields in
// the same order, which is retained in the
// stringified version
filteredDiffEntries.add(JSON.stringify(data));
})
.on('end', () => {
// check that all diff entries expected to be output
// have been output, i.e. only non-updated diff
// entries from RS 1 to 4
for (let rs = 1; rs <= 4; ++rs) {
for (let b = 1; b <= 5; ++b) {
for (let k = 1; k <= 5; ++k) {
const bucketName = `bucket${b}-on-rs${rs}`;
const key = `key${k}`;
const outputDiffEntry = [{
key: `${bucketName}/${key}`,
value: 'foobar',
}, null];
const jsonOutputDiffEntry = JSON.stringify(outputDiffEntry);
expect(filteredDiffEntries.has(jsonOutputDiffEntry)).toBeTruthy();
filteredDiffEntries.delete(jsonOutputDiffEntry);
}
}
}
// check that no other entry than what was expected has been output
expect(filteredDiffEntries.size).toEqual(0);
done();
})
.on('error', err => {
fail(`an error occurred during filtering: ${err}`);
});
});
test('should handle an empty input stream', done => {
const oplogFilter = new DiffStreamOplogFilter({
bucketdHost: 'localhost',
bucketdPort: HTTP_TEST_PORT,
maxBufferedEntries: 5,
excludeFromCseqs: {},
});
oplogFilter.end();
oplogFilter
.on('data', event => {
fail('should not have received a "data" event', event);
})
.on('end', () => {
done();
})
.on('error', err => {
fail(`an error occurred during filtering: ${err}`);
});
});
test('should emit a stream error if failing to fetch RSID after retries', done => {
const oplogFilter = new DiffStreamOplogFilter({
bucketdHost: 'localhost',
bucketdPort: HTTP_TEST_PORT,
maxBufferedEntries: 5,
excludeFromCseqs: {
1: 10,
},
retryDelayMs: 10,
maxRetryDelayMs: 100,
});
oplogFilter.raftSessionStates[1].oplogStream.destroy();
const mockOplogStream = new MockRaftOplogStream([], 100);
oplogFilter.raftSessionStates[1].oplogStream = mockOplogStream;
oplogFilter._setupOplogStream(oplogFilter.raftSessionStates[1], mockOplogStream);
// this bucket name triggers a 500 error in the HTTP server
// mock when requested for its raft session ID
oplogFilter.write([{ key: 'bucket-with-error/key', value: 'foobar' }, null]);
oplogFilter.end();
oplogFilter
.on('data', event => {
fail('should not have received a "data" event', event);
})
.on('error', err => {
expect(err).toBeTruthy();
done();
});
});
}); | if (this.entriesToEmit.length === 0) {
this.push({ entry: null }); | random_line_split |
DiffStreamOplogFilter.js | const http = require('http');
const stream = require('stream');
const { shuffle } = require('arsenal');
const DiffStreamOplogFilter = require('../../../CompareRaftMembers/DiffStreamOplogFilter');
const HTTP_TEST_PORT = 9090;
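// Readable mock of a raft oplog tail: it pushes the provided entries one by
// one with a small delay and, once they are exhausted (or when given none),
// keeps pushing { entry: null } every refreshPeriodMs to signal an idle oplog.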
class MockRaftOplogStream extends stream.Readable {
| (entriesToEmit, refreshPeriodMs) {
super({ objectMode: true });
this.entriesToEmit = entriesToEmit;
this.refreshPeriodMs = refreshPeriodMs;
}
_read() {
if (this.entriesToEmit.length > 0) {
// introduce a little delay between events to make sure
// the filter waits for oplog events before emitting
// output entries
setTimeout(() => {
this.push(this.entriesToEmit.shift());
if (this.entriesToEmit.length === 0) {
this.push({ entry: null });
}
}, 10);
} else {
setTimeout(() => {
this.push({ entry: null });
}, this.refreshPeriodMs);
}
}
}
describe('DiffStreamOplogFilter', () => {
let httpServer;
let reqCount = 0;
beforeAll(done => {
const handleGetBucketRSRequest = (res, bucketName) => {
// simple mock matching bucket names "foo-on-rsX" to raft
// session X as long as 1 <= X <= 5
const bucketMatch = /-on-rs([1-5])$/.exec(bucketName);
if (bucketMatch) {
const rsIdStr = bucketMatch[1];
res.writeHead(200, {
'Content-Length': 1,
});
return res.end(rsIdStr);
}
if (bucketName === 'bucket-with-error') {
res.writeHead(500);
} else {
res.writeHead(404);
}
return res.end();
};
httpServer = http.createServer((req, res) => {
req.resume();
reqCount += 1;
// fail 1/3 requests to check retry behavior
if (reqCount % 3 === 0) {
res.writeHead(500);
return res.end('OOPS');
}
const url = new URL(req.url, `http://${req.headers.host}`);
const bucketRsMatch = /^\/_\/buckets\/([a-z0-9-]+)\/id$/.exec(url.pathname);
if (bucketRsMatch) {
const bucketName = bucketRsMatch[1];
return handleGetBucketRSRequest(res, bucketName);
}
throw new Error(`unexpected request path ${url.pathname}`);
});
httpServer.listen(HTTP_TEST_PORT);
httpServer.once('listening', done);
});
afterAll(done => {
httpServer.close(done);
});
test('filtering test with mocks', done => {
const oplogFilter = new DiffStreamOplogFilter({
bucketdHost: 'localhost',
bucketdPort: HTTP_TEST_PORT,
maxBufferedEntries: 5,
excludeFromCseqs: {
1: 10,
2: 20,
3: 30,
4: 40,
},
retryDelayMs: 10,
maxRetryDelayMs: 100,
});
// Prepare some test diff entries and oplog entries with a
// nested loop over raft sessions, buckets and keys
//
// Note: we loop over raft sessions 0 to 5 but only 1-4 are monitored:
//
// - use the first loop iteration (phony raft session 0) to
//   create buckets with no raft session
//
// - use raft session 5 to create buckets attached to a raft
// session not monitored by the filter
//
// In both cases, all entries belonging to those buckets
// should be filtered hence not appear in the output. This is
// consistent with cases where buckets would have been deleted
// during the scan, and possibly recreated to another raft
// session: in such case, all bucket keys were necessarily
// updated, so should be ignored.
const inputDiffEntries = [];
const latestOplogs = [];
for (let rs = 0; rs <= 5; ++rs) {
const latestRsOplog = [];
latestOplogs[rs] = latestRsOplog;
for (let b = 1; b <= 5; ++b) {
for (let k = 1; k <= 5; ++k) {
const bucketName = `bucket${b}-on-rs${rs}`;
const key = `key${k}`;
// insert a diff entry that should pass through if on RS 1-4
inputDiffEntries.push([{
key: `${bucketName}/${key}`,
value: 'foobar',
}, null]);
// insert a diff entry that should be filtered out
// because the key appears somewhere in the latest
// oplog
inputDiffEntries.push([{
key: `${bucketName}/${key}-updated`,
value: 'oldfoobar',
}, null]);
// insert the updated key's oplog entry
latestRsOplog.push({
entry: {
method: 'BATCH',
bucket: bucketName,
key: `${key}-updated`,
value: 'newfoobar',
},
});
}
}
}
// shuffle both the input diff entries and each oplog to
// increase fuzziness in the test, and replace actual oplog
// streams by mocks
shuffle(inputDiffEntries);
for (let rs = 1; rs <= 4; ++rs) {
shuffle(latestOplogs[rs]);
oplogFilter.raftSessionStates[rs].oplogStream.destroy();
const mockOplogStream = new MockRaftOplogStream(latestOplogs[rs], 100);
oplogFilter.raftSessionStates[rs].oplogStream = mockOplogStream;
oplogFilter._setupOplogStream(oplogFilter.raftSessionStates[rs], mockOplogStream);
}
// ingest all diff entries in the shuffled order in the filter
// stream
for (const diffEntry of inputDiffEntries) {
oplogFilter.write(diffEntry);
}
oplogFilter.end();
// listen for output diff events and store them in a set for
// checking that they correspond to what should have been sent
const filteredDiffEntries = new Set();
oplogFilter
.on('data', data => {
// stringify in JSON to support quick lookup from the set
// note: diff entries are forwarded as is, the
// javascript objects embedded have their fields in
// the same order, which is retained in the
// stringified version
filteredDiffEntries.add(JSON.stringify(data));
})
.on('end', () => {
// check that all diff entries expected to be output
// have been output, i.e. only non-updated diff
// entries from RS 1 to 4
for (let rs = 1; rs <= 4; ++rs) {
for (let b = 1; b <= 5; ++b) {
for (let k = 1; k <= 5; ++k) {
const bucketName = `bucket${b}-on-rs${rs}`;
const key = `key${k}`;
const outputDiffEntry = [{
key: `${bucketName}/${key}`,
value: 'foobar',
}, null];
const jsonOutputDiffEntry = JSON.stringify(outputDiffEntry);
expect(filteredDiffEntries.has(jsonOutputDiffEntry)).toBeTruthy();
filteredDiffEntries.delete(jsonOutputDiffEntry);
}
}
}
// check that no other entry than what was expected has been output
expect(filteredDiffEntries.size).toEqual(0);
done();
})
.on('error', err => {
fail(`an error occurred during filtering: ${err}`);
});
});
test('should handle an empty input stream', done => {
const oplogFilter = new DiffStreamOplogFilter({
bucketdHost: 'localhost',
bucketdPort: HTTP_TEST_PORT,
maxBufferedEntries: 5,
excludeFromCseqs: {},
});
oplogFilter.end();
oplogFilter
.on('data', event => {
fail('should not have received a "data" event', event);
})
.on('end', () => {
done();
})
.on('error', err => {
fail(`an error occurred during filtering: ${err}`);
});
});
test('should emit a stream error if failing to fetch RSID after retries', done => {
const oplogFilter = new DiffStreamOplogFilter({
bucketdHost: 'localhost',
bucketdPort: HTTP_TEST_PORT,
maxBufferedEntries: 5,
excludeFromCseqs: {
1: 10,
},
retryDelayMs: 10,
maxRetryDelayMs: 100,
});
oplogFilter.raftSessionStates[1].oplogStream.destroy();
const mockOplogStream = new MockRaftOplogStream([], 100);
oplogFilter.raftSessionStates[1].oplogStream = mockOplogStream;
oplogFilter._setupOplogStream(oplogFilter.raftSessionStates[1], mockOplogStream);
// this bucket name triggers a 500 error in the HTTP server
// mock when requested for its raft session ID
oplogFilter.write([{ key: 'bucket-with-error/key', value: 'foobar' }, null]);
oplogFilter.end();
oplogFilter
.on('data', event => {
fail('should not have received a "data" event', event);
})
.on('error', err => {
expect(err).toBeTruthy();
done();
});
});
});
| constructor | identifier_name |
DiffStreamOplogFilter.js | const http = require('http');
const stream = require('stream');
const { shuffle } = require('arsenal');
const DiffStreamOplogFilter = require('../../../CompareRaftMembers/DiffStreamOplogFilter');
const HTTP_TEST_PORT = 9090;
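// Readable mock of a raft oplog tail: it pushes the provided entries one by
// one with a small delay and, once they are exhausted (or when given none),
// keeps pushing { entry: null } every refreshPeriodMs to signal an idle oplog.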
class MockRaftOplogStream extends stream.Readable {
constructor(entriesToEmit, refreshPeriodMs) {
super({ objectMode: true });
this.entriesToEmit = entriesToEmit;
this.refreshPeriodMs = refreshPeriodMs;
}
_read() {
if (this.entriesToEmit.length > 0) {
// introduce a little delay between events to make sure
// the filter waits for oplog events before emitting
// output entries
setTimeout(() => {
this.push(this.entriesToEmit.shift());
if (this.entriesToEmit.length === 0) {
this.push({ entry: null });
}
}, 10);
} else {
setTimeout(() => {
this.push({ entry: null });
}, this.refreshPeriodMs);
}
}
}
describe('DiffStreamOplogFilter', () => {
let httpServer;
let reqCount = 0;
beforeAll(done => {
const handleGetBucketRSRequest = (res, bucketName) => {
// simple mock matching bucket names "foo-on-rsX" to raft
// session X as long as 1 <= X <= 5
const bucketMatch = /-on-rs([1-5])$/.exec(bucketName);
if (bucketMatch) {
const rsIdStr = bucketMatch[1];
res.writeHead(200, {
'Content-Length': 1,
});
return res.end(rsIdStr);
}
if (bucketName === 'bucket-with-error') {
res.writeHead(500);
} else {
res.writeHead(404);
}
return res.end();
};
httpServer = http.createServer((req, res) => {
req.resume();
reqCount += 1;
// fail 1/3 requests to check retry behavior
if (reqCount % 3 === 0) {
res.writeHead(500);
return res.end('OOPS');
}
const url = new URL(req.url, `http://${req.headers.host}`);
const bucketRsMatch = /^\/_\/buckets\/([a-z0-9-]+)\/id$/.exec(url.pathname);
if (bucketRsMatch) {
const bucketName = bucketRsMatch[1];
return handleGetBucketRSRequest(res, bucketName);
}
throw new Error(`unexpected request path ${url.pathname}`);
});
httpServer.listen(HTTP_TEST_PORT);
httpServer.once('listening', done);
});
afterAll(done => {
httpServer.close(done);
});
test('filtering test with mocks', done => {
const oplogFilter = new DiffStreamOplogFilter({
bucketdHost: 'localhost',
bucketdPort: HTTP_TEST_PORT,
maxBufferedEntries: 5,
excludeFromCseqs: {
1: 10,
2: 20,
3: 30,
4: 40,
},
retryDelayMs: 10,
maxRetryDelayMs: 100,
});
// Prepare some test diff entries and oplog entries with a
// nested loop over raft sessions, buckets and keys
//
// Note: we loop over raft sessions 0 to 5 but only 1-4 are monitored:
//
// - use the first loop iteration (phony raft session 0) to
//   create buckets with no raft session
//
// - use raft session 5 to create buckets attached to a raft
// session not monitored by the filter
//
// In both cases, all entries belonging to those buckets
// should be filtered hence not appear in the output. This is
// consistent with cases where buckets would have been deleted
// during the scan, and possibly recreated to another raft
// session: in such case, all bucket keys were necessarily
// updated, so should be ignored.
const inputDiffEntries = [];
const latestOplogs = [];
for (let rs = 0; rs <= 5; ++rs) {
const latestRsOplog = [];
latestOplogs[rs] = latestRsOplog;
for (let b = 1; b <= 5; ++b) {
for (let k = 1; k <= 5; ++k) {
const bucketName = `bucket${b}-on-rs${rs}`;
const key = `key${k}`;
// insert a diff entry that should pass through if on RS 1-4
inputDiffEntries.push([{
key: `${bucketName}/${key}`,
value: 'foobar',
}, null]);
// insert a diff entry that should be filtered out
// because the key appears somewhere in the latest
// oplog
inputDiffEntries.push([{
key: `${bucketName}/${key}-updated`,
value: 'oldfoobar',
}, null]);
// insert the updated key's oplog entry
latestRsOplog.push({
entry: {
method: 'BATCH',
bucket: bucketName,
key: `${key}-updated`,
value: 'newfoobar',
},
});
}
}
}
// shuffle both the input diff entries and each oplog to
// increase fuzziness in the test, and replace actual oplog
// streams by mocks
shuffle(inputDiffEntries);
for (let rs = 1; rs <= 4; ++rs) {
shuffle(latestOplogs[rs]);
oplogFilter.raftSessionStates[rs].oplogStream.destroy();
const mockOplogStream = new MockRaftOplogStream(latestOplogs[rs], 100);
oplogFilter.raftSessionStates[rs].oplogStream = mockOplogStream;
oplogFilter._setupOplogStream(oplogFilter.raftSessionStates[rs], mockOplogStream);
}
// ingest all diff entries in the shuffled order in the filter
// stream
for (const diffEntry of inputDiffEntries) {
oplogFilter.write(diffEntry);
}
oplogFilter.end();
// listen for output diff events and store them in a set for
// checking that they correspond to what should have been sent
const filteredDiffEntries = new Set();
oplogFilter
.on('data', data => {
                // stringify as JSON to support quick lookups in the set
                // note: diff entries are forwarded as is, so the
                // embedded JavaScript objects keep their fields in
                // the same order, which is preserved in the
                // stringified version
filteredDiffEntries.add(JSON.stringify(data));
})
.on('end', () => {
// check that all diff entries expected to be output
// have been output, i.e. only non-updated diff
// entries from RS 1 to 4
for (let rs = 1; rs <= 4; ++rs) {
for (let b = 1; b <= 5; ++b) |
}
// check that no other entry than what was expected has been output
expect(filteredDiffEntries.size).toEqual(0);
done();
})
.on('error', err => {
fail(`an error occurred during filtering: ${err}`);
});
});
test('should handle an empty input stream', done => {
const oplogFilter = new DiffStreamOplogFilter({
bucketdHost: 'localhost',
bucketdPort: HTTP_TEST_PORT,
maxBufferedEntries: 5,
excludeFromCseqs: {},
});
oplogFilter.end();
oplogFilter
.on('data', event => {
fail('should not have received a "data" event', event);
})
.on('end', () => {
done();
})
.on('error', err => {
fail(`an error occurred during filtering: ${err}`);
});
});
test('should emit a stream error if failing to fetch RSID after retries', done => {
const oplogFilter = new DiffStreamOplogFilter({
bucketdHost: 'localhost',
bucketdPort: HTTP_TEST_PORT,
maxBufferedEntries: 5,
excludeFromCseqs: {
1: 10,
},
retryDelayMs: 10,
maxRetryDelayMs: 100,
});
oplogFilter.raftSessionStates[1].oplogStream.destroy();
const mockOplogStream = new MockRaftOplogStream([], 100);
oplogFilter.raftSessionStates[1].oplogStream = mockOplogStream;
oplogFilter._setupOplogStream(oplogFilter.raftSessionStates[1], mockOplogStream);
// this bucket name triggers a 500 error in the HTTP server
// mock when requested for its raft session ID
oplogFilter.write([{ key: 'bucket-with-error/key', value: 'foobar' }, null]);
oplogFilter.end();
oplogFilter
.on('data', event => {
fail('should not have received a "data" event', event);
})
.on('error', err => {
expect(err).toBeTruthy();
done();
});
});
});
| {
for (let k = 1; k <= 5; ++k) {
const bucketName = `bucket${b}-on-rs${rs}`;
const key = `key${k}`;
const outputDiffEntry = [{
key: `${bucketName}/${key}`,
value: 'foobar',
}, null];
const jsonOutputDiffEntry = JSON.stringify(outputDiffEntry);
expect(filteredDiffEntries.has(jsonOutputDiffEntry)).toBeTruthy();
filteredDiffEntries.delete(jsonOutputDiffEntry);
}
} | conditional_block |
lib.rs | //! Colorful and clean backtraces on panic.
//!
//! This library aims to make panics a little less painful by nicely colorizing
//! them, skipping over frames of functions called after the panic was already
//! initiated and printing relevant source snippets. Frames of functions in your
//! application are colored in a different color (red) than those of
//! dependencies (green).
//!
//! ### Screenshot
//! 
//!
//! ### Features
//! - Colorize backtraces to be easier on the eyes
//! - Show source snippets if source files are found on disk
//! - Print frames of application code vs dependencies in different color
//! - Hide all the frames after the panic was already initiated
//! - Hide language runtime initialization frames
//!
//! ### Installing the panic handler
//!
//! In your main function, just insert the following snippet. That's it!
//! ```rust
//! color_backtrace::install();
//! ```
//!
//! If you want to customize some settings, you can instead do:
//! ```rust
//! use color_backtrace::{default_output_stream, BacktracePrinter};
//! BacktracePrinter::new().message("Custom message!").install(default_output_stream());
//! ```
//!
//! ### Controlling verbosity
//! The default verbosity is configured via the `RUST_BACKTRACE` environment
//! variable. An unset `RUST_BACKTRACE` corresponds to
//! [minimal](Verbosity::Minimal), `RUST_BACKTRACE=1` to
//! [medium](Verbosity::Medium) and `RUST_BACKTRACE=full` to
//! [full](Verbosity::Full) verbosity levels.
use std::env;
use std::fs::File;
use std::io::{BufRead, BufReader, ErrorKind, IsTerminal as _};
use std::panic::PanicInfo;
use std::path::PathBuf;
use std::sync::{Arc, Mutex};
use termcolor::{Ansi, Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
// Re-export termcolor so users don't have to depend on it themselves.
pub use termcolor;
// ============================================================================================== //
// [Result / Error types] //
// ============================================================================================== //
type IOResult<T = ()> = Result<T, std::io::Error>;
// ============================================================================================== //
// [Verbosity management] //
// ============================================================================================== //
/// Defines how verbose the backtrace is supposed to be.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Verbosity {
/// Print a small message including the panic payload and the panic location.
Minimal,
/// Everything in `Minimal` and additionally print a backtrace.
Medium,
/// Everything in `Medium` plus source snippets for all backtrace locations.
Full,
}
impl Verbosity {
/// Get the verbosity level from `RUST_BACKTRACE` env variable.
pub fn from_env() -> Self {
Self::convert_env(env::var("RUST_BACKTRACE").ok())
}
/// Get the verbosity level from `RUST_LIB_BACKTRACE` env variable,
/// falling back to the `RUST_BACKTRACE`.
pub fn lib_from_env() -> Self {
Self::convert_env(
env::var("RUST_LIB_BACKTRACE")
.or_else(|_| env::var("RUST_BACKTRACE"))
.ok(),
)
}
fn convert_env(env: Option<String>) -> Self {
match env {
Some(ref x) if x == "full" => Verbosity::Full,
Some(_) => Verbosity::Medium,
None => Verbosity::Minimal,
}
}
}
// ============================================================================================== //
// [Panic handler and install logic] //
// ============================================================================================== //
/// Install a `BacktracePrinter` handler with `::default()` settings.
///
/// This currently is a convenience shortcut for writing
///
/// ```rust
/// use color_backtrace::{BacktracePrinter, default_output_stream};
/// BacktracePrinter::default().install(default_output_stream())
/// ```
pub fn install() {
BacktracePrinter::default().install(default_output_stream());
}
/// Create the default output stream.
///
/// If stderr is attached to a tty, this is a colorized stderr, else it's
/// a plain (colorless) stderr.
pub fn default_output_stream() -> Box<StandardStream> {
Box::new(StandardStream::stderr(if std::io::stderr().is_terminal() {
ColorChoice::Always
} else {
ColorChoice::Never
}))
}
#[deprecated(
since = "0.4.0",
note = "Use `BacktracePrinter::into_panic_handler()` instead."
)]
pub fn create_panic_handler(
printer: BacktracePrinter,
) -> Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send> {
let out_stream_mutex = Mutex::new(default_output_stream());
Box::new(move |pi| {
let mut lock = out_stream_mutex.lock().unwrap();
if let Err(e) = printer.print_panic_info(pi, &mut *lock) {
// Panicking while handling a panic would send us into a deadlock,
// so we just print the error to stderr instead.
eprintln!("Error while printing panic: {:?}", e);
}
})
}
#[deprecated(since = "0.4.0", note = "Use `BacktracePrinter::install()` instead.")]
pub fn install_with_settings(printer: BacktracePrinter) {
std::panic::set_hook(printer.into_panic_handler(default_output_stream()))
}
// ============================================================================================== //
// [Backtrace frame] //
// ============================================================================================== //
pub type FilterCallback = dyn Fn(&mut Vec<&Frame>) + Send + Sync + 'static;
#[derive(Debug)]
pub struct Frame {
pub n: usize,
pub name: Option<String>,
pub lineno: Option<u32>,
pub filename: Option<PathBuf>,
pub ip: usize,
_private_ctor: (),
}
impl Frame {
/// Heuristically determine whether the frame is likely to be part of a
/// dependency.
///
/// If it fails to detect some patterns in your code base, feel free to drop
/// an issue / a pull request!
fn is_dependency_code(&self) -> bool {
const SYM_PREFIXES: &[&str] = &[
"std::",
"core::",
"backtrace::backtrace::",
"_rust_begin_unwind",
"color_traceback::",
"__rust_",
"___rust_",
"__pthread",
"_main",
"main",
"__scrt_common_main_seh",
"BaseThreadInitThunk",
"_start",
"__libc_start_main",
"start_thread",
];
// Inspect name.
if let Some(ref name) = self.name {
if SYM_PREFIXES.iter().any(|x| name.starts_with(x)) {
return true;
}
}
const FILE_PREFIXES: &[&str] = &[
"/rustc/",
"src/libstd/",
"src/libpanic_unwind/",
"src/libtest/",
];
// Inspect filename.
if let Some(ref filename) = self.filename {
let filename = filename.to_string_lossy();
if FILE_PREFIXES.iter().any(|x| filename.starts_with(x))
|| filename.contains("/.cargo/registry/src/")
{
return true;
}
}
false
}
/// Heuristically determine whether a frame is likely to be a post panic
/// frame.
///
    /// Post panic frames are frames of functions called after the actual panic
/// is already in progress and don't contain any useful information for a
/// reader of the backtrace.
fn is_post_panic_code(&self) -> bool {
const SYM_PREFIXES: &[&str] = &[
"_rust_begin_unwind",
"rust_begin_unwind",
"core::result::unwrap_failed",
"core::option::expect_none_failed",
"core::panicking::panic_fmt",
"color_backtrace::create_panic_handler",
"std::panicking::begin_panic",
"begin_panic_fmt",
"backtrace::capture",
];
match self.name.as_ref() {
Some(name) => SYM_PREFIXES.iter().any(|x| name.starts_with(x)),
None => false,
}
}
/// Heuristically determine whether a frame is likely to be part of language
/// runtime.
fn is_runtime_init_code(&self) -> bool {
const SYM_PREFIXES: &[&str] = &[
"std::rt::lang_start::",
"test::run_test::run_test_inner::",
"std::sys_common::backtrace::__rust_begin_short_backtrace",
];
let (name, file) = match (self.name.as_ref(), self.filename.as_ref()) {
(Some(name), Some(filename)) => (name, filename.to_string_lossy()),
_ => return false,
};
if SYM_PREFIXES.iter().any(|x| name.starts_with(x)) {
return true;
}
// For Linux, this is the best rule for skipping test init I found.
if name == "{{closure}}" && file == "src/libtest/lib.rs" {
return true;
}
false
}
fn print_source_if_avail(&self, mut out: impl WriteColor, s: &BacktracePrinter) -> IOResult {
let (lineno, filename) = match (self.lineno, self.filename.as_ref()) {
(Some(a), Some(b)) => (a, b),
// Without a line number and file name, we can't sensibly proceed.
_ => return Ok(()),
};
let file = match File::open(filename) {
Ok(file) => file,
Err(ref e) if e.kind() == ErrorKind::NotFound => return Ok(()),
e @ Err(_) => e?,
};
// Extract relevant lines.
let reader = BufReader::new(file);
let start_line = lineno - 2.min(lineno - 1);
let surrounding_src = reader.lines().skip(start_line as usize - 1).take(5);
for (line, cur_line_no) in surrounding_src.zip(start_line..) {
if cur_line_no == lineno {
// Print actual source line with brighter color.
out.set_color(&s.colors.selected_src_ln)?;
writeln!(out, "{:>8} > {}", cur_line_no, line?)?;
out.reset()?;
} else {
writeln!(out, "{:>8} │ {}", cur_line_no, line?)?;
}
}
Ok(())
}
/// Get the module's name by walking /proc/self/maps
#[cfg(all(
feature = "resolve-modules",
unix,
not(any(target_os = "macos", target_os = "ios"))
))]
fn module_info(&self) -> Option<(String, usize)> {
use regex::Regex;
use std::path::Path;
let re = Regex::new(
r"(?x)
^
(?P<start>[0-9a-f]{8,16})
-
(?P<end>[0-9a-f]{8,16})
\s
(?P<perm>[-rwxp]{4})
\s
(?P<offset>[0-9a-f]{8})
\s
[0-9a-f]+:[0-9a-f]+
\s
[0-9]+
\s+
(?P<path>.*)
$
",
)
.unwrap();
let mapsfile = File::open("/proc/self/maps").expect("Unable to open /proc/self/maps");
for line in BufReader::new(mapsfile).lines() {
let line = line.unwrap();
if let Some(caps) = re.captures(&line) {
let (start, end, path) = (
usize::from_str_radix(caps.name("start").unwrap().as_str(), 16).unwrap(),
usize::from_str_radix(caps.name("end").unwrap().as_str(), 16).unwrap(),
caps.name("path").unwrap().as_str().to_string(),
);
if self.ip >= start && self.ip < end {
return if let Some(filename) = Path::new(&path).file_name() {
Some((filename.to_str().unwrap().to_string(), start))
} else {
None
};
}
}
}
None
}
#[cfg(not(all(
feature = "resolve-modules",
unix,
not(any(target_os = "macos", target_os = "ios"))
)))]
fn module_info(&self) -> Option<(String, usize)> {
None
}
fn pr | self, i: usize, out: &mut impl WriteColor, s: &BacktracePrinter) -> IOResult {
let is_dependency_code = self.is_dependency_code();
// Print frame index.
write!(out, "{:>2}: ", i)?;
if s.should_print_addresses() {
if let Some((module_name, module_base)) = self.module_info() {
write!(out, "{}:0x{:08x} - ", module_name, self.ip - module_base)?;
} else {
write!(out, "0x{:016x} - ", self.ip)?;
}
}
// Does the function have a hash suffix?
// (dodging a dep on the regex crate here)
let name = self
.name
.as_ref()
.map(|s| s.as_str())
.unwrap_or("<unknown>");
let has_hash_suffix = name.len() > 19
&& &name[name.len() - 19..name.len() - 16] == "::h"
&& name[name.len() - 16..].chars().all(|x| x.is_digit(16));
// Print function name.
out.set_color(if is_dependency_code {
&s.colors.dependency_code
} else {
&s.colors.crate_code
})?;
if has_hash_suffix {
write!(out, "{}", &name[..name.len() - 19])?;
if s.strip_function_hash {
writeln!(out)?;
} else {
out.set_color(if is_dependency_code {
&s.colors.dependency_code_hash
} else {
&s.colors.crate_code_hash
})?;
writeln!(out, "{}", &name[name.len() - 19..])?;
}
} else {
writeln!(out, "{}", name)?;
}
out.reset()?;
// Print source location, if known.
if let Some(ref file) = self.filename {
let filestr = file.to_str().unwrap_or("<bad utf8>");
let lineno = self
.lineno
.map_or("<unknown line>".to_owned(), |x| x.to_string());
writeln!(out, " at {}:{}", filestr, lineno)?;
} else {
writeln!(out, " at <unknown source file>")?;
}
// Maybe print source.
if s.current_verbosity() >= Verbosity::Full {
self.print_source_if_avail(out, s)?;
}
Ok(())
}
}
/// The default frame filter. Heuristically determines whether a frame is likely to be an
/// uninteresting frame. This filters out post panic frames and runtime init frames and dependency
/// code.
pub fn default_frame_filter(frames: &mut Vec<&Frame>) {
let top_cutoff = frames
.iter()
.rposition(|x| x.is_post_panic_code())
.map(|x| x + 2) // indices are 1 based
.unwrap_or(0);
let bottom_cutoff = frames
.iter()
.position(|x| x.is_runtime_init_code())
.unwrap_or_else(|| frames.len());
let rng = top_cutoff..=bottom_cutoff;
frames.retain(|x| rng.contains(&x.n))
}
// ============================================================================================== //
// [BacktracePrinter] //
// ============================================================================================== //
/// Color scheme definition.
#[derive(Debug, Clone)]
pub struct ColorScheme {
pub frames_omitted_msg: ColorSpec,
pub header: ColorSpec,
pub msg_loc_prefix: ColorSpec,
pub src_loc: ColorSpec,
pub src_loc_separator: ColorSpec,
pub env_var: ColorSpec,
pub dependency_code: ColorSpec,
pub dependency_code_hash: ColorSpec,
pub crate_code: ColorSpec,
pub crate_code_hash: ColorSpec,
pub selected_src_ln: ColorSpec,
}
impl ColorScheme {
    /// Helper to create a new `ColorSpec` and set a few properties in one go.
fn cs(fg: Option<Color>, intense: bool, bold: bool) -> ColorSpec {
let mut cs = ColorSpec::new();
cs.set_fg(fg);
cs.set_bold(bold);
cs.set_intense(intense);
cs
}
/// The classic `color-backtrace` scheme, as shown in the screenshots.
pub fn classic() -> Self {
Self {
frames_omitted_msg: Self::cs(Some(Color::Cyan), true, false),
header: Self::cs(Some(Color::Red), false, false),
msg_loc_prefix: Self::cs(Some(Color::Cyan), false, false),
src_loc: Self::cs(Some(Color::Magenta), false, false),
src_loc_separator: Self::cs(Some(Color::White), false, false),
env_var: Self::cs(None, false, true),
dependency_code: Self::cs(Some(Color::Green), false, false),
dependency_code_hash: Self::cs(Some(Color::Black), true, false),
crate_code: Self::cs(Some(Color::Red), true, false),
crate_code_hash: Self::cs(Some(Color::Black), true, false),
selected_src_ln: Self::cs(None, false, true),
}
}
}
impl Default for ColorScheme {
fn default() -> Self {
Self::classic()
}
}
#[deprecated(since = "0.4.0", note = "Use `BacktracePrinter` instead.")]
pub type Settings = BacktracePrinter;
/// Pretty-printer for backtraces and [`PanicInfo`](PanicInfo) structs.
#[derive(Clone)]
pub struct BacktracePrinter {
message: String,
verbosity: Verbosity,
lib_verbosity: Verbosity,
strip_function_hash: bool,
is_panic_handler: bool,
colors: ColorScheme,
filters: Vec<Arc<FilterCallback>>,
should_print_addresses: bool,
}
impl Default for BacktracePrinter {
fn default() -> Self {
Self {
verbosity: Verbosity::from_env(),
lib_verbosity: Verbosity::lib_from_env(),
message: "The application panicked (crashed).".to_owned(),
strip_function_hash: false,
colors: ColorScheme::classic(),
is_panic_handler: false,
filters: vec![Arc::new(default_frame_filter)],
should_print_addresses: false,
}
}
}
impl std::fmt::Debug for BacktracePrinter {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
fmt.debug_struct("Settings")
.field("message", &self.message)
.field("verbosity", &self.verbosity)
.field("lib_verbosity", &self.lib_verbosity)
.field("strip_function_hash", &self.strip_function_hash)
.field("is_panic_handler", &self.is_panic_handler)
.field("print_addresses", &self.should_print_addresses)
.field("colors", &self.colors)
.finish()
}
}
/// Builder functions.
impl BacktracePrinter {
/// Alias for `BacktracePrinter::default`.
pub fn new() -> Self {
Self::default()
}
/// Alter the color scheme.
///
/// Defaults to `ColorScheme::classic()`.
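    ///
    /// A minimal sketch that starts from the classic scheme and tweaks a
    /// single color (the chosen color is purely illustrative):
    ///
    /// ```rust
    /// use color_backtrace::{default_output_stream, BacktracePrinter, ColorScheme};
    /// use color_backtrace::termcolor::Color;
    ///
    /// let mut scheme = ColorScheme::classic();
    /// scheme.crate_code.set_fg(Some(Color::Yellow));
    /// BacktracePrinter::new()
    ///     .color_scheme(scheme)
    ///     .install(default_output_stream());
    /// ```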
pub fn color_scheme(mut self, colors: ColorScheme) -> Self {
self.colors = colors;
self
}
/// Controls the "greeting" message of the panic.
///
/// Defaults to `"The application panicked (crashed)"`.
pub fn message(mut self, message: impl Into<String>) -> Self {
self.message = message.into();
self
}
/// Controls the verbosity level used when installed as panic handler.
///
/// Defaults to `Verbosity::from_env()`.
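    ///
    /// For example, a sketch that always prints a full backtrace on panic,
    /// regardless of the `RUST_BACKTRACE` setting:
    ///
    /// ```rust
    /// use color_backtrace::{default_output_stream, BacktracePrinter, Verbosity};
    ///
    /// BacktracePrinter::new()
    ///     .verbosity(Verbosity::Full)
    ///     .install(default_output_stream());
    /// ```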
pub fn verbosity(mut self, v: Verbosity) -> Self {
self.verbosity = v;
self
}
/// Controls the lib verbosity level used when formatting user provided traces.
///
/// Defaults to `Verbosity::lib_from_env()`.
pub fn lib_verbosity(mut self, v: Verbosity) -> Self {
self.lib_verbosity = v;
self
}
/// Controls whether the hash part of functions is stripped.
///
/// Defaults to `false`.
pub fn strip_function_hash(mut self, strip: bool) -> Self {
self.strip_function_hash = strip;
self
}
/// Controls whether addresses (or module offsets if available) should be printed.
///
/// Defaults to `false`.
pub fn print_addresses(mut self, val: bool) -> Self {
self.should_print_addresses = val;
self
}
/// Add a custom filter to the set of frame filters
///
/// Filters are run in the order they are added.
///
/// # Example
///
/// ```rust
/// use color_backtrace::{default_output_stream, BacktracePrinter};
///
/// BacktracePrinter::new()
/// .add_frame_filter(Box::new(|frames| {
/// frames.retain(|x| matches!(&x.name, Some(n) if !n.starts_with("blabla")))
/// }))
/// .install(default_output_stream());
/// ```
pub fn add_frame_filter(mut self, filter: Box<FilterCallback>) -> Self {
self.filters.push(filter.into());
self
}
/// Clears all filters associated with this printer, including the default filter
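    ///
    /// For example, a sketch that drops the default filter and installs a
    /// single custom one (the `truncate(10)` filter is purely illustrative):
    ///
    /// ```rust
    /// use color_backtrace::{default_output_stream, BacktracePrinter};
    ///
    /// BacktracePrinter::new()
    ///     .clear_frame_filters()
    ///     .add_frame_filter(Box::new(|frames| frames.truncate(10)))
    ///     .install(default_output_stream());
    /// ```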
pub fn clear_frame_filters(mut self) -> Self {
self.filters.clear();
self
}
}
/// Routines for putting the panic printer to use.
impl BacktracePrinter {
    /// Install this `BacktracePrinter` as the global panic handler.
///
/// Output streams can be created via `default_output_stream()` or
/// using any other stream that implements
/// [`termcolor::WriteColor`](termcolor::WriteColor).
pub fn install(self, out: impl WriteColor + Sync + Send + 'static) {
std::panic::set_hook(self.into_panic_handler(out))
}
/// Create a `color_backtrace` panic handler from this panic printer.
///
/// This can be used if you want to combine the handler with other handlers.
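    ///
    /// A minimal sketch that runs some custom logic before delegating to the
    /// `color_backtrace` handler:
    ///
    /// ```rust
    /// use color_backtrace::{default_output_stream, BacktracePrinter};
    ///
    /// let color_hook = BacktracePrinter::new().into_panic_handler(default_output_stream());
    /// std::panic::set_hook(Box::new(move |pi| {
    ///     // custom logging could go here
    ///     color_hook(pi);
    /// }));
    /// ```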
pub fn into_panic_handler(
mut self,
out: impl WriteColor + Sync + Send + 'static,
) -> Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send> {
self.is_panic_handler = true;
let out_stream_mutex = Mutex::new(out);
Box::new(move |pi| {
let mut lock = out_stream_mutex.lock().unwrap();
if let Err(e) = self.print_panic_info(pi, &mut *lock) {
// Panicking while handling a panic would send us into a deadlock,
// so we just print the error to stderr instead.
eprintln!("Error while printing panic: {:?}", e);
}
})
}
/// Pretty-prints a [`backtrace::Backtrace`](backtrace::Backtrace) to an output stream.
pub fn print_trace(&self, trace: &backtrace::Backtrace, out: &mut impl WriteColor) -> IOResult {
writeln!(out, "{:━^80}", " BACKTRACE ")?;
// Collect frame info.
let frames: Vec<_> = trace
.frames()
.iter()
.flat_map(|frame| frame.symbols().iter().map(move |sym| (frame.ip(), sym)))
.zip(1usize..)
.map(|((ip, sym), n)| Frame {
name: sym.name().map(|x| x.to_string()),
lineno: sym.lineno(),
filename: sym.filename().map(|x| x.into()),
n,
ip: ip as usize,
_private_ctor: (),
})
.collect();
let mut filtered_frames = frames.iter().collect();
match env::var("COLORBT_SHOW_HIDDEN").ok().as_deref() {
Some("1") | Some("on") | Some("y") => (),
_ => {
for filter in &self.filters {
filter(&mut filtered_frames);
}
}
}
if filtered_frames.is_empty() {
// TODO: Would probably look better centered.
return writeln!(out, "<empty backtrace>");
}
// Don't let filters mess with the order.
filtered_frames.sort_by_key(|x| x.n);
macro_rules! print_hidden {
($n:expr) => {
out.set_color(&self.colors.frames_omitted_msg)?;
let n = $n;
let text = format!(
"{decorator} {n} frame{plural} hidden {decorator}",
n = n,
plural = if n == 1 { "" } else { "s" },
decorator = "⋮",
);
writeln!(out, "{:^80}", text)?;
out.reset()?;
};
}
let mut last_n = 0;
for frame in &filtered_frames {
let frame_delta = frame.n - last_n - 1;
if frame_delta != 0 {
print_hidden!(frame_delta);
}
frame.print(frame.n, out, self)?;
last_n = frame.n;
}
let last_filtered_n = filtered_frames.last().unwrap().n;
let last_unfiltered_n = frames.last().unwrap().n;
if last_filtered_n < last_unfiltered_n {
print_hidden!(last_unfiltered_n - last_filtered_n);
}
Ok(())
}
/// Pretty-print a backtrace to a `String`, using VT100 color codes.
pub fn format_trace_to_string(&self, trace: &backtrace::Backtrace) -> IOResult<String> {
// TODO: should we implicitly enable VT100 support on Windows here?
let mut ansi = Ansi::new(vec![]);
self.print_trace(trace, &mut ansi)?;
Ok(String::from_utf8(ansi.into_inner()).unwrap())
}
/// Pretty-prints a [`PanicInfo`](PanicInfo) struct to an output stream.
pub fn print_panic_info(&self, pi: &PanicInfo, out: &mut impl WriteColor) -> IOResult {
out.set_color(&self.colors.header)?;
writeln!(out, "{}", self.message)?;
out.reset()?;
// Print panic message.
let payload = pi
.payload()
.downcast_ref::<String>()
.map(String::as_str)
.or_else(|| pi.payload().downcast_ref::<&str>().cloned())
.unwrap_or("<non string panic payload>");
write!(out, "Message: ")?;
out.set_color(&self.colors.msg_loc_prefix)?;
writeln!(out, "{}", payload)?;
out.reset()?;
// If known, print panic location.
write!(out, "Location: ")?;
if let Some(loc) = pi.location() {
out.set_color(&self.colors.src_loc)?;
write!(out, "{}", loc.file())?;
out.set_color(&self.colors.src_loc_separator)?;
write!(out, ":")?;
out.set_color(&self.colors.src_loc)?;
writeln!(out, "{}", loc.line())?;
out.reset()?;
} else {
writeln!(out, "<unknown>")?;
}
// Print some info on how to increase verbosity.
if self.current_verbosity() == Verbosity::Minimal {
write!(out, "\nBacktrace omitted.\n\nRun with ")?;
out.set_color(&self.colors.env_var)?;
write!(out, "RUST_BACKTRACE=1")?;
out.reset()?;
writeln!(out, " environment variable to display it.")?;
} else {
// This text only makes sense if frames are displayed.
write!(out, "\nRun with ")?;
out.set_color(&self.colors.env_var)?;
write!(out, "COLORBT_SHOW_HIDDEN=1")?;
out.reset()?;
writeln!(out, " environment variable to disable frame filtering.")?;
}
if self.current_verbosity() <= Verbosity::Medium {
write!(out, "Run with ")?;
out.set_color(&self.colors.env_var)?;
write!(out, "RUST_BACKTRACE=full")?;
out.reset()?;
writeln!(out, " to include source snippets.")?;
}
if self.current_verbosity() >= Verbosity::Medium {
self.print_trace(&backtrace::Backtrace::new(), out)?;
}
Ok(())
}
fn current_verbosity(&self) -> Verbosity {
if self.is_panic_handler {
self.verbosity
} else {
self.lib_verbosity
}
}
fn should_print_addresses(&self) -> bool {
self.should_print_addresses
}
}
// ============================================================================================== //
// [Deprecated routines for backward compat] //
// ============================================================================================== //
#[deprecated(since = "0.4.0", note = "Use `BacktracePrinter::print_trace` instead")]
pub fn print_backtrace(trace: &backtrace::Backtrace, s: &mut BacktracePrinter) -> IOResult {
s.print_trace(trace, &mut default_output_stream())
}
#[deprecated(
since = "0.4.0",
note = "Use `BacktracePrinter::print_panic_info` instead`"
)]
pub fn print_panic_info(pi: &PanicInfo, s: &mut BacktracePrinter) -> IOResult {
s.print_panic_info(pi, &mut default_output_stream())
}
// ============================================================================================== //
| int(& | identifier_name |
lib.rs | //! Colorful and clean backtraces on panic.
//!
//! This library aims to make panics a little less painful by nicely colorizing
//! them, skipping over frames of functions called after the panic was already
//! initiated and printing relevant source snippets. Frames of functions in your
//! application are colored in a different color (red) than those of
//! dependencies (green).
//!
//! ### Screenshot
//! 
//!
//! ### Features
//! - Colorize backtraces to be easier on the eyes
//! - Show source snippets if source files are found on disk
//! - Print frames of application code vs dependencies in different color
//! - Hide all the frames after the panic was already initiated
//! - Hide language runtime initialization frames
//!
//! ### Installing the panic handler
//!
//! In your main function, just insert the following snippet. That's it!
//! ```rust
//! color_backtrace::install();
//! ```
//!
//! If you want to customize some settings, you can instead do:
//! ```rust
//! use color_backtrace::{default_output_stream, BacktracePrinter};
//! BacktracePrinter::new().message("Custom message!").install(default_output_stream());
//! ```
//!
//! ### Controlling verbosity
//! The default verbosity is configured via the `RUST_BACKTRACE` environment
//! variable. An unset `RUST_BACKTRACE` corresponds to
//! [minimal](Verbosity::Minimal), `RUST_BACKTRACE=1` to
//! [medium](Verbosity::Medium) and `RUST_BACKTRACE=full` to
//! [full](Verbosity::Full) verbosity levels.
use std::env;
use std::fs::File;
use std::io::{BufRead, BufReader, ErrorKind, IsTerminal as _};
use std::panic::PanicInfo;
use std::path::PathBuf;
use std::sync::{Arc, Mutex};
use termcolor::{Ansi, Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
// Re-export termcolor so users don't have to depend on it themselves.
pub use termcolor;
// ============================================================================================== //
// [Result / Error types] //
// ============================================================================================== //
type IOResult<T = ()> = Result<T, std::io::Error>;
// ============================================================================================== //
// [Verbosity management] //
// ============================================================================================== //
/// Defines how verbose the backtrace is supposed to be.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Verbosity {
/// Print a small message including the panic payload and the panic location.
Minimal,
/// Everything in `Minimal` and additionally print a backtrace.
Medium,
/// Everything in `Medium` plus source snippets for all backtrace locations.
Full,
}
impl Verbosity {
/// Get the verbosity level from `RUST_BACKTRACE` env variable.
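    ///
    /// A minimal sketch (the result depends on the caller's environment):
    ///
    /// ```rust
    /// use color_backtrace::Verbosity;
    ///
    /// // An unset RUST_BACKTRACE maps to Minimal, "1" to Medium, "full" to Full.
    /// let verbosity = Verbosity::from_env();
    /// assert!(verbosity >= Verbosity::Minimal);
    /// ```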
pub fn from_env() -> Self {
Self::convert_env(env::var("RUST_BACKTRACE").ok())
}
/// Get the verbosity level from `RUST_LIB_BACKTRACE` env variable,
/// falling back to the `RUST_BACKTRACE`.
pub fn lib_from_env() -> Self {
Self::convert_env(
env::var("RUST_LIB_BACKTRACE")
.or_else(|_| env::var("RUST_BACKTRACE"))
.ok(),
)
}
fn convert_env(env: Option<String>) -> Self {
match env {
Some(ref x) if x == "full" => Verbosity::Full,
Some(_) => Verbosity::Medium,
None => Verbosity::Minimal,
}
}
}
// ============================================================================================== //
// [Panic handler and install logic] //
// ============================================================================================== //
/// Install a `BacktracePrinter` handler with `::default()` settings.
///
/// This currently is a convenience shortcut for writing
///
/// ```rust
/// use color_backtrace::{BacktracePrinter, default_output_stream};
/// BacktracePrinter::default().install(default_output_stream())
/// ```
pub fn install() {
BacktracePrinter::default().install(default_output_stream());
}
/// Create the default output stream.
///
/// If stderr is attached to a tty, this is a colorized stderr, else it's
/// a plain (colorless) stderr.
pub fn default_output_stream() -> Box<StandardStream> {
Box::new(StandardStream::stderr(if std::io::stderr().is_terminal() {
ColorChoice::Always
} else {
ColorChoice::Never
}))
}
#[deprecated(
since = "0.4.0",
note = "Use `BacktracePrinter::into_panic_handler()` instead."
)]
pub fn create_panic_handler(
printer: BacktracePrinter,
) -> Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send> {
let out_stream_mutex = Mutex::new(default_output_stream());
Box::new(move |pi| {
let mut lock = out_stream_mutex.lock().unwrap();
if let Err(e) = printer.print_panic_info(pi, &mut *lock) {
// Panicking while handling a panic would send us into a deadlock,
// so we just print the error to stderr instead.
eprintln!("Error while printing panic: {:?}", e);
}
})
}
#[deprecated(since = "0.4.0", note = "Use `BacktracePrinter::install()` instead.")]
pub fn install_with_settings(printer: BacktracePrinter) {
std::panic::set_hook(printer.into_panic_handler(default_output_stream()))
}
// ============================================================================================== //
// [Backtrace frame] //
// ============================================================================================== //
pub type FilterCallback = dyn Fn(&mut Vec<&Frame>) + Send + Sync + 'static;
#[derive(Debug)]
pub struct Frame {
pub n: usize,
pub name: Option<String>,
pub lineno: Option<u32>,
pub filename: Option<PathBuf>,
pub ip: usize,
_private_ctor: (),
}
impl Frame {
/// Heuristically determine whether the frame is likely to be part of a
/// dependency.
///
/// If it fails to detect some patterns in your code base, feel free to drop
/// an issue / a pull request!
fn is_dependency_code(&self) -> bool {
const SYM_PREFIXES: &[&str] = &[
"std::",
"core::",
"backtrace::backtrace::",
"_rust_begin_unwind",
"color_traceback::",
"__rust_", | "___rust_",
"__pthread",
"_main",
"main",
"__scrt_common_main_seh",
"BaseThreadInitThunk",
"_start",
"__libc_start_main",
"start_thread",
];
// Inspect name.
if let Some(ref name) = self.name {
if SYM_PREFIXES.iter().any(|x| name.starts_with(x)) {
return true;
}
}
const FILE_PREFIXES: &[&str] = &[
"/rustc/",
"src/libstd/",
"src/libpanic_unwind/",
"src/libtest/",
];
// Inspect filename.
if let Some(ref filename) = self.filename {
let filename = filename.to_string_lossy();
if FILE_PREFIXES.iter().any(|x| filename.starts_with(x))
|| filename.contains("/.cargo/registry/src/")
{
return true;
}
}
false
}
/// Heuristically determine whether a frame is likely to be a post panic
/// frame.
///
    /// Post panic frames are frames of functions called after the actual panic
/// is already in progress and don't contain any useful information for a
/// reader of the backtrace.
fn is_post_panic_code(&self) -> bool {
const SYM_PREFIXES: &[&str] = &[
"_rust_begin_unwind",
"rust_begin_unwind",
"core::result::unwrap_failed",
"core::option::expect_none_failed",
"core::panicking::panic_fmt",
"color_backtrace::create_panic_handler",
"std::panicking::begin_panic",
"begin_panic_fmt",
"backtrace::capture",
];
match self.name.as_ref() {
Some(name) => SYM_PREFIXES.iter().any(|x| name.starts_with(x)),
None => false,
}
}
/// Heuristically determine whether a frame is likely to be part of language
/// runtime.
fn is_runtime_init_code(&self) -> bool {
const SYM_PREFIXES: &[&str] = &[
"std::rt::lang_start::",
"test::run_test::run_test_inner::",
"std::sys_common::backtrace::__rust_begin_short_backtrace",
];
let (name, file) = match (self.name.as_ref(), self.filename.as_ref()) {
(Some(name), Some(filename)) => (name, filename.to_string_lossy()),
_ => return false,
};
if SYM_PREFIXES.iter().any(|x| name.starts_with(x)) {
return true;
}
// For Linux, this is the best rule for skipping test init I found.
if name == "{{closure}}" && file == "src/libtest/lib.rs" {
return true;
}
false
}
fn print_source_if_avail(&self, mut out: impl WriteColor, s: &BacktracePrinter) -> IOResult {
let (lineno, filename) = match (self.lineno, self.filename.as_ref()) {
(Some(a), Some(b)) => (a, b),
// Without a line number and file name, we can't sensibly proceed.
_ => return Ok(()),
};
let file = match File::open(filename) {
Ok(file) => file,
Err(ref e) if e.kind() == ErrorKind::NotFound => return Ok(()),
e @ Err(_) => e?,
};
// Extract relevant lines.
let reader = BufReader::new(file);
let start_line = lineno - 2.min(lineno - 1);
let surrounding_src = reader.lines().skip(start_line as usize - 1).take(5);
for (line, cur_line_no) in surrounding_src.zip(start_line..) {
if cur_line_no == lineno {
// Print actual source line with brighter color.
out.set_color(&s.colors.selected_src_ln)?;
writeln!(out, "{:>8} > {}", cur_line_no, line?)?;
out.reset()?;
} else {
writeln!(out, "{:>8} │ {}", cur_line_no, line?)?;
}
}
Ok(())
}
/// Get the module's name by walking /proc/self/maps
#[cfg(all(
feature = "resolve-modules",
unix,
not(any(target_os = "macos", target_os = "ios"))
))]
fn module_info(&self) -> Option<(String, usize)> {
use regex::Regex;
use std::path::Path;
let re = Regex::new(
r"(?x)
^
(?P<start>[0-9a-f]{8,16})
-
(?P<end>[0-9a-f]{8,16})
\s
(?P<perm>[-rwxp]{4})
\s
(?P<offset>[0-9a-f]{8})
\s
[0-9a-f]+:[0-9a-f]+
\s
[0-9]+
\s+
(?P<path>.*)
$
",
)
.unwrap();
let mapsfile = File::open("/proc/self/maps").expect("Unable to open /proc/self/maps");
for line in BufReader::new(mapsfile).lines() {
let line = line.unwrap();
if let Some(caps) = re.captures(&line) {
let (start, end, path) = (
usize::from_str_radix(caps.name("start").unwrap().as_str(), 16).unwrap(),
usize::from_str_radix(caps.name("end").unwrap().as_str(), 16).unwrap(),
caps.name("path").unwrap().as_str().to_string(),
);
if self.ip >= start && self.ip < end {
return if let Some(filename) = Path::new(&path).file_name() {
Some((filename.to_str().unwrap().to_string(), start))
} else {
None
};
}
}
}
None
}
#[cfg(not(all(
feature = "resolve-modules",
unix,
not(any(target_os = "macos", target_os = "ios"))
)))]
fn module_info(&self) -> Option<(String, usize)> {
None
}
fn print(&self, i: usize, out: &mut impl WriteColor, s: &BacktracePrinter) -> IOResult {
let is_dependency_code = self.is_dependency_code();
// Print frame index.
write!(out, "{:>2}: ", i)?;
if s.should_print_addresses() {
if let Some((module_name, module_base)) = self.module_info() {
write!(out, "{}:0x{:08x} - ", module_name, self.ip - module_base)?;
} else {
write!(out, "0x{:016x} - ", self.ip)?;
}
}
// Does the function have a hash suffix?
// (dodging a dep on the regex crate here)
let name = self
.name
.as_ref()
.map(|s| s.as_str())
.unwrap_or("<unknown>");
let has_hash_suffix = name.len() > 19
&& &name[name.len() - 19..name.len() - 16] == "::h"
&& name[name.len() - 16..].chars().all(|x| x.is_digit(16));
// Print function name.
out.set_color(if is_dependency_code {
&s.colors.dependency_code
} else {
&s.colors.crate_code
})?;
if has_hash_suffix {
write!(out, "{}", &name[..name.len() - 19])?;
if s.strip_function_hash {
writeln!(out)?;
} else {
out.set_color(if is_dependency_code {
&s.colors.dependency_code_hash
} else {
&s.colors.crate_code_hash
})?;
writeln!(out, "{}", &name[name.len() - 19..])?;
}
} else {
writeln!(out, "{}", name)?;
}
out.reset()?;
// Print source location, if known.
if let Some(ref file) = self.filename {
let filestr = file.to_str().unwrap_or("<bad utf8>");
let lineno = self
.lineno
.map_or("<unknown line>".to_owned(), |x| x.to_string());
writeln!(out, " at {}:{}", filestr, lineno)?;
} else {
writeln!(out, " at <unknown source file>")?;
}
// Maybe print source.
if s.current_verbosity() >= Verbosity::Full {
self.print_source_if_avail(out, s)?;
}
Ok(())
}
}
/// The default frame filter. Heuristically determines whether a frame is likely to be an
/// uninteresting frame. This filters out post panic frames and runtime init frames and dependency
/// code.
pub fn default_frame_filter(frames: &mut Vec<&Frame>) {
let top_cutoff = frames
.iter()
.rposition(|x| x.is_post_panic_code())
.map(|x| x + 2) // indices are 1 based
.unwrap_or(0);
let bottom_cutoff = frames
.iter()
.position(|x| x.is_runtime_init_code())
.unwrap_or_else(|| frames.len());
let rng = top_cutoff..=bottom_cutoff;
frames.retain(|x| rng.contains(&x.n))
}
// ============================================================================================== //
// [BacktracePrinter] //
// ============================================================================================== //
/// Color scheme definition.
#[derive(Debug, Clone)]
pub struct ColorScheme {
pub frames_omitted_msg: ColorSpec,
pub header: ColorSpec,
pub msg_loc_prefix: ColorSpec,
pub src_loc: ColorSpec,
pub src_loc_separator: ColorSpec,
pub env_var: ColorSpec,
pub dependency_code: ColorSpec,
pub dependency_code_hash: ColorSpec,
pub crate_code: ColorSpec,
pub crate_code_hash: ColorSpec,
pub selected_src_ln: ColorSpec,
}
impl ColorScheme {
    /// Helper to create a new `ColorSpec` and set a few properties in one go.
fn cs(fg: Option<Color>, intense: bool, bold: bool) -> ColorSpec {
let mut cs = ColorSpec::new();
cs.set_fg(fg);
cs.set_bold(bold);
cs.set_intense(intense);
cs
}
/// The classic `color-backtrace` scheme, as shown in the screenshots.
pub fn classic() -> Self {
Self {
frames_omitted_msg: Self::cs(Some(Color::Cyan), true, false),
header: Self::cs(Some(Color::Red), false, false),
msg_loc_prefix: Self::cs(Some(Color::Cyan), false, false),
src_loc: Self::cs(Some(Color::Magenta), false, false),
src_loc_separator: Self::cs(Some(Color::White), false, false),
env_var: Self::cs(None, false, true),
dependency_code: Self::cs(Some(Color::Green), false, false),
dependency_code_hash: Self::cs(Some(Color::Black), true, false),
crate_code: Self::cs(Some(Color::Red), true, false),
crate_code_hash: Self::cs(Some(Color::Black), true, false),
selected_src_ln: Self::cs(None, false, true),
}
}
}
impl Default for ColorScheme {
fn default() -> Self {
Self::classic()
}
}
#[deprecated(since = "0.4.0", note = "Use `BacktracePrinter` instead.")]
pub type Settings = BacktracePrinter;
/// Pretty-printer for backtraces and [`PanicInfo`](PanicInfo) structs.
#[derive(Clone)]
pub struct BacktracePrinter {
message: String,
verbosity: Verbosity,
lib_verbosity: Verbosity,
strip_function_hash: bool,
is_panic_handler: bool,
colors: ColorScheme,
filters: Vec<Arc<FilterCallback>>,
should_print_addresses: bool,
}
impl Default for BacktracePrinter {
fn default() -> Self {
Self {
verbosity: Verbosity::from_env(),
lib_verbosity: Verbosity::lib_from_env(),
message: "The application panicked (crashed).".to_owned(),
strip_function_hash: false,
colors: ColorScheme::classic(),
is_panic_handler: false,
filters: vec![Arc::new(default_frame_filter)],
should_print_addresses: false,
}
}
}
impl std::fmt::Debug for BacktracePrinter {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
fmt.debug_struct("Settings")
.field("message", &self.message)
.field("verbosity", &self.verbosity)
.field("lib_verbosity", &self.lib_verbosity)
.field("strip_function_hash", &self.strip_function_hash)
.field("is_panic_handler", &self.is_panic_handler)
.field("print_addresses", &self.should_print_addresses)
.field("colors", &self.colors)
.finish()
}
}
/// Builder functions.
impl BacktracePrinter {
/// Alias for `BacktracePrinter::default`.
pub fn new() -> Self {
Self::default()
}
/// Alter the color scheme.
///
/// Defaults to `ColorScheme::classic()`.
pub fn color_scheme(mut self, colors: ColorScheme) -> Self {
self.colors = colors;
self
}
/// Controls the "greeting" message of the panic.
///
/// Defaults to `"The application panicked (crashed)"`.
pub fn message(mut self, message: impl Into<String>) -> Self {
self.message = message.into();
self
}
/// Controls the verbosity level used when installed as panic handler.
///
/// Defaults to `Verbosity::from_env()`.
pub fn verbosity(mut self, v: Verbosity) -> Self {
self.verbosity = v;
self
}
/// Controls the lib verbosity level used when formatting user provided traces.
///
/// Defaults to `Verbosity::lib_from_env()`.
pub fn lib_verbosity(mut self, v: Verbosity) -> Self {
self.lib_verbosity = v;
self
}
/// Controls whether the hash part of functions is stripped.
///
/// Defaults to `false`.
pub fn strip_function_hash(mut self, strip: bool) -> Self {
self.strip_function_hash = strip;
self
}
/// Controls whether addresses (or module offsets if available) should be printed.
///
/// Defaults to `false`.
pub fn print_addresses(mut self, val: bool) -> Self {
self.should_print_addresses = val;
self
}
/// Add a custom filter to the set of frame filters
///
/// Filters are run in the order they are added.
///
/// # Example
///
/// ```rust
/// use color_backtrace::{default_output_stream, BacktracePrinter};
///
/// BacktracePrinter::new()
/// .add_frame_filter(Box::new(|frames| {
/// frames.retain(|x| matches!(&x.name, Some(n) if !n.starts_with("blabla")))
/// }))
/// .install(default_output_stream());
/// ```
pub fn add_frame_filter(mut self, filter: Box<FilterCallback>) -> Self {
self.filters.push(filter.into());
self
}
/// Clears all filters associated with this printer, including the default filter
pub fn clear_frame_filters(mut self) -> Self {
self.filters.clear();
self
}
}
/// Routines for putting the panic printer to use.
impl BacktracePrinter {
    /// Install this `BacktracePrinter` as the global panic handler.
///
/// Output streams can be created via `default_output_stream()` or
/// using any other stream that implements
/// [`termcolor::WriteColor`](termcolor::WriteColor).
pub fn install(self, out: impl WriteColor + Sync + Send + 'static) {
std::panic::set_hook(self.into_panic_handler(out))
}
/// Create a `color_backtrace` panic handler from this panic printer.
///
/// This can be used if you want to combine the handler with other handlers.
pub fn into_panic_handler(
mut self,
out: impl WriteColor + Sync + Send + 'static,
) -> Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send> {
self.is_panic_handler = true;
let out_stream_mutex = Mutex::new(out);
Box::new(move |pi| {
let mut lock = out_stream_mutex.lock().unwrap();
if let Err(e) = self.print_panic_info(pi, &mut *lock) {
// Panicking while handling a panic would send us into a deadlock,
// so we just print the error to stderr instead.
eprintln!("Error while printing panic: {:?}", e);
}
})
}
/// Pretty-prints a [`backtrace::Backtrace`](backtrace::Backtrace) to an output stream.
pub fn print_trace(&self, trace: &backtrace::Backtrace, out: &mut impl WriteColor) -> IOResult {
writeln!(out, "{:━^80}", " BACKTRACE ")?;
// Collect frame info.
let frames: Vec<_> = trace
.frames()
.iter()
.flat_map(|frame| frame.symbols().iter().map(move |sym| (frame.ip(), sym)))
.zip(1usize..)
.map(|((ip, sym), n)| Frame {
name: sym.name().map(|x| x.to_string()),
lineno: sym.lineno(),
filename: sym.filename().map(|x| x.into()),
n,
ip: ip as usize,
_private_ctor: (),
})
.collect();
let mut filtered_frames = frames.iter().collect();
match env::var("COLORBT_SHOW_HIDDEN").ok().as_deref() {
Some("1") | Some("on") | Some("y") => (),
_ => {
for filter in &self.filters {
filter(&mut filtered_frames);
}
}
}
if filtered_frames.is_empty() {
// TODO: Would probably look better centered.
return writeln!(out, "<empty backtrace>");
}
// Don't let filters mess with the order.
filtered_frames.sort_by_key(|x| x.n);
macro_rules! print_hidden {
($n:expr) => {
out.set_color(&self.colors.frames_omitted_msg)?;
let n = $n;
let text = format!(
"{decorator} {n} frame{plural} hidden {decorator}",
n = n,
plural = if n == 1 { "" } else { "s" },
decorator = "⋮",
);
writeln!(out, "{:^80}", text)?;
out.reset()?;
};
}
let mut last_n = 0;
for frame in &filtered_frames {
let frame_delta = frame.n - last_n - 1;
if frame_delta != 0 {
print_hidden!(frame_delta);
}
frame.print(frame.n, out, self)?;
last_n = frame.n;
}
let last_filtered_n = filtered_frames.last().unwrap().n;
let last_unfiltered_n = frames.last().unwrap().n;
if last_filtered_n < last_unfiltered_n {
print_hidden!(last_unfiltered_n - last_filtered_n);
}
Ok(())
}
/// Pretty-print a backtrace to a `String`, using VT100 color codes.
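    ///
    /// A short sketch, capturing a trace with the `backtrace` crate that this
    /// library already builds on:
    ///
    /// ```rust
    /// use color_backtrace::BacktracePrinter;
    ///
    /// let trace = backtrace::Backtrace::new();
    /// let rendered = BacktracePrinter::new().format_trace_to_string(&trace).unwrap();
    /// assert!(rendered.contains("BACKTRACE"));
    /// ```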
pub fn format_trace_to_string(&self, trace: &backtrace::Backtrace) -> IOResult<String> {
// TODO: should we implicitly enable VT100 support on Windows here?
let mut ansi = Ansi::new(vec![]);
self.print_trace(trace, &mut ansi)?;
Ok(String::from_utf8(ansi.into_inner()).unwrap())
}
/// Pretty-prints a [`PanicInfo`](PanicInfo) struct to an output stream.
pub fn print_panic_info(&self, pi: &PanicInfo, out: &mut impl WriteColor) -> IOResult {
out.set_color(&self.colors.header)?;
writeln!(out, "{}", self.message)?;
out.reset()?;
// Print panic message.
let payload = pi
.payload()
.downcast_ref::<String>()
.map(String::as_str)
.or_else(|| pi.payload().downcast_ref::<&str>().cloned())
.unwrap_or("<non string panic payload>");
write!(out, "Message: ")?;
out.set_color(&self.colors.msg_loc_prefix)?;
writeln!(out, "{}", payload)?;
out.reset()?;
// If known, print panic location.
write!(out, "Location: ")?;
if let Some(loc) = pi.location() {
out.set_color(&self.colors.src_loc)?;
write!(out, "{}", loc.file())?;
out.set_color(&self.colors.src_loc_separator)?;
write!(out, ":")?;
out.set_color(&self.colors.src_loc)?;
writeln!(out, "{}", loc.line())?;
out.reset()?;
} else {
writeln!(out, "<unknown>")?;
}
// Print some info on how to increase verbosity.
if self.current_verbosity() == Verbosity::Minimal {
write!(out, "\nBacktrace omitted.\n\nRun with ")?;
out.set_color(&self.colors.env_var)?;
write!(out, "RUST_BACKTRACE=1")?;
out.reset()?;
writeln!(out, " environment variable to display it.")?;
} else {
// This text only makes sense if frames are displayed.
write!(out, "\nRun with ")?;
out.set_color(&self.colors.env_var)?;
write!(out, "COLORBT_SHOW_HIDDEN=1")?;
out.reset()?;
writeln!(out, " environment variable to disable frame filtering.")?;
}
if self.current_verbosity() <= Verbosity::Medium {
write!(out, "Run with ")?;
out.set_color(&self.colors.env_var)?;
write!(out, "RUST_BACKTRACE=full")?;
out.reset()?;
writeln!(out, " to include source snippets.")?;
}
if self.current_verbosity() >= Verbosity::Medium {
self.print_trace(&backtrace::Backtrace::new(), out)?;
}
Ok(())
}
fn current_verbosity(&self) -> Verbosity {
if self.is_panic_handler {
self.verbosity
} else {
self.lib_verbosity
}
}
fn should_print_addresses(&self) -> bool {
self.should_print_addresses
}
}
// ============================================================================================== //
// [Deprecated routines for backward compat] //
// ============================================================================================== //
#[deprecated(since = "0.4.0", note = "Use `BacktracePrinter::print_trace` instead")]
pub fn print_backtrace(trace: &backtrace::Backtrace, s: &mut BacktracePrinter) -> IOResult {
s.print_trace(trace, &mut default_output_stream())
}
#[deprecated(
since = "0.4.0",
note = "Use `BacktracePrinter::print_panic_info` instead`"
)]
pub fn print_panic_info(pi: &PanicInfo, s: &mut BacktracePrinter) -> IOResult {
s.print_panic_info(pi, &mut default_output_stream())
}
// ============================================================================================== // | random_line_split |
|
lib.rs | //! Colorful and clean backtraces on panic.
//!
//! This library aims to make panics a little less painful by nicely colorizing
//! them, skipping over frames of functions called after the panic was already
//! initiated and printing relevant source snippets. Frames of functions in your
//! application are colored in a different color (red) than those of
//! dependencies (green).
//!
//! ### Screenshot
//! 
//!
//! ### Features
//! - Colorize backtraces to be easier on the eyes
//! - Show source snippets if source files are found on disk
//! - Print frames of application code vs dependencies in different color
//! - Hide all the frames after the panic was already initiated
//! - Hide language runtime initialization frames
//!
//! ### Installing the panic handler
//!
//! In your main function, just insert the following snippet. That's it!
//! ```rust
//! color_backtrace::install();
//! ```
//!
//! If you want to customize some settings, you can instead do:
//! ```rust
//! use color_backtrace::{default_output_stream, BacktracePrinter};
//! BacktracePrinter::new().message("Custom message!").install(default_output_stream());
//! ```
//!
//! ### Controlling verbosity
//! The default verbosity is configured via the `RUST_BACKTRACE` environment
//! variable. An unset `RUST_BACKTRACE` corresponds to
//! [minimal](Verbosity::Minimal), `RUST_BACKTRACE=1` to
//! [medium](Verbosity::Medium) and `RUST_BACKTRACE=full` to
//! [full](Verbosity::Full) verbosity levels.
use std::env;
use std::fs::File;
use std::io::{BufRead, BufReader, ErrorKind, IsTerminal as _};
use std::panic::PanicInfo;
use std::path::PathBuf;
use std::sync::{Arc, Mutex};
use termcolor::{Ansi, Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
// Re-export termcolor so users don't have to depend on it themselves.
pub use termcolor;
// ============================================================================================== //
// [Result / Error types] //
// ============================================================================================== //
type IOResult<T = ()> = Result<T, std::io::Error>;
// ============================================================================================== //
// [Verbosity management] //
// ============================================================================================== //
/// Defines how verbose the backtrace is supposed to be.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Verbosity {
/// Print a small message including the panic payload and the panic location.
Minimal,
/// Everything in `Minimal` and additionally print a backtrace.
Medium,
/// Everything in `Medium` plus source snippets for all backtrace locations.
Full,
}
impl Verbosity {
/// Get the verbosity level from `RUST_BACKTRACE` env variable.
pub fn from_env() -> Self {
Self::convert_env(env::var("RUST_BACKTRACE").ok())
}
/// Get the verbosity level from `RUST_LIB_BACKTRACE` env variable,
/// falling back to the `RUST_BACKTRACE`.
pub fn lib_from_env() -> Self {
Self::convert_env(
env::var("RUST_LIB_BACKTRACE")
.or_else(|_| env::var("RUST_BACKTRACE"))
.ok(),
)
}
fn convert_env(env: Option<String>) -> Self {
match env {
Some(ref x) if x == "full" => Verbosity::Full,
Some(_) => Verbosity::Medium,
None => Verbosity::Minimal,
}
}
}
// ============================================================================================== //
// [Panic handler and install logic] //
// ============================================================================================== //
/// Install a `BacktracePrinter` handler with `::default()` settings.
///
/// This currently is a convenience shortcut for writing
///
/// ```rust
/// use color_backtrace::{BacktracePrinter, default_output_stream};
/// BacktracePrinter::default().install(default_output_stream())
/// ```
pub fn install() {
BacktracePrinter::default().install(default_output_stream());
}
/// Create the default output stream.
///
/// If stderr is attached to a tty, this is a colorized stderr, else it's
/// a plain (colorless) stderr.
pub fn default_output_stream() -> Box<StandardStream> {
Box::new(StandardStream::stderr(if std::io::stderr().is_terminal() {
ColorChoice::Always
} else {
ColorChoice::Never
}))
}
#[deprecated(
since = "0.4.0",
note = "Use `BacktracePrinter::into_panic_handler()` instead."
)]
pub fn create_panic_handler(
printer: BacktracePrinter,
) -> Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send> {
let out_stream_mutex = Mutex::new(default_output_stream());
Box::new(move |pi| {
let mut lock = out_stream_mutex.lock().unwrap();
if let Err(e) = printer.print_panic_info(pi, &mut *lock) {
// Panicking while handling a panic would send us into a deadlock,
// so we just print the error to stderr instead.
eprintln!("Error while printing panic: {:?}", e);
}
})
}
#[deprecated(since = "0.4.0", note = "Use `BacktracePrinter::install()` instead.")]
pub fn install_with_settings(printer: BacktracePrinter) {
std::panic::set_hook(printer.into_panic_handler(default_output_stream()))
}
// ============================================================================================== //
// [Backtrace frame] //
// ============================================================================================== //
pub type FilterCallback = dyn Fn(&mut Vec<&Frame>) + Send + Sync + 'static;
#[derive(Debug)]
pub struct Frame {
pub n: usize,
pub name: Option<String>,
pub lineno: Option<u32>,
pub filename: Option<PathBuf>,
pub ip: usize,
_private_ctor: (),
}
impl Frame {
/// Heuristically determine whether the frame is likely to be part of a
/// dependency.
///
/// If it fails to detect some patterns in your code base, feel free to drop
/// an issue / a pull request!
fn is_dependency_code(&self) -> bool {
const SYM_PREFIXES: &[&str] = &[
"std::",
"core::",
"backtrace::backtrace::",
"_rust_begin_unwind",
"color_traceback::",
"__rust_",
"___rust_",
"__pthread",
"_main",
"main",
"__scrt_common_main_seh",
"BaseThreadInitThunk",
"_start",
"__libc_start_main",
"start_thread",
];
// Inspect name.
if let Some(ref name) = self.name {
if SYM_PREFIXES.iter().any(|x| name.starts_with(x)) {
return true;
}
}
const FILE_PREFIXES: &[&str] = &[
"/rustc/",
"src/libstd/",
"src/libpanic_unwind/",
"src/libtest/",
];
// Inspect filename.
if let Some(ref filename) = self.filename {
let filename = filename.to_string_lossy();
if FILE_PREFIXES.iter().any(|x| filename.starts_with(x))
|| filename.contains("/.cargo/registry/src/")
{
return true;
}
}
false
}
/// Heuristically determine whether a frame is likely to be a post panic
/// frame.
///
    /// Post panic frames are frames of functions called after the actual panic
/// is already in progress and don't contain any useful information for a
/// reader of the backtrace.
fn is_post_panic_code(&self) -> bool {
const SYM_PREFIXES: &[&str] = &[
"_rust_begin_unwind",
"rust_begin_unwind",
"core::result::unwrap_failed",
"core::option::expect_none_failed",
"core::panicking::panic_fmt",
"color_backtrace::create_panic_handler",
"std::panicking::begin_panic",
"begin_panic_fmt",
"backtrace::capture",
];
match self.name.as_ref() {
Some(name) => SYM_PREFIXES.iter().any(|x| name.starts_with(x)),
None => false,
}
}
/// Heuristically determine whether a frame is likely to be part of language
/// runtime.
fn is_runtime_init_code(&self) -> bool {
const SYM_PREFIXES: &[&str] = &[
"std::rt::lang_start::",
"test::run_test::run_test_inner::",
"std::sys_common::backtrace::__rust_begin_short_backtrace",
];
let (name, file) = match (self.name.as_ref(), self.filename.as_ref()) {
(Some(name), Some(filename)) => (name, filename.to_string_lossy()),
_ => return false,
};
if SYM_PREFIXES.iter().any(|x| name.starts_with(x)) {
return true;
}
// For Linux, this is the best rule for skipping test init I found.
if name == "{{closure}}" && file == "src/libtest/lib.rs" {
return true;
}
false
}
fn print_source_if_avail(&self, mut out: impl WriteColor, s: &BacktracePrinter) -> IOResult {
let (lineno, filename) = match (self.lineno, self.filename.as_ref()) {
(Some(a), Some(b)) => (a, b),
// Without a line number and file name, we can't sensibly proceed.
_ => return Ok(()),
};
let file = match File::open(filename) {
Ok(file) => file,
Err(ref e) if e.kind() == ErrorKind::NotFound => return Ok(()),
e @ Err(_) => e?,
};
// Extract relevant lines.
let reader = BufReader::new(file);
let start_line = lineno - 2.min(lineno - 1);
let surrounding_src = reader.lines().skip(start_line as usize - 1).take(5);
for (line, cur_line_no) in surrounding_src.zip(start_line..) {
if cur_line_no == lineno {
// Print actual source line with brighter color.
out.set_color(&s.colors.selected_src_ln)?;
writeln!(out, "{:>8} > {}", cur_line_no, line?)?;
out.reset()?;
} else {
writeln!(out, "{:>8} │ {}", cur_line_no, line?)?;
}
}
Ok(())
}
/// Get the module's name by walking /proc/self/maps
#[cfg(all(
feature = "resolve-modules",
unix,
not(any(target_os = "macos", target_os = "ios"))
))]
fn module_info(&self) -> Option<(String, usize)> {
use regex::Regex;
use std::path::Path;
let re = Regex::new(
r"(?x)
^
(?P<start>[0-9a-f]{8,16})
-
(?P<end>[0-9a-f]{8,16})
\s
(?P<perm>[-rwxp]{4})
\s
(?P<offset>[0-9a-f]{8})
\s
[0-9a-f]+:[0-9a-f]+
\s
[0-9]+
\s+
(?P<path>.*)
$
",
)
.unwrap();
let mapsfile = File::open("/proc/self/maps").expect("Unable to open /proc/self/maps");
for line in BufReader::new(mapsfile).lines() {
let line = line.unwrap();
if let Some(caps) = re.captures(&line) {
let (start, end, path) = (
usize::from_str_radix(caps.name("start").unwrap().as_str(), 16).unwrap(),
usize::from_str_radix(caps.name("end").unwrap().as_str(), 16).unwrap(),
caps.name("path").unwrap().as_str().to_string(),
);
if self.ip >= start && self.ip < end {
return if let Some(filename) = Path::new(&path).file_name() {
Some((filename.to_str().unwrap().to_string(), start))
} else {
None
};
}
}
}
None
}
#[cfg(not(all(
feature = "resolve-modules",
unix,
not(any(target_os = "macos", target_os = "ios"))
)))]
fn module_info(&self) -> Option<(String, usize)> {
None
}
fn print(&self, i: usize, out: &mut impl WriteColor, s: &BacktracePrinter) -> IOResult {
let is_dependency_code = self.is_dependency_code();
// Print frame index.
write!(out, "{:>2}: ", i)?;
if s.should_print_addresses() {
if let Some((module_name, module_base)) = self.module_info() {
write!(out, "{}:0x{:08x} - ", module_name, self.ip - module_base)?;
} else {
write!(out, "0x{:016x} - ", self.ip)?;
}
}
// Does the function have a hash suffix?
// (dodging a dep on the regex crate here)
let name = self
.name
.as_ref()
.map(|s| s.as_str())
.unwrap_or("<unknown>");
let has_hash_suffix = name.len() > 19
&& &name[name.len() - 19..name.len() - 16] == "::h"
&& name[name.len() - 16..].chars().all(|x| x.is_digit(16));
// Print function name.
out.set_color(if is_dependency_code {
| lse {
&s.colors.crate_code
})?;
if has_hash_suffix {
write!(out, "{}", &name[..name.len() - 19])?;
if s.strip_function_hash {
writeln!(out)?;
} else {
out.set_color(if is_dependency_code {
&s.colors.dependency_code_hash
} else {
&s.colors.crate_code_hash
})?;
writeln!(out, "{}", &name[name.len() - 19..])?;
}
} else {
writeln!(out, "{}", name)?;
}
out.reset()?;
// Print source location, if known.
if let Some(ref file) = self.filename {
let filestr = file.to_str().unwrap_or("<bad utf8>");
let lineno = self
.lineno
.map_or("<unknown line>".to_owned(), |x| x.to_string());
writeln!(out, " at {}:{}", filestr, lineno)?;
} else {
writeln!(out, " at <unknown source file>")?;
}
// Maybe print source.
if s.current_verbosity() >= Verbosity::Full {
self.print_source_if_avail(out, s)?;
}
Ok(())
}
}
/// The default frame filter. Heuristically determines whether a frame is likely to be an
/// uninteresting frame. This filters out post-panic frames, runtime-init frames, and dependency
/// code.
pub fn default_frame_filter(frames: &mut Vec<&Frame>) {
let top_cutoff = frames
.iter()
.rposition(|x| x.is_post_panic_code())
.map(|x| x + 2) // indices are 1 based
.unwrap_or(0);
let bottom_cutoff = frames
.iter()
.position(|x| x.is_runtime_init_code())
.unwrap_or_else(|| frames.len());
let rng = top_cutoff..=bottom_cutoff;
frames.retain(|x| rng.contains(&x.n))
}
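// Illustrative walk-through of the cutoffs above, in terms of the 1-based
// `Frame::n` values: if the last post-panic frame is n = 4 and the first
// runtime-init frame is n = 17, then `top_cutoff` is 5, `bottom_cutoff` is 16,
// and only the user's own frames 5..=16 survive the `retain` call.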
// ============================================================================================== //
// [BacktracePrinter] //
// ============================================================================================== //
/// Color scheme definition.
#[derive(Debug, Clone)]
pub struct ColorScheme {
pub frames_omitted_msg: ColorSpec,
pub header: ColorSpec,
pub msg_loc_prefix: ColorSpec,
pub src_loc: ColorSpec,
pub src_loc_separator: ColorSpec,
pub env_var: ColorSpec,
pub dependency_code: ColorSpec,
pub dependency_code_hash: ColorSpec,
pub crate_code: ColorSpec,
pub crate_code_hash: ColorSpec,
pub selected_src_ln: ColorSpec,
}
impl ColorScheme {
/// Helper to create a new `ColorSpec` & set a few properties in one wash.
fn cs(fg: Option<Color>, intense: bool, bold: bool) -> ColorSpec {
let mut cs = ColorSpec::new();
cs.set_fg(fg);
cs.set_bold(bold);
cs.set_intense(intense);
cs
}
/// The classic `color-backtrace` scheme, as shown in the screenshots.
pub fn classic() -> Self {
Self {
frames_omitted_msg: Self::cs(Some(Color::Cyan), true, false),
header: Self::cs(Some(Color::Red), false, false),
msg_loc_prefix: Self::cs(Some(Color::Cyan), false, false),
src_loc: Self::cs(Some(Color::Magenta), false, false),
src_loc_separator: Self::cs(Some(Color::White), false, false),
env_var: Self::cs(None, false, true),
dependency_code: Self::cs(Some(Color::Green), false, false),
dependency_code_hash: Self::cs(Some(Color::Black), true, false),
crate_code: Self::cs(Some(Color::Red), true, false),
crate_code_hash: Self::cs(Some(Color::Black), true, false),
selected_src_ln: Self::cs(None, false, true),
}
}
}
impl Default for ColorScheme {
fn default() -> Self {
Self::classic()
}
}
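// A minimal sketch of supplying a customized scheme (illustrative only; the
// color values are arbitrary, but every item used here is either defined in
// this file or re-exported from `termcolor`):
//
//     let mut scheme = ColorScheme::classic();
//     let mut bright = ColorSpec::new();
//     bright.set_fg(Some(Color::Yellow)).set_bold(true);
//     scheme.crate_code = bright;
//     BacktracePrinter::new()
//         .color_scheme(scheme)
//         .install(default_output_stream());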
#[deprecated(since = "0.4.0", note = "Use `BacktracePrinter` instead.")]
pub type Settings = BacktracePrinter;
/// Pretty-printer for backtraces and [`PanicInfo`](PanicInfo) structs.
#[derive(Clone)]
pub struct BacktracePrinter {
message: String,
verbosity: Verbosity,
lib_verbosity: Verbosity,
strip_function_hash: bool,
is_panic_handler: bool,
colors: ColorScheme,
filters: Vec<Arc<FilterCallback>>,
should_print_addresses: bool,
}
impl Default for BacktracePrinter {
fn default() -> Self {
Self {
verbosity: Verbosity::from_env(),
lib_verbosity: Verbosity::lib_from_env(),
message: "The application panicked (crashed).".to_owned(),
strip_function_hash: false,
colors: ColorScheme::classic(),
is_panic_handler: false,
filters: vec![Arc::new(default_frame_filter)],
should_print_addresses: false,
}
}
}
impl std::fmt::Debug for BacktracePrinter {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
fmt.debug_struct("Settings")
.field("message", &self.message)
.field("verbosity", &self.verbosity)
.field("lib_verbosity", &self.lib_verbosity)
.field("strip_function_hash", &self.strip_function_hash)
.field("is_panic_handler", &self.is_panic_handler)
.field("print_addresses", &self.should_print_addresses)
.field("colors", &self.colors)
.finish()
}
}
/// Builder functions.
impl BacktracePrinter {
/// Alias for `BacktracePrinter::default`.
pub fn new() -> Self {
Self::default()
}
/// Alter the color scheme.
///
/// Defaults to `ColorScheme::classic()`.
pub fn color_scheme(mut self, colors: ColorScheme) -> Self {
self.colors = colors;
self
}
/// Controls the "greeting" message of the panic.
///
/// Defaults to `"The application panicked (crashed)"`.
pub fn message(mut self, message: impl Into<String>) -> Self {
self.message = message.into();
self
}
/// Controls the verbosity level used when installed as panic handler.
///
/// Defaults to `Verbosity::from_env()`.
pub fn verbosity(mut self, v: Verbosity) -> Self {
self.verbosity = v;
self
}
/// Controls the lib verbosity level used when formatting user provided traces.
///
/// Defaults to `Verbosity::lib_from_env()`.
pub fn lib_verbosity(mut self, v: Verbosity) -> Self {
self.lib_verbosity = v;
self
}
/// Controls whether the hash part of functions is stripped.
///
/// Defaults to `false`.
pub fn strip_function_hash(mut self, strip: bool) -> Self {
self.strip_function_hash = strip;
self
}
/// Controls whether addresses (or module offsets if available) should be printed.
///
/// Defaults to `false`.
pub fn print_addresses(mut self, val: bool) -> Self {
self.should_print_addresses = val;
self
}
/// Add a custom filter to the set of frame filters
///
/// Filters are run in the order they are added.
///
/// # Example
///
/// ```rust
/// use color_backtrace::{default_output_stream, BacktracePrinter};
///
/// BacktracePrinter::new()
/// .add_frame_filter(Box::new(|frames| {
/// frames.retain(|x| matches!(&x.name, Some(n) if !n.starts_with("blabla")))
/// }))
/// .install(default_output_stream());
/// ```
pub fn add_frame_filter(mut self, filter: Box<FilterCallback>) -> Self {
self.filters.push(filter.into());
self
}
/// Clears all filters associated with this printer, including the default filter
pub fn clear_frame_filters(mut self) -> Self {
self.filters.clear();
self
}
}
/// Routines for putting the panic printer to use.
impl BacktracePrinter {
/// Install the `color_backtrace` handler with default settings.
///
/// Output streams can be created via `default_output_stream()` or
/// using any other stream that implements
/// [`termcolor::WriteColor`](termcolor::WriteColor).
pub fn install(self, out: impl WriteColor + Sync + Send + 'static) {
std::panic::set_hook(self.into_panic_handler(out))
}
/// Create a `color_backtrace` panic handler from this panic printer.
///
/// This can be used if you want to combine the handler with other handlers.
pub fn into_panic_handler(
mut self,
out: impl WriteColor + Sync + Send + 'static,
) -> Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send> {
self.is_panic_handler = true;
let out_stream_mutex = Mutex::new(out);
Box::new(move |pi| {
let mut lock = out_stream_mutex.lock().unwrap();
if let Err(e) = self.print_panic_info(pi, &mut *lock) {
// Panicking while handling a panic would send us into a deadlock,
// so we just print the error to stderr instead.
eprintln!("Error while printing panic: {:?}", e);
}
})
}
/// Pretty-prints a [`backtrace::Backtrace`](backtrace::Backtrace) to an output stream.
pub fn print_trace(&self, trace: &backtrace::Backtrace, out: &mut impl WriteColor) -> IOResult {
writeln!(out, "{:━^80}", " BACKTRACE ")?;
// Collect frame info.
let frames: Vec<_> = trace
.frames()
.iter()
.flat_map(|frame| frame.symbols().iter().map(move |sym| (frame.ip(), sym)))
.zip(1usize..)
.map(|((ip, sym), n)| Frame {
name: sym.name().map(|x| x.to_string()),
lineno: sym.lineno(),
filename: sym.filename().map(|x| x.into()),
n,
ip: ip as usize,
_private_ctor: (),
})
.collect();
let mut filtered_frames = frames.iter().collect();
match env::var("COLORBT_SHOW_HIDDEN").ok().as_deref() {
Some("1") | Some("on") | Some("y") => (),
_ => {
for filter in &self.filters {
filter(&mut filtered_frames);
}
}
}
if filtered_frames.is_empty() {
// TODO: Would probably look better centered.
return writeln!(out, "<empty backtrace>");
}
// Don't let filters mess with the order.
filtered_frames.sort_by_key(|x| x.n);
macro_rules! print_hidden {
($n:expr) => {
out.set_color(&self.colors.frames_omitted_msg)?;
let n = $n;
let text = format!(
"{decorator} {n} frame{plural} hidden {decorator}",
n = n,
plural = if n == 1 { "" } else { "s" },
decorator = "⋮",
);
writeln!(out, "{:^80}", text)?;
out.reset()?;
};
}
let mut last_n = 0;
for frame in &filtered_frames {
let frame_delta = frame.n - last_n - 1;
if frame_delta != 0 {
print_hidden!(frame_delta);
}
frame.print(frame.n, out, self)?;
last_n = frame.n;
}
let last_filtered_n = filtered_frames.last().unwrap().n;
let last_unfiltered_n = frames.last().unwrap().n;
if last_filtered_n < last_unfiltered_n {
print_hidden!(last_unfiltered_n - last_filtered_n);
}
Ok(())
}
/// Pretty-print a backtrace to a `String`, using VT100 color codes.
pub fn format_trace_to_string(&self, trace: &backtrace::Backtrace) -> IOResult<String> {
// TODO: should we implicitly enable VT100 support on Windows here?
let mut ansi = Ansi::new(vec![]);
self.print_trace(trace, &mut ansi)?;
Ok(String::from_utf8(ansi.into_inner()).unwrap())
}
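    // A small usage sketch for rendering a trace from library code rather than
    // from a panic hook (illustrative; assumes a caller that can propagate the
    // `IOResult` with `?`):
    //
    //     let printer = BacktracePrinter::new();
    //     let trace = backtrace::Backtrace::new();
    //     eprintln!("{}", printer.format_trace_to_string(&trace)?);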
/// Pretty-prints a [`PanicInfo`](PanicInfo) struct to an output stream.
pub fn print_panic_info(&self, pi: &PanicInfo, out: &mut impl WriteColor) -> IOResult {
out.set_color(&self.colors.header)?;
writeln!(out, "{}", self.message)?;
out.reset()?;
// Print panic message.
let payload = pi
.payload()
.downcast_ref::<String>()
.map(String::as_str)
.or_else(|| pi.payload().downcast_ref::<&str>().cloned())
.unwrap_or("<non string panic payload>");
write!(out, "Message: ")?;
out.set_color(&self.colors.msg_loc_prefix)?;
writeln!(out, "{}", payload)?;
out.reset()?;
// If known, print panic location.
write!(out, "Location: ")?;
if let Some(loc) = pi.location() {
out.set_color(&self.colors.src_loc)?;
write!(out, "{}", loc.file())?;
out.set_color(&self.colors.src_loc_separator)?;
write!(out, ":")?;
out.set_color(&self.colors.src_loc)?;
writeln!(out, "{}", loc.line())?;
out.reset()?;
} else {
writeln!(out, "<unknown>")?;
}
// Print some info on how to increase verbosity.
if self.current_verbosity() == Verbosity::Minimal {
write!(out, "\nBacktrace omitted.\n\nRun with ")?;
out.set_color(&self.colors.env_var)?;
write!(out, "RUST_BACKTRACE=1")?;
out.reset()?;
writeln!(out, " environment variable to display it.")?;
} else {
// This text only makes sense if frames are displayed.
write!(out, "\nRun with ")?;
out.set_color(&self.colors.env_var)?;
write!(out, "COLORBT_SHOW_HIDDEN=1")?;
out.reset()?;
writeln!(out, " environment variable to disable frame filtering.")?;
}
if self.current_verbosity() <= Verbosity::Medium {
write!(out, "Run with ")?;
out.set_color(&self.colors.env_var)?;
write!(out, "RUST_BACKTRACE=full")?;
out.reset()?;
writeln!(out, " to include source snippets.")?;
}
if self.current_verbosity() >= Verbosity::Medium {
self.print_trace(&backtrace::Backtrace::new(), out)?;
}
Ok(())
}
fn current_verbosity(&self) -> Verbosity {
if self.is_panic_handler {
self.verbosity
} else {
self.lib_verbosity
}
}
fn should_print_addresses(&self) -> bool {
self.should_print_addresses
}
}
// ============================================================================================== //
// [Deprecated routines for backward compat] //
// ============================================================================================== //
#[deprecated(since = "0.4.0", note = "Use `BacktracePrinter::print_trace` instead`")]
pub fn print_backtrace(trace: &backtrace::Backtrace, s: &mut BacktracePrinter) -> IOResult {
s.print_trace(trace, &mut default_output_stream())
}
#[deprecated(
since = "0.4.0",
note = "Use `BacktracePrinter::print_panic_info` instead`"
)]
pub fn print_panic_info(pi: &PanicInfo, s: &mut BacktracePrinter) -> IOResult {
s.print_panic_info(pi, &mut default_output_stream())
}
// ============================================================================================== //
| &s.colors.dependency_code
} e | conditional_block |
lib.rs | //! Colorful and clean backtraces on panic.
//!
//! This library aims to make panics a little less painful by nicely colorizing
//! them, skipping over frames of functions called after the panic was already
//! initiated and printing relevant source snippets. Frames of functions in your
//! application are colored in a different color (red) than those of
//! dependencies (green).
//!
//! ### Screenshot
//! 
//!
//! ### Features
//! - Colorize backtraces to be easier on the eyes
//! - Show source snippets if source files are found on disk
//! - Print frames of application code vs dependencies in different colors
//! - Hide all the frames after the panic was already initiated
//! - Hide language runtime initialization frames
//!
//! ### Installing the panic handler
//!
//! In your main function, just insert the following snippet. That's it!
//! ```rust
//! color_backtrace::install();
//! ```
//!
//! If you want to customize some settings, you can instead do:
//! ```rust
//! use color_backtrace::{default_output_stream, BacktracePrinter};
//! BacktracePrinter::new().message("Custom message!").install(default_output_stream());
//! ```
//!
//! ### Controlling verbosity
//! The default verbosity is configured via the `RUST_BACKTRACE` environment
//! variable. An unset `RUST_BACKTRACE` corresponds to
//! [minimal](Verbosity::Minimal), `RUST_BACKTRACE=1` to
//! [medium](Verbosity::Medium) and `RUST_BACKTRACE=full` to
//! [full](Verbosity::Full) verbosity levels.
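//!
//! Verbosity can also be set programmatically instead of through the
//! environment; a minimal sketch using only items defined in this crate:
//! ```rust
//! use color_backtrace::{default_output_stream, BacktracePrinter, Verbosity};
//! BacktracePrinter::new()
//!     .verbosity(Verbosity::Full)
//!     .install(default_output_stream());
//! ```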
use std::env;
use std::fs::File;
use std::io::{BufRead, BufReader, ErrorKind, IsTerminal as _};
use std::panic::PanicInfo;
use std::path::PathBuf;
use std::sync::{Arc, Mutex};
use termcolor::{Ansi, Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
// Re-export termcolor so users don't have to depend on it themselves.
pub use termcolor;
// ============================================================================================== //
// [Result / Error types] //
// ============================================================================================== //
type IOResult<T = ()> = Result<T, std::io::Error>;
// ============================================================================================== //
// [Verbosity management] //
// ============================================================================================== //
/// Defines how verbose the backtrace is supposed to be.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Verbosity {
/// Print a small message including the panic payload and the panic location.
Minimal,
/// Everything in `Minimal` and additionally print a backtrace.
Medium,
/// Everything in `Medium` plus source snippets for all backtrace locations.
Full,
}
impl Verbosity {
/// Get the verbosity level from `RUST_BACKTRACE` env variable.
pub fn from_env() -> Self {
Self::convert_env(env::var("RUST_BACKTRACE").ok())
}
/// Get the verbosity level from `RUST_LIB_BACKTRACE` env variable,
/// falling back to the `RUST_BACKTRACE`.
pub fn lib_from_env() -> Self {
Self::convert_env(
env::var("RUST_LIB_BACKTRACE")
.or_else(|_| env::var("RUST_BACKTRACE"))
.ok(),
)
}
fn convert_env(env: Option<String>) -> Self {
match env {
Some(ref x) if x == "full" => Verbosity::Full,
Some(_) => Verbosity::Medium,
None => Verbosity::Minimal,
}
}
}
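// Resulting mapping of `convert_env` above: RUST_BACKTRACE unset -> Minimal,
// RUST_BACKTRACE=full -> Full, any other value (e.g. RUST_BACKTRACE=1) -> Medium.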
// ============================================================================================== //
// [Panic handler and install logic] //
// ============================================================================================== //
/// Install a `BacktracePrinter` handler with `::default()` settings.
///
/// This currently is a convenience shortcut for writing
///
/// ```rust
/// use color_backtrace::{BacktracePrinter, default_output_stream};
/// BacktracePrinter::default().install(default_output_stream())
/// ```
pub fn install() {
BacktracePrinter::default().install(default_output_stream());
}
/// Create the default output stream.
///
/// If stderr is attached to a tty, this is a colorized stderr, else it's
/// a plain (colorless) stderr.
pub fn default_output_stream() -> Box<StandardStream> {
Box::new(StandardStream::stderr(if std::io::stderr().is_terminal() {
ColorChoice::Always
} else {
ColorChoice::Never
}))
}
#[deprecated(
since = "0.4.0",
note = "Use `BacktracePrinter::into_panic_handler()` instead."
)]
pub fn create_panic_handler(
printer: BacktracePrinter,
) -> Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send> {
let out_stream_mutex = Mutex::new(default_output_stream());
Box::new(move |pi| {
let mut lock = out_stream_mutex.lock().unwrap();
if let Err(e) = printer.print_panic_info(pi, &mut *lock) {
// Panicking while handling a panic would send us into a deadlock,
// so we just print the error to stderr instead.
eprintln!("Error while printing panic: {:?}", e);
}
})
}
#[deprecated(since = "0.4.0", note = "Use `BacktracePrinter::install()` instead.")]
pub fn install_with_settings(printer: BacktracePrinter) {
std::panic::set_hook(printer.into_panic_handler(default_output_stream()))
}
// ============================================================================================== //
// [Backtrace frame] //
// ============================================================================================== //
pub type FilterCallback = dyn Fn(&mut Vec<&Frame>) + Send + Sync + 'static;
#[derive(Debug)]
pub struct Frame {
pub n: usize,
pub name: Option<String>,
pub lineno: Option<u32>,
pub filename: Option<PathBuf>,
pub ip: usize,
_private_ctor: (),
}
impl Frame {
/// Heuristically determine whether the frame is likely to be part of a
/// dependency.
///
/// If it fails to detect some patterns in your code base, feel free to drop
/// an issue / a pull request!
fn is_dependency_code(&self) -> bool {
const SYM_PREFIXES: &[&str] = &[
"std::",
"core::",
"backtrace::backtrace::",
"_rust_begin_unwind",
"color_traceback::",
"__rust_",
"___rust_",
"__pthread",
"_main",
"main",
"__scrt_common_main_seh",
"BaseThreadInitThunk",
"_start",
"__libc_start_main",
"start_thread",
];
// Inspect name.
if let Some(ref name) = self.name {
if SYM_PREFIXES.iter().any(|x| name.starts_with(x)) {
return true;
}
}
const FILE_PREFIXES: &[&str] = &[
"/rustc/",
"src/libstd/",
"src/libpanic_unwind/",
"src/libtest/",
];
// Inspect filename.
if let Some(ref filename) = self.filename {
let filename = filename.to_string_lossy();
if FILE_PREFIXES.iter().any(|x| filename.starts_with(x))
|| filename.contains("/.cargo/registry/src/")
{
return true;
}
}
false
}
/// Heuristically determine whether a frame is likely to be a post panic
/// frame.
///
    /// Post panic frames are frames of functions called after the actual panic
/// is already in progress and don't contain any useful information for a
/// reader of the backtrace.
fn is_post_panic_code(&self) -> bool {
const SYM_PREFIXES: &[&str] = &[
"_rust_begin_unwind",
"rust_begin_unwind",
"core::result::unwrap_failed",
"core::option::expect_none_failed",
"core::panicking::panic_fmt",
"color_backtrace::create_panic_handler",
"std::panicking::begin_panic",
"begin_panic_fmt",
"backtrace::capture",
];
match self.name.as_ref() {
Some(name) => SYM_PREFIXES.iter().any(|x| name.starts_with(x)),
None => false,
}
}
    /// Heuristically determine whether a frame is likely to be part of the language
/// runtime.
fn is_runtime_init_code(&self) -> bool {
const SYM_PREFIXES: &[&str] = &[
"std::rt::lang_start::",
"test::run_test::run_test_inner::",
"std::sys_common::backtrace::__rust_begin_short_backtrace",
];
let (name, file) = match (self.name.as_ref(), self.filename.as_ref()) {
(Some(name), Some(filename)) => (name, filename.to_string_lossy()),
_ => return false,
};
if SYM_PREFIXES.iter().any(|x| name.starts_with(x)) {
return true;
}
// For Linux, this is the best rule for skipping test init I found.
if name == "{{closure}}" && file == "src/libtest/lib.rs" {
return true;
}
false
}
fn print_source_if_avail(&self, mut out: impl WriteColor, s: &BacktracePrinter) -> IOResult {
let (lineno, filename) = match (self.lineno, self.filename.as_ref()) {
(Some(a), Some(b)) => (a, b),
// Without a line number and file name, we can't sensibly proceed.
_ => return Ok(()),
};
let file = match File::open(filename) {
Ok(file) => file,
Err(ref e) if e.kind() == ErrorKind::NotFound => return Ok(()),
e @ Err(_) => e?,
};
// Extract relevant lines.
let reader = BufReader::new(file);
let start_line = lineno - 2.min(lineno - 1);
let surrounding_src = reader.lines().skip(start_line as usize - 1).take(5);
for (line, cur_line_no) in surrounding_src.zip(start_line..) {
if cur_line_no == lineno {
// Print actual source line with brighter color.
out.set_color(&s.colors.selected_src_ln)?;
writeln!(out, "{:>8} > {}", cur_line_no, line?)?;
out.reset()?;
} else {
writeln!(out, "{:>8} │ {}", cur_line_no, line?)?;
}
}
Ok(())
}
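    // Illustrative output of the loop above for a panic at line 10 (`>` marks
    // the panicking line, `│` the surrounding context; spacing approximate):
    //
    //        8 │     let x = compute();
    //        9 │     let y = x + 1;
    //       10 >     panic!("boom");
    //       11 │     println!("never reached");
    //       12 │ }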
/// Get the module's name by walking /proc/self/maps
#[cfg(all(
feature = "resolve-modules",
unix,
not(any(target_os = "macos", target_os = "ios"))
))]
fn module_info(&self) -> Option<(String, usize)> {
use regex::Regex;
use std::path::Path;
let re = Regex::new(
r"(?x)
^
(?P<start>[0-9a-f]{8,16})
-
(?P<end>[0-9a-f]{8,16})
\s
(?P<perm>[-rwxp]{4})
\s
(?P<offset>[0-9a-f]{8})
\s
[0-9a-f]+:[0-9a-f]+
\s
[0-9]+
\s+
(?P<path>.*)
$
",
)
.unwrap();
let mapsfile = File::open("/proc/self/maps").expect("Unable to open /proc/self/maps");
for line in BufReader::new(mapsfile).lines() {
let line = line.unwrap();
if let Some(caps) = re.captures(&line) {
let (start, end, path) = (
usize::from_str_radix(caps.name("start").unwrap().as_str(), 16).unwrap(),
usize::from_str_radix(caps.name("end").unwrap().as_str(), 16).unwrap(),
caps.name("path").unwrap().as_str().to_string(),
);
if self.ip >= start && self.ip < end {
return if let Some(filename) = Path::new(&path).file_name() {
Some((filename.to_str().unwrap().to_string(), start))
} else {
None
};
}
}
}
None
}
#[cfg(not(all(
feature = "resolve-modules",
unix,
not(any(target_os = "macos", target_os = "ios"))
)))]
fn module_info(&self) -> Option<(String, usize)> {
None
}
fn print(&self, i: usize, out: &mut impl WriteColor, s: &BacktracePrinter) -> IOResult {
let is_dependency_code = self.is_dependency_code();
// Print frame index.
write!(out, "{:>2}: ", i)?;
if s.should_print_addresses() {
if let Some((module_name, module_base)) = self.module_info() {
write!(out, "{}:0x{:08x} - ", module_name, self.ip - module_base)?;
} else {
write!(out, "0x{:016x} - ", self.ip)?;
}
}
// Does the function have a hash suffix?
// (dodging a dep on the regex crate here)
let name = self
.name
.as_ref()
.map(|s| s.as_str())
.unwrap_or("<unknown>");
let has_hash_suffix = name.len() > 19
&& &name[name.len() - 19..name.len() - 16] == "::h"
&& name[name.len() - 16..].chars().all(|x| x.is_digit(16));
// Print function name.
out.set_color(if is_dependency_code {
&s.colors.dependency_code
} else {
&s.colors.crate_code
})?;
if has_hash_suffix {
write!(out, "{}", &name[..name.len() - 19])?;
if s.strip_function_hash {
writeln!(out)?;
} else {
out.set_color(if is_dependency_code {
&s.colors.dependency_code_hash
} else {
&s.colors.crate_code_hash
})?;
writeln!(out, "{}", &name[name.len() - 19..])?;
}
} else {
writeln!(out, "{}", name)?;
}
out.reset()?;
// Print source location, if known.
if let Some(ref file) = self.filename {
let filestr = file.to_str().unwrap_or("<bad utf8>");
let lineno = self
.lineno
.map_or("<unknown line>".to_owned(), |x| x.to_string());
writeln!(out, " at {}:{}", filestr, lineno)?;
} else {
writeln!(out, " at <unknown source file>")?;
}
// Maybe print source.
if s.current_verbosity() >= Verbosity::Full {
self.print_source_if_avail(out, s)?;
}
Ok(())
}
}
/// The default frame filter. Heuristically determines whether a frame is likely to be an
/// uninteresting frame. This filters out post-panic frames, runtime-init frames, and dependency
/// code.
pub fn default_frame_filter(frames: &mut Vec<&Frame>) {
let top_cutoff = frames
.iter()
.rposition(|x| x.is_post_panic_code())
.map(|x| x + 2) // indices are 1 based
.unwrap_or(0);
let bottom_cutoff = frames
.iter()
.position(|x| x.is_runtime_init_code())
.unwrap_or_else(|| frames.len());
let rng = top_cutoff..=bottom_cutoff;
frames.retain(|x| rng.contains(&x.n))
}
// ============================================================================================== //
// [BacktracePrinter] //
// ============================================================================================== //
/// Color scheme definition.
#[derive(Debug, Clone)]
pub struct ColorScheme {
pub frames_omitted_msg: ColorSpec,
pub header: ColorSpec,
pub msg_loc_prefix: ColorSpec,
pub src_loc: ColorSpec,
pub src_loc_separator: ColorSpec,
pub env_var: ColorSpec,
pub dependency_code: ColorSpec,
pub dependency_code_hash: ColorSpec,
pub crate_code: ColorSpec,
pub crate_code_hash: ColorSpec,
pub selected_src_ln: ColorSpec,
}
impl ColorScheme {
/// Helper to create a new `ColorSpec` & set a few properties in one wash.
fn cs(fg: Option<Color>, intense: bool, bold: bool) -> ColorSpec {
let mut cs = ColorSpec::new();
cs.set_fg(fg);
cs.set_bold(bold);
cs.set_intense(intense);
cs
}
/// The classic `color-backtrace` scheme, as shown in the screenshots.
pub fn classic() -> Self {
Self {
frames_omitted_msg: Self::cs(Some(Color::Cyan), true, false),
header: Self::cs(Some(Color::Red), false, false),
msg_loc_prefix: Self::cs(Some(Color::Cyan), false, false),
src_loc: Self::cs(Some(Color::Magenta), false, false),
src_loc_separator: Self::cs(Some(Color::White), false, false),
env_var: Self::cs(None, false, true),
dependency_code: Self::cs(Some(Color::Green), false, false),
dependency_code_hash: Self::cs(Some(Color::Black), true, false),
crate_code: Self::cs(Some(Color::Red), true, false),
crate_code_hash: Self::cs(Some(Color::Black), true, false),
selected_src_ln: Self::cs(None, false, true),
}
}
}
impl Default for ColorScheme {
fn default() -> Self {
Self::classic()
}
}
#[deprecated(since = "0.4.0", note = "Use `BacktracePrinter` instead.")]
pub type Settings = BacktracePrinter;
/// Pretty-printer for backtraces and [`PanicInfo`](PanicInfo) structs.
#[derive(Clone)]
pub struct BacktracePrinter {
message: String,
verbosity: Verbosity,
lib_verbosity: Verbosity,
strip_function_hash: bool,
is_panic_handler: bool,
colors: ColorScheme,
filters: Vec<Arc<FilterCallback>>,
should_print_addresses: bool,
}
impl Default for BacktracePrinter {
fn default() -> Self {
Self {
verbosity: Verbosity::from_env(),
lib_verbosity: Verbosity::lib_from_env(),
message: "The application panicked (crashed).".to_owned(),
strip_function_hash: false,
colors: ColorScheme::classic(),
is_panic_handler: false,
filters: vec![Arc::new(default_frame_filter)],
should_print_addresses: false,
}
}
}
impl std::fmt::Debug for BacktracePrinter {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
fmt.debug_struct("Settings")
.field("message", &self.message)
.field("verbosity", &self.verbosity)
.field("lib_verbosity", &self.lib_verbosity)
.field("strip_function_hash", &self.strip_function_hash)
.field("is_panic_handler", &self.is_panic_handler)
.field("print_addresses", &self.should_print_addresses)
.field("colors", &self.colors)
.finish()
}
}
/// Builder functions.
impl BacktracePrinter {
/// Alias for `BacktracePrinter::default`.
pub fn new() -> Self {
| /// Alter the color scheme.
///
/// Defaults to `ColorScheme::classic()`.
pub fn color_scheme(mut self, colors: ColorScheme) -> Self {
self.colors = colors;
self
}
/// Controls the "greeting" message of the panic.
///
/// Defaults to `"The application panicked (crashed)"`.
pub fn message(mut self, message: impl Into<String>) -> Self {
self.message = message.into();
self
}
/// Controls the verbosity level used when installed as panic handler.
///
/// Defaults to `Verbosity::from_env()`.
pub fn verbosity(mut self, v: Verbosity) -> Self {
self.verbosity = v;
self
}
/// Controls the lib verbosity level used when formatting user provided traces.
///
/// Defaults to `Verbosity::lib_from_env()`.
pub fn lib_verbosity(mut self, v: Verbosity) -> Self {
self.lib_verbosity = v;
self
}
/// Controls whether the hash part of functions is stripped.
///
/// Defaults to `false`.
pub fn strip_function_hash(mut self, strip: bool) -> Self {
self.strip_function_hash = strip;
self
}
/// Controls whether addresses (or module offsets if available) should be printed.
///
/// Defaults to `false`.
pub fn print_addresses(mut self, val: bool) -> Self {
self.should_print_addresses = val;
self
}
/// Add a custom filter to the set of frame filters
///
/// Filters are run in the order they are added.
///
/// # Example
///
/// ```rust
/// use color_backtrace::{default_output_stream, BacktracePrinter};
///
/// BacktracePrinter::new()
/// .add_frame_filter(Box::new(|frames| {
/// frames.retain(|x| matches!(&x.name, Some(n) if !n.starts_with("blabla")))
/// }))
/// .install(default_output_stream());
/// ```
pub fn add_frame_filter(mut self, filter: Box<FilterCallback>) -> Self {
self.filters.push(filter.into());
self
}
/// Clears all filters associated with this printer, including the default filter
pub fn clear_frame_filters(mut self) -> Self {
self.filters.clear();
self
}
}
/// Routines for putting the panic printer to use.
impl BacktracePrinter {
/// Install the `color_backtrace` handler with default settings.
///
/// Output streams can be created via `default_output_stream()` or
/// using any other stream that implements
/// [`termcolor::WriteColor`](termcolor::WriteColor).
pub fn install(self, out: impl WriteColor + Sync + Send + 'static) {
std::panic::set_hook(self.into_panic_handler(out))
}
/// Create a `color_backtrace` panic handler from this panic printer.
///
/// This can be used if you want to combine the handler with other handlers.
pub fn into_panic_handler(
mut self,
out: impl WriteColor + Sync + Send + 'static,
) -> Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send> {
self.is_panic_handler = true;
let out_stream_mutex = Mutex::new(out);
Box::new(move |pi| {
let mut lock = out_stream_mutex.lock().unwrap();
if let Err(e) = self.print_panic_info(pi, &mut *lock) {
// Panicking while handling a panic would send us into a deadlock,
// so we just print the error to stderr instead.
eprintln!("Error while printing panic: {:?}", e);
}
})
}
/// Pretty-prints a [`backtrace::Backtrace`](backtrace::Backtrace) to an output stream.
pub fn print_trace(&self, trace: &backtrace::Backtrace, out: &mut impl WriteColor) -> IOResult {
writeln!(out, "{:━^80}", " BACKTRACE ")?;
// Collect frame info.
let frames: Vec<_> = trace
.frames()
.iter()
.flat_map(|frame| frame.symbols().iter().map(move |sym| (frame.ip(), sym)))
.zip(1usize..)
.map(|((ip, sym), n)| Frame {
name: sym.name().map(|x| x.to_string()),
lineno: sym.lineno(),
filename: sym.filename().map(|x| x.into()),
n,
ip: ip as usize,
_private_ctor: (),
})
.collect();
let mut filtered_frames = frames.iter().collect();
match env::var("COLORBT_SHOW_HIDDEN").ok().as_deref() {
Some("1") | Some("on") | Some("y") => (),
_ => {
for filter in &self.filters {
filter(&mut filtered_frames);
}
}
}
if filtered_frames.is_empty() {
// TODO: Would probably look better centered.
return writeln!(out, "<empty backtrace>");
}
// Don't let filters mess with the order.
filtered_frames.sort_by_key(|x| x.n);
macro_rules! print_hidden {
($n:expr) => {
out.set_color(&self.colors.frames_omitted_msg)?;
let n = $n;
let text = format!(
"{decorator} {n} frame{plural} hidden {decorator}",
n = n,
plural = if n == 1 { "" } else { "s" },
decorator = "⋮",
);
writeln!(out, "{:^80}", text)?;
out.reset()?;
};
}
let mut last_n = 0;
for frame in &filtered_frames {
let frame_delta = frame.n - last_n - 1;
if frame_delta != 0 {
print_hidden!(frame_delta);
}
frame.print(frame.n, out, self)?;
last_n = frame.n;
}
let last_filtered_n = filtered_frames.last().unwrap().n;
let last_unfiltered_n = frames.last().unwrap().n;
if last_filtered_n < last_unfiltered_n {
print_hidden!(last_unfiltered_n - last_filtered_n);
}
Ok(())
}
/// Pretty-print a backtrace to a `String`, using VT100 color codes.
pub fn format_trace_to_string(&self, trace: &backtrace::Backtrace) -> IOResult<String> {
// TODO: should we implicitly enable VT100 support on Windows here?
let mut ansi = Ansi::new(vec![]);
self.print_trace(trace, &mut ansi)?;
Ok(String::from_utf8(ansi.into_inner()).unwrap())
}
/// Pretty-prints a [`PanicInfo`](PanicInfo) struct to an output stream.
pub fn print_panic_info(&self, pi: &PanicInfo, out: &mut impl WriteColor) -> IOResult {
out.set_color(&self.colors.header)?;
writeln!(out, "{}", self.message)?;
out.reset()?;
// Print panic message.
let payload = pi
.payload()
.downcast_ref::<String>()
.map(String::as_str)
.or_else(|| pi.payload().downcast_ref::<&str>().cloned())
.unwrap_or("<non string panic payload>");
write!(out, "Message: ")?;
out.set_color(&self.colors.msg_loc_prefix)?;
writeln!(out, "{}", payload)?;
out.reset()?;
// If known, print panic location.
write!(out, "Location: ")?;
if let Some(loc) = pi.location() {
out.set_color(&self.colors.src_loc)?;
write!(out, "{}", loc.file())?;
out.set_color(&self.colors.src_loc_separator)?;
write!(out, ":")?;
out.set_color(&self.colors.src_loc)?;
writeln!(out, "{}", loc.line())?;
out.reset()?;
} else {
writeln!(out, "<unknown>")?;
}
// Print some info on how to increase verbosity.
if self.current_verbosity() == Verbosity::Minimal {
write!(out, "\nBacktrace omitted.\n\nRun with ")?;
out.set_color(&self.colors.env_var)?;
write!(out, "RUST_BACKTRACE=1")?;
out.reset()?;
writeln!(out, " environment variable to display it.")?;
} else {
// This text only makes sense if frames are displayed.
write!(out, "\nRun with ")?;
out.set_color(&self.colors.env_var)?;
write!(out, "COLORBT_SHOW_HIDDEN=1")?;
out.reset()?;
writeln!(out, " environment variable to disable frame filtering.")?;
}
if self.current_verbosity() <= Verbosity::Medium {
write!(out, "Run with ")?;
out.set_color(&self.colors.env_var)?;
write!(out, "RUST_BACKTRACE=full")?;
out.reset()?;
writeln!(out, " to include source snippets.")?;
}
if self.current_verbosity() >= Verbosity::Medium {
self.print_trace(&backtrace::Backtrace::new(), out)?;
}
Ok(())
}
fn current_verbosity(&self) -> Verbosity {
if self.is_panic_handler {
self.verbosity
} else {
self.lib_verbosity
}
}
fn should_print_addresses(&self) -> bool {
self.should_print_addresses
}
}
// ============================================================================================== //
// [Deprecated routines for backward compat] //
// ============================================================================================== //
#[deprecated(since = "0.4.0", note = "Use `BacktracePrinter::print_trace` instead`")]
pub fn print_backtrace(trace: &backtrace::Backtrace, s: &mut BacktracePrinter) -> IOResult {
s.print_trace(trace, &mut default_output_stream())
}
#[deprecated(
since = "0.4.0",
note = "Use `BacktracePrinter::print_panic_info` instead`"
)]
pub fn print_panic_info(pi: &PanicInfo, s: &mut BacktracePrinter) -> IOResult {
s.print_panic_info(pi, &mut default_output_stream())
}
// ============================================================================================== //
| Self::default()
}
| identifier_body |
u2eve.py | #! /usr/bin/env python
#
# Copyright (c) 2015 Jason Ish
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read unified2 log files and output events as Suricata EVE JSON."""
from __future__ import print_function
import sys
import os
import os.path
import base64
if sys.argv[0] == __file__:
sys.path.insert(
0, os.path.abspath(os.path.join(__file__, "..", "..", "..")))
import socket
import time
import json
import logging
import struct
from datetime import datetime
try:
from collections import OrderedDict
except ImportError as err:
from idstools.compat.ordereddict import OrderedDict
try:
import argparse
except ImportError as err:
from idstools.compat.argparse import argparse
from idstools import unified2
from idstools import maps
logging.basicConfig(level=logging.INFO, format="%(message)s")
LOG = logging.getLogger()
proto_map = {
1: "ICMP",
6: "TCP",
17: "UDP",
}
def get_tzoffset(sec):
offset = datetime.fromtimestamp(sec) - datetime.utcfromtimestamp(sec)
if offset.days == -1:
return "-%02d%02d" % (
            (86400 - offset.seconds) / 3600, ((86400 - offset.seconds) % 3600) / 60)
else:
return "+%02d%02d" % (
            offset.seconds / 3600, (offset.seconds % 3600) / 60)
def render_timestamp(sec, usec):
tt = time.localtime(sec)
return "%04d-%02d-%02dT%02d:%02d:%02d.%06d%s" % (
tt.tm_year, tt.tm_mon, tt.tm_mday, tt.tm_hour, tt.tm_min, tt.tm_sec,
usec, get_tzoffset(sec))
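# Illustrative example: on a UTC host, render_timestamp(1609459200, 123456)
# returns "2021-01-01T00:00:00.123456+0000".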
def calculate_flow_id(event):
flow_id = event["protocol"] << 24
if len(event["source-ip.raw"]) == 4:
flow_id = flow_id ^ \
struct.unpack(">L", event["source-ip.raw"])[0] ^ \
struct.unpack(">L", event["destination-ip.raw"])[0]
else:
for part in struct.unpack(">LLLL", event["source-ip.raw"]):
flow_id = flow_id ^ part
for part in struct.unpack(">LLLL", event["destination-ip.raw"]):
flow_id = flow_id ^ part
if "src_port" in event and "dest_port" in event:
flow_id = flow_id ^ event["src_port"] ^ event["dest_port"]
return flow_id
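# Note: the XOR folding above is symmetric in source/destination, so both
# directions of a flow map to the same flow_id. This is a best-effort
# approximation and is not guaranteed to match Suricata's own flow_id values.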
class EveFilter(object):
def __init__(
self, msgmap=None, classmap=None):
self.msgmap = msgmap
self.classmap = classmap
def filter(self, event):
output = OrderedDict()
output["timestamp"] = render_timestamp(
event["event-second"], event["event-microsecond"])
output["sensor_id"] = event["sensor-id"]
output["event_type"] = "alert"
output["src_ip"] = event["source-ip"]
if event["protocol"] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:
output["src_port"] = event["sport-itype"]
output["dest_ip"] = event["destination-ip"]
if event["protocol"] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:
output["dest_port"] = event["dport-icode"]
output["proto"] = self.getprotobynumber(event["protocol"])
if event["protocol"] in [socket.IPPROTO_ICMP, socket.IPPROTO_ICMPV6]:
output["icmp_type"] = event["sport-itype"]
output["icmp_code"] = event["dport-icode"]
output["flow_id"] = calculate_flow_id(event)
alert = OrderedDict()
alert["action"] = "blocked" if event["blocked"] == 1 else "allowed"
alert["gid"] = event["generator-id"]
alert["signature_id"] = event["signature-id"]
alert["rev"] = event["signature-revision"]
alert["signature"] = self.resolve_msg(event)
alert["category"] = self.resolve_classification(event)
alert["severity"] = event["priority"]
output["alert"] = alert
# EVE only includes one packet.
if event["packets"]:
output["packet"] = base64.b64encode(event["packets"][0]["data"])
return output
def resolve_classification(self, event, default=None):
if self.classmap:
classinfo = self.classmap.get(event["classification-id"])
if classinfo:
return classinfo["description"]
return default
def resolve_msg(self, event, default=None):
if self.msgmap:
signature = self.msgmap.get(
event["generator-id"], event["signature-id"])
if signature:
return signature["msg"]
return default
def getprotobynumber(self, protocol):
return proto_map.get(protocol, str(protocol))
class OutputWrapper(object):
def __init__(self, filename, fileobj=None):
self.filename = filename
self.fileobj = fileobj
if self.fileobj is None:
self.reopen()
self.isfile = True
else:
self.isfile = False
def reopen(self):
if self.fileobj:
self.fileobj.close()
self.fileobj = open(self.filename, "ab")
def write(self, buf):
if self.isfile:
if not os.path.exists(self.filename):
self.reopen()
self.fileobj.write(buf)
self.fileobj.write("\n")
self.fileobj.flush()
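# Note: when writing to a real file, write() above re-opens the file if the
# path has disappeared, so external log rotation (e.g. logrotate moving the
# output file away) does not silently swallow subsequent events.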
def load_from_snort_conf(snort_conf, classmap, msgmap):
snort_etc = os.path.dirname(os.path.expanduser(snort_conf))
classification_config = os.path.join(snort_etc, "classification.config")
if os.path.exists(classification_config):
LOG.debug("Loading %s.", classification_config)
classmap.load_from_file(open(classification_config))
genmsg_map = os.path.join(snort_etc, "gen-msg.map")
if os.path.exists(genmsg_map):
LOG.debug("Loading %s.", genmsg_map)
msgmap.load_generator_map(open(genmsg_map))
sidmsg_map = os.path.join(snort_etc, "sid-msg.map")
if os.path.exists(sidmsg_map):
LOG.debug("Loading %s.", sidmsg_map)
msgmap.load_signature_map(open(sidmsg_map))
epilog = """If --directory and --prefix are provided files will be
read from the specified 'spool' directory. Otherwise files on the
command line will be processed.
"""
def main():
msgmap = maps.SignatureMap()
classmap = maps.ClassificationMap()
parser = argparse.ArgumentParser(
fromfile_prefix_chars='@', epilog=epilog)
parser.add_argument(
"-C", dest="classification_path", metavar="<classification.config>",
help="path to classification config")
parser.add_argument(
"-S", dest="sidmsgmap_path", metavar="<msg-msg.map>",
help="path to sid-msg.map")
parser.add_argument(
"-G", dest="genmsgmap_path", metavar="<gen-msg.map>",
help="path to gen-msg.map")
parser.add_argument(
"--snort-conf", dest="snort_conf", metavar="<snort.conf>",
help="attempt to load classifications and map files based on the "
"location of the snort.conf")
parser.add_argument(
"--directory", metavar="<spool directory>",
help="spool directory (eg: /var/log/snort)")
parser.add_argument(
"--prefix", metavar="<spool file prefix>",
help="spool filename prefix (eg: unified2.log)")
parser.add_argument(
"--bookmark", action="store_true", default=False,
help="enable bookmarking")
parser.add_argument(
"--follow", action="store_true", default=False,
help="follow files/continuous mode (spool mode only)")
parser.add_argument(
"--delete", action="store_true", default=False,
help="delete spool files")
parser.add_argument(
"--output", metavar="<filename>",
help="output filename (eg: /var/log/snort/alerts.json")
parser.add_argument(
"--stdout", action="store_true", default=False,
help="also log to stdout if --output is a file")
parser.add_argument(
"filenames", nargs="*")
args = parser.parse_args()
if args.snort_conf:
load_from_snort_conf(args.snort_conf, classmap, msgmap)
if args.classification_path:
classmap.load_from_file(
open(os.path.expanduser(args.classification_path)))
if args.genmsgmap_path:
msgmap.load_generator_map(open(os.path.expanduser(args.genmsgmap_path)))
if args.sidmsgmap_path:
msgmap.load_signature_map(open(os.path.expanduser(args.sidmsgmap_path)))
if msgmap.size() == 0:
|
else:
LOG.info("Loaded %s rule message map entries.", msgmap.size())
if classmap.size() == 0:
LOG.warn("WARNING: No classifications loaded.")
else:
LOG.info("Loaded %s classifications.", classmap.size())
eve_filter = EveFilter(msgmap, classmap)
outputs = []
if args.output:
outputs.append(OutputWrapper(args.output))
if args.stdout:
outputs.append(OutputWrapper("-", sys.stdout))
else:
outputs.append(OutputWrapper("-", sys.stdout))
if args.directory and args.prefix:
reader = unified2.SpoolEventReader(
directory=args.directory,
prefix=args.prefix,
follow=args.follow,
delete=args.delete,
bookmark=args.bookmark)
elif args.filenames:
reader = unified2.FileEventReader(*args.filenames)
else:
print("nothing to do.")
return
for event in reader:
try:
encoded = json.dumps(eve_filter.filter(event), encoding="latin-1")
for out in outputs:
out.write(encoded)
except Exception as err:
LOG.error("Failed to encode record as JSON: %s: %s" % (
str(err), str(event)))
if __name__ == "__main__":
sys.exit(main())
| LOG.warn("WARNING: No alert message map entries loaded.") | conditional_block |
u2eve.py | #! /usr/bin/env python
#
# Copyright (c) 2015 Jason Ish
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read unified2 log files and output events as Suricata EVE JSON."""
from __future__ import print_function
import sys
import os
import os.path
import base64
if sys.argv[0] == __file__:
sys.path.insert(
0, os.path.abspath(os.path.join(__file__, "..", "..", "..")))
import socket
import time
import json
import logging
import struct
from datetime import datetime
try:
from collections import OrderedDict
except ImportError as err:
from idstools.compat.ordereddict import OrderedDict
try:
import argparse
except ImportError as err:
from idstools.compat.argparse import argparse
from idstools import unified2
from idstools import maps
logging.basicConfig(level=logging.INFO, format="%(message)s")
LOG = logging.getLogger()
proto_map = {
1: "ICMP",
6: "TCP",
17: "UDP",
}
def get_tzoffset(sec):
offset = datetime.fromtimestamp(sec) - datetime.utcfromtimestamp(sec)
if offset.days == -1:
return "-%02d%02d" % (
            (86400 - offset.seconds) / 3600, ((86400 - offset.seconds) % 3600) / 60)
else:
return "+%02d%02d" % (
            offset.seconds / 3600, (offset.seconds % 3600) / 60)
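# Illustrative examples: a host at UTC-05:00 yields "-0500" and a UTC host
# yields "+0000".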
def render_timestamp(sec, usec):
tt = time.localtime(sec)
return "%04d-%02d-%02dT%02d:%02d:%02d.%06d%s" % (
tt.tm_year, tt.tm_mon, tt.tm_mday, tt.tm_hour, tt.tm_min, tt.tm_sec,
usec, get_tzoffset(sec))
def calculate_flow_id(event):
flow_id = event["protocol"] << 24
if len(event["source-ip.raw"]) == 4:
flow_id = flow_id ^ \
struct.unpack(">L", event["source-ip.raw"])[0] ^ \
struct.unpack(">L", event["destination-ip.raw"])[0]
else:
for part in struct.unpack(">LLLL", event["source-ip.raw"]):
flow_id = flow_id ^ part
for part in struct.unpack(">LLLL", event["destination-ip.raw"]):
flow_id = flow_id ^ part
if "src_port" in event and "dest_port" in event:
flow_id = flow_id ^ event["src_port"] ^ event["dest_port"]
return flow_id
class EveFilter(object):
def __init__(
self, msgmap=None, classmap=None):
self.msgmap = msgmap
self.classmap = classmap
def filter(self, event):
output = OrderedDict()
output["timestamp"] = render_timestamp(
event["event-second"], event["event-microsecond"])
output["sensor_id"] = event["sensor-id"]
output["event_type"] = "alert"
output["src_ip"] = event["source-ip"]
if event["protocol"] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:
output["src_port"] = event["sport-itype"]
output["dest_ip"] = event["destination-ip"]
if event["protocol"] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:
output["dest_port"] = event["dport-icode"]
output["proto"] = self.getprotobynumber(event["protocol"])
if event["protocol"] in [socket.IPPROTO_ICMP, socket.IPPROTO_ICMPV6]:
output["icmp_type"] = event["sport-itype"]
output["icmp_code"] = event["dport-icode"]
output["flow_id"] = calculate_flow_id(event)
alert = OrderedDict()
alert["action"] = "blocked" if event["blocked"] == 1 else "allowed"
alert["gid"] = event["generator-id"]
alert["signature_id"] = event["signature-id"]
alert["rev"] = event["signature-revision"]
alert["signature"] = self.resolve_msg(event)
alert["category"] = self.resolve_classification(event)
alert["severity"] = event["priority"]
output["alert"] = alert
# EVE only includes one packet.
if event["packets"]:
output["packet"] = base64.b64encode(event["packets"][0]["data"])
return output
def resolve_classification(self, event, default=None):
if self.classmap:
classinfo = self.classmap.get(event["classification-id"])
if classinfo:
return classinfo["description"]
return default
def resolve_msg(self, event, default=None):
if self.msgmap:
signature = self.msgmap.get(
event["generator-id"], event["signature-id"])
if signature:
return signature["msg"]
return default
def | (self, protocol):
return proto_map.get(protocol, str(protocol))
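# Illustrative shape of a record produced by EveFilter.filter() (all values are
# made up; the keys match the assignments above):
#
# {"timestamp": "2021-01-01T00:00:00.123456+0000", "sensor_id": 0,
#  "event_type": "alert", "src_ip": "10.1.1.1", "src_port": 34567,
#  "dest_ip": "10.1.1.2", "dest_port": 80, "proto": "TCP", "flow_id": 123456,
#  "alert": {"action": "allowed", "gid": 1, "signature_id": 2100498, "rev": 7,
#            "signature": "GPL ATTACK_RESPONSE id check returned root",
#            "category": "Potentially Bad Traffic", "severity": 2},
#  "packet": "<base64 of the first captured packet>"}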
class OutputWrapper(object):
def __init__(self, filename, fileobj=None):
self.filename = filename
self.fileobj = fileobj
if self.fileobj is None:
self.reopen()
self.isfile = True
else:
self.isfile = False
def reopen(self):
if self.fileobj:
self.fileobj.close()
self.fileobj = open(self.filename, "ab")
def write(self, buf):
if self.isfile:
if not os.path.exists(self.filename):
self.reopen()
self.fileobj.write(buf)
self.fileobj.write("\n")
self.fileobj.flush()
def load_from_snort_conf(snort_conf, classmap, msgmap):
snort_etc = os.path.dirname(os.path.expanduser(snort_conf))
classification_config = os.path.join(snort_etc, "classification.config")
if os.path.exists(classification_config):
LOG.debug("Loading %s.", classification_config)
classmap.load_from_file(open(classification_config))
genmsg_map = os.path.join(snort_etc, "gen-msg.map")
if os.path.exists(genmsg_map):
LOG.debug("Loading %s.", genmsg_map)
msgmap.load_generator_map(open(genmsg_map))
sidmsg_map = os.path.join(snort_etc, "sid-msg.map")
if os.path.exists(sidmsg_map):
LOG.debug("Loading %s.", sidmsg_map)
msgmap.load_signature_map(open(sidmsg_map))
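# Illustrative layout load_from_snort_conf() expects: classification.config,
# gen-msg.map and sid-msg.map sitting next to the given snort.conf (for
# example, all under /etc/snort/).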
epilog = """If --directory and --prefix are provided files will be
read from the specified 'spool' directory. Otherwise files on the
command line will be processed.
"""
def main():
msgmap = maps.SignatureMap()
classmap = maps.ClassificationMap()
parser = argparse.ArgumentParser(
fromfile_prefix_chars='@', epilog=epilog)
parser.add_argument(
"-C", dest="classification_path", metavar="<classification.config>",
help="path to classification config")
parser.add_argument(
"-S", dest="sidmsgmap_path", metavar="<msg-msg.map>",
help="path to sid-msg.map")
parser.add_argument(
"-G", dest="genmsgmap_path", metavar="<gen-msg.map>",
help="path to gen-msg.map")
parser.add_argument(
"--snort-conf", dest="snort_conf", metavar="<snort.conf>",
help="attempt to load classifications and map files based on the "
"location of the snort.conf")
parser.add_argument(
"--directory", metavar="<spool directory>",
help="spool directory (eg: /var/log/snort)")
parser.add_argument(
"--prefix", metavar="<spool file prefix>",
help="spool filename prefix (eg: unified2.log)")
parser.add_argument(
"--bookmark", action="store_true", default=False,
help="enable bookmarking")
parser.add_argument(
"--follow", action="store_true", default=False,
help="follow files/continuous mode (spool mode only)")
parser.add_argument(
"--delete", action="store_true", default=False,
help="delete spool files")
parser.add_argument(
"--output", metavar="<filename>",
help="output filename (eg: /var/log/snort/alerts.json")
parser.add_argument(
"--stdout", action="store_true", default=False,
help="also log to stdout if --output is a file")
parser.add_argument(
"filenames", nargs="*")
args = parser.parse_args()
if args.snort_conf:
load_from_snort_conf(args.snort_conf, classmap, msgmap)
if args.classification_path:
classmap.load_from_file(
open(os.path.expanduser(args.classification_path)))
if args.genmsgmap_path:
msgmap.load_generator_map(open(os.path.expanduser(args.genmsgmap_path)))
if args.sidmsgmap_path:
msgmap.load_signature_map(open(os.path.expanduser(args.sidmsgmap_path)))
if msgmap.size() == 0:
LOG.warn("WARNING: No alert message map entries loaded.")
else:
LOG.info("Loaded %s rule message map entries.", msgmap.size())
if classmap.size() == 0:
LOG.warn("WARNING: No classifications loaded.")
else:
LOG.info("Loaded %s classifications.", classmap.size())
eve_filter = EveFilter(msgmap, classmap)
outputs = []
if args.output:
outputs.append(OutputWrapper(args.output))
if args.stdout:
outputs.append(OutputWrapper("-", sys.stdout))
else:
outputs.append(OutputWrapper("-", sys.stdout))
if args.directory and args.prefix:
reader = unified2.SpoolEventReader(
directory=args.directory,
prefix=args.prefix,
follow=args.follow,
delete=args.delete,
bookmark=args.bookmark)
elif args.filenames:
reader = unified2.FileEventReader(*args.filenames)
else:
print("nothing to do.")
return
for event in reader:
try:
encoded = json.dumps(eve_filter.filter(event), encoding="latin-1")
for out in outputs:
out.write(encoded)
except Exception as err:
LOG.error("Failed to encode record as JSON: %s: %s" % (
str(err), str(event)))
if __name__ == "__main__":
sys.exit(main())
| getprotobynumber | identifier_name |
u2eve.py | #! /usr/bin/env python
#
# Copyright (c) 2015 Jason Ish
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read unified2 log files and output events as Suricata EVE JSON."""
from __future__ import print_function
import sys
import os
import os.path
import base64
if sys.argv[0] == __file__:
sys.path.insert(
0, os.path.abspath(os.path.join(__file__, "..", "..", "..")))
import socket
import time
import json
import logging
import struct
from datetime import datetime
try:
from collections import OrderedDict
except ImportError as err:
from idstools.compat.ordereddict import OrderedDict
try:
import argparse
except ImportError as err:
from idstools.compat.argparse import argparse
from idstools import unified2
from idstools import maps
logging.basicConfig(level=logging.INFO, format="%(message)s")
LOG = logging.getLogger()
proto_map = {
1: "ICMP",
6: "TCP",
17: "UDP",
}
def get_tzoffset(sec):
offset = datetime.fromtimestamp(sec) - datetime.utcfromtimestamp(sec)
if offset.days == -1:
return "-%02d%02d" % (
            (86400 - offset.seconds) / 3600, (86400 - offset.seconds) % 3600 / 60)
else:
return "+%02d%02d" % (
            offset.seconds / 3600, offset.seconds % 3600 / 60)
def render_timestamp(sec, usec):
tt = time.localtime(sec)
return "%04d-%02d-%02dT%02d:%02d:%02d.%06d%s" % (
tt.tm_year, tt.tm_mon, tt.tm_mday, tt.tm_hour, tt.tm_min, tt.tm_sec,
usec, get_tzoffset(sec))
def calculate_flow_id(event):
flow_id = event["protocol"] << 24
if len(event["source-ip.raw"]) == 4:
flow_id = flow_id ^ \
struct.unpack(">L", event["source-ip.raw"])[0] ^ \
struct.unpack(">L", event["destination-ip.raw"])[0]
else:
for part in struct.unpack(">LLLL", event["source-ip.raw"]):
flow_id = flow_id ^ part
for part in struct.unpack(">LLLL", event["destination-ip.raw"]):
flow_id = flow_id ^ part
if "src_port" in event and "dest_port" in event:
flow_id = flow_id ^ event["src_port"] ^ event["dest_port"]
return flow_id
class EveFilter(object):
def __init__(
self, msgmap=None, classmap=None):
self.msgmap = msgmap
self.classmap = classmap
def filter(self, event):
output = OrderedDict()
output["timestamp"] = render_timestamp(
event["event-second"], event["event-microsecond"])
output["sensor_id"] = event["sensor-id"]
output["event_type"] = "alert"
output["src_ip"] = event["source-ip"]
if event["protocol"] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:
output["src_port"] = event["sport-itype"]
output["dest_ip"] = event["destination-ip"]
if event["protocol"] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:
output["dest_port"] = event["dport-icode"]
output["proto"] = self.getprotobynumber(event["protocol"])
if event["protocol"] in [socket.IPPROTO_ICMP, socket.IPPROTO_ICMPV6]:
output["icmp_type"] = event["sport-itype"]
output["icmp_code"] = event["dport-icode"]
output["flow_id"] = calculate_flow_id(event)
alert = OrderedDict()
alert["action"] = "blocked" if event["blocked"] == 1 else "allowed"
alert["gid"] = event["generator-id"]
alert["signature_id"] = event["signature-id"]
alert["rev"] = event["signature-revision"]
alert["signature"] = self.resolve_msg(event)
alert["category"] = self.resolve_classification(event)
alert["severity"] = event["priority"]
output["alert"] = alert
# EVE only includes one packet.
if event["packets"]:
output["packet"] = base64.b64encode(event["packets"][0]["data"])
return output
def resolve_classification(self, event, default=None):
if self.classmap:
classinfo = self.classmap.get(event["classification-id"])
if classinfo:
return classinfo["description"]
return default
def resolve_msg(self, event, default=None):
if self.msgmap:
signature = self.msgmap.get(
event["generator-id"], event["signature-id"])
if signature:
return signature["msg"]
return default
def getprotobynumber(self, protocol):
return proto_map.get(protocol, str(protocol))
class OutputWrapper(object):
def __init__(self, filename, fileobj=None):
self.filename = filename
self.fileobj = fileobj
if self.fileobj is None:
self.reopen()
self.isfile = True
else:
self.isfile = False
def reopen(self):
if self.fileobj:
self.fileobj.close()
self.fileobj = open(self.filename, "ab")
def write(self, buf):
|
def load_from_snort_conf(snort_conf, classmap, msgmap):
snort_etc = os.path.dirname(os.path.expanduser(snort_conf))
classification_config = os.path.join(snort_etc, "classification.config")
if os.path.exists(classification_config):
LOG.debug("Loading %s.", classification_config)
classmap.load_from_file(open(classification_config))
genmsg_map = os.path.join(snort_etc, "gen-msg.map")
if os.path.exists(genmsg_map):
LOG.debug("Loading %s.", genmsg_map)
msgmap.load_generator_map(open(genmsg_map))
sidmsg_map = os.path.join(snort_etc, "sid-msg.map")
if os.path.exists(sidmsg_map):
LOG.debug("Loading %s.", sidmsg_map)
msgmap.load_signature_map(open(sidmsg_map))
epilog = """If --directory and --prefix are provided files will be
read from the specified 'spool' directory. Otherwise files on the
command line will be processed.
"""
def main():
msgmap = maps.SignatureMap()
classmap = maps.ClassificationMap()
parser = argparse.ArgumentParser(
fromfile_prefix_chars='@', epilog=epilog)
parser.add_argument(
"-C", dest="classification_path", metavar="<classification.config>",
help="path to classification config")
parser.add_argument(
"-S", dest="sidmsgmap_path", metavar="<msg-msg.map>",
help="path to sid-msg.map")
parser.add_argument(
"-G", dest="genmsgmap_path", metavar="<gen-msg.map>",
help="path to gen-msg.map")
parser.add_argument(
"--snort-conf", dest="snort_conf", metavar="<snort.conf>",
help="attempt to load classifications and map files based on the "
"location of the snort.conf")
parser.add_argument(
"--directory", metavar="<spool directory>",
help="spool directory (eg: /var/log/snort)")
parser.add_argument(
"--prefix", metavar="<spool file prefix>",
help="spool filename prefix (eg: unified2.log)")
parser.add_argument(
"--bookmark", action="store_true", default=False,
help="enable bookmarking")
parser.add_argument(
"--follow", action="store_true", default=False,
help="follow files/continuous mode (spool mode only)")
parser.add_argument(
"--delete", action="store_true", default=False,
help="delete spool files")
parser.add_argument(
"--output", metavar="<filename>",
help="output filename (eg: /var/log/snort/alerts.json")
parser.add_argument(
"--stdout", action="store_true", default=False,
help="also log to stdout if --output is a file")
parser.add_argument(
"filenames", nargs="*")
args = parser.parse_args()
if args.snort_conf:
load_from_snort_conf(args.snort_conf, classmap, msgmap)
if args.classification_path:
classmap.load_from_file(
open(os.path.expanduser(args.classification_path)))
if args.genmsgmap_path:
msgmap.load_generator_map(open(os.path.expanduser(args.genmsgmap_path)))
if args.sidmsgmap_path:
msgmap.load_signature_map(open(os.path.expanduser(args.sidmsgmap_path)))
if msgmap.size() == 0:
LOG.warn("WARNING: No alert message map entries loaded.")
else:
LOG.info("Loaded %s rule message map entries.", msgmap.size())
if classmap.size() == 0:
LOG.warn("WARNING: No classifications loaded.")
else:
LOG.info("Loaded %s classifications.", classmap.size())
eve_filter = EveFilter(msgmap, classmap)
outputs = []
if args.output:
outputs.append(OutputWrapper(args.output))
if args.stdout:
outputs.append(OutputWrapper("-", sys.stdout))
else:
outputs.append(OutputWrapper("-", sys.stdout))
if args.directory and args.prefix:
reader = unified2.SpoolEventReader(
directory=args.directory,
prefix=args.prefix,
follow=args.follow,
delete=args.delete,
bookmark=args.bookmark)
elif args.filenames:
reader = unified2.FileEventReader(*args.filenames)
else:
print("nothing to do.")
return
for event in reader:
try:
encoded = json.dumps(eve_filter.filter(event), encoding="latin-1")
for out in outputs:
out.write(encoded)
except Exception as err:
LOG.error("Failed to encode record as JSON: %s: %s" % (
str(err), str(event)))
if __name__ == "__main__":
sys.exit(main())
| if self.isfile:
if not os.path.exists(self.filename):
self.reopen()
self.fileobj.write(buf)
self.fileobj.write("\n")
self.fileobj.flush() | identifier_body |
u2eve.py | # Copyright (c) 2015 Jason Ish
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read unified2 log files and output events as Suricata EVE JSON."""
from __future__ import print_function
import sys
import os
import os.path
import base64
if sys.argv[0] == __file__:
sys.path.insert(
0, os.path.abspath(os.path.join(__file__, "..", "..", "..")))
import socket
import time
import json
import logging
import struct
from datetime import datetime
try:
from collections import OrderedDict
except ImportError as err:
from idstools.compat.ordereddict import OrderedDict
try:
import argparse
except ImportError as err:
from idstools.compat.argparse import argparse
from idstools import unified2
from idstools import maps
logging.basicConfig(level=logging.INFO, format="%(message)s")
LOG = logging.getLogger()
proto_map = {
1: "ICMP",
6: "TCP",
17: "UDP",
}
def get_tzoffset(sec):
offset = datetime.fromtimestamp(sec) - datetime.utcfromtimestamp(sec)
if offset.days == -1:
return "-%02d%02d" % (
            (86400 - offset.seconds) / 3600, (86400 - offset.seconds) % 3600 / 60)
else:
return "+%02d%02d" % (
            offset.seconds / 3600, offset.seconds % 3600 / 60)
def render_timestamp(sec, usec):
tt = time.localtime(sec)
return "%04d-%02d-%02dT%02d:%02d:%02d.%06d%s" % (
tt.tm_year, tt.tm_mon, tt.tm_mday, tt.tm_hour, tt.tm_min, tt.tm_sec,
usec, get_tzoffset(sec))
def calculate_flow_id(event):
flow_id = event["protocol"] << 24
if len(event["source-ip.raw"]) == 4:
flow_id = flow_id ^ \
struct.unpack(">L", event["source-ip.raw"])[0] ^ \
struct.unpack(">L", event["destination-ip.raw"])[0]
else:
for part in struct.unpack(">LLLL", event["source-ip.raw"]):
flow_id = flow_id ^ part
for part in struct.unpack(">LLLL", event["destination-ip.raw"]):
flow_id = flow_id ^ part
if "src_port" in event and "dest_port" in event:
flow_id = flow_id ^ event["src_port"] ^ event["dest_port"]
return flow_id
class EveFilter(object):
def __init__(
self, msgmap=None, classmap=None):
self.msgmap = msgmap
self.classmap = classmap
def filter(self, event):
output = OrderedDict()
output["timestamp"] = render_timestamp(
event["event-second"], event["event-microsecond"])
output["sensor_id"] = event["sensor-id"]
output["event_type"] = "alert"
output["src_ip"] = event["source-ip"]
if event["protocol"] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:
output["src_port"] = event["sport-itype"]
output["dest_ip"] = event["destination-ip"]
if event["protocol"] in [socket.IPPROTO_UDP, socket.IPPROTO_TCP]:
output["dest_port"] = event["dport-icode"]
output["proto"] = self.getprotobynumber(event["protocol"])
if event["protocol"] in [socket.IPPROTO_ICMP, socket.IPPROTO_ICMPV6]:
output["icmp_type"] = event["sport-itype"]
output["icmp_code"] = event["dport-icode"]
output["flow_id"] = calculate_flow_id(event)
alert = OrderedDict()
alert["action"] = "blocked" if event["blocked"] == 1 else "allowed"
alert["gid"] = event["generator-id"]
alert["signature_id"] = event["signature-id"]
alert["rev"] = event["signature-revision"]
alert["signature"] = self.resolve_msg(event)
alert["category"] = self.resolve_classification(event)
alert["severity"] = event["priority"]
output["alert"] = alert
# EVE only includes one packet.
if event["packets"]:
output["packet"] = base64.b64encode(event["packets"][0]["data"])
return output
def resolve_classification(self, event, default=None):
if self.classmap:
classinfo = self.classmap.get(event["classification-id"])
if classinfo:
return classinfo["description"]
return default
def resolve_msg(self, event, default=None):
if self.msgmap:
signature = self.msgmap.get(
event["generator-id"], event["signature-id"])
if signature:
return signature["msg"]
return default
def getprotobynumber(self, protocol):
return proto_map.get(protocol, str(protocol))
class OutputWrapper(object):
def __init__(self, filename, fileobj=None):
self.filename = filename
self.fileobj = fileobj
if self.fileobj is None:
self.reopen()
self.isfile = True
else:
self.isfile = False
def reopen(self):
if self.fileobj:
self.fileobj.close()
self.fileobj = open(self.filename, "ab")
def write(self, buf):
if self.isfile:
if not os.path.exists(self.filename):
self.reopen()
self.fileobj.write(buf)
self.fileobj.write("\n")
self.fileobj.flush()
def load_from_snort_conf(snort_conf, classmap, msgmap):
snort_etc = os.path.dirname(os.path.expanduser(snort_conf))
classification_config = os.path.join(snort_etc, "classification.config")
if os.path.exists(classification_config):
LOG.debug("Loading %s.", classification_config)
classmap.load_from_file(open(classification_config))
genmsg_map = os.path.join(snort_etc, "gen-msg.map")
if os.path.exists(genmsg_map):
LOG.debug("Loading %s.", genmsg_map)
msgmap.load_generator_map(open(genmsg_map))
sidmsg_map = os.path.join(snort_etc, "sid-msg.map")
if os.path.exists(sidmsg_map):
LOG.debug("Loading %s.", sidmsg_map)
msgmap.load_signature_map(open(sidmsg_map))
epilog = """If --directory and --prefix are provided files will be
read from the specified 'spool' directory. Otherwise files on the
command line will be processed.
"""
def main():
msgmap = maps.SignatureMap()
classmap = maps.ClassificationMap()
parser = argparse.ArgumentParser(
fromfile_prefix_chars='@', epilog=epilog)
parser.add_argument(
"-C", dest="classification_path", metavar="<classification.config>",
help="path to classification config")
parser.add_argument(
"-S", dest="sidmsgmap_path", metavar="<msg-msg.map>",
help="path to sid-msg.map")
parser.add_argument(
"-G", dest="genmsgmap_path", metavar="<gen-msg.map>",
help="path to gen-msg.map")
parser.add_argument(
"--snort-conf", dest="snort_conf", metavar="<snort.conf>",
help="attempt to load classifications and map files based on the "
"location of the snort.conf")
parser.add_argument(
"--directory", metavar="<spool directory>",
help="spool directory (eg: /var/log/snort)")
parser.add_argument(
"--prefix", metavar="<spool file prefix>",
help="spool filename prefix (eg: unified2.log)")
parser.add_argument(
"--bookmark", action="store_true", default=False,
help="enable bookmarking")
parser.add_argument(
"--follow", action="store_true", default=False,
help="follow files/continuous mode (spool mode only)")
parser.add_argument(
"--delete", action="store_true", default=False,
help="delete spool files")
parser.add_argument(
"--output", metavar="<filename>",
help="output filename (eg: /var/log/snort/alerts.json")
parser.add_argument(
"--stdout", action="store_true", default=False,
help="also log to stdout if --output is a file")
parser.add_argument(
"filenames", nargs="*")
args = parser.parse_args()
if args.snort_conf:
load_from_snort_conf(args.snort_conf, classmap, msgmap)
if args.classification_path:
classmap.load_from_file(
open(os.path.expanduser(args.classification_path)))
if args.genmsgmap_path:
msgmap.load_generator_map(open(os.path.expanduser(args.genmsgmap_path)))
if args.sidmsgmap_path:
msgmap.load_signature_map(open(os.path.expanduser(args.sidmsgmap_path)))
if msgmap.size() == 0:
LOG.warn("WARNING: No alert message map entries loaded.")
else:
LOG.info("Loaded %s rule message map entries.", msgmap.size())
if classmap.size() == 0:
LOG.warn("WARNING: No classifications loaded.")
else:
LOG.info("Loaded %s classifications.", classmap.size())
eve_filter = EveFilter(msgmap, classmap)
outputs = []
if args.output:
outputs.append(OutputWrapper(args.output))
if args.stdout:
outputs.append(OutputWrapper("-", sys.stdout))
else:
outputs.append(OutputWrapper("-", sys.stdout))
if args.directory and args.prefix:
reader = unified2.SpoolEventReader(
directory=args.directory,
prefix=args.prefix,
follow=args.follow,
delete=args.delete,
bookmark=args.bookmark)
elif args.filenames:
reader = unified2.FileEventReader(*args.filenames)
else:
print("nothing to do.")
return
for event in reader:
try:
encoded = json.dumps(eve_filter.filter(event), encoding="latin-1")
for out in outputs:
out.write(encoded)
except Exception as err:
LOG.error("Failed to encode record as JSON: %s: %s" % (
str(err), str(event)))
if __name__ == "__main__":
sys.exit(main()) | #! /usr/bin/env python
# | random_line_split |
|
executor.go | package executor
import (
"encoding/json"
"fmt"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
"github.com/GoogleCloudPlatform/kubernetes/pkg/watch"
"github.com/fsouza/go-dockerclient"
"github.com/gogo/protobuf/proto"
log "github.com/golang/glog"
bindings "github.com/mesos/mesos-go/executor"
mesos "github.com/mesos/mesos-go/mesosproto"
mutil "github.com/mesos/mesos-go/mesosutil"
"github.com/mesosphere/kubernetes-mesos/pkg/executor/messages"
"github.com/mesosphere/kubernetes-mesos/pkg/scheduler/meta"
"gopkg.in/v2/yaml"
)
const (
containerPollTime = 300 * time.Millisecond
launchGracePeriod = 5 * time.Minute
)
type stateType int32
const (
disconnectedState stateType = iota
connectedState
doneState
)
type kuberTask struct {
mesosTaskInfo *mesos.TaskInfo
podName string
}
// KubernetesExecutor is a mesos executor that runs pods
// in a minion machine.
type KubernetesExecutor struct {
kl *kubelet.Kubelet // the kubelet instance.
updateChan chan<- interface{}
state stateType
tasks map[string]*kuberTask
pods map[string]*api.BoundPod
lock sync.RWMutex
sourcename string
client *client.Client
events <-chan watch.Event
done chan struct{} // signals shutdown
outgoing chan func() (mesos.Status, error)
dockerClient dockertools.DockerInterface
}
func (k *KubernetesExecutor) getState() stateType {
return stateType(atomic.LoadInt32((*int32)(&k.state)))
}
func (k *KubernetesExecutor) isConnected() bool {
return connectedState == k.getState()
}
func (k *KubernetesExecutor) swapState(from, to stateType) bool {
return atomic.CompareAndSwapInt32((*int32)(&k.state), int32(from), int32(to))
}
// New creates a new kubernetes executor.
func New(kl *kubelet.Kubelet, ch chan<- interface{}, ns string, cl *client.Client, w watch.Interface, dc dockertools.DockerInterface) *KubernetesExecutor {
//TODO(jdef) do something real with these events..
events := w.ResultChan()
if events != nil {
go func() {
for e := range events {
// e ~= watch.Event { ADDED, *api.Event }
log.V(1).Info(e)
}
}()
}
k := &KubernetesExecutor{
kl: kl,
updateChan: ch,
state: disconnectedState,
tasks: make(map[string]*kuberTask),
pods: make(map[string]*api.BoundPod),
sourcename: ns,
client: cl,
events: events,
done: make(chan struct{}),
outgoing: make(chan func() (mesos.Status, error), 1024),
dockerClient: dc,
}
go k.sendLoop()
return k
}
func (k *KubernetesExecutor) isDone() bool {
select {
case <-k.done:
return true
default:
return false
}
}
// Registered is called when the executor is successfully registered with the slave.
func (k *KubernetesExecutor) Registered(driver bindings.ExecutorDriver,
executorInfo *mesos.ExecutorInfo, frameworkInfo *mesos.FrameworkInfo, slaveInfo *mesos.SlaveInfo) {
if k.isDone() {
return
}
log.Infof("Executor %v of framework %v registered with slave %v\n",
executorInfo, frameworkInfo, slaveInfo)
if !k.swapState(disconnectedState, connectedState) {
//programming error?
panic("already connected?!")
}
}
// Reregistered is called when the executor is successfully re-registered with the slave.
// This can happen when the slave fails over.
func (k *KubernetesExecutor) Reregistered(driver bindings.ExecutorDriver, slaveInfo *mesos.SlaveInfo) {
if k.isDone() {
return
}
log.Infof("Reregistered with slave %v\n", slaveInfo)
if !k.swapState(disconnectedState, connectedState) {
//programming error?
panic("already connected?!")
}
}
// Disconnected is called when the executor is disconnected with the slave.
func (k *KubernetesExecutor) Disconnected(driver bindings.ExecutorDriver) {
if k.isDone() {
return
}
log.Infof("Slave is disconnected\n")
if !k.swapState(connectedState, disconnectedState) {
//programming error?
panic("already disconnected?!")
}
}
// LaunchTask is called when the executor receives a request to launch a task.
func (k *KubernetesExecutor) LaunchTask(driver bindings.ExecutorDriver, taskInfo *mesos.TaskInfo) {
if k.isDone() {
return
}
log.Infof("Launch task %v\n", taskInfo)
if !k.isConnected() {
log.Warningf("Ignore launch task because the executor is disconnected\n")
k.sendStatus(driver, newStatus(taskInfo.GetTaskId(), mesos.TaskState_TASK_FAILED,
messages.ExecutorUnregistered))
return
}
var pod api.BoundPod
if err := yaml.Unmarshal(taskInfo.GetData(), &pod); err != nil {
log.Warningf("Failed to extract yaml data from the taskInfo.data %v\n", err)
k.sendStatus(driver, newStatus(taskInfo.GetTaskId(), mesos.TaskState_TASK_FAILED,
messages.UnmarshalTaskDataFailure))
return
}
k.lock.Lock()
defer k.lock.Unlock()
taskId := taskInfo.GetTaskId().GetValue()
if _, found := k.tasks[taskId]; found {
log.Warningf("task already launched\n")
// Not to send back TASK_RUNNING here, because
// may be duplicated messages or duplicated task id.
return
}
// remember this task so that:
// (a) we ignore future launches for it
// (b) we have a record of it so that we can kill it if needed
// (c) we're leaving podName == "" for now, indicates we don't need to delete containers
k.tasks[taskId] = &kuberTask{
mesosTaskInfo: taskInfo,
}
go k.launchTask(driver, taskId, &pod)
}
func (k *KubernetesExecutor) getPidInfo(name string) (api.PodStatus, error) {
return k.kl.GetPodStatus(name, "")
}
// async continuation of LaunchTask
func (k *KubernetesExecutor) launchTask(driver bindings.ExecutorDriver, taskId string, pod *api.BoundPod) {
//HACK(jdef): cloned binding construction from k8s plugin/pkg/scheduler/scheduler.go
binding := &api.Binding{
ObjectMeta: api.ObjectMeta{
Namespace: pod.Namespace,
Annotations: make(map[string]string),
},
PodID: pod.Name,
Host: pod.Annotations[meta.BindingHostKey],
}
// forward the bindings that the scheduler wants to apply
for k, v := range pod.Annotations {
binding.Annotations[k] = v
}
log.Infof("Binding '%v' to '%v' with annotations %+v...", binding.PodID, binding.Host, binding.Annotations)
ctx := api.WithNamespace(api.NewDefaultContext(), binding.Namespace)
err := k.client.Post().Namespace(api.NamespaceValue(ctx)).Resource("bindings").Body(binding).Do().Error()
if err != nil {
k.sendStatus(driver, newStatus(mutil.NewTaskID(taskId), mesos.TaskState_TASK_FAILED,
messages.CreateBindingFailure))
return
}
podFullName := kubelet.GetPodFullName(&api.BoundPod{
ObjectMeta: api.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Annotations: map[string]string{kubelet.ConfigSourceAnnotationKey: k.sourcename},
},
})
// allow a recently failed-over scheduler the chance to recover the task/pod binding:
// it may have failed and recovered before the apiserver is able to report the updated
// binding information. replays of this status event will signal to the scheduler that
// the apiserver should be up-to-date.
data, err := json.Marshal(api.PodStatusResult{
ObjectMeta: api.ObjectMeta{
Name: podFullName,
SelfLink: "/podstatusresult",
},
})
if err != nil {
log.Errorf("failed to marshal pod status result: %v", err)
k.sendStatus(driver, newStatus(mutil.NewTaskID(taskId), mesos.TaskState_TASK_FAILED, err.Error()))
return
}
k.lock.Lock()
defer k.lock.Unlock()
// Add the task.
task, found := k.tasks[taskId]
if !found {
log.V(1).Infof("task %v no longer on record, probably killed, aborting launch sequence - reporting lost", taskId)
k.reportLostTask(driver, taskId, messages.LaunchTaskFailed)
return
}
// from here on, we need to delete containers associated with the task
// upon it going into a terminal state
task.podName = podFullName
k.pods[podFullName] = pod
// Send the pod updates to the channel.
update := kubelet.PodUpdate{Op: kubelet.SET}
for _, p := range k.pods |
k.updateChan <- update
statusUpdate := &mesos.TaskStatus{
TaskId: mutil.NewTaskID(taskId),
State: mesos.TaskState_TASK_STARTING.Enum(),
Message: proto.String(messages.CreateBindingSuccess),
Data: data,
}
k.sendStatus(driver, statusUpdate)
// Delay reporting 'task running' until container is up.
go k._launchTask(driver, taskId, podFullName)
}
func (k *KubernetesExecutor) _launchTask(driver bindings.ExecutorDriver, taskId, podFullName string) {
expired := make(chan struct{})
time.AfterFunc(launchGracePeriod, func() { close(expired) })
getMarshalledInfo := func() (data []byte, cancel bool) {
// potentially long call..
if podStatus, err := k.getPidInfo(podFullName); err == nil {
select {
case <-expired:
cancel = true
default:
k.lock.Lock()
defer k.lock.Unlock()
if _, found := k.tasks[taskId]; !found {
// don't bother with the pod status if the task is already gone
cancel = true
break
} else if podStatus.Phase != api.PodRunning {
// avoid sending back a running status before it's really running
break
}
log.V(2).Infof("Found pod status: '%v'", podStatus)
result := api.PodStatusResult{
ObjectMeta: api.ObjectMeta{
Name: podFullName,
SelfLink: "/podstatusresult",
},
Status: podStatus,
}
if data, err = json.Marshal(result); err != nil {
log.Errorf("failed to marshal pod status result: %v", err)
}
}
}
return
}
waitForRunningPod:
for {
select {
case <-expired:
log.Warningf("Launch expired grace period of '%v'", launchGracePeriod)
break waitForRunningPod
case <-time.After(containerPollTime):
if data, cancel := getMarshalledInfo(); cancel {
break waitForRunningPod
} else if data == nil {
continue waitForRunningPod
} else {
k.lock.Lock()
defer k.lock.Unlock()
if _, found := k.tasks[taskId]; !found {
goto reportLost
}
statusUpdate := &mesos.TaskStatus{
TaskId: mutil.NewTaskID(taskId),
State: mesos.TaskState_TASK_RUNNING.Enum(),
Message: proto.String(fmt.Sprintf("pod-running:%s", podFullName)),
Data: data,
}
k.sendStatus(driver, statusUpdate)
// continue to monitor the health of the pod
go k.__launchTask(driver, taskId, podFullName)
return
}
}
}
k.lock.Lock()
defer k.lock.Unlock()
reportLost:
k.reportLostTask(driver, taskId, messages.LaunchTaskFailed)
}
func (k *KubernetesExecutor) __launchTask(driver bindings.ExecutorDriver, taskId, podFullName string) {
// TODO(nnielsen): Monitor health of pod and report if lost.
// Should we also allow this to fail a couple of times before reporting lost?
// What if the docker daemon is restarting and we can't connect, but it's
// going to bring the pods back online as soon as it restarts?
knownPod := func() bool {
_, err := k.getPidInfo(podFullName)
return err == nil
}
// Wait for the pod to go away and stop monitoring once it does
// TODO (jdefelice) replace with an /events watch?
for {
time.Sleep(containerPollTime)
if k.checkForLostPodTask(driver, taskId, knownPod) {
return
}
}
}
// Intended to be executed as part of the pod monitoring loop, this fn (ultimately) checks with Docker
// whether the pod is running. It will only return false if the task is still registered and the pod is
// registered in Docker. Otherwise it returns true. If there's still a task record on file, but no pod
// in Docker, then we'll also send a TASK_LOST event.
func (k *KubernetesExecutor) checkForLostPodTask(driver bindings.ExecutorDriver, taskId string, isKnownPod func() bool) bool {
// TODO (jdefelice) don't send false alarms for deleted pods (KILLED tasks)
k.lock.Lock()
defer k.lock.Unlock()
// TODO(jdef) we should really consider k.pods here, along with what docker is reporting, since the kubelet
// may constantly attempt to instantiate a pod as long as it's in the pod state that we're handing to it.
// otherwise, we're probably reporting a TASK_LOST prematurely. Should probably consult RestartPolicy to
// determine appropriate behavior. Should probably also gracefully handle docker daemon restarts.
if _, ok := k.tasks[taskId]; ok {
if isKnownPod() {
return false
} else {
log.Warningf("Detected lost pod, reporting lost task %v", taskId)
k.reportLostTask(driver, taskId, messages.ContainersDisappeared)
}
} else {
log.V(2).Infof("Task %v no longer registered, stop monitoring for lost pods", taskId)
}
return true
}
// KillTask is called when the executor receives a request to kill a task.
func (k *KubernetesExecutor) KillTask(driver bindings.ExecutorDriver, taskId *mesos.TaskID) {
if k.isDone() {
return
}
log.Infof("Kill task %v\n", taskId)
if !k.isConnected() {
//TODO(jdefelice) sent TASK_LOST here?
log.Warningf("Ignore kill task because the executor is disconnected\n")
return
}
k.lock.Lock()
defer k.lock.Unlock()
k.killPodForTask(driver, taskId.GetValue(), messages.TaskKilled)
}
// Kills the pod associated with the given task. Assumes that the caller is locking around
// pod and task storage.
func (k *KubernetesExecutor) killPodForTask(driver bindings.ExecutorDriver, tid, reason string) {
k.removePodTask(driver, tid, reason, mesos.TaskState_TASK_KILLED)
}
// Reports a lost task to the slave and updates internal task and pod tracking state.
// Assumes that the caller is locking around pod and task state.
func (k *KubernetesExecutor) reportLostTask(driver bindings.ExecutorDriver, tid, reason string) {
k.removePodTask(driver, tid, reason, mesos.TaskState_TASK_LOST)
}
// removePodTask removes the task and its pod from internal tracking state and reports the given terminal task state.
func (k *KubernetesExecutor) removePodTask(driver bindings.ExecutorDriver, tid, reason string, state mesos.TaskState) {
task, ok := k.tasks[tid]
if !ok {
log.V(1).Infof("Failed to remove task, unknown task %v\n", tid)
return
}
delete(k.tasks, tid)
pid := task.podName
if _, found := k.pods[pid]; !found {
log.Warningf("Cannot remove unknown pod %v for task %v", pid, tid)
} else {
log.V(2).Infof("deleting pod %v for task %v", pid, tid)
delete(k.pods, pid)
// Send the pod updates to the channel.
update := kubelet.PodUpdate{Op: kubelet.SET}
for _, p := range k.pods {
update.Pods = append(update.Pods, *p)
}
k.updateChan <- update
}
// TODO(jdef): ensure that the update propagates, perhaps return a signal chan?
k.sendStatus(driver, newStatus(mutil.NewTaskID(tid), state, reason))
}
// FrameworkMessage is called when the framework sends some message to the executor
func (k *KubernetesExecutor) FrameworkMessage(driver bindings.ExecutorDriver, message string) {
if k.isDone() {
return
}
if !k.isConnected() {
log.Warningf("Ignore framework message because the executor is disconnected\n")
return
}
log.Infof("Receives message from framework %v\n", message)
//TODO(jdef) master reported a lost task, reconcile this! @see scheduler.go:handleTaskLost
if strings.HasPrefix("task-lost:", message) && len(message) > 10 {
taskId := message[10:]
if taskId != "" {
// clean up pod state
k.reportLostTask(driver, taskId, messages.TaskLostAck)
}
}
}
// Shutdown is called when the executor receives a shutdown request.
func (k *KubernetesExecutor) Shutdown(driver bindings.ExecutorDriver) {
if k.isDone() {
return
}
close(k.done)
log.Infoln("Shutdown the executor")
defer func() {
for !k.swapState(k.getState(), doneState) {
}
}()
func() {
k.lock.Lock()
defer k.lock.Unlock()
k.tasks = map[string]*kuberTask{}
}()
// according to docs, mesos will generate TASK_LOST updates for us
// if needed, so don't take extra time to do that here.
// also, clear the pod configuration so that after we issue our Kill
// kubernetes doesn't start spinning things up before we exit.
k.updateChan <- kubelet.PodUpdate{Op: kubelet.SET}
KillKubeletContainers(k.dockerClient)
}
// Destroy existing k8s containers
func KillKubeletContainers(dockerClient dockertools.DockerInterface) {
if containers, err := dockertools.GetKubeletDockerContainers(dockerClient, true); err == nil {
opts := docker.RemoveContainerOptions{
RemoveVolumes: true,
Force: true,
}
for _, container := range containers {
opts.ID = container.ID
log.V(2).Infof("Removing container: %v", opts.ID)
if err := dockerClient.RemoveContainer(opts); err != nil {
log.Warning(err)
}
}
} else {
log.Warningf("Failed to list kubelet docker containers: %v", err)
}
}
// Error is called when some error happens.
func (k *KubernetesExecutor) Error(driver bindings.ExecutorDriver, message string) {
log.Errorln(message)
}
func newStatus(taskId *mesos.TaskID, state mesos.TaskState, message string) *mesos.TaskStatus {
return &mesos.TaskStatus{
TaskId: taskId,
State: &state,
Message: proto.String(message),
}
}
func (k *KubernetesExecutor) sendStatus(driver bindings.ExecutorDriver, status *mesos.TaskStatus) {
select {
case <-k.done:
default:
k.outgoing <- func() (mesos.Status, error) { return driver.SendStatusUpdate(status) }
}
}
func (k *KubernetesExecutor) sendFrameworkMessage(driver bindings.ExecutorDriver, msg string) {
select {
case <-k.done:
default:
k.outgoing <- func() (mesos.Status, error) { return driver.SendFrameworkMessage(msg) }
}
}
func (k *KubernetesExecutor) sendLoop() {
defer log.V(1).Info("sender loop exiting")
for {
select {
case <-k.done:
return
default:
if !k.isConnected() {
select {
case <-k.done:
case <-time.After(1 * time.Second):
}
continue
}
sender, ok := <-k.outgoing
if !ok {
// programming error
panic("someone closed the outgoing channel")
}
if status, err := sender(); err == nil {
continue
} else {
log.Error(err)
if status == mesos.Status_DRIVER_ABORTED {
return
}
}
// attempt to re-queue the sender
select {
case <-k.done:
case k.outgoing <- sender:
}
}
}
}
| {
update.Pods = append(update.Pods, *p)
} | conditional_block |
executor.go | package executor
import (
"encoding/json"
"fmt"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
"github.com/GoogleCloudPlatform/kubernetes/pkg/watch"
"github.com/fsouza/go-dockerclient"
"github.com/gogo/protobuf/proto"
log "github.com/golang/glog"
bindings "github.com/mesos/mesos-go/executor"
mesos "github.com/mesos/mesos-go/mesosproto"
mutil "github.com/mesos/mesos-go/mesosutil"
"github.com/mesosphere/kubernetes-mesos/pkg/executor/messages"
"github.com/mesosphere/kubernetes-mesos/pkg/scheduler/meta"
"gopkg.in/v2/yaml"
)
const (
containerPollTime = 300 * time.Millisecond
launchGracePeriod = 5 * time.Minute
)
type stateType int32
const (
disconnectedState stateType = iota
connectedState
doneState
)
type kuberTask struct {
mesosTaskInfo *mesos.TaskInfo
podName string
}
// KubernetesExecutor is a mesos executor that runs pods
// in a minion machine.
type KubernetesExecutor struct {
kl *kubelet.Kubelet // the kubelet instance.
updateChan chan<- interface{}
state stateType
tasks map[string]*kuberTask
pods map[string]*api.BoundPod
lock sync.RWMutex
sourcename string
client *client.Client
events <-chan watch.Event
done chan struct{} // signals shutdown
outgoing chan func() (mesos.Status, error)
dockerClient dockertools.DockerInterface
}
func (k *KubernetesExecutor) getState() stateType {
return stateType(atomic.LoadInt32((*int32)(&k.state)))
}
func (k *KubernetesExecutor) isConnected() bool {
return connectedState == k.getState()
}
func (k *KubernetesExecutor) swapState(from, to stateType) bool {
return atomic.CompareAndSwapInt32((*int32)(&k.state), int32(from), int32(to))
}
// New creates a new kubernetes executor.
func New(kl *kubelet.Kubelet, ch chan<- interface{}, ns string, cl *client.Client, w watch.Interface, dc dockertools.DockerInterface) *KubernetesExecutor {
//TODO(jdef) do something real with these events..
events := w.ResultChan()
if events != nil {
go func() {
for e := range events {
// e ~= watch.Event { ADDED, *api.Event }
log.V(1).Info(e)
}
}()
}
k := &KubernetesExecutor{
kl: kl,
updateChan: ch,
state: disconnectedState,
tasks: make(map[string]*kuberTask),
pods: make(map[string]*api.BoundPod),
sourcename: ns,
client: cl,
events: events,
done: make(chan struct{}),
outgoing: make(chan func() (mesos.Status, error), 1024),
dockerClient: dc,
}
go k.sendLoop()
return k
}
func (k *KubernetesExecutor) isDone() bool {
select {
case <-k.done:
return true
default:
return false
}
}
// Registered is called when the executor is successfully registered with the slave.
func (k *KubernetesExecutor) Registered(driver bindings.ExecutorDriver,
executorInfo *mesos.ExecutorInfo, frameworkInfo *mesos.FrameworkInfo, slaveInfo *mesos.SlaveInfo) {
if k.isDone() {
return
}
log.Infof("Executor %v of framework %v registered with slave %v\n",
executorInfo, frameworkInfo, slaveInfo)
if !k.swapState(disconnectedState, connectedState) {
//programming error?
panic("already connected?!")
}
}
// Reregistered is called when the executor is successfully re-registered with the slave.
// This can happen when the slave fails over.
func (k *KubernetesExecutor) Reregistered(driver bindings.ExecutorDriver, slaveInfo *mesos.SlaveInfo) {
if k.isDone() {
return
}
log.Infof("Reregistered with slave %v\n", slaveInfo)
if !k.swapState(disconnectedState, connectedState) {
//programming error?
panic("already connected?!")
}
}
// Disconnected is called when the executor is disconnected with the slave.
func (k *KubernetesExecutor) Disconnected(driver bindings.ExecutorDriver) {
if k.isDone() {
return
}
log.Infof("Slave is disconnected\n")
if !k.swapState(connectedState, disconnectedState) {
//programming error?
panic("already disconnected?!")
}
}
// LaunchTask is called when the executor receives a request to launch a task.
func (k *KubernetesExecutor) LaunchTask(driver bindings.ExecutorDriver, taskInfo *mesos.TaskInfo) {
if k.isDone() {
return
}
log.Infof("Launch task %v\n", taskInfo)
if !k.isConnected() {
log.Warningf("Ignore launch task because the executor is disconnected\n")
k.sendStatus(driver, newStatus(taskInfo.GetTaskId(), mesos.TaskState_TASK_FAILED,
messages.ExecutorUnregistered))
return
}
var pod api.BoundPod
if err := yaml.Unmarshal(taskInfo.GetData(), &pod); err != nil {
log.Warningf("Failed to extract yaml data from the taskInfo.data %v\n", err)
k.sendStatus(driver, newStatus(taskInfo.GetTaskId(), mesos.TaskState_TASK_FAILED,
messages.UnmarshalTaskDataFailure))
return
}
k.lock.Lock()
defer k.lock.Unlock()
taskId := taskInfo.GetTaskId().GetValue()
if _, found := k.tasks[taskId]; found {
log.Warningf("task already launched\n")
// Not to send back TASK_RUNNING here, because
// may be duplicated messages or duplicated task id.
return
}
// remember this task so that:
// (a) we ignore future launches for it
// (b) we have a record of it so that we can kill it if needed
// (c) we're leaving podName == "" for now, indicates we don't need to delete containers
k.tasks[taskId] = &kuberTask{
mesosTaskInfo: taskInfo,
}
go k.launchTask(driver, taskId, &pod)
}
func (k *KubernetesExecutor) getPidInfo(name string) (api.PodStatus, error) {
return k.kl.GetPodStatus(name, "")
}
// async continuation of LaunchTask
func (k *KubernetesExecutor) launchTask(driver bindings.ExecutorDriver, taskId string, pod *api.BoundPod) {
//HACK(jdef): cloned binding construction from k8s plugin/pkg/scheduler/scheduler.go
binding := &api.Binding{
ObjectMeta: api.ObjectMeta{
Namespace: pod.Namespace,
Annotations: make(map[string]string),
},
PodID: pod.Name,
Host: pod.Annotations[meta.BindingHostKey],
}
// forward the bindings that the scheduler wants to apply
for k, v := range pod.Annotations {
binding.Annotations[k] = v
}
log.Infof("Binding '%v' to '%v' with annotations %+v...", binding.PodID, binding.Host, binding.Annotations)
ctx := api.WithNamespace(api.NewDefaultContext(), binding.Namespace)
err := k.client.Post().Namespace(api.NamespaceValue(ctx)).Resource("bindings").Body(binding).Do().Error()
if err != nil {
k.sendStatus(driver, newStatus(mutil.NewTaskID(taskId), mesos.TaskState_TASK_FAILED,
messages.CreateBindingFailure))
return
}
podFullName := kubelet.GetPodFullName(&api.BoundPod{
ObjectMeta: api.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Annotations: map[string]string{kubelet.ConfigSourceAnnotationKey: k.sourcename},
},
})
// allow a recently failed-over scheduler the chance to recover the task/pod binding:
// it may have failed and recovered before the apiserver is able to report the updated
// binding information. replays of this status event will signal to the scheduler that
// the apiserver should be up-to-date.
data, err := json.Marshal(api.PodStatusResult{
ObjectMeta: api.ObjectMeta{
Name: podFullName,
SelfLink: "/podstatusresult",
},
})
if err != nil {
log.Errorf("failed to marshal pod status result: %v", err)
k.sendStatus(driver, newStatus(mutil.NewTaskID(taskId), mesos.TaskState_TASK_FAILED, err.Error()))
return
}
k.lock.Lock()
defer k.lock.Unlock()
// Add the task.
task, found := k.tasks[taskId]
if !found {
log.V(1).Infof("task %v no longer on record, probably killed, aborting launch sequence - reporting lost", taskId)
k.reportLostTask(driver, taskId, messages.LaunchTaskFailed)
return
}
// from here on, we need to delete containers associated with the task
// upon it going into a terminal state
task.podName = podFullName
k.pods[podFullName] = pod
// Send the pod updates to the channel.
update := kubelet.PodUpdate{Op: kubelet.SET}
for _, p := range k.pods {
update.Pods = append(update.Pods, *p)
}
k.updateChan <- update
statusUpdate := &mesos.TaskStatus{
TaskId: mutil.NewTaskID(taskId),
State: mesos.TaskState_TASK_STARTING.Enum(),
Message: proto.String(messages.CreateBindingSuccess),
Data: data,
}
k.sendStatus(driver, statusUpdate)
// Delay reporting 'task running' until container is up.
go k._launchTask(driver, taskId, podFullName)
}
func (k *KubernetesExecutor) _launchTask(driver bindings.ExecutorDriver, taskId, podFullName string) {
expired := make(chan struct{})
time.AfterFunc(launchGracePeriod, func() { close(expired) })
getMarshalledInfo := func() (data []byte, cancel bool) {
// potentially long call..
if podStatus, err := k.getPidInfo(podFullName); err == nil {
select {
case <-expired:
cancel = true
default:
k.lock.Lock()
defer k.lock.Unlock()
if _, found := k.tasks[taskId]; !found {
// don't bother with the pod status if the task is already gone
cancel = true
break
} else if podStatus.Phase != api.PodRunning {
// avoid sending back a running status before it's really running
break
}
log.V(2).Infof("Found pod status: '%v'", podStatus)
result := api.PodStatusResult{
ObjectMeta: api.ObjectMeta{
Name: podFullName,
SelfLink: "/podstatusresult",
},
Status: podStatus,
}
if data, err = json.Marshal(result); err != nil {
log.Errorf("failed to marshal pod status result: %v", err)
}
}
}
return
}
waitForRunningPod:
for {
select {
case <-expired:
log.Warningf("Launch expired grace period of '%v'", launchGracePeriod)
break waitForRunningPod
case <-time.After(containerPollTime):
if data, cancel := getMarshalledInfo(); cancel {
break waitForRunningPod
} else if data == nil {
continue waitForRunningPod
} else {
k.lock.Lock()
defer k.lock.Unlock()
if _, found := k.tasks[taskId]; !found {
goto reportLost
}
statusUpdate := &mesos.TaskStatus{
TaskId: mutil.NewTaskID(taskId),
State: mesos.TaskState_TASK_RUNNING.Enum(),
Message: proto.String(fmt.Sprintf("pod-running:%s", podFullName)),
Data: data,
}
k.sendStatus(driver, statusUpdate)
// continue to monitor the health of the pod
go k.__launchTask(driver, taskId, podFullName)
return
}
}
}
k.lock.Lock()
defer k.lock.Unlock()
reportLost:
k.reportLostTask(driver, taskId, messages.LaunchTaskFailed)
}
func (k *KubernetesExecutor) __launchTask(driver bindings.ExecutorDriver, taskId, podFullName string) {
// TODO(nnielsen): Monitor health of pod and report if lost.
// Should we also allow this to fail a couple of times before reporting lost?
// What if the docker daemon is restarting and we can't connect, but it's
// going to bring the pods back online as soon as it restarts?
knownPod := func() bool {
_, err := k.getPidInfo(podFullName)
return err == nil
}
// Wait for the pod to go away and stop monitoring once it does
// TODO (jdefelice) replace with an /events watch?
for {
time.Sleep(containerPollTime)
if k.checkForLostPodTask(driver, taskId, knownPod) {
return
}
}
}
// Intended to be executed as part of the pod monitoring loop, this fn (ultimately) checks with Docker
// whether the pod is running. It will only return false if the task is still registered and the pod is
// registered in Docker. Otherwise it returns true. If there's still a task record on file, but no pod
// in Docker, then we'll also send a TASK_LOST event.
func (k *KubernetesExecutor) checkForLostPodTask(driver bindings.ExecutorDriver, taskId string, isKnownPod func() bool) bool {
// TODO (jdefelice) don't send false alarms for deleted pods (KILLED tasks)
k.lock.Lock()
defer k.lock.Unlock()
// TODO(jdef) we should really consider k.pods here, along with what docker is reporting, since the kubelet
// may constantly attempt to instantiate a pod as long as it's in the pod state that we're handing to it.
// otherwise, we're probably reporting a TASK_LOST prematurely. Should probably consult RestartPolicy to
// determine appropriate behavior. Should probably also gracefully handle docker daemon restarts.
if _, ok := k.tasks[taskId]; ok {
if isKnownPod() {
return false
} else {
log.Warningf("Detected lost pod, reporting lost task %v", taskId)
k.reportLostTask(driver, taskId, messages.ContainersDisappeared)
} | }
// KillTask is called when the executor receives a request to kill a task.
func (k *KubernetesExecutor) KillTask(driver bindings.ExecutorDriver, taskId *mesos.TaskID) {
if k.isDone() {
return
}
log.Infof("Kill task %v\n", taskId)
if !k.isConnected() {
//TODO(jdefelice) sent TASK_LOST here?
log.Warningf("Ignore kill task because the executor is disconnected\n")
return
}
k.lock.Lock()
defer k.lock.Unlock()
k.killPodForTask(driver, taskId.GetValue(), messages.TaskKilled)
}
// Kills the pod associated with the given task. Assumes that the caller is locking around
// pod and task storage.
func (k *KubernetesExecutor) killPodForTask(driver bindings.ExecutorDriver, tid, reason string) {
k.removePodTask(driver, tid, reason, mesos.TaskState_TASK_KILLED)
}
// Reports a lost task to the slave and updates internal task and pod tracking state.
// Assumes that the caller is locking around pod and task state.
func (k *KubernetesExecutor) reportLostTask(driver bindings.ExecutorDriver, tid, reason string) {
k.removePodTask(driver, tid, reason, mesos.TaskState_TASK_LOST)
}
// removePodTask removes the task and its pod from internal tracking state and reports the given terminal task state.
func (k *KubernetesExecutor) removePodTask(driver bindings.ExecutorDriver, tid, reason string, state mesos.TaskState) {
task, ok := k.tasks[tid]
if !ok {
log.V(1).Infof("Failed to remove task, unknown task %v\n", tid)
return
}
delete(k.tasks, tid)
pid := task.podName
if _, found := k.pods[pid]; !found {
log.Warningf("Cannot remove unknown pod %v for task %v", pid, tid)
} else {
log.V(2).Infof("deleting pod %v for task %v", pid, tid)
delete(k.pods, pid)
// Send the pod updates to the channel.
update := kubelet.PodUpdate{Op: kubelet.SET}
for _, p := range k.pods {
update.Pods = append(update.Pods, *p)
}
k.updateChan <- update
}
// TODO(jdef): ensure that the update propagates, perhaps return a signal chan?
k.sendStatus(driver, newStatus(mutil.NewTaskID(tid), state, reason))
}
// FrameworkMessage is called when the framework sends some message to the executor
func (k *KubernetesExecutor) FrameworkMessage(driver bindings.ExecutorDriver, message string) {
if k.isDone() {
return
}
if !k.isConnected() {
log.Warningf("Ignore framework message because the executor is disconnected\n")
return
}
log.Infof("Receives message from framework %v\n", message)
//TODO(jdef) master reported a lost task, reconcile this! @see scheduler.go:handleTaskLost
if strings.HasPrefix("task-lost:", message) && len(message) > 10 {
taskId := message[10:]
if taskId != "" {
// clean up pod state
k.reportLostTask(driver, taskId, messages.TaskLostAck)
}
}
}
// Shutdown is called when the executor receives a shutdown request.
func (k *KubernetesExecutor) Shutdown(driver bindings.ExecutorDriver) {
if k.isDone() {
return
}
close(k.done)
log.Infoln("Shutdown the executor")
defer func() {
for !k.swapState(k.getState(), doneState) {
}
}()
func() {
k.lock.Lock()
defer k.lock.Unlock()
k.tasks = map[string]*kuberTask{}
}()
// according to docs, mesos will generate TASK_LOST updates for us
// if needed, so don't take extra time to do that here.
// also, clear the pod configuration so that after we issue our Kill
// kubernetes doesn't start spinning things up before we exit.
k.updateChan <- kubelet.PodUpdate{Op: kubelet.SET}
KillKubeletContainers(k.dockerClient)
}
// Destroy existing k8s containers
func KillKubeletContainers(dockerClient dockertools.DockerInterface) {
if containers, err := dockertools.GetKubeletDockerContainers(dockerClient, true); err == nil {
opts := docker.RemoveContainerOptions{
RemoveVolumes: true,
Force: true,
}
for _, container := range containers {
opts.ID = container.ID
log.V(2).Infof("Removing container: %v", opts.ID)
if err := dockerClient.RemoveContainer(opts); err != nil {
log.Warning(err)
}
}
} else {
log.Warningf("Failed to list kubelet docker containers: %v", err)
}
}
// Error is called when some error happens.
func (k *KubernetesExecutor) Error(driver bindings.ExecutorDriver, message string) {
log.Errorln(message)
}
func newStatus(taskId *mesos.TaskID, state mesos.TaskState, message string) *mesos.TaskStatus {
return &mesos.TaskStatus{
TaskId: taskId,
State: &state,
Message: proto.String(message),
}
}
func (k *KubernetesExecutor) sendStatus(driver bindings.ExecutorDriver, status *mesos.TaskStatus) {
select {
case <-k.done:
default:
k.outgoing <- func() (mesos.Status, error) { return driver.SendStatusUpdate(status) }
}
}
func (k *KubernetesExecutor) sendFrameworkMessage(driver bindings.ExecutorDriver, msg string) {
select {
case <-k.done:
default:
k.outgoing <- func() (mesos.Status, error) { return driver.SendFrameworkMessage(msg) }
}
}
func (k *KubernetesExecutor) sendLoop() {
defer log.V(1).Info("sender loop exiting")
for {
select {
case <-k.done:
return
default:
if !k.isConnected() {
select {
case <-k.done:
case <-time.After(1 * time.Second):
}
continue
}
sender, ok := <-k.outgoing
if !ok {
// programming error
panic("someone closed the outgoing channel")
}
if status, err := sender(); err == nil {
continue
} else {
log.Error(err)
if status == mesos.Status_DRIVER_ABORTED {
return
}
}
// attempt to re-queue the sender
select {
case <-k.done:
case k.outgoing <- sender:
}
}
}
} | } else {
log.V(2).Infof("Task %v no longer registered, stop monitoring for lost pods", taskId)
}
return true | random_line_split |
executor.go | package executor
import (
"encoding/json"
"fmt"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
"github.com/GoogleCloudPlatform/kubernetes/pkg/watch"
"github.com/fsouza/go-dockerclient"
"github.com/gogo/protobuf/proto"
log "github.com/golang/glog"
bindings "github.com/mesos/mesos-go/executor"
mesos "github.com/mesos/mesos-go/mesosproto"
mutil "github.com/mesos/mesos-go/mesosutil"
"github.com/mesosphere/kubernetes-mesos/pkg/executor/messages"
"github.com/mesosphere/kubernetes-mesos/pkg/scheduler/meta"
"gopkg.in/v2/yaml"
)
const (
containerPollTime = 300 * time.Millisecond
launchGracePeriod = 5 * time.Minute
)
type stateType int32
const (
disconnectedState stateType = iota
connectedState
doneState
)
type kuberTask struct {
mesosTaskInfo *mesos.TaskInfo
podName string
}
// KubernetesExecutor is a mesos executor that runs pods
// in a minion machine.
type KubernetesExecutor struct {
kl *kubelet.Kubelet // the kubelet instance.
updateChan chan<- interface{}
state stateType
tasks map[string]*kuberTask
pods map[string]*api.BoundPod
lock sync.RWMutex
sourcename string
client *client.Client
events <-chan watch.Event
done chan struct{} // signals shutdown
outgoing chan func() (mesos.Status, error)
dockerClient dockertools.DockerInterface
}
func (k *KubernetesExecutor) getState() stateType {
return stateType(atomic.LoadInt32((*int32)(&k.state)))
}
func (k *KubernetesExecutor) isConnected() bool {
return connectedState == k.getState()
}
func (k *KubernetesExecutor) | (from, to stateType) bool {
return atomic.CompareAndSwapInt32((*int32)(&k.state), int32(from), int32(to))
}
// New creates a new kubernetes executor.
func New(kl *kubelet.Kubelet, ch chan<- interface{}, ns string, cl *client.Client, w watch.Interface, dc dockertools.DockerInterface) *KubernetesExecutor {
//TODO(jdef) do something real with these events..
events := w.ResultChan()
if events != nil {
go func() {
for e := range events {
// e ~= watch.Event { ADDED, *api.Event }
log.V(1).Info(e)
}
}()
}
k := &KubernetesExecutor{
kl: kl,
updateChan: ch,
state: disconnectedState,
tasks: make(map[string]*kuberTask),
pods: make(map[string]*api.BoundPod),
sourcename: ns,
client: cl,
events: events,
done: make(chan struct{}),
outgoing: make(chan func() (mesos.Status, error), 1024),
dockerClient: dc,
}
go k.sendLoop()
return k
}
func (k *KubernetesExecutor) isDone() bool {
select {
case <-k.done:
return true
default:
return false
}
}
// Registered is called when the executor is successfully registered with the slave.
func (k *KubernetesExecutor) Registered(driver bindings.ExecutorDriver,
executorInfo *mesos.ExecutorInfo, frameworkInfo *mesos.FrameworkInfo, slaveInfo *mesos.SlaveInfo) {
if k.isDone() {
return
}
log.Infof("Executor %v of framework %v registered with slave %v\n",
executorInfo, frameworkInfo, slaveInfo)
if !k.swapState(disconnectedState, connectedState) {
//programming error?
panic("already connected?!")
}
}
// Reregistered is called when the executor is successfully re-registered with the slave.
// This can happen when the slave fails over.
func (k *KubernetesExecutor) Reregistered(driver bindings.ExecutorDriver, slaveInfo *mesos.SlaveInfo) {
if k.isDone() {
return
}
log.Infof("Reregistered with slave %v\n", slaveInfo)
if !k.swapState(disconnectedState, connectedState) {
//programming error?
panic("already connected?!")
}
}
// Disconnected is called when the executor is disconnected with the slave.
func (k *KubernetesExecutor) Disconnected(driver bindings.ExecutorDriver) {
if k.isDone() {
return
}
log.Infof("Slave is disconnected\n")
if !k.swapState(connectedState, disconnectedState) {
//programming error?
panic("already disconnected?!")
}
}
// LaunchTask is called when the executor receives a request to launch a task.
func (k *KubernetesExecutor) LaunchTask(driver bindings.ExecutorDriver, taskInfo *mesos.TaskInfo) {
if k.isDone() {
return
}
log.Infof("Launch task %v\n", taskInfo)
if !k.isConnected() {
log.Warningf("Ignore launch task because the executor is disconnected\n")
k.sendStatus(driver, newStatus(taskInfo.GetTaskId(), mesos.TaskState_TASK_FAILED,
messages.ExecutorUnregistered))
return
}
var pod api.BoundPod
if err := yaml.Unmarshal(taskInfo.GetData(), &pod); err != nil {
log.Warningf("Failed to extract yaml data from the taskInfo.data %v\n", err)
k.sendStatus(driver, newStatus(taskInfo.GetTaskId(), mesos.TaskState_TASK_FAILED,
messages.UnmarshalTaskDataFailure))
return
}
k.lock.Lock()
defer k.lock.Unlock()
taskId := taskInfo.GetTaskId().GetValue()
if _, found := k.tasks[taskId]; found {
log.Warningf("task already launched\n")
// Don't send TASK_RUNNING back here, because this
// may be a duplicated message or a duplicated task id.
return
}
// remember this task so that:
// (a) we ignore future launches for it
// (b) we have a record of it so that we can kill it if needed
// (c) we're leaving podName == "" for now, which indicates we don't yet need to delete containers
k.tasks[taskId] = &kuberTask{
mesosTaskInfo: taskInfo,
}
go k.launchTask(driver, taskId, &pod)
}
func (k *KubernetesExecutor) getPidInfo(name string) (api.PodStatus, error) {
return k.kl.GetPodStatus(name, "")
}
// async continuation of LaunchTask
func (k *KubernetesExecutor) launchTask(driver bindings.ExecutorDriver, taskId string, pod *api.BoundPod) {
//HACK(jdef): cloned binding construction from k8s plugin/pkg/scheduler/scheduler.go
binding := &api.Binding{
ObjectMeta: api.ObjectMeta{
Namespace: pod.Namespace,
Annotations: make(map[string]string),
},
PodID: pod.Name,
Host: pod.Annotations[meta.BindingHostKey],
}
// forward the bindings that the scheduler wants to apply
for k, v := range pod.Annotations {
binding.Annotations[k] = v
}
log.Infof("Binding '%v' to '%v' with annotations %+v...", binding.PodID, binding.Host, binding.Annotations)
ctx := api.WithNamespace(api.NewDefaultContext(), binding.Namespace)
err := k.client.Post().Namespace(api.NamespaceValue(ctx)).Resource("bindings").Body(binding).Do().Error()
if err != nil {
k.sendStatus(driver, newStatus(mutil.NewTaskID(taskId), mesos.TaskState_TASK_FAILED,
messages.CreateBindingFailure))
return
}
podFullName := kubelet.GetPodFullName(&api.BoundPod{
ObjectMeta: api.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Annotations: map[string]string{kubelet.ConfigSourceAnnotationKey: k.sourcename},
},
})
// allow a recently failed-over scheduler the chance to recover the task/pod binding:
// it may have failed and recovered before the apiserver is able to report the updated
// binding information. replays of this status event will signal to the scheduler that
// the apiserver should be up-to-date.
data, err := json.Marshal(api.PodStatusResult{
ObjectMeta: api.ObjectMeta{
Name: podFullName,
SelfLink: "/podstatusresult",
},
})
if err != nil {
log.Errorf("failed to marshal pod status result: %v", err)
k.sendStatus(driver, newStatus(mutil.NewTaskID(taskId), mesos.TaskState_TASK_FAILED, err.Error()))
return
}
k.lock.Lock()
defer k.lock.Unlock()
// Add the task.
task, found := k.tasks[taskId]
if !found {
log.V(1).Infof("task %v no longer on record, probably killed, aborting launch sequence - reporting lost", taskId)
k.reportLostTask(driver, taskId, messages.LaunchTaskFailed)
return
}
// from here on, we need to delete containers associated with the task
// upon it going into a terminal state
task.podName = podFullName
k.pods[podFullName] = pod
// Send the pod updates to the channel.
update := kubelet.PodUpdate{Op: kubelet.SET}
for _, p := range k.pods {
update.Pods = append(update.Pods, *p)
}
k.updateChan <- update
statusUpdate := &mesos.TaskStatus{
TaskId: mutil.NewTaskID(taskId),
State: mesos.TaskState_TASK_STARTING.Enum(),
Message: proto.String(messages.CreateBindingSuccess),
Data: data,
}
k.sendStatus(driver, statusUpdate)
// Delay reporting 'task running' until container is up.
go k._launchTask(driver, taskId, podFullName)
}
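// _launchTask polls the kubelet for the pod's status: once the pod reports Running it sends
// TASK_RUNNING (with the marshalled status attached) and hands off to __launchTask for ongoing
// monitoring; if the grace period expires or the task record disappears, the task is reported lost.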
func (k *KubernetesExecutor) _launchTask(driver bindings.ExecutorDriver, taskId, podFullName string) {
expired := make(chan struct{})
time.AfterFunc(launchGracePeriod, func() { close(expired) })
getMarshalledInfo := func() (data []byte, cancel bool) {
// potentially long call..
if podStatus, err := k.getPidInfo(podFullName); err == nil {
select {
case <-expired:
cancel = true
default:
k.lock.Lock()
defer k.lock.Unlock()
if _, found := k.tasks[taskId]; !found {
// don't bother with the pod status if the task is already gone
cancel = true
break
} else if podStatus.Phase != api.PodRunning {
// avoid sending back a running status before it's really running
break
}
log.V(2).Infof("Found pod status: '%v'", podStatus)
result := api.PodStatusResult{
ObjectMeta: api.ObjectMeta{
Name: podFullName,
SelfLink: "/podstatusresult",
},
Status: podStatus,
}
if data, err = json.Marshal(result); err != nil {
log.Errorf("failed to marshal pod status result: %v", err)
}
}
}
return
}
waitForRunningPod:
for {
select {
case <-expired:
log.Warningf("Launch expired grace period of '%v'", launchGracePeriod)
break waitForRunningPod
case <-time.After(containerPollTime):
if data, cancel := getMarshalledInfo(); cancel {
break waitForRunningPod
} else if data == nil {
continue waitForRunningPod
} else {
k.lock.Lock()
defer k.lock.Unlock()
if _, found := k.tasks[taskId]; !found {
goto reportLost
}
statusUpdate := &mesos.TaskStatus{
TaskId: mutil.NewTaskID(taskId),
State: mesos.TaskState_TASK_RUNNING.Enum(),
Message: proto.String(fmt.Sprintf("pod-running:%s", podFullName)),
Data: data,
}
k.sendStatus(driver, statusUpdate)
// continue to monitor the health of the pod
go k.__launchTask(driver, taskId, podFullName)
return
}
}
}
k.lock.Lock()
defer k.lock.Unlock()
reportLost:
k.reportLostTask(driver, taskId, messages.LaunchTaskFailed)
}
func (k *KubernetesExecutor) __launchTask(driver bindings.ExecutorDriver, taskId, podFullName string) {
// TODO(nnielsen): Monitor health of pod and report if lost.
// Should we also allow this to fail a couple of times before reporting lost?
// What if the docker daemon is restarting and we can't connect, but it's
// going to bring the pods back online as soon as it restarts?
knownPod := func() bool {
_, err := k.getPidInfo(podFullName)
return err == nil
}
// Wait for the pod to go away and stop monitoring once it does
// TODO (jdefelice) replace with an /events watch?
for {
time.Sleep(containerPollTime)
if k.checkForLostPodTask(driver, taskId, knownPod) {
return
}
}
}
// Intended to be executed as part of the pod monitoring loop, this fn (ultimately) checks with Docker
// whether the pod is running. It will only return false if the task is still registered and the pod is
// registered in Docker. Otherwise it returns true. If there's still a task record on file, but no pod
// in Docker, then we'll also send a TASK_LOST event.
func (k *KubernetesExecutor) checkForLostPodTask(driver bindings.ExecutorDriver, taskId string, isKnownPod func() bool) bool {
// TODO (jdefelice) don't send false alarms for deleted pods (KILLED tasks)
k.lock.Lock()
defer k.lock.Unlock()
// TODO(jdef) we should really consider k.pods here, along with what docker is reporting, since the kubelet
// may constantly attempt to instantiate a pod as long as it's in the pod state that we're handing to it.
// otherwise, we're probably reporting a TASK_LOST prematurely. Should probably consult RestartPolicy to
// determine appropriate behavior. Should probably also gracefully handle docker daemon restarts.
if _, ok := k.tasks[taskId]; ok {
if isKnownPod() {
return false
} else {
log.Warningf("Detected lost pod, reporting lost task %v", taskId)
k.reportLostTask(driver, taskId, messages.ContainersDisappeared)
}
} else {
log.V(2).Infof("Task %v no longer registered, stop monitoring for lost pods", taskId)
}
return true
}
// KillTask is called when the executor receives a request to kill a task.
func (k *KubernetesExecutor) KillTask(driver bindings.ExecutorDriver, taskId *mesos.TaskID) {
if k.isDone() {
return
}
log.Infof("Kill task %v\n", taskId)
if !k.isConnected() {
//TODO(jdefelice) send TASK_LOST here?
log.Warningf("Ignore kill task because the executor is disconnected\n")
return
}
k.lock.Lock()
defer k.lock.Unlock()
k.killPodForTask(driver, taskId.GetValue(), messages.TaskKilled)
}
// Kills the pod associated with the given task. Assumes that the caller is locking around
// pod and task storage.
func (k *KubernetesExecutor) killPodForTask(driver bindings.ExecutorDriver, tid, reason string) {
k.removePodTask(driver, tid, reason, mesos.TaskState_TASK_KILLED)
}
// Reports a lost task to the slave and updates internal task and pod tracking state.
// Assumes that the caller is locking around pod and task state.
func (k *KubernetesExecutor) reportLostTask(driver bindings.ExecutorDriver, tid, reason string) {
k.removePodTask(driver, tid, reason, mesos.TaskState_TASK_LOST)
}
// removePodTask removes the task and its pod from our records, pushes the updated pod set to the kubelet, and sends a terminal status update for the task
func (k *KubernetesExecutor) removePodTask(driver bindings.ExecutorDriver, tid, reason string, state mesos.TaskState) {
task, ok := k.tasks[tid]
if !ok {
log.V(1).Infof("Failed to remove task, unknown task %v\n", tid)
return
}
delete(k.tasks, tid)
pid := task.podName
if _, found := k.pods[pid]; !found {
log.Warningf("Cannot remove unknown pod %v for task %v", pid, tid)
} else {
log.V(2).Infof("deleting pod %v for task %v", pid, tid)
delete(k.pods, pid)
// Send the pod updates to the channel.
update := kubelet.PodUpdate{Op: kubelet.SET}
for _, p := range k.pods {
update.Pods = append(update.Pods, *p)
}
k.updateChan <- update
}
// TODO(jdef): ensure that the update propagates, perhaps return a signal chan?
k.sendStatus(driver, newStatus(mutil.NewTaskID(tid), state, reason))
}
// FrameworkMessage is called when the framework sends some message to the executor
func (k *KubernetesExecutor) FrameworkMessage(driver bindings.ExecutorDriver, message string) {
if k.isDone() {
return
}
if !k.isConnected() {
log.Warningf("Ignore framework message because the executor is disconnected\n")
return
}
log.Infof("Receives message from framework %v\n", message)
//TODO(jdef) master reported a lost task, reconcile this! @see scheduler.go:handleTaskLost
if strings.HasPrefix("task-lost:", message) && len(message) > 10 {
taskId := message[10:]
if taskId != "" {
// clean up pod state
k.reportLostTask(driver, taskId, messages.TaskLostAck)
}
}
}
// Shutdown is called when the executor receives a shutdown request.
func (k *KubernetesExecutor) Shutdown(driver bindings.ExecutorDriver) {
if k.isDone() {
return
}
close(k.done)
log.Infoln("Shutdown the executor")
defer func() {
for !k.swapState(k.getState(), doneState) {
}
}()
func() {
k.lock.Lock()
defer k.lock.Unlock()
k.tasks = map[string]*kuberTask{}
}()
// according to docs, mesos will generate TASK_LOST updates for us
// if needed, so don't take extra time to do that here.
// also, clear the pod configuration so that after we issue our Kill
// kubernetes doesn't start spinning things up before we exit.
k.updateChan <- kubelet.PodUpdate{Op: kubelet.SET}
KillKubeletContainers(k.dockerClient)
}
// Destroy existing k8s containers
func KillKubeletContainers(dockerClient dockertools.DockerInterface) {
if containers, err := dockertools.GetKubeletDockerContainers(dockerClient, true); err == nil {
opts := docker.RemoveContainerOptions{
RemoveVolumes: true,
Force: true,
}
for _, container := range containers {
opts.ID = container.ID
log.V(2).Infof("Removing container: %v", opts.ID)
if err := dockerClient.RemoveContainer(opts); err != nil {
log.Warning(err)
}
}
} else {
log.Warningf("Failed to list kubelet docker containers: %v", err)
}
}
// Error is called when some error happens.
func (k *KubernetesExecutor) Error(driver bindings.ExecutorDriver, message string) {
log.Errorln(message)
}
func newStatus(taskId *mesos.TaskID, state mesos.TaskState, message string) *mesos.TaskStatus {
return &mesos.TaskStatus{
TaskId: taskId,
State: &state,
Message: proto.String(message),
}
}
func (k *KubernetesExecutor) sendStatus(driver bindings.ExecutorDriver, status *mesos.TaskStatus) {
select {
case <-k.done:
default:
k.outgoing <- func() (mesos.Status, error) { return driver.SendStatusUpdate(status) }
}
}
func (k *KubernetesExecutor) sendFrameworkMessage(driver bindings.ExecutorDriver, msg string) {
select {
case <-k.done:
default:
k.outgoing <- func() (mesos.Status, error) { return driver.SendFrameworkMessage(msg) }
}
}
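// sendLoop drains the outgoing queue one sender at a time: it pauses while the executor is
// disconnected, and on a failed send it re-queues the sender unless the driver has aborted.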
func (k *KubernetesExecutor) sendLoop() {
defer log.V(1).Info("sender loop exiting")
for {
select {
case <-k.done:
return
default:
if !k.isConnected() {
select {
case <-k.done:
case <-time.After(1 * time.Second):
}
continue
}
sender, ok := <-k.outgoing
if !ok {
// programming error
panic("someone closed the outgoing channel")
}
if status, err := sender(); err == nil {
continue
} else {
log.Error(err)
if status == mesos.Status_DRIVER_ABORTED {
return
}
}
// attempt to re-queue the sender
select {
case <-k.done:
case k.outgoing <- sender:
}
}
}
}
| swapState | identifier_name |
executor.go | package executor
import (
"encoding/json"
"fmt"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
"github.com/GoogleCloudPlatform/kubernetes/pkg/watch"
"github.com/fsouza/go-dockerclient"
"github.com/gogo/protobuf/proto"
log "github.com/golang/glog"
bindings "github.com/mesos/mesos-go/executor"
mesos "github.com/mesos/mesos-go/mesosproto"
mutil "github.com/mesos/mesos-go/mesosutil"
"github.com/mesosphere/kubernetes-mesos/pkg/executor/messages"
"github.com/mesosphere/kubernetes-mesos/pkg/scheduler/meta"
"gopkg.in/v2/yaml"
)
const (
containerPollTime = 300 * time.Millisecond
launchGracePeriod = 5 * time.Minute
)
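// stateType models the executor's connection lifecycle (disconnected -> connected -> done).
// Reads and transitions below go through atomic operations so driver callbacks can race
// safely with the send loop.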
type stateType int32
const (
disconnectedState stateType = iota
connectedState
doneState
)
type kuberTask struct {
mesosTaskInfo *mesos.TaskInfo
podName string
}
// KubernetesExecutor is a Mesos executor that runs pods
// on a minion machine.
type KubernetesExecutor struct {
kl *kubelet.Kubelet // the kubelet instance.
updateChan chan<- interface{}
state stateType
tasks map[string]*kuberTask
pods map[string]*api.BoundPod
lock sync.RWMutex
sourcename string
client *client.Client
events <-chan watch.Event
done chan struct{} // signals shutdown
outgoing chan func() (mesos.Status, error)
dockerClient dockertools.DockerInterface
}
func (k *KubernetesExecutor) getState() stateType {
return stateType(atomic.LoadInt32((*int32)(&k.state)))
}
func (k *KubernetesExecutor) isConnected() bool |
func (k *KubernetesExecutor) swapState(from, to stateType) bool {
return atomic.CompareAndSwapInt32((*int32)(&k.state), int32(from), int32(to))
}
// New creates a new kubernetes executor.
func New(kl *kubelet.Kubelet, ch chan<- interface{}, ns string, cl *client.Client, w watch.Interface, dc dockertools.DockerInterface) *KubernetesExecutor {
//TODO(jdef) do something real with these events..
events := w.ResultChan()
if events != nil {
go func() {
for e := range events {
// e ~= watch.Event { ADDED, *api.Event }
log.V(1).Info(e)
}
}()
}
k := &KubernetesExecutor{
kl: kl,
updateChan: ch,
state: disconnectedState,
tasks: make(map[string]*kuberTask),
pods: make(map[string]*api.BoundPod),
sourcename: ns,
client: cl,
events: events,
done: make(chan struct{}),
outgoing: make(chan func() (mesos.Status, error), 1024),
dockerClient: dc,
}
go k.sendLoop()
return k
}
func (k *KubernetesExecutor) isDone() bool {
select {
case <-k.done:
return true
default:
return false
}
}
// Registered is called when the executor is successfully registered with the slave.
func (k *KubernetesExecutor) Registered(driver bindings.ExecutorDriver,
executorInfo *mesos.ExecutorInfo, frameworkInfo *mesos.FrameworkInfo, slaveInfo *mesos.SlaveInfo) {
if k.isDone() {
return
}
log.Infof("Executor %v of framework %v registered with slave %v\n",
executorInfo, frameworkInfo, slaveInfo)
if !k.swapState(disconnectedState, connectedState) {
//programming error?
panic("already connected?!")
}
}
// Reregistered is called when the executor is successfully re-registered with the slave.
// This can happen when the slave fails over.
func (k *KubernetesExecutor) Reregistered(driver bindings.ExecutorDriver, slaveInfo *mesos.SlaveInfo) {
if k.isDone() {
return
}
log.Infof("Reregistered with slave %v\n", slaveInfo)
if !k.swapState(disconnectedState, connectedState) {
//programming error?
panic("already connected?!")
}
}
// Disconnected is called when the executor is disconnected from the slave.
func (k *KubernetesExecutor) Disconnected(driver bindings.ExecutorDriver) {
if k.isDone() {
return
}
log.Infof("Slave is disconnected\n")
if !k.swapState(connectedState, disconnectedState) {
//programming error?
panic("already disconnected?!")
}
}
// LaunchTask is called when the executor receives a request to launch a task.
func (k *KubernetesExecutor) LaunchTask(driver bindings.ExecutorDriver, taskInfo *mesos.TaskInfo) {
if k.isDone() {
return
}
log.Infof("Launch task %v\n", taskInfo)
if !k.isConnected() {
log.Warningf("Ignore launch task because the executor is disconnected\n")
k.sendStatus(driver, newStatus(taskInfo.GetTaskId(), mesos.TaskState_TASK_FAILED,
messages.ExecutorUnregistered))
return
}
var pod api.BoundPod
if err := yaml.Unmarshal(taskInfo.GetData(), &pod); err != nil {
log.Warningf("Failed to extract yaml data from the taskInfo.data %v\n", err)
k.sendStatus(driver, newStatus(taskInfo.GetTaskId(), mesos.TaskState_TASK_FAILED,
messages.UnmarshalTaskDataFailure))
return
}
k.lock.Lock()
defer k.lock.Unlock()
taskId := taskInfo.GetTaskId().GetValue()
if _, found := k.tasks[taskId]; found {
log.Warningf("task already launched\n")
// Don't send TASK_RUNNING back here, because this
// may be a duplicated message or a duplicated task id.
return
}
// remember this task so that:
// (a) we ignore future launches for it
// (b) we have a record of it so that we can kill it if needed
// (c) we're leaving podName == "" for now, which indicates we don't yet need to delete containers
k.tasks[taskId] = &kuberTask{
mesosTaskInfo: taskInfo,
}
go k.launchTask(driver, taskId, &pod)
}
func (k *KubernetesExecutor) getPidInfo(name string) (api.PodStatus, error) {
return k.kl.GetPodStatus(name, "")
}
// async continuation of LaunchTask
func (k *KubernetesExecutor) launchTask(driver bindings.ExecutorDriver, taskId string, pod *api.BoundPod) {
//HACK(jdef): cloned binding construction from k8s plugin/pkg/scheduler/scheduler.go
binding := &api.Binding{
ObjectMeta: api.ObjectMeta{
Namespace: pod.Namespace,
Annotations: make(map[string]string),
},
PodID: pod.Name,
Host: pod.Annotations[meta.BindingHostKey],
}
// forward the bindings that the scheduler wants to apply
for k, v := range pod.Annotations {
binding.Annotations[k] = v
}
log.Infof("Binding '%v' to '%v' with annotations %+v...", binding.PodID, binding.Host, binding.Annotations)
ctx := api.WithNamespace(api.NewDefaultContext(), binding.Namespace)
err := k.client.Post().Namespace(api.NamespaceValue(ctx)).Resource("bindings").Body(binding).Do().Error()
if err != nil {
k.sendStatus(driver, newStatus(mutil.NewTaskID(taskId), mesos.TaskState_TASK_FAILED,
messages.CreateBindingFailure))
return
}
podFullName := kubelet.GetPodFullName(&api.BoundPod{
ObjectMeta: api.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
Annotations: map[string]string{kubelet.ConfigSourceAnnotationKey: k.sourcename},
},
})
// allow a recently failed-over scheduler the chance to recover the task/pod binding:
// it may have failed and recovered before the apiserver is able to report the updated
// binding information. replays of this status event will signal to the scheduler that
// the apiserver should be up-to-date.
data, err := json.Marshal(api.PodStatusResult{
ObjectMeta: api.ObjectMeta{
Name: podFullName,
SelfLink: "/podstatusresult",
},
})
if err != nil {
log.Errorf("failed to marshal pod status result: %v", err)
k.sendStatus(driver, newStatus(mutil.NewTaskID(taskId), mesos.TaskState_TASK_FAILED, err.Error()))
return
}
k.lock.Lock()
defer k.lock.Unlock()
// Add the task.
task, found := k.tasks[taskId]
if !found {
log.V(1).Infof("task %v no longer on record, probably killed, aborting launch sequence - reporting lost", taskId)
k.reportLostTask(driver, taskId, messages.LaunchTaskFailed)
return
}
// from here on, we need to delete containers associated with the task
// upon it going into a terminal state
task.podName = podFullName
k.pods[podFullName] = pod
// Send the pod updates to the channel.
update := kubelet.PodUpdate{Op: kubelet.SET}
for _, p := range k.pods {
update.Pods = append(update.Pods, *p)
}
k.updateChan <- update
statusUpdate := &mesos.TaskStatus{
TaskId: mutil.NewTaskID(taskId),
State: mesos.TaskState_TASK_STARTING.Enum(),
Message: proto.String(messages.CreateBindingSuccess),
Data: data,
}
k.sendStatus(driver, statusUpdate)
// Delay reporting 'task running' until container is up.
go k._launchTask(driver, taskId, podFullName)
}
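// _launchTask polls the kubelet for the pod's status: once the pod reports Running it sends
// TASK_RUNNING (with the marshalled status attached) and hands off to __launchTask for ongoing
// monitoring; if the grace period expires or the task record disappears, the task is reported lost.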
func (k *KubernetesExecutor) _launchTask(driver bindings.ExecutorDriver, taskId, podFullName string) {
expired := make(chan struct{})
time.AfterFunc(launchGracePeriod, func() { close(expired) })
getMarshalledInfo := func() (data []byte, cancel bool) {
// potentially long call..
if podStatus, err := k.getPidInfo(podFullName); err == nil {
select {
case <-expired:
cancel = true
default:
k.lock.Lock()
defer k.lock.Unlock()
if _, found := k.tasks[taskId]; !found {
// don't bother with the pod status if the task is already gone
cancel = true
break
} else if podStatus.Phase != api.PodRunning {
// avoid sending back a running status before it's really running
break
}
log.V(2).Infof("Found pod status: '%v'", podStatus)
result := api.PodStatusResult{
ObjectMeta: api.ObjectMeta{
Name: podFullName,
SelfLink: "/podstatusresult",
},
Status: podStatus,
}
if data, err = json.Marshal(result); err != nil {
log.Errorf("failed to marshal pod status result: %v", err)
}
}
}
return
}
waitForRunningPod:
for {
select {
case <-expired:
log.Warningf("Launch expired grace period of '%v'", launchGracePeriod)
break waitForRunningPod
case <-time.After(containerPollTime):
if data, cancel := getMarshalledInfo(); cancel {
break waitForRunningPod
} else if data == nil {
continue waitForRunningPod
} else {
k.lock.Lock()
defer k.lock.Unlock()
if _, found := k.tasks[taskId]; !found {
goto reportLost
}
statusUpdate := &mesos.TaskStatus{
TaskId: mutil.NewTaskID(taskId),
State: mesos.TaskState_TASK_RUNNING.Enum(),
Message: proto.String(fmt.Sprintf("pod-running:%s", podFullName)),
Data: data,
}
k.sendStatus(driver, statusUpdate)
// continue to monitor the health of the pod
go k.__launchTask(driver, taskId, podFullName)
return
}
}
}
k.lock.Lock()
defer k.lock.Unlock()
reportLost:
k.reportLostTask(driver, taskId, messages.LaunchTaskFailed)
}
func (k *KubernetesExecutor) __launchTask(driver bindings.ExecutorDriver, taskId, podFullName string) {
// TODO(nnielsen): Monitor health of pod and report if lost.
// Should we also allow this to fail a couple of times before reporting lost?
// What if the docker daemon is restarting and we can't connect, but it's
// going to bring the pods back online as soon as it restarts?
knownPod := func() bool {
_, err := k.getPidInfo(podFullName)
return err == nil
}
// Wait for the pod to go away and stop monitoring once it does
// TODO (jdefelice) replace with an /events watch?
for {
time.Sleep(containerPollTime)
if k.checkForLostPodTask(driver, taskId, knownPod) {
return
}
}
}
// Intended to be executed as part of the pod monitoring loop, this fn (ultimately) checks with Docker
// whether the pod is running. It will only return false if the task is still registered and the pod is
// registered in Docker. Otherwise it returns true. If there's still a task record on file, but no pod
// in Docker, then we'll also send a TASK_LOST event.
func (k *KubernetesExecutor) checkForLostPodTask(driver bindings.ExecutorDriver, taskId string, isKnownPod func() bool) bool {
// TODO (jdefelice) don't send false alarms for deleted pods (KILLED tasks)
k.lock.Lock()
defer k.lock.Unlock()
// TODO(jdef) we should really consider k.pods here, along with what docker is reporting, since the kubelet
// may constantly attempt to instantiate a pod as long as it's in the pod state that we're handing to it.
// otherwise, we're probably reporting a TASK_LOST prematurely. Should probably consult RestartPolicy to
// determine appropriate behavior. Should probably also gracefully handle docker daemon restarts.
if _, ok := k.tasks[taskId]; ok {
if isKnownPod() {
return false
} else {
log.Warningf("Detected lost pod, reporting lost task %v", taskId)
k.reportLostTask(driver, taskId, messages.ContainersDisappeared)
}
} else {
log.V(2).Infof("Task %v no longer registered, stop monitoring for lost pods", taskId)
}
return true
}
// KillTask is called when the executor receives a request to kill a task.
func (k *KubernetesExecutor) KillTask(driver bindings.ExecutorDriver, taskId *mesos.TaskID) {
if k.isDone() {
return
}
log.Infof("Kill task %v\n", taskId)
if !k.isConnected() {
//TODO(jdefelice) send TASK_LOST here?
log.Warningf("Ignore kill task because the executor is disconnected\n")
return
}
k.lock.Lock()
defer k.lock.Unlock()
k.killPodForTask(driver, taskId.GetValue(), messages.TaskKilled)
}
// Kills the pod associated with the given task. Assumes that the caller is locking around
// pod and task storage.
func (k *KubernetesExecutor) killPodForTask(driver bindings.ExecutorDriver, tid, reason string) {
k.removePodTask(driver, tid, reason, mesos.TaskState_TASK_KILLED)
}
// Reports a lost task to the slave and updates internal task and pod tracking state.
// Assumes that the caller is locking around pod and task state.
func (k *KubernetesExecutor) reportLostTask(driver bindings.ExecutorDriver, tid, reason string) {
k.removePodTask(driver, tid, reason, mesos.TaskState_TASK_LOST)
}
// removePodTask removes the task and its pod from our records, pushes the updated pod set to the kubelet, and sends a terminal status update for the task
func (k *KubernetesExecutor) removePodTask(driver bindings.ExecutorDriver, tid, reason string, state mesos.TaskState) {
task, ok := k.tasks[tid]
if !ok {
log.V(1).Infof("Failed to remove task, unknown task %v\n", tid)
return
}
delete(k.tasks, tid)
pid := task.podName
if _, found := k.pods[pid]; !found {
log.Warningf("Cannot remove unknown pod %v for task %v", pid, tid)
} else {
log.V(2).Infof("deleting pod %v for task %v", pid, tid)
delete(k.pods, pid)
// Send the pod updates to the channel.
update := kubelet.PodUpdate{Op: kubelet.SET}
for _, p := range k.pods {
update.Pods = append(update.Pods, *p)
}
k.updateChan <- update
}
// TODO(jdef): ensure that the update propagates, perhaps return a signal chan?
k.sendStatus(driver, newStatus(mutil.NewTaskID(tid), state, reason))
}
// FrameworkMessage is called when the framework sends some message to the executor
func (k *KubernetesExecutor) FrameworkMessage(driver bindings.ExecutorDriver, message string) {
if k.isDone() {
return
}
if !k.isConnected() {
log.Warningf("Ignore framework message because the executor is disconnected\n")
return
}
log.Infof("Receives message from framework %v\n", message)
//TODO(jdef) master reported a lost task, reconcile this! @see scheduler.go:handleTaskLost
if strings.HasPrefix("task-lost:", message) && len(message) > 10 {
taskId := message[10:]
if taskId != "" {
// clean up pod state
k.reportLostTask(driver, taskId, messages.TaskLostAck)
}
}
}
// Shutdown is called when the executor receives a shutdown request.
func (k *KubernetesExecutor) Shutdown(driver bindings.ExecutorDriver) {
if k.isDone() {
return
}
close(k.done)
log.Infoln("Shutdown the executor")
defer func() {
for !k.swapState(k.getState(), doneState) {
}
}()
func() {
k.lock.Lock()
defer k.lock.Unlock()
k.tasks = map[string]*kuberTask{}
}()
// according to docs, mesos will generate TASK_LOST updates for us
// if needed, so don't take extra time to do that here.
// also, clear the pod configuration so that after we issue our Kill
// kubernetes doesn't start spinning things up before we exit.
k.updateChan <- kubelet.PodUpdate{Op: kubelet.SET}
KillKubeletContainers(k.dockerClient)
}
// Destroy existing k8s containers
func KillKubeletContainers(dockerClient dockertools.DockerInterface) {
if containers, err := dockertools.GetKubeletDockerContainers(dockerClient, true); err == nil {
opts := docker.RemoveContainerOptions{
RemoveVolumes: true,
Force: true,
}
for _, container := range containers {
opts.ID = container.ID
log.V(2).Infof("Removing container: %v", opts.ID)
if err := dockerClient.RemoveContainer(opts); err != nil {
log.Warning(err)
}
}
} else {
log.Warningf("Failed to list kubelet docker containers: %v", err)
}
}
// Error is called when some error happens.
func (k *KubernetesExecutor) Error(driver bindings.ExecutorDriver, message string) {
log.Errorln(message)
}
func newStatus(taskId *mesos.TaskID, state mesos.TaskState, message string) *mesos.TaskStatus {
return &mesos.TaskStatus{
TaskId: taskId,
State: &state,
Message: proto.String(message),
}
}
func (k *KubernetesExecutor) sendStatus(driver bindings.ExecutorDriver, status *mesos.TaskStatus) {
select {
case <-k.done:
default:
k.outgoing <- func() (mesos.Status, error) { return driver.SendStatusUpdate(status) }
}
}
func (k *KubernetesExecutor) sendFrameworkMessage(driver bindings.ExecutorDriver, msg string) {
select {
case <-k.done:
default:
k.outgoing <- func() (mesos.Status, error) { return driver.SendFrameworkMessage(msg) }
}
}
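// sendLoop drains the outgoing queue one sender at a time: it pauses while the executor is
// disconnected, and on a failed send it re-queues the sender unless the driver has aborted.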
func (k *KubernetesExecutor) sendLoop() {
defer log.V(1).Info("sender loop exiting")
for {
select {
case <-k.done:
return
default:
if !k.isConnected() {
select {
case <-k.done:
case <-time.After(1 * time.Second):
}
continue
}
sender, ok := <-k.outgoing
if !ok {
// programming error
panic("someone closed the outgoing channel")
}
if status, err := sender(); err == nil {
continue
} else {
log.Error(err)
if status == mesos.Status_DRIVER_ABORTED {
return
}
}
// attempt to re-queue the sender
select {
case <-k.done:
case k.outgoing <- sender:
}
}
}
}
| {
return connectedState == k.getState()
} | identifier_body |
IoManager.py | import ast
import os
from os import listdir
from os.path import isfile, join
import requests
import shutil
import pandas as pd
from pathlib import Path
import time
import numpy as np
class IoManager:
"""
Handles every input/output of the program
Currently:
* loads cube list and cards base data on startup
* checks/downloads missing card images (en/fr) on startup
"""
CUBE_LIST_FILE_PATH = "data/cube_list.txt"
CARD_INFOS_FILE_PATH = "data/cube_list_base_data.csv"
CARD_RATINGS_FILE_PATH = "data/archetype_ratings.csv"
ARCH_PRESENCE_PATH = "data/archetype_presence.csv"
CARD_IMAGES_PATH = "data/imgs"
CARD_IMAGES_PATH_EN = CARD_IMAGES_PATH + "/en"
CARD_IMAGES_PATH_FR = CARD_IMAGES_PATH + "/fr"
SPRITE_DIR_PATH = "data/imgs/sprites/"
BASIC_LANDS = ["Plains", "Island", "Swamp", "Mountain", "Forest"]
BASIC_LANDS_URLS = {
"Plains": "https://img.scryfall.com/cards/large/front/a/9/a9891b7b-fc52-470c-9f74-292ae665f378.jpg?1581719749",
"Island": "https://img.scryfall.com/cards/large/front/a/c/acf7b664-3e75-4018-81f6-2a14ab59f258.jpg?1582126055",
"Swamp": "https://img.scryfall.com/cards/large/front/0/2/02cb5cfd-018e-4c5e-bef1-166262aa5f1d.jpg?1582126067",
"Mountain": "https://img.scryfall.com/cards/large/front/5/3/53fb7b99-9e47-46a6-9c8a-88e28b5197f1.jpg?1582126072",
"Forest": "https://img.scryfall.com/cards/large/front/3/2/32af9f41-89e2-4e7a-9fec-fffe79cae077.jpg?1582126077"
}
base_infos_data = None
def __init__(self, archetypes, download_missing_images=True):
self.archetypes = archetypes
self.cube_list = IoManager.get_cube_list()
self.non_basics = self.get_nonbasic_lands_list()
self.base_infos_df = IoManager.get_cards_base_info()
IoManager.base_infos_data = self.base_infos_df
self.ratings = self.get_ratings()
if download_missing_images:
self.download_missing_images()
if not IoManager.arch_presence_exists():
self.init_arch_presence()
@staticmethod
def get_cube_list():
"""
:return: list of card names, cube list
"""
f = open(IoManager.CUBE_LIST_FILE_PATH, "r")
lines = f.readlines()
f.close()
# removing '\n' at the end of each name
lines = [card_name[:-1] for card_name in lines]
return lines
@staticmethod
def get_cards_base_info():
"""
:return: DataFrame containing data for each card, such as power, toughness, urls...
"""
df = pd.read_csv(IoManager.CARD_INFOS_FILE_PATH)
for list_feature in ["colors", "color_identity"]:
df[list_feature] = df[list_feature].apply(lambda e: e if type(e) != float else "[]")
df[list_feature] = df[list_feature].apply(ast.literal_eval)
return df
def get_ratings(self):
"""
Loads, scales, adds the sum columns and returns the cards' ratings per archetype
:return: DataFrame containing each archetype rating for each card
"""
df = pd.read_csv(IoManager.CARD_RATINGS_FILE_PATH)
df = IoManager.scale_ratings(df)
df = IoManager.normalize_ratings_per_archetype(df)
df = self.add_ratings_sum(df)
# print(df[["name", "monogreen", "simic_ramp", "general"]].tail(60))
# print(df[["name", "general"]].sort_values(ascending=False, by="general").head(50))
return df
@staticmethod
def scale_ratings(ratings):
"""
Rescales ratings non-linearly: the gap between a 3 and a 4 is much wider than a single linear step
:param ratings: df
:return: updated df
"""
mapping = {
2: 3,
3: 8,
4: 30,
5: 60
}
ratings = ratings.applymap(lambda e: mapping[e] if e in mapping else e)
return ratings
@staticmethod
def normalize_ratings_per_archetype(ratings):
"""
Divides each rating by a value proportional to the sum of all the ratings in the archetype
:param ratings:
:return:
"""
archetype_cols = [c for c in ratings.columns if c != "name"]
n_cards = len(ratings["monored"])
for arch_col in archetype_cols:
ratings[arch_col] = ratings[arch_col] / (ratings[arch_col].sum() / n_cards)
return ratings
def add_ratings_sum(self, ratings):
"""
Adds two columns to the ratings DataFrame: 'general' (weighted by archetype color coefficients) and 'general_raw' (unweighted sum)
:return: the updated DataFrame
"""
rate_columns = [c for c in ratings.columns if c != "name"]
coef_per_archetype = self.archetypes.get_color_coeffs()
ratings["general"] = ratings[rate_columns].apply(lambda row: sum([row[c]*coef_per_archetype[c] for c in rate_columns]), axis=1)
ratings["general_raw"] = ratings[rate_columns].sum(axis=1)
return ratings
def get_nonbasic_lands_list(self):
"""
:return: a list of land names
"""
return self.cube_list[320:]
@staticmethod
def get_downloaded_images(lang="en"):
"""
Returns which cards we have downloaded images for (of the specified language, en or fr)
:return: list of card names
"""
path = IoManager.CARD_IMAGES_PATH_EN if lang == "en" else IoManager.CARD_IMAGES_PATH_FR
Path(path).mkdir(parents=True, exist_ok=True)
file_names_en = [f for f in listdir(path) if
isfile(join(path, f))]
card_names = [f[:-4] for f in file_names_en]
card_names = [c.replace("__", "//") for c in card_names] # to handle double-faced cards
return card_names
def get_missing_images(self):
"""
Returns which cards we miss images for
:return: tuple: list of en missing, list of fr missing
"""
downloaded_images_en = IoManager.get_downloaded_images(lang="en")
downloaded_images_fr = IoManager.get_downloaded_images(lang="fr")
complete_list = self.cube_list + IoManager.BASIC_LANDS
missing_images_en = [card for card in complete_list if card not in downloaded_images_en]
missing_images_fr = [card for card in complete_list if card not in downloaded_images_fr]
return missing_images_en, missing_images_fr
def download_card_images(self, card_names, lang="en"):
"""
Downloads the en and fr card image of each card
:param lang: en or fr
:param card_names: list of card names
:return:
"""
for card_name in card_names:
print("Dowloading card imgs for \'" + card_name + "\' (" + lang + ")")
output_file_name = card_name + ".jpg"
output_file_path = IoManager.CARD_IMAGES_PATH_EN + "/" + output_file_name if lang == "en" else IoManager.CARD_IMAGES_PATH_FR + "/" + output_file_name
output_file_path = output_file_path.replace('//', '__')
en_url, fr_url = self.get_card_urls(card_name)
url = en_url if lang == "en" else fr_url
# Open the url image, set stream to True, this will return the stream content.
resp = requests.get(url, stream=True)
# Open a local file with wb ( write binary ) permission.
local_file = open(output_file_path, 'wb')
# Set decode_content value to True, otherwise the downloaded image file's size will be zero.
resp.raw.decode_content = True
# Copy the response stream raw data to local image file.
shutil.copyfileobj(resp.raw, local_file)
# Remove the image url response object.
del resp
def get_card_urls(self, card_name):
"""
:param card_name:
:return: tuple: en url, fr url
"""
if card_name in IoManager.BASIC_LANDS:
return IoManager.BASIC_LANDS_URLS[card_name], IoManager.BASIC_LANDS_URLS[card_name]
urls_df = self.base_infos_df[["name", "img_en_large", "img_fr_large"]]
card_row = urls_df[urls_df["name"] == card_name]
print(card_name)
en_url = card_row["img_en_large"].iloc[0]
fr_url = card_row["img_fr_large"].iloc[0]
return en_url, fr_url
def download_missing_images(self, only_english: bool = True):
"""
Checks for missing images, and downloads them if any are found
:return:
"""
print("\nChecking for missing images")
missing_images_en, missing_images_fr = self.get_missing_images()
for card_names, lang in [(missing_images_en, "en"), (missing_images_fr, "fr")]:
if card_names and (not only_english or lang == "en"):
self.download_card_images(card_names, lang)
@staticmethod
def get_img_path(card_name, lang="en"):
"""
:param card_name:
:param lang: en or fr
:return: path to the img
"""
imgs_folder = IoManager.CARD_IMAGES_PATH_EN if lang == "en" else IoManager.CARD_IMAGES_PATH_FR
return imgs_folder + "/" + card_name + ".jpg"
@staticmethod
def get_sprite_path(sprite_name):
return IoManager.SPRITE_DIR_PATH + sprite_name
@staticmethod
def arch_presence_exists():
return os.path.isfile(IoManager.ARCH_PRESENCE_PATH)
def init_arch_presence(self):
|
@staticmethod
def get_arch_presence():
return pd.read_csv(IoManager.ARCH_PRESENCE_PATH, index_col=[0])
def save_arch_presence(self, arch_presence_entries):
"""
Adds the new entries to the db
:param arch_presence_entries: [[1, 0, 0, 1, 1, 0], [1, 0, 1...]...] 1 if the archetype was present at the draft
:return: new_df
"""
df = IoManager.get_arch_presence()
print(len(arch_presence_entries[0]))
df2 = pd.DataFrame(data=arch_presence_entries, columns=self.archetypes.get_archetype_names(as_feature_names=True))
new_df = pd.concat([df, df2], sort=False)
new_df.to_csv(IoManager.ARCH_PRESENCE_PATH)
return new_df
class DataFetcher:
"""
Used to get base card data from cube_list.txt using Scryfall api
"""
@staticmethod
def update_base_data():
cube_list_file_name = "cube_list.txt"
output_csv_name = "cube_list_base_data.csv"
#DataFetcher.clean_double_faced_from_cube_list(cube_list_file_name=cube_list_file_name)
new_base_data = DataFetcher.fetch_clean_save(cube_list_file_name=cube_list_file_name, output_csv_name=output_csv_name)
if new_base_data is not None:
print(new_base_data.tail(10))
@staticmethod
# Returns the info for a card in English, with its fr img urls too
def get_card_data(card_name):
card_name = "\"" + card_name + "\""
time.sleep(0.1)
en_data = requests.get("https://api.scryfall.com/cards/search?q=!" + card_name).json()
time.sleep(0.1)
fr_data = requests.get("https://api.scryfall.com/cards/search?q=!" + card_name + "+lang%3Afr").json()
en_output = en_data["data"][0]
fr_output = fr_data["data"][0] if "data" in fr_data else None # fr version my not exist
if fr_output is None:
print("French missing for " + card_name)
# handling double-faced cards
if "card_faces" in en_output and en_output["layout"] == "modal_dfc":
full_name = en_output["name"]
en_output = {**en_output, **en_output["card_faces"][0]}
en_output["name"] = full_name
if fr_output is not None:
fr_output = fr_output["card_faces"][0]
return en_output, fr_output
@staticmethod
# Returns a Dataframe containing the relevant fields of the cards in the list
def get_cards_data(card_names):
relevant_fields_en = [
"name",
"highres_image",
"image_uris",
"mana_cost",
"cmc",
"type_line",
"power",
"toughness",
"colors",
"color_identity"
]
relevant_fields_fr = [
"image_uris"
]
raw_data = [DataFetcher.get_card_data(card_name) for card_name in card_names]
df_content = {}
for field in relevant_fields_en:
df_content[field] = [data[0][field] if field in data[0] else np.nan for data in raw_data]
for field in relevant_fields_fr:
df_content[field + "_fr"] = [data[1][field] if data[1] is not None and field in data[1] else np.nan for data in
raw_data]
df = pd.DataFrame(df_content)
return df
@staticmethod
def clean_double_faced_from_cube_list(cube_list_file_name):
# removes the second face name for each double faced card
f = open("data/" + cube_list_file_name, "r")
lines = f.readlines()
f.close()
def rm_second_face(line):
if "//" in line:
return line.split(" //")[0] + "\n"
return line
lines = [rm_second_face(l) for l in lines]
f = open("data/" + cube_list_file_name, "w")
f.write("".join(lines))
f.close()
@staticmethod
def get_cube_list(cube_list_file_name):
f = open("data/" + cube_list_file_name, "r")
lines = f.readlines()
f.close()
# removing '\n' at then end of each name
lines = [card_name[:-1] for card_name in lines]
return lines
@staticmethod
def infer_new_cards(cube_list, output_csv_name):
prev_ratings = pd.read_csv("data/" + output_csv_name)
new_cards = [c for c in cube_list if c not in prev_ratings.name.to_list()]
print("There are {} new cards: \n{}".format(len(new_cards), new_cards))
return new_cards
@staticmethod
# gets the cube list and fetches the data for each new card (saving is done separately in save_csv)
def fetch_cube_data(cube_list_file_name, output_csv_name):
cube_list = DataFetcher.get_cube_list(cube_list_file_name)
new_cards = DataFetcher.infer_new_cards(cube_list, output_csv_name=output_csv_name)
if not new_cards:
return pd.DataFrame()
cube_data = DataFetcher.get_cards_data(new_cards)
return cube_data
@staticmethod
# creates separate features to store each img url
def clean_image_urls(cube_data):
for lang in ["en", "fr"]:
for image_type in ["small", "normal", "large", "png"]:
feature_name = "img_" + lang + "_" + image_type
current_feature = "image_uris" if lang == "en" else "image_uris_fr"
cube_data[feature_name] = cube_data[current_feature].apply(
lambda d: d[image_type] if type(d) != float and d is not None and image_type in d else np.nan)
@staticmethod
def clean_colors(cube_data):
colors = ["W", "U", "B", "R", "G"]
color_pairs = ["WU", "WB", "WR", "WG", "UB", "UR", "UG", "BR", "BR", "RG"]
for color in colors:
cube_data[color] = cube_data["color_identity"].apply(lambda l: 1 if color in l else 0)
for c, c2 in color_pairs:
cube_data[c + c2] = cube_data["color_identity"].apply(lambda l: 1 if c in l and c2 in l else 0)
@staticmethod
def clean_type_line(cube_data):
cube_data["type_line"] = cube_data["type_line"].str.replace(' —', ':')
@staticmethod
def clean_cmc(cube_data):
cube_data["cmc"] = cube_data["cmc"].astype(int)
@staticmethod
def remove_old_columns(cube_data):
old_columns = ["image_uris", "image_uris_fr"]
valid_columns = [c for c in cube_data.columns if c not in old_columns]
return cube_data[valid_columns]
@staticmethod
def clean_booleans(cube_data):
cube_data["highres_image"] = cube_data["highres_image"].astype(int)
@staticmethod
def clean_cube_data(cube_data):
DataFetcher.clean_image_urls(cube_data)
DataFetcher.clean_colors(cube_data)
DataFetcher.clean_type_line(cube_data)
DataFetcher.clean_cmc(cube_data)
DataFetcher.clean_booleans(cube_data)
return DataFetcher.remove_old_columns(cube_data)
@staticmethod
def save_csv(cube_data, output_csv_name, cube_list_file_name):
current_data = pd.read_csv("data/" + output_csv_name)
new_cards = DataFetcher.infer_new_cards(cube_list=DataFetcher.get_cube_list(cube_list_file_name=cube_list_file_name), output_csv_name=output_csv_name)
new_rows = cube_data[cube_data.name.isin(new_cards)]
new_cube_data = current_data.append(new_rows).reset_index(drop=True)
new_cube_data.to_csv("data/" + output_csv_name, index=False)
return new_cube_data
@staticmethod
# does it all
def fetch_clean_save(cube_list_file_name, output_csv_name):
cube_data = DataFetcher.fetch_cube_data(cube_list_file_name=cube_list_file_name, output_csv_name=output_csv_name)
if len(cube_data.index) == 0:
return None
cube_data_clean = DataFetcher.clean_cube_data(cube_data)
return DataFetcher.save_csv(cube_data=cube_data, output_csv_name=output_csv_name, cube_list_file_name=cube_list_file_name)
class RatingsInitializer:
@staticmethod
def prepare_new_ratings(archetypes):
new_ratings = RatingsInitializer.setup_for_new_cards(RatingsInitializer.load_cards_df(), archetypes)
if new_ratings is not None:
RatingsInitializer.save_csv(new_ratings)
exit()
return new_ratings
@staticmethod
def load_cards_df():
df = pd.read_csv("data/cube_list_base_data.csv")
for list_feature in ["colors", "color_identity"]:
df[list_feature] = df[list_feature].apply(lambda e: e if type(e) != float else "[]")
df[list_feature] = df[list_feature].apply(ast.literal_eval)
return df
@staticmethod
def setup_for_new_cards(df, archetypes):
new_df = pd.DataFrame({"name": df["name"]})
for arch in archetypes.list:
feature_name = arch.name.lower().replace(' ', '_')
new_df[feature_name] = df["color_identity"].apply(lambda l: 9 if arch.is_available(l) else 0)
# merging with existing ratings
current_ratings = pd.read_csv("data/archetype_ratings.csv")
new_cards = [c for c in df.name.to_list() if c not in current_ratings.name.to_list()]
if new_cards:
print("Preparing ratings for new cards: {}".format(new_cards))
print("Make sure to manually replace 9 values by 0-4 values")
new_rows = new_df[new_df.name.isin(new_cards)]
new_ratings = current_ratings.append(new_rows)
return new_ratings if new_cards else None
@staticmethod
def save_csv(new_df):
new_df.to_csv("data/archetype_ratings.csv", index=False)
| print("Initialising arch presence")
df = pd.DataFrame(columns=self.archetypes.get_archetype_names(as_feature_names=True))
df.to_csv(IoManager.ARCH_PRESENCE_PATH)
return df | identifier_body |
IoManager.py | import ast
import os
from os import listdir
from os.path import isfile, join
import requests
import shutil
import pandas as pd
from pathlib import Path
import time
import numpy as np
class IoManager:
"""
Handles every input/output of the program
Currently:
* loads cube list and cards base data on startup
* checks/downloads missing card images (en/fr) on startup
"""
CUBE_LIST_FILE_PATH = "data/cube_list.txt"
CARD_INFOS_FILE_PATH = "data/cube_list_base_data.csv"
CARD_RATINGS_FILE_PATH = "data/archetype_ratings.csv"
ARCH_PRESENCE_PATH = "data/archetype_presence.csv"
CARD_IMAGES_PATH = "data/imgs"
CARD_IMAGES_PATH_EN = CARD_IMAGES_PATH + "/en"
CARD_IMAGES_PATH_FR = CARD_IMAGES_PATH + "/fr"
SPRITE_DIR_PATH = "data/imgs/sprites/"
BASIC_LANDS = ["Plains", "Island", "Swamp", "Mountain", "Forest"]
BASIC_LANDS_URLS = {
"Plains": "https://img.scryfall.com/cards/large/front/a/9/a9891b7b-fc52-470c-9f74-292ae665f378.jpg?1581719749",
"Island": "https://img.scryfall.com/cards/large/front/a/c/acf7b664-3e75-4018-81f6-2a14ab59f258.jpg?1582126055",
"Swamp": "https://img.scryfall.com/cards/large/front/0/2/02cb5cfd-018e-4c5e-bef1-166262aa5f1d.jpg?1582126067",
"Mountain": "https://img.scryfall.com/cards/large/front/5/3/53fb7b99-9e47-46a6-9c8a-88e28b5197f1.jpg?1582126072",
"Forest": "https://img.scryfall.com/cards/large/front/3/2/32af9f41-89e2-4e7a-9fec-fffe79cae077.jpg?1582126077"
}
base_infos_data = None
def __init__(self, archetypes, download_missing_images=True):
self.archetypes = archetypes
self.cube_list = IoManager.get_cube_list()
self.non_basics = self.get_nonbasic_lands_list()
self.base_infos_df = IoManager.get_cards_base_info()
IoManager.base_infos_data = self.base_infos_df
self.ratings = self.get_ratings()
if download_missing_images:
self.download_missing_images()
if not IoManager.arch_presence_exists():
self.init_arch_presence()
@staticmethod
def get_cube_list():
"""
:return: list of card names, cube list
"""
f = open(IoManager.CUBE_LIST_FILE_PATH, "r")
lines = f.readlines()
f.close()
# removing '\n' at the end of each name
lines = [card_name[:-1] for card_name in lines]
return lines
@staticmethod
def get_cards_base_info():
"""
:return: DataFrame containing data for each card, such as power, toughness, urls...
"""
df = pd.read_csv(IoManager.CARD_INFOS_FILE_PATH)
for list_feature in ["colors", "color_identity"]:
df[list_feature] = df[list_feature].apply(lambda e: e if type(e) != float else "[]")
df[list_feature] = df[list_feature].apply(ast.literal_eval)
return df
def get_ratings(self):
"""
Loads, scales, adds the sum columns and returns the cards' ratings per archetype
:return: DataFrame containing each archetype rating for each card
"""
df = pd.read_csv(IoManager.CARD_RATINGS_FILE_PATH)
df = IoManager.scale_ratings(df)
df = IoManager.normalize_ratings_per_archetype(df)
df = self.add_ratings_sum(df)
# print(df[["name", "monogreen", "simic_ramp", "general"]].tail(60))
# print(df[["name", "general"]].sort_values(ascending=False, by="general").head(50))
return df
@staticmethod
def scale_ratings(ratings):
"""
Rescales ratings non-linearly: the gap between a 3 and a 4 is much wider than a single linear step
:param ratings: df
:return: updated df
"""
mapping = {
2: 3,
3: 8,
4: 30,
5: 60
}
ratings = ratings.applymap(lambda e: mapping[e] if e in mapping else e)
return ratings
@staticmethod
def normalize_ratings_per_archetype(ratings):
"""
Divides each rating by a value proportional to the sum of all the ratings in the archetype
:param ratings:
:return:
"""
archetype_cols = [c for c in ratings.columns if c != "name"]
n_cards = len(ratings["monored"])
for arch_col in archetype_cols:
ratings[arch_col] = ratings[arch_col] / (ratings[arch_col].sum() / n_cards)
return ratings
def add_ratings_sum(self, ratings):
"""
Adds two columns to the ratings DataFrame: 'general' (weighted by archetype color coefficients) and 'general_raw' (unweighted sum)
:return: the updated DataFrame
"""
rate_columns = [c for c in ratings.columns if c != "name"]
coef_per_archetype = self.archetypes.get_color_coeffs()
ratings["general"] = ratings[rate_columns].apply(lambda row: sum([row[c]*coef_per_archetype[c] for c in rate_columns]), axis=1)
ratings["general_raw"] = ratings[rate_columns].sum(axis=1)
return ratings
def get_nonbasic_lands_list(self):
"""
:return: a list of land names
"""
return self.cube_list[320:]
@staticmethod
def get_downloaded_images(lang="en"):
"""
Returns which cards we have downloaded images for (of the specified language, en or fr)
:return: list of card names
"""
path = IoManager.CARD_IMAGES_PATH_EN if lang == "en" else IoManager.CARD_IMAGES_PATH_FR
Path(path).mkdir(parents=True, exist_ok=True)
file_names_en = [f for f in listdir(path) if
isfile(join(path, f))]
card_names = [f[:-4] for f in file_names_en]
card_names = [c.replace("__", "//") for c in card_names] # to handle double-faced cards
return card_names
def get_missing_images(self):
"""
Returns which cards we miss images for
:return: tuple: list of en missing, list of fr missing
"""
downloaded_images_en = IoManager.get_downloaded_images(lang="en")
downloaded_images_fr = IoManager.get_downloaded_images(lang="fr")
complete_list = self.cube_list + IoManager.BASIC_LANDS
missing_images_en = [card for card in complete_list if card not in downloaded_images_en]
missing_images_fr = [card for card in complete_list if card not in downloaded_images_fr]
return missing_images_en, missing_images_fr
def download_card_images(self, card_names, lang="en"):
"""
Downloads the en and fr card image of each card
:param lang: en or fr
:param card_names: list of card names
:return:
"""
for card_name in card_names:
print("Dowloading card imgs for \'" + card_name + "\' (" + lang + ")")
output_file_name = card_name + ".jpg"
output_file_path = IoManager.CARD_IMAGES_PATH_EN + "/" + output_file_name if lang == "en" else IoManager.CARD_IMAGES_PATH_FR + "/" + output_file_name
output_file_path = output_file_path.replace('//', '__')
en_url, fr_url = self.get_card_urls(card_name)
url = en_url if lang == "en" else fr_url
# Open the url image, set stream to True, this will return the stream content.
resp = requests.get(url, stream=True)
# Open a local file with wb ( write binary ) permission.
local_file = open(output_file_path, 'wb')
# Set decode_content value to True, otherwise the downloaded image file's size will be zero.
resp.raw.decode_content = True
# Copy the response stream raw data to local image file.
shutil.copyfileobj(resp.raw, local_file)
# Remove the image url response object.
del resp
def get_card_urls(self, card_name):
"""
:param card_name:
:return: tuple: en url, fr url
"""
if card_name in IoManager.BASIC_LANDS:
return IoManager.BASIC_LANDS_URLS[card_name], IoManager.BASIC_LANDS_URLS[card_name]
urls_df = self.base_infos_df[["name", "img_en_large", "img_fr_large"]]
card_row = urls_df[urls_df["name"] == card_name]
print(card_name)
en_url = card_row["img_en_large"].iloc[0]
fr_url = card_row["img_fr_large"].iloc[0]
return en_url, fr_url
def download_missing_images(self, only_english: bool = True):
"""
Checks for missing images, and downloads them if any are found
:return:
"""
print("\nChecking for missing images")
missing_images_en, missing_images_fr = self.get_missing_images()
for card_names, lang in [(missing_images_en, "en"), (missing_images_fr, "fr")]:
if card_names and (not only_english or lang == "en"):
self.download_card_images(card_names, lang)
@staticmethod
def get_img_path(card_name, lang="en"):
"""
:param card_name:
:param lang: en or fr
:return: path to the img
"""
imgs_folder = IoManager.CARD_IMAGES_PATH_EN if lang == "en" else IoManager.CARD_IMAGES_PATH_FR
return imgs_folder + "/" + card_name + ".jpg"
@staticmethod
def get_sprite_path(sprite_name):
return IoManager.SPRITE_DIR_PATH + sprite_name
@staticmethod
def arch_presence_exists():
return os.path.isfile(IoManager.ARCH_PRESENCE_PATH)
def init_arch_presence(self):
print("Initialising arch presence")
df = pd.DataFrame(columns=self.archetypes.get_archetype_names(as_feature_names=True))
df.to_csv(IoManager.ARCH_PRESENCE_PATH)
return df
@staticmethod
def get_arch_presence():
return pd.read_csv(IoManager.ARCH_PRESENCE_PATH, index_col=[0])
def save_arch_presence(self, arch_presence_entries):
"""
Adds the new entries to the db
:param arch_presence_entries: [[1, 0, 0, 1, 1, 0], [1, 0, 1...]...] 1 if the archetype was present at the draft
:return: new_df
"""
df = IoManager.get_arch_presence()
print(len(arch_presence_entries[0]))
df2 = pd.DataFrame(data=arch_presence_entries, columns=self.archetypes.get_archetype_names(as_feature_names=True))
new_df = pd.concat([df, df2], sort=False)
new_df.to_csv(IoManager.ARCH_PRESENCE_PATH)
return new_df
class DataFetcher:
"""
Used to get base card data from cube_list.txt using Scryfall api
"""
@staticmethod
def update_base_data():
cube_list_file_name = "cube_list.txt"
output_csv_name = "cube_list_base_data.csv"
#DataFetcher.clean_double_faced_from_cube_list(cube_list_file_name=cube_list_file_name)
new_base_data = DataFetcher.fetch_clean_save(cube_list_file_name=cube_list_file_name, output_csv_name=output_csv_name)
if new_base_data is not None:
print(new_base_data.tail(10))
@staticmethod
# Returns the info for a card in English, with its fr img urls too
def get_card_data(card_name):
card_name = "\"" + card_name + "\""
time.sleep(0.1)
en_data = requests.get("https://api.scryfall.com/cards/search?q=!" + card_name).json()
time.sleep(0.1)
fr_data = requests.get("https://api.scryfall.com/cards/search?q=!" + card_name + "+lang%3Afr").json()
en_output = en_data["data"][0]
fr_output = fr_data["data"][0] if "data" in fr_data else None # fr version my not exist
if fr_output is None:
print("French missing for " + card_name)
# handling double-faced cards
if "card_faces" in en_output and en_output["layout"] == "modal_dfc":
full_name = en_output["name"]
en_output = {**en_output, **en_output["card_faces"][0]}
en_output["name"] = full_name
if fr_output is not None:
fr_output = fr_output["card_faces"][0]
return en_output, fr_output
@staticmethod
# Returns a Dataframe containing the relevant fields of the cards in the list
def get_cards_data(card_names):
relevant_fields_en = [
"name",
"highres_image",
"image_uris",
"mana_cost",
"cmc",
"type_line",
"power",
"toughness",
"colors",
"color_identity"
]
relevant_fields_fr = [
"image_uris"
]
raw_data = [DataFetcher.get_card_data(card_name) for card_name in card_names]
df_content = {}
for field in relevant_fields_en:
df_content[field] = [data[0][field] if field in data[0] else np.nan for data in raw_data]
for field in relevant_fields_fr:
df_content[field + "_fr"] = [data[1][field] if data[1] is not None and field in data[1] else np.nan for data in
raw_data]
df = pd.DataFrame(df_content)
return df | def clean_double_faced_from_cube_list(cube_list_file_name):
# removes the second face name for each double faced card
f = open("data/" + cube_list_file_name, "r")
lines = f.readlines()
f.close()
def rm_second_face(line):
if "//" in line:
return line.split(" //")[0] + "\n"
return line
lines = [rm_second_face(l) for l in lines]
f = open("data/" + cube_list_file_name, "w")
f.write("".join(lines))
f.close()
@staticmethod
def get_cube_list(cube_list_file_name):
f = open("data/" + cube_list_file_name, "r")
lines = f.readlines()
f.close()
# removing '\n' at the end of each name
lines = [card_name[:-1] for card_name in lines]
return lines
@staticmethod
def infer_new_cards(cube_list, output_csv_name):
prev_ratings = pd.read_csv("data/" + output_csv_name)
new_cards = [c for c in cube_list if c not in prev_ratings.name.to_list()]
print("There are {} new cards: \n{}".format(len(new_cards), new_cards))
return new_cards
@staticmethod
# gets the cube list, fetches the data for each card, and saves the data as a csv
def fetch_cube_data(cube_list_file_name, output_csv_name):
cube_list = DataFetcher.get_cube_list(cube_list_file_name)
new_cards = DataFetcher.infer_new_cards(cube_list, output_csv_name=output_csv_name)
if not new_cards:
return pd.DataFrame()
cube_data = DataFetcher.get_cards_data(new_cards)
return cube_data
@staticmethod
# creates separate features to store each img url
def clean_image_urls(cube_data):
for lang in ["en", "fr"]:
for image_type in ["small", "normal", "large", "png"]:
feature_name = "img_" + lang + "_" + image_type
current_feature = "image_uris" if lang == "en" else "image_uris_fr"
cube_data[feature_name] = cube_data[current_feature].apply(
lambda d: d[image_type] if type(d) != float and d is not None and image_type in d else np.nan)
@staticmethod
def clean_colors(cube_data):
colors = ["W", "U", "B", "R", "G"]
color_pairs = ["WU", "WB", "WR", "WG", "UB", "UR", "UG", "BR", "BG", "RG"]
for color in colors:
cube_data[color] = cube_data["color_identity"].apply(lambda l: 1 if color in l else 0)
for c, c2 in color_pairs:
cube_data[c + c2] = cube_data["color_identity"].apply(lambda l: 1 if c in l and c2 in l else 0)
@staticmethod
def clean_type_line(cube_data):
cube_data["type_line"] = cube_data["type_line"].str.replace(' —', ':')
@staticmethod
def clean_cmc(cube_data):
cube_data["cmc"] = cube_data["cmc"].astype(int)
@staticmethod
def remove_old_columns(cube_data):
old_columns = ["image_uris", "image_uris_fr"]
valid_columns = [c for c in cube_data.columns if c not in old_columns]
return cube_data[valid_columns]
@staticmethod
def clean_booleans(cube_data):
cube_data["highres_image"] = cube_data["highres_image"].astype(int)
@staticmethod
def clean_cube_data(cube_data):
DataFetcher.clean_image_urls(cube_data)
DataFetcher.clean_colors(cube_data)
DataFetcher.clean_type_line(cube_data)
DataFetcher.clean_cmc(cube_data)
DataFetcher.clean_booleans(cube_data)
return DataFetcher.remove_old_columns(cube_data)
@staticmethod
def save_csv(cube_data, output_csv_name, cube_list_file_name):
current_data = pd.read_csv("data/" + output_csv_name)
new_cards = DataFetcher.infer_new_cards(cube_list=DataFetcher.get_cube_list(cube_list_file_name=cube_list_file_name), output_csv_name=output_csv_name)
new_rows = cube_data[cube_data.name.isin(new_cards)]
new_cube_data = current_data.append(new_rows).reset_index(drop=True)
new_cube_data.to_csv("data/" + output_csv_name, index=False)
return new_cube_data
@staticmethod
# does it all
def fetch_clean_save(cube_list_file_name, output_csv_name):
cube_data = DataFetcher.fetch_cube_data(cube_list_file_name=cube_list_file_name, output_csv_name=output_csv_name)
if len(cube_data.index) == 0:
return None
cube_data_clean = DataFetcher.clean_cube_data(cube_data)
return DataFetcher.save_csv(cube_data=cube_data_clean, output_csv_name=output_csv_name, cube_list_file_name=cube_list_file_name)
class RatingsInitializer:
@staticmethod
def prepare_new_ratings(archetypes):
new_ratings = RatingsInitializer.setup_for_new_cards(RatingsInitializer.load_cards_df(), archetypes)
if new_ratings is not None:
RatingsInitializer.save_csv(new_ratings)
exit()
return new_ratings
@staticmethod
def load_cards_df():
df = pd.read_csv("data/cube_list_base_data.csv")
for list_feature in ["colors", "color_identity"]:
df[list_feature] = df[list_feature].apply(lambda e: e if type(e) != float else "[]")
df[list_feature] = df[list_feature].apply(ast.literal_eval)
return df
@staticmethod
def setup_for_new_cards(df, archetypes):
new_df = pd.DataFrame({"name": df["name"]})
for arch in archetypes.list:
feature_name = arch.name.lower().replace(' ', '_')
new_df[feature_name] = df["color_identity"].apply(lambda l: 9 if arch.is_available(l) else 0)
# merging with existing ratings
current_ratings = pd.read_csv("data/archetype_ratings.csv")
new_cards = [c for c in df.name.to_list() if c not in current_ratings.name.to_list()]
if new_cards:
print("Preparing ratings for new cards: {}".format(new_cards))
print("Make sure to manually replace 9 values by 0-4 values")
new_rows = new_df[new_df.name.isin(new_cards)]
new_ratings = current_ratings.append(new_rows)
return new_ratings if new_cards else None
@staticmethod
def save_csv(new_df):
new_df.to_csv("data/archetype_ratings.csv", index=False) |
@staticmethod | random_line_split |
IoManager.py | import ast
import os
from os import listdir
from os.path import isfile, join
import requests
import shutil
import pandas as pd
from pathlib import Path
import time
import numpy as np
class IoManager:
"""
Handles every input/output of the program
Currently:
* loads cube list and cards base data on startup
* checks/downloads missing card images (en/fr) on startup
"""
CUBE_LIST_FILE_PATH = "data/cube_list.txt"
CARD_INFOS_FILE_PATH = "data/cube_list_base_data.csv"
CARD_RATINGS_FILE_PATH = "data/archetype_ratings.csv"
ARCH_PRESENCE_PATH = "data/archetype_presence.csv"
CARD_IMAGES_PATH = "data/imgs"
CARD_IMAGES_PATH_EN = CARD_IMAGES_PATH + "/en"
CARD_IMAGES_PATH_FR = CARD_IMAGES_PATH + "/fr"
SPRITE_DIR_PATH = "data/imgs/sprites/"
BASIC_LANDS = ["Plains", "Island", "Swamp", "Mountain", "Forest"]
BASIC_LANDS_URLS = {
"Plains": "https://img.scryfall.com/cards/large/front/a/9/a9891b7b-fc52-470c-9f74-292ae665f378.jpg?1581719749",
"Island": "https://img.scryfall.com/cards/large/front/a/c/acf7b664-3e75-4018-81f6-2a14ab59f258.jpg?1582126055",
"Swamp": "https://img.scryfall.com/cards/large/front/0/2/02cb5cfd-018e-4c5e-bef1-166262aa5f1d.jpg?1582126067",
"Mountain": "https://img.scryfall.com/cards/large/front/5/3/53fb7b99-9e47-46a6-9c8a-88e28b5197f1.jpg?1582126072",
"Forest": "https://img.scryfall.com/cards/large/front/3/2/32af9f41-89e2-4e7a-9fec-fffe79cae077.jpg?1582126077"
}
base_infos_data = None
def __init__(self, archetypes, download_missing_images=True):
self.archetypes = archetypes
self.cube_list = IoManager.get_cube_list()
self.non_basics = self.get_nonbasic_lands_list()
self.base_infos_df = IoManager.get_cards_base_info()
IoManager.base_infos_data = self.base_infos_df
self.ratings = self.get_ratings()
if download_missing_images:
self.download_missing_images()
if not IoManager.arch_presence_exists():
self.init_arch_presence()
@staticmethod
def | ():
"""
:return: list of card names, cube list
"""
f = open(IoManager.CUBE_LIST_FILE_PATH, "r")
lines = f.readlines()
f.close()
# removing '\n' at the end of each name
lines = [card_name[:-1] for card_name in lines]
return lines
@staticmethod
def get_cards_base_info():
"""
:return: DataFrame containing data for each card, such as power, toughness, urls...
"""
df = pd.read_csv(IoManager.CARD_INFOS_FILE_PATH)
for list_feature in ["colors", "color_identity"]:
df[list_feature] = df[list_feature].apply(lambda e: e if type(e) != float else "[]")
df[list_feature] = df[list_feature].apply(ast.literal_eval)
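# The CSV stores these list columns as strings, so NaN floats are first replaced with "[]"
# and ast.literal_eval then turns each string back into a real Python list.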
return df
def get_ratings(self):
"""
Loads, scales, adds sum columns and returns the cards' ratings per archetype
:return: DataFrame containing each archetype rating for each card
"""
df = pd.read_csv(IoManager.CARD_RATINGS_FILE_PATH)
df = IoManager.scale_ratings(df)
df = IoManager.normalize_ratings_per_archetype(df)
df = self.add_ratings_sum(df)
# print(df[["name", "monogreen", "simic_ramp", "general"]].tail(60))
# print(df[["name", "general"]].sort_values(ascending=False, by="general").head(50))
return df
@staticmethod
def scale_ratings(ratings):
"""
The gap in power between a 3 and a 4 is much wider than a linear 1-5 scale suggests, so ratings are remapped to a steeper scale
:param ratings: df
:return: updated df
"""
mapping = {
2: 3,
3: 8,
4: 30,
5: 60
}
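# Note: applymap visits every cell; values not in the mapping (including the strings in the
# 'name' column) fall through the else branch unchanged, so only ratings 2-5 are remapped.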
ratings = ratings.applymap(lambda e: mapping[e] if e in mapping else e)
return ratings
@staticmethod
def normalize_ratings_per_archetype(ratings):
"""
Divides each rating by a value proportional to the sum of all the ratings in the archetype
:param ratings:
:return:
"""
archetype_cols = [c for c in ratings.columns if c != "name"]
n_cards = len(ratings["monored"])
for arch_col in archetype_cols:
ratings[arch_col] = ratings[arch_col] / (ratings[arch_col].sum() / n_cards)
return ratings
def add_ratings_sum(self, ratings):
"""
Adds the 'general' and 'general_raw' columns to the ratings DataFrame
:return: the updated DataFrame
"""
rate_columns = [c for c in ratings.columns if c != "name"]
coef_per_archetype = self.archetypes.get_color_coeffs()
ratings["general"] = ratings[rate_columns].apply(lambda row: sum([row[c]*coef_per_archetype[c] for c in rate_columns]), axis=1)
ratings["general_raw"] = ratings[rate_columns].sum(axis=1)
return ratings
def get_nonbasic_lands_list(self):
"""
:return: a list of land names
"""
return self.cube_list[320:]
@staticmethod
def get_downloaded_images(lang="en"):
"""
Returns which cards we have downloaded images for (of the specified language, en or fr)
:return: list of card names
"""
path = IoManager.CARD_IMAGES_PATH_EN if lang == "en" else IoManager.CARD_IMAGES_PATH_FR
Path(path).mkdir(parents=True, exist_ok=True)
file_names_en = [f for f in listdir(path) if
isfile(join(path, f))]
card_names = [f[:-4] for f in file_names_en]
card_names = [c.replace("__", "//") for c in card_names] # to handle double-faced cards
return card_names
def get_missing_images(self):
"""
Returns which cards we miss images for
:return: tuple: list of en missing, list of fr missing
"""
downloaded_images_en = IoManager.get_downloaded_images(lang="en")
downloaded_images_fr = IoManager.get_downloaded_images(lang="fr")
complete_list = self.cube_list + IoManager.BASIC_LANDS
missing_images_en = [card for card in complete_list if card not in downloaded_images_en]
missing_images_fr = [card for card in complete_list if card not in downloaded_images_fr]
return missing_images_en, missing_images_fr
def download_card_images(self, card_names, lang="en"):
"""
Downloads the en and fr card image of each card
:param lang: en or fr
:param card_names: list of card names
:return:
"""
for card_name in card_names:
print("Dowloading card imgs for \'" + card_name + "\' (" + lang + ")")
output_file_name = card_name + ".jpg"
output_file_path = IoManager.CARD_IMAGES_PATH_EN + "/" + output_file_name if lang == "en" else IoManager.CARD_IMAGES_PATH_FR + "/" + output_file_name
output_file_path = output_file_path.replace('//', '__')
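# Card names containing '//' (double-faced cards) can't be used directly as file names, so the
# separator is swapped for '__' here and swapped back in get_downloaded_images().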
en_url, fr_url = self.get_card_urls(card_name)
url = en_url if lang == "en" else fr_url
# Open the url image, set stream to True, this will return the stream content.
resp = requests.get(url, stream=True)
# Open a local file with wb ( write binary ) permission.
local_file = open(output_file_path, 'wb')
# Set decode_content value to True, otherwise the downloaded image file's size will be zero.
resp.raw.decode_content = True
# Copy the response stream raw data to local image file.
shutil.copyfileobj(resp.raw, local_file)
# Close the file handle so the image is fully written to disk.
local_file.close()
# Remove the image url response object.
del resp
def get_card_urls(self, card_name):
"""
:param card_name:
:return: tuple: en url, fr url
"""
if card_name in IoManager.BASIC_LANDS:
return IoManager.BASIC_LANDS_URLS[card_name], IoManager.BASIC_LANDS_URLS[card_name]
urls_df = self.base_infos_df[["name", "img_en_large", "img_fr_large"]]
card_row = urls_df[urls_df["name"] == card_name]
print(card_name)
en_url = card_row["img_en_large"].iloc[0]
fr_url = card_row["img_fr_large"].iloc[0]
return en_url, fr_url
def download_missing_images(self, only_english: bool = True):
"""
Checks for missing images, and downloads them if any are found
:return:
"""
print("\nChecking for missing images")
missing_images_en, missing_images_fr = self.get_missing_images()
for card_names, lang in [(missing_images_en, "en"), (missing_images_fr, "fr")]:
if card_names and (not only_english or lang == "en"):
self.download_card_images(card_names, lang)
@staticmethod
def get_img_path(card_name, lang="en"):
"""
:param card_name:
:param lang: en or fr
:return: path to the img
"""
imgs_folder = IoManager.CARD_IMAGES_PATH_EN if lang == "en" else IoManager.CARD_IMAGES_PATH_FR
return imgs_folder + "/" + card_name + ".jpg"
@staticmethod
def get_sprite_path(sprite_name):
return IoManager.SPRITE_DIR_PATH + sprite_name
@staticmethod
def arch_presence_exists():
return os.path.isfile(IoManager.ARCH_PRESENCE_PATH)
def init_arch_presence(self):
print("Initialising arch presence")
df = pd.DataFrame(columns=self.archetypes.get_archetype_names(as_feature_names=True))
df.to_csv(IoManager.ARCH_PRESENCE_PATH)
return df
@staticmethod
def get_arch_presence():
return pd.read_csv(IoManager.ARCH_PRESENCE_PATH, index_col=[0])
def save_arch_presence(self, arch_presence_entries):
"""
Adds the new entries to the db
:param arch_presence_entries: [[1, 0, 0, 1, 1, 0], [1, 0, 1...]...] 1 if the archetype was present at the draft
:return: new_df
"""
df = IoManager.get_arch_presence()
print(len(arch_presence_entries[0]))
df2 = pd.DataFrame(data=arch_presence_entries, columns=self.archetypes.get_archetype_names(as_feature_names=True))
new_df = pd.concat([df, df2], sort=False)
new_df.to_csv(IoManager.ARCH_PRESENCE_PATH)
return new_df
class DataFetcher:
"""
Used to get base card data from cube_list.txt using Scryfall api
"""
@staticmethod
def update_base_data():
cube_list_file_name = "cube_list.txt"
output_csv_name = "cube_list_base_data.csv"
#DataFetcher.clean_double_faced_from_cube_list(cube_list_file_name=cube_list_file_name)
new_base_data = DataFetcher.fetch_clean_save(cube_list_file_name=cube_list_file_name, output_csv_name=output_csv_name)
if new_base_data is not None:
print(new_base_data.tail(10))
@staticmethod
# Returns the info for a card in English, with its French image urls too
def get_card_data(card_name):
card_name = "\"" + card_name + "\""
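# The quoted name plus the '!' prefix in the request URLs below asks Scryfall for an exact-name match,
# and the short sleeps space out the API calls (the exact rate-limit etiquette is an assumption here).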
time.sleep(0.1)
en_data = requests.get("https://api.scryfall.com/cards/search?q=!" + card_name).json()
time.sleep(0.1)
fr_data = requests.get("https://api.scryfall.com/cards/search?q=!" + card_name + "+lang%3Afr").json()
en_output = en_data["data"][0]
fr_output = fr_data["data"][0] if "data" in fr_data else None # fr version may not exist
if fr_output is None:
print("French missing for " + card_name)
# handling double-faced cards
if "card_faces" in en_output and en_output["layout"] == "modal_dfc":
full_name = en_output["name"]
en_output = {**en_output, **en_output["card_faces"][0]}
en_output["name"] = full_name
if fr_output is not None:
fr_output = fr_output["card_faces"][0]
return en_output, fr_output
@staticmethod
# Returns a Dataframe containing the relevant fields of the cards in the list
def get_cards_data(card_names):
relevant_fields_en = [
"name",
"highres_image",
"image_uris",
"mana_cost",
"cmc",
"type_line",
"power",
"toughness",
"colors",
"color_identity"
]
relevant_fields_fr = [
"image_uris"
]
raw_data = [DataFetcher.get_card_data(card_name) for card_name in card_names]
df_content = {}
for field in relevant_fields_en:
df_content[field] = [data[0][field] if field in data[0] else np.nan for data in raw_data]
for field in relevant_fields_fr:
df_content[field + "_fr"] = [data[1][field] if data[1] is not None and field in data[1] else np.nan for data in
raw_data]
df = pd.DataFrame(df_content)
return df
@staticmethod
def clean_double_faced_from_cube_list(cube_list_file_name):
# removes the second face name for each double faced card
f = open("data/" + cube_list_file_name, "r")
lines = f.readlines()
f.close()
def rm_second_face(line):
if "//" in line:
return line.split(" //")[0] + "\n"
return line
lines = [rm_second_face(l) for l in lines]
f = open("data/" + cube_list_file_name, "w")
f.write("".join(lines))
f.close()
@staticmethod
def get_cube_list(cube_list_file_name):
f = open("data/" + cube_list_file_name, "r")
lines = f.readlines()
f.close()
# removing '\n' at the end of each name
lines = [card_name[:-1] for card_name in lines]
return lines
@staticmethod
def infer_new_cards(cube_list, output_csv_name):
prev_ratings = pd.read_csv("data/" + output_csv_name)
new_cards = [c for c in cube_list if c not in prev_ratings.name.to_list()]
print("There are {} new cards: \n{}".format(len(new_cards), new_cards))
return new_cards
@staticmethod
# gets the cube list, fetches the data for each card, and saves the data as a csv
def fetch_cube_data(cube_list_file_name, output_csv_name):
cube_list = DataFetcher.get_cube_list(cube_list_file_name)
new_cards = DataFetcher.infer_new_cards(cube_list, output_csv_name=output_csv_name)
if not new_cards:
return pd.DataFrame()
cube_data = DataFetcher.get_cards_data(new_cards)
return cube_data
@staticmethod
# creates separate features to store each img url
def clean_image_urls(cube_data):
for lang in ["en", "fr"]:
for image_type in ["small", "normal", "large", "png"]:
feature_name = "img_" + lang + "_" + image_type
current_feature = "image_uris" if lang == "en" else "image_uris_fr"
cube_data[feature_name] = cube_data[current_feature].apply(
lambda d: d[image_type] if type(d) != float and d is not None and image_type in d else np.nan)
@staticmethod
def clean_colors(cube_data):
colors = ["W", "U", "B", "R", "G"]
color_pairs = ["WU", "WB", "WR", "WG", "UB", "UR", "UG", "BR", "BG", "RG"]
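# These are meant to be the ten two-color identity pairs; each derived column flags cards whose
# color identity contains both colors of the pair.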
for color in colors:
cube_data[color] = cube_data["color_identity"].apply(lambda l: 1 if color in l else 0)
for c, c2 in color_pairs:
cube_data[c + c2] = cube_data["color_identity"].apply(lambda l: 1 if c in l and c2 in l else 0)
@staticmethod
def clean_type_line(cube_data):
cube_data["type_line"] = cube_data["type_line"].str.replace(' —', ':')
@staticmethod
def clean_cmc(cube_data):
cube_data["cmc"] = cube_data["cmc"].astype(int)
@staticmethod
def remove_old_columns(cube_data):
old_columns = ["image_uris", "image_uris_fr"]
valid_columns = [c for c in cube_data.columns if c not in old_columns]
return cube_data[valid_columns]
@staticmethod
def clean_booleans(cube_data):
cube_data["highres_image"] = cube_data["highres_image"].astype(int)
@staticmethod
def clean_cube_data(cube_data):
DataFetcher.clean_image_urls(cube_data)
DataFetcher.clean_colors(cube_data)
DataFetcher.clean_type_line(cube_data)
DataFetcher.clean_cmc(cube_data)
DataFetcher.clean_booleans(cube_data)
return DataFetcher.remove_old_columns(cube_data)
@staticmethod
def save_csv(cube_data, output_csv_name, cube_list_file_name):
current_data = pd.read_csv("data/" + output_csv_name)
new_cards = DataFetcher.infer_new_cards(cube_list=DataFetcher.get_cube_list(cube_list_file_name=cube_list_file_name), output_csv_name=output_csv_name)
new_rows = cube_data[cube_data.name.isin(new_cards)]
new_cube_data = current_data.append(new_rows).reset_index(drop=True)
new_cube_data.to_csv("data/" + output_csv_name, index=False)
return new_cube_data
@staticmethod
# does it all
def fetch_clean_save(cube_list_file_name, output_csv_name):
cube_data = DataFetcher.fetch_cube_data(cube_list_file_name=cube_list_file_name, output_csv_name=output_csv_name)
if len(cube_data.index) == 0:
return None
cube_data_clean = DataFetcher.clean_cube_data(cube_data)
return DataFetcher.save_csv(cube_data=cube_data_clean, output_csv_name=output_csv_name, cube_list_file_name=cube_list_file_name)
class RatingsInitializer:
@staticmethod
def prepare_new_ratings(archetypes):
new_ratings = RatingsInitializer.setup_for_new_cards(RatingsInitializer.load_cards_df(), archetypes)
if new_ratings is not None:
RatingsInitializer.save_csv(new_ratings)
exit()
return new_ratings
@staticmethod
def load_cards_df():
df = pd.read_csv("data/cube_list_base_data.csv")
for list_feature in ["colors", "color_identity"]:
df[list_feature] = df[list_feature].apply(lambda e: e if type(e) != float else "[]")
df[list_feature] = df[list_feature].apply(ast.literal_eval)
return df
@staticmethod
def setup_for_new_cards(df, archetypes):
new_df = pd.DataFrame({"name": df["name"]})
for arch in archetypes.list:
feature_name = arch.name.lower().replace(' ', '_')
new_df[feature_name] = df["color_identity"].apply(lambda l: 9 if arch.is_available(l) else 0)
# merging with existing ratings
current_ratings = pd.read_csv("data/archetype_ratings.csv")
new_cards = [c for c in df.name.to_list() if c not in current_ratings.name.to_list()]
if new_cards:
print("Preparing ratings for new cards: {}".format(new_cards))
print("Make sure to manually replace 9 values by 0-4 values")
new_rows = new_df[new_df.name.isin(new_cards)]
new_ratings = current_ratings.append(new_rows)
return new_ratings if new_cards else None
@staticmethod
def save_csv(new_df):
new_df.to_csv("data/archetype_ratings.csv", index=False)
| get_cube_list | identifier_name |
IoManager.py | import ast
import os
from os import listdir
from os.path import isfile, join
import requests
import shutil
import pandas as pd
from pathlib import Path
import time
import numpy as np
class IoManager:
"""
Handles every input/output of the program
Currently:
* loads cube list and cards base data on startup
* checks/downloads missing card images (en/fr) on startup
"""
CUBE_LIST_FILE_PATH = "data/cube_list.txt"
CARD_INFOS_FILE_PATH = "data/cube_list_base_data.csv"
CARD_RATINGS_FILE_PATH = "data/archetype_ratings.csv"
ARCH_PRESENCE_PATH = "data/archetype_presence.csv"
CARD_IMAGES_PATH = "data/imgs"
CARD_IMAGES_PATH_EN = CARD_IMAGES_PATH + "/en"
CARD_IMAGES_PATH_FR = CARD_IMAGES_PATH + "/fr"
SPRITE_DIR_PATH = "data/imgs/sprites/"
BASIC_LANDS = ["Plains", "Island", "Swamp", "Mountain", "Forest"]
BASIC_LANDS_URLS = {
"Plains": "https://img.scryfall.com/cards/large/front/a/9/a9891b7b-fc52-470c-9f74-292ae665f378.jpg?1581719749",
"Island": "https://img.scryfall.com/cards/large/front/a/c/acf7b664-3e75-4018-81f6-2a14ab59f258.jpg?1582126055",
"Swamp": "https://img.scryfall.com/cards/large/front/0/2/02cb5cfd-018e-4c5e-bef1-166262aa5f1d.jpg?1582126067",
"Mountain": "https://img.scryfall.com/cards/large/front/5/3/53fb7b99-9e47-46a6-9c8a-88e28b5197f1.jpg?1582126072",
"Forest": "https://img.scryfall.com/cards/large/front/3/2/32af9f41-89e2-4e7a-9fec-fffe79cae077.jpg?1582126077"
}
base_infos_data = None
def __init__(self, archetypes, download_missing_images=True):
self.archetypes = archetypes
self.cube_list = IoManager.get_cube_list()
self.non_basics = self.get_nonbasic_lands_list()
self.base_infos_df = IoManager.get_cards_base_info()
IoManager.base_infos_data = self.base_infos_df
self.ratings = self.get_ratings()
if download_missing_images:
self.download_missing_images()
if not IoManager.arch_presence_exists():
self.init_arch_presence()
@staticmethod
def get_cube_list():
"""
:return: list of card names, cube list
"""
f = open(IoManager.CUBE_LIST_FILE_PATH, "r")
lines = f.readlines()
f.close()
# removing '\n' at the end of each name
lines = [card_name[:-1] for card_name in lines]
return lines
@staticmethod
def get_cards_base_info():
"""
:return: DataFrame containing data for each card, such as power, toughness, urls...
"""
df = pd.read_csv(IoManager.CARD_INFOS_FILE_PATH)
for list_feature in ["colors", "color_identity"]:
df[list_feature] = df[list_feature].apply(lambda e: e if type(e) != float else "[]")
df[list_feature] = df[list_feature].apply(ast.literal_eval)
return df
def get_ratings(self):
"""
Loads, scales, adds sum columns and returns the cards' ratings per archetype
:return: DataFrame containing each archetype rating for each card
"""
df = pd.read_csv(IoManager.CARD_RATINGS_FILE_PATH)
df = IoManager.scale_ratings(df)
df = IoManager.normalize_ratings_per_archetype(df)
df = self.add_ratings_sum(df)
# print(df[["name", "monogreen", "simic_ramp", "general"]].tail(60))
# print(df[["name", "general"]].sort_values(ascending=False, by="general").head(50))
return df
@staticmethod
def scale_ratings(ratings):
"""
The gap in power between a 3 and a 4 is much wider than a linear 1-5 scale suggests, so ratings are remapped to a steeper scale
:param ratings: df
:return: updated df
"""
mapping = {
2: 3,
3: 8,
4: 30,
5: 60
}
ratings = ratings.applymap(lambda e: mapping[e] if e in mapping else e)
return ratings
@staticmethod
def normalize_ratings_per_archetype(ratings):
"""
Divides each rating by a value proportional to the sum of all the ratings in the archetype
:param ratings:
:return:
"""
archetype_cols = [c for c in ratings.columns if c != "name"]
n_cards = len(ratings["monored"])
for arch_col in archetype_cols:
ratings[arch_col] = ratings[arch_col] / (ratings[arch_col].sum() / n_cards)
return ratings
def add_ratings_sum(self, ratings):
"""
Adds the 'general' and 'general_raw' columns to the ratings DataFrame
:return: the updated DataFrame
"""
rate_columns = [c for c in ratings.columns if c != "name"]
coef_per_archetype = self.archetypes.get_color_coeffs()
ratings["general"] = ratings[rate_columns].apply(lambda row: sum([row[c]*coef_per_archetype[c] for c in rate_columns]), axis=1)
ratings["general_raw"] = ratings[rate_columns].sum(axis=1)
return ratings
def get_nonbasic_lands_list(self):
"""
:return: a list of land names
"""
return self.cube_list[320:]
@staticmethod
def get_downloaded_images(lang="en"):
"""
Returns which cards we have downloaded images for (of the specified language, en or fr)
:return: list of card names
"""
path = IoManager.CARD_IMAGES_PATH_EN if lang == "en" else IoManager.CARD_IMAGES_PATH_FR
Path(path).mkdir(parents=True, exist_ok=True)
file_names_en = [f for f in listdir(path) if
isfile(join(path, f))]
card_names = [f[:-4] for f in file_names_en]
card_names = [c.replace("__", "//") for c in card_names] # to handle double-faced cards
return card_names
def get_missing_images(self):
"""
Returns which cards we miss images for
:return: tuple: list of en missing, list of fr missing
"""
downloaded_images_en = IoManager.get_downloaded_images(lang="en")
downloaded_images_fr = IoManager.get_downloaded_images(lang="fr")
complete_list = self.cube_list + IoManager.BASIC_LANDS
missing_images_en = [card for card in complete_list if card not in downloaded_images_en]
missing_images_fr = [card for card in complete_list if card not in downloaded_images_fr]
return missing_images_en, missing_images_fr
def download_card_images(self, card_names, lang="en"):
"""
Downloads the en and fr card image of each card
:param lang: en or fr
:param card_names: list of card names
:return:
"""
for card_name in card_names:
print("Dowloading card imgs for \'" + card_name + "\' (" + lang + ")")
output_file_name = card_name + ".jpg"
output_file_path = IoManager.CARD_IMAGES_PATH_EN + "/" + output_file_name if lang == "en" else IoManager.CARD_IMAGES_PATH_FR + "/" + output_file_name
output_file_path = output_file_path.replace('//', '__')
en_url, fr_url = self.get_card_urls(card_name)
url = en_url if lang == "en" else fr_url
# Open the url image, set stream to True, this will return the stream content.
resp = requests.get(url, stream=True)
# Open a local file with wb ( write binary ) permission.
local_file = open(output_file_path, 'wb')
# Set decode_content value to True, otherwise the downloaded image file's size will be zero.
resp.raw.decode_content = True
# Copy the response stream raw data to local image file.
shutil.copyfileobj(resp.raw, local_file)
# Close the file handle so the image is fully written to disk.
local_file.close()
# Remove the image url response object.
del resp
def get_card_urls(self, card_name):
"""
:param card_name:
:return: tuple: en url, fr url
"""
if card_name in IoManager.BASIC_LANDS:
return IoManager.BASIC_LANDS_URLS[card_name], IoManager.BASIC_LANDS_URLS[card_name]
urls_df = self.base_infos_df[["name", "img_en_large", "img_fr_large"]]
card_row = urls_df[urls_df["name"] == card_name]
print(card_name)
en_url = card_row["img_en_large"].iloc[0]
fr_url = card_row["img_fr_large"].iloc[0]
return en_url, fr_url
def download_missing_images(self, only_english: bool = True):
"""
Checks for missing images, and downloads them if any are found
:return:
"""
print("\nChecking for missing images")
missing_images_en, missing_images_fr = self.get_missing_images()
for card_names, lang in [(missing_images_en, "en"), (missing_images_fr, "fr")]:
if card_names and (not only_english or lang == "en"):
self.download_card_images(card_names, lang)
@staticmethod
def get_img_path(card_name, lang="en"):
"""
:param card_name:
:param lang: en or fr
:return: path to the img
"""
imgs_folder = IoManager.CARD_IMAGES_PATH_EN if lang == "en" else IoManager.CARD_IMAGES_PATH_FR
return imgs_folder + "/" + card_name + ".jpg"
@staticmethod
def get_sprite_path(sprite_name):
return IoManager.SPRITE_DIR_PATH + sprite_name
@staticmethod
def arch_presence_exists():
return os.path.isfile(IoManager.ARCH_PRESENCE_PATH)
def init_arch_presence(self):
print("Initialising arch presence")
df = pd.DataFrame(columns=self.archetypes.get_archetype_names(as_feature_names=True))
df.to_csv(IoManager.ARCH_PRESENCE_PATH)
return df
@staticmethod
def get_arch_presence():
return pd.read_csv(IoManager.ARCH_PRESENCE_PATH, index_col=[0])
def save_arch_presence(self, arch_presence_entries):
"""
Adds the new entries to the db
:param arch_presence_entries: [[1, 0, 0, 1, 1, 0], [1, 0, 1...]...] 1 if the archetype was present at the draft
:return: new_df
"""
df = IoManager.get_arch_presence()
print(len(arch_presence_entries[0]))
df2 = pd.DataFrame(data=arch_presence_entries, columns=self.archetypes.get_archetype_names(as_feature_names=True))
new_df = pd.concat([df, df2], sort=False)
new_df.to_csv(IoManager.ARCH_PRESENCE_PATH)
return new_df
class DataFetcher:
"""
Used to get base card data from cube_list.txt using Scryfall api
"""
@staticmethod
def update_base_data():
cube_list_file_name = "cube_list.txt"
output_csv_name = "cube_list_base_data.csv"
#DataFetcher.clean_double_faced_from_cube_list(cube_list_file_name=cube_list_file_name)
new_base_data = DataFetcher.fetch_clean_save(cube_list_file_name=cube_list_file_name, output_csv_name=output_csv_name)
if new_base_data is not None:
print(new_base_data.tail(10))
@staticmethod
# Returns the info for a card in English, with its French image urls too
def get_card_data(card_name):
card_name = "\"" + card_name + "\""
time.sleep(0.1)
en_data = requests.get("https://api.scryfall.com/cards/search?q=!" + card_name).json()
time.sleep(0.1)
fr_data = requests.get("https://api.scryfall.com/cards/search?q=!" + card_name + "+lang%3Afr").json()
en_output = en_data["data"][0]
fr_output = fr_data["data"][0] if "data" in fr_data else None # fr version may not exist
if fr_output is None:
print("French missing for " + card_name)
# handling double-faced cards
if "card_faces" in en_output and en_output["layout"] == "modal_dfc":
full_name = en_output["name"]
en_output = {**en_output, **en_output["card_faces"][0]}
en_output["name"] = full_name
if fr_output is not None:
fr_output = fr_output["card_faces"][0]
return en_output, fr_output
@staticmethod
# Returns a Dataframe containing the relevant fields of the cards in the list
def get_cards_data(card_names):
relevant_fields_en = [
"name",
"highres_image",
"image_uris",
"mana_cost",
"cmc",
"type_line",
"power",
"toughness",
"colors",
"color_identity"
]
relevant_fields_fr = [
"image_uris"
]
raw_data = [DataFetcher.get_card_data(card_name) for card_name in card_names]
df_content = {}
for field in relevant_fields_en:
df_content[field] = [data[0][field] if field in data[0] else np.nan for data in raw_data]
for field in relevant_fields_fr:
df_content[field + "_fr"] = [data[1][field] if data[1] is not None and field in data[1] else np.nan for data in
raw_data]
df = pd.DataFrame(df_content)
return df
@staticmethod
def clean_double_faced_from_cube_list(cube_list_file_name):
# removes the second face name for each double faced card
f = open("data/" + cube_list_file_name, "r")
lines = f.readlines()
f.close()
def rm_second_face(line):
if "//" in line:
return line.split(" //")[0] + "\n"
return line
lines = [rm_second_face(l) for l in lines]
f = open("data/" + cube_list_file_name, "w")
f.write("".join(lines))
f.close()
@staticmethod
def get_cube_list(cube_list_file_name):
f = open("data/" + cube_list_file_name, "r")
lines = f.readlines()
f.close()
# removing '\n' at the end of each name
lines = [card_name[:-1] for card_name in lines]
return lines
@staticmethod
def infer_new_cards(cube_list, output_csv_name):
prev_ratings = pd.read_csv("data/" + output_csv_name)
new_cards = [c for c in cube_list if c not in prev_ratings.name.to_list()]
print("There are {} new cards: \n{}".format(len(new_cards), new_cards))
return new_cards
@staticmethod
# gets the cube list, fetches the data for each card, and saves the data as a csv
def fetch_cube_data(cube_list_file_name, output_csv_name):
cube_list = DataFetcher.get_cube_list(cube_list_file_name)
new_cards = DataFetcher.infer_new_cards(cube_list, output_csv_name=output_csv_name)
if not new_cards:
return pd.DataFrame()
cube_data = DataFetcher.get_cards_data(new_cards)
return cube_data
@staticmethod
# creates separate features to store each img url
def clean_image_urls(cube_data):
for lang in ["en", "fr"]:
for image_type in ["small", "normal", "large", "png"]:
feature_name = "img_" + lang + "_" + image_type
current_feature = "image_uris" if lang == "en" else "image_uris_fr"
cube_data[feature_name] = cube_data[current_feature].apply(
lambda d: d[image_type] if type(d) != float and d is not None and image_type in d else np.nan)
@staticmethod
def clean_colors(cube_data):
colors = ["W", "U", "B", "R", "G"]
color_pairs = ["WU", "WB", "WR", "WG", "UB", "UR", "UG", "BR", "BG", "RG"]
for color in colors:
cube_data[color] = cube_data["color_identity"].apply(lambda l: 1 if color in l else 0)
for c, c2 in color_pairs:
cube_data[c + c2] = cube_data["color_identity"].apply(lambda l: 1 if c in l and c2 in l else 0)
@staticmethod
def clean_type_line(cube_data):
cube_data["type_line"] = cube_data["type_line"].str.replace(' —', ':')
@staticmethod
def clean_cmc(cube_data):
cube_data["cmc"] = cube_data["cmc"].astype(int)
@staticmethod
def remove_old_columns(cube_data):
old_columns = ["image_uris", "image_uris_fr"]
valid_columns = [c for c in cube_data.columns if c not in old_columns]
return cube_data[valid_columns]
@staticmethod
def clean_booleans(cube_data):
cube_data["highres_image"] = cube_data["highres_image"].astype(int)
@staticmethod
def clean_cube_data(cube_data):
DataFetcher.clean_image_urls(cube_data)
DataFetcher.clean_colors(cube_data)
DataFetcher.clean_type_line(cube_data)
DataFetcher.clean_cmc(cube_data)
DataFetcher.clean_booleans(cube_data)
return DataFetcher.remove_old_columns(cube_data)
@staticmethod
def save_csv(cube_data, output_csv_name, cube_list_file_name):
current_data = pd.read_csv("data/" + output_csv_name)
new_cards = DataFetcher.infer_new_cards(cube_list=DataFetcher.get_cube_list(cube_list_file_name=cube_list_file_name), output_csv_name=output_csv_name)
new_rows = cube_data[cube_data.name.isin(new_cards)]
new_cube_data = current_data.append(new_rows).reset_index(drop=True)
new_cube_data.to_csv("data/" + output_csv_name, index=False)
return new_cube_data
@staticmethod
# does it all
def fetch_clean_save(cube_list_file_name, output_csv_name):
cube_data = DataFetcher.fetch_cube_data(cube_list_file_name=cube_list_file_name, output_csv_name=output_csv_name)
if len(cube_data.index) == 0:
return None
cube_data_clean = DataFetcher.clean_cube_data(cube_data)
return DataFetcher.save_csv(cube_data=cube_data_clean, output_csv_name=output_csv_name, cube_list_file_name=cube_list_file_name)
class RatingsInitializer:
@staticmethod
def prepare_new_ratings(archetypes):
new_ratings = RatingsInitializer.setup_for_new_cards(RatingsInitializer.load_cards_df(), archetypes)
if new_ratings is not None:
Ra | return new_ratings
@staticmethod
def load_cards_df():
df = pd.read_csv("data/cube_list_base_data.csv")
for list_feature in ["colors", "color_identity"]:
df[list_feature] = df[list_feature].apply(lambda e: e if type(e) != float else "[]")
df[list_feature] = df[list_feature].apply(ast.literal_eval)
return df
@staticmethod
def setup_for_new_cards(df, archetypes):
new_df = pd.DataFrame({"name": df["name"]})
for arch in archetypes.list:
feature_name = arch.name.lower().replace(' ', '_')
new_df[feature_name] = df["color_identity"].apply(lambda l: 9 if arch.is_available(l) else 0)
# merging with existing ratings
current_ratings = pd.read_csv("data/archetype_ratings.csv")
new_cards = [c for c in df.name.to_list() if c not in current_ratings.name.to_list()]
if new_cards:
print("Preparing ratings for new cards: {}".format(new_cards))
print("Make sure to manually replace 9 values by 0-4 values")
new_rows = new_df[new_df.name.isin(new_cards)]
new_ratings = current_ratings.append(new_rows)
return new_ratings if new_cards else None
@staticmethod
def save_csv(new_df):
new_df.to_csv("data/archetype_ratings.csv", index=False)
| tingsInitializer.save_csv(new_ratings)
exit()
| conditional_block |
clean_summaries.py | """
/*
* Copyright (c) 2021, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
*/
"""
import json
import os
from unidecode import unidecode
import re
from tqdm import tqdm
from os.path import basename
# We clean one source at a time
sources = ['gradesaver', 'shmoop', 'cliffnotes', 'sparknotes','pinkmonkey', 'bookwolf', 'novelguide', 'thebestnotes']
for ix, source in tqdm(enumerate(sources)):
print ("Cleaning source: ", source)
source_summary_dir_base = "../cleaning_phase/"
dest_dir_base = "../finished_summaries/"
source_summary_dir = os.path.join(source_summary_dir_base, source)
dest_dir = os.path.join(dest_dir_base, source)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
"""
The script cleans the leftover unicode characters along with some analysis present in the text,
as in /export/longsum/chaptersum/cleaned_summaries/gradesaver/Alice in Wonderland
Weird - /export/longsum/chaptersum/cleaned_summaries/gradesaver/Winesburg, Ohio
This one should be able to fix all the occurrences of Analysis inside the summary
Prefix cleanup we can leave for the next script?
Just clean up prefixes with summary and summary & analysis
"""
def remove_section_prefixes_suffixes(summary, name):
pat_suffix = '(.*)(Commentary (.*))'
| summary = summary.replace(to_remove, "")
pat_prefix = '((.*?){}) (.*$)'.format(name)
if re.search(pat_prefix, summary, re.IGNORECASE):
matched_str = re.match(pat_prefix, summary, re.IGNORECASE)
print (matched_str.groups())
to_remove = matched_str.group(2) # Everything after the Commentary keyword
# summary = summary.replace(to_remove, "")
exit()
def remove_summary_analysis_prefix(line):
pat = '^((.*?)summary|analysis|summary and analysis|summary & analysis)[ ]{0,}[-:]?'
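# e.g. "Chapter One Summary: Tom paints the fence" -> "Tom paints the fence"
# (strips everything up to and including a leading 'Summary'/'Analysis' label).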
if re.search(pat, line, re.IGNORECASE):
to_replace = re.match(pat, line, re.IGNORECASE).group(0)
line = line.replace(to_replace,"")
return line.strip()
def remove_chapter_prefixes(line):
pat_act_scene = '(.*?)((act) ([ivxl|0-9]{1,})[,-]{0,}[ ]{1,}(scene) ([ivxl|0-9]{1,}))'
pat2 = '^((chapters|chapter|act) ([ivxl|0-9]{1,}[,-]{1,}[ivxl|0-9]{1,}))(.*$)'
of_pat2 = '^(of (chapters|chapter|act) ([ivxl|0-9]{1,}[,-]{1,}[ivxl|0-9]{1,}))(.*$)'
pat3 = '^((chapters|chapter|act) ([ivxl|0-9]{1,})[:-]{0,})(.*$)'
pat_nl = '^((chapter|act) (twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)([-|–]?)(eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)?)(.*$)'
of_pat_nl = '^((of (chapter|act) (twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)([-|–]?)(eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)?)(.*$))'
# Also removes chapter prefix
# TODO:Check why not working
#Should also remove everything before the section name
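# e.g. "Act I, Scene 2 The party gathers ..." -> "The party gathers ..." and
# "Chapter 12: Pip visits ..." -> "Pip visits ..." once the matching prefix is stripped below.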
if re.search(pat_act_scene, line, re.IGNORECASE):
to_replace = re.match(pat_act_scene, line, re.IGNORECASE).group(2)
line = line.replace(to_replace,"")
if re.search(pat_nl, line, re.IGNORECASE):
to_replace = re.match(pat_nl, line, re.IGNORECASE).group(0)
line = line.replace(to_replace,"")
if re.search(of_pat_nl, line, re.IGNORECASE):
to_replace = re.match(of_pat_nl, line, re.IGNORECASE).group(0)
line = line.replace(to_replace,"")
if re.search(of_pat2, line, re.IGNORECASE):
to_replace = re.match(of_pat2, line, re.IGNORECASE).group(1)
line = line.replace(to_replace,"")
if re.search(pat2, line, re.IGNORECASE):
to_replace = re.match(pat2, line, re.IGNORECASE).group(1)
line = line.replace(to_replace,"")
if re.search(pat3, line, re.IGNORECASE):
to_replace = re.match(pat3, line, re.IGNORECASE).group(1)
line = line.replace(to_replace,"")
return line.strip()
book_count = 0
for item in os.listdir(source_summary_dir):
book_count += 1
item_dir = os.path.join(source_summary_dir, item)
book_dir = os.path.join(dest_dir, item)
print ("item_dir: ", item_dir)
if not os.path.exists(book_dir):
os.makedirs(book_dir)
else:
continue
for section in os.listdir(item_dir):
summary_path = os.path.join(item_dir, section)
fp = open(summary_path,"r")
try:
summary_json = json.loads(fp.readlines()[0])
except:
print (item_dir, "=Error reading json==", section)
# continue
new_json_dict = {}
new_json_dict['name'] = unidecode(summary_json['name'])
if 'url' in summary_json:
new_json_dict['url'] = unidecode(summary_json['url'])
summary_list = []
analysis_list = []
analysis_already_present = 0
if 'analysis' in summary_json and summary_json['analysis'] is not None and summary_json['analysis'].strip() != "":
# print ("Analysis already present")
analysis_already_present = 1
for line in summary_json['analysis'].split("<PARAGRAPH>"):
cleaned_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode(line.replace("\t"," ").replace("\n"," ")).strip()))
if cleaned_line != "":
analysis_list.append(cleaned_line)
analysis_start = 0
start_adding_lines = 0
summary_content = remove_summary_analysis_prefix(summary_json['summary'])
#Filter out all the notes and the commentary before the Summary keywords
#So when we filter for Notes later, it won't conflict
for line in summary_content.split("<PARAGRAPH>"):
# if analysis keyword is present, break the lines by (.) period
# and then ignore the lines after the 'Analysis' keyword
if 'Analysis' in line or 'Commentary' in line or 'Notes' in line:
# print ("ANALYSIS/COMMENTARY/NOTES IN ABOVE LINE")
sub_lines = list(filter(None, re.split("[.'\"!?]", line)))
summary_sub_lines_to_include = []
analysis_sub_lines_to_include = []
# do not extract the analysis if there is already a separate section present for it
# 'Analysis' keyword should be at the beginning of the line for extraction
pat = "^(Analysis|Commentary|Notes)"
for sub_line in sub_lines:
sub_line = sub_line.strip()
# if the line begins with the keyword 'Analysis'
if re.match(pat, sub_line):
analysis_start = 1
# if analysis_start and not analysis_already_present:
# We may have some left over analysis from the text
if analysis_start:
analysis_sub_lines_to_include.append(sub_line)
# we don't know if we want the whole line to be included
# But if there is only one line which has the summary as well as the analysis,
# all the sub-lines after the analysis keyword is found would be added
else:
summary_sub_lines_to_include.append(sub_line)
cleaned_summ_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode('. '.join(summary_sub_lines_to_include)).replace("\t"," ").replace("\n"," ").strip()))
if cleaned_summ_line != "":
summary_list.append(cleaned_summ_line)
cleaned_analysis_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode('. '.join(analysis_sub_lines_to_include)).replace("\t"," ").replace("\n"," ").strip() ))
if cleaned_analysis_line != "":
analysis_list.append(cleaned_analysis_line)
# If analysis_already_present in json = 1, then we don't need to wait for the analysis start tag to add lines to the summary
# Otherwise, we keep adding lines to the summary which do not belong to the analysis
if analysis_start and analysis_already_present:
pass
elif analysis_already_present or not analysis_start:
cleaned_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode(line.replace("\t"," ").replace("\n"," ")).strip()))
if cleaned_line != "":
summary_list.append(" ".join(cleaned_line.split()))
# Only add to the analysis list if there
# 1. Analysis keyword was found in the beginning of sub-line, 2. We have skipped that whole line,
# and 3. The analysis wasn't already present in the json
if analysis_start and start_adding_lines and not analysis_already_present:
cleaned_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode(line.replace("\t"," ").replace("\n"," ")).strip()))
if cleaned_line != "":
analysis_list.append(" ".join(cleaned_line.split()))
if analysis_start == 1:
start_adding_lines = 1
# We start including the lines to the analysis from the next one after the one in which we found 'Analysis' keyword
section_path = os.path.join(book_dir, section)
new_json_dict['summary'] = summary_list
new_json_dict['analysis'] = analysis_list
with open(section_path, "w") as fout:
json.dump(new_json_dict, fout)
print ("book_count: ", book_count) | if re.search(pat_suffix, summary, re.IGNORECASE):
matched_str = re.match(pat_suffix, summary, re.IGNORECASE)
to_remove = matched_str.group(2) # Everything after the Commentary keyword | random_line_split |
clean_summaries.py | """
/*
* Copyright (c) 2021, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
*/
"""
import json
import os
from unidecode import unidecode
import re
from tqdm import tqdm
from os.path import basename
# We clean one source at a time
sources = ['gradesaver', 'shmoop', 'cliffnotes', 'sparknotes','pinkmonkey', 'bookwolf', 'novelguide', 'thebestnotes']
for ix, source in tqdm(enumerate(sources)):
print ("Cleaning source: ", source)
source_summary_dir_base = "../cleaning_phase/"
dest_dir_base = "../finished_summaries/"
source_summary_dir = os.path.join(source_summary_dir_base, source)
dest_dir = os.path.join(dest_dir_base, source)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
"""
The script cleans the leftover unicode characters along with some analysis present in the text,
as in /export/longsum/chaptersum/cleaned_summaries/gradesaver/Alice in Wonderland
Weird - /export/longsum/chaptersum/cleaned_summaries/gradesaver/Winesburg, Ohio
This one should be able to fix all the occurrences of Analysis inside the summary
Prefix cleanup we can leave for the next script?
Just clean up prefixes with summary and summary & analysis
"""
def remove_section_prefixes_suffixes(summary, name):
pat_suffix = '(.*)(Commentary (.*))'
if re.search(pat_suffix, summary, re.IGNORECASE):
matched_str = re.match(pat_suffix, summary, re.IGNORECASE)
to_remove = matched_str.group(2) # Everything after the Commentary keyword
summary = summary.replace(to_remove, "")
pat_prefix = '((.*?){}) (.*$)'.format(name)
if re.search(pat_prefix, summary, re.IGNORECASE):
matched_str = re.match(pat_prefix, summary, re.IGNORECASE)
print (matched_str.groups())
to_remove = matched_str.group(2) # Everything after the Commentary keyword
# summary = summary.replace(to_remove, "")
exit()
def | (line):
pat = '^((.*?)summary|analysis|summary and analysis|summary & analysis)[ ]{0,}[-:]?'
if re.search(pat, line, re.IGNORECASE):
to_replace = re.match(pat, line, re.IGNORECASE).group(0)
line = line.replace(to_replace,"")
return line.strip()
def remove_chapter_prefixes(line):
pat_act_scene = '(.*?)((act) ([ivxl|0-9]{1,})[,-]{0,}[ ]{1,}(scene) ([ivxl|0-9]{1,}))'
pat2 = '^((chapters|chapter|act) ([ivxl|0-9]{1,}[,-]{1,}[ivxl|0-9]{1,}))(.*$)'
of_pat2 = '^(of (chapters|chapter|act) ([ivxl|0-9]{1,}[,-]{1,}[ivxl|0-9]{1,}))(.*$)'
pat3 = '^((chapters|chapter|act) ([ivxl|0-9]{1,})[:-]{0,})(.*$)'
pat_nl = '^((chapter|act) (twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)([-|–]?)(eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)?)(.*$)'
of_pat_nl = '^((of (chapter|act) (twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)([-|–]?)(eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)?)(.*$))'
# Also removes chapter prefix
# TODO:Check why not working
#Should also remove everything before the section name
if re.search(pat_act_scene, line, re.IGNORECASE):
to_replace = re.match(pat_act_scene, line, re.IGNORECASE).group(2)
line = line.replace(to_replace,"")
if re.search(pat_nl, line, re.IGNORECASE):
to_replace = re.match(pat_nl, line, re.IGNORECASE).group(0)
line = line.replace(to_replace,"")
if re.search(of_pat_nl, line, re.IGNORECASE):
to_replace = re.match(of_pat_nl, line, re.IGNORECASE).group(0)
line = line.replace(to_replace,"")
if re.search(of_pat2, line, re.IGNORECASE):
to_replace = re.match(of_pat2, line, re.IGNORECASE).group(1)
line = line.replace(to_replace,"")
if re.search(pat2, line, re.IGNORECASE):
to_replace = re.match(pat2, line, re.IGNORECASE).group(1)
line = line.replace(to_replace,"")
if re.search(pat3, line, re.IGNORECASE):
to_replace = re.match(pat3, line, re.IGNORECASE).group(1)
line = line.replace(to_replace,"")
return line.strip()
book_count = 0
for item in os.listdir(source_summary_dir):
book_count += 1
item_dir = os.path.join(source_summary_dir, item)
book_dir = os.path.join(dest_dir, item)
print ("item_dir: ", item_dir)
if not os.path.exists(book_dir):
os.makedirs(book_dir)
else:
continue
for section in os.listdir(item_dir):
summary_path = os.path.join(item_dir, section)
fp = open(summary_path,"r")
try:
summary_json = json.loads(fp.readlines()[0])
except:
print (item_dir, "=Error reading json==", section)
# continue
new_json_dict = {}
new_json_dict['name'] = unidecode(summary_json['name'])
if 'url' in summary_json:
new_json_dict['url'] = unidecode(summary_json['url'])
summary_list = []
analysis_list = []
analysis_already_present = 0
if 'analysis' in summary_json and summary_json['analysis'] is not None and summary_json['analysis'].strip() != "":
# print ("Analysis already present")
analysis_already_present = 1
for line in summary_json['analysis'].split("<PARAGRAPH>"):
cleaned_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode(line.replace("\t"," ").replace("\n"," ")).strip()))
if cleaned_line != "":
analysis_list.append(cleaned_line)
analysis_start = 0
start_adding_lines = 0
summary_content = remove_summary_analysis_prefix(summary_json['summary'])
#Filter out all the notes and the commentary before the Summary keywords
#So when we filter for Notes later, it won't conflict
for line in summary_content.split("<PARAGRAPH>"):
# if analysis keyword is present, break the lines by (.) period
# and then ignore the lines after the 'Analysis' keyword
if 'Analysis' in line or 'Commentary' in line or 'Notes' in line:
# print ("ANALYSIS/COMMENTARY/NOTES IN ABOVE LINE")
sub_lines = list(filter(None, re.split("[.'\"!?]", line)))
summary_sub_lines_to_include = []
analysis_sub_lines_to_include = []
# do not extract the analysis if there is already a separate section present for it
# 'Analysis' keyword should be at the beginning of the line for extraction
pat = "^(Analysis|Commentary|Notes)"
for sub_line in sub_lines:
sub_line = sub_line.strip()
# if the line begins with the keyword 'Analysis'
if re.match(pat, sub_line):
analysis_start = 1
# if analysis_start and not analysis_already_present:
# We may have some left over analysis from the text
if analysis_start:
analysis_sub_lines_to_include.append(sub_line)
# we don't know if we want the whole line to be included
# But if there is only one line which has the summary as well as the analysis,
# all the sub-lines after the analysis keyword is found would be added
else:
summary_sub_lines_to_include.append(sub_line)
cleaned_summ_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode('. '.join(summary_sub_lines_to_include)).replace("\t"," ").replace("\n"," ").strip()))
if cleaned_summ_line != "":
summary_list.append(cleaned_summ_line)
cleaned_analysis_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode('. '.join(analysis_sub_lines_to_include)).replace("\t"," ").replace("\n"," ").strip() ))
if cleaned_analysis_line != "":
analysis_list.append(cleaned_analysis_line)
# If analysis_already_present in json = 1, then we don't need to wait for the analysis start tag to add lines to the summary
# Otherwise, we keep adding lines to the summary which do not belong to the analysis
if analysis_start and analysis_already_present:
pass
elif analysis_already_present or not analysis_start:
cleaned_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode(line.replace("\t"," ").replace("\n"," ")).strip()))
if cleaned_line != "":
summary_list.append(" ".join(cleaned_line.split()))
# Only add to the analysis list if there
# 1. Analysis keyword was found in the beginning of sub-line, 2. We have skipped that whole line,
# and 3. The analysis wasn't already present in the json
if analysis_start and start_adding_lines and not analysis_already_present:
cleaned_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode(line.replace("\t"," ").replace("\n"," ")).strip()))
if cleaned_line != "":
analysis_list.append(" ".join(cleaned_line.split()))
if analysis_start == 1:
start_adding_lines = 1
# We start including the lines to the analysis from the next one after the one in which we found 'Analysis' keyword
section_path = os.path.join(book_dir, section)
new_json_dict['summary'] = summary_list
new_json_dict['analysis'] = analysis_list
with open(section_path, "w") as fout:
json.dump(new_json_dict, fout)
print ("book_count: ", book_count)
| remove_summary_analysis_prefix | identifier_name |
clean_summaries.py | """
/*
* Copyright (c) 2021, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
*/
"""
import json
import os
from unidecode import unidecode
import re
from tqdm import tqdm
from os.path import basename
# We clean one source at a time
sources = ['gradesaver', 'shmoop', 'cliffnotes', 'sparknotes','pinkmonkey', 'bookwolf', 'novelguide', 'thebestnotes']
for ix, source in tqdm(enumerate(sources)):
print ("Cleaning source: ", source)
source_summary_dir_base = "../cleaning_phase/"
dest_dir_base = "../finished_summaries/"
source_summary_dir = os.path.join(source_summary_dir_base, source)
dest_dir = os.path.join(dest_dir_base, source)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
"""
The script cleans the leftover unicode characters along with some analysis present in the text,
as in /export/longsum/chaptersum/cleaned_summaries/gradesaver/Alice in Wonderland
Weird - /export/longsum/chaptersum/cleaned_summaries/gradesaver/Winesburg, Ohio
This one should be able to fix all the occurrences of Analysis inside the summary
Prefix cleanup we can leave for the next script?
Just clean up prefixes with summary and summary & analysis
"""
def remove_section_prefixes_suffixes(summary, name):
pat_suffix = '(.*)(Commentary (.*))'
if re.search(pat_suffix, summary, re.IGNORECASE):
matched_str = re.match(pat_suffix, summary, re.IGNORECASE)
to_remove = matched_str.group(2) # Everything after the Commentary keyword
summary = summary.replace(to_remove, "")
pat_prefix = '((.*?){}) (.*$)'.format(name)
if re.search(pat_prefix, summary, re.IGNORECASE):
matched_str = re.match(pat_prefix, summary, re.IGNORECASE)
print (matched_str.groups())
to_remove = matched_str.group(2) # Everything after the Commentary keyword
# summary = summary.replace(to_remove, "")
exit()
def remove_summary_analysis_prefix(line):
pat = '^((.*?)summary|analysis|summary and analysis|summary & analysis)[ ]{0,}[-:]?'
if re.search(pat, line, re.IGNORECASE):
to_replace = re.match(pat, line, re.IGNORECASE).group(0)
line = line.replace(to_replace,"")
return line.strip()
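# Illustrative check (not part of the original script; behaviour inferred from the regex above):
# remove_summary_analysis_prefix("Summary: Tom paints the fence") -> "Tom paints the fence"
# remove_summary_analysis_prefix("Chapter 3") -> "Chapter 3" (no summary/analysis prefix, unchanged)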
def remove_chapter_prefixes(line):
pat_act_scene = '(.*?)((act) ([ivxl|0-9]{1,})[,-]{0,}[ ]{1,}(scene) ([ivxl|0-9]{1,}))'
pat2 = '^((chapters|chapter|act) ([ivxl|0-9]{1,}[,-]{1,}[ivxl|0-9]{1,}))(.*$)'
of_pat2 = '^(of (chapters|chapter|act) ([ivxl|0-9]{1,}[,-]{1,}[ivxl|0-9]{1,}))(.*$)'
pat3 = '^((chapters|chapter|act) ([ivxl|0-9]{1,})[:-]{0,})(.*$)'
pat_nl = '^((chapter|act) (twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)([-|–]?)(eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)?)(.*$)'
of_pat_nl = '^((of (chapter|act) (twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)([-|–]?)(eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)?)(.*$))'
# Also removes chapter prefix
# TODO:Check why not working
#Should also remove everything before the section name
if re.search(pat_act_scene, line, re.IGNORECASE):
to_replace = re.match(pat_act_scene, line, re.IGNORECASE).group(2)
line = line.replace(to_replace,"")
if re.search(pat_nl, line, re.IGNORECASE):
to_replace = re.match(pat_nl, line, re.IGNORECASE).group(0)
line = line.replace(to_replace,"")
if re.search(of_pat_nl, line, re.IGNORECASE):
to_replace = re.match(of_pat_nl, line, re.IGNORECASE).group(0)
line = line.replace(to_replace,"")
if re.search(of_pat2, line, re.IGNORECASE):
to_replace = re.match(of_pat2, line, re.IGNORECASE).group(1)
line = line.replace(to_replace,"")
if re.search(pat2, line, re.IGNORECASE):
to_replace = re.match(pat2, line, re.IGNORECASE).group(1)
line = line.replace(to_replace,"")
if re.search(pat3, line, re.IGNORECASE):
to_replace = re.match(pat3, line, re.IGNORECASE).group(1)
line = line.replace(to_replace,"")
return line.strip()
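# Illustrative check (not part of the original script; behaviour inferred from the patterns above):
# remove_chapter_prefixes("Chapter 5: The storm breaks") -> "The storm breaks"
# remove_chapter_prefixes("Act I, Scene II opens in court") -> "opens in court"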
book_count = 0
for item in os.listdir(source_summary_dir):
book_count += 1
item_dir = os.path.join(source_summary_dir, item)
book_dir = os.path.join(dest_dir, item)
print ("item_dir: ", item_dir)
if not os.path.exists(book_dir):
os.makedirs(book_dir)
else:
continue
for section in os.listdir(item_dir):
summ | print ("book_count: ", book_count)
| ary_path = os.path.join(item_dir, section)
fp = open(summary_path,"r")
try:
summary_json = json.loads(fp.readlines()[0])
except:
print (item_dir, "=Error reading json==", section)
# continue
new_json_dict = {}
new_json_dict['name'] = unidecode(summary_json['name'])
if 'url' in summary_json:
new_json_dict['url'] = unidecode(summary_json['url'])
summary_list = []
analysis_list = []
analysis_already_present = 0
if 'analysis' in summary_json and summary_json['analysis'] is not None and summary_json['analysis'].strip() != "":
# print ("Analysis already present")
analysis_already_present = 1
for line in summary_json['analysis'].split("<PARAGRAPH>"):
cleaned_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode(line.replace("\t"," ").replace("\n"," ")).strip()))
if cleaned_line != "":
analysis_list.append(cleaned_line)
analysis_start = 0
start_adding_lines = 0
summary_content = remove_summary_analysis_prefix(summary_json['summary'])
#Filter out all the notes and the commentary before the Summary keywords
#So when we filter for Notes later, it won't conflict
for line in summary_content.split("<PARAGRAPH>"):
# if analysis keyword is present, break the lines by (.) period
# and then ignore the lines after the 'Analysis' keyword
if 'Analysis' in line or 'Commentary' in line or 'Notes' in line:
# print ("ANALYSIS/COMMENTARY/NOTES IN ABOVE LINE")
sub_lines = list(filter(None, re.split("[.'\"!?]", line)))
summary_sub_lines_to_include = []
analysis_sub_lines_to_include = []
# do not extract the analysis if there is already a separate section present for it
# 'Analysis' keyword should be at the beginning of the line for extraction
pat = "^(Analysis|Commentary|Notes)"
for sub_line in sub_lines:
sub_line = sub_line.strip()
# if the line begins with the keyword 'Analysis'
if re.match(pat, sub_line):
analysis_start = 1
# if analysis_start and not analysis_already_present:
# We may have some left over analysis from the text
if analysis_start:
analysis_sub_lines_to_include.append(sub_line)
# we don't know if we want the whole line to be included
# But if there is only one line which has the summary as well as the analysis,
# all the sub-lines after the analysis keyword is found would be added
else:
summary_sub_lines_to_include.append(sub_line)
cleaned_summ_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode('. '.join(summary_sub_lines_to_include)).replace("\t"," ").replace("\n"," ").strip()))
if cleaned_summ_line != "":
summary_list.append(cleaned_summ_line)
cleaned_analysis_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode('. '.join(analysis_sub_lines_to_include)).replace("\t"," ").replace("\n"," ").strip() ))
if cleaned_analysis_line != "":
analysis_list.append(cleaned_analysis_line)
# If analysis_already_present in json = 1, then we don't need to wait for the analysis start tag to add stuff to the summary
# Otherwise, we keep adding lines to the summary which do not belong to the analysis
if analysis_start and analysis_already_present:
pass
elif analysis_already_present or not analysis_start:
cleaned_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode(line.replace("\t"," ").replace("\n"," ")).strip()))
if cleaned_line != "":
summary_list.append(" ".join(cleaned_line.split()))
# Only add to the analysis list if:
# 1. the Analysis keyword was found at the beginning of a sub-line, 2. we have skipped that whole line,
# and 3. the analysis wasn't already present in the json
if analysis_start and start_adding_lines and not analysis_already_present:
cleaned_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode(line.replace("\t"," ").replace("\n"," ")).strip()))
if cleaned_line != "":
analysis_list.append(" ".join(cleaned_line.split()))
if analysis_start == 1:
start_adding_lines = 1
# We start including the lines to the analysis from the next one after the one in which we found 'Analysis' keyword
section_path = os.path.join(book_dir, section)
new_json_dict['summary'] = summary_list
new_json_dict['analysis'] = analysis_list
with open(section_path, "w") as fout:
json.dump(new_json_dict, fout)
| conditional_block |
clean_summaries.py | """
/*
* Copyright (c) 2021, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
*/
"""
import json
import os
from unidecode import unidecode
import re
from tqdm import tqdm
from os.path import basename
# We clean one source at a time
sources = ['gradesaver', 'shmoop', 'cliffnotes', 'sparknotes','pinkmonkey', 'bookwolf', 'novelguide', 'thebestnotes']
for ix, source in tqdm(enumerate(sources)):
print ("Cleaning source: ", source)
source_summary_dir_base = "../cleaning_phase/"
dest_dir_base = "../finished_summaries/"
source_summary_dir = os.path.join(source_summary_dir_base, source)
dest_dir = os.path.join(dest_dir_base, source)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
"""
The script cleans the leftover unicode characters along with some analysis present in the text,
as in /export/longsum/chaptersum/cleaned_summaries/gradesaver/Alice in Wonderland
Weird - /export/longsum/chaptersum/cleaned_summaries/gradesaver/Winesburg, Ohio
This one should be able to fix all the occurrences of Analysis inside the summary
Prefix cleanup we can leave for the next script?
Just cleanup prefixes with summary and summary & analysis
"""
def remove_section_prefixes_suffixes(summary, name):
|
def remove_summary_analysis_prefix(line):
pat = '^((.*?)summary|analysis|summary and analysis|summary & analysis)[ ]{0,}[-:]?'
if re.search(pat, line, re.IGNORECASE):
to_replace = re.match(pat, line, re.IGNORECASE).group(0)
line = line.replace(to_replace,"")
return line.strip()
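# Example behaviour (illustrative; inferred from the regex above, not in the original source):
# remove_summary_analysis_prefix("Analysis: The ending") -> "The ending"
# remove_summary_analysis_prefix("Chapter 3") -> "Chapter 3" (left untouched)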
def remove_chapter_prefixes(line):
pat_act_scene = '(.*?)((act) ([ivxl|0-9]{1,})[,-]{0,}[ ]{1,}(scene) ([ivxl|0-9]{1,}))'
pat2 = '^((chapters|chapter|act) ([ivxl|0-9]{1,}[,-]{1,}[ivxl|0-9]{1,}))(.*$)'
of_pat2 = '^(of (chapters|chapter|act) ([ivxl|0-9]{1,}[,-]{1,}[ivxl|0-9]{1,}))(.*$)'
pat3 = '^((chapters|chapter|act) ([ivxl|0-9]{1,})[:-]{0,})(.*$)'
pat_nl = '^((chapter|act) (twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)([-|–]?)(eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)?)(.*$)'
of_pat_nl = '^((of (chapter|act) (twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)([-|–]?)(eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|one|two|three|four|five|six|seven|eight|nine|ten)?)(.*$))'
# Also removes chapter prefix
# TODO:Check why not working
#Should also remove everything before the section name
if re.search(pat_act_scene, line, re.IGNORECASE):
to_replace = re.match(pat_act_scene, line, re.IGNORECASE).group(2)
line = line.replace(to_replace,"")
if re.search(pat_nl, line, re.IGNORECASE):
to_replace = re.match(pat_nl, line, re.IGNORECASE).group(0)
line = line.replace(to_replace,"")
if re.search(of_pat_nl, line, re.IGNORECASE):
to_replace = re.match(of_pat_nl, line, re.IGNORECASE).group(0)
line = line.replace(to_replace,"")
if re.search(of_pat2, line, re.IGNORECASE):
to_replace = re.match(of_pat2, line, re.IGNORECASE).group(1)
line = line.replace(to_replace,"")
if re.search(pat2, line, re.IGNORECASE):
to_replace = re.match(pat2, line, re.IGNORECASE).group(1)
line = line.replace(to_replace,"")
if re.search(pat3, line, re.IGNORECASE):
to_replace = re.match(pat3, line, re.IGNORECASE).group(1)
line = line.replace(to_replace,"")
return line.strip()
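# Example behaviour (illustrative; inferred from the patterns above, not in the original source):
# remove_chapter_prefixes("Chapters 2-3 find Pip in London") -> "find Pip in London"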
book_count = 0
for item in os.listdir(source_summary_dir):
book_count += 1
item_dir = os.path.join(source_summary_dir, item)
book_dir = os.path.join(dest_dir, item)
print ("item_dir: ", item_dir)
if not os.path.exists(book_dir):
os.makedirs(book_dir)
else:
continue
for section in os.listdir(item_dir):
summary_path = os.path.join(item_dir, section)
fp = open(summary_path,"r")
try:
summary_json = json.loads(fp.readlines()[0])
except:
print (item_dir, "=Error reading json==", section)
# continue
new_json_dict = {}
new_json_dict['name'] = unidecode(summary_json['name'])
if 'url' in summary_json:
new_json_dict['url'] = unidecode(summary_json['url'])
summary_list = []
analysis_list = []
analysis_already_present = 0
if 'analysis' in summary_json and summary_json['analysis'] is not None and summary_json['analysis'].strip() != "":
# print ("Analysis already present")
analysis_already_present = 1
for line in summary_json['analysis'].split("<PARAGRAPH>"):
cleaned_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode(line.replace("\t"," ").replace("\n"," ")).strip()))
if cleaned_line != "":
analysis_list.append(cleaned_line)
analysis_start = 0
start_adding_lines = 0
summary_content = remove_summary_analysis_prefix(summary_json['summary'])
#Filter out all the notes and the commentary before the Summary keywords
#So when we filter for Notes later, it won't conflict
for line in summary_content.split("<PARAGRAPH>"):
# if analysis keyword is present, break the lines by (.) period
# and then ignore the lines after the 'Analysis' keyword
if 'Analysis' in line or 'Commentary' in line or 'Notes' in line:
# print ("ANALYSIS/COMMENTARY/NOTES IN ABOVE LINE")
sub_lines = list(filter(None, re.split("[.'\"!?]", line)))
summary_sub_lines_to_include = []
analysis_sub_lines_to_include = []
# do not extract the analysis if there is already a separate section present for it
# 'Analysis' keyword should be at the beginning of the line for extraction
pat = "^(Analysis|Commentary|Notes)"
for sub_line in sub_lines:
sub_line = sub_line.strip()
# if the line begins with the keyword 'Analysis'
if re.match(pat, sub_line):
analysis_start = 1
# if analysis_start and not analysis_already_present:
# We may have some left over analysis from the text
if analysis_start:
analysis_sub_lines_to_include.append(sub_line)
# we don't know if we want the whole line to be included
# But if there is only one line which has the summary as well as the analysis,
# all the sub-lines after the analysis keyword is found would be added
else:
summary_sub_lines_to_include.append(sub_line)
cleaned_summ_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode('. '.join(summary_sub_lines_to_include)).replace("\t"," ").replace("\n"," ").strip()))
if cleaned_summ_line != "":
summary_list.append(cleaned_summ_line)
cleaned_analysis_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode('. '.join(analysis_sub_lines_to_include)).replace("\t"," ").replace("\n"," ").strip() ))
if cleaned_analysis_line != "":
analysis_list.append(cleaned_analysis_line)
# If analysis_already_present in json = 1, then we don't need to wait for the analysis start tag to add stuff to the summary
# Otherwise, we keep adding lines to the summary which do not belong to the analysis
if analysis_start and analysis_already_present:
pass
elif analysis_already_present or not analysis_start:
cleaned_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode(line.replace("\t"," ").replace("\n"," ")).strip()))
if cleaned_line != "":
summary_list.append(" ".join(cleaned_line.split()))
# Only add to the analysis list if:
# 1. the Analysis keyword was found at the beginning of a sub-line, 2. we have skipped that whole line,
# and 3. the analysis wasn't already present in the json
if analysis_start and start_adding_lines and not analysis_already_present:
cleaned_line = remove_chapter_prefixes(remove_summary_analysis_prefix(unidecode(line.replace("\t"," ").replace("\n"," ")).strip()))
if cleaned_line != "":
analysis_list.append(" ".join(cleaned_line.split()))
if analysis_start == 1:
start_adding_lines = 1
# We start including the lines to the analysis from the next one after the one in which we found 'Analysis' keyword
section_path = os.path.join(book_dir, section)
new_json_dict['summary'] = summary_list
new_json_dict['analysis'] = analysis_list
with open(section_path, "w") as fout:
json.dump(new_json_dict, fout)
print ("book_count: ", book_count)
| pat_suffix = '(.*)(Commentary (.*))'
if re.search(pat_suffix, summary, re.IGNORECASE):
matched_str = re.match(pat_suffix, summary, re.IGNORECASE)
to_remove = matched_str.group(2) # Everything after the Commentary keyword
summary = summary.replace(to_remove, "")
pat_prefix = '((.*?){}) (.*$)'.format(name)
if re.search(pat_prefix, summary, re.IGNORECASE):
matched_str = re.match(pat_prefix, summary, re.IGNORECASE)
print (matched_str.groups())
to_remove = matched_str.group(2) # The text captured before the section name
# summary = summary.replace(to_remove, "")
exit() | identifier_body |
Util.js | /*
* Copyright 2015-2023 G-Labs. All Rights Reserved.
*
* https://zuixjs.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
*
* This file is part of
* zUIx, Javascript library for component-based development.
* https://zuixjs.org
*
* @author Generoso Martello - https://github.com/genemars
*/
'use strict';
/**
* @typedef QuerySelectors
* @method {function(): string} getAll
* @method {function(i: number): string} get
*/
/**
* Common utility functions.
* @namespace Utils
*/
const Utils = {
/**
* Returns true only if object is null || undefined
* @param {object} obj The object to test.
* @return {boolean} True if null or undefined, otherwise false.
* @memberOf Utils
*/
isNoU(obj) {
return (typeof obj === 'undefined' || obj === null);
},
/**
* Gets object property given its name
* @param {object} o The object to get property from.
* @param {string} s The property path (dotted/indexed form).
* @return {object|undefined} The property matching the given path.
* @memberOf Utils
*/
propertyFromPath(o, s) {
if (typeof s !== 'string' || o == null) {
return;
}
try {
if (typeof o[s] !== 'undefined') {
return o[s];
}
} catch (e) {
// TODO: "TypeError: Cannot create proxy with a non-object as target or handler"
console.log(e);
}
let ref = o; let path = '';
const parts = s.match(/\[(".*?"|'.*?'|(.*?))\]|".*?"|'.*?'|[0-9a-zA-Z_$]+/g);
for (let i = 0; i < parts.length; i++) {
let m = parts[i];
if (m.startsWith('[') && m.endsWith(']')) {
m = m.substring(1, m.length - 1).trim();
}
if (m.startsWith('"') && m.endsWith('"')) {
m = m.substring(1, m.length - 1);
} else if (m.startsWith('\'') && m.endsWith('\'')) {
m = m.substring(1, m.length - 1);
}
path = path + m;
let propertyReference;
try {
propertyReference = ref[m];
} catch (e) {
// TODO: proxy has been revoked
}
if (typeof propertyReference !== 'undefined') {
ref = propertyReference;
} else {
// TODO: maybe logging?
// throw new Error('Undefined property "' + path + '"');
return;
}
path = path + '->';
}
return ref;
},
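// Illustrative usage (an assumption, not part of the original source): with
// const obj = {a: {b: [{c: 1}]}}, both Utils.propertyFromPath(obj, 'a.b[0].c') and
// Utils.propertyFromPath(obj, "a['b'][0]['c']") should resolve to 1, while an
// unknown path simply yields undefined.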
/**
* Creates a copy of a given object.
* @param {object} obj The source object.
* @return {object} The object copy.
* @memberOf Utils
*/
cloneObject(obj) {
if (obj === null || typeof obj !== 'object') {
return obj;
}
// Give temp the original object's constructor
let temp = obj;
try {
temp = obj.constructor();
for (const key in obj) {
if (obj.hasOwnProperty(key)) {
temp[key] = this.cloneObject(obj[key]);
}
}
} catch (e) {
// TODO: should warn when clone is not possible
}
return temp;
},
// TODO: deprecate `hasPassiveEvents`
/**
* Returns true if browser supports passive events.
* @return {boolean} True if supported, otherwise false.
* @memberOf Utils
*/
hasPassiveEvents() {
let supportsPassive = false;
try {
const opts = Object.defineProperty({}, 'passive', {
get: function() {
supportsPassive = true;
}
});
window.addEventListener('testPassive', null, opts);
window.removeEventListener('testPassive', null, opts);
} catch (e) {}
return supportsPassive;
},
/**
* Converts the given string to `camelCase`
* @param {string} s The string to convert.
* @return {string} The converted string.
* @memberOf Utils
*/
hyphensToCamelCase(s) {
return typeof s === 'string' ? s.replace(/--/g, ':').replace(/-([a-z0-9_$-])/g, function(g) {
return '_$-'.indexOf(g[1]) > -1 || (+g[1]).toString() === g[1] ?
'_' + g[1].replace('-', '_') : g[1].toUpperCase();
}).replace(/:/g, '-') : s;
},
/**
* Converts the given string to `kebab-case`.
* @param {string} s The string to convert.
* @return {string} The converted string.
* @memberOf Utils
*/
camelCaseToHyphens(s) {
if (typeof s !== 'string') return s;
s = s.replace(/(^\w)|(\s+\w)/g, function(letter) {
return letter.toUpperCase();
}).replace(/\s/g, '');
return s.split(/(?=[A-Z])/).join('-').toLowerCase();
},
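// Illustrative round trip (an assumption, not part of the original source):
// Utils.hyphensToCamelCase('z-load') === 'zLoad' and
// Utils.camelCaseToHyphens('zLoad') === 'z-load'.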
/**
* Normalizes controller code (ES5/ES6+).
* @param {string} javascriptCode The JS code to normalize.
* @return {string} Normalized JS controller code.
* @memberOf Utils
*/
normalizeControllerCode(javascriptCode) {
if (javascriptCode.indexOf('module.exports') >= 0) {
return '\'use strict\'; let module = {}; ' + javascriptCode + ';\nreturn module.exports;';
} else {
// TODO: improve code parsing
let code = javascriptCode;
const fni = javascriptCode.indexOf('function ');
const fnz = javascriptCode.indexOf('zuix.controller');
const fnc = javascriptCode.indexOf('class ');
if (fnc >= 0 && (fnc < fni || fni === -1) && (fnc < fnz || fnz === -1)) {
code = javascriptCode.substring(0, fnc) + 'return ' + javascriptCode.substring(fnc);
} else if (fni >= 0 && (fni < fnz || fnz === -1)) {
code = javascriptCode.substring(0, fni) + 'return ' + javascriptCode.substring(fni);
} else if (fnz !== -1) {
code = javascriptCode.substring(0, fnz) + 'return ' + javascriptCode.substring(fnz + 15);
}
return code;
}
},
/**
* Catches errors that occur in the specified component context.
* @param {ComponentContext} ctx The component context.
* @param {function} fn Function code to execute.
* @param {function} errorCallback Error callback.
* @memberOf Utils
*/
catchContextError(ctx, fn, errorCallback) {
try {
fn();
} catch (err) {
ctx._error = err;
if (errorCallback) errorCallback(err);
if (err && ctx.options().error) {
(ctx.options().error)
.call(ctx, err, ctx);
} else {
console.error(err);
}
}
},
/**
* @namespace dom
* @memberof Utils
*/
dom: {
/**
* Gets CSS query for matching the given value in a list of specified attributes.
* @param {string} name Comma separated list of attribute names.
* @param {string} value The value to match.
* @param {QuerySelectors} appendValue Additional CSS query to append
* @return {string} The query string.
* @memberof Utils.dom
*/
queryAttribute(name, value, appendValue) {
const fields = name.split(/[\s|,]+/g);
let selector = '';
fields.forEach(function(f, i) {
if (value != null) {
selector += '[' + CSS.escape(f) + '="' + value + '"]';
} else {
selector += '[' + CSS.escape(f) + ']';
}
if (appendValue) {
selector += appendValue.get(i);
}
if (i < fields.length - 1) selector += ',';
});
return selector;
},
/**
* Gets the first non-null value for the given comma-separated list of element attributes.
* @param {Element} element The target element.
* @param {string} name Comma separated list of attributes.
* @return {string} The attribute value.
* @memberof Utils.dom
*/
getAttribute(element, name) {
let value;
if (typeof name === 'string' && name.indexOf(',') !== -1) {
const fields = name.split(/[\s|,]+/g);
for (let i = 0; i < fields.length; i++) {
const f = fields[i];
const a = element.getAttribute(f);
if (a != null) {
value = a;
break;
}
}
} else value = element.getAttribute(name);
return value;
},
/**
* Sets the value of the given comma-separated list of attributes.
* @param {Element} element The target element.
* @param {string} name Comma separated list of attributes.
* @param {object|string} value The value to set.
* @memberof Utils.dom
*/
setAttribute(element, name, value) {
if (typeof name === 'string' && name.indexOf(',') !== -1) {
const fields = name.split(/[\s|,]+/g);
const _t = this;
fields.forEach(function(f) {
_t.setAttribute(element, f, value);
});
} else if (value === null) {
element.removeAttribute(name);
} else if (element.getAttribute(name) !== value) {
element.setAttribute(name, value);
}
},
/**
* Gets the CSS `:not` selector for the given comma-separated list of attributes with the specified value (if any).
* @param {string} name Comma separated list of attributes.
* @param {string} value The value to match.
* @return {QuerySelectors} The query selectors.
* @memberof Utils.dom
*/
cssNot(name, value) {
const fields = name.split(',');
let selector = '';
fields.forEach(function(v, i) {
if (v.startsWith('.')) {
selector += ':not(' + v + ')';
} else if (value != null) {
selector += ':not([' + v + '="' + value + '"])';
} else {
selector += ':not([' + v + '])';
}
if (i < fields.length - 1) selector += ',';
});
return (function(s) {
/** @type {QuerySelectors} */
return {
get(i) {
const selectors = s.split(',');
return (i >= selectors.length || i == null) ? selectors[0] : selectors[i];
},
// eslint-disable-next-line no-unused-vars
getAll() {
const selectors = s.split(',');
return selectors.join('');
}
};
})(selector);
},
/**
* Returns the ShadowRoot node containing the given element, or false if the element is not in a shadow DOM.
* @param {Node} node The node element.
* @return {ShadowRoot|boolean} The ShadowRoot or false.
* @memberof Utils.dom
*/
| (node) {
for (; node; node = node.parentNode) {
if (node instanceof ShadowRoot) {
return node;
}
}
return false;
}
}
};
module.exports = Utils;
| getShadowRoot | identifier_name |
Util.js | /*
* Copyright 2015-2023 G-Labs. All Rights Reserved.
*
* https://zuixjs.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
*
* This file is part of
* zUIx, Javascript library for component-based development.
* https://zuixjs.org
*
* @author Generoso Martello - https://github.com/genemars
*/
'use strict';
/**
* @typedef QuerySelectors
* @method {function(): string} getAll
* @method {function(i: number): string} get
*/
/**
* Common utility functions.
* @namespace Utils
*/
const Utils = {
/**
* Returns true only if object is null || undefined
* @param {object} obj The object to test.
* @return {boolean} True if null or undefined, otherwise false.
* @memberOf Utils
*/
isNoU(obj) {
return (typeof obj === 'undefined' || obj === null);
},
/**
* Gets object property given its name
* @param {object} o The object to get property from.
* @param {string} s The property path (dotted/indexed form).
* @return {object|undefined} The property matching the given path.
* @memberOf Utils
*/
propertyFromPath(o, s) {
if (typeof s !== 'string' || o == null) {
return;
}
try {
if (typeof o[s] !== 'undefined') {
return o[s];
}
} catch (e) {
// TODO: "TypeError: Cannot create proxy with a non-object as target or handler"
console.log(e);
}
let ref = o; let path = '';
const parts = s.match(/\[(".*?"|'.*?'|(.*?))\]|".*?"|'.*?'|[0-9a-zA-Z_$]+/g);
for (let i = 0; i < parts.length; i++) {
let m = parts[i];
if (m.startsWith('[') && m.endsWith(']')) {
m = m.substring(1, m.length - 1).trim();
}
if (m.startsWith('"') && m.endsWith('"')) {
m = m.substring(1, m.length - 1);
} else if (m.startsWith('\'') && m.endsWith('\'')) {
m = m.substring(1, m.length - 1);
}
path = path + m;
let propertyReference;
try {
propertyReference = ref[m];
} catch (e) {
// TODO: proxy has been revoked
}
if (typeof propertyReference !== 'undefined') {
ref = propertyReference;
} else {
// TODO: maybe logging?
// throw new Error('Undefined property "' + path + '"');
return;
}
path = path + '->';
}
return ref;
},
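// Example (assumed, not part of the original source): for const data = {items: [{id: 7}]},
// Utils.propertyFromPath(data, 'items[0].id') should resolve to 7, and a path that does
// not exist resolves to undefined.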
/**
* Creates a copy of a given object.
* @param {object} obj The source object.
* @return {object} The object copy.
* @memberOf Utils
*/
cloneObject(obj) {
if (obj === null || typeof obj !== 'object') {
return obj;
}
// Give temp the original object's constructor
let temp = obj;
try {
temp = obj.constructor();
for (const key in obj) {
if (obj.hasOwnProperty(key)) {
temp[key] = this.cloneObject(obj[key]);
}
}
} catch (e) {
// TODO: should warn when clone is not possible
}
return temp;
},
// TODO: deprecate `hasPassiveEvents`
/**
* Returns true if browser supports passive events.
* @return {boolean} True if supported, otherwise false.
* @memberOf Utils
*/
hasPassiveEvents() {
let supportsPassive = false;
try {
const opts = Object.defineProperty({}, 'passive', {
get: function() {
supportsPassive = true;
}
});
window.addEventListener('testPassive', null, opts);
window.removeEventListener('testPassive', null, opts);
} catch (e) {}
return supportsPassive;
},
/**
* Converts the given string to `camelCase`
* @param {string} s The string to convert.
* @return {string} The converted string.
* @memberOf Utils
*/
hyphensToCamelCase(s) {
return typeof s === 'string' ? s.replace(/--/g, ':').replace(/-([a-z0-9_$-])/g, function(g) {
return '_$-'.indexOf(g[1]) > -1 || (+g[1]).toString() === g[1] ?
'_' + g[1].replace('-', '_') : g[1].toUpperCase();
}).replace(/:/g, '-') : s;
},
/**
* Converts the given string to `kebab-case`.
* @param {string} s The string to convert.
* @return {string} The converted string.
* @memberOf Utils
*/
camelCaseToHyphens(s) {
if (typeof s !== 'string') return s;
s = s.replace(/(^\w)|(\s+\w)/g, function(letter) {
return letter.toUpperCase();
}).replace(/\s/g, '');
return s.split(/(?=[A-Z])/).join('-').toLowerCase();
},
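// Example (assumed, not part of the original source): Utils.hyphensToCamelCase('data-lazy-load')
// should give 'dataLazyLoad', and Utils.camelCaseToHyphens('dataLazyLoad') gives back 'data-lazy-load'.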
/**
* Normalizes controller code (ES5/ES6+).
* @param {string} javascriptCode The JS code to normalize.
* @return {string} Normalized JS controller code.
* @memberOf Utils
*/
normalizeControllerCode(javascriptCode) {
if (javascriptCode.indexOf('module.exports') >= 0) {
return '\'use strict\'; let module = {}; ' + javascriptCode + ';\nreturn module.exports;';
} else {
// TODO: improve code parsing
let code = javascriptCode;
const fni = javascriptCode.indexOf('function ');
const fnz = javascriptCode.indexOf('zuix.controller');
const fnc = javascriptCode.indexOf('class ');
if (fnc >= 0 && (fnc < fni || fni === -1) && (fnc < fnz || fnz === -1)) {
code = javascriptCode.substring(0, fnc) + 'return ' + javascriptCode.substring(fnc);
} else if (fni >= 0 && (fni < fnz || fnz === -1)) {
code = javascriptCode.substring(0, fni) + 'return ' + javascriptCode.substring(fni);
} else if (fnz !== -1) {
code = javascriptCode.substring(0, fnz) + 'return ' + javascriptCode.substring(fnz + 15);
}
return code;
}
},
/**
* Catches errors that occur in the specified component context.
* @param {ComponentContext} ctx The component context.
* @param {function} fn Function code to execute.
* @param {function} errorCallback Error callback.
* @memberOf Utils
*/
catchContextError(ctx, fn, errorCallback) | ,
/**
* @namespace dom
* @memberof Utils
*/
dom: {
/**
* Gets CSS query for matching the given value in a list of specified attributes.
* @param {string} name Comma separated list of attribute names.
* @param {string} value The value to match.
* @param {QuerySelectors} appendValue Additional CSS query to append
* @return {string} The query string.
* @memberof Utils.dom
*/
queryAttribute(name, value, appendValue) {
const fields = name.split(/[\s|,]+/g);
let selector = '';
fields.forEach(function(f, i) {
if (value != null) {
selector += '[' + CSS.escape(f) + '="' + value + '"]';
} else {
selector += '[' + CSS.escape(f) + ']';
}
if (appendValue) {
selector += appendValue.get(i);
}
if (i < fields.length - 1) selector += ',';
});
return selector;
},
/**
* Gets the first non-null value for the given comma-separated list of element attributes.
* @param {Element} element The target element.
* @param {string} name Comma separated list of attributes.
* @return {string} The attribute value.
* @memberof Utils.dom
*/
getAttribute(element, name) {
let value;
if (typeof name === 'string' && name.indexOf(',') !== -1) {
const fields = name.split(/[\s|,]+/g);
for (let i = 0; i < fields.length; i++) {
const f = fields[i];
const a = element.getAttribute(f);
if (a != null) {
value = a;
break;
}
}
} else value = element.getAttribute(name);
return value;
},
/**
* Sets the value of the given comma-separated list of attributes.
* @param {Element} element The target element.
* @param {string} name Comma separated list of attributes.
* @param {object|string} value The value to set.
* @memberof Utils.dom
*/
setAttribute(element, name, value) {
if (typeof name === 'string' && name.indexOf(',') !== -1) {
const fields = name.split(/[\s|,]+/g);
const _t = this;
fields.forEach(function(f) {
_t.setAttribute(element, f, value);
});
} else if (value === null) {
element.removeAttribute(name);
} else if (element.getAttribute(name) !== value) {
element.setAttribute(name, value);
}
},
/**
* Gets the CSS `:not` selector for the given comma-separated list of attributes with the specified value (if any).
* @param {string} name Comma separated list of attributes.
* @param {string} value The value to match.
* @return {QuerySelectors} The query selectors.
* @memberof Utils.dom
*/
cssNot(name, value) {
const fields = name.split(',');
let selector = '';
fields.forEach(function(v, i) {
if (v.startsWith('.')) {
selector += ':not(' + v + ')';
} else if (value != null) {
selector += ':not([' + v + '="' + value + '"])';
} else {
selector += ':not([' + v + '])';
}
if (i < fields.length - 1) selector += ',';
});
return (function(s) {
/** @type {QuerySelectors} */
return {
get(i) {
const selectors = s.split(',');
return (i >= selectors.length || i == null) ? selectors[0] : selectors[i];
},
// eslint-disable-next-line no-unused-vars
getAll() {
const selectors = s.split(',');
return selectors.join('');
}
};
})(selector);
},
/**
* Returns the ShadowRoot node containing the given element, or false if the element is not in a shadow DOM.
* @param {Node} node The node element.
* @return {ShadowRoot|boolean} The ShadowRoot or false.
* @memberof Utils.dom
*/
getShadowRoot(node) {
for (; node; node = node.parentNode) {
if (node instanceof ShadowRoot) {
return node;
}
}
return false;
}
}
};
module.exports = Utils;
| {
try {
fn();
} catch (err) {
ctx._error = err;
if (errorCallback) errorCallback(err);
if (err && ctx.options().error) {
(ctx.options().error)
.call(ctx, err, ctx);
} else {
console.error(err);
}
}
} | identifier_body |
Util.js | /*
* Copyright 2015-2023 G-Labs. All Rights Reserved.
*
* https://zuixjs.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
*
* This file is part of
* zUIx, Javascript library for component-based development.
* https://zuixjs.org
*
* @author Generoso Martello - https://github.com/genemars
*/
'use strict';
/**
* @typedef QuerySelectors
* @method {function(): string} getAll
* @method {function(i: number): string} get
*/
/**
* Common utility functions.
* @namespace Utils
*/
const Utils = {
/**
* Returns true only if object is null || undefined
* @param {object} obj The object to test.
* @return {boolean} True if null or undefined, otherwise false.
* @memberOf Utils
*/
isNoU(obj) {
return (typeof obj === 'undefined' || obj === null);
},
/**
* Gets object property given its name
* @param {object} o The object to get property from.
* @param {string} s The property path (dotted/indexed form).
* @return {object|undefined} The property matching the given path.
* @memberOf Utils
*/
propertyFromPath(o, s) {
if (typeof s !== 'string' || o == null) {
return;
}
try {
if (typeof o[s] !== 'undefined') {
return o[s];
}
} catch (e) {
// TODO: "TypeError: Cannot create proxy with a non-object as target or handler"
console.log(e);
}
let ref = o; let path = '';
const parts = s.match(/\[(".*?"|'.*?'|(.*?))\]|".*?"|'.*?'|[0-9a-zA-Z_$]+/g);
for (let i = 0; i < parts.length; i++) {
let m = parts[i];
if (m.startsWith('[') && m.endsWith(']')) {
m = m.substring(1, m.length - 1).trim();
}
if (m.startsWith('"') && m.endsWith('"')) {
m = m.substring(1, m.length - 1);
} else if (m.startsWith('\'') && m.endsWith('\'')) {
m = m.substring(1, m.length - 1);
}
path = path + m;
let propertyReference;
try {
propertyReference = ref[m];
} catch (e) {
// TODO: proxy has been revoked
}
if (typeof propertyReference !== 'undefined') {
ref = propertyReference;
} else {
// TODO: maybe logging?
// throw new Error('Undefined property "' + path + '"');
return;
}
path = path + '->';
}
return ref;
},
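// Quick sketch (an assumption, not in the original source): Utils.propertyFromPath({cfg: {port: 8080}}, 'cfg.port')
// should resolve to 8080; unresolved paths yield undefined.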
/**
* Creates a copy of a given object.
* @param {object} obj The source object.
* @return {object} The object copy.
* @memberOf Utils
*/
cloneObject(obj) {
if (obj === null || typeof obj !== 'object') {
return obj;
}
// Give temp the original object's constructor
let temp = obj;
try {
temp = obj.constructor();
for (const key in obj) {
if (obj.hasOwnProperty(key)) {
temp[key] = this.cloneObject(obj[key]);
}
}
} catch (e) { | // TODO: should warn when clone is not possible
}
return temp;
},
// TODO: deprecate `hasPassiveEvents`
/**
* Returns true if browser supports passive events.
* @return {boolean} True if supported, otherwise false.
* @memberOf Utils
*/
hasPassiveEvents() {
let supportsPassive = false;
try {
const opts = Object.defineProperty({}, 'passive', {
get: function() {
supportsPassive = true;
}
});
window.addEventListener('testPassive', null, opts);
window.removeEventListener('testPassive', null, opts);
} catch (e) {}
return supportsPassive;
},
/**
* Converts the given string to `camelCase`
* @param {string} s The string to convert.
* @return {string} The converted string.
* @memberOf Utils
*/
hyphensToCamelCase(s) {
return typeof s === 'string' ? s.replace(/--/g, ':').replace(/-([a-z0-9_$-])/g, function(g) {
return '_$-'.indexOf(g[1]) > -1 || (+g[1]).toString() === g[1] ?
'_' + g[1].replace('-', '_') : g[1].toUpperCase();
}).replace(/:/g, '-') : s;
},
/**
* Converts the given string to `kebab-case`.
* @param {string} s The string to convert.
* @return {string} The converted string.
* @memberOf Utils
*/
camelCaseToHyphens(s) {
if (typeof s !== 'string') return s;
s = s.replace(/(^\w)|(\s+\w)/g, function(letter) {
return letter.toUpperCase();
}).replace(/\s/g, '');
return s.split(/(?=[A-Z])/).join('-').toLowerCase();
},
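// Quick sketch (an assumption, not in the original source): 'content-wrapper' <-> 'contentWrapper'
// via Utils.hyphensToCamelCase / Utils.camelCaseToHyphens.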
/**
* Normalizes controller code (ES5/ES6+).
* @param {string} javascriptCode The JS code to normalize.
* @return {string} Normalized JS controller code.
* @memberOf Utils
*/
normalizeControllerCode(javascriptCode) {
if (javascriptCode.indexOf('module.exports') >= 0) {
return '\'use strict\'; let module = {}; ' + javascriptCode + ';\nreturn module.exports;';
} else {
// TODO: improve code parsing
let code = javascriptCode;
const fni = javascriptCode.indexOf('function ');
const fnz = javascriptCode.indexOf('zuix.controller');
const fnc = javascriptCode.indexOf('class ');
if (fnc >= 0 && (fnc < fni || fni === -1) && (fnc < fnz || fnz === -1)) {
code = javascriptCode.substring(0, fnc) + 'return ' + javascriptCode.substring(fnc);
} else if (fni >= 0 && (fni < fnz || fnz === -1)) {
code = javascriptCode.substring(0, fni) + 'return ' + javascriptCode.substring(fni);
} else if (fnz !== -1) {
code = javascriptCode.substring(0, fnz) + 'return ' + javascriptCode.substring(fnz + 15);
}
return code;
}
},
/**
* Catches errors that occur in the specified component context.
* @param {ComponentContext} ctx The component context.
* @param {function} fn Function code to execute.
* @param {function} errorCallback Error callback.
* @memberOf Utils
*/
catchContextError(ctx, fn, errorCallback) {
try {
fn();
} catch (err) {
ctx._error = err;
if (errorCallback) errorCallback(err);
if (err && ctx.options().error) {
(ctx.options().error)
.call(ctx, err, ctx);
} else {
console.error(err);
}
}
},
/**
* @namespace dom
* @memberof Utils
*/
dom: {
/**
* Gets CSS query for matching the given value in a list of specified attributes.
* @param {string} name Comma separated list of attribute names.
* @param {string} value The value to match.
* @param {QuerySelectors} appendValue Additional CSS query to append
* @return {string} The query string.
* @memberof Utils.dom
*/
queryAttribute(name, value, appendValue) {
const fields = name.split(/[\s|,]+/g);
let selector = '';
fields.forEach(function(f, i) {
if (value != null) {
selector += '[' + CSS.escape(f) + '="' + value + '"]';
} else {
selector += '[' + CSS.escape(f) + ']';
}
if (appendValue) {
selector += appendValue.get(i);
}
if (i < fields.length - 1) selector += ',';
});
return selector;
},
/**
* Gets the first non-null value for the given comma-separated list of element attributes.
* @param {Element} element The target element.
* @param {string} name Comma separated list of attributes.
* @return {string} The attribute value.
* @memberof Utils.dom
*/
getAttribute(element, name) {
let value;
if (typeof name === 'string' && name.indexOf(',') !== -1) {
const fields = name.split(/[\s|,]+/g);
for (let i = 0; i < fields.length; i++) {
const f = fields[i];
const a = element.getAttribute(f);
if (a != null) {
value = a;
break;
}
}
} else value = element.getAttribute(name);
return value;
},
/**
* Sets the value of the given comma-separated list of attributes.
* @param {Element} element The target element.
* @param {string} name Comma separated list of attributes.
* @param {object|string} value The value to set.
* @memberof Utils.dom
*/
setAttribute(element, name, value) {
if (typeof name === 'string' && name.indexOf(',') !== -1) {
const fields = name.split(/[\s|,]+/g);
const _t = this;
fields.forEach(function(f) {
_t.setAttribute(element, f, value);
});
} else if (value === null) {
element.removeAttribute(name);
} else if (element.getAttribute(name) !== value) {
element.setAttribute(name, value);
}
},
/**
* Gets the CSS `:not` selector for the given comma-separated list of attributes with the specified value (if any).
* @param {string} name Comma separated list of attributes.
* @param {string} value The value to match.
* @return {QuerySelectors} The query selectors.
* @memberof Utils.dom
*/
cssNot(name, value) {
const fields = name.split(',');
let selector = '';
fields.forEach(function(v, i) {
if (v.startsWith('.')) {
selector += ':not(' + v + ')';
} else if (value != null) {
selector += ':not([' + v + '="' + value + '"])';
} else {
selector += ':not([' + v + '])';
}
if (i < fields.length - 1) selector += ',';
});
return (function(s) {
/** @type {QuerySelectors} */
return {
get(i) {
const selectors = s.split(',');
return (i >= selectors.length || i == null) ? selectors[0] : selectors[i];
},
// eslint-disable-next-line no-unused-vars
getAll() {
const selectors = s.split(',');
return selectors.join('');
}
};
})(selector);
},
/**
* Returns the ShadowRoot node containing the given element, or false if the element is not in a shadow DOM.
* @param {Node} node The node element.
* @return {ShadowRoot|boolean} The ShadowRoot or false.
* @memberof Utils.dom
*/
getShadowRoot(node) {
for (; node; node = node.parentNode) {
if (node instanceof ShadowRoot) {
return node;
}
}
return false;
}
}
};
module.exports = Utils; | random_line_split |
|
lib.rs | //! Example crate demonstrating how to use nom to parse `/proc/mounts`. Browse crates.io for sys-mount, proc-mounts, and libmount for more stable, usable crates.
// Needed to use traits associated with std::io::BufReader.
use std::io::BufRead;
use std::io::Read;
/// Type-erased errors.
pub type BoxError = std::boxed::Box<dyn
std::error::Error // must implement Error to satisfy ?
+ std::marker::Send // needed for threads
+ std::marker::Sync // needed for threads
>;
/// Describes a mounted filesystem, see `man 8 mount` for more details.
#[derive(Clone, Default, Debug)]
pub struct Mount {
/// The device from which the filesystem is mounted, e.g. /dev/sda1
pub device: std::string::String,
/// Where in the root filesystem the device is mounted, e.g. /mnt/disk
pub mount_point: std::string::String,
/// The filesystem type, e.g. ext4
pub file_system_type: std::string::String,
/// A vector of mount options, e.g. ["ro", "nosuid"]
/// Note: This could also be implemented as a set (e.g. std::collections::HashSet)
pub options: std::vec::Vec<std::string::String>,
}
/// Implements `Display` for `Mount` to simulate behavior of Unix mount command.
///
/// # Examples
/// ```
/// # use nom_tutorial::Mount;
/// # use std::string::String;
/// let mount = Mount {
/// device: String::from("/dev/sda1"),
/// mount_point: String::from("/mnt/disk"),
/// file_system_type: String::from("ext4"),
/// options: vec![String::from("ro"), String::from("nosuid")]
/// };
/// assert!(mount.to_string() == "/dev/sda1 on /mnt/disk type ext4 (ro,nosuid)");
/// ```
impl std::fmt::Display for Mount {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{} on {} type {} ({})", self.device, self.mount_point, self.file_system_type, self.options.join(","))
}
}
/// Structure that accesses `/proc/mounts` and iterates over the contained mounts.
///
/// You can generate an instance by calling [Mounts::new()] or the convenience method [mounts()]. Instantiation may fail if `/proc/mounts` does not exist or you do not have access to read it. You can access each individual mount through an iterator with [Mounts::into_iter()](std::iter::IntoIterator::into_iter) for a consuming iterator or [Mounts::iter_mut()] for a mutable iterator. Note that there is no immutable borrowed iterator `Mounts::iter()`. An instance of `Mounts` really isn't useful for anything except iterating over the contained mounts.
/// # Examples
///
/// ```
/// # use nom_tutorial;
/// for mount in nom_tutorial::mounts().unwrap() {
/// println!("{}", mount.unwrap());
/// }
pub struct Mounts {
buf_reader: std::io::BufReader<std::fs::File>
}
impl Mounts {
/// Returns a new Mounts instance. You can also call [mounts()] for convenience.
pub fn new() -> std::result::Result<Mounts, std::io::Error> {
let file = std::fs::File::open("/proc/mounts")?;
Ok( Mounts { buf_reader: std::io::BufReader::new(file) } )
}
}
impl IntoIterator for Mounts {
type Item = std::result::Result<Mount, BoxError>;
type IntoIter = MountsIntoIterator;
/// Consuming iterator, used similarly to mutable iterator. See [Mounts::iter_mut()] for example.
fn into_iter(self) -> Self::IntoIter {
MountsIntoIterator { lines: self.buf_reader.lines() }
}
}
impl<'a> IntoIterator for &'a mut Mounts {
type Item = std::result::Result<Mount, BoxError>;
type IntoIter = MountsIteratorMut<'a>;
/// Mutable iterator, see [Mounts::iter_mut()].
fn into_iter(self) -> Self::IntoIter {
MountsIteratorMut { lines: self.buf_reader.by_ref().lines() }
}
}
/// Consuming iterator for [Mounts].
pub struct MountsIntoIterator {
lines: std::io::Lines<std::io::BufReader<std::fs::File>>
}
impl std::iter::Iterator for MountsIntoIterator {
type Item = std::result::Result<Mount, BoxError>;
/// Returns the next line in `/proc/mounts` as a [Mount]. If there is a problem reading or parsing `/proc/mounts`, an error is returned. In the case of a parsing error we use [nom::Err::to_owned()] to allow the returned error to outlive `line`. See [Mounts::iter_mut()] for an analogous example using a mutable iterator.
fn next(&mut self) -> std::option::Option<Self::Item> {
match self.lines.next() {
Some(line) => match line {
Ok(line) => match parsers::parse_line(&line[..]) {
Ok( (_, m) ) => Some(Ok(m)),
Err(e) => Some(Err(e.to_owned().into()))
},
Err(e) => Some(Err(e.into()))
},
None => None
}
}
}
/// Mutable iterator for `Mounts`.
pub struct MountsIteratorMut<'a> {
lines: std::io::Lines<&'a mut std::io::BufReader<std::fs::File>>
}
impl<'a> std::iter::Iterator for MountsIteratorMut<'a> {
type Item = std::result::Result<Mount, BoxError>;
// Returns the next line in `/proc/mounts` as a [Mount]. See [Mounts::iter_mut()] for an example.
fn next(&mut self) -> std::option::Option<Self::Item> {
match self.lines.next() {
Some(line) => match line {
Ok(line) => match parsers::parse_line(&line[..]) {
Ok( (_, m) ) => Some(Ok(m)),
Err(e) => Some(Err(e.to_owned().into()))
},
Err(e) => Some(Err(e.into()))
},
None => None
}
}
}
impl<'a> Mounts {
// There is no non-mutable iterator.
/// Mutable iterator.
/// # Examples
/// ```
/// # use nom_tutorial;
/// let mut iter = nom_tutorial::mounts().expect("Couldn't access /proc/mounts.").into_iter();
/// match iter.next() {
/// Some(m) => match m {
/// Ok(m) => eprintln!("Here is a mounted filesystem: {}", m),
/// Err(e) => eprintln!("There was an error parsing the next line in /proc/mounts: {}", e)
/// },
/// None => eprintln!("There are no mounted filesystems.")
/// }
/// ```
pub fn | (&'a mut self) -> MountsIteratorMut<'a> {
self.into_iter()
}
}
// Encapsulate individual nom parsers in a private submodule. The `pub(self)` keyword allows the inner method [parsers::parse_line()] to be called by code within this module, but not by users of our crate.
pub(self) mod parsers {
use super::Mount;
// Extract a string that does not contain whitespace (space or tab). Anything else goes.
fn not_whitespace(i: &str) -> nom::IResult<&str, &str> {
nom::bytes::complete::is_not(" \t")(i)
}
// Replace the sequence 040 with a space.
fn escaped_space(i: &str) -> nom::IResult<&str, &str> {
nom::combinator::value(" ", nom::bytes::complete::tag("040"))(i)
}
// Replace the escaped sequence \ with a \. The inner parser `nom::character::complete::char` returns a `char` instead of a `&str`, so we wrap it in a `nom::combinator::recognize`, which returns that `char` as an `&str` if the inner parser succeeds, and returns an error if the inner parser fails.
fn escaped_backslash(i: &str) -> nom::IResult<&str, &str> {
nom::combinator::recognize(nom::character::complete::char('\\'))(i)
}
// Replace all instances of \040 in a string with a space.
// Replace \\ with a \.
fn transform_escaped(i: &str) -> nom::IResult<&str, std::string::String> {
nom::bytes::complete::escaped_transform(nom::bytes::complete::is_not("\\"), '\\', nom::branch::alt((escaped_backslash, escaped_space)))(i)
}
// Parse the options of a mount into a comma separated vector of strings. The options string is terminated by a whitespace.
// Here we use `nom::combinator::map_parser` to extract the full whitespace-terminated options string and then pass it in to `transform_escaped` to process escaped characters. Then the transformed string is split into a comma-delimited vector of strings by `nom::multi::separated_list`.
fn mount_opts(i: &str) -> nom::IResult<&str, std::vec::Vec<std::string::String>> {
nom::multi::separated_list(nom::character::complete::char(','), nom::combinator::map_parser(nom::bytes::complete::is_not(", \t"),transform_escaped))(i)
}
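// Illustrative expectation (an assumption, analogous to test_mount_opts below): for an
// un-escaped options string such as "ro,relatime,size=100k", mount_opts should yield
// Ok(("", vec!["ro".to_string(), "relatime".to_string(), "size=100k".to_string()])).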
// Parse a line from `/proc/mounts` into a Mount struct. This is perhaps the most complex looking parser, but it is actually one of the simplest because we build upon each of the parsers defined above. Let's break it down parser by parser:
// `nom::combinator::all_consuming` generates an error if there is any leftover input. This will force nom to generate an error if there is unexpected input at the end of a line in `/proc/mounts`, for example:
// ```ignore
// /dev/sda1 /mnt/disk ext4 defaults 0 0 this_last_part_shouldn't_be_here
// ```
//
// `nom::sequence::tuple` generates a `Result<Ok(remaining_input: &str, output_tuple), Error>`. Although it looks complicated, we can very easily destructure that tuple. Each sub/inner parser we pass to `nom::sequence::tuple` generates its own element within the tuple. We can ignore the whitespace by matching it with `_` and destructure the other elements of the tuple as the variables we are interested in, such as `device`, `mount_point`, etc. If everything goes as planned we return a new instance of the `Mount` structure populated with the variables we destructured from the tuple.
// ```ignore
// let (device, _, mount_point /*, ...*/) = /* tuple returned by all_consuming(tuple()) --> */ ("/dev/sda1", " ", "/mnt/disk" /*, ...*/);
// let mount = Mount { device: device.to_string(), mount_point: mount_point.to_string() /*, ...*/ };
// ```
pub fn parse_line(i: &str) -> nom::IResult<&str, Mount> {
match nom::combinator::all_consuming(nom::sequence::tuple((
nom::combinator::map_parser(not_whitespace, transform_escaped), // device
nom::character::complete::space1,
nom::combinator::map_parser(not_whitespace, transform_escaped), // mount_point
nom::character::complete::space1,
not_whitespace, // file_system_type
nom::character::complete::space1,
mount_opts, // options
nom::character::complete::space1,
nom::character::complete::char('0'),
nom::character::complete::space1,
nom::character::complete::char('0'),
nom::character::complete::space0,
)))(i) {
Ok((remaining_input, (
device,
_, // whitespace
mount_point,
_, // whitespace
file_system_type,
_, // whitespace
options,
_, // whitespace
_, // 0
_, // whitespace
_, // 0
_, // optional whitespace
))) => {
Ok((remaining_input, Mount {
device: device,
mount_point: mount_point,
file_system_type: file_system_type.to_string(),
options: options
}))
}
Err(e) => Err(e)
}
}
// Alternative version of `parse_line()` above that performs the same
// function using a different style. Rather than parsing the entire line at
// once with one big `nom::sequence::tuple` we break the parsing up into
// multiple separate statements. Each statement runs a parser that returns
// an `Ok(remaining_input, value)`. At the end of each statement we have
// the `?` operator, which unwraps the result and returns early with an
// error if parsing failed. The remaining input from each parser is used as
// the input of each subsequent parser. Values are assigned to temporary
// variables that are used to construct a `Mount` object at the end of the
// function. Values that are not needed are discarded by assigning to `_`.
#[allow(unused)]
pub fn parse_line_alternate(i: &str) -> nom::IResult<&str, Mount> {
let (i, device) = nom::combinator::map_parser(not_whitespace, transform_escaped)(i)?; // device
let (i, _) = nom::character::complete::space1(i)?;
let (i, mount_point) = nom::combinator::map_parser(not_whitespace, transform_escaped)(i)?; // mount_point
let (i, _) = nom::character::complete::space1(i)?;
let (i, file_system_type) = not_whitespace(i)?; // file_system_type
let (i, _) = nom::character::complete::space1(i)?;
let (i, options) = mount_opts(i)?; // options
let (i, _) = nom::combinator::all_consuming(nom::sequence::tuple((
nom::character::complete::space1,
nom::character::complete::char('0'),
nom::character::complete::space1,
nom::character::complete::char('0'),
nom::character::complete::space0
)))(i)?;
Ok((i, Mount {
device: device,
mount_point: mount_point,
file_system_type: file_system_type.to_string(),
options:options
}))
}
#[cfg(test)]
mod tests {
use super::*;
// Extracts a string that does not contain whitespace, i.e. space or tab.
#[test]
fn test_not_whitespace() {
assert_eq!(not_whitespace("abcd efg"), Ok((" efg", "abcd")));
assert_eq!(not_whitespace("abcd\tefg"), Ok(("\tefg", "abcd")));
assert_eq!(not_whitespace(" abcdefg"), Err(nom::Err::Error((" abcdefg", nom::error::ErrorKind::IsNot))));
}
// Converts 040 to a space. Does not actually recognize a literal space.
#[test]
fn test_escaped_space() {
assert_eq!(escaped_space("040"), Ok(("", " ")));
assert_eq!(escaped_space(" "), Err(nom::Err::Error((" ", nom::error::ErrorKind::Tag))));
}
// Converts `char` \ to `&str` \.
#[test]
fn test_escaped_backslash() {
assert_eq!(escaped_backslash("\\"), Ok(("", "\\")));
assert_eq!(escaped_backslash("not a backslash"), Err(nom::Err::Error(("not a backslash", nom::error::ErrorKind::Char))));
}
// Recognizes each escape sequence and transforms it to the escaped literal.
// For example, each \040 is transformed into a space.
#[test]
fn test_transform_escaped() {
assert_eq!(transform_escaped("abc\\040def\\\\g\\040h"), Ok(("", std::string::String::from("abc def\\g h"))));
assert_eq!(transform_escaped("\\bad"), Err(nom::Err::Error(("bad", nom::error::ErrorKind::Tag))));
}
// Parses a comma separated list of mount options, which might contain spaces.
#[test]
fn test_mount_opts() {
assert_eq!(mount_opts("a,bc,d\\040e"), Ok(("", vec!["a".to_string(), "bc".to_string(), "d e".to_string()])));
}
// Parses a line from /proc/mounts
#[test]
fn test_parse_line() {
let mount1 = Mount{
device: "device".to_string(),
mount_point: "mount_point".to_string(),
file_system_type: "file_system_type".to_string(),
options: vec!["options".to_string(), "a".to_string(), "b=c".to_string(), "d e".to_string()]
};
let (_, mount2) = parse_line("device mount_point file_system_type options,a,b=c,d\\040e 0 0").unwrap();
assert_eq!(mount1.device, mount2.device);
assert_eq!(mount1.mount_point, mount2.mount_point);
assert_eq!(mount1.file_system_type, mount2.file_system_type);
assert_eq!(mount1.options, mount2.options);
}
// Parses a line from /proc/mounts
#[test]
fn test_parse_line_alternate() {
let mount1 = Mount{
device: "device".to_string(),
mount_point: "mount_point".to_string(),
file_system_type: "file_system_type".to_string(),
options: vec!["options".to_string(), "a".to_string(), "b=c".to_string(), "d e".to_string()]
};
let (_, mount2) = parse_line_alternate("device mount_point file_system_type options,a,b=c,d\\040e 0 0").unwrap();
assert_eq!(mount1.device, mount2.device);
assert_eq!(mount1.mount_point, mount2.mount_point);
assert_eq!(mount1.file_system_type, mount2.file_system_type);
assert_eq!(mount1.options, mount2.options);
}
}
}
/// Convenience method equivalent to `Mounts::new()`.
pub fn mounts() -> std::result::Result<Mounts, std::io::Error> {
Mounts::new()
}
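// A minimal, hypothetical usage sketch of the public API above; it is not part of the
// original tutorial. The function name `print_mounts_example` is invented for
// illustration, but it only calls items defined in this file (`mounts()`, `BoxError`,
// and `Mount`'s `Display` impl).
#[allow(dead_code)]
fn print_mounts_example() -> std::result::Result<(), BoxError> {
	// Propagate an I/O error from opening /proc/mounts, then print each parsed Mount,
	// propagating any parse error as a BoxError.
	for mount in mounts()? {
		println!("{}", mount?);
	}
	Ok(())
}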
stringutils.go
package codegen
import (
"errors"
"fmt"
"strings"
"github.com/gertd/go-pluralize"
)
var pluralizer = pluralize.NewClient()
// edgecaseDatatypes holds hardcoded types for specific properties which are problematic to parse
var edgecaseDatatypes = map[string]string{
"locale": "string",
"Permissions<PermissionName>": "string",
}
var dataTypes = map[string]string{
"null": "interface{}",
// string-ish types
"interface{}": "interface{}",
"[]string": "[]string",
"junctionidlist": "[]string",
"string": "string",
"bool": "bool",
"float64": "float64",
"id": "string",
"lookup": "string",
"reference": "string",
"text": "string",
"textarea": "string",
"combobox": "string",
"complexvalue": "string",
"apexclassmetadata": "interface{}",
"url": "string",
"email": "string",
"roll-up summary (sum invoice line)": "string",
"phone": "string",
"picklist": "string",
"manageablestate enumerated list": "string",
"actionemailsendertype enumerated list": "string",
"encryptedstring": "string",
"mns:workflowoutboundmessage": "string",
"mns:workflowtask": "string",
"mns:workflowrule": "string",
"mns:workflowfieldupdate": "string",
"mns:usercriteria": "string",
"mns:workskillrouting": "string",
"mns:timesheettemplate": "string",
"mns:compactlayout": "string",
"mns: compactlayout": "string",
"mns:customapplication": "string",
"mns:workflowalert": "string",
"mns:embeddedserviceconfig": "string", | "mns:eventdelivery": "string",
"mns:eventdescription": "string",
"mns:eventsubscription": "string",
"mns:embeddedserviceliveagent": "string",
"mns: flow": "string",
"https://developer.salesforce.com/docs/atlas.en-us.api_meta.meta/api_meta/meta_embeddedservicebranding.htm": "string",
"restricted picklist": "string",
"multipicklist": "string",
// boolean-ish types
"boolean": "bool",
"checkbox": "bool",
// numeric-ish types
"int": "int",
"currency": "float64",
"number": "int",
"double": "float64",
"long": "int64",
"int64": "int64",
"integer": "int",
"decimal": "float64",
"percent": "float64",
// complex types
"address": "types.Address",
"date": "types.Date",
"datetime": "types.Datetime",
"date/time": "types.Datetime",
"object": "json.RawMessage",
"queryresult": "types.QueryResult",
"anytype": "json.RawMessage",
"types.datetime": "types.Datetime",
"types.address": "types.Address",
"types.date": "types.Date",
"time.time": "time.Time",
"time": "types.Datetime",
"base64": "string",
"json.rawmessage": "json.RawMessage",
"any": "interface{}",
"types.queryresult": "types.QueryResult",
"symboltable": "json.RawMessage",
"apexcomponentmetadata": "json.RawMessage",
"entitydefinition": "json.RawMessage",
"fielddefinition": "json.RawMessage",
"apexresult": "json.RawMessage",
"heapdump": "json.RawMessage",
"soqlresult": "json.RawMessage",
"apexpagemetadata": "json.RawMessage",
"executeanonymousresult": "json.RawMessage",
"apextriggermetadata": "json.RawMessage",
"brandingset": "json.RawMessage",
"compactlayoutinfo": "json.RawMessage",
"deploydetails": "json.RawMessage",
"flexipage": "json.RawMessage",
"customfieldmetadata": "json.RawMessage",
"customfield": "json.RawMessage",
"customtabmetadata": "json.RawMessage",
"customlabel": "json.RawMessage",
"embeddedserviceconfig": "json.RawMessage",
"embeddedserviceflowconfig": "json.RawMessage",
"embeddedservicemenusettings": "json.RawMessage",
"publisher": "json.RawMessage",
"relationshipreferenceto": "json.RawMessage",
"flexipagemetadata": "json.RawMessage",
"flowdefinition": "json.RawMessage",
"flow": "json.RawMessage",
"customvalue[]": "json.RawMessage",
"array of typeextent": "json.RawMessage",
"mns:inboundnetworkconnection": "json.RawMessage",
"mns:keywordlist": "json.RawMessage",
"datacategorygroupreference": "json.RawMessage",
"mns:layout": "json.RawMessage",
"mns:lightningcomponentbundle": "json.RawMessage",
"user": "json.RawMessage",
"location": "json.RawMessage",
"lookupfilter": "json.RawMessage",
"mns: managedcontenttype": "json.RawMessage",
"mns:moderationrule": "json.RawMessage",
"operationparameters": "json.RawMessage",
"mns:outboundnetworkconnection": "json.RawMessage",
"subscriberpackageinstallerrors": "json.RawMessage",
"msn:pathassistant": "json.RawMessage",
"querylocator": "json.RawMessage",
"mns: recommendationstrategy": "json.RawMessage",
"mns:recordactiondeploymentchannel": "json.RawMessage",
"relationshipinfo": "json.RawMessage",
"queryresultmetadata": "json.RawMessage",
"searchlayoutbuttonsdisplayed": "json.RawMessage",
"standardvalue[]": "json.RawMessage",
"subscriberpackagecsptrustedsites": "json.RawMessage",
"properties": "json.RawMessage",
"array of constructor": "json.RawMessage",
"authprovider": "json.RawMessage",
"validationrule metadata": "json.RawMessage",
"any type": "json.RawMessage",
"recordtypessupported": "json.RawMessage",
"datatype": "json.RawMessage",
"mns:flowdefinition": "json.RawMessage",
"ratelimittimeperiod (enumeration of type string)": "json.RawMessage",
"subscriberpackageprofilemappings": "json.RawMessage",
"sobject": "json.RawMessage",
"mns:recordactiondeploymentcontext": "json.RawMessage",
"array of mapvalue": "json.RawMessage",
"searchlayoutfieldsdisplayed": "json.RawMessage",
"subscriberpackagedependencies": "json.RawMessage",
"array of externalreference": "json.RawMessage",
"userentityaccess": "json.RawMessage",
"moderationruletype (enumeration of type string)": "json.RawMessage",
"mns:recordactiondeployment": "json.RawMessage",
"raw": "json.RawMessage",
"array of symboltable": "json.RawMessage",
"mns:recordactionrecommendation": "json.RawMessage",
"subscriberpackageprofiles": "json.RawMessage",
"array of string": "json.RawMessage",
"mns:recordactionselectableitem": "json.RawMessage",
"subscriberpackageremotesitesettings": "json.RawMessage",
"array of method": "json.RawMessage",
"array of visibilitysymbol": "json.RawMessage",
"array of symbol": "json.RawMessage",
//
"[]*field": "[]*Field",
"[]int64": "[]int64",
"*tar": "*Tar",
"*zar": "*Zar",
"types.AlmostBool": "types.AlmostBool",
}
var nillableDataTypes = map[string]string{
"int": "types.NullableInt",
"bool": "types.NullableBool",
"string": "types.NullableString",
"float64": "types.NullableFloat64",
}
// prepareDatatype checks for property.Name specific overrides before calling convertType to
// produce a datatype for the given property
func prepareDatatype(propertyName, rawType string) (string, error) {
if rawType == "" {
potentialType, exists := edgecaseDatatypes[strings.ToLower(propertyName)]
if exists {
return potentialType, nil
}
}
return convertType(rawType)
}
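// Hypothetical sketch (the name examplePrepareDatatype is invented and not part of this
// package's API). It shows the override path described above: an empty raw type can still
// resolve through edgecaseDatatypes, while a known raw type goes through convertType.
func examplePrepareDatatype() {
	if goType, err := prepareDatatype("locale", ""); err == nil {
		fmt.Println(goType) // "string", resolved via edgecaseDatatypes
	}
	if goType, err := prepareDatatype("Name", "textarea"); err == nil {
		fmt.Println(goType) // "string", resolved via dataTypes
	}
}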
// convertType attempts to convert a string representation of a salesforce datatype
// to a string representation of a golang datatype. Salesforce datatypes may be retrieved from 3
// locations:
//
// 1) the DataType field of a FieldDefinition from the tooling/query API
// 2) the HTML table within the object and tooling reference documentation pages
// 3) the HTML table within the field reference documentation page
//
// importantly these 3 sources use different string representations for the same type. for example,
// the object reference docs may call a foreign-key-like datatype a "reference" and the field reference docs may
// call it an "id". For that reason this method is prone to faults and should error hard, forcing users
// to ensure proper usage and outputs.
//
// Also, there is at least one instance of a typo that suggests much of the reference documentation
// was handwritten or ad-libbed.
func convertType(str string) (string, error) {
if str == "" {
return "", fmt.Errorf("convertingType '%s' (empty string)", str)
}
dataType, exists := dataTypes[strings.ToLower(strings.TrimSpace(str))]
if exists {
return dataType, nil
}
return str, fmt.Errorf("convertingType '%s'", strings.ToLower(str))
}
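// Hypothetical usage sketch (the name exampleConvertType is invented for illustration).
// It demonstrates the "error hard" behavior described above: lookups are lowercased and
// trimmed, and unknown inputs come back with a non-nil error that callers must check.
func exampleConvertType() {
	goType, err := convertType(" Roll-Up Summary (SUM Invoice Line) ")
	fmt.Println(goType, err) // "string" <nil>
	_, err = convertType("definitely not a salesforce type")
	fmt.Println(err != nil) // true
}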
// convertNillableType returns a nillable wrapper type, extending convertType
func convertNillableType(str string) (string, error) {
dataType, err := convertType(str)
if err != nil {
return "", err
}
nillableDataType, exists := nillableDataTypes[dataType]
if exists {
return nillableDataType, nil
}
return str, fmt.Errorf("convertingNillableType '%s'", dataType)
}
// commonInitialisms is a list of common initialisms.
// This list should contain a subset of `golang.org/x/lint/golint`.
var commonInitialisms = [][]string{
{"ACL", "Acl"},
{"API", "Api"},
{"ASCII", "Ascii"},
{"CPU", "Cpu"},
{"CSS", "Css"},
{"DNS", "Dns"},
{"EOF", "Eof"},
{"GUID", "Guid"},
{"HTML", "Html"},
{"HTTP", "Http"},
{"HTTPS", "Https"},
{"ID", "Id"},
{"IP", "Ip"},
{"JSON", "Json"},
{"LHS", "Lhs"},
{"QPS", "Qps"},
{"RAM", "Ram"},
{"RHS", "Rhs"},
{"RPC", "Rpc"},
{"SLA", "Sla"},
{"SMTP", "Smtp"},
{"SObject", "Sobject"},
{"SObjects", "Sobjects"},
{"SQL", "Sql"},
{"SSH", "Ssh"},
{"TCP", "Tcp"},
{"TLS", "Tls"},
{"TTL", "Ttl"},
{"UDP", "Udp"},
{"UI", "Ui"},
{"UID", "Uid"},
{"UUID", "Uuid"},
{"URI", "Uri"},
{"URL", "Url"},
{"UTF8", "Utf8"},
{"VM", "Vm"},
{"XML", "Xml"},
{"XMPP", "Xmpp"},
{"XSRF", "Xsrf"},
{"XSS", "Xss"},
}
// ConvertInitialisms returns a string converted to Go case.
func convertInitialisms(s string) string {
for i := 0; i < len(commonInitialisms); i++ {
s = strings.ReplaceAll(s, commonInitialisms[i][1], commonInitialisms[i][0])
}
return s
}
// RevertInitialisms returns a string converted from Go case to normal case.
func revertInitialisms(s string) string {
for i := 0; i < len(commonInitialisms); i++ {
s = strings.ReplaceAll(s, commonInitialisms[i][0], commonInitialisms[i][1])
}
return s
}
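// Hypothetical sketch (exampleInitialismRoundTrip is an invented name). convertInitialisms
// and revertInitialisms are near-inverses based on plain substring replacement, so a word
// that merely contains an initialism (e.g. "Identity" containing "Id") is also rewritten.
func exampleInitialismRoundTrip() {
	goName := convertInitialisms("AccountId") // "AccountID"
	sfName := revertInitialisms(goName)       // "AccountId"
	fmt.Println(goName, sfName)
}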
var keywords = map[string]string{
"break": "salesforce_break",
"default": "salesforce_default",
"func": "salesforce_func",
"interface": "salesforce_interface",
"select": "salesforce_select",
"case": "salesforce_case",
"defer": "salesforce_defer",
"go": "salesforce_go",
"map": "salesforce_map",
"struct": "salesforce_struct",
"chan": "salesforce_chan",
"else": "salesforce_else",
"goto": "salesforce_goto",
"package": "salesforce_package",
"switch": "salesforce_switch",
"const": "salesforce_const",
"fallthrough": "salesforce_fallthrough",
"if": "salesforce_if",
"range": "salesforce_range",
"type": "salesforce_type",
"continue": "salesforce_continue",
"for": "salesforce_for",
"import": "salesforce_import",
"return": "salesforce_return",
"var": "salesforce_var",
}
// stripReservedKeywords ensures no Golang keywords will be used in a package name
func stripReservedKeywords(str string) string {
if sanitizedValue, exists := keywords[str]; exists {
return sanitizedValue
}
return str
}
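// Hypothetical sketch (exampleStripReservedKeywords is an invented name) showing how a
// Salesforce-derived name that collides with a Go keyword is prefixed, while other names
// pass through unchanged.
func exampleStripReservedKeywords() {
	fmt.Println(stripReservedKeywords("type"))    // "salesforce_type"
	fmt.Println(stripReservedKeywords("account")) // "account"
}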
func toCamelInitCase(s string, initCase bool) string {
s = strings.TrimSpace(s)
if s == "" {
return s
}
n := strings.Builder{}
n.Grow(len(s))
capNext := initCase
for i, v := range []byte(s) {
vIsCap := v >= 'A' && v <= 'Z'
vIsLow := v >= 'a' && v <= 'z'
if capNext {
if vIsLow {
v += 'A'
v -= 'a'
}
} else if i == 0 {
if vIsCap {
v += 'a'
v -= 'A'
}
}
if vIsCap || vIsLow {
n.WriteByte(v)
capNext = false
} else if vIsNum := v >= '0' && v <= '9'; vIsNum {
n.WriteByte(v)
capNext = true
} else {
capNext = v == '_' || v == ' ' || v == '-' || v == '.'
}
}
return n.String()
}
// ToCamelCase converts a string to CamelCase
func toCamelCase(s string) string {
return toCamelInitCase(s, true)
}
// ToLowerCamelCase converts a string to lowerCamelCase
func toLowerCamelCase(s string) string {
return toCamelInitCase(s, false)
}
func toFieldName(str string) string {
return convertInitialisms(toCamelCase(str))
}
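// Hypothetical sketch (exampleCaseHelpers is an invented name) tracing the helpers above:
// toCamelInitCase treats '_', ' ', '-', and '.' as word breaks, and toFieldName additionally
// upgrades initialisms so generated struct fields follow Go conventions.
func exampleCaseHelpers() {
	fmt.Println(toCamelCase("mount_point"))      // "MountPoint"
	fmt.Println(toLowerCamelCase("Mount Point")) // "mountPoint"
	fmt.Println(toFieldName("account id"))       // "AccountID"
}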
// enforceLineLimit adds line breaks to a docstring that exceeds lineLimit
func enforceLineLimit(str string, lineLimit int) string {
cumulativeLength := 0
var parts []string
for _, elem := range strings.Split(str, " ") {
cumulativeLength += len(elem)
if cumulativeLength > lineLimit {
cumulativeLength = 0
parts = append(parts, "\n\t//")
}
parts = append(parts, elem)
}
return strings.Join(parts, " ")
}
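// Hypothetical sketch (exampleEnforceLineLimit is an invented name). Once the running
// character count passes lineLimit, a "\n\t//" continuation is injected so the generated
// doc comment continues as a comment on the next line.
func exampleEnforceLineLimit() {
	fmt.Println(enforceLineLimit("a fairly long generated documentation sentence", 20))
}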
// stripNewLinesAndTabs ...
func stripNewLinesAndTabs(str string) string {
str = strings.ReplaceAll(str, "\n", " ")
str = strings.ReplaceAll(str, "\t", "")
str = strings.ReplaceAll(str, string('\u200B'), "")
str = strings.ReplaceAll(str, string('\u200D'), "")
str = strings.ReplaceAll(str, string('\u2014'), "")
return strings.Join(strings.Fields(str), " ")
}
type field struct {
Createable bool
IsNillable bool
}
// RequiredFields ...
func requiredFields(fields []*field) (results []*field) {
for i := 0; i < len(fields); i++ {
if fieldIsRequired(fields[i]) {
results = append(results, fields[i])
}
}
return results
}
func fieldIsRequired(f *field) bool {
// f.Required is not an actual field of Salesforce but a field this library sets on the
// SObjectDescribeField struct so that we may explicitly assert a field is required as a result
// of finding the string "required" within Salesforce's Object Reference documentation:
// i.e. : https://developer.salesforce.com/docs/atlas.en-us.object_reference.meta/object_reference/sforce_api_objects_lead.htm
return f.Createable && !f.IsNillable // && !f.DefaultedOnCreate)
}
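// Hypothetical sketch (exampleRequiredFields is an invented name). Per fieldIsRequired, a
// field counts as required only when it is createable and not nillable.
func exampleRequiredFields() {
	fields := []*field{
		{Createable: true, IsNillable: false},  // required
		{Createable: true, IsNillable: true},   // optional
		{Createable: false, IsNillable: false}, // not createable, ignored
	}
	fmt.Println(len(requiredFields(fields))) // 1
}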
// agreement https://en.wikipedia.org/wiki/Agreement_(linguistics)
type agreement int
const (
neither agreement = iota
singular
plural
)
// RelationshipName ...
func RelationshipName(name string) (string, error) {
return prepareStructName(name, plural)
}
var (
// ErrInvalidPropertyName ...
ErrInvalidPropertyName = errors.New("property name contains invalid characters")
// ErrPropertyKeyBlank ...
ErrPropertyKeyBlank = errors.New("property name must not be empty string")
protectedChars = map[rune]struct{}{
' ': {},
',': {},
'\n': {},
'-': {},
'.': {},
// U+200B is a 0-width space character that should be thrown into Mordor where it was probably forged
'\u200B': {},
'\u200D': {},
'\u2014': {},
'\u003C': {},
'>': {},
}
)
// prepareStructName ...
func prepareStructName(propertyKey string, pluralization agreement) (string, error) {
// 0) check for empty string
if propertyKey == "" {
return "", ErrPropertyKeyBlank
}
// 1a) check for a common typo i.e. "Postal Code" instead of "PostalCode"
parts := strings.Split(propertyKey, " ")
if len(parts) == 2 {
// edgecase for i.e. "Community (Zone)"
if strings.Contains(parts[1], "(") {
propertyKey = parts[0]
} else {
propertyKey = strings.Join(parts, "")
}
}
// 1b) check for restricted chars
var b strings.Builder
for _, char := range propertyKey {
if _, exists := protectedChars[char]; !exists {
b.Write([]byte(string(char)))
}
}
propertyKey = b.String()
// 1c) verify no restricted chars remain
for _, char := range propertyKey {
if _, exists := protectedChars[char]; exists {
return "", fmt.Errorf("invalid character '%U': %w", char, ErrInvalidPropertyName)
}
}
// 2) ensure proper pluralization
switch pluralization {
case singular:
propertyKey = pluralizer.Singular(propertyKey)
case plural:
propertyKey = pluralizer.Plural(propertyKey)
}
// 3) remove initialisms
propertyKey = convertInitialisms(propertyKey)
// 4) titlecase
return strings.Title(propertyKey), nil
}
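// Hypothetical sketch (examplePrepareStructName is an invented name). RelationshipName
// pluralizes before normalizing, while the singular form is what struct-like names use;
// "Postal Code" exercises the two-word join described in step 1a.
func examplePrepareStructName() {
	if name, err := RelationshipName("Lead"); err == nil {
		fmt.Println(name) // "Leads"
	}
	if name, err := prepareStructName("Postal Code", singular); err == nil {
		fmt.Println(name) // "PostalCode"
	}
}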
func isNillable(p Property) bool {
return strings.Contains(p.Documentation, "Nillable")
}
func dedupe(elems []string, ignoreElem string) []string {
m := make(map[string]struct{})
var final []string
for _, elem := range elems {
if elem == ignoreElem {
continue
}
if _, exists := m[elem]; exists {
continue
}
m[elem] = struct{}{}
final = append(final, elem)
}
return final
}
stringutils.go | package codegen
import (
"errors"
"fmt"
"strings"
"github.com/gertd/go-pluralize"
)
var pluralizer = pluralize.NewClient()
// edgecaseDataTypes return hardcoded types for specific properties which are problematic to parse
var edgecaseDatatypes = map[string]string{
"locale": "string",
"Permissions<PermissionName>": "string",
}
var dataTypes = map[string]string{
"null": "interface{}",
// string-ish types
"interface{}": "interface{}",
"[]string": "[]string",
"junctionidlist": "[]string",
"string": "string",
"bool": "bool",
"float64": "float64",
"id": "string",
"lookup": "string",
"reference": "string",
"text": "string",
"textarea": "string",
"combobox": "string",
"complexvalue": "string",
"apexclassmetadata": "interface{}",
"url": "string",
"email": "string",
"roll-up summary (sum invoice line)": "string",
"phone": "string",
"picklist": "string",
"manageablestate enumerated list": "string",
"actionemailsendertype enumerated list": "string",
"encryptedstring": "string",
"mns:workflowoutboundmessage": "string",
"mns:workflowtask": "string",
"mns:workflowrule": "string",
"mns:workflowfieldupdate": "string",
"mns:usercriteria": "string",
"mns:workskillrouting": "string",
"mns:timesheettemplate": "string",
"mns:compactlayout": "string",
"mns: compactlayout": "string",
"mns:customapplication": "string",
"mns:workflowalert": "string",
"mns:embeddedserviceconfig": "string",
"mns:embeddedservicefieldservice": "string",
"mns: customobject": "string",
"mns:eventdelivery": "string",
"mns:eventdescription": "string",
"mns:eventsubscription": "string",
"mns:embeddedserviceliveagent": "string",
"mns: flow": "string",
"https://developer.salesforce.com/docs/atlas.en-us.api_meta.meta/api_meta/meta_embeddedservicebranding.htm": "string",
"restricted picklist": "string",
"multipicklist": "string",
// boolean-ish types
"boolean": "bool",
"checkbox": "bool",
// numeric-ish types
"int": "int",
"currency": "float64",
"number": "int",
"double": "float64",
"long": "int64",
"int64": "int64",
"integer": "int",
"decimal": "float64",
"percent": "float64",
// complex types
"address": "types.Address",
"date": "types.Date",
"datetime": "types.Datetime",
"date/time": "types.Datetime",
"object": "json.RawMessage",
"queryresult": "types.QueryResult",
"anytype": "json.RawMessage",
"types.datetime": "types.Datetime",
"types.address": "types.Address",
"types.date": "types.Date",
"time.time": "time.Time",
"time": "types.Datetime",
"base64": "string",
"json.rawmessage": "json.RawMessage",
"any": "interface{}",
"types.queryresult": "types.QueryResult",
"symboltable": "json.RawMessage",
"apexcomponentmetadata": "json.RawMessage",
"entitydefinition": "json.RawMessage",
"fielddefinition": "json.RawMessage",
"apexresult": "json.RawMessage",
"heapdump": "json.RawMessage",
"soqlresult": "json.RawMessage",
"apexpagemetadata": "json.RawMessage",
"executeanonymousresult": "json.RawMessage",
"apextriggermetadata": "json.RawMessage",
"brandingset": "json.RawMessage",
"compactlayoutinfo": "json.RawMessage",
"deploydetails": "json.RawMessage",
"flexipage": "json.RawMessage",
"customfieldmetadata": "json.RawMessage",
"customfield": "json.RawMessage",
"customtabmetadata": "json.RawMessage",
"customlabel": "json.RawMessage",
"embeddedserviceconfig": "json.RawMessage",
"embeddedserviceflowconfig": "json.RawMessage",
"embeddedservicemenusettings": "json.RawMessage",
"publisher": "json.RawMessage",
"relationshipreferenceto": "json.RawMessage",
"flexipagemetadata": "json.RawMessage",
"flowdefinition": "json.RawMessage",
"flow": "json.RawMessage",
"customvalue[]": "json.RawMessage",
"array of typeextent": "json.RawMessage",
"mns:inboundnetworkconnection": "json.RawMessage",
"mns:keywordlist": "json.RawMessage",
"datacategorygroupreference": "json.RawMessage",
"mns:layout": "json.RawMessage",
"mns:lightningcomponentbundle": "json.RawMessage",
"user": "json.RawMessage",
"location": "json.RawMessage",
"lookupfilter": "json.RawMessage",
"mns: managedcontenttype": "json.RawMessage",
"mns:moderationrule": "json.RawMessage",
"operationparameters": "json.RawMessage",
"mns:outboundnetworkconnection": "json.RawMessage",
"subscriberpackageinstallerrors": "json.RawMessage",
"msn:pathassistant": "json.RawMessage",
"querylocator": "json.RawMessage",
"mns: recommendationstrategy": "json.RawMessage",
"mns:recordactiondeploymentchannel": "json.RawMessage",
"relationshipinfo": "json.RawMessage",
"queryresultmetadata": "json.RawMessage",
"searchlayoutbuttonsdisplayed": "json.RawMessage",
"standardvalue[]": "json.RawMessage",
"subscriberpackagecsptrustedsites": "json.RawMessage",
"properties": "json.RawMessage",
"array of constructor": "json.RawMessage",
"authprovider": "json.RawMessage",
"validationrule metadata": "json.RawMessage",
"any type": "json.RawMessage",
"recordtypessupported": "json.RawMessage",
"datatype": "json.RawMessage",
"mns:flowdefinition": "json.RawMessage",
"ratelimittimeperiod (enumeration of type string)": "json.RawMessage",
"subscriberpackageprofilemappings": "json.RawMessage",
"sobject": "json.RawMessage",
"mns:recordactiondeploymentcontext": "json.RawMessage",
"array of mapvalue": "json.RawMessage",
"searchlayoutfieldsdisplayed": "json.RawMessage",
"subscriberpackagedependencies": "json.RawMessage",
"array of externalreference": "json.RawMessage",
"userentityaccess": "json.RawMessage",
"moderationruletype (enumeration of type string)": "json.RawMessage",
"mns:recordactiondeployment": "json.RawMessage",
"raw": "json.RawMessage",
"array of symboltable": "json.RawMessage",
"mns:recordactionrecommendation": "json.RawMessage",
"subscriberpackageprofiles": "json.RawMessage",
"array of string": "json.RawMessage",
"mns:recordactionselectableitem": "json.RawMessage",
"subscriberpackageremotesitesettings": "json.RawMessage",
"array of method": "json.RawMessage",
"array of visibilitysymbol": "json.RawMessage",
"array of symbol": "json.RawMessage",
//
"[]*field": "[]*Field",
"[]int64": "[]int64",
"*tar": "*Tar",
"*zar": "*Zar",
"types.AlmostBool": "types.AlmostBool",
}
var nillableDataTypes = map[string]string{
"int": "types.NullableInt",
"bool": "types.NullableBool",
"string": "types.NullableString",
"float64": "types.NullableFloat64",
}
// prepareDatatype checks for property.Name specific overrides before calling convertType to
// produce a datatype for the given property
func prepareDatatype(propertyName, rawType string) (string, error) {
if rawType == "" {
potentialType, exists := edgecaseDatatypes[strings.ToLower(propertyName)]
if exists {
return potentialType, nil
}
}
return convertType(rawType)
}
// convertType attempts to convert a string representation of a salesforce datatype
// to a string representation of a golang datatype. Salesforce datatypes may be retrieved from 3
// locations:
//
// 1) the DataType field of a FieldDefinition from the tooling/query API
// 2) the HTML table within the object and tooling reference documentation pages
// 3) the HTML table within the field reference documentation page
//
// importantly these 3 sources use different string representations for the same type. for example,
// the object reference docs may call a foreign-key-like datatype a "reference" and the field reference docs may
// call it an "id". For that reason this method is prone to faults and should error hard, forcing users
// to ensure proper usage and outputs.
//
// Also there is at least one instance of a typo that suggests much of the reference documentation types
// was handwritten or adlibbed.
func convertType(str string) (string, error) {
if str == "" {
return "", fmt.Errorf("convertingType '%s' (empty string)", str)
}
dataType, exists := dataTypes[strings.ToLower(strings.TrimSpace(str))]
if exists {
return dataType, nil
}
return str, fmt.Errorf("convertingType '%s'", strings.ToLower(str))
}
// convertNullableType returns a custom type extending convertType
func convertNillableType(str string) (string, error) {
dataType, err := convertType(str)
if err != nil {
return "", err
}
nillableDataType, exists := nillableDataTypes[dataType]
if exists {
return nillableDataType, nil
}
return str, fmt.Errorf("convertingNillableType '%s'", dataType)
}
// commonInitialisms is a list of common initialisms.
// This list should contain a subset of `golang.org/x/lint/golint`.
var commonInitialisms = [][]string{
{"ACL", "Acl"},
{"API", "Api"},
{"ASCII", "Ascii"},
{"CPU", "Cpu"},
{"CSS", "Css"},
{"DNS", "Dns"},
{"EOF", "Eof"},
{"GUID", "Guid"},
{"HTML", "Html"},
{"HTTP", "Http"},
{"HTTPS", "Https"},
{"ID", "Id"},
{"IP", "Ip"},
{"JSON", "Json"},
{"LHS", "Lhs"},
{"QPS", "Qps"},
{"RAM", "Ram"},
{"RHS", "Rhs"},
{"RPC", "Rpc"},
{"SLA", "Sla"},
{"SMTP", "Smtp"},
{"SObject", "Sobject"},
{"SObjects", "Sobjects"},
{"SQL", "Sql"},
{"SSH", "Ssh"},
{"TCP", "Tcp"},
{"TLS", "Tls"},
{"TTL", "Ttl"},
{"UDP", "Udp"},
{"UI", "Ui"},
{"UID", "Uid"},
{"UUID", "Uuid"},
{"URI", "Uri"},
{"URL", "Url"},
{"UTF8", "Utf8"},
{"VM", "Vm"},
{"XML", "Xml"},
{"XMPP", "Xmpp"},
{"XSRF", "Xsrf"},
{"XSS", "Xss"},
}
// ConvertInitialisms returns a string converted to Go case.
func convertInitialisms(s string) string {
for i := 0; i < len(commonInitialisms); i++ {
s = strings.ReplaceAll(s, commonInitialisms[i][1], commonInitialisms[i][0])
}
return s
}
// RevertInitialisms returns a string converted from Go case to normal case.
func revertInitialisms(s string) string {
for i := 0; i < len(commonInitialisms); i++ {
s = strings.ReplaceAll(s, commonInitialisms[i][0], commonInitialisms[i][1])
}
return s
}
var keywords = map[string]string{
"break": "salesforce_break",
"default": "salesforce_default",
"func": "salesforce_func",
"interface": "salesforce_interface",
"select": "salesforce_select",
"case": "salesforce_case",
"defer": "salesforce_defer",
"go": "salesforce_go",
"map": "salesforce_map",
"struct": "salesforce_struct",
"chan": "salesforce_chan",
"else": "salesforce_else",
"goto": "salesforce_goto",
"package": "salesforce_package",
"switch": "salesforce_switch",
"const": "salesforce_const",
"fallthrough": "salesforce_fallthrough",
"if": "salesforce_if",
"range": "salesforce_range",
"type": "salesforce_type",
"continue": "salesforce_continue",
"for": "salesforce_for",
"import": "salesforce_import",
"return": "salesforce_return",
"var": "salesforce_var",
}
// stripReservedKeywords ensures no Golang keywords will be used in a package name
func stripReservedKeywords(str string) string {
if sanitizedValue, exists := keywords[str]; exists {
return sanitizedValue
}
return str
}
func toCamelInitCase(s string, initCase bool) string {
s = strings.TrimSpace(s)
if s == "" {
return s
}
n := strings.Builder{}
n.Grow(len(s))
capNext := initCase
for i, v := range []byte(s) {
vIsCap := v >= 'A' && v <= 'Z'
vIsLow := v >= 'a' && v <= 'z'
if capNext {
if vIsLow {
v += 'A'
v -= 'a'
}
} else if i == 0 {
if vIsCap {
v += 'a'
v -= 'A'
}
}
if vIsCap || vIsLow {
n.WriteByte(v)
capNext = false
} else if vIsNum := v >= '0' && v <= '9'; vIsNum {
n.WriteByte(v)
capNext = true
} else {
capNext = v == '_' || v == ' ' || v == '-' || v == '.'
}
}
return n.String()
}
// ToCamelCase converts a string to CamelCase
func | (s string) string {
return toCamelInitCase(s, true)
}
// ToLowerCamelCase converts a string to lowerCamelCase
func toLowerCamelCase(s string) string {
return toCamelInitCase(s, false)
}
func toFieldName(str string) string {
return convertInitialisms(toCamelCase(str))
}
// enforceLineLimit adds line breaks to a docstring that exceeds lineLimit
func enforceLineLimit(str string, lineLimit int) string {
cummulativeLength := 0
var parts []string
for _, elem := range strings.Split(str, " ") {
cummulativeLength += len(elem)
if cummulativeLength > lineLimit {
cummulativeLength = 0
parts = append(parts, "\n\t//")
}
parts = append(parts, elem)
}
return strings.Join(parts, " ")
}
// stripNewLinesAndTabs ...
func stripNewLinesAndTabs(str string) string {
str = strings.ReplaceAll(str, "\n", " ")
str = strings.ReplaceAll(str, "\t", "")
str = strings.ReplaceAll(str, string('\u200B'), "")
str = strings.ReplaceAll(str, string('\u200D'), "")
str = strings.ReplaceAll(str, string('\u2014'), "")
return strings.Join(strings.Fields(str), " ")
}
type field struct {
Createable bool
IsNillable bool
}
// RequiredFields ...
func requiredFields(fields []*field) (results []*field) {
for i := 0; i < len(fields); i++ {
if fieldIsRequired(fields[i]) {
results = append(results, fields[i])
}
}
return results
}
func fieldIsRequired(f *field) bool {
// f.Required is not an actual field of Salesforce but a field this library sets on the
// SObjectDescribeField struct so that we may explicitly assert a field is required as a result
// of finding the string "required" within Salesforce's Object Reference documentation:
// i.e. : https://developer.salesforce.com/docs/atlas.en-us.object_reference.meta/object_reference/sforce_api_objects_lead.htm
return f.Createable && !f.IsNillable // && !f.DefaultedOnCreate)
}
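// exampleRequiredFields is an illustrative sketch added for documentation purposes only; it is not
// part of the original generated file. It shows how requiredFields keeps only fields that are
// createable and not nillable, per fieldIsRequired above.
func exampleRequiredFields() {
	fields := []*field{
		{Createable: true, IsNillable: false},  // kept: must be provided on create
		{Createable: true, IsNillable: true},   // dropped: nillable
		{Createable: false, IsNillable: false}, // dropped: not createable
	}
	fmt.Println(len(requiredFields(fields))) // 1
}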
// agreement https://en.wikipedia.org/wiki/Agreement_(linguistics)
type agreement int
const (
neither agreement = iota
singular
plural
)
// RelationshipName ...
func RelationshipName(name string) (string, error) {
return prepareStructName(name, plural)
}
var (
// ErrInvalidPropertyName ...
ErrInvalidPropertyName = errors.New("property name contains invalid characters")
// ErrPropertyKeyBlank ...
ErrPropertyKeyBlank = errors.New("property name must not be empty string")
protectedChars = map[rune]struct{}{
' ': {},
',': {},
'\n': {},
'-': {},
'.': {},
// U+200B is a 0-width space character that should be thrown into Mordor where it was probably forged
'\u200B': {},
'\u200D': {},
'\u2014': {},
'\u003C': {},
'>': {},
}
)
// prepareStructName ...
func prepareStructName(propertyKey string, pluralization agreement) (string, error) {
// 0) check for empty string
if propertyKey == "" {
return "", ErrPropertyKeyBlank
}
// 1a) check for a common typo i.e. "Postal Code" instead of "PostalCode"
parts := strings.Split(propertyKey, " ")
if len(parts) == 2 {
// edgecase for i.e. "Community (Zone)"
if strings.Contains(parts[1], "(") {
propertyKey = parts[0]
} else {
propertyKey = strings.Join(parts, "")
}
}
// 1b) check for restricted chars
var b strings.Builder
for _, char := range propertyKey {
if _, exists := protectedChars[char]; !exists {
b.Write([]byte(string(char)))
}
}
propertyKey = b.String()
	// 1c) defensively verify no restricted chars remain
for _, char := range propertyKey {
if _, exists := protectedChars[char]; exists {
return "", fmt.Errorf("invalid character '%U': %w", char, ErrInvalidPropertyName)
}
}
// 2) ensure proper pluralization
switch pluralization {
case singular:
propertyKey = pluralizer.Singular(propertyKey)
case plural:
propertyKey = pluralizer.Plural(propertyKey)
}
// 3) remove initialisms
propertyKey = convertInitialisms(propertyKey)
// 4) titlecase
return strings.Title(propertyKey), nil
}
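// examplePrepareStructName is an illustrative sketch added for documentation purposes only; it is
// not part of the original generated file. It walks the pipeline above: strip protected
// characters, fix pluralization, convert initialisms to Go case, then title-case.
func examplePrepareStructName() {
	name, _ := prepareStructName("Account", plural)
	fmt.Println(name) // "Accounts"
	name, _ = prepareStructName("ApexApiUsage", plural)
	fmt.Println(name) // "ApexAPIUsages" - "Api" is normalized by convertInitialisms
	if _, err := prepareStructName("", plural); err != nil {
		fmt.Println(err) // property name must not be empty string
	}
}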
func isNillable(p Property) bool {
return strings.Contains(p.Documentation, "Nillable")
}
func dedupe(elems []string, ignoreElem string) []string {
m := make(map[string]struct{})
var final []string
for _, elem := range elems {
if elem == ignoreElem {
continue
}
if _, exists := m[elem]; exists {
continue
}
m[elem] = struct{}{}
final = append(final, elem)
}
return final
} | toCamelCase | identifier_name |
stringutils.go | package codegen
import (
"errors"
"fmt"
"strings"
"github.com/gertd/go-pluralize"
)
var pluralizer = pluralize.NewClient()
// edgecaseDatatypes holds hardcoded types for specific properties which are problematic to parse
var edgecaseDatatypes = map[string]string{
"locale": "string",
"Permissions<PermissionName>": "string",
}
var dataTypes = map[string]string{
"null": "interface{}",
// string-ish types
"interface{}": "interface{}",
"[]string": "[]string",
"junctionidlist": "[]string",
"string": "string",
"bool": "bool",
"float64": "float64",
"id": "string",
"lookup": "string",
"reference": "string",
"text": "string",
"textarea": "string",
"combobox": "string",
"complexvalue": "string",
"apexclassmetadata": "interface{}",
"url": "string",
"email": "string",
"roll-up summary (sum invoice line)": "string",
"phone": "string",
"picklist": "string",
"manageablestate enumerated list": "string",
"actionemailsendertype enumerated list": "string",
"encryptedstring": "string",
"mns:workflowoutboundmessage": "string",
"mns:workflowtask": "string",
"mns:workflowrule": "string",
"mns:workflowfieldupdate": "string",
"mns:usercriteria": "string",
"mns:workskillrouting": "string",
"mns:timesheettemplate": "string",
"mns:compactlayout": "string",
"mns: compactlayout": "string",
"mns:customapplication": "string",
"mns:workflowalert": "string",
"mns:embeddedserviceconfig": "string",
"mns:embeddedservicefieldservice": "string",
"mns: customobject": "string",
"mns:eventdelivery": "string",
"mns:eventdescription": "string",
"mns:eventsubscription": "string",
"mns:embeddedserviceliveagent": "string",
"mns: flow": "string",
"https://developer.salesforce.com/docs/atlas.en-us.api_meta.meta/api_meta/meta_embeddedservicebranding.htm": "string",
"restricted picklist": "string",
"multipicklist": "string",
// boolean-ish types
"boolean": "bool",
"checkbox": "bool",
// numeric-ish types
"int": "int",
"currency": "float64",
"number": "int",
"double": "float64",
"long": "int64",
"int64": "int64",
"integer": "int",
"decimal": "float64",
"percent": "float64",
// complex types
"address": "types.Address",
"date": "types.Date",
"datetime": "types.Datetime",
"date/time": "types.Datetime",
"object": "json.RawMessage",
"queryresult": "types.QueryResult",
"anytype": "json.RawMessage",
"types.datetime": "types.Datetime",
"types.address": "types.Address",
"types.date": "types.Date",
"time.time": "time.Time",
"time": "types.Datetime",
"base64": "string",
"json.rawmessage": "json.RawMessage",
"any": "interface{}",
"types.queryresult": "types.QueryResult",
"symboltable": "json.RawMessage",
"apexcomponentmetadata": "json.RawMessage",
"entitydefinition": "json.RawMessage",
"fielddefinition": "json.RawMessage",
"apexresult": "json.RawMessage",
"heapdump": "json.RawMessage",
"soqlresult": "json.RawMessage",
"apexpagemetadata": "json.RawMessage",
"executeanonymousresult": "json.RawMessage",
"apextriggermetadata": "json.RawMessage",
"brandingset": "json.RawMessage",
"compactlayoutinfo": "json.RawMessage",
"deploydetails": "json.RawMessage",
"flexipage": "json.RawMessage",
"customfieldmetadata": "json.RawMessage",
"customfield": "json.RawMessage",
"customtabmetadata": "json.RawMessage",
"customlabel": "json.RawMessage",
"embeddedserviceconfig": "json.RawMessage",
"embeddedserviceflowconfig": "json.RawMessage",
"embeddedservicemenusettings": "json.RawMessage",
"publisher": "json.RawMessage",
"relationshipreferenceto": "json.RawMessage",
"flexipagemetadata": "json.RawMessage",
"flowdefinition": "json.RawMessage",
"flow": "json.RawMessage",
"customvalue[]": "json.RawMessage",
"array of typeextent": "json.RawMessage",
"mns:inboundnetworkconnection": "json.RawMessage",
"mns:keywordlist": "json.RawMessage",
"datacategorygroupreference": "json.RawMessage",
"mns:layout": "json.RawMessage",
"mns:lightningcomponentbundle": "json.RawMessage",
"user": "json.RawMessage",
"location": "json.RawMessage",
"lookupfilter": "json.RawMessage",
"mns: managedcontenttype": "json.RawMessage",
"mns:moderationrule": "json.RawMessage",
"operationparameters": "json.RawMessage",
"mns:outboundnetworkconnection": "json.RawMessage",
"subscriberpackageinstallerrors": "json.RawMessage",
"msn:pathassistant": "json.RawMessage",
"querylocator": "json.RawMessage",
"mns: recommendationstrategy": "json.RawMessage",
"mns:recordactiondeploymentchannel": "json.RawMessage",
"relationshipinfo": "json.RawMessage",
"queryresultmetadata": "json.RawMessage",
"searchlayoutbuttonsdisplayed": "json.RawMessage",
"standardvalue[]": "json.RawMessage",
"subscriberpackagecsptrustedsites": "json.RawMessage",
"properties": "json.RawMessage",
"array of constructor": "json.RawMessage",
"authprovider": "json.RawMessage",
"validationrule metadata": "json.RawMessage",
"any type": "json.RawMessage",
"recordtypessupported": "json.RawMessage",
"datatype": "json.RawMessage",
"mns:flowdefinition": "json.RawMessage",
"ratelimittimeperiod (enumeration of type string)": "json.RawMessage",
"subscriberpackageprofilemappings": "json.RawMessage",
"sobject": "json.RawMessage",
"mns:recordactiondeploymentcontext": "json.RawMessage",
"array of mapvalue": "json.RawMessage",
"searchlayoutfieldsdisplayed": "json.RawMessage",
"subscriberpackagedependencies": "json.RawMessage",
"array of externalreference": "json.RawMessage",
"userentityaccess": "json.RawMessage",
"moderationruletype (enumeration of type string)": "json.RawMessage",
"mns:recordactiondeployment": "json.RawMessage",
"raw": "json.RawMessage",
"array of symboltable": "json.RawMessage",
"mns:recordactionrecommendation": "json.RawMessage",
"subscriberpackageprofiles": "json.RawMessage",
"array of string": "json.RawMessage",
"mns:recordactionselectableitem": "json.RawMessage",
"subscriberpackageremotesitesettings": "json.RawMessage",
"array of method": "json.RawMessage",
"array of visibilitysymbol": "json.RawMessage",
"array of symbol": "json.RawMessage",
//
"[]*field": "[]*Field",
"[]int64": "[]int64",
"*tar": "*Tar",
"*zar": "*Zar",
"types.AlmostBool": "types.AlmostBool",
}
var nillableDataTypes = map[string]string{
"int": "types.NullableInt",
"bool": "types.NullableBool",
"string": "types.NullableString",
"float64": "types.NullableFloat64",
}
// prepareDatatype checks for property.Name specific overrides before calling convertType to
// produce a datatype for the given property
func prepareDatatype(propertyName, rawType string) (string, error) {
if rawType == "" {
potentialType, exists := edgecaseDatatypes[strings.ToLower(propertyName)]
if exists {
return potentialType, nil
}
}
return convertType(rawType)
}
// convertType attempts to convert a string representation of a salesforce datatype
// to a string representation of a golang datatype. Salesforce datatypes may be retrieved from 3
// locations:
//
// 1) the DataType field of a FieldDefinition from the tooling/query API
// 2) the HTML table within the object and tooling reference documentation pages
// 3) the HTML table within the field reference documentation page
//
// Importantly, these 3 sources use different string representations for the same type. For example,
// the object reference docs may call a foreign-key-like datatype a "reference" while the field reference docs may
// call it an "id". For that reason this method is prone to faults and should error hard, forcing users
// to ensure proper usage and outputs.
//
// Also, there is at least one instance of a typo which suggests that much of the reference documentation
// was handwritten or ad-libbed.
func convertType(str string) (string, error) {
if str == "" {
return "", fmt.Errorf("convertingType '%s' (empty string)", str)
}
dataType, exists := dataTypes[strings.ToLower(strings.TrimSpace(str))]
if exists {
return dataType, nil
}
return str, fmt.Errorf("convertingType '%s'", strings.ToLower(str))
}
// convertNillableType returns a nillable variant of the type produced by convertType
func convertNillableType(str string) (string, error) {
dataType, err := convertType(str)
if err != nil {
return "", err
}
nillableDataType, exists := nillableDataTypes[dataType]
if exists {
return nillableDataType, nil
}
return str, fmt.Errorf("convertingNillableType '%s'", dataType)
}
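// exampleConvertType is an illustrative sketch added for documentation purposes only; it is not
// part of the original generated file. It shows the lookup behaviour documented above: known
// Salesforce type names map to Go types, nillable variants get wrapper types, and unknown names
// error hard.
func exampleConvertType() {
	goType, _ := convertType("reference")
	fmt.Println(goType) // "string"
	nillable, _ := convertNillableType("boolean")
	fmt.Println(nillable) // "types.NullableBool"
	if _, err := convertType("not-a-real-type"); err != nil {
		fmt.Println(err) // convertingType 'not-a-real-type'
	}
}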
// commonInitialisms is a list of common initialisms.
// This list should contain a subset of `golang.org/x/lint/golint`.
var commonInitialisms = [][]string{
{"ACL", "Acl"},
{"API", "Api"},
{"ASCII", "Ascii"},
{"CPU", "Cpu"},
{"CSS", "Css"},
{"DNS", "Dns"},
{"EOF", "Eof"},
{"GUID", "Guid"},
{"HTML", "Html"},
{"HTTP", "Http"},
{"HTTPS", "Https"},
{"ID", "Id"},
{"IP", "Ip"},
{"JSON", "Json"},
{"LHS", "Lhs"},
{"QPS", "Qps"},
{"RAM", "Ram"},
{"RHS", "Rhs"},
{"RPC", "Rpc"},
{"SLA", "Sla"},
{"SMTP", "Smtp"},
{"SObject", "Sobject"},
{"SObjects", "Sobjects"},
{"SQL", "Sql"},
{"SSH", "Ssh"},
{"TCP", "Tcp"},
{"TLS", "Tls"},
{"TTL", "Ttl"},
{"UDP", "Udp"},
{"UI", "Ui"},
{"UID", "Uid"},
{"UUID", "Uuid"},
{"URI", "Uri"},
{"URL", "Url"},
{"UTF8", "Utf8"},
{"VM", "Vm"},
{"XML", "Xml"},
{"XMPP", "Xmpp"},
{"XSRF", "Xsrf"},
{"XSS", "Xss"},
}
// convertInitialisms returns a string converted to Go case.
func convertInitialisms(s string) string {
for i := 0; i < len(commonInitialisms); i++ {
s = strings.ReplaceAll(s, commonInitialisms[i][1], commonInitialisms[i][0])
}
return s
}
// revertInitialisms returns a string converted from Go case back to normal case.
func revertInitialisms(s string) string {
for i := 0; i < len(commonInitialisms); i++ {
s = strings.ReplaceAll(s, commonInitialisms[i][0], commonInitialisms[i][1])
}
return s
}
var keywords = map[string]string{
"break": "salesforce_break",
"default": "salesforce_default",
"func": "salesforce_func",
"interface": "salesforce_interface",
"select": "salesforce_select",
"case": "salesforce_case",
"defer": "salesforce_defer",
"go": "salesforce_go",
"map": "salesforce_map",
"struct": "salesforce_struct",
"chan": "salesforce_chan",
"else": "salesforce_else",
"goto": "salesforce_goto",
"package": "salesforce_package",
"switch": "salesforce_switch",
"const": "salesforce_const",
"fallthrough": "salesforce_fallthrough",
"if": "salesforce_if",
"range": "salesforce_range",
"type": "salesforce_type",
"continue": "salesforce_continue",
"for": "salesforce_for",
"import": "salesforce_import",
"return": "salesforce_return",
"var": "salesforce_var",
}
// stripReservedKeywords ensures no Golang keywords will be used in a package name
func stripReservedKeywords(str string) string {
if sanitizedValue, exists := keywords[str]; exists {
return sanitizedValue
}
return str
}
func toCamelInitCase(s string, initCase bool) string {
s = strings.TrimSpace(s)
if s == "" {
return s
}
n := strings.Builder{}
n.Grow(len(s))
capNext := initCase
for i, v := range []byte(s) {
vIsCap := v >= 'A' && v <= 'Z'
vIsLow := v >= 'a' && v <= 'z'
if capNext | else if i == 0 {
if vIsCap {
v += 'a'
v -= 'A'
}
}
if vIsCap || vIsLow {
n.WriteByte(v)
capNext = false
} else if vIsNum := v >= '0' && v <= '9'; vIsNum {
n.WriteByte(v)
capNext = true
} else {
capNext = v == '_' || v == ' ' || v == '-' || v == '.'
}
}
return n.String()
}
// ToCamelCase converts a string to CamelCase
func toCamelCase(s string) string {
return toCamelInitCase(s, true)
}
// ToLowerCamelCase converts a string to lowerCamelCase
func toLowerCamelCase(s string) string {
return toCamelInitCase(s, false)
}
func toFieldName(str string) string {
return convertInitialisms(toCamelCase(str))
}
// enforceLineLimit adds line breaks to a docstring that exceeds lineLimit
func enforceLineLimit(str string, lineLimit int) string {
	cumulativeLength := 0
	var parts []string
	for _, elem := range strings.Split(str, " ") {
		cumulativeLength += len(elem)
		if cumulativeLength > lineLimit {
			cumulativeLength = 0
parts = append(parts, "\n\t//")
}
parts = append(parts, elem)
}
return strings.Join(parts, " ")
}
// stripNewLinesAndTabs ...
func stripNewLinesAndTabs(str string) string {
str = strings.ReplaceAll(str, "\n", " ")
str = strings.ReplaceAll(str, "\t", "")
str = strings.ReplaceAll(str, string('\u200B'), "")
str = strings.ReplaceAll(str, string('\u200D'), "")
str = strings.ReplaceAll(str, string('\u2014'), "")
return strings.Join(strings.Fields(str), " ")
}
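// exampleDocstringCleanup is an illustrative sketch added for documentation purposes only; it is
// not part of the original generated file. It shows how scraped documentation text is normalized
// and then wrapped into Go comment continuations for the generated code.
func exampleDocstringCleanup() {
	doc := stripNewLinesAndTabs("The ID of the related\n\tAccount\u200B object.")
	fmt.Println(doc) // "The ID of the related Account object."
	// long docstrings get "\n\t//" inserted once the running word length exceeds the limit
	fmt.Println(enforceLineLimit(doc, 20))
}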
type field struct {
Createable bool
IsNillable bool
}
// requiredFields ...
func requiredFields(fields []*field) (results []*field) {
for i := 0; i < len(fields); i++ {
if fieldIsRequired(fields[i]) {
results = append(results, fields[i])
}
}
return results
}
func fieldIsRequired(f *field) bool {
// f.Required is not an actual field of Salesforce but a field this library sets on the
// SObjectDescribeField struct so that we may explicitly assert a field is required as a result
// of finding the string "required" within Salesforce's Object Reference documentation:
// i.e. : https://developer.salesforce.com/docs/atlas.en-us.object_reference.meta/object_reference/sforce_api_objects_lead.htm
return f.Createable && !f.IsNillable // && !f.DefaultedOnCreate)
}
// agreement https://en.wikipedia.org/wiki/Agreement_(linguistics)
type agreement int
const (
neither agreement = iota
singular
plural
)
// RelationshipName ...
func RelationshipName(name string) (string, error) {
return prepareStructName(name, plural)
}
var (
// ErrInvalidPropertyName ...
ErrInvalidPropertyName = errors.New("property name contains invalid characters")
// ErrPropertyKeyBlank ...
ErrPropertyKeyBlank = errors.New("property name must not be empty string")
protectedChars = map[rune]struct{}{
' ': {},
',': {},
'\n': {},
'-': {},
'.': {},
// U+200B is a 0-width space character that should be thrown into Mordor where it was probably forged
'\u200B': {},
'\u200D': {},
'\u2014': {},
'\u003C': {},
'>': {},
}
)
// prepareStructName ...
func prepareStructName(propertyKey string, pluralization agreement) (string, error) {
// 0) check for empty string
if propertyKey == "" {
return "", ErrPropertyKeyBlank
}
// 1a) check for a common typo i.e. "Postal Code" instead of "PostalCode"
parts := strings.Split(propertyKey, " ")
if len(parts) == 2 {
// edgecase for i.e. "Community (Zone)"
if strings.Contains(parts[1], "(") {
propertyKey = parts[0]
} else {
propertyKey = strings.Join(parts, "")
}
}
// 1b) check for restricted chars
var b strings.Builder
for _, char := range propertyKey {
if _, exists := protectedChars[char]; !exists {
b.Write([]byte(string(char)))
}
}
propertyKey = b.String()
	// 1c) defensively verify no restricted chars remain
for _, char := range propertyKey {
if _, exists := protectedChars[char]; exists {
return "", fmt.Errorf("invalid character '%U': %w", char, ErrInvalidPropertyName)
}
}
// 2) ensure proper pluralization
switch pluralization {
case singular:
propertyKey = pluralizer.Singular(propertyKey)
case plural:
propertyKey = pluralizer.Plural(propertyKey)
}
// 3) remove initialisms
propertyKey = convertInitialisms(propertyKey)
// 4) titlecase
return strings.Title(propertyKey), nil
}
func isNillable(p Property) bool {
return strings.Contains(p.Documentation, "Nillable")
}
func dedupe(elems []string, ignoreElem string) []string {
m := make(map[string]struct{})
var final []string
for _, elem := range elems {
if elem == ignoreElem {
continue
}
if _, exists := m[elem]; exists {
continue
}
m[elem] = struct{}{}
final = append(final, elem)
}
return final
} | {
if vIsLow {
v += 'A'
v -= 'a'
}
} | conditional_block |
stringutils.go | package codegen
import (
"errors"
"fmt"
"strings"
"github.com/gertd/go-pluralize"
)
var pluralizer = pluralize.NewClient()
// edgecaseDatatypes holds hardcoded types for specific properties which are problematic to parse
var edgecaseDatatypes = map[string]string{
"locale": "string",
"Permissions<PermissionName>": "string",
}
var dataTypes = map[string]string{
"null": "interface{}",
// string-ish types
"interface{}": "interface{}",
"[]string": "[]string",
"junctionidlist": "[]string",
"string": "string",
"bool": "bool",
"float64": "float64",
"id": "string",
"lookup": "string",
"reference": "string",
"text": "string",
"textarea": "string",
"combobox": "string",
"complexvalue": "string",
"apexclassmetadata": "interface{}",
"url": "string",
"email": "string",
"roll-up summary (sum invoice line)": "string",
"phone": "string",
"picklist": "string",
"manageablestate enumerated list": "string",
"actionemailsendertype enumerated list": "string",
"encryptedstring": "string",
"mns:workflowoutboundmessage": "string",
"mns:workflowtask": "string",
"mns:workflowrule": "string",
"mns:workflowfieldupdate": "string",
"mns:usercriteria": "string",
"mns:workskillrouting": "string",
"mns:timesheettemplate": "string",
"mns:compactlayout": "string",
"mns: compactlayout": "string",
"mns:customapplication": "string",
"mns:workflowalert": "string",
"mns:embeddedserviceconfig": "string",
"mns:embeddedservicefieldservice": "string",
"mns: customobject": "string",
"mns:eventdelivery": "string",
"mns:eventdescription": "string",
"mns:eventsubscription": "string",
"mns:embeddedserviceliveagent": "string",
"mns: flow": "string",
"https://developer.salesforce.com/docs/atlas.en-us.api_meta.meta/api_meta/meta_embeddedservicebranding.htm": "string",
"restricted picklist": "string",
"multipicklist": "string",
// boolean-ish types
"boolean": "bool",
"checkbox": "bool",
// numeric-ish types
"int": "int",
"currency": "float64",
"number": "int",
"double": "float64",
"long": "int64",
"int64": "int64",
"integer": "int",
"decimal": "float64",
"percent": "float64",
// complex types
"address": "types.Address",
"date": "types.Date",
"datetime": "types.Datetime",
"date/time": "types.Datetime",
"object": "json.RawMessage",
"queryresult": "types.QueryResult",
"anytype": "json.RawMessage",
"types.datetime": "types.Datetime",
"types.address": "types.Address",
"types.date": "types.Date",
"time.time": "time.Time",
"time": "types.Datetime",
"base64": "string",
"json.rawmessage": "json.RawMessage",
"any": "interface{}",
"types.queryresult": "types.QueryResult",
"symboltable": "json.RawMessage",
"apexcomponentmetadata": "json.RawMessage",
"entitydefinition": "json.RawMessage",
"fielddefinition": "json.RawMessage",
"apexresult": "json.RawMessage",
"heapdump": "json.RawMessage",
"soqlresult": "json.RawMessage",
"apexpagemetadata": "json.RawMessage",
"executeanonymousresult": "json.RawMessage",
"apextriggermetadata": "json.RawMessage",
"brandingset": "json.RawMessage",
"compactlayoutinfo": "json.RawMessage",
"deploydetails": "json.RawMessage",
"flexipage": "json.RawMessage",
"customfieldmetadata": "json.RawMessage",
"customfield": "json.RawMessage",
"customtabmetadata": "json.RawMessage",
"customlabel": "json.RawMessage",
"embeddedserviceconfig": "json.RawMessage",
"embeddedserviceflowconfig": "json.RawMessage",
"embeddedservicemenusettings": "json.RawMessage",
"publisher": "json.RawMessage",
"relationshipreferenceto": "json.RawMessage",
"flexipagemetadata": "json.RawMessage",
"flowdefinition": "json.RawMessage",
"flow": "json.RawMessage",
"customvalue[]": "json.RawMessage",
"array of typeextent": "json.RawMessage",
"mns:inboundnetworkconnection": "json.RawMessage",
"mns:keywordlist": "json.RawMessage",
"datacategorygroupreference": "json.RawMessage",
"mns:layout": "json.RawMessage",
"mns:lightningcomponentbundle": "json.RawMessage",
"user": "json.RawMessage",
"location": "json.RawMessage",
"lookupfilter": "json.RawMessage",
"mns: managedcontenttype": "json.RawMessage",
"mns:moderationrule": "json.RawMessage",
"operationparameters": "json.RawMessage",
"mns:outboundnetworkconnection": "json.RawMessage",
"subscriberpackageinstallerrors": "json.RawMessage",
"msn:pathassistant": "json.RawMessage",
"querylocator": "json.RawMessage",
"mns: recommendationstrategy": "json.RawMessage",
"mns:recordactiondeploymentchannel": "json.RawMessage",
"relationshipinfo": "json.RawMessage",
"queryresultmetadata": "json.RawMessage",
"searchlayoutbuttonsdisplayed": "json.RawMessage",
"standardvalue[]": "json.RawMessage",
"subscriberpackagecsptrustedsites": "json.RawMessage",
"properties": "json.RawMessage",
"array of constructor": "json.RawMessage",
"authprovider": "json.RawMessage",
"validationrule metadata": "json.RawMessage",
"any type": "json.RawMessage",
"recordtypessupported": "json.RawMessage",
"datatype": "json.RawMessage",
"mns:flowdefinition": "json.RawMessage",
"ratelimittimeperiod (enumeration of type string)": "json.RawMessage",
"subscriberpackageprofilemappings": "json.RawMessage",
"sobject": "json.RawMessage",
"mns:recordactiondeploymentcontext": "json.RawMessage",
"array of mapvalue": "json.RawMessage",
"searchlayoutfieldsdisplayed": "json.RawMessage",
"subscriberpackagedependencies": "json.RawMessage",
"array of externalreference": "json.RawMessage",
"userentityaccess": "json.RawMessage",
"moderationruletype (enumeration of type string)": "json.RawMessage",
"mns:recordactiondeployment": "json.RawMessage",
"raw": "json.RawMessage",
"array of symboltable": "json.RawMessage",
"mns:recordactionrecommendation": "json.RawMessage",
"subscriberpackageprofiles": "json.RawMessage",
"array of string": "json.RawMessage",
"mns:recordactionselectableitem": "json.RawMessage",
"subscriberpackageremotesitesettings": "json.RawMessage",
"array of method": "json.RawMessage",
"array of visibilitysymbol": "json.RawMessage",
"array of symbol": "json.RawMessage",
//
"[]*field": "[]*Field",
"[]int64": "[]int64",
"*tar": "*Tar",
"*zar": "*Zar",
"types.AlmostBool": "types.AlmostBool",
}
var nillableDataTypes = map[string]string{
"int": "types.NullableInt",
"bool": "types.NullableBool",
"string": "types.NullableString",
"float64": "types.NullableFloat64",
}
// prepareDatatype checks for property.Name specific overrides before calling convertType to
// produce a datatype for the given property
func prepareDatatype(propertyName, rawType string) (string, error) {
if rawType == "" {
potentialType, exists := edgecaseDatatypes[strings.ToLower(propertyName)]
if exists {
return potentialType, nil
}
}
return convertType(rawType)
}
// convertType attempts to convert a string representation of a salesforce datatype
// to a string representation of a golang datatype. Salesforce datatypes may be retrieved from 3
// locations:
//
// 1) the DataType field of a FieldDefinition from the tooling/query API
// 2) the HTML table within the object and tooling reference documentation pages
// 3) the HTML table within the field reference documentation page
//
// Importantly, these 3 sources use different string representations for the same type. For example,
// the object reference docs may call a foreign-key-like datatype a "reference" while the field reference docs may
// call it an "id". For that reason this method is prone to faults and should error hard, forcing users
// to ensure proper usage and outputs.
//
// Also, there is at least one instance of a typo which suggests that much of the reference documentation
// was handwritten or ad-libbed.
func convertType(str string) (string, error) {
if str == "" {
return "", fmt.Errorf("convertingType '%s' (empty string)", str)
}
dataType, exists := dataTypes[strings.ToLower(strings.TrimSpace(str))]
if exists {
return dataType, nil
}
return str, fmt.Errorf("convertingType '%s'", strings.ToLower(str))
}
// convertNillableType returns a nillable variant of the type produced by convertType
func convertNillableType(str string) (string, error) {
dataType, err := convertType(str)
if err != nil {
return "", err
}
nillableDataType, exists := nillableDataTypes[dataType]
if exists {
return nillableDataType, nil
}
return str, fmt.Errorf("convertingNillableType '%s'", dataType)
}
// commonInitialisms is a list of common initialisms.
// This list should contain a subset of `golang.org/x/lint/golint`.
var commonInitialisms = [][]string{
{"ACL", "Acl"},
{"API", "Api"},
{"ASCII", "Ascii"},
{"CPU", "Cpu"},
{"CSS", "Css"},
{"DNS", "Dns"},
{"EOF", "Eof"},
{"GUID", "Guid"},
{"HTML", "Html"},
{"HTTP", "Http"},
{"HTTPS", "Https"},
{"ID", "Id"},
{"IP", "Ip"},
{"JSON", "Json"},
{"LHS", "Lhs"},
{"QPS", "Qps"},
{"RAM", "Ram"},
{"RHS", "Rhs"},
{"RPC", "Rpc"},
{"SLA", "Sla"},
{"SMTP", "Smtp"},
{"SObject", "Sobject"},
{"SObjects", "Sobjects"},
{"SQL", "Sql"},
{"SSH", "Ssh"},
{"TCP", "Tcp"},
{"TLS", "Tls"},
{"TTL", "Ttl"},
{"UDP", "Udp"},
{"UI", "Ui"},
{"UID", "Uid"},
{"UUID", "Uuid"},
{"URI", "Uri"},
{"URL", "Url"},
{"UTF8", "Utf8"},
{"VM", "Vm"},
{"XML", "Xml"},
{"XMPP", "Xmpp"},
{"XSRF", "Xsrf"},
{"XSS", "Xss"},
}
// convertInitialisms returns a string converted to Go case.
func convertInitialisms(s string) string {
for i := 0; i < len(commonInitialisms); i++ {
s = strings.ReplaceAll(s, commonInitialisms[i][1], commonInitialisms[i][0])
}
return s
}
// revertInitialisms returns a string converted from Go case back to normal case.
func revertInitialisms(s string) string |
var keywords = map[string]string{
"break": "salesforce_break",
"default": "salesforce_default",
"func": "salesforce_func",
"interface": "salesforce_interface",
"select": "salesforce_select",
"case": "salesforce_case",
"defer": "salesforce_defer",
"go": "salesforce_go",
"map": "salesforce_map",
"struct": "salesforce_struct",
"chan": "salesforce_chan",
"else": "salesforce_else",
"goto": "salesforce_goto",
"package": "salesforce_package",
"switch": "salesforce_switch",
"const": "salesforce_const",
"fallthrough": "salesforce_fallthrough",
"if": "salesforce_if",
"range": "salesforce_range",
"type": "salesforce_type",
"continue": "salesforce_continue",
"for": "salesforce_for",
"import": "salesforce_import",
"return": "salesforce_return",
"var": "salesforce_var",
}
// stripReservedKeywords ensures no Golang keywords will be used in a package name
func stripReservedKeywords(str string) string {
if sanitizedValue, exists := keywords[str]; exists {
return sanitizedValue
}
return str
}
func toCamelInitCase(s string, initCase bool) string {
s = strings.TrimSpace(s)
if s == "" {
return s
}
n := strings.Builder{}
n.Grow(len(s))
capNext := initCase
for i, v := range []byte(s) {
vIsCap := v >= 'A' && v <= 'Z'
vIsLow := v >= 'a' && v <= 'z'
if capNext {
if vIsLow {
v += 'A'
v -= 'a'
}
} else if i == 0 {
if vIsCap {
v += 'a'
v -= 'A'
}
}
if vIsCap || vIsLow {
n.WriteByte(v)
capNext = false
} else if vIsNum := v >= '0' && v <= '9'; vIsNum {
n.WriteByte(v)
capNext = true
} else {
capNext = v == '_' || v == ' ' || v == '-' || v == '.'
}
}
return n.String()
}
// ToCamelCase converts a string to CamelCase
func toCamelCase(s string) string {
return toCamelInitCase(s, true)
}
// ToLowerCamelCase converts a string to lowerCamelCase
func toLowerCamelCase(s string) string {
return toCamelInitCase(s, false)
}
func toFieldName(str string) string {
return convertInitialisms(toCamelCase(str))
}
// enforceLineLimit adds line breaks to a docstring that exceeds lineLimit
func enforceLineLimit(str string, lineLimit int) string {
	cumulativeLength := 0
	var parts []string
	for _, elem := range strings.Split(str, " ") {
		cumulativeLength += len(elem)
		if cumulativeLength > lineLimit {
			cumulativeLength = 0
parts = append(parts, "\n\t//")
}
parts = append(parts, elem)
}
return strings.Join(parts, " ")
}
// stripNewLinesAndTabs ...
func stripNewLinesAndTabs(str string) string {
str = strings.ReplaceAll(str, "\n", " ")
str = strings.ReplaceAll(str, "\t", "")
str = strings.ReplaceAll(str, string('\u200B'), "")
str = strings.ReplaceAll(str, string('\u200D'), "")
str = strings.ReplaceAll(str, string('\u2014'), "")
return strings.Join(strings.Fields(str), " ")
}
type field struct {
Createable bool
IsNillable bool
}
// requiredFields ...
func requiredFields(fields []*field) (results []*field) {
for i := 0; i < len(fields); i++ {
if fieldIsRequired(fields[i]) {
results = append(results, fields[i])
}
}
return results
}
func fieldIsRequired(f *field) bool {
// f.Required is not an actual field of Salesforce but a field this library sets on the
// SObjectDescribeField struct so that we may explicitly assert a field is required as a result
// of finding the string "required" within Salesforce's Object Reference documentation:
// i.e. : https://developer.salesforce.com/docs/atlas.en-us.object_reference.meta/object_reference/sforce_api_objects_lead.htm
return f.Createable && !f.IsNillable // && !f.DefaultedOnCreate)
}
// agreement https://en.wikipedia.org/wiki/Agreement_(linguistics)
type agreement int
const (
neither agreement = iota
singular
plural
)
// RelationshipName ...
func RelationshipName(name string) (string, error) {
return prepareStructName(name, plural)
}
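// exampleRelationshipName is an illustrative sketch added for documentation purposes only; it is
// not part of the original generated file. RelationshipName pluralizes and Go-cases a child
// relationship label so it can be used as a generated identifier.
func exampleRelationshipName() {
	name, _ := RelationshipName("Opportunity Line Item")
	fmt.Println(name) // "OpportunityLineItems"
}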
var (
// ErrInvalidPropertyName ...
ErrInvalidPropertyName = errors.New("property name contains invalid characters")
// ErrPropertyKeyBlank ...
ErrPropertyKeyBlank = errors.New("property name must not be empty string")
protectedChars = map[rune]struct{}{
' ': {},
',': {},
'\n': {},
'-': {},
'.': {},
// U+200B is a 0-width space character that should be thrown into Mordor where it was probably forged
'\u200B': {},
'\u200D': {},
'\u2014': {},
'\u003C': {},
'>': {},
}
)
// prepareStructName ...
func prepareStructName(propertyKey string, pluralization agreement) (string, error) {
// 0) check for empty string
if propertyKey == "" {
return "", ErrPropertyKeyBlank
}
// 1a) check for a common typo i.e. "Postal Code" instead of "PostalCode"
parts := strings.Split(propertyKey, " ")
if len(parts) == 2 {
// edgecase for i.e. "Community (Zone)"
if strings.Contains(parts[1], "(") {
propertyKey = parts[0]
} else {
propertyKey = strings.Join(parts, "")
}
}
// 1b) check for restricted chars
var b strings.Builder
for _, char := range propertyKey {
if _, exists := protectedChars[char]; !exists {
b.Write([]byte(string(char)))
}
}
propertyKey = b.String()
	// 1c) defensively verify no restricted chars remain
for _, char := range propertyKey {
if _, exists := protectedChars[char]; exists {
return "", fmt.Errorf("invalid character '%U': %w", char, ErrInvalidPropertyName)
}
}
// 2) ensure proper pluralization
switch pluralization {
case singular:
propertyKey = pluralizer.Singular(propertyKey)
case plural:
propertyKey = pluralizer.Plural(propertyKey)
}
// 3) remove initialisms
propertyKey = convertInitialisms(propertyKey)
// 4) titlecase
return strings.Title(propertyKey), nil
}
func isNillable(p Property) bool {
return strings.Contains(p.Documentation, "Nillable")
}
func dedupe(elems []string, ignoreElem string) []string {
m := make(map[string]struct{})
var final []string
for _, elem := range elems {
if elem == ignoreElem {
continue
}
if _, exists := m[elem]; exists {
continue
}
m[elem] = struct{}{}
final = append(final, elem)
}
return final
} | {
for i := 0; i < len(commonInitialisms); i++ {
s = strings.ReplaceAll(s, commonInitialisms[i][0], commonInitialisms[i][1])
}
return s
} | identifier_body |
deepracer_racetrack_env.py | from __future__ import print_function
import bisect
import boto3
import json
import logging
import math
import os
import time
import gym
import numpy as np
from gym import spaces
from PIL import Image
logger = logging.getLogger(__name__)
# Type of worker
SIMULATION_WORKER = "SIMULATION_WORKER"
SAGEMAKER_TRAINING_WORKER = "SAGEMAKER_TRAINING_WORKER"
node_type = os.environ.get("NODE_TYPE", SIMULATION_WORKER)
if node_type == SIMULATION_WORKER:
import rospy
from ackermann_msgs.msg import AckermannDriveStamped
from gazebo_msgs.msg import ModelState
from gazebo_msgs.srv import GetLinkState, GetModelState, SetModelState
from scipy.spatial.transform import Rotation
from sensor_msgs.msg import Image as sensor_image
from shapely.geometry import Point, Polygon
from shapely.geometry.polygon import LinearRing, LineString
# Type of job
TRAINING_JOB = 'TRAINING'
EVALUATION_JOB = 'EVALUATION'
# Sleep intervals
SLEEP_AFTER_RESET_TIME_IN_SECOND = 0.5
SLEEP_BETWEEN_ACTION_AND_REWARD_CALCULATION_TIME_IN_SECOND = 0.1
SLEEP_WAITING_FOR_IMAGE_TIME_IN_SECOND = 0.01
# Dimensions of the input training image
TRAINING_IMAGE_SIZE = (160, 120)
# Local offset of the front of the car
RELATIVE_POSITION_OF_FRONT_OF_CAR = [0.14, 0, 0]
# Normalized track distance to move with each reset
ROUND_ROBIN_ADVANCE_DIST = 0.05
# Reward to give the car when it "crashes"
CRASHED = 1e-8
### Gym Env ###
class DeepRacerRacetrackEnv(gym.Env):
def __init__(self):
# Create the observation space
img_width = TRAINING_IMAGE_SIZE[0]
img_height = TRAINING_IMAGE_SIZE[1]
self.observation_space = spaces.Box(low=0, high=255, shape=(img_height, img_width, 3), dtype=np.uint8)
# Create the action space
self.action_space = spaces.Box(low=np.array([-1, 0]), high=np.array([+1, +1]), dtype=np.float32)
if node_type == SIMULATION_WORKER:
# ROS initialization
rospy.init_node('rl_coach', anonymous=True)
rospy.Subscriber('/camera/zed/rgb/image_rect_color', sensor_image, self.callback_image)
self.ack_publisher = rospy.Publisher('/vesc/low_level/ackermann_cmd_mux/output',
AckermannDriveStamped, queue_size=100)
self.set_model_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
self.get_model_state = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
self.get_link_state = rospy.ServiceProxy('/gazebo/get_link_state', GetLinkState)
# Read in parameters
self.world_name = rospy.get_param('WORLD_NAME')
self.job_type = rospy.get_param('JOB_TYPE')
self.aws_region = rospy.get_param('AWS_REGION')
self.metrics_s3_bucket = rospy.get_param('METRICS_S3_BUCKET')
self.metrics_s3_object_key = rospy.get_param('METRICS_S3_OBJECT_KEY')
self.metrics = []
self.simulation_job_arn = 'arn:aws:robomaker:' + self.aws_region + ':' + \
rospy.get_param('ROBOMAKER_SIMULATION_JOB_ACCOUNT_ID') + \
':simulation-job/' + rospy.get_param('AWS_ROBOMAKER_SIMULATION_JOB_ID')
if self.job_type == TRAINING_JOB:
from custom_files.customer_reward_function import reward_function
self.reward_function = reward_function
self.metric_name = rospy.get_param('METRIC_NAME')
self.metric_namespace = rospy.get_param('METRIC_NAMESPACE')
self.training_job_arn = rospy.get_param('TRAINING_JOB_ARN')
self.target_number_of_episodes = rospy.get_param('NUMBER_OF_EPISODES')
self.target_reward_score = rospy.get_param('TARGET_REWARD_SCORE')
else:
from markov.defaults import reward_function
self.reward_function = reward_function
self.number_of_trials = 0
self.target_number_of_trials = rospy.get_param('NUMBER_OF_TRIALS')
# Read in the waypoints
BUNDLE_CURRENT_PREFIX = os.environ.get("BUNDLE_CURRENT_PREFIX", None)
if not BUNDLE_CURRENT_PREFIX:
raise ValueError("Cannot get BUNDLE_CURRENT_PREFIX")
route_file_name = os.path.join(BUNDLE_CURRENT_PREFIX,
'install', 'deepracer_simulation', 'share',
'deepracer_simulation', 'routes', '{}.npy'.format(self.world_name))
waypoints = np.load(route_file_name)
self.is_loop = np.all(waypoints[0,:] == waypoints[-1,:])
if self.is_loop:
self.center_line = LinearRing(waypoints[:,0:2])
self.inner_border = LinearRing(waypoints[:,2:4])
self.outer_border = LinearRing(waypoints[:,4:6])
self.road_poly = Polygon(self.outer_border, [self.inner_border])
else:
self.center_line = LineString(waypoints[:,0:2])
self.inner_border = LineString(waypoints[:,2:4])
self.outer_border = LineString(waypoints[:,4:6])
self.road_poly = Polygon(np.vstack((self.outer_border, np.flipud(self.inner_border))))
self.center_dists = [self.center_line.project(Point(p), normalized=True) for p in self.center_line.coords[:-1]] + [1.0]
self.track_length = self.center_line.length
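            # Illustrative note added for documentation (not part of the original file): each row of
            # the routes .npy file is assumed to hold six values,
            #   [center_x, center_y, inner_x, inner_y, outer_x, outer_y],
            # which is why the slices [:,0:2], [:,2:4] and [:,4:6] above build the center line and
            # the two borders; a looped track repeats its first waypoint as its last one.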
# Initialize state data
self.episodes = 0
self.start_dist = 0.0
self.round_robin = (self.job_type == TRAINING_JOB)
self.is_simulation_done = False
self.image = None
self.steering_angle = 0
self.speed = 0
self.action_taken = 0
self.prev_progress = 0
self.prev_point = Point(0, 0)
self.prev_point_2 = Point(0, 0)
self.next_state = None
self.reward = None
self.reward_in_episode = 0
self.done = False
self.steps = 0
self.simulation_start_time = 0
self.reverse_dir = False
def reset(self):
if node_type == SAGEMAKER_TRAINING_WORKER:
return self.observation_space.sample()
# Simulation is done - so RoboMaker will start to shut down the app.
        # Until RoboMaker shuts down the app, do nothing more; otherwise metrics may show unexpected data.
if (node_type == SIMULATION_WORKER) and self.is_simulation_done:
while True:
time.sleep(1)
self.image = None
self.steering_angle = 0
self.speed = 0
self.action_taken = 0
self.prev_progress = 0
self.prev_point = Point(0, 0)
self.prev_point_2 = Point(0, 0)
self.next_state = None
self.reward = None
self.reward_in_episode = 0
self.done = False
# Reset the car and record the simulation start time
self.send_action(0, 0)
self.racecar_reset()
time.sleep(SLEEP_AFTER_RESET_TIME_IN_SECOND)
self.steps = 0
self.simulation_start_time = time.time()
# Compute the initial state
self.infer_reward_state(0, 0)
return self.next_state
def racecar_reset(self):
rospy.wait_for_service('/gazebo/set_model_state')
# Compute the starting position and heading
next_point_index = bisect.bisect(self.center_dists, self.start_dist)
start_point = self.center_line.interpolate(self.start_dist, normalized=True)
start_yaw = math.atan2(
self.center_line.coords[next_point_index][1] - start_point.y,
self.center_line.coords[next_point_index][0] - start_point.x)
start_quaternion = Rotation.from_euler('zyx', [start_yaw, 0, 0]).as_quat()
# Construct the model state and send to Gazebo
modelState = ModelState()
modelState.model_name = 'racecar'
modelState.pose.position.x = start_point.x
modelState.pose.position.y = start_point.y
modelState.pose.position.z = 0
modelState.pose.orientation.x = start_quaternion[0]
modelState.pose.orientation.y = start_quaternion[1]
modelState.pose.orientation.z = start_quaternion[2]
modelState.pose.orientation.w = start_quaternion[3]
modelState.twist.linear.x = 0
modelState.twist.linear.y = 0
modelState.twist.linear.z = 0
modelState.twist.angular.x = 0
modelState.twist.angular.y = 0
modelState.twist.angular.z = 0
self.set_model_state(modelState)
def | (self, action):
if node_type == SAGEMAKER_TRAINING_WORKER:
return self.observation_space.sample(), 0, False, {}
# Initialize next state, reward, done flag
self.next_state = None
self.reward = None
self.done = False
# Send this action to Gazebo and increment the step count
self.steering_angle = float(action[0])
self.speed = float(action[1])
self.send_action(self.steering_angle, self.speed)
time.sleep(SLEEP_BETWEEN_ACTION_AND_REWARD_CALCULATION_TIME_IN_SECOND)
self.steps += 1
# Compute the next state and reward
self.infer_reward_state(self.steering_angle, self.speed)
return self.next_state, self.reward, self.done, {}
def callback_image(self, data):
self.image = data
def send_action(self, steering_angle, speed):
ack_msg = AckermannDriveStamped()
ack_msg.header.stamp = rospy.Time.now()
ack_msg.drive.steering_angle = steering_angle
ack_msg.drive.speed = speed
self.ack_publisher.publish(ack_msg)
def infer_reward_state(self, steering_angle, speed):
rospy.wait_for_service('/gazebo/get_model_state')
rospy.wait_for_service('/gazebo/get_link_state')
        # Wait until we have an image from the camera
# btown TODO: Incorporate feedback from callejae@ here (CR-6434645 rev1)
while not self.image:
time.sleep(SLEEP_WAITING_FOR_IMAGE_TIME_IN_SECOND)
# Read model state from Gazebo
model_state = self.get_model_state('racecar', '')
model_orientation = Rotation.from_quat([
model_state.pose.orientation.x,
model_state.pose.orientation.y,
model_state.pose.orientation.z,
model_state.pose.orientation.w])
model_location = np.array([
model_state.pose.position.x,
model_state.pose.position.y,
model_state.pose.position.z]) + \
model_orientation.apply(RELATIVE_POSITION_OF_FRONT_OF_CAR)
model_point = Point(model_location[0], model_location[1])
model_heading = model_orientation.as_euler('zyx')[0]
# Read the wheel locations from Gazebo
left_rear_wheel_state = self.get_link_state('racecar::left_rear_wheel', '')
left_front_wheel_state = self.get_link_state('racecar::left_front_wheel', '')
right_rear_wheel_state = self.get_link_state('racecar::right_rear_wheel', '')
right_front_wheel_state = self.get_link_state('racecar::right_front_wheel', '')
wheel_points = [
Point(left_rear_wheel_state.link_state.pose.position.x,
left_rear_wheel_state.link_state.pose.position.y),
Point(left_front_wheel_state.link_state.pose.position.x,
left_front_wheel_state.link_state.pose.position.y),
Point(right_rear_wheel_state.link_state.pose.position.x,
right_rear_wheel_state.link_state.pose.position.y),
Point(right_front_wheel_state.link_state.pose.position.x,
right_front_wheel_state.link_state.pose.position.y)
]
# Project the current location onto the center line and find nearest points
current_dist = self.center_line.project(model_point, normalized=True)
next_waypoint_index = max(0, min(bisect.bisect(self.center_dists, current_dist), len(self.center_dists) - 1))
prev_waypoint_index = next_waypoint_index - 1
distance_from_next = model_point.distance(Point(self.center_line.coords[next_waypoint_index]))
distance_from_prev = model_point.distance(Point(self.center_line.coords[prev_waypoint_index]))
closest_waypoint_index = (prev_waypoint_index, next_waypoint_index)[distance_from_next < distance_from_prev]
# Compute distance from center and road width
nearest_point_center = self.center_line.interpolate(current_dist, normalized=True)
nearest_point_inner = self.inner_border.interpolate(self.inner_border.project(nearest_point_center))
nearest_point_outer = self.outer_border.interpolate(self.outer_border.project(nearest_point_center))
distance_from_center = nearest_point_center.distance(model_point)
distance_from_inner = nearest_point_inner.distance(model_point)
distance_from_outer = nearest_point_outer.distance(model_point)
track_width = nearest_point_inner.distance(nearest_point_outer)
is_left_of_center = (distance_from_outer < distance_from_inner) if self.reverse_dir \
else (distance_from_inner < distance_from_outer)
# Convert current progress to be [0,100] starting at the initial waypoint
current_progress = current_dist - self.start_dist
if current_progress < 0.0: current_progress = current_progress + 1.0
current_progress = 100 * current_progress
if current_progress < self.prev_progress:
# Either: (1) we wrapped around and have finished the track,
delta1 = current_progress + 100 - self.prev_progress
# or (2) for some reason the car went backwards (this should be rare)
delta2 = self.prev_progress - current_progress
current_progress = (self.prev_progress, 100)[delta1 < delta2]
# Car is off track if all wheels are outside the borders
wheel_on_track = [self.road_poly.contains(p) for p in wheel_points]
all_wheels_on_track = all(wheel_on_track)
any_wheels_on_track = any(wheel_on_track)
# Compute the reward
if any_wheels_on_track:
done = False
params = {
'all_wheels_on_track': all_wheels_on_track,
'x': model_point.x,
'y': model_point.y,
'heading': model_heading * 180.0 / math.pi,
'distance_from_center': distance_from_center,
'progress': current_progress,
'steps': self.steps,
'speed': speed,
'steering_angle': steering_angle * 180.0 / math.pi,
'track_width': track_width,
'waypoints': list(self.center_line.coords),
'closest_waypoints': [prev_waypoint_index, next_waypoint_index],
'is_left_of_center': is_left_of_center,
'is_reversed': self.reverse_dir
}
reward = self.reward_function(params)
else:
done = True
reward = CRASHED
# Reset if the car position hasn't changed in the last 2 steps
if min(model_point.distance(self.prev_point), model_point.distance(self.prev_point_2)) <= 0.0001:
done = True
reward = CRASHED # stuck
# Simulation jobs are done when progress reaches 100
if current_progress >= 100:
done = True
# Keep data from the previous step around
self.prev_point_2 = self.prev_point
self.prev_point = model_point
self.prev_progress = current_progress
# Read the image and resize to get the state
image = Image.frombytes('RGB', (self.image.width, self.image.height), self.image.data, 'raw', 'RGB', 0, 1)
image = image.resize(TRAINING_IMAGE_SIZE, resample=2)
state = np.array(image)
# Set the next state, reward, and done flag
self.next_state = state
self.reward = reward
self.reward_in_episode += reward
self.done = done
# Trace logs to help us debug and visualize the training runs
# btown TODO: This should be written to S3, not to CWL.
stdout_ = 'SIM_TRACE_LOG:%d,%d,%.4f,%.4f,%.4f,%.2f,%.2f,%d,%.4f,%s,%s,%.4f,%d,%.2f,%s\n' % (
self.episodes, self.steps, model_location[0], model_location[1], model_heading,
self.steering_angle,
self.speed,
self.action_taken,
self.reward,
self.done,
all_wheels_on_track,
current_progress,
closest_waypoint_index,
self.track_length,
time.time())
print(stdout_)
# Terminate this episode when ready
if self.done and node_type == SIMULATION_WORKER:
self.finish_episode(current_progress)
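    # The method below is an illustrative sketch added for documentation and is not part of the
    # original environment. It shows the shape of a minimal customer reward_function consuming the
    # params dict assembled in infer_reward_state above (only keys populated there are used).
    def example_reward_function(self, params):
        if not params['all_wheels_on_track']:
            return float(CRASHED)
        # reward decays linearly from 1.0 at the center line to 0.0 at the track border
        half_width = 0.5 * params['track_width']
        return float(max(0.0, 1.0 - params['distance_from_center'] / half_width))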
def finish_episode(self, progress):
# Stop the car from moving
self.send_action(0, 0)
# Increment episode count, update start dist for round robin
self.episodes += 1
if self.round_robin:
self.start_dist = (self.start_dist + ROUND_ROBIN_ADVANCE_DIST) % 1.0
# Update metrics based on job type
if self.job_type == TRAINING_JOB:
self.send_reward_to_cloudwatch(self.reward_in_episode)
self.update_training_metrics()
self.write_metrics_to_s3()
if self.is_training_done():
self.cancel_simulation_job()
elif self.job_type == EVALUATION_JOB:
self.number_of_trials += 1
self.update_eval_metrics(progress)
self.write_metrics_to_s3()
if self.is_evaluation_done():
self.cancel_simulation_job()
def update_eval_metrics(self, progress):
eval_metric = {}
eval_metric['completion_percentage'] = int(progress)
eval_metric['metric_time'] = int(round(time.time() * 1000))
eval_metric['start_time'] = int(round(self.simulation_start_time * 1000))
eval_metric['elapsed_time_in_milliseconds'] = int(round((time.time() - self.simulation_start_time) * 1000))
eval_metric['trial'] = int(self.number_of_trials)
self.metrics.append(eval_metric)
def update_training_metrics(self):
training_metric = {}
training_metric['reward_score'] = int(round(self.reward_in_episode))
training_metric['metric_time'] = int(round(time.time() * 1000))
training_metric['start_time'] = int(round(self.simulation_start_time * 1000))
training_metric['elapsed_time_in_milliseconds'] = int(round((time.time() - self.simulation_start_time) * 1000))
training_metric['episode'] = int(self.episodes)
self.metrics.append(training_metric)
def write_metrics_to_s3(self):
session = boto3.session.Session()
s3_url = os.environ.get('S3_ENDPOINT_URL')
s3_client = session.client('s3', region_name=self.aws_region, endpoint_url=s3_url)
metrics_body = json.dumps({'metrics': self.metrics})
s3_client.put_object(
Bucket=self.metrics_s3_bucket,
Key=self.metrics_s3_object_key,
Body=bytes(metrics_body, encoding='utf-8')
)
def is_evaluation_done(self):
if ((self.target_number_of_trials > 0) and (self.target_number_of_trials == self.number_of_trials)):
self.is_simulation_done = True
return self.is_simulation_done
def is_training_done(self):
if ((self.target_number_of_episodes > 0) and (self.target_number_of_episodes == self.episodes)) or \
((self.is_number(self.target_reward_score)) and (self.target_reward_score <= self.reward_in_episode)):
self.is_simulation_done = True
return self.is_simulation_done
def is_number(self, value_to_check):
try:
float(value_to_check)
return True
except ValueError:
return False
def cancel_simulation_job(self):
self.send_action(0, 0)
session = boto3.session.Session()
robomaker_client = session.client('robomaker', region_name=self.aws_region)
robomaker_client.cancel_simulation_job(
job=self.simulation_job_arn
)
def send_reward_to_cloudwatch(self, reward):
isLocal = os.environ.get("LOCAL")
        if isLocal is None:
session = boto3.session.Session()
cloudwatch_client = session.client('cloudwatch', region_name=self.aws_region)
cloudwatch_client.put_metric_data(
MetricData=[
{
'MetricName': self.metric_name,
'Dimensions': [
{
'Name': 'TRAINING_JOB_ARN',
'Value': self.training_job_arn
},
],
'Unit': 'None',
'Value': reward
},
],
Namespace=self.metric_namespace
)
else:
print("{}: {}".format(self.metric_name, reward))
class DeepRacerRacetrackCustomActionSpaceEnv(DeepRacerRacetrackEnv):
def __init__(self):
DeepRacerRacetrackEnv.__init__(self)
try:
# Try loading the custom model metadata (may or may not be present)
with open('custom_files/model_metadata.json', 'r') as f:
model_metadata = json.load(f)
self.json_actions = model_metadata['action_space']
logger.info("Loaded action space from file: {}".format(self.json_actions))
except:
# Failed to load, fall back on the default action space
from markov.defaults import model_metadata
self.json_actions = model_metadata['action_space']
logger.info("Loaded default action space: {}".format(self.json_actions))
self.action_space = spaces.Discrete(len(self.json_actions))
def step(self, action):
self.steering_angle = float(self.json_actions[action]['steering_angle']) * math.pi / 180.0
self.speed = float(self.json_actions[action]['speed'])
self.action_taken = action
return super().step([self.steering_angle, self.speed])
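    # Illustrative sketch added for documentation (not part of the original file): the action space
    # loaded from custom_files/model_metadata.json is expected to look roughly like
    #
    #   {"action_space": [
    #       {"steering_angle": -30, "speed": 0.8},
    #       {"steering_angle": 0, "speed": 1.0},
    #       {"steering_angle": 30, "speed": 0.8}
    #   ]}
    #
    # step() above picks the entry at the chosen discrete index, converts steering_angle from
    # degrees to radians, and forwards [steering_angle, speed] to the continuous parent env.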
| step | identifier_name |
deepracer_racetrack_env.py | from __future__ import print_function
import bisect
import boto3
import json
import logging
import math
import os
import time
import gym
import numpy as np
from gym import spaces
from PIL import Image
logger = logging.getLogger(__name__)
# Type of worker
SIMULATION_WORKER = "SIMULATION_WORKER"
SAGEMAKER_TRAINING_WORKER = "SAGEMAKER_TRAINING_WORKER"
node_type = os.environ.get("NODE_TYPE", SIMULATION_WORKER)
if node_type == SIMULATION_WORKER:
import rospy
from ackermann_msgs.msg import AckermannDriveStamped
from gazebo_msgs.msg import ModelState
from gazebo_msgs.srv import GetLinkState, GetModelState, SetModelState
from scipy.spatial.transform import Rotation
from sensor_msgs.msg import Image as sensor_image
from shapely.geometry import Point, Polygon
from shapely.geometry.polygon import LinearRing, LineString
# Type of job
TRAINING_JOB = 'TRAINING'
EVALUATION_JOB = 'EVALUATION'
# Sleep intervals
SLEEP_AFTER_RESET_TIME_IN_SECOND = 0.5
SLEEP_BETWEEN_ACTION_AND_REWARD_CALCULATION_TIME_IN_SECOND = 0.1
SLEEP_WAITING_FOR_IMAGE_TIME_IN_SECOND = 0.01
# Dimensions of the input training image
TRAINING_IMAGE_SIZE = (160, 120)
# Local offset of the front of the car
RELATIVE_POSITION_OF_FRONT_OF_CAR = [0.14, 0, 0]
# Normalized track distance to move with each reset
ROUND_ROBIN_ADVANCE_DIST = 0.05
# Reward to give the car when it "crashes"
CRASHED = 1e-8
### Gym Env ###
class DeepRacerRacetrackEnv(gym.Env):
def __init__(self):
# Create the observation space
img_width = TRAINING_IMAGE_SIZE[0]
img_height = TRAINING_IMAGE_SIZE[1]
self.observation_space = spaces.Box(low=0, high=255, shape=(img_height, img_width, 3), dtype=np.uint8)
# Create the action space
self.action_space = spaces.Box(low=np.array([-1, 0]), high=np.array([+1, +1]), dtype=np.float32)
if node_type == SIMULATION_WORKER:
# ROS initialization
rospy.init_node('rl_coach', anonymous=True)
rospy.Subscriber('/camera/zed/rgb/image_rect_color', sensor_image, self.callback_image)
self.ack_publisher = rospy.Publisher('/vesc/low_level/ackermann_cmd_mux/output',
AckermannDriveStamped, queue_size=100)
self.set_model_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
self.get_model_state = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
self.get_link_state = rospy.ServiceProxy('/gazebo/get_link_state', GetLinkState)
# Read in parameters
self.world_name = rospy.get_param('WORLD_NAME')
self.job_type = rospy.get_param('JOB_TYPE')
self.aws_region = rospy.get_param('AWS_REGION')
self.metrics_s3_bucket = rospy.get_param('METRICS_S3_BUCKET')
self.metrics_s3_object_key = rospy.get_param('METRICS_S3_OBJECT_KEY')
self.metrics = []
self.simulation_job_arn = 'arn:aws:robomaker:' + self.aws_region + ':' + \
rospy.get_param('ROBOMAKER_SIMULATION_JOB_ACCOUNT_ID') + \
':simulation-job/' + rospy.get_param('AWS_ROBOMAKER_SIMULATION_JOB_ID')
if self.job_type == TRAINING_JOB:
from custom_files.customer_reward_function import reward_function
self.reward_function = reward_function
self.metric_name = rospy.get_param('METRIC_NAME')
self.metric_namespace = rospy.get_param('METRIC_NAMESPACE')
self.training_job_arn = rospy.get_param('TRAINING_JOB_ARN')
self.target_number_of_episodes = rospy.get_param('NUMBER_OF_EPISODES')
self.target_reward_score = rospy.get_param('TARGET_REWARD_SCORE')
else:
from markov.defaults import reward_function
self.reward_function = reward_function
self.number_of_trials = 0
self.target_number_of_trials = rospy.get_param('NUMBER_OF_TRIALS')
# Read in the waypoints
BUNDLE_CURRENT_PREFIX = os.environ.get("BUNDLE_CURRENT_PREFIX", None)
if not BUNDLE_CURRENT_PREFIX:
raise ValueError("Cannot get BUNDLE_CURRENT_PREFIX")
route_file_name = os.path.join(BUNDLE_CURRENT_PREFIX,
'install', 'deepracer_simulation', 'share',
'deepracer_simulation', 'routes', '{}.npy'.format(self.world_name))
waypoints = np.load(route_file_name)
self.is_loop = np.all(waypoints[0,:] == waypoints[-1,:])
if self.is_loop:
self.center_line = LinearRing(waypoints[:,0:2])
self.inner_border = LinearRing(waypoints[:,2:4])
self.outer_border = LinearRing(waypoints[:,4:6])
self.road_poly = Polygon(self.outer_border, [self.inner_border])
else:
self.center_line = LineString(waypoints[:,0:2])
self.inner_border = LineString(waypoints[:,2:4])
self.outer_border = LineString(waypoints[:,4:6])
self.road_poly = Polygon(np.vstack((self.outer_border, np.flipud(self.inner_border))))
self.center_dists = [self.center_line.project(Point(p), normalized=True) for p in self.center_line.coords[:-1]] + [1.0]
self.track_length = self.center_line.length
# Initialize state data
self.episodes = 0
self.start_dist = 0.0
self.round_robin = (self.job_type == TRAINING_JOB)
self.is_simulation_done = False
self.image = None
self.steering_angle = 0
self.speed = 0
self.action_taken = 0
self.prev_progress = 0
self.prev_point = Point(0, 0)
self.prev_point_2 = Point(0, 0)
self.next_state = None
self.reward = None
self.reward_in_episode = 0
self.done = False
self.steps = 0
self.simulation_start_time = 0
self.reverse_dir = False
def reset(self):
if node_type == SAGEMAKER_TRAINING_WORKER:
return self.observation_space.sample()
# Simulation is done - so RoboMaker will start to shut down the app.
        # Until RoboMaker shuts down the app, do nothing more; otherwise metrics may show unexpected data.
if (node_type == SIMULATION_WORKER) and self.is_simulation_done:
while True:
time.sleep(1)
self.image = None
self.steering_angle = 0
self.speed = 0
self.action_taken = 0
self.prev_progress = 0
self.prev_point = Point(0, 0)
self.prev_point_2 = Point(0, 0)
self.next_state = None
self.reward = None
self.reward_in_episode = 0
self.done = False
# Reset the car and record the simulation start time
self.send_action(0, 0)
self.racecar_reset()
time.sleep(SLEEP_AFTER_RESET_TIME_IN_SECOND)
self.steps = 0
self.simulation_start_time = time.time()
# Compute the initial state
self.infer_reward_state(0, 0)
return self.next_state
def racecar_reset(self):
rospy.wait_for_service('/gazebo/set_model_state')
# Compute the starting position and heading
next_point_index = bisect.bisect(self.center_dists, self.start_dist)
start_point = self.center_line.interpolate(self.start_dist, normalized=True)
start_yaw = math.atan2(
self.center_line.coords[next_point_index][1] - start_point.y,
self.center_line.coords[next_point_index][0] - start_point.x)
start_quaternion = Rotation.from_euler('zyx', [start_yaw, 0, 0]).as_quat()
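# Note: Rotation.from_euler('zyx', [yaw, 0, 0]) is a pure yaw about the z axis, matching the
# heading computed from the next waypoint above.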
# Construct the model state and send to Gazebo
modelState = ModelState()
modelState.model_name = 'racecar'
modelState.pose.position.x = start_point.x
modelState.pose.position.y = start_point.y
modelState.pose.position.z = 0
modelState.pose.orientation.x = start_quaternion[0]
modelState.pose.orientation.y = start_quaternion[1]
modelState.pose.orientation.z = start_quaternion[2]
modelState.pose.orientation.w = start_quaternion[3]
modelState.twist.linear.x = 0
modelState.twist.linear.y = 0
modelState.twist.linear.z = 0
modelState.twist.angular.x = 0
modelState.twist.angular.y = 0
modelState.twist.angular.z = 0
self.set_model_state(modelState)
def step(self, action):
if node_type == SAGEMAKER_TRAINING_WORKER:
return self.observation_space.sample(), 0, False, {}
# Initialize next state, reward, done flag
self.next_state = None
self.reward = None
self.done = False
# Send this action to Gazebo and increment the step count
self.steering_angle = float(action[0])
self.speed = float(action[1])
self.send_action(self.steering_angle, self.speed)
time.sleep(SLEEP_BETWEEN_ACTION_AND_REWARD_CALCULATION_TIME_IN_SECOND)
self.steps += 1
# Compute the next state and reward
self.infer_reward_state(self.steering_angle, self.speed)
return self.next_state, self.reward, self.done, {}
def callback_image(self, data):
self.image = data
def send_action(self, steering_angle, speed):
ack_msg = AckermannDriveStamped()
ack_msg.header.stamp = rospy.Time.now()
ack_msg.drive.steering_angle = steering_angle
ack_msg.drive.speed = speed
self.ack_publisher.publish(ack_msg)
def infer_reward_state(self, steering_angle, speed):
rospy.wait_for_service('/gazebo/get_model_state')
rospy.wait_for_service('/gazebo/get_link_state')
# Wait till we have an image from the camera
# btown TODO: Incorporate feedback from callejae@ here (CR-6434645 rev1)
while not self.image:
time.sleep(SLEEP_WAITING_FOR_IMAGE_TIME_IN_SECOND)
# Read model state from Gazebo
model_state = self.get_model_state('racecar', '')
model_orientation = Rotation.from_quat([
model_state.pose.orientation.x,
model_state.pose.orientation.y,
model_state.pose.orientation.z,
model_state.pose.orientation.w])
model_location = np.array([
model_state.pose.position.x,
model_state.pose.position.y,
model_state.pose.position.z]) + \
model_orientation.apply(RELATIVE_POSITION_OF_FRONT_OF_CAR)
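# The fixed nose offset is rotated from the car's body frame into the world frame before being
# added, so progress and distances are measured at the front of the car, not the chassis origin.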
model_point = Point(model_location[0], model_location[1])
model_heading = model_orientation.as_euler('zyx')[0]
# Read the wheel locations from Gazebo
left_rear_wheel_state = self.get_link_state('racecar::left_rear_wheel', '')
left_front_wheel_state = self.get_link_state('racecar::left_front_wheel', '')
right_rear_wheel_state = self.get_link_state('racecar::right_rear_wheel', '')
right_front_wheel_state = self.get_link_state('racecar::right_front_wheel', '')
wheel_points = [
Point(left_rear_wheel_state.link_state.pose.position.x,
left_rear_wheel_state.link_state.pose.position.y),
Point(left_front_wheel_state.link_state.pose.position.x,
left_front_wheel_state.link_state.pose.position.y),
Point(right_rear_wheel_state.link_state.pose.position.x,
right_rear_wheel_state.link_state.pose.position.y),
Point(right_front_wheel_state.link_state.pose.position.x,
right_front_wheel_state.link_state.pose.position.y)
]
# Project the current location onto the center line and find nearest points
current_dist = self.center_line.project(model_point, normalized=True)
next_waypoint_index = max(0, min(bisect.bisect(self.center_dists, current_dist), len(self.center_dists) - 1))
prev_waypoint_index = next_waypoint_index - 1
distance_from_next = model_point.distance(Point(self.center_line.coords[next_waypoint_index]))
distance_from_prev = model_point.distance(Point(self.center_line.coords[prev_waypoint_index]))
closest_waypoint_index = (prev_waypoint_index, next_waypoint_index)[distance_from_next < distance_from_prev]
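# (a, b)[condition] picks b when the condition is True, so this selects whichever of the two
# neighbouring waypoints is closer to the car.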
# Compute distance from center and road width
nearest_point_center = self.center_line.interpolate(current_dist, normalized=True)
nearest_point_inner = self.inner_border.interpolate(self.inner_border.project(nearest_point_center))
nearest_point_outer = self.outer_border.interpolate(self.outer_border.project(nearest_point_center))
distance_from_center = nearest_point_center.distance(model_point)
distance_from_inner = nearest_point_inner.distance(model_point)
distance_from_outer = nearest_point_outer.distance(model_point)
track_width = nearest_point_inner.distance(nearest_point_outer)
is_left_of_center = (distance_from_outer < distance_from_inner) if self.reverse_dir \
else (distance_from_inner < distance_from_outer)
# Convert current progress to be [0,100] starting at the initial waypoint
current_progress = current_dist - self.start_dist
if current_progress < 0.0: current_progress = current_progress + 1.0
current_progress = 100 * current_progress
if current_progress < self.prev_progress:
# Either: (1) we wrapped around and have finished the track,
delta1 = current_progress + 100 - self.prev_progress
# or (2) for some reason the car went backwards (this should be rare)
delta2 = self.prev_progress - current_progress
current_progress = (self.prev_progress, 100)[delta1 < delta2]
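# Example: prev_progress=99.5 and current_progress=0.2 gives delta1=0.7 and delta2=99.3, so the
# smaller wrap-around delta wins and the progress is treated as a completed lap (100).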
# Car is off track if all wheels are outside the borders
wheel_on_track = [self.road_poly.contains(p) for p in wheel_points]
all_wheels_on_track = all(wheel_on_track)
any_wheels_on_track = any(wheel_on_track)
# Compute the reward
if any_wheels_on_track:
done = False
params = {
'all_wheels_on_track': all_wheels_on_track,
'x': model_point.x,
'y': model_point.y,
'heading': model_heading * 180.0 / math.pi,
'distance_from_center': distance_from_center,
'progress': current_progress,
'steps': self.steps,
'speed': speed,
'steering_angle': steering_angle * 180.0 / math.pi,
'track_width': track_width,
'waypoints': list(self.center_line.coords),
'closest_waypoints': [prev_waypoint_index, next_waypoint_index],
'is_left_of_center': is_left_of_center,
'is_reversed': self.reverse_dir
}
reward = self.reward_function(params)
else:
done = True
reward = CRASHED
# End the episode if the car position hasn't changed in the last 2 steps (car is stuck)
if min(model_point.distance(self.prev_point), model_point.distance(self.prev_point_2)) <= 0.0001:
done = True
reward = CRASHED # stuck
# Simulation jobs are done when progress reaches 100
if current_progress >= 100:
done = True
# Keep data from the previous step around
self.prev_point_2 = self.prev_point
self.prev_point = model_point
self.prev_progress = current_progress
# Read the image and resize to get the state
image = Image.frombytes('RGB', (self.image.width, self.image.height), self.image.data, 'raw', 'RGB', 0, 1)
image = image.resize(TRAINING_IMAGE_SIZE, resample=2)
state = np.array(image)
# Set the next state, reward, and done flag
self.next_state = state
self.reward = reward
self.reward_in_episode += reward
self.done = done
# Trace logs to help us debug and visualize the training runs
# btown TODO: This should be written to S3, not to CWL.
stdout_ = 'SIM_TRACE_LOG:%d,%d,%.4f,%.4f,%.4f,%.2f,%.2f,%d,%.4f,%s,%s,%.4f,%d,%.2f,%s\n' % (
self.episodes, self.steps, model_location[0], model_location[1], model_heading,
self.steering_angle,
self.speed,
self.action_taken,
self.reward,
self.done,
all_wheels_on_track,
current_progress,
closest_waypoint_index,
self.track_length,
time.time())
print(stdout_)
# Terminate this episode when ready
if self.done and node_type == SIMULATION_WORKER:
self.finish_episode(current_progress)
def finish_episode(self, progress):
# Stop the car from moving
self.send_action(0, 0)
# Increment episode count, update start dist for round robin
self.episodes += 1
if self.round_robin:
self.start_dist = (self.start_dist + ROUND_ROBIN_ADVANCE_DIST) % 1.0
# Update metrics based on job type
if self.job_type == TRAINING_JOB:
self.send_reward_to_cloudwatch(self.reward_in_episode)
self.update_training_metrics()
self.write_metrics_to_s3()
if self.is_training_done():
self.cancel_simulation_job()
elif self.job_type == EVALUATION_JOB:
self.number_of_trials += 1
self.update_eval_metrics(progress)
self.write_metrics_to_s3()
if self.is_evaluation_done():
self.cancel_simulation_job()
def update_eval_metrics(self, progress):
eval_metric = {}
eval_metric['completion_percentage'] = int(progress)
eval_metric['metric_time'] = int(round(time.time() * 1000))
eval_metric['start_time'] = int(round(self.simulation_start_time * 1000))
eval_metric['elapsed_time_in_milliseconds'] = int(round((time.time() - self.simulation_start_time) * 1000))
eval_metric['trial'] = int(self.number_of_trials)
self.metrics.append(eval_metric)
def update_training_metrics(self):
training_metric = {}
training_metric['reward_score'] = int(round(self.reward_in_episode))
training_metric['metric_time'] = int(round(time.time() * 1000))
training_metric['start_time'] = int(round(self.simulation_start_time * 1000))
training_metric['elapsed_time_in_milliseconds'] = int(round((time.time() - self.simulation_start_time) * 1000))
training_metric['episode'] = int(self.episodes)
self.metrics.append(training_metric)
def write_metrics_to_s3(self):
session = boto3.session.Session()
s3_url = os.environ.get('S3_ENDPOINT_URL')
s3_client = session.client('s3', region_name=self.aws_region, endpoint_url=s3_url)
metrics_body = json.dumps({'metrics': self.metrics})
s3_client.put_object(
Bucket=self.metrics_s3_bucket,
Key=self.metrics_s3_object_key,
Body=bytes(metrics_body, encoding='utf-8')
)
def is_evaluation_done(self):
if ((self.target_number_of_trials > 0) and (self.target_number_of_trials == self.number_of_trials)):
self.is_simulation_done = True
return self.is_simulation_done
def is_training_done(self):
if ((self.target_number_of_episodes > 0) and (self.target_number_of_episodes == self.episodes)) or \
((self.is_number(self.target_reward_score)) and (self.target_reward_score <= self.reward_in_episode)):
|
return self.is_simulation_done
def is_number(self, value_to_check):
try:
float(value_to_check)
return True
except ValueError:
return False
def cancel_simulation_job(self):
self.send_action(0, 0)
session = boto3.session.Session()
robomaker_client = session.client('robomaker', region_name=self.aws_region)
robomaker_client.cancel_simulation_job(
job=self.simulation_job_arn
)
def send_reward_to_cloudwatch(self, reward):
isLocal = os.environ.get("LOCAL")
if isLocal is None:
session = boto3.session.Session()
cloudwatch_client = session.client('cloudwatch', region_name=self.aws_region)
cloudwatch_client.put_metric_data(
MetricData=[
{
'MetricName': self.metric_name,
'Dimensions': [
{
'Name': 'TRAINING_JOB_ARN',
'Value': self.training_job_arn
},
],
'Unit': 'None',
'Value': reward
},
],
Namespace=self.metric_namespace
)
else:
print("{}: {}".format(self.metric_name, reward))
class DeepRacerRacetrackCustomActionSpaceEnv(DeepRacerRacetrackEnv):
def __init__(self):
DeepRacerRacetrackEnv.__init__(self)
try:
# Try loading the custom model metadata (may or may not be present)
with open('custom_files/model_metadata.json', 'r') as f:
model_metadata = json.load(f)
self.json_actions = model_metadata['action_space']
logger.info("Loaded action space from file: {}".format(self.json_actions))
except Exception:
# Failed to load, fall back on the default action space
from markov.defaults import model_metadata
self.json_actions = model_metadata['action_space']
logger.info("Loaded default action space: {}".format(self.json_actions))
self.action_space = spaces.Discrete(len(self.json_actions))
def step(self, action):
self.steering_angle = float(self.json_actions[action]['steering_angle']) * math.pi / 180.0
self.speed = float(self.json_actions[action]['speed'])
self.action_taken = action
return super().step([self.steering_angle, self.speed])
| self.is_simulation_done = True | conditional_block |
deepracer_racetrack_env.py | from __future__ import print_function
import bisect
import boto3
import json
import logging
import math
import os
import time
import gym
import numpy as np
from gym import spaces
from PIL import Image
logger = logging.getLogger(__name__)
# Type of worker
SIMULATION_WORKER = "SIMULATION_WORKER"
SAGEMAKER_TRAINING_WORKER = "SAGEMAKER_TRAINING_WORKER"
node_type = os.environ.get("NODE_TYPE", SIMULATION_WORKER)
if node_type == SIMULATION_WORKER:
import rospy
from ackermann_msgs.msg import AckermannDriveStamped
from gazebo_msgs.msg import ModelState
from gazebo_msgs.srv import GetLinkState, GetModelState, SetModelState
from scipy.spatial.transform import Rotation
from sensor_msgs.msg import Image as sensor_image
from shapely.geometry import Point, Polygon
from shapely.geometry.polygon import LinearRing, LineString
# Type of job
TRAINING_JOB = 'TRAINING'
EVALUATION_JOB = 'EVALUATION'
# Sleep intervals
SLEEP_AFTER_RESET_TIME_IN_SECOND = 0.5
SLEEP_BETWEEN_ACTION_AND_REWARD_CALCULATION_TIME_IN_SECOND = 0.1
SLEEP_WAITING_FOR_IMAGE_TIME_IN_SECOND = 0.01
# Dimensions of the input training image
TRAINING_IMAGE_SIZE = (160, 120)
# Local offset of the front of the car
RELATIVE_POSITION_OF_FRONT_OF_CAR = [0.14, 0, 0]
# Normalized track distance to move with each reset
ROUND_ROBIN_ADVANCE_DIST = 0.05
# Reward to give the car when it "crashes"
CRASHED = 1e-8
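# A tiny positive value rather than 0, presumably so reward totals stay strictly positive even
# for episodes that end immediately.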
### Gym Env ###
class DeepRacerRacetrackEnv(gym.Env):
def __init__(self):
# Create the observation space
img_width = TRAINING_IMAGE_SIZE[0]
img_height = TRAINING_IMAGE_SIZE[1]
self.observation_space = spaces.Box(low=0, high=255, shape=(img_height, img_width, 3), dtype=np.uint8)
# Create the action space
self.action_space = spaces.Box(low=np.array([-1, 0]), high=np.array([+1, +1]), dtype=np.float32)
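# Continuous actions: action[0] is the steering command in [-1, 1] and action[1] the speed
# command in [0, 1]; the discrete-action subclass at the bottom of the file overrides this space.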
if node_type == SIMULATION_WORKER:
# ROS initialization
rospy.init_node('rl_coach', anonymous=True)
rospy.Subscriber('/camera/zed/rgb/image_rect_color', sensor_image, self.callback_image)
self.ack_publisher = rospy.Publisher('/vesc/low_level/ackermann_cmd_mux/output',
AckermannDriveStamped, queue_size=100)
self.set_model_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
self.get_model_state = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
self.get_link_state = rospy.ServiceProxy('/gazebo/get_link_state', GetLinkState)
# Read in parameters
self.world_name = rospy.get_param('WORLD_NAME')
self.job_type = rospy.get_param('JOB_TYPE')
self.aws_region = rospy.get_param('AWS_REGION')
self.metrics_s3_bucket = rospy.get_param('METRICS_S3_BUCKET')
self.metrics_s3_object_key = rospy.get_param('METRICS_S3_OBJECT_KEY')
self.metrics = []
self.simulation_job_arn = 'arn:aws:robomaker:' + self.aws_region + ':' + \
rospy.get_param('ROBOMAKER_SIMULATION_JOB_ACCOUNT_ID') + \
':simulation-job/' + rospy.get_param('AWS_ROBOMAKER_SIMULATION_JOB_ID')
if self.job_type == TRAINING_JOB:
from custom_files.customer_reward_function import reward_function
self.reward_function = reward_function
self.metric_name = rospy.get_param('METRIC_NAME')
self.metric_namespace = rospy.get_param('METRIC_NAMESPACE')
self.training_job_arn = rospy.get_param('TRAINING_JOB_ARN')
self.target_number_of_episodes = rospy.get_param('NUMBER_OF_EPISODES')
self.target_reward_score = rospy.get_param('TARGET_REWARD_SCORE')
else:
from markov.defaults import reward_function
self.reward_function = reward_function
self.number_of_trials = 0
self.target_number_of_trials = rospy.get_param('NUMBER_OF_TRIALS')
# Read in the waypoints
BUNDLE_CURRENT_PREFIX = os.environ.get("BUNDLE_CURRENT_PREFIX", None)
if not BUNDLE_CURRENT_PREFIX:
raise ValueError("Cannot get BUNDLE_CURRENT_PREFIX")
route_file_name = os.path.join(BUNDLE_CURRENT_PREFIX,
'install', 'deepracer_simulation', 'share',
'deepracer_simulation', 'routes', '{}.npy'.format(self.world_name))
waypoints = np.load(route_file_name)
self.is_loop = np.all(waypoints[0,:] == waypoints[-1,:])
if self.is_loop:
self.center_line = LinearRing(waypoints[:,0:2])
self.inner_border = LinearRing(waypoints[:,2:4])
self.outer_border = LinearRing(waypoints[:,4:6])
self.road_poly = Polygon(self.outer_border, [self.inner_border])
else:
self.center_line = LineString(waypoints[:,0:2])
self.inner_border = LineString(waypoints[:,2:4])
self.outer_border = LineString(waypoints[:,4:6])
self.road_poly = Polygon(np.vstack((self.outer_border, np.flipud(self.inner_border))))
self.center_dists = [self.center_line.project(Point(p), normalized=True) for p in self.center_line.coords[:-1]] + [1.0]
self.track_length = self.center_line.length
# Initialize state data
self.episodes = 0
self.start_dist = 0.0
self.round_robin = (self.job_type == TRAINING_JOB)
self.is_simulation_done = False
self.image = None
self.steering_angle = 0
self.speed = 0
self.action_taken = 0
self.prev_progress = 0
self.prev_point = Point(0, 0)
self.prev_point_2 = Point(0, 0)
self.next_state = None
self.reward = None
self.reward_in_episode = 0
self.done = False
self.steps = 0
self.simulation_start_time = 0
self.reverse_dir = False
def reset(self):
|
def racecar_reset(self):
rospy.wait_for_service('/gazebo/set_model_state')
# Compute the starting position and heading
next_point_index = bisect.bisect(self.center_dists, self.start_dist)
start_point = self.center_line.interpolate(self.start_dist, normalized=True)
start_yaw = math.atan2(
self.center_line.coords[next_point_index][1] - start_point.y,
self.center_line.coords[next_point_index][0] - start_point.x)
start_quaternion = Rotation.from_euler('zyx', [start_yaw, 0, 0]).as_quat()
# Construct the model state and send to Gazebo
modelState = ModelState()
modelState.model_name = 'racecar'
modelState.pose.position.x = start_point.x
modelState.pose.position.y = start_point.y
modelState.pose.position.z = 0
modelState.pose.orientation.x = start_quaternion[0]
modelState.pose.orientation.y = start_quaternion[1]
modelState.pose.orientation.z = start_quaternion[2]
modelState.pose.orientation.w = start_quaternion[3]
modelState.twist.linear.x = 0
modelState.twist.linear.y = 0
modelState.twist.linear.z = 0
modelState.twist.angular.x = 0
modelState.twist.angular.y = 0
modelState.twist.angular.z = 0
self.set_model_state(modelState)
def step(self, action):
if node_type == SAGEMAKER_TRAINING_WORKER:
return self.observation_space.sample(), 0, False, {}
# Initialize next state, reward, done flag
self.next_state = None
self.reward = None
self.done = False
# Send this action to Gazebo and increment the step count
self.steering_angle = float(action[0])
self.speed = float(action[1])
self.send_action(self.steering_angle, self.speed)
time.sleep(SLEEP_BETWEEN_ACTION_AND_REWARD_CALCULATION_TIME_IN_SECOND)
self.steps += 1
# Compute the next state and reward
self.infer_reward_state(self.steering_angle, self.speed)
return self.next_state, self.reward, self.done, {}
def callback_image(self, data):
self.image = data
def send_action(self, steering_angle, speed):
ack_msg = AckermannDriveStamped()
ack_msg.header.stamp = rospy.Time.now()
ack_msg.drive.steering_angle = steering_angle
ack_msg.drive.speed = speed
self.ack_publisher.publish(ack_msg)
def infer_reward_state(self, steering_angle, speed):
rospy.wait_for_service('/gazebo/get_model_state')
rospy.wait_for_service('/gazebo/get_link_state')
# Wait till we have an image from the camera
# btown TODO: Incorporate feedback from callejae@ here (CR-6434645 rev1)
while not self.image:
time.sleep(SLEEP_WAITING_FOR_IMAGE_TIME_IN_SECOND)
# Read model state from Gazebo
model_state = self.get_model_state('racecar', '')
model_orientation = Rotation.from_quat([
model_state.pose.orientation.x,
model_state.pose.orientation.y,
model_state.pose.orientation.z,
model_state.pose.orientation.w])
model_location = np.array([
model_state.pose.position.x,
model_state.pose.position.y,
model_state.pose.position.z]) + \
model_orientation.apply(RELATIVE_POSITION_OF_FRONT_OF_CAR)
model_point = Point(model_location[0], model_location[1])
model_heading = model_orientation.as_euler('zyx')[0]
# Read the wheel locations from Gazebo
left_rear_wheel_state = self.get_link_state('racecar::left_rear_wheel', '')
left_front_wheel_state = self.get_link_state('racecar::left_front_wheel', '')
right_rear_wheel_state = self.get_link_state('racecar::right_rear_wheel', '')
right_front_wheel_state = self.get_link_state('racecar::right_front_wheel', '')
wheel_points = [
Point(left_rear_wheel_state.link_state.pose.position.x,
left_rear_wheel_state.link_state.pose.position.y),
Point(left_front_wheel_state.link_state.pose.position.x,
left_front_wheel_state.link_state.pose.position.y),
Point(right_rear_wheel_state.link_state.pose.position.x,
right_rear_wheel_state.link_state.pose.position.y),
Point(right_front_wheel_state.link_state.pose.position.x,
right_front_wheel_state.link_state.pose.position.y)
]
# Project the current location onto the center line and find nearest points
current_dist = self.center_line.project(model_point, normalized=True)
next_waypoint_index = max(0, min(bisect.bisect(self.center_dists, current_dist), len(self.center_dists) - 1))
prev_waypoint_index = next_waypoint_index - 1
distance_from_next = model_point.distance(Point(self.center_line.coords[next_waypoint_index]))
distance_from_prev = model_point.distance(Point(self.center_line.coords[prev_waypoint_index]))
closest_waypoint_index = (prev_waypoint_index, next_waypoint_index)[distance_from_next < distance_from_prev]
# Compute distance from center and road width
nearest_point_center = self.center_line.interpolate(current_dist, normalized=True)
nearest_point_inner = self.inner_border.interpolate(self.inner_border.project(nearest_point_center))
nearest_point_outer = self.outer_border.interpolate(self.outer_border.project(nearest_point_center))
distance_from_center = nearest_point_center.distance(model_point)
distance_from_inner = nearest_point_inner.distance(model_point)
distance_from_outer = nearest_point_outer.distance(model_point)
track_width = nearest_point_inner.distance(nearest_point_outer)
is_left_of_center = (distance_from_outer < distance_from_inner) if self.reverse_dir \
else (distance_from_inner < distance_from_outer)
# Convert current progress to be [0,100] starting at the initial waypoint
current_progress = current_dist - self.start_dist
if current_progress < 0.0: current_progress = current_progress + 1.0
current_progress = 100 * current_progress
if current_progress < self.prev_progress:
# Either: (1) we wrapped around and have finished the track,
delta1 = current_progress + 100 - self.prev_progress
# or (2) for some reason the car went backwards (this should be rare)
delta2 = self.prev_progress - current_progress
current_progress = (self.prev_progress, 100)[delta1 < delta2]
# Car is off track if all wheels are outside the borders
wheel_on_track = [self.road_poly.contains(p) for p in wheel_points]
all_wheels_on_track = all(wheel_on_track)
any_wheels_on_track = any(wheel_on_track)
# Compute the reward
if any_wheels_on_track:
done = False
params = {
'all_wheels_on_track': all_wheels_on_track,
'x': model_point.x,
'y': model_point.y,
'heading': model_heading * 180.0 / math.pi,
'distance_from_center': distance_from_center,
'progress': current_progress,
'steps': self.steps,
'speed': speed,
'steering_angle': steering_angle * 180.0 / math.pi,
'track_width': track_width,
'waypoints': list(self.center_line.coords),
'closest_waypoints': [prev_waypoint_index, next_waypoint_index],
'is_left_of_center': is_left_of_center,
'is_reversed': self.reverse_dir
}
reward = self.reward_function(params)
else:
done = True
reward = CRASHED
# End the episode if the car position hasn't changed in the last 2 steps (car is stuck)
if min(model_point.distance(self.prev_point), model_point.distance(self.prev_point_2)) <= 0.0001:
done = True
reward = CRASHED # stuck
# Simulation jobs are done when progress reaches 100
if current_progress >= 100:
done = True
# Keep data from the previous step around
self.prev_point_2 = self.prev_point
self.prev_point = model_point
self.prev_progress = current_progress
# Read the image and resize to get the state
image = Image.frombytes('RGB', (self.image.width, self.image.height), self.image.data, 'raw', 'RGB', 0, 1)
image = image.resize(TRAINING_IMAGE_SIZE, resample=2)
state = np.array(image)
# Set the next state, reward, and done flag
self.next_state = state
self.reward = reward
self.reward_in_episode += reward
self.done = done
# Trace logs to help us debug and visualize the training runs
# btown TODO: This should be written to S3, not to CWL.
stdout_ = 'SIM_TRACE_LOG:%d,%d,%.4f,%.4f,%.4f,%.2f,%.2f,%d,%.4f,%s,%s,%.4f,%d,%.2f,%s\n' % (
self.episodes, self.steps, model_location[0], model_location[1], model_heading,
self.steering_angle,
self.speed,
self.action_taken,
self.reward,
self.done,
all_wheels_on_track,
current_progress,
closest_waypoint_index,
self.track_length,
time.time())
print(stdout_)
# Terminate this episode when ready
if self.done and node_type == SIMULATION_WORKER:
self.finish_episode(current_progress)
def finish_episode(self, progress):
# Stop the car from moving
self.send_action(0, 0)
# Increment episode count, update start dist for round robin
self.episodes += 1
if self.round_robin:
self.start_dist = (self.start_dist + ROUND_ROBIN_ADVANCE_DIST) % 1.0
# Update metrics based on job type
if self.job_type == TRAINING_JOB:
self.send_reward_to_cloudwatch(self.reward_in_episode)
self.update_training_metrics()
self.write_metrics_to_s3()
if self.is_training_done():
self.cancel_simulation_job()
elif self.job_type == EVALUATION_JOB:
self.number_of_trials += 1
self.update_eval_metrics(progress)
self.write_metrics_to_s3()
if self.is_evaluation_done():
self.cancel_simulation_job()
def update_eval_metrics(self, progress):
eval_metric = {}
eval_metric['completion_percentage'] = int(progress)
eval_metric['metric_time'] = int(round(time.time() * 1000))
eval_metric['start_time'] = int(round(self.simulation_start_time * 1000))
eval_metric['elapsed_time_in_milliseconds'] = int(round((time.time() - self.simulation_start_time) * 1000))
eval_metric['trial'] = int(self.number_of_trials)
self.metrics.append(eval_metric)
def update_training_metrics(self):
training_metric = {}
training_metric['reward_score'] = int(round(self.reward_in_episode))
training_metric['metric_time'] = int(round(time.time() * 1000))
training_metric['start_time'] = int(round(self.simulation_start_time * 1000))
training_metric['elapsed_time_in_milliseconds'] = int(round((time.time() - self.simulation_start_time) * 1000))
training_metric['episode'] = int(self.episodes)
self.metrics.append(training_metric)
def write_metrics_to_s3(self):
session = boto3.session.Session()
s3_url = os.environ.get('S3_ENDPOINT_URL')
s3_client = session.client('s3', region_name=self.aws_region, endpoint_url=s3_url)
metrics_body = json.dumps({'metrics': self.metrics})
s3_client.put_object(
Bucket=self.metrics_s3_bucket,
Key=self.metrics_s3_object_key,
Body=bytes(metrics_body, encoding='utf-8')
)
def is_evaluation_done(self):
if ((self.target_number_of_trials > 0) and (self.target_number_of_trials == self.number_of_trials)):
self.is_simulation_done = True
return self.is_simulation_done
def is_training_done(self):
if ((self.target_number_of_episodes > 0) and (self.target_number_of_episodes == self.episodes)) or \
((self.is_number(self.target_reward_score)) and (self.target_reward_score <= self.reward_in_episode)):
self.is_simulation_done = True
return self.is_simulation_done
def is_number(self, value_to_check):
try:
float(value_to_check)
return True
except ValueError:
return False
def cancel_simulation_job(self):
self.send_action(0, 0)
session = boto3.session.Session()
robomaker_client = session.client('robomaker', region_name=self.aws_region)
robomaker_client.cancel_simulation_job(
job=self.simulation_job_arn
)
def send_reward_to_cloudwatch(self, reward):
isLocal = os.environ.get("LOCAL")
if isLocal is None:
session = boto3.session.Session()
cloudwatch_client = session.client('cloudwatch', region_name=self.aws_region)
cloudwatch_client.put_metric_data(
MetricData=[
{
'MetricName': self.metric_name,
'Dimensions': [
{
'Name': 'TRAINING_JOB_ARN',
'Value': self.training_job_arn
},
],
'Unit': 'None',
'Value': reward
},
],
Namespace=self.metric_namespace
)
else:
print("{}: {}".format(self.metric_name, reward))
class DeepRacerRacetrackCustomActionSpaceEnv(DeepRacerRacetrackEnv):
def __init__(self):
DeepRacerRacetrackEnv.__init__(self)
try:
# Try loading the custom model metadata (may or may not be present)
with open('custom_files/model_metadata.json', 'r') as f:
model_metadata = json.load(f)
self.json_actions = model_metadata['action_space']
logger.info("Loaded action space from file: {}".format(self.json_actions))
except Exception:
# Failed to load, fall back on the default action space
from markov.defaults import model_metadata
self.json_actions = model_metadata['action_space']
logger.info("Loaded default action space: {}".format(self.json_actions))
self.action_space = spaces.Discrete(len(self.json_actions))
def step(self, action):
self.steering_angle = float(self.json_actions[action]['steering_angle']) * math.pi / 180.0
self.speed = float(self.json_actions[action]['speed'])
self.action_taken = action
return super().step([self.steering_angle, self.speed])
| if node_type == SAGEMAKER_TRAINING_WORKER:
return self.observation_space.sample()
# Simulation is done - so RoboMaker will start to shut down the app.
# Until RoboMaker shuts down the app, do nothing more, otherwise the metrics may show unexpected data.
if (node_type == SIMULATION_WORKER) and self.is_simulation_done:
while True:
time.sleep(1)
self.image = None
self.steering_angle = 0
self.speed = 0
self.action_taken = 0
self.prev_progress = 0
self.prev_point = Point(0, 0)
self.prev_point_2 = Point(0, 0)
self.next_state = None
self.reward = None
self.reward_in_episode = 0
self.done = False
# Reset the car and record the simulation start time
self.send_action(0, 0)
self.racecar_reset()
time.sleep(SLEEP_AFTER_RESET_TIME_IN_SECOND)
self.steps = 0
self.simulation_start_time = time.time()
# Compute the initial state
self.infer_reward_state(0, 0)
return self.next_state | identifier_body |
deepracer_racetrack_env.py | from __future__ import print_function
import bisect
import boto3
import json
import logging
import math
import os
import time
import gym
import numpy as np
from gym import spaces
from PIL import Image
logger = logging.getLogger(__name__)
# Type of worker
SIMULATION_WORKER = "SIMULATION_WORKER"
SAGEMAKER_TRAINING_WORKER = "SAGEMAKER_TRAINING_WORKER"
node_type = os.environ.get("NODE_TYPE", SIMULATION_WORKER)
if node_type == SIMULATION_WORKER:
import rospy
from ackermann_msgs.msg import AckermannDriveStamped
from gazebo_msgs.msg import ModelState
from gazebo_msgs.srv import GetLinkState, GetModelState, SetModelState
from scipy.spatial.transform import Rotation
from sensor_msgs.msg import Image as sensor_image
from shapely.geometry import Point, Polygon
from shapely.geometry.polygon import LinearRing, LineString
# Type of job
TRAINING_JOB = 'TRAINING'
EVALUATION_JOB = 'EVALUATION'
# Sleep intervals
SLEEP_AFTER_RESET_TIME_IN_SECOND = 0.5
SLEEP_BETWEEN_ACTION_AND_REWARD_CALCULATION_TIME_IN_SECOND = 0.1
SLEEP_WAITING_FOR_IMAGE_TIME_IN_SECOND = 0.01
# Dimensions of the input training image
TRAINING_IMAGE_SIZE = (160, 120)
# Local offset of the front of the car
RELATIVE_POSITION_OF_FRONT_OF_CAR = [0.14, 0, 0]
# Normalized track distance to move with each reset
ROUND_ROBIN_ADVANCE_DIST = 0.05
# Reward to give the car when it "crashes"
CRASHED = 1e-8
### Gym Env ###
class DeepRacerRacetrackEnv(gym.Env):
def __init__(self):
# Create the observation space
img_width = TRAINING_IMAGE_SIZE[0]
img_height = TRAINING_IMAGE_SIZE[1]
self.observation_space = spaces.Box(low=0, high=255, shape=(img_height, img_width, 3), dtype=np.uint8)
# Create the action space
self.action_space = spaces.Box(low=np.array([-1, 0]), high=np.array([+1, +1]), dtype=np.float32)
if node_type == SIMULATION_WORKER:
# ROS initialization
rospy.init_node('rl_coach', anonymous=True)
rospy.Subscriber('/camera/zed/rgb/image_rect_color', sensor_image, self.callback_image)
self.ack_publisher = rospy.Publisher('/vesc/low_level/ackermann_cmd_mux/output',
AckermannDriveStamped, queue_size=100)
self.set_model_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
self.get_model_state = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
self.get_link_state = rospy.ServiceProxy('/gazebo/get_link_state', GetLinkState)
# Read in parameters
self.world_name = rospy.get_param('WORLD_NAME')
self.job_type = rospy.get_param('JOB_TYPE')
self.aws_region = rospy.get_param('AWS_REGION')
self.metrics_s3_bucket = rospy.get_param('METRICS_S3_BUCKET')
self.metrics_s3_object_key = rospy.get_param('METRICS_S3_OBJECT_KEY')
self.metrics = []
self.simulation_job_arn = 'arn:aws:robomaker:' + self.aws_region + ':' + \
rospy.get_param('ROBOMAKER_SIMULATION_JOB_ACCOUNT_ID') + \
':simulation-job/' + rospy.get_param('AWS_ROBOMAKER_SIMULATION_JOB_ID')
if self.job_type == TRAINING_JOB:
from custom_files.customer_reward_function import reward_function
self.reward_function = reward_function
self.metric_name = rospy.get_param('METRIC_NAME')
self.metric_namespace = rospy.get_param('METRIC_NAMESPACE')
self.training_job_arn = rospy.get_param('TRAINING_JOB_ARN')
self.target_number_of_episodes = rospy.get_param('NUMBER_OF_EPISODES')
self.target_reward_score = rospy.get_param('TARGET_REWARD_SCORE')
else:
from markov.defaults import reward_function
self.reward_function = reward_function
self.number_of_trials = 0
self.target_number_of_trials = rospy.get_param('NUMBER_OF_TRIALS')
# Read in the waypoints
BUNDLE_CURRENT_PREFIX = os.environ.get("BUNDLE_CURRENT_PREFIX", None)
if not BUNDLE_CURRENT_PREFIX:
raise ValueError("Cannot get BUNDLE_CURRENT_PREFIX")
route_file_name = os.path.join(BUNDLE_CURRENT_PREFIX,
'install', 'deepracer_simulation', 'share',
'deepracer_simulation', 'routes', '{}.npy'.format(self.world_name))
waypoints = np.load(route_file_name)
self.is_loop = np.all(waypoints[0,:] == waypoints[-1,:])
if self.is_loop:
self.center_line = LinearRing(waypoints[:,0:2])
self.inner_border = LinearRing(waypoints[:,2:4])
self.outer_border = LinearRing(waypoints[:,4:6])
self.road_poly = Polygon(self.outer_border, [self.inner_border])
else:
self.center_line = LineString(waypoints[:,0:2])
self.inner_border = LineString(waypoints[:,2:4])
self.outer_border = LineString(waypoints[:,4:6])
self.road_poly = Polygon(np.vstack((self.outer_border, np.flipud(self.inner_border))))
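# For an open (non-loop) track the drivable area is built by stitching the outer border to the
# reversed inner border, so the resulting polygon forms a single closed ring.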
self.center_dists = [self.center_line.project(Point(p), normalized=True) for p in self.center_line.coords[:-1]] + [1.0]
self.track_length = self.center_line.length
# Initialize state data
self.episodes = 0
self.start_dist = 0.0
self.round_robin = (self.job_type == TRAINING_JOB)
self.is_simulation_done = False
self.image = None
self.steering_angle = 0
self.speed = 0
self.action_taken = 0
self.prev_progress = 0
self.prev_point = Point(0, 0)
self.prev_point_2 = Point(0, 0)
self.next_state = None
self.reward = None
self.reward_in_episode = 0
self.done = False
self.steps = 0
self.simulation_start_time = 0
self.reverse_dir = False
def reset(self):
if node_type == SAGEMAKER_TRAINING_WORKER:
return self.observation_space.sample()
# Simulation is done - so RoboMaker will start to shut down the app.
# Until RoboMaker shuts down the app, do nothing more, otherwise the metrics may show unexpected data.
if (node_type == SIMULATION_WORKER) and self.is_simulation_done:
while True:
time.sleep(1)
self.image = None
self.steering_angle = 0
self.speed = 0
self.action_taken = 0
self.prev_progress = 0
self.prev_point = Point(0, 0)
self.prev_point_2 = Point(0, 0)
self.next_state = None
self.reward = None
self.reward_in_episode = 0
self.done = False
# Reset the car and record the simulation start time
self.send_action(0, 0)
self.racecar_reset()
time.sleep(SLEEP_AFTER_RESET_TIME_IN_SECOND)
self.steps = 0
self.simulation_start_time = time.time()
# Compute the initial state
self.infer_reward_state(0, 0)
return self.next_state
def racecar_reset(self):
rospy.wait_for_service('/gazebo/set_model_state')
# Compute the starting position and heading
next_point_index = bisect.bisect(self.center_dists, self.start_dist)
start_point = self.center_line.interpolate(self.start_dist, normalized=True)
start_yaw = math.atan2(
self.center_line.coords[next_point_index][1] - start_point.y,
self.center_line.coords[next_point_index][0] - start_point.x)
start_quaternion = Rotation.from_euler('zyx', [start_yaw, 0, 0]).as_quat()
# Construct the model state and send to Gazebo
modelState = ModelState()
modelState.model_name = 'racecar'
modelState.pose.position.x = start_point.x
modelState.pose.position.y = start_point.y
modelState.pose.position.z = 0
modelState.pose.orientation.x = start_quaternion[0]
modelState.pose.orientation.y = start_quaternion[1]
modelState.pose.orientation.z = start_quaternion[2]
modelState.pose.orientation.w = start_quaternion[3]
modelState.twist.linear.x = 0
modelState.twist.linear.y = 0
modelState.twist.linear.z = 0
modelState.twist.angular.x = 0
modelState.twist.angular.y = 0
modelState.twist.angular.z = 0
self.set_model_state(modelState)
def step(self, action):
if node_type == SAGEMAKER_TRAINING_WORKER:
return self.observation_space.sample(), 0, False, {}
# Initialize next state, reward, done flag
self.next_state = None
self.reward = None
self.done = False
# Send this action to Gazebo and increment the step count
self.steering_angle = float(action[0])
self.speed = float(action[1])
self.send_action(self.steering_angle, self.speed)
time.sleep(SLEEP_BETWEEN_ACTION_AND_REWARD_CALCULATION_TIME_IN_SECOND)
self.steps += 1
# Compute the next state and reward
self.infer_reward_state(self.steering_angle, self.speed)
return self.next_state, self.reward, self.done, {}
def callback_image(self, data):
self.image = data
def send_action(self, steering_angle, speed):
ack_msg = AckermannDriveStamped()
ack_msg.header.stamp = rospy.Time.now()
ack_msg.drive.steering_angle = steering_angle
ack_msg.drive.speed = speed
self.ack_publisher.publish(ack_msg)
def infer_reward_state(self, steering_angle, speed):
rospy.wait_for_service('/gazebo/get_model_state')
rospy.wait_for_service('/gazebo/get_link_state')
# Wait till we have an image from the camera
# btown TODO: Incorporate feedback from callejae@ here (CR-6434645 rev1)
while not self.image:
time.sleep(SLEEP_WAITING_FOR_IMAGE_TIME_IN_SECOND)
# Read model state from Gazebo
model_state = self.get_model_state('racecar', '')
model_orientation = Rotation.from_quat([
model_state.pose.orientation.x,
model_state.pose.orientation.y,
model_state.pose.orientation.z,
model_state.pose.orientation.w])
model_location = np.array([
model_state.pose.position.x,
model_state.pose.position.y,
model_state.pose.position.z]) + \
model_orientation.apply(RELATIVE_POSITION_OF_FRONT_OF_CAR)
model_point = Point(model_location[0], model_location[1])
model_heading = model_orientation.as_euler('zyx')[0]
# Read the wheel locations from Gazebo
left_rear_wheel_state = self.get_link_state('racecar::left_rear_wheel', '')
left_front_wheel_state = self.get_link_state('racecar::left_front_wheel', '')
right_rear_wheel_state = self.get_link_state('racecar::right_rear_wheel', '')
right_front_wheel_state = self.get_link_state('racecar::right_front_wheel', '')
wheel_points = [
Point(left_rear_wheel_state.link_state.pose.position.x,
left_rear_wheel_state.link_state.pose.position.y),
Point(left_front_wheel_state.link_state.pose.position.x,
left_front_wheel_state.link_state.pose.position.y),
Point(right_rear_wheel_state.link_state.pose.position.x,
right_rear_wheel_state.link_state.pose.position.y),
Point(right_front_wheel_state.link_state.pose.position.x,
right_front_wheel_state.link_state.pose.position.y)
]
# Project the current location onto the center line and find nearest points
current_dist = self.center_line.project(model_point, normalized=True)
next_waypoint_index = max(0, min(bisect.bisect(self.center_dists, current_dist), len(self.center_dists) - 1))
prev_waypoint_index = next_waypoint_index - 1
distance_from_next = model_point.distance(Point(self.center_line.coords[next_waypoint_index]))
distance_from_prev = model_point.distance(Point(self.center_line.coords[prev_waypoint_index]))
closest_waypoint_index = (prev_waypoint_index, next_waypoint_index)[distance_from_next < distance_from_prev]
# Compute distance from center and road width
nearest_point_center = self.center_line.interpolate(current_dist, normalized=True)
nearest_point_inner = self.inner_border.interpolate(self.inner_border.project(nearest_point_center))
nearest_point_outer = self.outer_border.interpolate(self.outer_border.project(nearest_point_center))
distance_from_center = nearest_point_center.distance(model_point)
distance_from_inner = nearest_point_inner.distance(model_point)
distance_from_outer = nearest_point_outer.distance(model_point)
track_width = nearest_point_inner.distance(nearest_point_outer)
is_left_of_center = (distance_from_outer < distance_from_inner) if self.reverse_dir \
else (distance_from_inner < distance_from_outer)
# Convert current progress to be [0,100] starting at the initial waypoint
current_progress = current_dist - self.start_dist
if current_progress < 0.0: current_progress = current_progress + 1.0
current_progress = 100 * current_progress
if current_progress < self.prev_progress:
# Either: (1) we wrapped around and have finished the track,
delta1 = current_progress + 100 - self.prev_progress
# or (2) for some reason the car went backwards (this should be rare)
delta2 = self.prev_progress - current_progress
current_progress = (self.prev_progress, 100)[delta1 < delta2]
# Car is off track if all wheels are outside the borders
wheel_on_track = [self.road_poly.contains(p) for p in wheel_points]
all_wheels_on_track = all(wheel_on_track)
any_wheels_on_track = any(wheel_on_track)
# Compute the reward
if any_wheels_on_track:
done = False
params = {
'all_wheels_on_track': all_wheels_on_track,
'x': model_point.x,
'y': model_point.y,
'heading': model_heading * 180.0 / math.pi,
'distance_from_center': distance_from_center,
'progress': current_progress,
'steps': self.steps,
'speed': speed,
'steering_angle': steering_angle * 180.0 / math.pi,
'track_width': track_width,
'waypoints': list(self.center_line.coords),
'closest_waypoints': [prev_waypoint_index, next_waypoint_index],
'is_left_of_center': is_left_of_center,
'is_reversed': self.reverse_dir
}
reward = self.reward_function(params)
else:
done = True
reward = CRASHED
# End the episode if the car position hasn't changed in the last 2 steps (car is stuck)
if min(model_point.distance(self.prev_point), model_point.distance(self.prev_point_2)) <= 0.0001:
done = True
reward = CRASHED # stuck
# Simulation jobs are done when progress reaches 100
if current_progress >= 100:
done = True
# Keep data from the previous step around
self.prev_point_2 = self.prev_point
self.prev_point = model_point
self.prev_progress = current_progress
# Read the image and resize to get the state
image = Image.frombytes('RGB', (self.image.width, self.image.height), self.image.data, 'raw', 'RGB', 0, 1)
image = image.resize(TRAINING_IMAGE_SIZE, resample=2)
state = np.array(image)
# Set the next state, reward, and done flag
self.next_state = state
self.reward = reward
self.reward_in_episode += reward
self.done = done
# Trace logs to help us debug and visualize the training runs
# btown TODO: This should be written to S3, not to CWL.
stdout_ = 'SIM_TRACE_LOG:%d,%d,%.4f,%.4f,%.4f,%.2f,%.2f,%d,%.4f,%s,%s,%.4f,%d,%.2f,%s\n' % (
self.episodes, self.steps, model_location[0], model_location[1], model_heading,
self.steering_angle,
self.speed,
self.action_taken,
self.reward,
self.done,
all_wheels_on_track,
current_progress,
closest_waypoint_index,
self.track_length,
time.time())
print(stdout_)
# Terminate this episode when ready
if self.done and node_type == SIMULATION_WORKER:
self.finish_episode(current_progress)
def finish_episode(self, progress):
# Stop the car from moving
self.send_action(0, 0)
# Increment episode count, update start dist for round robin
self.episodes += 1
if self.round_robin:
self.start_dist = (self.start_dist + ROUND_ROBIN_ADVANCE_DIST) % 1.0
# Update metrics based on job type
if self.job_type == TRAINING_JOB:
self.send_reward_to_cloudwatch(self.reward_in_episode)
self.update_training_metrics()
self.write_metrics_to_s3()
if self.is_training_done():
self.cancel_simulation_job()
elif self.job_type == EVALUATION_JOB:
self.number_of_trials += 1
self.update_eval_metrics(progress)
self.write_metrics_to_s3()
if self.is_evaluation_done():
self.cancel_simulation_job()
def update_eval_metrics(self, progress):
eval_metric = {}
eval_metric['completion_percentage'] = int(progress)
eval_metric['metric_time'] = int(round(time.time() * 1000))
eval_metric['start_time'] = int(round(self.simulation_start_time * 1000))
eval_metric['elapsed_time_in_milliseconds'] = int(round((time.time() - self.simulation_start_time) * 1000))
eval_metric['trial'] = int(self.number_of_trials)
self.metrics.append(eval_metric)
def update_training_metrics(self):
training_metric = {}
training_metric['reward_score'] = int(round(self.reward_in_episode))
training_metric['metric_time'] = int(round(time.time() * 1000))
training_metric['start_time'] = int(round(self.simulation_start_time * 1000))
training_metric['elapsed_time_in_milliseconds'] = int(round((time.time() - self.simulation_start_time) * 1000))
training_metric['episode'] = int(self.episodes)
self.metrics.append(training_metric)
def write_metrics_to_s3(self):
session = boto3.session.Session()
s3_url = os.environ.get('S3_ENDPOINT_URL')
s3_client = session.client('s3', region_name=self.aws_region, endpoint_url=s3_url)
metrics_body = json.dumps({'metrics': self.metrics})
s3_client.put_object(
Bucket=self.metrics_s3_bucket,
Key=self.metrics_s3_object_key,
Body=bytes(metrics_body, encoding='utf-8')
)
def is_evaluation_done(self):
if ((self.target_number_of_trials > 0) and (self.target_number_of_trials == self.number_of_trials)):
self.is_simulation_done = True
return self.is_simulation_done
def is_training_done(self):
if ((self.target_number_of_episodes > 0) and (self.target_number_of_episodes == self.episodes)) or \
((self.is_number(self.target_reward_score)) and (self.target_reward_score <= self.reward_in_episode)):
self.is_simulation_done = True
return self.is_simulation_done
def is_number(self, value_to_check):
try:
float(value_to_check)
return True
except ValueError:
return False
def cancel_simulation_job(self):
self.send_action(0, 0)
session = boto3.session.Session()
robomaker_client = session.client('robomaker', region_name=self.aws_region)
robomaker_client.cancel_simulation_job(
job=self.simulation_job_arn
)
def send_reward_to_cloudwatch(self, reward):
isLocal = os.environ.get("LOCAL")
if isLocal is None:
session = boto3.session.Session()
cloudwatch_client = session.client('cloudwatch', region_name=self.aws_region)
cloudwatch_client.put_metric_data(
MetricData=[
{
'MetricName': self.metric_name,
'Dimensions': [
{
'Name': 'TRAINING_JOB_ARN',
'Value': self.training_job_arn
},
],
'Unit': 'None',
'Value': reward
}, | Namespace=self.metric_namespace
)
else:
print("{}: {}".format(self.metric_name, reward))
class DeepRacerRacetrackCustomActionSpaceEnv(DeepRacerRacetrackEnv):
def __init__(self):
DeepRacerRacetrackEnv.__init__(self)
try:
# Try loading the custom model metadata (may or may not be present)
with open('custom_files/model_metadata.json', 'r') as f:
model_metadata = json.load(f)
self.json_actions = model_metadata['action_space']
logger.info("Loaded action space from file: {}".format(self.json_actions))
except Exception:
# Failed to load, fall back on the default action space
from markov.defaults import model_metadata
self.json_actions = model_metadata['action_space']
logger.info("Loaded default action space: {}".format(self.json_actions))
self.action_space = spaces.Discrete(len(self.json_actions))
def step(self, action):
self.steering_angle = float(self.json_actions[action]['steering_angle']) * math.pi / 180.0
self.speed = float(self.json_actions[action]['speed'])
self.action_taken = action
return super().step([self.steering_angle, self.speed]) | ], | random_line_split |
rabbitmq_server_relations.py | #!/usr/bin/python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import sys
import subprocess
import glob
try:
import yaml # flake8: noqa
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
import yaml # flake8: noqa
import rabbit_utils as rabbit
import ssl_utils
from lib.utils import (
chown, chmod,
is_newer,
)
from charmhelpers.contrib.hahelpers.cluster import (
is_clustered,
is_elected_leader
)
from charmhelpers.contrib.openstack.utils import (
is_unit_paused_set,
)
import charmhelpers.contrib.storage.linux.ceph as ceph
from charmhelpers.contrib.openstack.utils import save_script_rc
from charmhelpers.contrib.hardening.harden import harden
from charmhelpers.fetch import (
add_source,
apt_update,
apt_install,
)
from charmhelpers.core.hookenv import (
open_port,
close_port,
log,
ERROR,
INFO,
relation_get,
relation_clear,
relation_set,
relation_ids,
related_units,
service_name,
local_unit,
config,
is_relation_made,
Hooks,
UnregisteredHookError,
is_leader,
charm_dir,
status_set,
unit_private_ip,
)
from charmhelpers.core.host import (
cmp_pkgrevno,
rsync,
service_stop,
service_restart,
write_file,
)
from charmhelpers.contrib.charmsupport import nrpe
from charmhelpers.contrib.peerstorage import (
peer_echo,
peer_retrieve,
peer_store,
peer_store_and_set,
peer_retrieve_by_prefix,
leader_get,
)
hooks = Hooks()
SERVICE_NAME = os.getenv('JUJU_UNIT_NAME').split('/')[0]
POOL_NAME = SERVICE_NAME
RABBIT_DIR = '/var/lib/rabbitmq'
RABBIT_USER = 'rabbitmq'
RABBIT_GROUP = 'rabbitmq'
NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
SCRIPTS_DIR = '/usr/local/bin'
STATS_CRONFILE = '/etc/cron.d/rabbitmq-stats'
STATS_DATAFILE = os.path.join(RABBIT_DIR, 'data',
'{}_queue_stats.dat'
''.format(rabbit.get_unit_hostname()))
@hooks.hook('install.real')
@harden()
def install():
pre_install_hooks()
# NOTE(jamespage) install actually happens in config_changed hook
def configure_amqp(username, vhost, admin=False):
# get and update service password
password = rabbit.get_rabbit_password(username)
# update vhost
rabbit.create_vhost(vhost)
rabbit.create_user(username, password, admin)
rabbit.grant_permissions(username, vhost)
# NOTE(freyes): after rabbitmq-server 3.0 the method to define HA in the
# queues is different
# http://www.rabbitmq.com/blog/2012/11/19/breaking-things-with-rabbitmq-3-0
if config('mirroring-queues'):
rabbit.set_ha_mode(vhost, 'all')
return password
def update_clients():
"""Update amqp client relation hooks
IFF leader node is ready. Client nodes are considered ready once the leader
has already run amqp_changed.
"""
if rabbit.leader_node_is_ready() or rabbit.client_node_is_ready():
for rid in relation_ids('amqp'):
for unit in related_units(rid):
amqp_changed(relation_id=rid, remote_unit=unit)
@hooks.hook('amqp-relation-changed')
def amqp_changed(relation_id=None, remote_unit=None):
host_addr = rabbit.get_unit_ip()
# TODO: Simplify what the non-leader needs to do
if not is_leader() and rabbit.client_node_is_ready():
# NOTE(jamespage) clear relation to deal with data being
# removed from peer storage
relation_clear(relation_id)
# Each unit needs to set the db information; otherwise, if the unit
# holding the info dies, the settings die with it (Bug #1355848)
exc_list = ['hostname', 'private-address']
for rel_id in relation_ids('amqp'):
peerdb_settings = peer_retrieve_by_prefix(rel_id,
exc_list=exc_list)
peerdb_settings['hostname'] = host_addr
peerdb_settings['private-address'] = host_addr
if 'password' in peerdb_settings:
relation_set(relation_id=rel_id, **peerdb_settings)
log('amqp_changed(): Deferring amqp_changed'
' to the leader.')
# NOTE: active/active case
if config('prefer-ipv6'):
relation_settings = {'private-address': host_addr}
relation_set(relation_id=relation_id,
relation_settings=relation_settings)
return
# Bail if not completely ready
if not rabbit.leader_node_is_ready():
return
relation_settings = {}
settings = relation_get(rid=relation_id, unit=remote_unit)
singleset = set(['username', 'vhost'])
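# Clients either send a flat {'username': ..., 'vhost': ...} pair, or prefixed keys covering
# several services on one relation, e.g. (illustrative) {'nova_username': 'nova',
# 'nova_vhost': 'openstack'}, which the else branch below regroups into
# queues = {'nova': {'username': 'nova', 'vhost': 'openstack'}}.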
if singleset.issubset(settings):
if None in [settings['username'], settings['vhost']]:
log('amqp_changed(): Relation not ready.')
return
relation_settings['password'] = configure_amqp(
username=settings['username'],
vhost=settings['vhost'],
admin=settings.get('admin', False))
else:
queues = {}
for k, v in settings.iteritems():
amqp = k.split('_')[0]
x = '_'.join(k.split('_')[1:])
if amqp not in queues:
queues[amqp] = {}
queues[amqp][x] = v
for amqp in queues:
if singleset.issubset(queues[amqp]):
relation_settings[
'_'.join([amqp, 'password'])] = configure_amqp(
queues[amqp]['username'],
queues[amqp]['vhost'])
relation_settings['hostname'] = \
relation_settings['private-address'] = \
rabbit.get_unit_ip()
ssl_utils.configure_client_ssl(relation_settings)
if is_clustered():
relation_settings['clustered'] = 'true'
if is_relation_made('ha'):
# active/passive settings
relation_settings['vip'] = config('vip')
# or ha-vip-only to support active/active, but
# accessed via a VIP for older clients.
if config('ha-vip-only') is True:
relation_settings['ha-vip-only'] = 'true'
# set if need HA queues or not
if cmp_pkgrevno('rabbitmq-server', '3.0.1') < 0:
relation_settings['ha_queues'] = True
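# rabbitmq-server older than 3.0.1 has no policy support, so this flag tells clients to declare
# mirrored (x-ha-policy) queues themselves; newer servers get mirroring from the 'all' policy
# applied in configure_amqp() above.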
peer_store_and_set(relation_id=relation_id,
relation_settings=relation_settings)
@hooks.hook('cluster-relation-joined')
def cluster_joined(relation_id=None):
relation_settings = {
'hostname': rabbit.get_unit_hostname(),
'private-address':
rabbit.get_unit_ip(config_override=rabbit.CLUSTER_OVERRIDE_CONFIG,
interface=rabbit.CLUSTER_INTERFACE),
}
relation_set(relation_id=relation_id,
relation_settings=relation_settings)
if is_relation_made('ha') and \
config('ha-vip-only') is False:
log('hacluster relation is present, skipping native '
'rabbitmq cluster config.')
return
try:
if not is_leader():
log('Not the leader, deferring cookie propagation to leader')
return
except NotImplementedError:
if is_newer():
log('cluster_joined: Relation greater.')
return
if not os.path.isfile(rabbit.COOKIE_PATH):
|
if is_leader():
log('Leader peer_storing cookie', level=INFO)
cookie = open(rabbit.COOKIE_PATH, 'r').read().strip()
peer_store('cookie', cookie)
peer_store('leader_node_ip', unit_private_ip())
peer_store('leader_node_hostname', rabbit.get_unit_hostname())
@hooks.hook('cluster-relation-changed')
def cluster_changed(relation_id=None, remote_unit=None):
# Future travelers beware: ordering is significant
rdata = relation_get(rid=relation_id, unit=remote_unit)
# sync passwords
blacklist = ['hostname', 'private-address', 'public-address']
whitelist = [a for a in rdata.keys() if a not in blacklist]
peer_echo(includes=whitelist)
cookie = peer_retrieve('cookie')
if not cookie:
log('cluster_changed: cookie not yet set.', level=INFO)
return
if rdata:
hostname = rdata.get('hostname', None)
private_address = rdata.get('private-address', None)
if hostname and private_address:
rabbit.update_hosts_file({private_address: hostname})
# sync the cookie with peers if necessary
update_cookie()
if is_relation_made('ha') and \
config('ha-vip-only') is False:
log('hacluster relation is present, skipping native '
'rabbitmq cluster config.', level=INFO)
return
# cluster with node?
if not is_leader():
rabbit.cluster_with()
update_nrpe_checks()
def update_cookie(leaders_cookie=None):
# sync cookie
if leaders_cookie:
cookie = leaders_cookie
else:
cookie = peer_retrieve('cookie')
cookie_local = None
with open(rabbit.COOKIE_PATH, 'r') as f:
cookie_local = f.read().strip()
if cookie_local == cookie:
log('Cookie already synchronized with peer.')
return
service_stop('rabbitmq-server')
with open(rabbit.COOKIE_PATH, 'wb') as out:
out.write(cookie)
if not is_unit_paused_set():
service_restart('rabbitmq-server')
rabbit.wait_app()
@hooks.hook('ha-relation-joined')
def ha_joined():
corosync_bindiface = config('ha-bindiface')
corosync_mcastport = config('ha-mcastport')
vip = config('vip')
vip_iface = config('vip_iface')
vip_cidr = config('vip_cidr')
rbd_name = config('rbd-name')
vip_only = config('ha-vip-only')
if None in [corosync_bindiface, corosync_mcastport, vip, vip_iface,
vip_cidr, rbd_name] and vip_only is False:
log('Insufficient configuration data to configure hacluster.',
level=ERROR)
sys.exit(1)
elif None in [corosync_bindiface, corosync_mcastport, vip, vip_iface,
vip_cidr] and vip_only is True:
log('Insufficient configuration data to configure VIP-only hacluster.',
level=ERROR)
sys.exit(1)
if not is_relation_made('ceph', 'auth') and vip_only is False:
log('ha_joined: No ceph relation yet, deferring.')
return
name = '%s@localhost' % SERVICE_NAME
if rabbit.get_node_name() != name and vip_only is False:
log('Stopping rabbitmq-server.')
service_stop('rabbitmq-server')
rabbit.update_rmq_env_conf(hostname='%s@localhost' % SERVICE_NAME,
ipv6=config('prefer-ipv6'))
else:
log('Node name already set to %s.' % name)
relation_settings = {}
relation_settings['corosync_bindiface'] = corosync_bindiface
relation_settings['corosync_mcastport'] = corosync_mcastport
if vip_only is True:
relation_settings['resources'] = {
'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
}
relation_settings['resource_params'] = {
'res_rabbitmq_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
(vip, vip_cidr, vip_iface),
}
else:
relation_settings['resources'] = {
'res_rabbitmq_rbd': 'ocf:ceph:rbd',
'res_rabbitmq_fs': 'ocf:heartbeat:Filesystem',
'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
'res_rabbitmq-server': 'lsb:rabbitmq-server',
}
relation_settings['resource_params'] = {
'res_rabbitmq_rbd': 'params name="%s" pool="%s" user="%s" '
'secret="%s"' %
(rbd_name, POOL_NAME,
SERVICE_NAME, ceph._keyfile_path(
SERVICE_NAME)),
'res_rabbitmq_fs': 'params device="/dev/rbd/%s/%s" directory="%s" '
'fstype="ext4" op start start-delay="10s"' %
(POOL_NAME, rbd_name, RABBIT_DIR),
'res_rabbitmq_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
(vip, vip_cidr, vip_iface),
'res_rabbitmq-server': 'op start start-delay="5s" '
'op monitor interval="5s"',
}
relation_settings['groups'] = {
'grp_rabbitmq':
'res_rabbitmq_rbd res_rabbitmq_fs res_rabbitmq_vip '
'res_rabbitmq-server',
}
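        # NOTE: resources in a pacemaker group are started in the listed
        # order and kept on the same node, so the RBD map, filesystem, VIP
        # and rabbitmq-server fail over together.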
for rel_id in relation_ids('ha'):
relation_set(relation_id=rel_id, relation_settings=relation_settings)
env_vars = {
'OPENSTACK_PORT_EPMD': 4369,
'OPENSTACK_PORT_MCASTPORT': config('ha-mcastport'),
}
save_script_rc(**env_vars)
@hooks.hook('ha-relation-changed')
def ha_changed():
if not is_clustered():
return
vip = config('vip')
log('ha_changed(): We are now HA clustered. '
'Advertising our VIP (%s) to all AMQP clients.' %
vip)
@hooks.hook('ceph-relation-joined')
def ceph_joined():
log('Start Ceph Relation Joined')
# NOTE fixup
# utils.configure_source()
ceph.install()
log('Finish Ceph Relation Joined')
@hooks.hook('ceph-relation-changed')
def ceph_changed():
log('Start Ceph Relation Changed')
auth = relation_get('auth')
key = relation_get('key')
use_syslog = str(config('use-syslog')).lower()
if None in [auth, key]:
log('Missing key or auth in relation')
sys.exit(0)
ceph.configure(service=SERVICE_NAME, key=key, auth=auth,
use_syslog=use_syslog)
if is_elected_leader('res_rabbitmq_vip'):
rbd_img = config('rbd-name')
rbd_size = config('rbd-size')
sizemb = int(rbd_size.split('G')[0]) * 1024
blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img)
ceph.create_pool(service=SERVICE_NAME, name=POOL_NAME,
replicas=int(config('ceph-osd-replication-count')))
ceph.ensure_ceph_storage(service=SERVICE_NAME, pool=POOL_NAME,
rbd_img=rbd_img, sizemb=sizemb,
fstype='ext4', mount_point=RABBIT_DIR,
blk_device=blk_device,
system_services=['rabbitmq-server'])
subprocess.check_call(['chown', '-R', '%s:%s' %
(RABBIT_USER, RABBIT_GROUP), RABBIT_DIR])
else:
log('This is not the peer leader. Not configuring RBD.')
log('Stopping rabbitmq-server.')
service_stop('rabbitmq-server')
# If 'ha' relation has been made before the 'ceph' relation
# it is important to make sure the ha-relation data is being
# sent.
if is_relation_made('ha'):
log('*ha* relation exists. Triggering ha_joined()')
ha_joined()
else:
log('*ha* relation does not exist.')
log('Finish Ceph Relation Changed')
@hooks.hook('nrpe-external-master-relation-changed')
def update_nrpe_checks():
if os.path.isdir(NAGIOS_PLUGINS):
rsync(os.path.join(os.getenv('CHARM_DIR'), 'scripts',
'check_rabbitmq.py'),
os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq.py'))
rsync(os.path.join(os.getenv('CHARM_DIR'), 'scripts',
'check_rabbitmq_queues.py'),
os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq_queues.py'))
if config('stats_cron_schedule'):
script = os.path.join(SCRIPTS_DIR, 'collect_rabbitmq_stats.sh')
cronjob = "{} root {}\n".format(config('stats_cron_schedule'), script)
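        # Illustrative only: with stats_cron_schedule set to '*/5 * * * *'
        # (an assumed value) the rendered entry is
        #   */5 * * * * root /usr/local/bin/collect_rabbitmq_stats.sh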
rsync(os.path.join(charm_dir(), 'scripts',
'collect_rabbitmq_stats.sh'), script)
write_file(STATS_CRONFILE, cronjob)
elif os.path.isfile(STATS_CRONFILE):
os.remove(STATS_CRONFILE)
# Find out if nrpe set nagios_hostname
hostname = nrpe.get_nagios_hostname()
myunit = nrpe.get_nagios_unit_name()
# create unique user and vhost for each unit
current_unit = local_unit().replace('/', '-')
user = 'nagios-%s' % current_unit
vhost = 'nagios-%s' % current_unit
password = rabbit.get_rabbit_password(user, local=True)
rabbit.create_vhost(vhost)
rabbit.create_user(user, password)
rabbit.grant_permissions(user, vhost)
nrpe_compat = nrpe.NRPE(hostname=hostname)
nrpe_compat.add_check(
shortname=rabbit.RABBIT_USER,
description='Check RabbitMQ {%s}' % myunit,
check_cmd='{}/check_rabbitmq.py --user {} --password {} --vhost {}'
''.format(NAGIOS_PLUGINS, user, password, vhost)
)
if config('queue_thresholds'):
cmd = ""
# If value of queue_thresholds is incorrect we want the hook to fail
for item in yaml.safe_load(config('queue_thresholds')):
cmd += ' -c "{}" "{}" {} {}'.format(*item)
nrpe_compat.add_check(
shortname=rabbit.RABBIT_USER + '_queue',
description='Check RabbitMQ Queues',
check_cmd='{}/check_rabbitmq_queues.py{} {}'.format(
NAGIOS_PLUGINS, cmd, STATS_DATAFILE)
)
nrpe_compat.write()
@hooks.hook('upgrade-charm')
@harden()
def upgrade_charm():
pre_install_hooks()
add_source(config('source'), config('key'))
apt_update(fatal=True)
    # Ensure older passwd files in /var/lib/juju are moved to
    # /var/lib/charm/<service> so they end up replicated via the peer
    # relation if clustered
for f in [f for f in os.listdir('/var/lib/juju')
if os.path.isfile(os.path.join('/var/lib/juju', f))]:
if f.endswith('.passwd'):
s = os.path.join('/var/lib/juju', f)
d = os.path.join('/var/lib/charm/{}'.format(service_name()), f)
log('upgrade_charm: Migrating stored passwd'
' from %s to %s.' % (s, d))
shutil.move(s, d)
if is_elected_leader('res_rabbitmq_vip'):
rabbit.migrate_passwords_to_peer_relation()
# explicitly update buggy file name naigos.passwd
old = os.path.join('var/lib/rabbitmq', 'naigos.passwd')
if os.path.isfile(old):
new = os.path.join('var/lib/rabbitmq', 'nagios.passwd')
shutil.move(old, new)
MAN_PLUGIN = 'rabbitmq_management'
@hooks.hook('config-changed')
@rabbit.restart_on_change(rabbit.restart_map())
@harden()
def config_changed():
# Update hosts with this unit's information
rabbit.update_hosts_file(
{rabbit.get_unit_ip(config_override=rabbit.CLUSTER_OVERRIDE_CONFIG,
interface=rabbit.CLUSTER_INTERFACE):
rabbit.get_unit_hostname()})
# Add archive source if provided
add_source(config('source'), config('key'))
apt_update(fatal=True)
# Copy in defaults file for updated ulimits
shutil.copyfile(
'templates/rabbitmq-server',
'/etc/default/rabbitmq-server')
# Install packages to ensure any changes to source
# result in an upgrade if applicable.
status_set('maintenance', 'Installing/upgrading RabbitMQ packages')
apt_install(rabbit.PACKAGES, fatal=True)
open_port(5672)
chown(RABBIT_DIR, rabbit.RABBIT_USER, rabbit.RABBIT_USER)
chmod(RABBIT_DIR, 0o775)
if config('management_plugin') is True:
rabbit.enable_plugin(MAN_PLUGIN)
open_port(rabbit.get_managment_port())
else:
rabbit.disable_plugin(MAN_PLUGIN)
close_port(rabbit.get_managment_port())
    # LY: Close the old management port since it may have been opened in a
# previous version of the charm. close_port is a noop if the port
# is not open
close_port(55672)
rabbit.ConfigRenderer(
rabbit.CONFIG_FILES).write_all()
# Only set values if this is the leader
if not is_leader():
return
rabbit.set_all_mirroring_queues(config('mirroring-queues'))
if is_relation_made("ha"):
ha_is_active_active = config("ha-vip-only")
if ha_is_active_active:
update_nrpe_checks()
else:
if is_elected_leader('res_rabbitmq_vip'):
update_nrpe_checks()
else:
log("hacluster relation is present but this node is not active"
" skipping update nrpe checks")
else:
update_nrpe_checks()
# Update cluster in case min-cluster-size has changed
for rid in relation_ids('cluster'):
for unit in related_units(rid):
cluster_changed(relation_id=rid, remote_unit=unit)
@hooks.hook('leader-elected')
def leader_elected():
status_set("maintenance", "{} is the elected leader".format(local_unit()))
@hooks.hook('leader-settings-changed')
def leader_settings_changed():
if not os.path.exists(rabbit.RABBITMQ_CTL):
log('Deferring cookie configuration, RabbitMQ not yet installed')
return
# Get cookie from leader, update cookie locally and
# force cluster-relation-changed hooks to run on peers
cookie = leader_get(attribute='cookie')
if cookie:
update_cookie(leaders_cookie=cookie)
# Force cluster-relation-changed hooks to run on peers
# This will precipitate peer clustering
# Without this a chicken and egg scenario prevails when
# using LE and peerstorage
for rid in relation_ids('cluster'):
relation_set(relation_id=rid, relation_settings={'cookie': cookie})
def pre_install_hooks():
for f in glob.glob('exec.d/*/charm-pre-install'):
if os.path.isfile(f) and os.access(f, os.X_OK):
subprocess.check_call(['sh', '-c', f])
@hooks.hook('update-status')
@harden()
def update_status():
log('Updating status.')
if __name__ == '__main__':
try:
hooks.execute(sys.argv)
except UnregisteredHookError as e:
log('Unknown hook {} - skipping.'.format(e))
# Gated client updates
update_clients()
rabbit.assess_status(rabbit.ConfigRenderer(rabbit.CONFIG_FILES))
| log('erlang cookie missing from %s' % rabbit.COOKIE_PATH,
level=ERROR)
return | conditional_block |
rabbitmq_server_relations.py | #!/usr/bin/python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import sys
import subprocess
import glob
try:
import yaml # flake8: noqa
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
import yaml # flake8: noqa
import rabbit_utils as rabbit
import ssl_utils
from lib.utils import (
chown, chmod,
is_newer,
)
from charmhelpers.contrib.hahelpers.cluster import (
is_clustered,
is_elected_leader
)
from charmhelpers.contrib.openstack.utils import (
is_unit_paused_set,
)
import charmhelpers.contrib.storage.linux.ceph as ceph
from charmhelpers.contrib.openstack.utils import save_script_rc
from charmhelpers.contrib.hardening.harden import harden
from charmhelpers.fetch import (
add_source,
apt_update,
apt_install,
)
from charmhelpers.core.hookenv import (
open_port,
close_port,
log,
ERROR,
INFO,
relation_get,
relation_clear,
relation_set,
relation_ids,
related_units,
service_name,
local_unit,
config,
is_relation_made,
Hooks,
UnregisteredHookError,
is_leader,
charm_dir,
status_set,
unit_private_ip,
)
from charmhelpers.core.host import (
cmp_pkgrevno,
rsync,
service_stop,
service_restart,
write_file,
)
from charmhelpers.contrib.charmsupport import nrpe
from charmhelpers.contrib.peerstorage import (
peer_echo,
peer_retrieve,
peer_store,
peer_store_and_set,
peer_retrieve_by_prefix,
leader_get,
)
hooks = Hooks()
SERVICE_NAME = os.getenv('JUJU_UNIT_NAME').split('/')[0]
POOL_NAME = SERVICE_NAME
RABBIT_DIR = '/var/lib/rabbitmq'
RABBIT_USER = 'rabbitmq'
RABBIT_GROUP = 'rabbitmq'
NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
SCRIPTS_DIR = '/usr/local/bin'
STATS_CRONFILE = '/etc/cron.d/rabbitmq-stats'
STATS_DATAFILE = os.path.join(RABBIT_DIR, 'data',
'{}_queue_stats.dat'
''.format(rabbit.get_unit_hostname()))
@hooks.hook('install.real')
@harden()
def install():
pre_install_hooks()
# NOTE(jamespage) install actually happens in config_changed hook
def configure_amqp(username, vhost, admin=False):
# get and update service password
|
def update_clients():
"""Update amqp client relation hooks
IFF leader node is ready. Client nodes are considered ready once the leader
has already run amqp_changed.
"""
if rabbit.leader_node_is_ready() or rabbit.client_node_is_ready():
for rid in relation_ids('amqp'):
for unit in related_units(rid):
amqp_changed(relation_id=rid, remote_unit=unit)
@hooks.hook('amqp-relation-changed')
def amqp_changed(relation_id=None, remote_unit=None):
host_addr = rabbit.get_unit_ip()
# TODO: Simplify what the non-leader needs to do
if not is_leader() and rabbit.client_node_is_ready():
# NOTE(jamespage) clear relation to deal with data being
# removed from peer storage
relation_clear(relation_id)
        # Each unit needs to set the db information; otherwise, if the unit
        # holding the info dies, the settings die with it (Bug #1355848)
exc_list = ['hostname', 'private-address']
for rel_id in relation_ids('amqp'):
peerdb_settings = peer_retrieve_by_prefix(rel_id,
exc_list=exc_list)
peerdb_settings['hostname'] = host_addr
peerdb_settings['private-address'] = host_addr
if 'password' in peerdb_settings:
relation_set(relation_id=rel_id, **peerdb_settings)
log('amqp_changed(): Deferring amqp_changed'
' to the leader.')
# NOTE: active/active case
if config('prefer-ipv6'):
relation_settings = {'private-address': host_addr}
relation_set(relation_id=relation_id,
relation_settings=relation_settings)
return
# Bail if not completely ready
if not rabbit.leader_node_is_ready():
return
relation_settings = {}
settings = relation_get(rid=relation_id, unit=remote_unit)
singleset = set(['username', 'vhost'])
if singleset.issubset(settings):
if None in [settings['username'], settings['vhost']]:
log('amqp_changed(): Relation not ready.')
return
relation_settings['password'] = configure_amqp(
username=settings['username'],
vhost=settings['vhost'],
admin=settings.get('admin', False))
else:
queues = {}
for k, v in settings.iteritems():
amqp = k.split('_')[0]
x = '_'.join(k.split('_')[1:])
if amqp not in queues:
queues[amqp] = {}
queues[amqp][x] = v
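        # Illustrative example: a client sending nova_username, nova_vhost,
        # cinder_username and cinder_vhost (assumed key names) is grouped as
        # queues = {'nova': {...}, 'cinder': {...}}, and each prefix then
        # gets its own <prefix>_password in the reply below.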
for amqp in queues:
if singleset.issubset(queues[amqp]):
relation_settings[
'_'.join([amqp, 'password'])] = configure_amqp(
queues[amqp]['username'],
queues[amqp]['vhost'])
relation_settings['hostname'] = \
relation_settings['private-address'] = \
rabbit.get_unit_ip()
ssl_utils.configure_client_ssl(relation_settings)
if is_clustered():
relation_settings['clustered'] = 'true'
if is_relation_made('ha'):
# active/passive settings
relation_settings['vip'] = config('vip')
# or ha-vip-only to support active/active, but
# accessed via a VIP for older clients.
if config('ha-vip-only') is True:
relation_settings['ha-vip-only'] = 'true'
# set if need HA queues or not
if cmp_pkgrevno('rabbitmq-server', '3.0.1') < 0:
relation_settings['ha_queues'] = True
peer_store_and_set(relation_id=relation_id,
relation_settings=relation_settings)
@hooks.hook('cluster-relation-joined')
def cluster_joined(relation_id=None):
relation_settings = {
'hostname': rabbit.get_unit_hostname(),
'private-address':
rabbit.get_unit_ip(config_override=rabbit.CLUSTER_OVERRIDE_CONFIG,
interface=rabbit.CLUSTER_INTERFACE),
}
relation_set(relation_id=relation_id,
relation_settings=relation_settings)
if is_relation_made('ha') and \
config('ha-vip-only') is False:
log('hacluster relation is present, skipping native '
'rabbitmq cluster config.')
return
try:
if not is_leader():
log('Not the leader, deferring cookie propagation to leader')
return
except NotImplementedError:
if is_newer():
log('cluster_joined: Relation greater.')
return
if not os.path.isfile(rabbit.COOKIE_PATH):
log('erlang cookie missing from %s' % rabbit.COOKIE_PATH,
level=ERROR)
return
if is_leader():
log('Leader peer_storing cookie', level=INFO)
cookie = open(rabbit.COOKIE_PATH, 'r').read().strip()
peer_store('cookie', cookie)
peer_store('leader_node_ip', unit_private_ip())
peer_store('leader_node_hostname', rabbit.get_unit_hostname())
@hooks.hook('cluster-relation-changed')
def cluster_changed(relation_id=None, remote_unit=None):
    # Future travelers beware: ordering is significant
rdata = relation_get(rid=relation_id, unit=remote_unit)
# sync passwords
blacklist = ['hostname', 'private-address', 'public-address']
whitelist = [a for a in rdata.keys() if a not in blacklist]
peer_echo(includes=whitelist)
cookie = peer_retrieve('cookie')
if not cookie:
log('cluster_changed: cookie not yet set.', level=INFO)
return
if rdata:
hostname = rdata.get('hostname', None)
private_address = rdata.get('private-address', None)
if hostname and private_address:
rabbit.update_hosts_file({private_address: hostname})
# sync the cookie with peers if necessary
update_cookie()
if is_relation_made('ha') and \
config('ha-vip-only') is False:
log('hacluster relation is present, skipping native '
'rabbitmq cluster config.', level=INFO)
return
# cluster with node?
if not is_leader():
rabbit.cluster_with()
update_nrpe_checks()
def update_cookie(leaders_cookie=None):
# sync cookie
if leaders_cookie:
cookie = leaders_cookie
else:
cookie = peer_retrieve('cookie')
cookie_local = None
with open(rabbit.COOKIE_PATH, 'r') as f:
cookie_local = f.read().strip()
if cookie_local == cookie:
log('Cookie already synchronized with peer.')
return
service_stop('rabbitmq-server')
with open(rabbit.COOKIE_PATH, 'wb') as out:
out.write(cookie)
if not is_unit_paused_set():
service_restart('rabbitmq-server')
rabbit.wait_app()
@hooks.hook('ha-relation-joined')
def ha_joined():
corosync_bindiface = config('ha-bindiface')
corosync_mcastport = config('ha-mcastport')
vip = config('vip')
vip_iface = config('vip_iface')
vip_cidr = config('vip_cidr')
rbd_name = config('rbd-name')
vip_only = config('ha-vip-only')
if None in [corosync_bindiface, corosync_mcastport, vip, vip_iface,
vip_cidr, rbd_name] and vip_only is False:
log('Insufficient configuration data to configure hacluster.',
level=ERROR)
sys.exit(1)
elif None in [corosync_bindiface, corosync_mcastport, vip, vip_iface,
vip_cidr] and vip_only is True:
log('Insufficient configuration data to configure VIP-only hacluster.',
level=ERROR)
sys.exit(1)
if not is_relation_made('ceph', 'auth') and vip_only is False:
log('ha_joined: No ceph relation yet, deferring.')
return
name = '%s@localhost' % SERVICE_NAME
if rabbit.get_node_name() != name and vip_only is False:
log('Stopping rabbitmq-server.')
service_stop('rabbitmq-server')
rabbit.update_rmq_env_conf(hostname='%s@localhost' % SERVICE_NAME,
ipv6=config('prefer-ipv6'))
else:
log('Node name already set to %s.' % name)
relation_settings = {}
relation_settings['corosync_bindiface'] = corosync_bindiface
relation_settings['corosync_mcastport'] = corosync_mcastport
if vip_only is True:
relation_settings['resources'] = {
'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
}
relation_settings['resource_params'] = {
'res_rabbitmq_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
(vip, vip_cidr, vip_iface),
}
else:
relation_settings['resources'] = {
'res_rabbitmq_rbd': 'ocf:ceph:rbd',
'res_rabbitmq_fs': 'ocf:heartbeat:Filesystem',
'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
'res_rabbitmq-server': 'lsb:rabbitmq-server',
}
relation_settings['resource_params'] = {
'res_rabbitmq_rbd': 'params name="%s" pool="%s" user="%s" '
'secret="%s"' %
(rbd_name, POOL_NAME,
SERVICE_NAME, ceph._keyfile_path(
SERVICE_NAME)),
'res_rabbitmq_fs': 'params device="/dev/rbd/%s/%s" directory="%s" '
'fstype="ext4" op start start-delay="10s"' %
(POOL_NAME, rbd_name, RABBIT_DIR),
'res_rabbitmq_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
(vip, vip_cidr, vip_iface),
'res_rabbitmq-server': 'op start start-delay="5s" '
'op monitor interval="5s"',
}
relation_settings['groups'] = {
'grp_rabbitmq':
'res_rabbitmq_rbd res_rabbitmq_fs res_rabbitmq_vip '
'res_rabbitmq-server',
}
for rel_id in relation_ids('ha'):
relation_set(relation_id=rel_id, relation_settings=relation_settings)
env_vars = {
'OPENSTACK_PORT_EPMD': 4369,
'OPENSTACK_PORT_MCASTPORT': config('ha-mcastport'),
}
save_script_rc(**env_vars)
@hooks.hook('ha-relation-changed')
def ha_changed():
if not is_clustered():
return
vip = config('vip')
log('ha_changed(): We are now HA clustered. '
'Advertising our VIP (%s) to all AMQP clients.' %
vip)
@hooks.hook('ceph-relation-joined')
def ceph_joined():
log('Start Ceph Relation Joined')
# NOTE fixup
# utils.configure_source()
ceph.install()
log('Finish Ceph Relation Joined')
@hooks.hook('ceph-relation-changed')
def ceph_changed():
log('Start Ceph Relation Changed')
auth = relation_get('auth')
key = relation_get('key')
use_syslog = str(config('use-syslog')).lower()
if None in [auth, key]:
log('Missing key or auth in relation')
sys.exit(0)
ceph.configure(service=SERVICE_NAME, key=key, auth=auth,
use_syslog=use_syslog)
if is_elected_leader('res_rabbitmq_vip'):
rbd_img = config('rbd-name')
rbd_size = config('rbd-size')
sizemb = int(rbd_size.split('G')[0]) * 1024
blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img)
ceph.create_pool(service=SERVICE_NAME, name=POOL_NAME,
replicas=int(config('ceph-osd-replication-count')))
ceph.ensure_ceph_storage(service=SERVICE_NAME, pool=POOL_NAME,
rbd_img=rbd_img, sizemb=sizemb,
fstype='ext4', mount_point=RABBIT_DIR,
blk_device=blk_device,
system_services=['rabbitmq-server'])
subprocess.check_call(['chown', '-R', '%s:%s' %
(RABBIT_USER, RABBIT_GROUP), RABBIT_DIR])
else:
log('This is not the peer leader. Not configuring RBD.')
log('Stopping rabbitmq-server.')
service_stop('rabbitmq-server')
# If 'ha' relation has been made before the 'ceph' relation
# it is important to make sure the ha-relation data is being
# sent.
if is_relation_made('ha'):
log('*ha* relation exists. Triggering ha_joined()')
ha_joined()
else:
log('*ha* relation does not exist.')
log('Finish Ceph Relation Changed')
@hooks.hook('nrpe-external-master-relation-changed')
def update_nrpe_checks():
if os.path.isdir(NAGIOS_PLUGINS):
rsync(os.path.join(os.getenv('CHARM_DIR'), 'scripts',
'check_rabbitmq.py'),
os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq.py'))
rsync(os.path.join(os.getenv('CHARM_DIR'), 'scripts',
'check_rabbitmq_queues.py'),
os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq_queues.py'))
if config('stats_cron_schedule'):
script = os.path.join(SCRIPTS_DIR, 'collect_rabbitmq_stats.sh')
cronjob = "{} root {}\n".format(config('stats_cron_schedule'), script)
rsync(os.path.join(charm_dir(), 'scripts',
'collect_rabbitmq_stats.sh'), script)
write_file(STATS_CRONFILE, cronjob)
elif os.path.isfile(STATS_CRONFILE):
os.remove(STATS_CRONFILE)
# Find out if nrpe set nagios_hostname
hostname = nrpe.get_nagios_hostname()
myunit = nrpe.get_nagios_unit_name()
# create unique user and vhost for each unit
current_unit = local_unit().replace('/', '-')
user = 'nagios-%s' % current_unit
vhost = 'nagios-%s' % current_unit
password = rabbit.get_rabbit_password(user, local=True)
rabbit.create_vhost(vhost)
rabbit.create_user(user, password)
rabbit.grant_permissions(user, vhost)
nrpe_compat = nrpe.NRPE(hostname=hostname)
nrpe_compat.add_check(
shortname=rabbit.RABBIT_USER,
description='Check RabbitMQ {%s}' % myunit,
check_cmd='{}/check_rabbitmq.py --user {} --password {} --vhost {}'
''.format(NAGIOS_PLUGINS, user, password, vhost)
)
if config('queue_thresholds'):
cmd = ""
# If value of queue_thresholds is incorrect we want the hook to fail
for item in yaml.safe_load(config('queue_thresholds')):
cmd += ' -c "{}" "{}" {} {}'.format(*item)
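        # Illustrative (assumed) value: queue_thresholds: "[['*', '*', 100, 200]]"
        # expands to: -c "*" "*" 100 200  (vhost, queue, warn, crit).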
nrpe_compat.add_check(
shortname=rabbit.RABBIT_USER + '_queue',
description='Check RabbitMQ Queues',
check_cmd='{}/check_rabbitmq_queues.py{} {}'.format(
NAGIOS_PLUGINS, cmd, STATS_DATAFILE)
)
nrpe_compat.write()
@hooks.hook('upgrade-charm')
@harden()
def upgrade_charm():
pre_install_hooks()
add_source(config('source'), config('key'))
apt_update(fatal=True)
    # Ensure older passwd files in /var/lib/juju are moved to
    # /var/lib/charm/<service> so they end up replicated via the peer
    # relation if clustered
for f in [f for f in os.listdir('/var/lib/juju')
if os.path.isfile(os.path.join('/var/lib/juju', f))]:
if f.endswith('.passwd'):
s = os.path.join('/var/lib/juju', f)
d = os.path.join('/var/lib/charm/{}'.format(service_name()), f)
log('upgrade_charm: Migrating stored passwd'
' from %s to %s.' % (s, d))
shutil.move(s, d)
if is_elected_leader('res_rabbitmq_vip'):
rabbit.migrate_passwords_to_peer_relation()
# explicitly update buggy file name naigos.passwd
old = os.path.join('var/lib/rabbitmq', 'naigos.passwd')
if os.path.isfile(old):
new = os.path.join('var/lib/rabbitmq', 'nagios.passwd')
shutil.move(old, new)
MAN_PLUGIN = 'rabbitmq_management'
@hooks.hook('config-changed')
@rabbit.restart_on_change(rabbit.restart_map())
@harden()
def config_changed():
# Update hosts with this unit's information
rabbit.update_hosts_file(
{rabbit.get_unit_ip(config_override=rabbit.CLUSTER_OVERRIDE_CONFIG,
interface=rabbit.CLUSTER_INTERFACE):
rabbit.get_unit_hostname()})
# Add archive source if provided
add_source(config('source'), config('key'))
apt_update(fatal=True)
# Copy in defaults file for updated ulimits
shutil.copyfile(
'templates/rabbitmq-server',
'/etc/default/rabbitmq-server')
# Install packages to ensure any changes to source
# result in an upgrade if applicable.
status_set('maintenance', 'Installing/upgrading RabbitMQ packages')
apt_install(rabbit.PACKAGES, fatal=True)
open_port(5672)
chown(RABBIT_DIR, rabbit.RABBIT_USER, rabbit.RABBIT_USER)
chmod(RABBIT_DIR, 0o775)
if config('management_plugin') is True:
rabbit.enable_plugin(MAN_PLUGIN)
open_port(rabbit.get_managment_port())
else:
rabbit.disable_plugin(MAN_PLUGIN)
close_port(rabbit.get_managment_port())
    # LY: Close the old management port since it may have been opened in a
# previous version of the charm. close_port is a noop if the port
# is not open
close_port(55672)
rabbit.ConfigRenderer(
rabbit.CONFIG_FILES).write_all()
# Only set values if this is the leader
if not is_leader():
return
rabbit.set_all_mirroring_queues(config('mirroring-queues'))
if is_relation_made("ha"):
ha_is_active_active = config("ha-vip-only")
if ha_is_active_active:
update_nrpe_checks()
else:
if is_elected_leader('res_rabbitmq_vip'):
update_nrpe_checks()
else:
log("hacluster relation is present but this node is not active"
" skipping update nrpe checks")
else:
update_nrpe_checks()
# Update cluster in case min-cluster-size has changed
for rid in relation_ids('cluster'):
for unit in related_units(rid):
cluster_changed(relation_id=rid, remote_unit=unit)
@hooks.hook('leader-elected')
def leader_elected():
status_set("maintenance", "{} is the elected leader".format(local_unit()))
@hooks.hook('leader-settings-changed')
def leader_settings_changed():
if not os.path.exists(rabbit.RABBITMQ_CTL):
log('Deferring cookie configuration, RabbitMQ not yet installed')
return
# Get cookie from leader, update cookie locally and
# force cluster-relation-changed hooks to run on peers
cookie = leader_get(attribute='cookie')
if cookie:
update_cookie(leaders_cookie=cookie)
# Force cluster-relation-changed hooks to run on peers
# This will precipitate peer clustering
# Without this a chicken and egg scenario prevails when
# using LE and peerstorage
for rid in relation_ids('cluster'):
relation_set(relation_id=rid, relation_settings={'cookie': cookie})
def pre_install_hooks():
for f in glob.glob('exec.d/*/charm-pre-install'):
if os.path.isfile(f) and os.access(f, os.X_OK):
subprocess.check_call(['sh', '-c', f])
@hooks.hook('update-status')
@harden()
def update_status():
log('Updating status.')
if __name__ == '__main__':
try:
hooks.execute(sys.argv)
except UnregisteredHookError as e:
log('Unknown hook {} - skipping.'.format(e))
# Gated client updates
update_clients()
rabbit.assess_status(rabbit.ConfigRenderer(rabbit.CONFIG_FILES))
| password = rabbit.get_rabbit_password(username)
# update vhost
rabbit.create_vhost(vhost)
rabbit.create_user(username, password, admin)
rabbit.grant_permissions(username, vhost)
# NOTE(freyes): after rabbitmq-server 3.0 the method to define HA in the
# queues is different
# http://www.rabbitmq.com/blog/2012/11/19/breaking-things-with-rabbitmq-3-0
if config('mirroring-queues'):
rabbit.set_ha_mode(vhost, 'all')
return password | identifier_body |
rabbitmq_server_relations.py | #!/usr/bin/python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import sys
import subprocess
import glob
try:
import yaml # flake8: noqa
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
import yaml # flake8: noqa
import rabbit_utils as rabbit
import ssl_utils
from lib.utils import (
chown, chmod,
is_newer,
)
from charmhelpers.contrib.hahelpers.cluster import (
is_clustered,
is_elected_leader
)
from charmhelpers.contrib.openstack.utils import (
is_unit_paused_set,
)
import charmhelpers.contrib.storage.linux.ceph as ceph
from charmhelpers.contrib.openstack.utils import save_script_rc
from charmhelpers.contrib.hardening.harden import harden
from charmhelpers.fetch import (
add_source,
apt_update,
apt_install,
)
from charmhelpers.core.hookenv import (
open_port,
close_port,
log,
ERROR,
INFO,
relation_get,
relation_clear,
relation_set,
relation_ids,
related_units,
service_name,
local_unit,
config,
is_relation_made,
Hooks,
UnregisteredHookError,
is_leader,
charm_dir,
status_set,
unit_private_ip,
)
from charmhelpers.core.host import (
cmp_pkgrevno,
rsync,
service_stop,
service_restart,
write_file,
)
from charmhelpers.contrib.charmsupport import nrpe
from charmhelpers.contrib.peerstorage import (
peer_echo,
peer_retrieve,
peer_store,
peer_store_and_set,
peer_retrieve_by_prefix,
leader_get,
)
hooks = Hooks()
SERVICE_NAME = os.getenv('JUJU_UNIT_NAME').split('/')[0]
POOL_NAME = SERVICE_NAME
RABBIT_DIR = '/var/lib/rabbitmq'
RABBIT_USER = 'rabbitmq'
RABBIT_GROUP = 'rabbitmq'
NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
SCRIPTS_DIR = '/usr/local/bin'
STATS_CRONFILE = '/etc/cron.d/rabbitmq-stats'
STATS_DATAFILE = os.path.join(RABBIT_DIR, 'data',
'{}_queue_stats.dat'
''.format(rabbit.get_unit_hostname()))
@hooks.hook('install.real')
@harden()
def install():
pre_install_hooks()
# NOTE(jamespage) install actually happens in config_changed hook
def configure_amqp(username, vhost, admin=False):
# get and update service password
password = rabbit.get_rabbit_password(username)
# update vhost
rabbit.create_vhost(vhost)
rabbit.create_user(username, password, admin)
rabbit.grant_permissions(username, vhost)
# NOTE(freyes): after rabbitmq-server 3.0 the method to define HA in the
# queues is different
# http://www.rabbitmq.com/blog/2012/11/19/breaking-things-with-rabbitmq-3-0
if config('mirroring-queues'):
rabbit.set_ha_mode(vhost, 'all')
return password
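# Typical (illustrative) call from amqp_changed:
#   password = configure_amqp(username='glance', vhost='openstack')
# set_ha_mode() above presumably applies an 'ha-mode: all' policy to the
# vhost, replacing the pre-3.0 per-queue x-ha-policy argument.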
def update_clients():
"""Update amqp client relation hooks
IFF leader node is ready. Client nodes are considered ready once the leader
has already run amqp_changed.
"""
if rabbit.leader_node_is_ready() or rabbit.client_node_is_ready():
for rid in relation_ids('amqp'):
for unit in related_units(rid):
amqp_changed(relation_id=rid, remote_unit=unit)
@hooks.hook('amqp-relation-changed')
def amqp_changed(relation_id=None, remote_unit=None):
host_addr = rabbit.get_unit_ip()
# TODO: Simplify what the non-leader needs to do
if not is_leader() and rabbit.client_node_is_ready():
# NOTE(jamespage) clear relation to deal with data being
# removed from peer storage
relation_clear(relation_id)
        # Each unit needs to set the db information; otherwise, if the unit
        # holding the info dies, the settings die with it (Bug #1355848)
exc_list = ['hostname', 'private-address']
for rel_id in relation_ids('amqp'):
peerdb_settings = peer_retrieve_by_prefix(rel_id,
exc_list=exc_list)
peerdb_settings['hostname'] = host_addr
peerdb_settings['private-address'] = host_addr
if 'password' in peerdb_settings:
relation_set(relation_id=rel_id, **peerdb_settings)
log('amqp_changed(): Deferring amqp_changed'
' to the leader.')
# NOTE: active/active case
if config('prefer-ipv6'):
relation_settings = {'private-address': host_addr}
relation_set(relation_id=relation_id,
relation_settings=relation_settings)
return
# Bail if not completely ready
if not rabbit.leader_node_is_ready():
return
relation_settings = {}
settings = relation_get(rid=relation_id, unit=remote_unit)
singleset = set(['username', 'vhost'])
if singleset.issubset(settings):
if None in [settings['username'], settings['vhost']]:
log('amqp_changed(): Relation not ready.')
return
relation_settings['password'] = configure_amqp(
username=settings['username'],
vhost=settings['vhost'],
admin=settings.get('admin', False))
else:
queues = {}
for k, v in settings.iteritems():
amqp = k.split('_')[0]
x = '_'.join(k.split('_')[1:])
if amqp not in queues:
queues[amqp] = {}
queues[amqp][x] = v
for amqp in queues:
if singleset.issubset(queues[amqp]):
relation_settings[
'_'.join([amqp, 'password'])] = configure_amqp(
queues[amqp]['username'],
queues[amqp]['vhost'])
relation_settings['hostname'] = \
relation_settings['private-address'] = \
rabbit.get_unit_ip()
ssl_utils.configure_client_ssl(relation_settings)
if is_clustered():
relation_settings['clustered'] = 'true'
if is_relation_made('ha'):
# active/passive settings
relation_settings['vip'] = config('vip')
# or ha-vip-only to support active/active, but
# accessed via a VIP for older clients.
if config('ha-vip-only') is True:
relation_settings['ha-vip-only'] = 'true'
# set if need HA queues or not
if cmp_pkgrevno('rabbitmq-server', '3.0.1') < 0:
relation_settings['ha_queues'] = True
peer_store_and_set(relation_id=relation_id,
relation_settings=relation_settings)
@hooks.hook('cluster-relation-joined')
def cluster_joined(relation_id=None):
relation_settings = {
'hostname': rabbit.get_unit_hostname(),
'private-address':
rabbit.get_unit_ip(config_override=rabbit.CLUSTER_OVERRIDE_CONFIG,
interface=rabbit.CLUSTER_INTERFACE),
}
relation_set(relation_id=relation_id,
relation_settings=relation_settings)
if is_relation_made('ha') and \
config('ha-vip-only') is False:
log('hacluster relation is present, skipping native '
'rabbitmq cluster config.')
return
try:
if not is_leader():
log('Not the leader, deferring cookie propagation to leader')
return
except NotImplementedError:
if is_newer():
log('cluster_joined: Relation greater.')
return
if not os.path.isfile(rabbit.COOKIE_PATH):
log('erlang cookie missing from %s' % rabbit.COOKIE_PATH,
level=ERROR)
return
if is_leader():
log('Leader peer_storing cookie', level=INFO)
cookie = open(rabbit.COOKIE_PATH, 'r').read().strip()
peer_store('cookie', cookie)
peer_store('leader_node_ip', unit_private_ip())
peer_store('leader_node_hostname', rabbit.get_unit_hostname())
@hooks.hook('cluster-relation-changed')
def cluster_changed(relation_id=None, remote_unit=None):
    # Future travelers beware: ordering is significant
rdata = relation_get(rid=relation_id, unit=remote_unit)
# sync passwords
blacklist = ['hostname', 'private-address', 'public-address']
whitelist = [a for a in rdata.keys() if a not in blacklist]
peer_echo(includes=whitelist)
cookie = peer_retrieve('cookie')
if not cookie:
log('cluster_changed: cookie not yet set.', level=INFO)
return
if rdata:
hostname = rdata.get('hostname', None)
private_address = rdata.get('private-address', None)
if hostname and private_address:
rabbit.update_hosts_file({private_address: hostname})
# sync the cookie with peers if necessary
update_cookie()
if is_relation_made('ha') and \
config('ha-vip-only') is False:
log('hacluster relation is present, skipping native '
'rabbitmq cluster config.', level=INFO)
return
# cluster with node?
if not is_leader():
rabbit.cluster_with()
update_nrpe_checks()
def update_cookie(leaders_cookie=None):
# sync cookie
if leaders_cookie:
cookie = leaders_cookie
else:
cookie = peer_retrieve('cookie')
cookie_local = None
with open(rabbit.COOKIE_PATH, 'r') as f:
cookie_local = f.read().strip()
if cookie_local == cookie:
log('Cookie already synchronized with peer.')
return
service_stop('rabbitmq-server')
with open(rabbit.COOKIE_PATH, 'wb') as out:
out.write(cookie)
if not is_unit_paused_set():
service_restart('rabbitmq-server')
rabbit.wait_app()
@hooks.hook('ha-relation-joined')
def ha_joined():
corosync_bindiface = config('ha-bindiface')
corosync_mcastport = config('ha-mcastport')
vip = config('vip')
vip_iface = config('vip_iface')
vip_cidr = config('vip_cidr')
rbd_name = config('rbd-name')
vip_only = config('ha-vip-only')
if None in [corosync_bindiface, corosync_mcastport, vip, vip_iface,
vip_cidr, rbd_name] and vip_only is False:
log('Insufficient configuration data to configure hacluster.',
level=ERROR)
sys.exit(1)
elif None in [corosync_bindiface, corosync_mcastport, vip, vip_iface,
vip_cidr] and vip_only is True:
log('Insufficient configuration data to configure VIP-only hacluster.',
level=ERROR)
sys.exit(1)
if not is_relation_made('ceph', 'auth') and vip_only is False:
log('ha_joined: No ceph relation yet, deferring.')
return
name = '%s@localhost' % SERVICE_NAME
if rabbit.get_node_name() != name and vip_only is False:
log('Stopping rabbitmq-server.')
service_stop('rabbitmq-server')
rabbit.update_rmq_env_conf(hostname='%s@localhost' % SERVICE_NAME,
ipv6=config('prefer-ipv6'))
else:
log('Node name already set to %s.' % name)
relation_settings = {}
relation_settings['corosync_bindiface'] = corosync_bindiface
relation_settings['corosync_mcastport'] = corosync_mcastport
if vip_only is True:
relation_settings['resources'] = {
'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
}
relation_settings['resource_params'] = {
'res_rabbitmq_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
(vip, vip_cidr, vip_iface),
}
else:
relation_settings['resources'] = {
'res_rabbitmq_rbd': 'ocf:ceph:rbd',
'res_rabbitmq_fs': 'ocf:heartbeat:Filesystem',
'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
'res_rabbitmq-server': 'lsb:rabbitmq-server',
}
relation_settings['resource_params'] = {
'res_rabbitmq_rbd': 'params name="%s" pool="%s" user="%s" '
'secret="%s"' %
(rbd_name, POOL_NAME,
SERVICE_NAME, ceph._keyfile_path(
SERVICE_NAME)),
'res_rabbitmq_fs': 'params device="/dev/rbd/%s/%s" directory="%s" '
'fstype="ext4" op start start-delay="10s"' %
(POOL_NAME, rbd_name, RABBIT_DIR),
'res_rabbitmq_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
(vip, vip_cidr, vip_iface),
'res_rabbitmq-server': 'op start start-delay="5s" '
'op monitor interval="5s"',
}
relation_settings['groups'] = {
'grp_rabbitmq':
'res_rabbitmq_rbd res_rabbitmq_fs res_rabbitmq_vip '
'res_rabbitmq-server',
}
for rel_id in relation_ids('ha'):
relation_set(relation_id=rel_id, relation_settings=relation_settings)
env_vars = {
'OPENSTACK_PORT_EPMD': 4369,
'OPENSTACK_PORT_MCASTPORT': config('ha-mcastport'),
}
save_script_rc(**env_vars)
@hooks.hook('ha-relation-changed')
def ha_changed():
if not is_clustered():
return
vip = config('vip')
log('ha_changed(): We are now HA clustered. '
'Advertising our VIP (%s) to all AMQP clients.' %
vip)
@hooks.hook('ceph-relation-joined')
def ceph_joined():
log('Start Ceph Relation Joined')
# NOTE fixup
# utils.configure_source()
ceph.install()
log('Finish Ceph Relation Joined')
@hooks.hook('ceph-relation-changed')
def ceph_changed():
log('Start Ceph Relation Changed')
auth = relation_get('auth')
key = relation_get('key')
use_syslog = str(config('use-syslog')).lower()
if None in [auth, key]:
log('Missing key or auth in relation')
sys.exit(0)
ceph.configure(service=SERVICE_NAME, key=key, auth=auth,
use_syslog=use_syslog)
if is_elected_leader('res_rabbitmq_vip'):
rbd_img = config('rbd-name')
rbd_size = config('rbd-size')
sizemb = int(rbd_size.split('G')[0]) * 1024
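        # e.g. an rbd-size of '5G' (assumed value) yields 5 * 1024 = 5120 MB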
blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img)
ceph.create_pool(service=SERVICE_NAME, name=POOL_NAME,
replicas=int(config('ceph-osd-replication-count')))
ceph.ensure_ceph_storage(service=SERVICE_NAME, pool=POOL_NAME,
rbd_img=rbd_img, sizemb=sizemb,
fstype='ext4', mount_point=RABBIT_DIR,
blk_device=blk_device,
system_services=['rabbitmq-server'])
subprocess.check_call(['chown', '-R', '%s:%s' %
(RABBIT_USER, RABBIT_GROUP), RABBIT_DIR])
else:
log('This is not the peer leader. Not configuring RBD.')
log('Stopping rabbitmq-server.')
service_stop('rabbitmq-server')
# If 'ha' relation has been made before the 'ceph' relation
# it is important to make sure the ha-relation data is being
# sent.
if is_relation_made('ha'):
log('*ha* relation exists. Triggering ha_joined()')
ha_joined()
else:
log('*ha* relation does not exist.')
log('Finish Ceph Relation Changed')
@hooks.hook('nrpe-external-master-relation-changed')
def | ():
if os.path.isdir(NAGIOS_PLUGINS):
rsync(os.path.join(os.getenv('CHARM_DIR'), 'scripts',
'check_rabbitmq.py'),
os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq.py'))
rsync(os.path.join(os.getenv('CHARM_DIR'), 'scripts',
'check_rabbitmq_queues.py'),
os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq_queues.py'))
if config('stats_cron_schedule'):
script = os.path.join(SCRIPTS_DIR, 'collect_rabbitmq_stats.sh')
cronjob = "{} root {}\n".format(config('stats_cron_schedule'), script)
rsync(os.path.join(charm_dir(), 'scripts',
'collect_rabbitmq_stats.sh'), script)
write_file(STATS_CRONFILE, cronjob)
elif os.path.isfile(STATS_CRONFILE):
os.remove(STATS_CRONFILE)
# Find out if nrpe set nagios_hostname
hostname = nrpe.get_nagios_hostname()
myunit = nrpe.get_nagios_unit_name()
# create unique user and vhost for each unit
current_unit = local_unit().replace('/', '-')
user = 'nagios-%s' % current_unit
vhost = 'nagios-%s' % current_unit
password = rabbit.get_rabbit_password(user, local=True)
rabbit.create_vhost(vhost)
rabbit.create_user(user, password)
rabbit.grant_permissions(user, vhost)
nrpe_compat = nrpe.NRPE(hostname=hostname)
nrpe_compat.add_check(
shortname=rabbit.RABBIT_USER,
description='Check RabbitMQ {%s}' % myunit,
check_cmd='{}/check_rabbitmq.py --user {} --password {} --vhost {}'
''.format(NAGIOS_PLUGINS, user, password, vhost)
)
if config('queue_thresholds'):
cmd = ""
# If value of queue_thresholds is incorrect we want the hook to fail
for item in yaml.safe_load(config('queue_thresholds')):
cmd += ' -c "{}" "{}" {} {}'.format(*item)
nrpe_compat.add_check(
shortname=rabbit.RABBIT_USER + '_queue',
description='Check RabbitMQ Queues',
check_cmd='{}/check_rabbitmq_queues.py{} {}'.format(
NAGIOS_PLUGINS, cmd, STATS_DATAFILE)
)
nrpe_compat.write()
@hooks.hook('upgrade-charm')
@harden()
def upgrade_charm():
pre_install_hooks()
add_source(config('source'), config('key'))
apt_update(fatal=True)
    # Ensure older passwd files in /var/lib/juju are moved to
    # /var/lib/charm/<service> so they end up replicated via the peer
    # relation if clustered
for f in [f for f in os.listdir('/var/lib/juju')
if os.path.isfile(os.path.join('/var/lib/juju', f))]:
if f.endswith('.passwd'):
s = os.path.join('/var/lib/juju', f)
d = os.path.join('/var/lib/charm/{}'.format(service_name()), f)
log('upgrade_charm: Migrating stored passwd'
' from %s to %s.' % (s, d))
shutil.move(s, d)
if is_elected_leader('res_rabbitmq_vip'):
rabbit.migrate_passwords_to_peer_relation()
# explicitly update buggy file name naigos.passwd
old = os.path.join('var/lib/rabbitmq', 'naigos.passwd')
if os.path.isfile(old):
new = os.path.join('var/lib/rabbitmq', 'nagios.passwd')
shutil.move(old, new)
MAN_PLUGIN = 'rabbitmq_management'
@hooks.hook('config-changed')
@rabbit.restart_on_change(rabbit.restart_map())
@harden()
def config_changed():
# Update hosts with this unit's information
rabbit.update_hosts_file(
{rabbit.get_unit_ip(config_override=rabbit.CLUSTER_OVERRIDE_CONFIG,
interface=rabbit.CLUSTER_INTERFACE):
rabbit.get_unit_hostname()})
# Add archive source if provided
add_source(config('source'), config('key'))
apt_update(fatal=True)
# Copy in defaults file for updated ulimits
shutil.copyfile(
'templates/rabbitmq-server',
'/etc/default/rabbitmq-server')
# Install packages to ensure any changes to source
# result in an upgrade if applicable.
status_set('maintenance', 'Installing/upgrading RabbitMQ packages')
apt_install(rabbit.PACKAGES, fatal=True)
open_port(5672)
chown(RABBIT_DIR, rabbit.RABBIT_USER, rabbit.RABBIT_USER)
chmod(RABBIT_DIR, 0o775)
if config('management_plugin') is True:
rabbit.enable_plugin(MAN_PLUGIN)
open_port(rabbit.get_managment_port())
else:
rabbit.disable_plugin(MAN_PLUGIN)
close_port(rabbit.get_managment_port())
    # LY: Close the old management port since it may have been opened in a
# previous version of the charm. close_port is a noop if the port
# is not open
close_port(55672)
rabbit.ConfigRenderer(
rabbit.CONFIG_FILES).write_all()
# Only set values if this is the leader
if not is_leader():
return
rabbit.set_all_mirroring_queues(config('mirroring-queues'))
if is_relation_made("ha"):
ha_is_active_active = config("ha-vip-only")
if ha_is_active_active:
update_nrpe_checks()
else:
if is_elected_leader('res_rabbitmq_vip'):
update_nrpe_checks()
else:
log("hacluster relation is present but this node is not active"
" skipping update nrpe checks")
else:
update_nrpe_checks()
# Update cluster in case min-cluster-size has changed
for rid in relation_ids('cluster'):
for unit in related_units(rid):
cluster_changed(relation_id=rid, remote_unit=unit)
@hooks.hook('leader-elected')
def leader_elected():
status_set("maintenance", "{} is the elected leader".format(local_unit()))
@hooks.hook('leader-settings-changed')
def leader_settings_changed():
if not os.path.exists(rabbit.RABBITMQ_CTL):
log('Deferring cookie configuration, RabbitMQ not yet installed')
return
# Get cookie from leader, update cookie locally and
# force cluster-relation-changed hooks to run on peers
cookie = leader_get(attribute='cookie')
if cookie:
update_cookie(leaders_cookie=cookie)
# Force cluster-relation-changed hooks to run on peers
# This will precipitate peer clustering
# Without this a chicken and egg scenario prevails when
# using LE and peerstorage
for rid in relation_ids('cluster'):
relation_set(relation_id=rid, relation_settings={'cookie': cookie})
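# NOTE: writing data onto the cluster relation queues cluster-relation-changed
# on the peer units, which is what lets them pick up the new cookie and join.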
def pre_install_hooks():
for f in glob.glob('exec.d/*/charm-pre-install'):
if os.path.isfile(f) and os.access(f, os.X_OK):
subprocess.check_call(['sh', '-c', f])
@hooks.hook('update-status')
@harden()
def update_status():
log('Updating status.')
if __name__ == '__main__':
try:
hooks.execute(sys.argv)
except UnregisteredHookError as e:
log('Unknown hook {} - skipping.'.format(e))
# Gated client updates
update_clients()
rabbit.assess_status(rabbit.ConfigRenderer(rabbit.CONFIG_FILES))
| update_nrpe_checks | identifier_name |
rabbitmq_server_relations.py | #!/usr/bin/python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import sys
import subprocess
import glob
try:
import yaml # flake8: noqa
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
import yaml # flake8: noqa
import rabbit_utils as rabbit
import ssl_utils
from lib.utils import (
chown, chmod,
is_newer,
)
from charmhelpers.contrib.hahelpers.cluster import (
is_clustered,
is_elected_leader
)
from charmhelpers.contrib.openstack.utils import (
is_unit_paused_set,
)
import charmhelpers.contrib.storage.linux.ceph as ceph
from charmhelpers.contrib.openstack.utils import save_script_rc
from charmhelpers.contrib.hardening.harden import harden
from charmhelpers.fetch import (
add_source,
apt_update,
apt_install,
)
from charmhelpers.core.hookenv import (
open_port,
close_port,
log,
ERROR,
INFO,
relation_get,
relation_clear,
relation_set,
relation_ids,
related_units,
service_name,
local_unit,
config,
is_relation_made,
Hooks,
UnregisteredHookError,
is_leader,
charm_dir,
status_set,
unit_private_ip,
)
from charmhelpers.core.host import (
cmp_pkgrevno,
rsync,
service_stop,
service_restart,
write_file,
)
from charmhelpers.contrib.charmsupport import nrpe
from charmhelpers.contrib.peerstorage import (
peer_echo,
peer_retrieve,
peer_store,
peer_store_and_set,
peer_retrieve_by_prefix,
leader_get,
)
hooks = Hooks()
SERVICE_NAME = os.getenv('JUJU_UNIT_NAME').split('/')[0]
POOL_NAME = SERVICE_NAME
RABBIT_DIR = '/var/lib/rabbitmq'
RABBIT_USER = 'rabbitmq'
RABBIT_GROUP = 'rabbitmq'
NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
SCRIPTS_DIR = '/usr/local/bin'
STATS_CRONFILE = '/etc/cron.d/rabbitmq-stats'
STATS_DATAFILE = os.path.join(RABBIT_DIR, 'data',
'{}_queue_stats.dat'
''.format(rabbit.get_unit_hostname()))
@hooks.hook('install.real')
@harden()
def install():
pre_install_hooks()
# NOTE(jamespage) install actually happens in config_changed hook
def configure_amqp(username, vhost, admin=False):
# get and update service password
password = rabbit.get_rabbit_password(username)
# update vhost
rabbit.create_vhost(vhost)
rabbit.create_user(username, password, admin)
rabbit.grant_permissions(username, vhost)
# NOTE(freyes): after rabbitmq-server 3.0 the method to define HA in the
# queues is different
# http://www.rabbitmq.com/blog/2012/11/19/breaking-things-with-rabbitmq-3-0
if config('mirroring-queues'):
rabbit.set_ha_mode(vhost, 'all')
return password
def update_clients():
"""Update amqp client relation hooks
IFF leader node is ready. Client nodes are considered ready once the leader
has already run amqp_changed.
"""
if rabbit.leader_node_is_ready() or rabbit.client_node_is_ready():
for rid in relation_ids('amqp'):
for unit in related_units(rid):
amqp_changed(relation_id=rid, remote_unit=unit)
@hooks.hook('amqp-relation-changed')
def amqp_changed(relation_id=None, remote_unit=None):
host_addr = rabbit.get_unit_ip()
# TODO: Simplify what the non-leader needs to do
if not is_leader() and rabbit.client_node_is_ready():
# NOTE(jamespage) clear relation to deal with data being
# removed from peer storage
relation_clear(relation_id)
        # Each unit needs to set the db information; otherwise, if the unit
        # holding the info dies, the settings die with it (Bug #1355848)
exc_list = ['hostname', 'private-address']
for rel_id in relation_ids('amqp'):
peerdb_settings = peer_retrieve_by_prefix(rel_id,
exc_list=exc_list)
peerdb_settings['hostname'] = host_addr
peerdb_settings['private-address'] = host_addr
if 'password' in peerdb_settings:
relation_set(relation_id=rel_id, **peerdb_settings)
log('amqp_changed(): Deferring amqp_changed'
' to the leader.')
# NOTE: active/active case
if config('prefer-ipv6'):
relation_settings = {'private-address': host_addr}
relation_set(relation_id=relation_id,
relation_settings=relation_settings)
return
# Bail if not completely ready
if not rabbit.leader_node_is_ready():
return
relation_settings = {}
settings = relation_get(rid=relation_id, unit=remote_unit)
singleset = set(['username', 'vhost'])
if singleset.issubset(settings):
if None in [settings['username'], settings['vhost']]:
log('amqp_changed(): Relation not ready.')
return
relation_settings['password'] = configure_amqp(
username=settings['username'],
vhost=settings['vhost'],
admin=settings.get('admin', False))
else:
queues = {}
for k, v in settings.iteritems():
amqp = k.split('_')[0]
x = '_'.join(k.split('_')[1:])
if amqp not in queues:
queues[amqp] = {}
queues[amqp][x] = v
for amqp in queues:
if singleset.issubset(queues[amqp]):
relation_settings[
'_'.join([amqp, 'password'])] = configure_amqp(
queues[amqp]['username'],
queues[amqp]['vhost'])
relation_settings['hostname'] = \
relation_settings['private-address'] = \
rabbit.get_unit_ip()
ssl_utils.configure_client_ssl(relation_settings)
if is_clustered():
relation_settings['clustered'] = 'true'
if is_relation_made('ha'):
# active/passive settings
relation_settings['vip'] = config('vip')
# or ha-vip-only to support active/active, but
# accessed via a VIP for older clients.
if config('ha-vip-only') is True:
relation_settings['ha-vip-only'] = 'true'
# set if need HA queues or not
if cmp_pkgrevno('rabbitmq-server', '3.0.1') < 0:
relation_settings['ha_queues'] = True
peer_store_and_set(relation_id=relation_id,
relation_settings=relation_settings)
@hooks.hook('cluster-relation-joined')
def cluster_joined(relation_id=None):
relation_settings = {
'hostname': rabbit.get_unit_hostname(),
'private-address':
rabbit.get_unit_ip(config_override=rabbit.CLUSTER_OVERRIDE_CONFIG,
interface=rabbit.CLUSTER_INTERFACE),
}
relation_set(relation_id=relation_id,
relation_settings=relation_settings)
if is_relation_made('ha') and \
config('ha-vip-only') is False:
log('hacluster relation is present, skipping native '
'rabbitmq cluster config.')
return
try:
if not is_leader():
log('Not the leader, deferring cookie propagation to leader')
return
except NotImplementedError:
if is_newer():
log('cluster_joined: Relation greater.')
return
if not os.path.isfile(rabbit.COOKIE_PATH):
log('erlang cookie missing from %s' % rabbit.COOKIE_PATH,
level=ERROR)
return
if is_leader():
log('Leader peer_storing cookie', level=INFO)
cookie = open(rabbit.COOKIE_PATH, 'r').read().strip()
peer_store('cookie', cookie)
peer_store('leader_node_ip', unit_private_ip())
peer_store('leader_node_hostname', rabbit.get_unit_hostname())
@hooks.hook('cluster-relation-changed')
def cluster_changed(relation_id=None, remote_unit=None):
    # Future travelers beware: ordering is significant
rdata = relation_get(rid=relation_id, unit=remote_unit)
# sync passwords
blacklist = ['hostname', 'private-address', 'public-address']
whitelist = [a for a in rdata.keys() if a not in blacklist]
peer_echo(includes=whitelist)
cookie = peer_retrieve('cookie')
if not cookie:
log('cluster_changed: cookie not yet set.', level=INFO)
return
if rdata:
hostname = rdata.get('hostname', None)
private_address = rdata.get('private-address', None)
if hostname and private_address:
rabbit.update_hosts_file({private_address: hostname})
# sync the cookie with peers if necessary
update_cookie()
if is_relation_made('ha') and \
config('ha-vip-only') is False:
log('hacluster relation is present, skipping native '
'rabbitmq cluster config.', level=INFO)
return
# cluster with node?
if not is_leader():
rabbit.cluster_with()
update_nrpe_checks()
def update_cookie(leaders_cookie=None):
# sync cookie
if leaders_cookie:
cookie = leaders_cookie
else:
cookie = peer_retrieve('cookie')
cookie_local = None
with open(rabbit.COOKIE_PATH, 'r') as f:
cookie_local = f.read().strip()
if cookie_local == cookie:
log('Cookie already synchronized with peer.')
return
service_stop('rabbitmq-server')
with open(rabbit.COOKIE_PATH, 'wb') as out:
out.write(cookie)
if not is_unit_paused_set():
service_restart('rabbitmq-server')
rabbit.wait_app()
@hooks.hook('ha-relation-joined')
def ha_joined():
corosync_bindiface = config('ha-bindiface')
corosync_mcastport = config('ha-mcastport')
vip = config('vip')
vip_iface = config('vip_iface')
vip_cidr = config('vip_cidr')
rbd_name = config('rbd-name')
vip_only = config('ha-vip-only')
if None in [corosync_bindiface, corosync_mcastport, vip, vip_iface,
vip_cidr, rbd_name] and vip_only is False:
log('Insufficient configuration data to configure hacluster.',
level=ERROR)
sys.exit(1)
elif None in [corosync_bindiface, corosync_mcastport, vip, vip_iface,
vip_cidr] and vip_only is True:
log('Insufficient configuration data to configure VIP-only hacluster.',
level=ERROR)
sys.exit(1)
if not is_relation_made('ceph', 'auth') and vip_only is False:
log('ha_joined: No ceph relation yet, deferring.')
return
name = '%s@localhost' % SERVICE_NAME
if rabbit.get_node_name() != name and vip_only is False:
log('Stopping rabbitmq-server.')
service_stop('rabbitmq-server')
rabbit.update_rmq_env_conf(hostname='%s@localhost' % SERVICE_NAME,
ipv6=config('prefer-ipv6'))
else:
log('Node name already set to %s.' % name)
relation_settings = {}
relation_settings['corosync_bindiface'] = corosync_bindiface
relation_settings['corosync_mcastport'] = corosync_mcastport
if vip_only is True:
relation_settings['resources'] = {
'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
}
relation_settings['resource_params'] = {
'res_rabbitmq_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
(vip, vip_cidr, vip_iface),
}
else:
relation_settings['resources'] = {
'res_rabbitmq_rbd': 'ocf:ceph:rbd',
'res_rabbitmq_fs': 'ocf:heartbeat:Filesystem',
'res_rabbitmq_vip': 'ocf:heartbeat:IPaddr2',
'res_rabbitmq-server': 'lsb:rabbitmq-server',
}
relation_settings['resource_params'] = {
'res_rabbitmq_rbd': 'params name="%s" pool="%s" user="%s" '
'secret="%s"' %
(rbd_name, POOL_NAME,
SERVICE_NAME, ceph._keyfile_path(
SERVICE_NAME)),
'res_rabbitmq_fs': 'params device="/dev/rbd/%s/%s" directory="%s" '
'fstype="ext4" op start start-delay="10s"' %
(POOL_NAME, rbd_name, RABBIT_DIR),
'res_rabbitmq_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
(vip, vip_cidr, vip_iface),
'res_rabbitmq-server': 'op start start-delay="5s" '
'op monitor interval="5s"',
}
relation_settings['groups'] = {
'grp_rabbitmq':
'res_rabbitmq_rbd res_rabbitmq_fs res_rabbitmq_vip '
'res_rabbitmq-server',
}
for rel_id in relation_ids('ha'):
relation_set(relation_id=rel_id, relation_settings=relation_settings)
env_vars = {
'OPENSTACK_PORT_EPMD': 4369,
'OPENSTACK_PORT_MCASTPORT': config('ha-mcastport'),
}
save_script_rc(**env_vars)
@hooks.hook('ha-relation-changed')
def ha_changed():
if not is_clustered():
return
vip = config('vip')
log('ha_changed(): We are now HA clustered. '
'Advertising our VIP (%s) to all AMQP clients.' %
vip)
@hooks.hook('ceph-relation-joined')
def ceph_joined():
log('Start Ceph Relation Joined')
# NOTE fixup
# utils.configure_source()
ceph.install()
log('Finish Ceph Relation Joined')
@hooks.hook('ceph-relation-changed')
def ceph_changed():
log('Start Ceph Relation Changed')
auth = relation_get('auth')
key = relation_get('key')
use_syslog = str(config('use-syslog')).lower()
if None in [auth, key]:
log('Missing key or auth in relation')
sys.exit(0)
ceph.configure(service=SERVICE_NAME, key=key, auth=auth,
use_syslog=use_syslog)
if is_elected_leader('res_rabbitmq_vip'):
rbd_img = config('rbd-name')
rbd_size = config('rbd-size')
sizemb = int(rbd_size.split('G')[0]) * 1024
blk_device = '/dev/rbd/%s/%s' % (POOL_NAME, rbd_img)
ceph.create_pool(service=SERVICE_NAME, name=POOL_NAME,
replicas=int(config('ceph-osd-replication-count')))
ceph.ensure_ceph_storage(service=SERVICE_NAME, pool=POOL_NAME,
rbd_img=rbd_img, sizemb=sizemb,
fstype='ext4', mount_point=RABBIT_DIR,
blk_device=blk_device,
system_services=['rabbitmq-server'])
subprocess.check_call(['chown', '-R', '%s:%s' %
(RABBIT_USER, RABBIT_GROUP), RABBIT_DIR])
else:
log('This is not the peer leader. Not configuring RBD.')
log('Stopping rabbitmq-server.')
service_stop('rabbitmq-server')
# If 'ha' relation has been made before the 'ceph' relation
# it is important to make sure the ha-relation data is being
# sent.
if is_relation_made('ha'):
log('*ha* relation exists. Triggering ha_joined()')
ha_joined()
else:
log('*ha* relation does not exist.')
log('Finish Ceph Relation Changed')
@hooks.hook('nrpe-external-master-relation-changed')
def update_nrpe_checks():
if os.path.isdir(NAGIOS_PLUGINS):
rsync(os.path.join(os.getenv('CHARM_DIR'), 'scripts',
'check_rabbitmq.py'),
os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq.py'))
rsync(os.path.join(os.getenv('CHARM_DIR'), 'scripts',
'check_rabbitmq_queues.py'),
os.path.join(NAGIOS_PLUGINS, 'check_rabbitmq_queues.py'))
if config('stats_cron_schedule'):
script = os.path.join(SCRIPTS_DIR, 'collect_rabbitmq_stats.sh')
cronjob = "{} root {}\n".format(config('stats_cron_schedule'), script)
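        # e.g. with stats_cron_schedule set to '*/5 * * * *' this renders a
        # crontab line like "*/5 * * * * root <SCRIPTS_DIR>/collect_rabbitmq_stats.sh"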
rsync(os.path.join(charm_dir(), 'scripts',
'collect_rabbitmq_stats.sh'), script)
write_file(STATS_CRONFILE, cronjob)
elif os.path.isfile(STATS_CRONFILE):
os.remove(STATS_CRONFILE)
# Find out if nrpe set nagios_hostname
hostname = nrpe.get_nagios_hostname()
myunit = nrpe.get_nagios_unit_name()
# create unique user and vhost for each unit
current_unit = local_unit().replace('/', '-')
user = 'nagios-%s' % current_unit
vhost = 'nagios-%s' % current_unit
password = rabbit.get_rabbit_password(user, local=True)
rabbit.create_vhost(vhost)
rabbit.create_user(user, password)
rabbit.grant_permissions(user, vhost)
nrpe_compat = nrpe.NRPE(hostname=hostname)
nrpe_compat.add_check(
shortname=rabbit.RABBIT_USER,
description='Check RabbitMQ {%s}' % myunit,
check_cmd='{}/check_rabbitmq.py --user {} --password {} --vhost {}'
''.format(NAGIOS_PLUGINS, user, password, vhost)
)
if config('queue_thresholds'):
cmd = ""
# If value of queue_thresholds is incorrect we want the hook to fail
for item in yaml.safe_load(config('queue_thresholds')):
cmd += ' -c "{}" "{}" {} {}'.format(*item)
nrpe_compat.add_check(
shortname=rabbit.RABBIT_USER + '_queue',
description='Check RabbitMQ Queues',
check_cmd='{}/check_rabbitmq_queues.py{} {}'.format(
NAGIOS_PLUGINS, cmd, STATS_DATAFILE)
)
nrpe_compat.write()
@hooks.hook('upgrade-charm')
@harden()
def upgrade_charm():
pre_install_hooks()
add_source(config('source'), config('key'))
apt_update(fatal=True)
# Ensure older passwd files in /var/lib/juju are moved to
# /var/lib/rabbitmq which will end up replicated if clustered
for f in [f for f in os.listdir('/var/lib/juju')
if os.path.isfile(os.path.join('/var/lib/juju', f))]:
if f.endswith('.passwd'):
s = os.path.join('/var/lib/juju', f)
d = os.path.join('/var/lib/charm/{}'.format(service_name()), f)
log('upgrade_charm: Migrating stored passwd'
' from %s to %s.' % (s, d))
shutil.move(s, d)
if is_elected_leader('res_rabbitmq_vip'):
rabbit.migrate_passwords_to_peer_relation()
# explicitly update buggy file name naigos.passwd
    old = os.path.join('/var/lib/rabbitmq', 'naigos.passwd')
    if os.path.isfile(old):
        new = os.path.join('/var/lib/rabbitmq', 'nagios.passwd')
shutil.move(old, new)
MAN_PLUGIN = 'rabbitmq_management'
@hooks.hook('config-changed')
@rabbit.restart_on_change(rabbit.restart_map())
@harden()
def config_changed():
# Update hosts with this unit's information
rabbit.update_hosts_file(
{rabbit.get_unit_ip(config_override=rabbit.CLUSTER_OVERRIDE_CONFIG,
interface=rabbit.CLUSTER_INTERFACE):
rabbit.get_unit_hostname()})
# Add archive source if provided
add_source(config('source'), config('key'))
apt_update(fatal=True)
# Copy in defaults file for updated ulimits
shutil.copyfile(
'templates/rabbitmq-server',
'/etc/default/rabbitmq-server')
# Install packages to ensure any changes to source
# result in an upgrade if applicable.
status_set('maintenance', 'Installing/upgrading RabbitMQ packages')
apt_install(rabbit.PACKAGES, fatal=True)
open_port(5672)
chown(RABBIT_DIR, rabbit.RABBIT_USER, rabbit.RABBIT_USER)
chmod(RABBIT_DIR, 0o775)
if config('management_plugin') is True:
rabbit.enable_plugin(MAN_PLUGIN)
open_port(rabbit.get_managment_port())
else:
rabbit.disable_plugin(MAN_PLUGIN)
close_port(rabbit.get_managment_port())
    # LY: Close the old management port since it may have been opened in a
# previous version of the charm. close_port is a noop if the port
# is not open
close_port(55672)
rabbit.ConfigRenderer(
rabbit.CONFIG_FILES).write_all()
# Only set values if this is the leader
if not is_leader():
return
rabbit.set_all_mirroring_queues(config('mirroring-queues'))
if is_relation_made("ha"):
ha_is_active_active = config("ha-vip-only")
if ha_is_active_active:
update_nrpe_checks()
else:
if is_elected_leader('res_rabbitmq_vip'):
update_nrpe_checks()
else:
log("hacluster relation is present but this node is not active"
" skipping update nrpe checks")
else:
update_nrpe_checks()
# Update cluster in case min-cluster-size has changed
for rid in relation_ids('cluster'):
for unit in related_units(rid):
cluster_changed(relation_id=rid, remote_unit=unit)
@hooks.hook('leader-elected')
def leader_elected():
status_set("maintenance", "{} is the elected leader".format(local_unit()))
@hooks.hook('leader-settings-changed')
def leader_settings_changed():
if not os.path.exists(rabbit.RABBITMQ_CTL):
log('Deferring cookie configuration, RabbitMQ not yet installed')
return
# Get cookie from leader, update cookie locally and
# force cluster-relation-changed hooks to run on peers
cookie = leader_get(attribute='cookie')
if cookie:
update_cookie(leaders_cookie=cookie)
# Force cluster-relation-changed hooks to run on peers
# This will precipitate peer clustering
# Without this a chicken and egg scenario prevails when
# using LE and peerstorage
for rid in relation_ids('cluster'):
relation_set(relation_id=rid, relation_settings={'cookie': cookie})
def pre_install_hooks():
for f in glob.glob('exec.d/*/charm-pre-install'):
if os.path.isfile(f) and os.access(f, os.X_OK):
subprocess.check_call(['sh', '-c', f])
@hooks.hook('update-status')
@harden()
def update_status():
log('Updating status.')
if __name__ == '__main__':
try: | log('Unknown hook {} - skipping.'.format(e))
# Gated client updates
update_clients()
rabbit.assess_status(rabbit.ConfigRenderer(rabbit.CONFIG_FILES)) | hooks.execute(sys.argv)
except UnregisteredHookError as e: | random_line_split |
preference_aggregation_featureless_online.py | import logging
from copy import deepcopy
import gin
import numpy as np
from backend.rating_fields import MAX_VALUE
from matplotlib import pyplot as plt
from scipy.optimize import golden
from .preference_aggregation_featureless_np import loss_fcn_np
@gin.configurable
class FeaturelessOnlineUpdater(object):
"""Update weights online."""
def __init__(self, hypers=None, golden_params=None):
if golden_params is None:
golden_params = {}
self.golden_params = golden_params
self.hypers = hypers
self.model_tensor = None
self.minibatch = None
self.to_subtract = {}
self.silent = False
def current_loss(self, key='loss'):
return self.get_closure_loss((0, 0, 0), key=key)(None)
def set_subtract(self, key='loss'):
self.to_subtract = self.current_loss(key=key)['metrics']
def set_minibatch(self, mb):
for key, val in mb.items():
assert isinstance(key, str), key
assert isinstance(val, np.ndarray), val
self.minibatch = mb
def set_model_tensor(self, mt):
assert isinstance(mt, np.ndarray), mt
self.model_tensor = mt
def get_value(self, indices):
"""Get one from the tensor."""
assert len(indices) == 3, indices
if self.model_tensor is None:
raise ValueError("Please set the tensor")
return self.model_tensor[indices[0], indices[1], indices[2]]
def set_value(self, indices, val):
"""Set value to the tensor."""
assert len(indices) == 3, indices
if self.model_tensor is None:
raise ValueError("Please set the tensor")
self.model_tensor[indices[0], indices[1], indices[2]] = val
return val
def get_closure_loss(self, indices, key='loss'):
"""Return a function that computes the loss given model_tensor[indices]."""
def f(x=None, self=self, indices=indices[:], key=str(key)):
if self.minibatch is None:
raise ValueError("Please set the minibatch first")
if self.model_tensor is None:
raise ValueError("Please set the tensor")
if self.hypers is None:
raise ValueError("Please use gin to configure hyperparameters")
if x is not None:
self.set_value(indices=indices, val=x)
result = loss_fcn_np(model_tensor=self.model_tensor,
**self.hypers, **self.minibatch)
return {
'loss': result[key] - self.to_subtract.get(key, 0.0),
'metrics': {**result, 'param': x}
}
return f
def best_value_many_indices(self, indices_list, **kwargs):
"""Given a list of indices (with possible repeats), run optimization and return stats."""
indices_list = [tuple(x) for x in indices_list]
stats = {indices: [] for indices in set(indices_list)}
for indices in indices_list:
stats[indices].append(self.best_value_indices(indices=indices, **kwargs))
return stats
def best_value_indices(self, indices, key='loss', assign_at_end=False,
give_history=True):
"""Given indices, use Golden Ratio search to compute the best model_tensor[indices]."""
# remember the original value
orig_value = self.get_value(indices=indices)
# function to compute the loss
loss_closure = self.get_closure_loss(indices=indices, key=key)
orig_loss = loss_closure(orig_value)['loss']
# history of metrics
history = []
def closure_with_history(x, give_history=give_history):
"""Given a value, compute the loss, store the value (optionally) and return by key."""
result = loss_closure(x)
if give_history:
history.append(result['metrics'])
return result['loss']
params = {x: y for x, y in self.golden_params.items()}
if 'smartbracket' in self.golden_params:
del params['smartbracket']
params['brack'] = tuple([orig_value + t for t in self.golden_params['smartbracket']])
# running optimization
best_value, best_loss, iterations = golden(closure_with_history,
full_output=True, **params)
# restoring the value if requested
if not assign_at_end:
self.set_value(indices=indices, val=orig_value)
else:
if best_loss < orig_loss:
self.set_value(indices=indices, val=best_value)
else:
if not self.silent:
logging.warning(f"No improvement {indices} {key} {assign_at_end}")
self.set_value(indices=indices, val=orig_value)
best_value = orig_value
best_loss = orig_loss
return {
'assigned': assign_at_end,
'history': history,
'orig_value_param': orig_value,
'best_value': best_value,
'best_loss': best_loss,
'iterations': iterations
}
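# Illustrative sketch (not part of the original module): how scipy.optimize.golden
# behaves with a bracket and full_output=True, mirroring the call in
# best_value_indices above. The quadratic objective and the helper name are
# assumptions made purely for this example.
def _golden_search_demo():
    best_x, best_loss, iterations = golden(
        lambda x: (x - 2.0) ** 2, brack=(-1.0, 1.0), full_output=True)
    # best_x converges to ~2.0 and best_loss to ~0.0
    return best_x, best_loss, iterations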
# Plotting functions
def get_history(result, ind, only_last=True):
if only_last:
res = [z['history'][-1] for z in result[ind]]
else:
res = [x for z in result[ind] for x in z['history']]
return res
def visualize_result_loss(result, indices_lst):
values_byloss = {'loss': []}
ind_offset = {ind: 0 for ind in set(indices_lst)}
for ind in indices_lst:
val = result[ind][ind_offset[ind]]['best_loss']
ind_offset[ind] += 1
values_byloss['loss'].append(val)
# for key in val:
# if key.startswith('loss'):
# if key not in values_byloss:
# values_byloss[key] = []
# values_byloss[key].append(val[key])
plt.figure(figsize=(13, 3))
for i, key in enumerate(sorted(values_byloss.keys()), 1):
plt.subplot(1, len(values_byloss), i)
plt.title(key)
plt.plot(values_byloss[key])
# plt.axhline(online.to_subtract[key])
plt.show()
def visualize_byindex(result, indices_lst, initial_value):
for ind in set(indices_lst):
res = get_history(result, ind)
res_dct = lstdct2dctlst(res)
plt.figure(figsize=(13, 3))
for i, key in enumerate(sorted(res_dct.keys()), 1):
hst = res_dct[key]
plt.subplot(1, len(res_dct), i)
plt.title(key + ' ' + str(ind))
if key in initial_value[ind]['metrics']:
| hst = [initial] + hst
plt.axhline(initial)
if np.min(hst) > 0:
plt.yscale('log')
plt.plot(hst)
plt.show()
def lstdct2dctlst(lst):
"""List of dictionaries -> dictionary of lists."""
keys = lst[0].keys()
res = {key: [x[key] for x in lst] for key in keys}
return res
def compute_online_update(rating_value, mb_np_orig,
model_tensor_orig,
idx_set,
users_get_value=None,
n_repeats=1,
hotfix_update_hypers=None,
plot_charts=False,
pbars=None,
cancel_updates=False,
**kwargs):
"""Compute an online update given a pre-computed minibatch of data, and a model tensor.
Args:
rating_value: value of the rating in (-1, 1) to set
mb_np_orig: minibatch for the loss function
model_tensor_orig: current scores (users + aggregate)
idx_set: which rating ID in the minibatch to update?
        users_get_value: if pbars is set, these users' comparisons will be used
n_repeats: number of repetitions of the golden ratio search
hotfix_update_hypers: updates for hyperparameters
plot_charts: show visualization
cancel_updates: do a dummy run without optimization (return current values)
pbars: if not None, set progress bar to the value of comparison (0-100) for user_get_value
**kwargs: hotfix updates for the golden ratio algorithm
Returns:
        if pbars is set, returns None (only sets the values of the progress bars)
otherwise, returns a dictionary with a response.
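    Example (illustrative only; mb_np and tensor are assumed to come from an
    earlier data-loading step):
        out = compute_online_update(rating_value=0.5, mb_np_orig=mb_np,
                                    model_tensor_orig=tensor, idx_set=0)
        updated_scores = out['new_model_tensor']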
"""
# internal object IDs to update
obj1 = mb_np_orig['objects_rating_v1'][idx_set]
obj2 = mb_np_orig['objects_rating_v2'][idx_set]
# copying the minibatch and the model tensor (will be updated)
mb_np_copy = deepcopy(mb_np_orig)
model_tensor_copy = deepcopy(model_tensor_orig)
if not cancel_updates:
# SETTING THE RATING VALUE
mb_np_copy['cmp'][idx_set, 0] = rating_value
# creating the updater
online = FeaturelessOnlineUpdater()
online.hypers['aggregate_index'] = -1
# hotfix parameter updates
if hotfix_update_hypers is not None:
for key, value in hotfix_update_hypers.items():
online.hypers[key] = value
for key, value in kwargs.items():
online.golden_params[key] = value
# setting data
online.set_minibatch(mb_np_copy)
online.set_model_tensor(model_tensor_copy)
online.set_subtract()
online.silent = True
# CONFIGURATION FOR INDICES
indices_lst = []
for i in range(model_tensor_orig.shape[0]):
indices_lst.append((i, obj1, 0))
indices_lst.append((i, obj2, 0))
indices_lst *= n_repeats
# initial value for the loss/index
initial_value = {ind: online.get_closure_loss(ind)(online.get_value(ind)) for ind in
set(indices_lst)}
if not cancel_updates:
# RUNNING OPTIMIZATION with GOLDEN RATIO
result = online.best_value_many_indices(indices_lst, assign_at_end=True)
# plotting
if plot_charts:
visualize_result_loss(result, indices_lst)
visualize_byindex(result, indices_lst, initial_value)
else:
result = None
if pbars is not None:
if 'comparison' in pbars:
assert len(users_get_value) == len(pbars['comparison'])
for user, pbar in zip(users_get_value, pbars['comparison']):
# obtaining model scores
score1 = online.get_value((user, obj1, 0))
score2 = online.get_value((user, obj2, 0))
# computing the comparison
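                # (logistic mapping of the score difference: equal scores give
                # MAX_VALUE / 2; a much larger score2 pushes it towards MAX_VALUE)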
comparison = 1 / (1 + np.exp(score1 - score2)) * MAX_VALUE
pbar.value = comparison
if 'v1' in pbars:
assert len(users_get_value) == len(pbars['v1'])
for user, pbar in zip(users_get_value, pbars['v1']):
# obtaining model scores
score1 = online.get_value((user, obj1, 0))
pbar.value = score1
if 'v2' in pbars:
assert len(users_get_value) == len(pbars['v2'])
for user, pbar in zip(users_get_value, pbars['v2']):
# obtaining model scores
score1 = online.get_value((user, obj2, 0))
pbar.value = score1
return None
else:
return {
'new_model_tensor': model_tensor_copy,
'new_minibatch': mb_np_copy,
'online_learner': online,
'indices_lst': indices_lst,
'result': result,
} | initial = initial_value[ind]['metrics'][key]
| random_line_split |
preference_aggregation_featureless_online.py | import logging
from copy import deepcopy
import gin
import numpy as np
from backend.rating_fields import MAX_VALUE
from matplotlib import pyplot as plt
from scipy.optimize import golden
from .preference_aggregation_featureless_np import loss_fcn_np
@gin.configurable
class FeaturelessOnlineUpdater(object):
"""Update weights online."""
def __init__(self, hypers=None, golden_params=None):
if golden_params is None:
golden_params = {}
self.golden_params = golden_params
self.hypers = hypers
self.model_tensor = None
self.minibatch = None
self.to_subtract = {}
self.silent = False
def current_loss(self, key='loss'):
return self.get_closure_loss((0, 0, 0), key=key)(None)
def set_subtract(self, key='loss'):
self.to_subtract = self.current_loss(key=key)['metrics']
def set_minibatch(self, mb):
for key, val in mb.items():
assert isinstance(key, str), key
assert isinstance(val, np.ndarray), val
self.minibatch = mb
def set_model_tensor(self, mt):
assert isinstance(mt, np.ndarray), mt
self.model_tensor = mt
def get_value(self, indices):
"""Get one from the tensor."""
assert len(indices) == 3, indices
if self.model_tensor is None:
raise ValueError("Please set the tensor")
return self.model_tensor[indices[0], indices[1], indices[2]]
def set_value(self, indices, val):
"""Set value to the tensor."""
assert len(indices) == 3, indices
if self.model_tensor is None:
raise ValueError("Please set the tensor")
self.model_tensor[indices[0], indices[1], indices[2]] = val
return val
def get_closure_loss(self, indices, key='loss'):
"""Return a function that computes the loss given model_tensor[indices]."""
def f(x=None, self=self, indices=indices[:], key=str(key)):
if self.minibatch is None:
raise ValueError("Please set the minibatch first")
if self.model_tensor is None:
raise ValueError("Please set the tensor")
if self.hypers is None:
raise ValueError("Please use gin to configure hyperparameters")
if x is not None:
self.set_value(indices=indices, val=x)
result = loss_fcn_np(model_tensor=self.model_tensor,
**self.hypers, **self.minibatch)
return {
'loss': result[key] - self.to_subtract.get(key, 0.0),
'metrics': {**result, 'param': x}
}
return f
def best_value_many_indices(self, indices_list, **kwargs):
"""Given a list of indices (with possible repeats), run optimization and return stats."""
indices_list = [tuple(x) for x in indices_list]
stats = {indices: [] for indices in set(indices_list)}
for indices in indices_list:
stats[indices].append(self.best_value_indices(indices=indices, **kwargs))
return stats
def best_value_indices(self, indices, key='loss', assign_at_end=False,
give_history=True):
"""Given indices, use Golden Ratio search to compute the best model_tensor[indices]."""
# remember the original value
orig_value = self.get_value(indices=indices)
# function to compute the loss
loss_closure = self.get_closure_loss(indices=indices, key=key)
orig_loss = loss_closure(orig_value)['loss']
# history of metrics
history = []
def closure_with_history(x, give_history=give_history):
"""Given a value, compute the loss, store the value (optionally) and return by key."""
result = loss_closure(x)
if give_history:
history.append(result['metrics'])
return result['loss']
params = {x: y for x, y in self.golden_params.items()}
if 'smartbracket' in self.golden_params:
del params['smartbracket']
params['brack'] = tuple([orig_value + t for t in self.golden_params['smartbracket']])
# running optimization
best_value, best_loss, iterations = golden(closure_with_history,
full_output=True, **params)
# restoring the value if requested
if not assign_at_end:
self.set_value(indices=indices, val=orig_value)
else:
if best_loss < orig_loss:
self.set_value(indices=indices, val=best_value)
else:
if not self.silent:
logging.warning(f"No improvement {indices} {key} {assign_at_end}")
self.set_value(indices=indices, val=orig_value)
best_value = orig_value
best_loss = orig_loss
return {
'assigned': assign_at_end,
'history': history,
'orig_value_param': orig_value,
'best_value': best_value,
'best_loss': best_loss,
'iterations': iterations
}
# Plotting functions
def get_history(result, ind, only_last=True):
if only_last:
res = [z['history'][-1] for z in result[ind]]
else:
res = [x for z in result[ind] for x in z['history']]
return res
def visualize_result_loss(result, indices_lst):
values_byloss = {'loss': []}
ind_offset = {ind: 0 for ind in set(indices_lst)}
for ind in indices_lst:
val = result[ind][ind_offset[ind]]['best_loss']
ind_offset[ind] += 1
values_byloss['loss'].append(val)
# for key in val:
# if key.startswith('loss'):
# if key not in values_byloss:
# values_byloss[key] = []
# values_byloss[key].append(val[key])
plt.figure(figsize=(13, 3))
for i, key in enumerate(sorted(values_byloss.keys()), 1):
plt.subplot(1, len(values_byloss), i)
plt.title(key)
plt.plot(values_byloss[key])
# plt.axhline(online.to_subtract[key])
plt.show()
def visualize_byindex(result, indices_lst, initial_value):
for ind in set(indices_lst):
res = get_history(result, ind)
res_dct = lstdct2dctlst(res)
plt.figure(figsize=(13, 3))
for i, key in enumerate(sorted(res_dct.keys()), 1):
hst = res_dct[key]
plt.subplot(1, len(res_dct), i)
plt.title(key + ' ' + str(ind))
if key in initial_value[ind]['metrics']:
initial = initial_value[ind]['metrics'][key]
hst = [initial] + hst
plt.axhline(initial)
if np.min(hst) > 0:
plt.yscale('log')
plt.plot(hst)
plt.show()
def lstdct2dctlst(lst):
"""List of dictionaries -> dictionary of lists."""
keys = lst[0].keys()
res = {key: [x[key] for x in lst] for key in keys}
return res
def compute_online_update(rating_value, mb_np_orig,
model_tensor_orig,
idx_set,
users_get_value=None,
n_repeats=1,
hotfix_update_hypers=None,
plot_charts=False,
pbars=None,
cancel_updates=False,
**kwargs):
"""Compute an online update given a pre-computed minibatch of data, and a model tensor.
Args:
rating_value: value of the rating in (-1, 1) to set
mb_np_orig: minibatch for the loss function
model_tensor_orig: current scores (users + aggregate)
idx_set: which rating ID in the minibatch to update?
        users_get_value: if pbars is set, these users' comparisons will be used
n_repeats: number of repetitions of the golden ratio search
hotfix_update_hypers: updates for hyperparameters
plot_charts: show visualization
cancel_updates: do a dummy run without optimization (return current values)
pbars: if not None, set progress bar to the value of comparison (0-100) for user_get_value
**kwargs: hotfix updates for the golden ratio algorithm
Returns:
        if pbars is set, returns None (only sets the values of the progress bars)
otherwise, returns a dictionary with a response.
"""
# internal object IDs to update
obj1 = mb_np_orig['objects_rating_v1'][idx_set]
obj2 = mb_np_orig['objects_rating_v2'][idx_set]
# copying the minibatch and the model tensor (will be updated)
mb_np_copy = deepcopy(mb_np_orig)
model_tensor_copy = deepcopy(model_tensor_orig)
if not cancel_updates:
# SETTING THE RATING VALUE
mb_np_copy['cmp'][idx_set, 0] = rating_value
# creating the updater
online = FeaturelessOnlineUpdater()
online.hypers['aggregate_index'] = -1
# hotfix parameter updates
if hotfix_update_hypers is not None:
for key, value in hotfix_update_hypers.items():
online.hypers[key] = value
for key, value in kwargs.items():
|
# setting data
online.set_minibatch(mb_np_copy)
online.set_model_tensor(model_tensor_copy)
online.set_subtract()
online.silent = True
# CONFIGURATION FOR INDICES
indices_lst = []
for i in range(model_tensor_orig.shape[0]):
indices_lst.append((i, obj1, 0))
indices_lst.append((i, obj2, 0))
indices_lst *= n_repeats
# initial value for the loss/index
initial_value = {ind: online.get_closure_loss(ind)(online.get_value(ind)) for ind in
set(indices_lst)}
if not cancel_updates:
# RUNNING OPTIMIZATION with GOLDEN RATIO
result = online.best_value_many_indices(indices_lst, assign_at_end=True)
# plotting
if plot_charts:
visualize_result_loss(result, indices_lst)
visualize_byindex(result, indices_lst, initial_value)
else:
result = None
if pbars is not None:
if 'comparison' in pbars:
assert len(users_get_value) == len(pbars['comparison'])
for user, pbar in zip(users_get_value, pbars['comparison']):
# obtaining model scores
score1 = online.get_value((user, obj1, 0))
score2 = online.get_value((user, obj2, 0))
# computing the comparison
comparison = 1 / (1 + np.exp(score1 - score2)) * MAX_VALUE
pbar.value = comparison
if 'v1' in pbars:
assert len(users_get_value) == len(pbars['v1'])
for user, pbar in zip(users_get_value, pbars['v1']):
# obtaining model scores
score1 = online.get_value((user, obj1, 0))
pbar.value = score1
if 'v2' in pbars:
assert len(users_get_value) == len(pbars['v2'])
for user, pbar in zip(users_get_value, pbars['v2']):
# obtaining model scores
score1 = online.get_value((user, obj2, 0))
pbar.value = score1
return None
else:
return {
'new_model_tensor': model_tensor_copy,
'new_minibatch': mb_np_copy,
'online_learner': online,
'indices_lst': indices_lst,
'result': result,
}
| online.golden_params[key] = value | conditional_block |
preference_aggregation_featureless_online.py | import logging
from copy import deepcopy
import gin
import numpy as np
from backend.rating_fields import MAX_VALUE
from matplotlib import pyplot as plt
from scipy.optimize import golden
from .preference_aggregation_featureless_np import loss_fcn_np
@gin.configurable
class | (object):
"""Update weights online."""
def __init__(self, hypers=None, golden_params=None):
if golden_params is None:
golden_params = {}
self.golden_params = golden_params
self.hypers = hypers
self.model_tensor = None
self.minibatch = None
self.to_subtract = {}
self.silent = False
def current_loss(self, key='loss'):
return self.get_closure_loss((0, 0, 0), key=key)(None)
def set_subtract(self, key='loss'):
self.to_subtract = self.current_loss(key=key)['metrics']
def set_minibatch(self, mb):
for key, val in mb.items():
assert isinstance(key, str), key
assert isinstance(val, np.ndarray), val
self.minibatch = mb
def set_model_tensor(self, mt):
assert isinstance(mt, np.ndarray), mt
self.model_tensor = mt
def get_value(self, indices):
"""Get one from the tensor."""
assert len(indices) == 3, indices
if self.model_tensor is None:
raise ValueError("Please set the tensor")
return self.model_tensor[indices[0], indices[1], indices[2]]
def set_value(self, indices, val):
"""Set value to the tensor."""
assert len(indices) == 3, indices
if self.model_tensor is None:
raise ValueError("Please set the tensor")
self.model_tensor[indices[0], indices[1], indices[2]] = val
return val
def get_closure_loss(self, indices, key='loss'):
"""Return a function that computes the loss given model_tensor[indices]."""
def f(x=None, self=self, indices=indices[:], key=str(key)):
if self.minibatch is None:
raise ValueError("Please set the minibatch first")
if self.model_tensor is None:
raise ValueError("Please set the tensor")
if self.hypers is None:
raise ValueError("Please use gin to configure hyperparameters")
if x is not None:
self.set_value(indices=indices, val=x)
result = loss_fcn_np(model_tensor=self.model_tensor,
**self.hypers, **self.minibatch)
return {
'loss': result[key] - self.to_subtract.get(key, 0.0),
'metrics': {**result, 'param': x}
}
return f
def best_value_many_indices(self, indices_list, **kwargs):
"""Given a list of indices (with possible repeats), run optimization and return stats."""
indices_list = [tuple(x) for x in indices_list]
stats = {indices: [] for indices in set(indices_list)}
for indices in indices_list:
stats[indices].append(self.best_value_indices(indices=indices, **kwargs))
return stats
def best_value_indices(self, indices, key='loss', assign_at_end=False,
give_history=True):
"""Given indices, use Golden Ratio search to compute the best model_tensor[indices]."""
# remember the original value
orig_value = self.get_value(indices=indices)
# function to compute the loss
loss_closure = self.get_closure_loss(indices=indices, key=key)
orig_loss = loss_closure(orig_value)['loss']
# history of metrics
history = []
def closure_with_history(x, give_history=give_history):
"""Given a value, compute the loss, store the value (optionally) and return by key."""
result = loss_closure(x)
if give_history:
history.append(result['metrics'])
return result['loss']
params = {x: y for x, y in self.golden_params.items()}
if 'smartbracket' in self.golden_params:
del params['smartbracket']
params['brack'] = tuple([orig_value + t for t in self.golden_params['smartbracket']])
# running optimization
best_value, best_loss, iterations = golden(closure_with_history,
full_output=True, **params)
# restoring the value if requested
if not assign_at_end:
self.set_value(indices=indices, val=orig_value)
else:
if best_loss < orig_loss:
self.set_value(indices=indices, val=best_value)
else:
if not self.silent:
logging.warning(f"No improvement {indices} {key} {assign_at_end}")
self.set_value(indices=indices, val=orig_value)
best_value = orig_value
best_loss = orig_loss
return {
'assigned': assign_at_end,
'history': history,
'orig_value_param': orig_value,
'best_value': best_value,
'best_loss': best_loss,
'iterations': iterations
}
# Plotting functions
def get_history(result, ind, only_last=True):
if only_last:
res = [z['history'][-1] for z in result[ind]]
else:
res = [x for z in result[ind] for x in z['history']]
return res
def visualize_result_loss(result, indices_lst):
values_byloss = {'loss': []}
ind_offset = {ind: 0 for ind in set(indices_lst)}
for ind in indices_lst:
val = result[ind][ind_offset[ind]]['best_loss']
ind_offset[ind] += 1
values_byloss['loss'].append(val)
# for key in val:
# if key.startswith('loss'):
# if key not in values_byloss:
# values_byloss[key] = []
# values_byloss[key].append(val[key])
plt.figure(figsize=(13, 3))
for i, key in enumerate(sorted(values_byloss.keys()), 1):
plt.subplot(1, len(values_byloss), i)
plt.title(key)
plt.plot(values_byloss[key])
# plt.axhline(online.to_subtract[key])
plt.show()
def visualize_byindex(result, indices_lst, initial_value):
for ind in set(indices_lst):
res = get_history(result, ind)
res_dct = lstdct2dctlst(res)
plt.figure(figsize=(13, 3))
for i, key in enumerate(sorted(res_dct.keys()), 1):
hst = res_dct[key]
plt.subplot(1, len(res_dct), i)
plt.title(key + ' ' + str(ind))
if key in initial_value[ind]['metrics']:
initial = initial_value[ind]['metrics'][key]
hst = [initial] + hst
plt.axhline(initial)
if np.min(hst) > 0:
plt.yscale('log')
plt.plot(hst)
plt.show()
def lstdct2dctlst(lst):
"""List of dictionaries -> dictionary of lists."""
keys = lst[0].keys()
res = {key: [x[key] for x in lst] for key in keys}
return res
def compute_online_update(rating_value, mb_np_orig,
model_tensor_orig,
idx_set,
users_get_value=None,
n_repeats=1,
hotfix_update_hypers=None,
plot_charts=False,
pbars=None,
cancel_updates=False,
**kwargs):
"""Compute an online update given a pre-computed minibatch of data, and a model tensor.
Args:
rating_value: value of the rating in (-1, 1) to set
mb_np_orig: minibatch for the loss function
model_tensor_orig: current scores (users + aggregate)
idx_set: which rating ID in the minibatch to update?
        users_get_value: if pbars is set, these users' comparisons will be used
n_repeats: number of repetitions of the golden ratio search
hotfix_update_hypers: updates for hyperparameters
plot_charts: show visualization
cancel_updates: do a dummy run without optimization (return current values)
pbars: if not None, set progress bar to the value of comparison (0-100) for user_get_value
**kwargs: hotfix updates for the golden ratio algorithm
Returns:
        if pbars is set, returns None (only sets the values of the progress bars)
otherwise, returns a dictionary with a response.
"""
# internal object IDs to update
obj1 = mb_np_orig['objects_rating_v1'][idx_set]
obj2 = mb_np_orig['objects_rating_v2'][idx_set]
# copying the minibatch and the model tensor (will be updated)
mb_np_copy = deepcopy(mb_np_orig)
model_tensor_copy = deepcopy(model_tensor_orig)
if not cancel_updates:
# SETTING THE RATING VALUE
mb_np_copy['cmp'][idx_set, 0] = rating_value
# creating the updater
online = FeaturelessOnlineUpdater()
online.hypers['aggregate_index'] = -1
# hotfix parameter updates
if hotfix_update_hypers is not None:
for key, value in hotfix_update_hypers.items():
online.hypers[key] = value
for key, value in kwargs.items():
online.golden_params[key] = value
# setting data
online.set_minibatch(mb_np_copy)
online.set_model_tensor(model_tensor_copy)
online.set_subtract()
online.silent = True
# CONFIGURATION FOR INDICES
indices_lst = []
for i in range(model_tensor_orig.shape[0]):
indices_lst.append((i, obj1, 0))
indices_lst.append((i, obj2, 0))
indices_lst *= n_repeats
# initial value for the loss/index
initial_value = {ind: online.get_closure_loss(ind)(online.get_value(ind)) for ind in
set(indices_lst)}
if not cancel_updates:
# RUNNING OPTIMIZATION with GOLDEN RATIO
result = online.best_value_many_indices(indices_lst, assign_at_end=True)
# plotting
if plot_charts:
visualize_result_loss(result, indices_lst)
visualize_byindex(result, indices_lst, initial_value)
else:
result = None
if pbars is not None:
if 'comparison' in pbars:
assert len(users_get_value) == len(pbars['comparison'])
for user, pbar in zip(users_get_value, pbars['comparison']):
# obtaining model scores
score1 = online.get_value((user, obj1, 0))
score2 = online.get_value((user, obj2, 0))
# computing the comparison
comparison = 1 / (1 + np.exp(score1 - score2)) * MAX_VALUE
pbar.value = comparison
if 'v1' in pbars:
assert len(users_get_value) == len(pbars['v1'])
for user, pbar in zip(users_get_value, pbars['v1']):
# obtaining model scores
score1 = online.get_value((user, obj1, 0))
pbar.value = score1
if 'v2' in pbars:
assert len(users_get_value) == len(pbars['v2'])
for user, pbar in zip(users_get_value, pbars['v2']):
# obtaining model scores
score1 = online.get_value((user, obj2, 0))
pbar.value = score1
return None
else:
return {
'new_model_tensor': model_tensor_copy,
'new_minibatch': mb_np_copy,
'online_learner': online,
'indices_lst': indices_lst,
'result': result,
}
| FeaturelessOnlineUpdater | identifier_name |
preference_aggregation_featureless_online.py | import logging
from copy import deepcopy
import gin
import numpy as np
from backend.rating_fields import MAX_VALUE
from matplotlib import pyplot as plt
from scipy.optimize import golden
from .preference_aggregation_featureless_np import loss_fcn_np
@gin.configurable
class FeaturelessOnlineUpdater(object):
"""Update weights online."""
def __init__(self, hypers=None, golden_params=None):
if golden_params is None:
golden_params = {}
self.golden_params = golden_params
self.hypers = hypers
self.model_tensor = None
self.minibatch = None
self.to_subtract = {}
self.silent = False
def current_loss(self, key='loss'):
return self.get_closure_loss((0, 0, 0), key=key)(None)
def set_subtract(self, key='loss'):
self.to_subtract = self.current_loss(key=key)['metrics']
def set_minibatch(self, mb):
for key, val in mb.items():
assert isinstance(key, str), key
assert isinstance(val, np.ndarray), val
self.minibatch = mb
def set_model_tensor(self, mt):
assert isinstance(mt, np.ndarray), mt
self.model_tensor = mt
def get_value(self, indices):
"""Get one from the tensor."""
assert len(indices) == 3, indices
if self.model_tensor is None:
raise ValueError("Please set the tensor")
return self.model_tensor[indices[0], indices[1], indices[2]]
def set_value(self, indices, val):
"""Set value to the tensor."""
assert len(indices) == 3, indices
if self.model_tensor is None:
raise ValueError("Please set the tensor")
self.model_tensor[indices[0], indices[1], indices[2]] = val
return val
def get_closure_loss(self, indices, key='loss'):
"""Return a function that computes the loss given model_tensor[indices]."""
def f(x=None, self=self, indices=indices[:], key=str(key)):
if self.minibatch is None:
raise ValueError("Please set the minibatch first")
if self.model_tensor is None:
raise ValueError("Please set the tensor")
if self.hypers is None:
raise ValueError("Please use gin to configure hyperparameters")
if x is not None:
self.set_value(indices=indices, val=x)
result = loss_fcn_np(model_tensor=self.model_tensor,
**self.hypers, **self.minibatch)
return {
'loss': result[key] - self.to_subtract.get(key, 0.0),
'metrics': {**result, 'param': x}
}
return f
def best_value_many_indices(self, indices_list, **kwargs):
"""Given a list of indices (with possible repeats), run optimization and return stats."""
indices_list = [tuple(x) for x in indices_list]
stats = {indices: [] for indices in set(indices_list)}
for indices in indices_list:
stats[indices].append(self.best_value_indices(indices=indices, **kwargs))
return stats
def best_value_indices(self, indices, key='loss', assign_at_end=False,
give_history=True):
"""Given indices, use Golden Ratio search to compute the best model_tensor[indices]."""
# remember the original value
orig_value = self.get_value(indices=indices)
# function to compute the loss
loss_closure = self.get_closure_loss(indices=indices, key=key)
orig_loss = loss_closure(orig_value)['loss']
# history of metrics
history = []
def closure_with_history(x, give_history=give_history):
"""Given a value, compute the loss, store the value (optionally) and return by key."""
result = loss_closure(x)
if give_history:
history.append(result['metrics'])
return result['loss']
params = {x: y for x, y in self.golden_params.items()}
if 'smartbracket' in self.golden_params:
del params['smartbracket']
params['brack'] = tuple([orig_value + t for t in self.golden_params['smartbracket']])
# running optimization
best_value, best_loss, iterations = golden(closure_with_history,
full_output=True, **params)
# restoring the value if requested
if not assign_at_end:
self.set_value(indices=indices, val=orig_value)
else:
if best_loss < orig_loss:
self.set_value(indices=indices, val=best_value)
else:
if not self.silent:
logging.warning(f"No improvement {indices} {key} {assign_at_end}")
self.set_value(indices=indices, val=orig_value)
best_value = orig_value
best_loss = orig_loss
return {
'assigned': assign_at_end,
'history': history,
'orig_value_param': orig_value,
'best_value': best_value,
'best_loss': best_loss,
'iterations': iterations
}
# Plotting functions
def get_history(result, ind, only_last=True):
if only_last:
res = [z['history'][-1] for z in result[ind]]
else:
res = [x for z in result[ind] for x in z['history']]
return res
def visualize_result_loss(result, indices_lst):
values_byloss = {'loss': []}
ind_offset = {ind: 0 for ind in set(indices_lst)}
for ind in indices_lst:
val = result[ind][ind_offset[ind]]['best_loss']
ind_offset[ind] += 1
values_byloss['loss'].append(val)
# for key in val:
# if key.startswith('loss'):
# if key not in values_byloss:
# values_byloss[key] = []
# values_byloss[key].append(val[key])
plt.figure(figsize=(13, 3))
for i, key in enumerate(sorted(values_byloss.keys()), 1):
plt.subplot(1, len(values_byloss), i)
plt.title(key)
plt.plot(values_byloss[key])
# plt.axhline(online.to_subtract[key])
plt.show()
def visualize_byindex(result, indices_lst, initial_value):
|
def lstdct2dctlst(lst):
"""List of dictionaries -> dictionary of lists."""
keys = lst[0].keys()
res = {key: [x[key] for x in lst] for key in keys}
return res
def compute_online_update(rating_value, mb_np_orig,
model_tensor_orig,
idx_set,
users_get_value=None,
n_repeats=1,
hotfix_update_hypers=None,
plot_charts=False,
pbars=None,
cancel_updates=False,
**kwargs):
"""Compute an online update given a pre-computed minibatch of data, and a model tensor.
Args:
rating_value: value of the rating in (-1, 1) to set
mb_np_orig: minibatch for the loss function
model_tensor_orig: current scores (users + aggregate)
idx_set: which rating ID in the minibatch to update?
        users_get_value: if pbars is set, these users' comparisons will be used
n_repeats: number of repetitions of the golden ratio search
hotfix_update_hypers: updates for hyperparameters
plot_charts: show visualization
cancel_updates: do a dummy run without optimization (return current values)
pbars: if not None, set progress bar to the value of comparison (0-100) for user_get_value
**kwargs: hotfix updates for the golden ratio algorithm
Returns:
        if pbars is set, returns None (only sets the values of the progress bars)
otherwise, returns a dictionary with a response.
"""
# internal object IDs to update
obj1 = mb_np_orig['objects_rating_v1'][idx_set]
obj2 = mb_np_orig['objects_rating_v2'][idx_set]
# copying the minibatch and the model tensor (will be updated)
mb_np_copy = deepcopy(mb_np_orig)
model_tensor_copy = deepcopy(model_tensor_orig)
if not cancel_updates:
# SETTING THE RATING VALUE
mb_np_copy['cmp'][idx_set, 0] = rating_value
# creating the updater
online = FeaturelessOnlineUpdater()
online.hypers['aggregate_index'] = -1
# hotfix parameter updates
if hotfix_update_hypers is not None:
for key, value in hotfix_update_hypers.items():
online.hypers[key] = value
for key, value in kwargs.items():
online.golden_params[key] = value
# setting data
online.set_minibatch(mb_np_copy)
online.set_model_tensor(model_tensor_copy)
online.set_subtract()
online.silent = True
# CONFIGURATION FOR INDICES
indices_lst = []
for i in range(model_tensor_orig.shape[0]):
indices_lst.append((i, obj1, 0))
indices_lst.append((i, obj2, 0))
indices_lst *= n_repeats
# initial value for the loss/index
initial_value = {ind: online.get_closure_loss(ind)(online.get_value(ind)) for ind in
set(indices_lst)}
if not cancel_updates:
# RUNNING OPTIMIZATION with GOLDEN RATIO
result = online.best_value_many_indices(indices_lst, assign_at_end=True)
# plotting
if plot_charts:
visualize_result_loss(result, indices_lst)
visualize_byindex(result, indices_lst, initial_value)
else:
result = None
if pbars is not None:
if 'comparison' in pbars:
assert len(users_get_value) == len(pbars['comparison'])
for user, pbar in zip(users_get_value, pbars['comparison']):
# obtaining model scores
score1 = online.get_value((user, obj1, 0))
score2 = online.get_value((user, obj2, 0))
# computing the comparison
comparison = 1 / (1 + np.exp(score1 - score2)) * MAX_VALUE
pbar.value = comparison
if 'v1' in pbars:
assert len(users_get_value) == len(pbars['v1'])
for user, pbar in zip(users_get_value, pbars['v1']):
# obtaining model scores
score1 = online.get_value((user, obj1, 0))
pbar.value = score1
if 'v2' in pbars:
assert len(users_get_value) == len(pbars['v2'])
for user, pbar in zip(users_get_value, pbars['v2']):
# obtaining model scores
score1 = online.get_value((user, obj2, 0))
pbar.value = score1
return None
else:
return {
'new_model_tensor': model_tensor_copy,
'new_minibatch': mb_np_copy,
'online_learner': online,
'indices_lst': indices_lst,
'result': result,
}
| for ind in set(indices_lst):
res = get_history(result, ind)
res_dct = lstdct2dctlst(res)
plt.figure(figsize=(13, 3))
for i, key in enumerate(sorted(res_dct.keys()), 1):
hst = res_dct[key]
plt.subplot(1, len(res_dct), i)
plt.title(key + ' ' + str(ind))
if key in initial_value[ind]['metrics']:
initial = initial_value[ind]['metrics'][key]
hst = [initial] + hst
plt.axhline(initial)
if np.min(hst) > 0:
plt.yscale('log')
plt.plot(hst)
plt.show() | identifier_body |
smd.rs | use std::fs::File;
use std::io::{BufReader};
use std::path::PathBuf;
use cgmath::{Matrix4, Deg, Vector4, SquareMatrix, Vector3, Euler, Quaternion, Rotation};
use soto::task::{task_log};
use soto::Error;
use sotolib_fbx::{RawFbx, id_name, friendly_name, ObjectTreeNode};
use sotolib_fbx::animation::{Animation};
use sotolib_fbx::simple::{Object, SimpleFbx, ObjectType, ModelProperties, Geometry};
use sotolib_smd::{Smd, SmdVertex, SmdTriangle, SmdAnimationFrameBone, SmdBone};
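/// Converts the reference (bind-pose) FBX at `fbx` into an SMD: every model node
/// becomes a bone and its geometry is written out as triangles parented to it.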
pub fn create_reference_smd(fbx: &PathBuf, flip_fix_list: &Vec<String>) -> Result<Smd, Error> {
// Read in the fbx we got told to convert
let file = BufReader::new(File::open(&fbx).unwrap());
let fbx = SimpleFbx::from_raw(&RawFbx::parse(file).unwrap()).unwrap();
let fbx_tree = ObjectTreeNode::from_simple(&fbx);
// Go over all FBX root nodes and turn them into SMD data
let mut smd = Smd::new();
process_fbx_node(
&fbx,
&fbx_tree, &mut smd,
&Matrix4::identity(),
None,
flip_fix_list
)?;
Ok(smd)
}
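/// Converts an animation FBX into an SMD that reuses the bones of `ref_smd`,
/// writing one translation/rotation entry per bone for every animation frame.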
pub fn create_animation_smd(
ref_smd: &Smd, fbx: &PathBuf, flip_fix_list: &Vec<String>,
) -> Result<Smd, Error> {
// Read in the fbx we got told to convert
let file = BufReader::new(File::open(&fbx).unwrap());
let mut fbx = SimpleFbx::from_raw(&RawFbx::parse(file).unwrap()).unwrap();
// Read in the animation data itself
let animation = Animation::from_simple(&fbx).unwrap();
// Count and log frames
let frame_count = animation.frame_count(&fbx);
task_log(format!("Animation has {} frames", frame_count));
// Copy over every bone to the new animation SMD
let mut smd = Smd::new();
for bone in &ref_smd.bones {
smd.bones.push(bone.clone());
}
// Finally, turn the animation data into bone positions in the SMD
for frame in 0..frame_count {
// First transform the FBX for this frame
animation.transform_fbx_to_frame(&mut fbx, frame);
// Now go over all models
for (_, model) in fbx.objects.iter().filter(|&(_, o)| o.class.type_name() == "Model") {
// For this model, look up the matching BoneId in the reference SMD
if let Some(bone_id) = ref_smd.id_of_bone(&id_name(&model.name).unwrap()) {
// Now that we have a model and a bone, we need the current translation and rotation
// for the model
let (translation, rotation) = calculate_animation_transforms_for(&fbx, model, flip_fix_list);
// And now that we have those, finally add the bone data to the animation SMD
smd.set_animation(frame, bone_id, SmdAnimationFrameBone {
translation: translation.into(),
rotation: rotation.into(),
});
}
}
}
Ok(smd)
}
fn process_fbx_node(
fbx: &SimpleFbx,
fbx_node: &ObjectTreeNode, mut smd: &mut Smd,
matrix: &Matrix4<f32>,
current_bone: Option<&SmdBone>,
flip_fix_list: &Vec<String>,
) -> Result<(), Error> {
// Perform node type specific information
match fbx_node.object.class {
ObjectType::Geometry(ref geometry) =>
process_geometry(smd, geometry, matrix, current_bone.unwrap()),
ObjectType::Model(ref _model) =>
process_model(fbx, fbx_node, smd, matrix, current_bone, flip_fix_list)?,
_ => {
// Just go straight to the children
for node in &fbx_node.nodes {
process_fbx_node(fbx, node, smd, matrix, current_bone, flip_fix_list)?;
}
}
}
Ok(())
}
fn process_geometry(smd: &mut Smd, geometry: &Geometry, matrix: &Matrix4<f32>, current_bone: &SmdBone) {
// Add triangles to parent node
let tris = geometry.triangles();
for tri in tris {
// Turn the vertices in this triangle to SMD vertices
let mut smd_verts: [SmdVertex; 3] = Default::default();
for (i, vert) in tri.iter().enumerate() {
// Multiply the vectors that need to be multiplied
let pos = matrix * Vector4::new(vert.0[0], vert.0[1], vert.0[2], 1.0);
let norm = matrix * Vector4::new(vert.1[0], vert.1[1], vert.1[2], 0.0);
smd_verts[i] = SmdVertex {
parent_bone: current_bone.id, // This is overwritten by links
position: pos.truncate().into(),
normal: norm.truncate().into(),
uv: vert.2,
links: vec!(
/*Not needed, we aren't using weights anyways so this done by parent_bone
SmdLink {
bone: bone_id,
weight: 1.0,
}*/
)
};
}
// Add the actual SMD triangle
smd.triangles.push(SmdTriangle {
material: "layl_test_texture".into(),
vertices: smd_verts,
});
}
}
fn process_model(
fbx: &SimpleFbx,
fbx_node: &ObjectTreeNode, smd: &mut Smd,
matrix: &Matrix4<f32>,
current_bone: Option<&SmdBone>,
flip_fix_list: &Vec<String>,
) -> Result<(), Error> {
task_log(format!("Adding model \"{}\" to SMD data", friendly_name(&fbx_node.object.name)));
let properties = ModelProperties::from_generic(&fbx_node.object.properties);
// Create a new transformation matrix
let local_matrix = local_matrices(&properties);
// Create a new bone
let new_bone = smd.new_bone(
&id_name(&fbx_node.object.name).unwrap(),
current_bone.map(|b| b.id)
)
.ok_or_else(|| Error::Task(format!(
"Bone \"{}\" exists multiple times in the FBX",
&fbx_node.object.name
)))?
.clone(); // Clone needed to avoid a borrow since we need to mut borrow SMD later
// Set the transformations on this bone
let (translation, rotation) = calculate_animation_transforms_for(&fbx, &fbx_node.object, flip_fix_list);
let first_frame = SmdAnimationFrameBone {
// This needs to be derived from the matrix to get the right location
translation: translation.into(),
// This can just be directly copied over
rotation: rotation.into(),
};
smd.set_animation(0, new_bone.id, first_frame);
// Make new matrices for children
let matrix = matrix * local_matrix;
// Make sure the child nodes will receive this new bone
for node in &fbx_node.nodes {
process_fbx_node(fbx, node, smd, &matrix, Some(&new_bone), flip_fix_list)?;
}
Ok(())
}
/// Returns (Translation, Rotation)
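/// Composes the node's FBX pre-rotation, rotation and inverted post-rotation into
/// Euler angles, and sums the node translation, rotation offset, rotation pivot and
/// the parent's after-rotation translation; bones listed in `flip_fix_list` have
/// their node rotation inverted as a workaround.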
fn calculate_animation_transforms_for(
fbx: &SimpleFbx, obj: &Object, flip_fix_list: &Vec<String>,
) -> (Vector3<f32>, Vector3<f32>) {
let properties = ModelProperties::from_generic(&obj.properties);
// Get the bone's translation
let parent_after_rot_translation = calculate_parent_after_rot_translation(fbx, obj);
let prop_translation: Vector3<_> = properties.translation.into();
let prop_rot_offset: Vector3<_> = properties.rotation_offset.into();
let prop_rot_pivot: Vector3<_> = properties.rotation_pivot.into();
let translation = parent_after_rot_translation + prop_translation + prop_rot_offset + prop_rot_pivot;
// Check if this bone's in the flip fix list
// TODO: Get an actual fix instead of this dirty manual hack
let flip = flip_fix_list.iter().any(|n| n == &id_name(&obj.name).unwrap());
// We want the rotation, but we've got multiple rotations, so combine them
let pre_rotation = Quaternion::from(Euler::new(
Deg(properties.pre_rotation[0]), Deg(properties.pre_rotation[1]), Deg(properties.pre_rotation[2])
));
let rotation = Quaternion::from(Euler::new(
Deg(properties.rotation[0]), Deg(properties.rotation[1]), Deg(properties.rotation[2])
));
let post_rotation = Quaternion::from(Euler::new(
Deg(properties.post_rotation[0]), Deg(properties.post_rotation[1]), Deg(properties.post_rotation[2])
));
let total_rotation = if !flip {
Euler::from(post_rotation.invert() * rotation * pre_rotation)
} else {
Euler::from(post_rotation.invert() * rotation.invert() * pre_rotation)
};
let rotation = Vector3::new(
total_rotation.x.0,
total_rotation.y.0,
total_rotation.z.0,
);
| }
fn calculate_parent_after_rot_translation(fbx: &SimpleFbx, obj: &Object) -> Vector3<f32> {
// First actually get the parent's model data
let parent_obj = if let Some(v) = fbx.parent_of(obj.id) {
if v == 0 {
// At root, no extra translation
return Vector3::new(0.0, 0.0, 0.0)
}
v
} else {
// No parent, no extra translation
return Vector3::new(0.0, 0.0, 0.0)
};
let props = ModelProperties::from_generic(&fbx.objects[&parent_obj].properties);
// Now add up all the translations applied after rotation
let rotation_pivot: Vector3<_> = props.rotation_pivot.into();
let scale_offset: Vector3<_> = props.scale_offset.into();
let translation = -rotation_pivot + scale_offset;
translation
}
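/// Builds the local transform for a model node following the FBX transform chain:
/// translation, rotation offset/pivot, pre-rotation, rotation, inverse post-rotation
/// and inverse rotation pivot, then the scale offset/pivot/scale block. Child
/// geometry vertices and normals are multiplied by the accumulated matrix.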
fn local_matrices(properties: &ModelProperties) -> Matrix4<f32> {
// Create various matrices
let rotation_offset = properties.rotation_offset.into();
let rotation_offset_mat = Matrix4::from_translation(rotation_offset);
let rotation_pivot: Vector3<_> = properties.rotation_pivot.into();
let rotation_pivot_mat = Matrix4::from_translation(rotation_pivot);
let pre_rotation = euler_rotation_to_matrix(properties.pre_rotation);
let rotation = euler_rotation_to_matrix(properties.rotation);
let post_rotation = euler_rotation_to_matrix(properties.post_rotation);
let scale_offset = properties.scale_offset.into();
let scale_offset_mat = Matrix4::from_translation(scale_offset);
let scale_pivot: Vector3<_> = properties.scale_pivot.into();
let scale_pivot_mat = Matrix4::from_translation(scale_pivot);
let scale = Matrix4::from_nonuniform_scale(
properties.scale[0],
properties.scale[1],
properties.scale[2]
);
let local_matrix_for_vertices =
Matrix4::from_translation(properties.translation.into()) *
// Rotation
rotation_offset_mat *
rotation_pivot_mat *
pre_rotation *
rotation *
post_rotation.invert().unwrap() *
rotation_pivot_mat.invert().unwrap() *
// Scale
scale_offset_mat *
scale_pivot_mat *
scale *
scale_pivot_mat.invert().unwrap();
local_matrix_for_vertices
}
fn euler_rotation_to_matrix(rot_degs: [f32; 3]) -> Matrix4<f32> {
Matrix4::from_angle_z(Deg(rot_degs[2])) *
Matrix4::from_angle_y(Deg(rot_degs[1])) *
Matrix4::from_angle_x(Deg(rot_degs[0]))
} | (translation, rotation) | random_line_split |
smd.rs | use std::fs::File;
use std::io::{BufReader};
use std::path::PathBuf;
use cgmath::{Matrix4, Deg, Vector4, SquareMatrix, Vector3, Euler, Quaternion, Rotation};
use soto::task::{task_log};
use soto::Error;
use sotolib_fbx::{RawFbx, id_name, friendly_name, ObjectTreeNode};
use sotolib_fbx::animation::{Animation};
use sotolib_fbx::simple::{Object, SimpleFbx, ObjectType, ModelProperties, Geometry};
use sotolib_smd::{Smd, SmdVertex, SmdTriangle, SmdAnimationFrameBone, SmdBone};
pub fn create_reference_smd(fbx: &PathBuf, flip_fix_list: &Vec<String>) -> Result<Smd, Error> {
// Read in the fbx we got told to convert
let file = BufReader::new(File::open(&fbx).unwrap());
let fbx = SimpleFbx::from_raw(&RawFbx::parse(file).unwrap()).unwrap();
let fbx_tree = ObjectTreeNode::from_simple(&fbx);
// Go over all FBX root nodes and turn them into SMD data
let mut smd = Smd::new();
process_fbx_node(
&fbx,
&fbx_tree, &mut smd,
&Matrix4::identity(),
None,
flip_fix_list
)?;
Ok(smd)
}
pub fn create_animation_smd(
ref_smd: &Smd, fbx: &PathBuf, flip_fix_list: &Vec<String>,
) -> Result<Smd, Error> {
// Read in the fbx we got told to convert
let file = BufReader::new(File::open(&fbx).unwrap());
let mut fbx = SimpleFbx::from_raw(&RawFbx::parse(file).unwrap()).unwrap();
// Read in the animation data itself
let animation = Animation::from_simple(&fbx).unwrap();
// Count and log frames
let frame_count = animation.frame_count(&fbx);
task_log(format!("Animation has {} frames", frame_count));
// Copy over every bone to the new animation SMD
let mut smd = Smd::new();
for bone in &ref_smd.bones {
smd.bones.push(bone.clone());
}
// Finally, turn the animation data into bone positions in the SMD
for frame in 0..frame_count {
// First transform the FBX for this frame
animation.transform_fbx_to_frame(&mut fbx, frame);
// Now go over all models
for (_, model) in fbx.objects.iter().filter(|&(_, o)| o.class.type_name() == "Model") {
// For this model, look up the matching BoneId in the reference SMD
if let Some(bone_id) = ref_smd.id_of_bone(&id_name(&model.name).unwrap()) {
// Now that we have a model and a bone, we need the current translation and rotation
// for the model
let (translation, rotation) = calculate_animation_transforms_for(&fbx, model, flip_fix_list);
// And now that we have those, finally add the bone data to the animation SMD
smd.set_animation(frame, bone_id, SmdAnimationFrameBone {
translation: translation.into(),
rotation: rotation.into(),
});
}
}
}
Ok(smd)
}
fn process_fbx_node(
fbx: &SimpleFbx,
fbx_node: &ObjectTreeNode, mut smd: &mut Smd,
matrix: &Matrix4<f32>,
current_bone: Option<&SmdBone>,
flip_fix_list: &Vec<String>,
) -> Result<(), Error> {
    // Perform node-type-specific processing
match fbx_node.object.class {
ObjectType::Geometry(ref geometry) =>
process_geometry(smd, geometry, matrix, current_bone.unwrap()),
ObjectType::Model(ref _model) =>
process_model(fbx, fbx_node, smd, matrix, current_bone, flip_fix_list)?,
_ => {
// Just go straight to the children
for node in &fbx_node.nodes {
process_fbx_node(fbx, node, smd, matrix, current_bone, flip_fix_list)?;
}
}
}
Ok(())
}
fn process_geometry(smd: &mut Smd, geometry: &Geometry, matrix: &Matrix4<f32>, current_bone: &SmdBone) {
// Add triangles to parent node
let tris = geometry.triangles();
for tri in tris {
// Turn the vertices in this triangle to SMD vertices
let mut smd_verts: [SmdVertex; 3] = Default::default();
for (i, vert) in tri.iter().enumerate() {
            // Transform the position (w = 1) and normal (w = 0) by the accumulated node matrix
let pos = matrix * Vector4::new(vert.0[0], vert.0[1], vert.0[2], 1.0);
let norm = matrix * Vector4::new(vert.1[0], vert.1[1], vert.1[2], 0.0);
smd_verts[i] = SmdVertex {
parent_bone: current_bone.id, // This is overwritten by links
position: pos.truncate().into(),
normal: norm.truncate().into(),
uv: vert.2,
links: vec!(
                    /*Not needed, we aren't using weights anyway, so this is handled by parent_bone
SmdLink {
bone: bone_id,
weight: 1.0,
}*/
)
};
}
// Add the actual SMD triangle
smd.triangles.push(SmdTriangle {
material: "layl_test_texture".into(),
vertices: smd_verts,
});
}
}
fn process_model(
fbx: &SimpleFbx,
fbx_node: &ObjectTreeNode, smd: &mut Smd,
matrix: &Matrix4<f32>,
current_bone: Option<&SmdBone>,
flip_fix_list: &Vec<String>,
) -> Result<(), Error> {
task_log(format!("Adding model \"{}\" to SMD data", friendly_name(&fbx_node.object.name)));
let properties = ModelProperties::from_generic(&fbx_node.object.properties);
// Create a new transformation matrix
let local_matrix = local_matrices(&properties);
// Create a new bone
let new_bone = smd.new_bone(
&id_name(&fbx_node.object.name).unwrap(),
current_bone.map(|b| b.id)
)
.ok_or_else(|| Error::Task(format!(
"Bone \"{}\" exists multiple times in the FBX",
&fbx_node.object.name
)))?
.clone(); // Clone needed to avoid a borrow since we need to mut borrow SMD later
// Set the transformations on this bone
let (translation, rotation) = calculate_animation_transforms_for(&fbx, &fbx_node.object, flip_fix_list);
let first_frame = SmdAnimationFrameBone {
// This needs to be derived from the matrix to get the right location
translation: translation.into(),
// This can just be directly copied over
rotation: rotation.into(),
};
smd.set_animation(0, new_bone.id, first_frame);
// Make new matrices for children
let matrix = matrix * local_matrix;
// Make sure the child nodes will receive this new bone
for node in &fbx_node.nodes {
process_fbx_node(fbx, node, smd, &matrix, Some(&new_bone), flip_fix_list)?;
}
Ok(())
}
/// Returns (Translation, Rotation)
fn calculate_animation_transforms_for(
fbx: &SimpleFbx, obj: &Object, flip_fix_list: &Vec<String>,
) -> (Vector3<f32>, Vector3<f32>) {
let properties = ModelProperties::from_generic(&obj.properties);
// Get the bone's translation
let parent_after_rot_translation = calculate_parent_after_rot_translation(fbx, obj);
let prop_translation: Vector3<_> = properties.translation.into();
let prop_rot_offset: Vector3<_> = properties.rotation_offset.into();
let prop_rot_pivot: Vector3<_> = properties.rotation_pivot.into();
let translation = parent_after_rot_translation + prop_translation + prop_rot_offset + prop_rot_pivot;
// Check if this bone's in the flip fix list
// TODO: Get an actual fix instead of this dirty manual hack
let flip = flip_fix_list.iter().any(|n| n == &id_name(&obj.name).unwrap());
// We want the rotation, but we've got multiple rotations, so combine them
let pre_rotation = Quaternion::from(Euler::new(
Deg(properties.pre_rotation[0]), Deg(properties.pre_rotation[1]), Deg(properties.pre_rotation[2])
));
let rotation = Quaternion::from(Euler::new(
Deg(properties.rotation[0]), Deg(properties.rotation[1]), Deg(properties.rotation[2])
));
let post_rotation = Quaternion::from(Euler::new(
Deg(properties.post_rotation[0]), Deg(properties.post_rotation[1]), Deg(properties.post_rotation[2])
));
let total_rotation = if !flip {
Euler::from(post_rotation.invert() * rotation * pre_rotation)
} else {
Euler::from(post_rotation.invert() * rotation.invert() * pre_rotation)
};
let rotation = Vector3::new(
total_rotation.x.0,
total_rotation.y.0,
total_rotation.z.0,
);
(translation, rotation)
}
fn calculate_parent_after_rot_translation(fbx: &SimpleFbx, obj: &Object) -> Vector3<f32> {
// First actually get the parent's model data
let parent_obj = if let Some(v) = fbx.parent_of(obj.id) {
if v == 0 {
// At root, no extra translation
return Vector3::new(0.0, 0.0, 0.0)
}
v
} else {
// No parent, no extra translation
return Vector3::new(0.0, 0.0, 0.0)
};
let props = ModelProperties::from_generic(&fbx.objects[&parent_obj].properties);
// Now add up all the translations applied after rotation
let rotation_pivot: Vector3<_> = props.rotation_pivot.into();
let scale_offset: Vector3<_> = props.scale_offset.into();
let translation = -rotation_pivot + scale_offset;
translation
}
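// Taken together, the two functions above assemble the bone pose written into the
// SMD. As a summary of the code (not an FBX-spec statement):
//   translation = (-parent.rotation_pivot + parent.scale_offset)
//                 + model.translation + model.rotation_offset + model.rotation_pivot
//   rotation    = Euler(post_rotation.invert() * rotation * pre_rotation)          // normal case
//   rotation    = Euler(post_rotation.invert() * rotation.invert() * pre_rotation) // node in flip_fix_list
// with the quaternion result converted back to per-axis degrees before being stored.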
fn local_matrices(properties: &ModelProperties) -> Matrix4<f32> {
// Create various matrices
let rotation_offset = properties.rotation_offset.into();
let rotation_offset_mat = Matrix4::from_translation(rotation_offset);
let rotation_pivot: Vector3<_> = properties.rotation_pivot.into();
let rotation_pivot_mat = Matrix4::from_translation(rotation_pivot);
let pre_rotation = euler_rotation_to_matrix(properties.pre_rotation);
let rotation = euler_rotation_to_matrix(properties.rotation);
let post_rotation = euler_rotation_to_matrix(properties.post_rotation);
let scale_offset = properties.scale_offset.into();
let scale_offset_mat = Matrix4::from_translation(scale_offset);
let scale_pivot: Vector3<_> = properties.scale_pivot.into();
let scale_pivot_mat = Matrix4::from_translation(scale_pivot);
let scale = Matrix4::from_nonuniform_scale(
properties.scale[0],
properties.scale[1],
properties.scale[2]
);
let local_matrix_for_vertices =
Matrix4::from_translation(properties.translation.into()) *
// Rotation
rotation_offset_mat *
rotation_pivot_mat *
pre_rotation *
rotation *
post_rotation.invert().unwrap() *
rotation_pivot_mat.invert().unwrap() *
// Scale
scale_offset_mat *
scale_pivot_mat *
scale *
scale_pivot_mat.invert().unwrap();
local_matrix_for_vertices
}
fn | (rot_degs: [f32; 3]) -> Matrix4<f32> {
Matrix4::from_angle_z(Deg(rot_degs[2])) *
Matrix4::from_angle_y(Deg(rot_degs[1])) *
Matrix4::from_angle_x(Deg(rot_degs[0]))
}
| euler_rotation_to_matrix | identifier_name |
smd.rs | use std::fs::File;
use std::io::{BufReader};
use std::path::PathBuf;
use cgmath::{Matrix4, Deg, Vector4, SquareMatrix, Vector3, Euler, Quaternion, Rotation};
use soto::task::{task_log};
use soto::Error;
use sotolib_fbx::{RawFbx, id_name, friendly_name, ObjectTreeNode};
use sotolib_fbx::animation::{Animation};
use sotolib_fbx::simple::{Object, SimpleFbx, ObjectType, ModelProperties, Geometry};
use sotolib_smd::{Smd, SmdVertex, SmdTriangle, SmdAnimationFrameBone, SmdBone};
pub fn create_reference_smd(fbx: &PathBuf, flip_fix_list: &Vec<String>) -> Result<Smd, Error> {
// Read in the fbx we got told to convert
let file = BufReader::new(File::open(&fbx).unwrap());
let fbx = SimpleFbx::from_raw(&RawFbx::parse(file).unwrap()).unwrap();
let fbx_tree = ObjectTreeNode::from_simple(&fbx);
// Go over all FBX root nodes and turn them into SMD data
let mut smd = Smd::new();
process_fbx_node(
&fbx,
&fbx_tree, &mut smd,
&Matrix4::identity(),
None,
flip_fix_list
)?;
Ok(smd)
}
pub fn create_animation_smd(
ref_smd: &Smd, fbx: &PathBuf, flip_fix_list: &Vec<String>,
) -> Result<Smd, Error> {
// Read in the fbx we got told to convert
let file = BufReader::new(File::open(&fbx).unwrap());
let mut fbx = SimpleFbx::from_raw(&RawFbx::parse(file).unwrap()).unwrap();
// Read in the animation data itself
let animation = Animation::from_simple(&fbx).unwrap();
// Count and log frames
let frame_count = animation.frame_count(&fbx);
task_log(format!("Animation has {} frames", frame_count));
// Copy over every bone to the new animation SMD
let mut smd = Smd::new();
for bone in &ref_smd.bones {
smd.bones.push(bone.clone());
}
// Finally, turn the animation data into bone positions in the SMD
for frame in 0..frame_count {
// First transform the FBX for this frame
animation.transform_fbx_to_frame(&mut fbx, frame);
// Now go over all models
for (_, model) in fbx.objects.iter().filter(|&(_, o)| o.class.type_name() == "Model") {
// For this model, look up the matching BoneId in the reference SMD
if let Some(bone_id) = ref_smd.id_of_bone(&id_name(&model.name).unwrap()) {
// Now that we have a model and a bone, we need the current translation and rotation
// for the model
let (translation, rotation) = calculate_animation_transforms_for(&fbx, model, flip_fix_list);
// And now that we have those, finally add the bone data to the animation SMD
smd.set_animation(frame, bone_id, SmdAnimationFrameBone {
translation: translation.into(),
rotation: rotation.into(),
});
}
}
}
Ok(smd)
}
fn process_fbx_node(
fbx: &SimpleFbx,
fbx_node: &ObjectTreeNode, mut smd: &mut Smd,
matrix: &Matrix4<f32>,
current_bone: Option<&SmdBone>,
flip_fix_list: &Vec<String>,
) -> Result<(), Error> {
    // Perform node-type-specific processing
match fbx_node.object.class {
ObjectType::Geometry(ref geometry) =>
process_geometry(smd, geometry, matrix, current_bone.unwrap()),
ObjectType::Model(ref _model) =>
process_model(fbx, fbx_node, smd, matrix, current_bone, flip_fix_list)?,
_ => {
// Just go straight to the children
for node in &fbx_node.nodes {
process_fbx_node(fbx, node, smd, matrix, current_bone, flip_fix_list)?;
}
}
}
Ok(())
}
fn process_geometry(smd: &mut Smd, geometry: &Geometry, matrix: &Matrix4<f32>, current_bone: &SmdBone) {
// Add triangles to parent node
let tris = geometry.triangles();
for tri in tris {
// Turn the vertices in this triangle to SMD vertices
let mut smd_verts: [SmdVertex; 3] = Default::default();
for (i, vert) in tri.iter().enumerate() {
            // Transform the position (w = 1) and normal (w = 0) by the accumulated node matrix
let pos = matrix * Vector4::new(vert.0[0], vert.0[1], vert.0[2], 1.0);
let norm = matrix * Vector4::new(vert.1[0], vert.1[1], vert.1[2], 0.0);
smd_verts[i] = SmdVertex {
parent_bone: current_bone.id, // This is overwritten by links
position: pos.truncate().into(),
normal: norm.truncate().into(),
uv: vert.2,
links: vec!(
                    /*Not needed, we aren't using weights anyway, so this is handled by parent_bone
SmdLink {
bone: bone_id,
weight: 1.0,
}*/
)
};
}
// Add the actual SMD triangle
smd.triangles.push(SmdTriangle {
material: "layl_test_texture".into(),
vertices: smd_verts,
});
}
}
fn process_model(
fbx: &SimpleFbx,
fbx_node: &ObjectTreeNode, smd: &mut Smd,
matrix: &Matrix4<f32>,
current_bone: Option<&SmdBone>,
flip_fix_list: &Vec<String>,
) -> Result<(), Error> {
task_log(format!("Adding model \"{}\" to SMD data", friendly_name(&fbx_node.object.name)));
let properties = ModelProperties::from_generic(&fbx_node.object.properties);
// Create a new transformation matrix
let local_matrix = local_matrices(&properties);
// Create a new bone
let new_bone = smd.new_bone(
&id_name(&fbx_node.object.name).unwrap(),
current_bone.map(|b| b.id)
)
.ok_or_else(|| Error::Task(format!(
"Bone \"{}\" exists multiple times in the FBX",
&fbx_node.object.name
)))?
.clone(); // Clone needed to avoid a borrow since we need to mut borrow SMD later
// Set the transformations on this bone
let (translation, rotation) = calculate_animation_transforms_for(&fbx, &fbx_node.object, flip_fix_list);
let first_frame = SmdAnimationFrameBone {
// This needs to be derived from the matrix to get the right location
translation: translation.into(),
// This can just be directly copied over
rotation: rotation.into(),
};
smd.set_animation(0, new_bone.id, first_frame);
// Make new matrices for children
let matrix = matrix * local_matrix;
// Make sure the child nodes will receive this new bone
for node in &fbx_node.nodes {
process_fbx_node(fbx, node, smd, &matrix, Some(&new_bone), flip_fix_list)?;
}
Ok(())
}
/// Returns (Translation, Rotation)
fn calculate_animation_transforms_for(
fbx: &SimpleFbx, obj: &Object, flip_fix_list: &Vec<String>,
) -> (Vector3<f32>, Vector3<f32>) |
fn calculate_parent_after_rot_translation(fbx: &SimpleFbx, obj: &Object) -> Vector3<f32> {
// First actually get the parent's model data
let parent_obj = if let Some(v) = fbx.parent_of(obj.id) {
if v == 0 {
// At root, no extra translation
return Vector3::new(0.0, 0.0, 0.0)
}
v
} else {
// No parent, no extra translation
return Vector3::new(0.0, 0.0, 0.0)
};
let props = ModelProperties::from_generic(&fbx.objects[&parent_obj].properties);
// Now add up all the translations applied after rotation
let rotation_pivot: Vector3<_> = props.rotation_pivot.into();
let scale_offset: Vector3<_> = props.scale_offset.into();
let translation = -rotation_pivot + scale_offset;
translation
}
fn local_matrices(properties: &ModelProperties) -> Matrix4<f32> {
// Create various matrices
let rotation_offset = properties.rotation_offset.into();
let rotation_offset_mat = Matrix4::from_translation(rotation_offset);
let rotation_pivot: Vector3<_> = properties.rotation_pivot.into();
let rotation_pivot_mat = Matrix4::from_translation(rotation_pivot);
let pre_rotation = euler_rotation_to_matrix(properties.pre_rotation);
let rotation = euler_rotation_to_matrix(properties.rotation);
let post_rotation = euler_rotation_to_matrix(properties.post_rotation);
let scale_offset = properties.scale_offset.into();
let scale_offset_mat = Matrix4::from_translation(scale_offset);
let scale_pivot: Vector3<_> = properties.scale_pivot.into();
let scale_pivot_mat = Matrix4::from_translation(scale_pivot);
let scale = Matrix4::from_nonuniform_scale(
properties.scale[0],
properties.scale[1],
properties.scale[2]
);
let local_matrix_for_vertices =
Matrix4::from_translation(properties.translation.into()) *
// Rotation
rotation_offset_mat *
rotation_pivot_mat *
pre_rotation *
rotation *
post_rotation.invert().unwrap() *
rotation_pivot_mat.invert().unwrap() *
// Scale
scale_offset_mat *
scale_pivot_mat *
scale *
scale_pivot_mat.invert().unwrap();
local_matrix_for_vertices
}
fn euler_rotation_to_matrix(rot_degs: [f32; 3]) -> Matrix4<f32> {
Matrix4::from_angle_z(Deg(rot_degs[2])) *
Matrix4::from_angle_y(Deg(rot_degs[1])) *
Matrix4::from_angle_x(Deg(rot_degs[0]))
}
| {
let properties = ModelProperties::from_generic(&obj.properties);
// Get the bone's translation
let parent_after_rot_translation = calculate_parent_after_rot_translation(fbx, obj);
let prop_translation: Vector3<_> = properties.translation.into();
let prop_rot_offset: Vector3<_> = properties.rotation_offset.into();
let prop_rot_pivot: Vector3<_> = properties.rotation_pivot.into();
let translation = parent_after_rot_translation + prop_translation + prop_rot_offset + prop_rot_pivot;
// Check if this bone's in the flip fix list
// TODO: Get an actual fix instead of this dirty manual hack
let flip = flip_fix_list.iter().any(|n| n == &id_name(&obj.name).unwrap());
// We want the rotation, but we've got multiple rotations, so combine them
let pre_rotation = Quaternion::from(Euler::new(
Deg(properties.pre_rotation[0]), Deg(properties.pre_rotation[1]), Deg(properties.pre_rotation[2])
));
let rotation = Quaternion::from(Euler::new(
Deg(properties.rotation[0]), Deg(properties.rotation[1]), Deg(properties.rotation[2])
));
let post_rotation = Quaternion::from(Euler::new(
Deg(properties.post_rotation[0]), Deg(properties.post_rotation[1]), Deg(properties.post_rotation[2])
));
let total_rotation = if !flip {
Euler::from(post_rotation.invert() * rotation * pre_rotation)
} else {
Euler::from(post_rotation.invert() * rotation.invert() * pre_rotation)
};
let rotation = Vector3::new(
total_rotation.x.0,
total_rotation.y.0,
total_rotation.z.0,
);
(translation, rotation)
} | identifier_body |
smd.rs | use std::fs::File;
use std::io::{BufReader};
use std::path::PathBuf;
use cgmath::{Matrix4, Deg, Vector4, SquareMatrix, Vector3, Euler, Quaternion, Rotation};
use soto::task::{task_log};
use soto::Error;
use sotolib_fbx::{RawFbx, id_name, friendly_name, ObjectTreeNode};
use sotolib_fbx::animation::{Animation};
use sotolib_fbx::simple::{Object, SimpleFbx, ObjectType, ModelProperties, Geometry};
use sotolib_smd::{Smd, SmdVertex, SmdTriangle, SmdAnimationFrameBone, SmdBone};
pub fn create_reference_smd(fbx: &PathBuf, flip_fix_list: &Vec<String>) -> Result<Smd, Error> {
// Read in the fbx we got told to convert
let file = BufReader::new(File::open(&fbx).unwrap());
let fbx = SimpleFbx::from_raw(&RawFbx::parse(file).unwrap()).unwrap();
let fbx_tree = ObjectTreeNode::from_simple(&fbx);
// Go over all FBX root nodes and turn them into SMD data
let mut smd = Smd::new();
process_fbx_node(
&fbx,
&fbx_tree, &mut smd,
&Matrix4::identity(),
None,
flip_fix_list
)?;
Ok(smd)
}
pub fn create_animation_smd(
ref_smd: &Smd, fbx: &PathBuf, flip_fix_list: &Vec<String>,
) -> Result<Smd, Error> {
// Read in the fbx we got told to convert
let file = BufReader::new(File::open(&fbx).unwrap());
let mut fbx = SimpleFbx::from_raw(&RawFbx::parse(file).unwrap()).unwrap();
// Read in the animation data itself
let animation = Animation::from_simple(&fbx).unwrap();
// Count and log frames
let frame_count = animation.frame_count(&fbx);
task_log(format!("Animation has {} frames", frame_count));
// Copy over every bone to the new animation SMD
let mut smd = Smd::new();
for bone in &ref_smd.bones {
smd.bones.push(bone.clone());
}
// Finally, turn the animation data into bone positions in the SMD
for frame in 0..frame_count {
// First transform the FBX for this frame
animation.transform_fbx_to_frame(&mut fbx, frame);
// Now go over all models
for (_, model) in fbx.objects.iter().filter(|&(_, o)| o.class.type_name() == "Model") {
// For this model, look up the matching BoneId in the reference SMD
if let Some(bone_id) = ref_smd.id_of_bone(&id_name(&model.name).unwrap()) {
// Now that we have a model and a bone, we need the current translation and rotation
// for the model
let (translation, rotation) = calculate_animation_transforms_for(&fbx, model, flip_fix_list);
// And now that we have those, finally add the bone data to the animation SMD
smd.set_animation(frame, bone_id, SmdAnimationFrameBone {
translation: translation.into(),
rotation: rotation.into(),
});
}
}
}
Ok(smd)
}
fn process_fbx_node(
fbx: &SimpleFbx,
fbx_node: &ObjectTreeNode, mut smd: &mut Smd,
matrix: &Matrix4<f32>,
current_bone: Option<&SmdBone>,
flip_fix_list: &Vec<String>,
) -> Result<(), Error> {
    // Perform node-type-specific processing
match fbx_node.object.class {
ObjectType::Geometry(ref geometry) =>
process_geometry(smd, geometry, matrix, current_bone.unwrap()),
ObjectType::Model(ref _model) =>
process_model(fbx, fbx_node, smd, matrix, current_bone, flip_fix_list)?,
_ => {
// Just go straight to the children
for node in &fbx_node.nodes {
process_fbx_node(fbx, node, smd, matrix, current_bone, flip_fix_list)?;
}
}
}
Ok(())
}
fn process_geometry(smd: &mut Smd, geometry: &Geometry, matrix: &Matrix4<f32>, current_bone: &SmdBone) {
// Add triangles to parent node
let tris = geometry.triangles();
for tri in tris {
// Turn the vertices in this triangle to SMD vertices
let mut smd_verts: [SmdVertex; 3] = Default::default();
for (i, vert) in tri.iter().enumerate() {
            // Transform the position (w = 1) and normal (w = 0) by the accumulated node matrix
let pos = matrix * Vector4::new(vert.0[0], vert.0[1], vert.0[2], 1.0);
let norm = matrix * Vector4::new(vert.1[0], vert.1[1], vert.1[2], 0.0);
smd_verts[i] = SmdVertex {
parent_bone: current_bone.id, // This is overwritten by links
position: pos.truncate().into(),
normal: norm.truncate().into(),
uv: vert.2,
links: vec!(
                    /*Not needed, we aren't using weights anyway, so this is handled by parent_bone
SmdLink {
bone: bone_id,
weight: 1.0,
}*/
)
};
}
// Add the actual SMD triangle
smd.triangles.push(SmdTriangle {
material: "layl_test_texture".into(),
vertices: smd_verts,
});
}
}
fn process_model(
fbx: &SimpleFbx,
fbx_node: &ObjectTreeNode, smd: &mut Smd,
matrix: &Matrix4<f32>,
current_bone: Option<&SmdBone>,
flip_fix_list: &Vec<String>,
) -> Result<(), Error> {
task_log(format!("Adding model \"{}\" to SMD data", friendly_name(&fbx_node.object.name)));
let properties = ModelProperties::from_generic(&fbx_node.object.properties);
// Create a new transformation matrix
let local_matrix = local_matrices(&properties);
// Create a new bone
let new_bone = smd.new_bone(
&id_name(&fbx_node.object.name).unwrap(),
current_bone.map(|b| b.id)
)
.ok_or_else(|| Error::Task(format!(
"Bone \"{}\" exists multiple times in the FBX",
&fbx_node.object.name
)))?
.clone(); // Clone needed to avoid a borrow since we need to mut borrow SMD later
// Set the transformations on this bone
let (translation, rotation) = calculate_animation_transforms_for(&fbx, &fbx_node.object, flip_fix_list);
let first_frame = SmdAnimationFrameBone {
// This needs to be derived from the matrix to get the right location
translation: translation.into(),
// This can just be directly copied over
rotation: rotation.into(),
};
smd.set_animation(0, new_bone.id, first_frame);
// Make new matrices for children
let matrix = matrix * local_matrix;
// Make sure the child nodes will receive this new bone
for node in &fbx_node.nodes {
process_fbx_node(fbx, node, smd, &matrix, Some(&new_bone), flip_fix_list)?;
}
Ok(())
}
/// Returns (Translation, Rotation)
fn calculate_animation_transforms_for(
fbx: &SimpleFbx, obj: &Object, flip_fix_list: &Vec<String>,
) -> (Vector3<f32>, Vector3<f32>) {
let properties = ModelProperties::from_generic(&obj.properties);
// Get the bone's translation
let parent_after_rot_translation = calculate_parent_after_rot_translation(fbx, obj);
let prop_translation: Vector3<_> = properties.translation.into();
let prop_rot_offset: Vector3<_> = properties.rotation_offset.into();
let prop_rot_pivot: Vector3<_> = properties.rotation_pivot.into();
let translation = parent_after_rot_translation + prop_translation + prop_rot_offset + prop_rot_pivot;
// Check if this bone's in the flip fix list
// TODO: Get an actual fix instead of this dirty manual hack
let flip = flip_fix_list.iter().any(|n| n == &id_name(&obj.name).unwrap());
// We want the rotation, but we've got multiple rotations, so combine them
let pre_rotation = Quaternion::from(Euler::new(
Deg(properties.pre_rotation[0]), Deg(properties.pre_rotation[1]), Deg(properties.pre_rotation[2])
));
let rotation = Quaternion::from(Euler::new(
Deg(properties.rotation[0]), Deg(properties.rotation[1]), Deg(properties.rotation[2])
));
let post_rotation = Quaternion::from(Euler::new(
Deg(properties.post_rotation[0]), Deg(properties.post_rotation[1]), Deg(properties.post_rotation[2])
));
let total_rotation = if !flip | else {
Euler::from(post_rotation.invert() * rotation.invert() * pre_rotation)
};
let rotation = Vector3::new(
total_rotation.x.0,
total_rotation.y.0,
total_rotation.z.0,
);
(translation, rotation)
}
fn calculate_parent_after_rot_translation(fbx: &SimpleFbx, obj: &Object) -> Vector3<f32> {
// First actually get the parent's model data
let parent_obj = if let Some(v) = fbx.parent_of(obj.id) {
if v == 0 {
// At root, no extra translation
return Vector3::new(0.0, 0.0, 0.0)
}
v
} else {
// No parent, no extra translation
return Vector3::new(0.0, 0.0, 0.0)
};
let props = ModelProperties::from_generic(&fbx.objects[&parent_obj].properties);
// Now add up all the translations applied after rotation
let rotation_pivot: Vector3<_> = props.rotation_pivot.into();
let scale_offset: Vector3<_> = props.scale_offset.into();
let translation = -rotation_pivot + scale_offset;
translation
}
fn local_matrices(properties: &ModelProperties) -> Matrix4<f32> {
// Create various matrices
let rotation_offset = properties.rotation_offset.into();
let rotation_offset_mat = Matrix4::from_translation(rotation_offset);
let rotation_pivot: Vector3<_> = properties.rotation_pivot.into();
let rotation_pivot_mat = Matrix4::from_translation(rotation_pivot);
let pre_rotation = euler_rotation_to_matrix(properties.pre_rotation);
let rotation = euler_rotation_to_matrix(properties.rotation);
let post_rotation = euler_rotation_to_matrix(properties.post_rotation);
let scale_offset = properties.scale_offset.into();
let scale_offset_mat = Matrix4::from_translation(scale_offset);
let scale_pivot: Vector3<_> = properties.scale_pivot.into();
let scale_pivot_mat = Matrix4::from_translation(scale_pivot);
let scale = Matrix4::from_nonuniform_scale(
properties.scale[0],
properties.scale[1],
properties.scale[2]
);
let local_matrix_for_vertices =
Matrix4::from_translation(properties.translation.into()) *
// Rotation
rotation_offset_mat *
rotation_pivot_mat *
pre_rotation *
rotation *
post_rotation.invert().unwrap() *
rotation_pivot_mat.invert().unwrap() *
// Scale
scale_offset_mat *
scale_pivot_mat *
scale *
scale_pivot_mat.invert().unwrap();
local_matrix_for_vertices
}
fn euler_rotation_to_matrix(rot_degs: [f32; 3]) -> Matrix4<f32> {
Matrix4::from_angle_z(Deg(rot_degs[2])) *
Matrix4::from_angle_y(Deg(rot_degs[1])) *
Matrix4::from_angle_x(Deg(rot_degs[0]))
}
| {
Euler::from(post_rotation.invert() * rotation * pre_rotation)
} | conditional_block |
tf_linear_reg.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 23 11:54:38 2018
@author: s.agrawalairan
"""
import tensorflow as tf
import numpy as np
import os
import pickle as pk
import csv
import pandas as pd
import logging
import pprint
import pdb
flags = tf.app.flags
FLAGS = flags.FLAGS
logging.basicConfig(level=logging.INFO)
pprint.PrettyPrinter().pprint(FLAGS.__flags)
# Select optimizer
def get_optimizer(optimizer, learning_rate):
if optimizer == "sgd":
return tf.train.GradientDescentOptimizer(learning_rate)
elif optimizer == "adadelta":
return tf.train.AdadeltaOptimizer(learning_rate)
elif optimizer == "adagrad":
return tf.train.AdagradOptimizer(learning_rate)
elif optimizer == "adam":
return tf.train.AdamOptimizer(learning_rate)
elif optimizer == "ftrl":
return tf.train.FtrlOptimizer(learning_rate)
elif optimizer == "rmsprop":
return tf.train.RMSPropOptimizer(learning_rate)
else:
print("Unknow optimizer, exit now")
exit(1)
def linear_reg_inference(sparse_ids,sparse_values,hidden_nodes,num_layers):
# train_sz = np.shape(Xtrain)[0]
W_ls = []
Bias_ls = []
# Reset the graph
# tf.reset_default_graph()
with tf.variable_scope("linear_reg"):
W_ls.append(tf.get_variable(
"weights_0", [FEATURE_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_0", [LABEL_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
# Regularization Term
reg_loss = tf.reduce_sum(tf.abs(W_ls[0]))
# Create the neural net graph
# y_est = tf.reduce_sum(tf.nn.embedding_lookup_sparse(W_ls[0], sparse_ids, sparse_values, combiner="sum")) + Bias_ls[0]
y_est = tf.nn.embedding_lookup_sparse(W_ls[0], sparse_ids, sparse_values, combiner="sum") + Bias_ls[0]
return [y_est,reg_loss]
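# What the embedding_lookup_sparse call above computes, in plain terms: with
# combiner="sum", each example's prediction is sum_j(values_j * W[ids_j]) + bias,
# i.e. an ordinary sparse dot product X.w + b. A tiny sketch of the same
# arithmetic (illustrative only; these numbers are made up):
#   w = [0.5, -1.0, 2.0]              # weights, FEATURE_SIZE = 3
#   ids, vals = [0, 2], [1.0, 3.0]    # one sparse example
#   y = 1.0*w[0] + 3.0*w[2] + b       # = 0.5 + 6.0 + b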
def dnn_inference(sparse_ids,sparse_values,hidden_nodes,num_layers):
# train_sz = np.shape(Xtrain)[0]
W_ls = []
Bias_ls = []
# Reset the graph
# tf.reset_default_graph()
with tf.variable_scope("dnn_model"):
W_ls.append(tf.get_variable(
"weights_0", [FEATURE_SIZE, hidden_nodes[0]], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_0", [hidden_nodes[0],], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
for i in range(num_layers-1):
W_ls.append(tf.get_variable(
"weights_"+str(i+1), [hidden_nodes[i], hidden_nodes[i+1]], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_"+str(i+1), [hidden_nodes[i+1],], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
W_ls.append(tf.get_variable(
"weights_"+str(num_layers), [hidden_nodes[num_layers-1], LABEL_SIZE], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_"+str(num_layers), [LABEL_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
# Create the neural net graph
y_est = tf.nn.relu(tf.nn.embedding_lookup_sparse(W_ls[0], sparse_ids, sparse_values, combiner="sum") + Bias_ls[0])
for i in range(num_layers):
y_est = tf.nn.relu(tf.matmul(y_est, W_ls[i+1])+Bias_ls[i+1])
return y_est
#def sparse_full_connect(sparse_ids,sparse_values,weights_shape,biases_shape,is_train=True):
# weights = tf.get_variable(
# "weights", weights_shape, initializer=tf.initializers.zeros())
# biases = tf.get_variable(
# "biases", biases_shape, initializer=tf.initializers.zeros())
# return tf.nn.embedding_lookup_sparse(
# weights, sparse_ids, sparse_values, combiner="sum") + biases
## TO BE CHANGED
#def logistic_regression_inference(sparse_ids, sparse_values, is_train=True):
# with tf.variable_scope("logistic_regression"):
# layer = sparse_full_connect(sparse_ids, sparse_values,[FEATURE_SIZE, LABEL_SIZE], [LABEL_SIZE])
# return layer
#
#
## TO BE CHANGED
#def inference(sparse_ids, sparse_values, is_train=True):
# return logistic_regression_inference(sparse_ids, sparse_values, is_train)
def | (filenames, batch_size):
# Define a `tf.contrib.data.Dataset` for iterating over one epoch of the data.
dataset = (tf.data.TFRecordDataset(filenames).
shuffle(buffer_size=MIN_AFTER_DEQUEUE).
batch(batch_size))
return dataset.make_initializable_iterator()
def get_features(tfrecords_file,batch_size):
iterator = input_pipeline(tfrecords_file, batch_size)
features_obj = iterator.get_next()
features = tf.parse_example(
features_obj,
        # Defaults are not specified since all keys are required.
features={
"label": tf.FixedLenFeature([], tf.float32),
"ids": tf.VarLenFeature(tf.int64),
"values": tf.VarLenFeature(tf.float32),
})
return iterator, features
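# For reference, a record that satisfies the parse_example spec above could be
# written roughly like this (hypothetical writer sketch, not part of this script;
# the label/ids/values shown are made up):
#   writer = tf.python_io.TFRecordWriter("SynDataset1.tfrecords")
#   example = tf.train.Example(features=tf.train.Features(feature={
#       "label": tf.train.Feature(float_list=tf.train.FloatList(value=[2.5])),
#       "ids": tf.train.Feature(int64_list=tf.train.Int64List(value=[0, 3, 7])),
#       "values": tf.train.Feature(float_list=tf.train.FloatList(value=[1.0, 0.5, 2.0])),
#   }))
#   writer.write(example.SerializeToString())
#   writer.close()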
def main():
with tf.Session() as sess:
sess.run(init_op)
for _ in range(EPOCH_NUMBER):
# Resets the iterator at the beginning of an epoch.
sess.run(train_iterator.initializer)
sess.run(validate_iterator.initializer)
try:
while True:
_, step = sess.run([train_op, global_step])
# Print state while training
if step % STEPS_TO_VALIDATE == 0:
# logging.info("batch_ids are {0},batch_values are {0},batch_labels are {0}".format(sess.run([batch_ids,batch_values,batch_labels])))
# logging.info("valid_batch_ids are {0},valid_batch_labels are {0}".format(sess.run([validate_batch_values,validate_batch_labels])))
train_loss_val, vald_loss_val,wts_val,bias_val,model_output = sess.run([train_loss, vald_loss, wts,biases,model_output_tr])
print "Just above logging info line"
logging.info("Step: {}, train_loss: {}, vald_loss: {} wts_val: {} bias_val: {} y_tr:{}".format(step, train_loss_val,vald_loss_val,wts_val,bias_val,model_output))
except tf.errors.OutOfRangeError:
pass
print('Done training, epoch reached')
if __name__ == '__main__':
loaddir = '/Users/s.agrawalairan/OneDrive - CRITEO/InternProject/Data/TfSynData/'
MIN_AFTER_DEQUEUE = 20 #00000 #100
BATCH_SIZE = 100
    VALIDATE_BATCH_SIZE = 100 # Size of each validation batch
EPOCH_NUMBER = 500
OPTIMIZER = "sgd"
learning_rate = 0.01
STEPS_TO_VALIDATE = 5
REG_CONSTANT = 0
MIN_Y_VAL = 0.01 # For lognormal loss
FEATURE_SIZE = 11
LABEL_SIZE = 1
NUM_LAYERS = 3 # number of hidden layers in dnn (no. of weight matrices and biases vectors will be NUM_LAYERS + 1,)
HIDDEN_NODES = [FEATURE_SIZE]*NUM_LAYERS
# Get all FileNames
tfr_tr_filenames = [loaddir+"SynDataset1.tfrecords",loaddir+"SynDataset3.tfrecords"]
tfr_vald_filenames = [loaddir+"SynDataset2.tfrecords",loaddir+"SynDataset4.tfrecords"]
tf.reset_default_graph()
# Get a batch of y and X in tr_features
train_iterator, train_features = get_features(tfr_tr_filenames, BATCH_SIZE)
batch_labels = train_features["label"]
batch_ids = train_features["ids"]
batch_values = train_features["values"]
# Get a batch of y and X in vald_features
validate_iterator,validate_features = get_features(tfr_vald_filenames,VALIDATE_BATCH_SIZE)
validate_batch_labels = validate_features["label"]
validate_batch_ids = validate_features["ids"]
validate_batch_values = validate_features["values"]
# Model stored in inference function
# model_output_tr = dnn_inference(batch_ids,batch_values,HIDDEN_NODES,NUM_LAYERS)
[model_output_tr,reg_loss_tr] = linear_reg_inference(batch_ids,batch_values,HIDDEN_NODES,NUM_LAYERS)
# Define Loss function
# loss = tf.reduce_sum(tf.nn.log_poisson_loss(batch_labels,model_output,name='Poisson_loss')) # Poisson Loss
# loss = tf.reduce_mean(tf.square(tf.log(model_output_tr) - tf.log(tf.add(batch_labels,MIN_Y_VAL)))) # Log Normal Loss
loss = tf.reduce_mean(tf.square(model_output_tr - batch_labels)) + REG_CONSTANT*reg_loss_tr # Regularized MSE
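    # Written out, the three candidate objectives above are roughly:
    #   regularized MSE : mean((y_hat - y)^2) + REG_CONSTANT * sum(|w|)
    #   log-normal      : mean((log(y_hat) - log(y + MIN_Y_VAL))^2)
    #   Poisson         : sum(exp(y_hat) - y * y_hat), per tf.nn.log_poisson_loss,
    #                     which expects log-rates as its input
    # (the last two describe the commented-out variants, not what this script runs)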
    # Setting up the optimizer; global_step is used to keep track of the number of batches seen so far
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = get_optimizer(OPTIMIZER, learning_rate)
# Define train_operation
train_op = optimizer.minimize(loss, global_step=global_step)
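    # minimize() is shorthand for compute_gradients() followed by apply_gradients();
    # because global_step is passed in, it is incremented once per executed train_op,
    # which is what the step counter logged in main() is based on.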
    # Variables of the model are reused in order to compute training and validation error
tf.get_variable_scope().reuse_variables()
# Add more evaluation metrics for training data (TO BE CHANGED...)
# model_output_tr = dnn_inference(batch_ids, batch_values, HIDDEN_NODES, NUM_LAYERS)
[model_output_tr,_] = linear_reg_inference(batch_ids, batch_values, HIDDEN_NODES, NUM_LAYERS)
train_loss = tf.reduce_mean(tf.square(model_output_tr - tf.add(batch_labels,MIN_Y_VAL))) # MSE
wts = tf.get_variable("linear_reg/weights_0", [FEATURE_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64))
biases = tf.get_variable("linear_reg/biases_0", [LABEL_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64))
    # Add more evaluation metrics for testing data (TO BE CHANGED...)
# model_output_vald = dnn_inference(batch_ids,batch_values,HIDDEN_NODES,NUM_LAYERS)
[model_output_vald,_] = linear_reg_inference(validate_batch_ids, validate_batch_values, HIDDEN_NODES, NUM_LAYERS)
vald_loss = tf.reduce_mean(tf.square(model_output_vald - tf.add(validate_batch_labels,MIN_Y_VAL))) #MSE
# Define init_op
init_op = [tf.local_variables_initializer(), tf.global_variables_initializer()]
main()
| input_pipeline | identifier_name |
tf_linear_reg.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 23 11:54:38 2018
@author: s.agrawalairan
"""
import tensorflow as tf
import numpy as np
import os
import pickle as pk
import csv
import pandas as pd
import logging
import pprint
import pdb
flags = tf.app.flags
FLAGS = flags.FLAGS
logging.basicConfig(level=logging.INFO)
pprint.PrettyPrinter().pprint(FLAGS.__flags)
# Select optimizer
def get_optimizer(optimizer, learning_rate):
if optimizer == "sgd":
return tf.train.GradientDescentOptimizer(learning_rate)
elif optimizer == "adadelta":
return tf.train.AdadeltaOptimizer(learning_rate)
elif optimizer == "adagrad":
return tf.train.AdagradOptimizer(learning_rate)
elif optimizer == "adam":
return tf.train.AdamOptimizer(learning_rate)
elif optimizer == "ftrl":
return tf.train.FtrlOptimizer(learning_rate)
elif optimizer == "rmsprop":
return tf.train.RMSPropOptimizer(learning_rate)
else:
print("Unknow optimizer, exit now")
exit(1)
def linear_reg_inference(sparse_ids,sparse_values,hidden_nodes,num_layers):
# train_sz = np.shape(Xtrain)[0]
W_ls = []
Bias_ls = []
# Reset the graph
# tf.reset_default_graph()
with tf.variable_scope("linear_reg"):
W_ls.append(tf.get_variable(
"weights_0", [FEATURE_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_0", [LABEL_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
# Regularization Term
reg_loss = tf.reduce_sum(tf.abs(W_ls[0]))
# Create the neural net graph
# y_est = tf.reduce_sum(tf.nn.embedding_lookup_sparse(W_ls[0], sparse_ids, sparse_values, combiner="sum")) + Bias_ls[0]
y_est = tf.nn.embedding_lookup_sparse(W_ls[0], sparse_ids, sparse_values, combiner="sum") + Bias_ls[0]
return [y_est,reg_loss]
def dnn_inference(sparse_ids,sparse_values,hidden_nodes,num_layers):
# train_sz = np.shape(Xtrain)[0]
W_ls = []
Bias_ls = []
# Reset the graph
# tf.reset_default_graph()
with tf.variable_scope("dnn_model"):
W_ls.append(tf.get_variable(
"weights_0", [FEATURE_SIZE, hidden_nodes[0]], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_0", [hidden_nodes[0],], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
for i in range(num_layers-1):
W_ls.append(tf.get_variable(
"weights_"+str(i+1), [hidden_nodes[i], hidden_nodes[i+1]], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_"+str(i+1), [hidden_nodes[i+1],], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
W_ls.append(tf.get_variable(
"weights_"+str(num_layers), [hidden_nodes[num_layers-1], LABEL_SIZE], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_"+str(num_layers), [LABEL_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
# Create the neural net graph
y_est = tf.nn.relu(tf.nn.embedding_lookup_sparse(W_ls[0], sparse_ids, sparse_values, combiner="sum") + Bias_ls[0])
for i in range(num_layers):
y_est = tf.nn.relu(tf.matmul(y_est, W_ls[i+1])+Bias_ls[i+1])
return y_est
#def sparse_full_connect(sparse_ids,sparse_values,weights_shape,biases_shape,is_train=True):
# weights = tf.get_variable(
# "weights", weights_shape, initializer=tf.initializers.zeros())
# biases = tf.get_variable(
# "biases", biases_shape, initializer=tf.initializers.zeros())
# return tf.nn.embedding_lookup_sparse(
# weights, sparse_ids, sparse_values, combiner="sum") + biases
## TO BE CHANGED
#def logistic_regression_inference(sparse_ids, sparse_values, is_train=True):
# with tf.variable_scope("logistic_regression"):
# layer = sparse_full_connect(sparse_ids, sparse_values,[FEATURE_SIZE, LABEL_SIZE], [LABEL_SIZE])
# return layer
#
#
## TO BE CHANGED
#def inference(sparse_ids, sparse_values, is_train=True):
# return logistic_regression_inference(sparse_ids, sparse_values, is_train)
def input_pipeline(filenames, batch_size):
# Define a `tf.contrib.data.Dataset` for iterating over one epoch of the data.
dataset = (tf.data.TFRecordDataset(filenames).
shuffle(buffer_size=MIN_AFTER_DEQUEUE).
batch(batch_size))
return dataset.make_initializable_iterator()
def get_features(tfrecords_file,batch_size):
iterator = input_pipeline(tfrecords_file, batch_size)
features_obj = iterator.get_next()
features = tf.parse_example(
features_obj,
        # Defaults are not specified since all keys are required.
features={
"label": tf.FixedLenFeature([], tf.float32),
"ids": tf.VarLenFeature(tf.int64),
"values": tf.VarLenFeature(tf.float32),
})
return iterator, features
def main():
with tf.Session() as sess:
sess.run(init_op)
for _ in range(EPOCH_NUMBER):
# Resets the iterator at the beginning of an epoch.
sess.run(train_iterator.initializer)
sess.run(validate_iterator.initializer)
try:
while True:
_, step = sess.run([train_op, global_step])
# Print state while training
if step % STEPS_TO_VALIDATE == 0:
# logging.info("batch_ids are {0},batch_values are {0},batch_labels are {0}".format(sess.run([batch_ids,batch_values,batch_labels])))
# logging.info("valid_batch_ids are {0},valid_batch_labels are {0}".format(sess.run([validate_batch_values,validate_batch_labels])))
train_loss_val, vald_loss_val,wts_val,bias_val,model_output = sess.run([train_loss, vald_loss, wts,biases,model_output_tr])
print "Just above logging info line"
logging.info("Step: {}, train_loss: {}, vald_loss: {} wts_val: {} bias_val: {} y_tr:{}".format(step, train_loss_val,vald_loss_val,wts_val,bias_val,model_output))
except tf.errors.OutOfRangeError:
pass
print('Done training, epoch reached')
if __name__ == '__main__':
loaddir = '/Users/s.agrawalairan/OneDrive - CRITEO/InternProject/Data/TfSynData/'
MIN_AFTER_DEQUEUE = 20 #00000 #100
BATCH_SIZE = 100
    VALIDATE_BATCH_SIZE = 100 # Size of each validation batch
EPOCH_NUMBER = 500
OPTIMIZER = "sgd"
learning_rate = 0.01
STEPS_TO_VALIDATE = 5
REG_CONSTANT = 0
MIN_Y_VAL = 0.01 # For lognormal loss
FEATURE_SIZE = 11
LABEL_SIZE = 1
NUM_LAYERS = 3 # number of hidden layers in dnn (no. of weight matrices and biases vectors will be NUM_LAYERS + 1,)
HIDDEN_NODES = [FEATURE_SIZE]*NUM_LAYERS | tfr_vald_filenames = [loaddir+"SynDataset2.tfrecords",loaddir+"SynDataset4.tfrecords"]
tf.reset_default_graph()
# Get a batch of y and X in tr_features
train_iterator, train_features = get_features(tfr_tr_filenames, BATCH_SIZE)
batch_labels = train_features["label"]
batch_ids = train_features["ids"]
batch_values = train_features["values"]
# Get a batch of y and X in vald_features
validate_iterator,validate_features = get_features(tfr_vald_filenames,VALIDATE_BATCH_SIZE)
validate_batch_labels = validate_features["label"]
validate_batch_ids = validate_features["ids"]
validate_batch_values = validate_features["values"]
# Model stored in inference function
# model_output_tr = dnn_inference(batch_ids,batch_values,HIDDEN_NODES,NUM_LAYERS)
[model_output_tr,reg_loss_tr] = linear_reg_inference(batch_ids,batch_values,HIDDEN_NODES,NUM_LAYERS)
# Define Loss function
# loss = tf.reduce_sum(tf.nn.log_poisson_loss(batch_labels,model_output,name='Poisson_loss')) # Poisson Loss
# loss = tf.reduce_mean(tf.square(tf.log(model_output_tr) - tf.log(tf.add(batch_labels,MIN_Y_VAL)))) # Log Normal Loss
loss = tf.reduce_mean(tf.square(model_output_tr - batch_labels)) + REG_CONSTANT*reg_loss_tr # Regularized MSE
    # Setting up the optimizer; global_step is used to keep track of the number of batches seen so far
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = get_optimizer(OPTIMIZER, learning_rate)
# Define train_operation
train_op = optimizer.minimize(loss, global_step=global_step)
    # Variables of the model are reused in order to compute training and validation error
tf.get_variable_scope().reuse_variables()
# Add more evaluation metrics for training data (TO BE CHANGED...)
# model_output_tr = dnn_inference(batch_ids, batch_values, HIDDEN_NODES, NUM_LAYERS)
[model_output_tr,_] = linear_reg_inference(batch_ids, batch_values, HIDDEN_NODES, NUM_LAYERS)
train_loss = tf.reduce_mean(tf.square(model_output_tr - tf.add(batch_labels,MIN_Y_VAL))) # MSE
wts = tf.get_variable("linear_reg/weights_0", [FEATURE_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64))
biases = tf.get_variable("linear_reg/biases_0", [LABEL_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64))
    # Add more evaluation metrics for testing data (TO BE CHANGED...)
# model_output_vald = dnn_inference(batch_ids,batch_values,HIDDEN_NODES,NUM_LAYERS)
[model_output_vald,_] = linear_reg_inference(validate_batch_ids, validate_batch_values, HIDDEN_NODES, NUM_LAYERS)
vald_loss = tf.reduce_mean(tf.square(model_output_vald - tf.add(validate_batch_labels,MIN_Y_VAL))) #MSE
# Define init_op
init_op = [tf.local_variables_initializer(), tf.global_variables_initializer()]
main() | # Get all FileNames
tfr_tr_filenames = [loaddir+"SynDataset1.tfrecords",loaddir+"SynDataset3.tfrecords"] | random_line_split |
tf_linear_reg.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 23 11:54:38 2018
@author: s.agrawalairan
"""
import tensorflow as tf
import numpy as np
import os
import pickle as pk
import csv
import pandas as pd
import logging
import pprint
import pdb
flags = tf.app.flags
FLAGS = flags.FLAGS
logging.basicConfig(level=logging.INFO)
pprint.PrettyPrinter().pprint(FLAGS.__flags)
# Select optimizer
def get_optimizer(optimizer, learning_rate):
if optimizer == "sgd":
return tf.train.GradientDescentOptimizer(learning_rate)
elif optimizer == "adadelta":
return tf.train.AdadeltaOptimizer(learning_rate)
elif optimizer == "adagrad":
return tf.train.AdagradOptimizer(learning_rate)
elif optimizer == "adam":
return tf.train.AdamOptimizer(learning_rate)
elif optimizer == "ftrl":
return tf.train.FtrlOptimizer(learning_rate)
elif optimizer == "rmsprop":
return tf.train.RMSPropOptimizer(learning_rate)
else:
print("Unknow optimizer, exit now")
exit(1)
def linear_reg_inference(sparse_ids,sparse_values,hidden_nodes,num_layers):
# train_sz = np.shape(Xtrain)[0]
W_ls = []
Bias_ls = []
# Reset the graph
# tf.reset_default_graph()
with tf.variable_scope("linear_reg"):
W_ls.append(tf.get_variable(
"weights_0", [FEATURE_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_0", [LABEL_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
# Regularization Term
reg_loss = tf.reduce_sum(tf.abs(W_ls[0]))
# Create the neural net graph
# y_est = tf.reduce_sum(tf.nn.embedding_lookup_sparse(W_ls[0], sparse_ids, sparse_values, combiner="sum")) + Bias_ls[0]
y_est = tf.nn.embedding_lookup_sparse(W_ls[0], sparse_ids, sparse_values, combiner="sum") + Bias_ls[0]
return [y_est,reg_loss]
def dnn_inference(sparse_ids,sparse_values,hidden_nodes,num_layers):
# train_sz = np.shape(Xtrain)[0]
W_ls = []
Bias_ls = []
# Reset the graph
# tf.reset_default_graph()
with tf.variable_scope("dnn_model"):
W_ls.append(tf.get_variable(
"weights_0", [FEATURE_SIZE, hidden_nodes[0]], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_0", [hidden_nodes[0],], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
for i in range(num_layers-1):
W_ls.append(tf.get_variable(
"weights_"+str(i+1), [hidden_nodes[i], hidden_nodes[i+1]], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_"+str(i+1), [hidden_nodes[i+1],], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
W_ls.append(tf.get_variable(
"weights_"+str(num_layers), [hidden_nodes[num_layers-1], LABEL_SIZE], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_"+str(num_layers), [LABEL_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
# Create the neural net graph
y_est = tf.nn.relu(tf.nn.embedding_lookup_sparse(W_ls[0], sparse_ids, sparse_values, combiner="sum") + Bias_ls[0])
for i in range(num_layers):
y_est = tf.nn.relu(tf.matmul(y_est, W_ls[i+1])+Bias_ls[i+1])
return y_est
#def sparse_full_connect(sparse_ids,sparse_values,weights_shape,biases_shape,is_train=True):
# weights = tf.get_variable(
# "weights", weights_shape, initializer=tf.initializers.zeros())
# biases = tf.get_variable(
# "biases", biases_shape, initializer=tf.initializers.zeros())
# return tf.nn.embedding_lookup_sparse(
# weights, sparse_ids, sparse_values, combiner="sum") + biases
## TO BE CHANGED
#def logistic_regression_inference(sparse_ids, sparse_values, is_train=True):
# with tf.variable_scope("logistic_regression"):
# layer = sparse_full_connect(sparse_ids, sparse_values,[FEATURE_SIZE, LABEL_SIZE], [LABEL_SIZE])
# return layer
#
#
## TO BE CHANGED
#def inference(sparse_ids, sparse_values, is_train=True):
# return logistic_regression_inference(sparse_ids, sparse_values, is_train)
def input_pipeline(filenames, batch_size):
# Define a `tf.contrib.data.Dataset` for iterating over one epoch of the data.
|
def get_features(tfrecords_file,batch_size):
iterator = input_pipeline(tfrecords_file, batch_size)
features_obj = iterator.get_next()
features = tf.parse_example(
features_obj,
        # Defaults are not specified since all keys are required.
features={
"label": tf.FixedLenFeature([], tf.float32),
"ids": tf.VarLenFeature(tf.int64),
"values": tf.VarLenFeature(tf.float32),
})
return iterator, features
def main():
with tf.Session() as sess:
sess.run(init_op)
for _ in range(EPOCH_NUMBER):
# Resets the iterator at the beginning of an epoch.
sess.run(train_iterator.initializer)
sess.run(validate_iterator.initializer)
try:
while True:
_, step = sess.run([train_op, global_step])
# Print state while training
if step % STEPS_TO_VALIDATE == 0:
# logging.info("batch_ids are {0},batch_values are {0},batch_labels are {0}".format(sess.run([batch_ids,batch_values,batch_labels])))
# logging.info("valid_batch_ids are {0},valid_batch_labels are {0}".format(sess.run([validate_batch_values,validate_batch_labels])))
train_loss_val, vald_loss_val,wts_val,bias_val,model_output = sess.run([train_loss, vald_loss, wts,biases,model_output_tr])
print "Just above logging info line"
logging.info("Step: {}, train_loss: {}, vald_loss: {} wts_val: {} bias_val: {} y_tr:{}".format(step, train_loss_val,vald_loss_val,wts_val,bias_val,model_output))
except tf.errors.OutOfRangeError:
pass
print('Done training, epoch reached')
if __name__ == '__main__':
loaddir = '/Users/s.agrawalairan/OneDrive - CRITEO/InternProject/Data/TfSynData/'
MIN_AFTER_DEQUEUE = 20 #00000 #100
BATCH_SIZE = 100
    VALIDATE_BATCH_SIZE = 100 # Size of each validation batch
EPOCH_NUMBER = 500
OPTIMIZER = "sgd"
learning_rate = 0.01
STEPS_TO_VALIDATE = 5
REG_CONSTANT = 0
MIN_Y_VAL = 0.01 # For lognormal loss
FEATURE_SIZE = 11
LABEL_SIZE = 1
NUM_LAYERS = 3 # number of hidden layers in dnn (no. of weight matrices and biases vectors will be NUM_LAYERS + 1,)
HIDDEN_NODES = [FEATURE_SIZE]*NUM_LAYERS
# Get all FileNames
tfr_tr_filenames = [loaddir+"SynDataset1.tfrecords",loaddir+"SynDataset3.tfrecords"]
tfr_vald_filenames = [loaddir+"SynDataset2.tfrecords",loaddir+"SynDataset4.tfrecords"]
tf.reset_default_graph()
# Get a batch of y and X in tr_features
train_iterator, train_features = get_features(tfr_tr_filenames, BATCH_SIZE)
batch_labels = train_features["label"]
batch_ids = train_features["ids"]
batch_values = train_features["values"]
# Get a batch of y and X in vald_features
validate_iterator,validate_features = get_features(tfr_vald_filenames,VALIDATE_BATCH_SIZE)
validate_batch_labels = validate_features["label"]
validate_batch_ids = validate_features["ids"]
validate_batch_values = validate_features["values"]
# Model stored in inference function
# model_output_tr = dnn_inference(batch_ids,batch_values,HIDDEN_NODES,NUM_LAYERS)
[model_output_tr,reg_loss_tr] = linear_reg_inference(batch_ids,batch_values,HIDDEN_NODES,NUM_LAYERS)
# Define Loss function
# loss = tf.reduce_sum(tf.nn.log_poisson_loss(batch_labels,model_output,name='Poisson_loss')) # Poisson Loss
# loss = tf.reduce_mean(tf.square(tf.log(model_output_tr) - tf.log(tf.add(batch_labels,MIN_Y_VAL)))) # Log Normal Loss
loss = tf.reduce_mean(tf.square(model_output_tr - batch_labels)) + REG_CONSTANT*reg_loss_tr # Regularized MSE
    # Setting up the optimizer; global_step is used to keep track of the number of batches seen so far
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = get_optimizer(OPTIMIZER, learning_rate)
# Define train_operation
train_op = optimizer.minimize(loss, global_step=global_step)
    # Variables of the model are reused in order to compute training and validation error
tf.get_variable_scope().reuse_variables()
# Add more evaluation metrics for training data (TO BE CHANGED...)
# model_output_tr = dnn_inference(batch_ids, batch_values, HIDDEN_NODES, NUM_LAYERS)
[model_output_tr,_] = linear_reg_inference(batch_ids, batch_values, HIDDEN_NODES, NUM_LAYERS)
train_loss = tf.reduce_mean(tf.square(model_output_tr - tf.add(batch_labels,MIN_Y_VAL))) # MSE
wts = tf.get_variable("linear_reg/weights_0", [FEATURE_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64))
biases = tf.get_variable("linear_reg/biases_0", [LABEL_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64))
    # Add more evaluation metrics for testing data (TO BE CHANGED...)
# model_output_vald = dnn_inference(batch_ids,batch_values,HIDDEN_NODES,NUM_LAYERS)
[model_output_vald,_] = linear_reg_inference(validate_batch_ids, validate_batch_values, HIDDEN_NODES, NUM_LAYERS)
vald_loss = tf.reduce_mean(tf.square(model_output_vald - tf.add(validate_batch_labels,MIN_Y_VAL))) #MSE
# Define init_op
init_op = [tf.local_variables_initializer(), tf.global_variables_initializer()]
main()
| dataset = (tf.data.TFRecordDataset(filenames).
shuffle(buffer_size=MIN_AFTER_DEQUEUE).
batch(batch_size))
return dataset.make_initializable_iterator() | identifier_body |
tf_linear_reg.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 23 11:54:38 2018
@author: s.agrawalairan
"""
import tensorflow as tf
import numpy as np
import os
import pickle as pk
import csv
import pandas as pd
import logging
import pprint
import pdb
flags = tf.app.flags
FLAGS = flags.FLAGS
logging.basicConfig(level=logging.INFO)
pprint.PrettyPrinter().pprint(FLAGS.__flags)
# Select optimizer
def get_optimizer(optimizer, learning_rate):
if optimizer == "sgd":
return tf.train.GradientDescentOptimizer(learning_rate)
elif optimizer == "adadelta":
return tf.train.AdadeltaOptimizer(learning_rate)
elif optimizer == "adagrad":
return tf.train.AdagradOptimizer(learning_rate)
elif optimizer == "adam":
return tf.train.AdamOptimizer(learning_rate)
elif optimizer == "ftrl":
return tf.train.FtrlOptimizer(learning_rate)
elif optimizer == "rmsprop":
return tf.train.RMSPropOptimizer(learning_rate)
else:
|
def linear_reg_inference(sparse_ids,sparse_values,hidden_nodes,num_layers):
# train_sz = np.shape(Xtrain)[0]
W_ls = []
Bias_ls = []
# Reset the graph
# tf.reset_default_graph()
with tf.variable_scope("linear_reg"):
W_ls.append(tf.get_variable(
"weights_0", [FEATURE_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_0", [LABEL_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
# Regularization Term
reg_loss = tf.reduce_sum(tf.abs(W_ls[0]))
# Create the neural net graph
# y_est = tf.reduce_sum(tf.nn.embedding_lookup_sparse(W_ls[0], sparse_ids, sparse_values, combiner="sum")) + Bias_ls[0]
y_est = tf.nn.embedding_lookup_sparse(W_ls[0], sparse_ids, sparse_values, combiner="sum") + Bias_ls[0]
return [y_est,reg_loss]
def dnn_inference(sparse_ids,sparse_values,hidden_nodes,num_layers):
# train_sz = np.shape(Xtrain)[0]
W_ls = []
Bias_ls = []
# Reset the graph
# tf.reset_default_graph()
with tf.variable_scope("dnn_model"):
W_ls.append(tf.get_variable(
"weights_0", [FEATURE_SIZE, hidden_nodes[0]], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_0", [hidden_nodes[0],], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
for i in range(num_layers-1):
W_ls.append(tf.get_variable(
"weights_"+str(i+1), [hidden_nodes[i], hidden_nodes[i+1]], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_"+str(i+1), [hidden_nodes[i+1],], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
W_ls.append(tf.get_variable(
"weights_"+str(num_layers), [hidden_nodes[num_layers-1], LABEL_SIZE], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
Bias_ls.append(tf.get_variable(
"biases_"+str(num_layers), [LABEL_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64)))
# Create the neural net graph
y_est = tf.nn.relu(tf.nn.embedding_lookup_sparse(W_ls[0], sparse_ids, sparse_values, combiner="sum") + Bias_ls[0])
for i in range(num_layers):
y_est = tf.nn.relu(tf.matmul(y_est, W_ls[i+1])+Bias_ls[i+1])
return y_est
#def sparse_full_connect(sparse_ids,sparse_values,weights_shape,biases_shape,is_train=True):
# weights = tf.get_variable(
# "weights", weights_shape, initializer=tf.initializers.zeros())
# biases = tf.get_variable(
# "biases", biases_shape, initializer=tf.initializers.zeros())
# return tf.nn.embedding_lookup_sparse(
# weights, sparse_ids, sparse_values, combiner="sum") + biases
## TO BE CHANGED
#def logistic_regression_inference(sparse_ids, sparse_values, is_train=True):
# with tf.variable_scope("logistic_regression"):
# layer = sparse_full_connect(sparse_ids, sparse_values,[FEATURE_SIZE, LABEL_SIZE], [LABEL_SIZE])
# return layer
#
#
## TO BE CHANGED
#def inference(sparse_ids, sparse_values, is_train=True):
# return logistic_regression_inference(sparse_ids, sparse_values, is_train)
def input_pipeline(filenames, batch_size):
# Define a `tf.data.Dataset` for iterating over one epoch of the data.
dataset = (tf.data.TFRecordDataset(filenames).
shuffle(buffer_size=MIN_AFTER_DEQUEUE).
batch(batch_size))
return dataset.make_initializable_iterator()
def get_features(tfrecords_file,batch_size):
iterator = input_pipeline(tfrecords_file, batch_size)
features_obj = iterator.get_next()
features = tf.parse_example(
features_obj,
# Defaults are not specified since both keys are required.
features={
"label": tf.FixedLenFeature([], tf.float32),
"ids": tf.VarLenFeature(tf.int64),
"values": tf.VarLenFeature(tf.float32),
})
return iterator, features
def main():
with tf.Session() as sess:
sess.run(init_op)
for _ in range(EPOCH_NUMBER):
# Resets the iterator at the beginning of an epoch.
sess.run(train_iterator.initializer)
sess.run(validate_iterator.initializer)
try:
while True:
_, step = sess.run([train_op, global_step])
# Print state while training
if step % STEPS_TO_VALIDATE == 0:
# logging.info("batch_ids are {0},batch_values are {0},batch_labels are {0}".format(sess.run([batch_ids,batch_values,batch_labels])))
# logging.info("valid_batch_ids are {0},valid_batch_labels are {0}".format(sess.run([validate_batch_values,validate_batch_labels])))
train_loss_val, vald_loss_val,wts_val,bias_val,model_output = sess.run([train_loss, vald_loss, wts,biases,model_output_tr])
print "Just above logging info line"
logging.info("Step: {}, train_loss: {}, vald_loss: {} wts_val: {} bias_val: {} y_tr:{}".format(step, train_loss_val,vald_loss_val,wts_val,bias_val,model_output))
except tf.errors.OutOfRangeError:
pass
print('Done training, epoch reached')
if __name__ == '__main__':
loaddir = '/Users/s.agrawalairan/OneDrive - CRITEO/InternProject/Data/TfSynData/'
MIN_AFTER_DEQUEUE = 20 #00000 #100
BATCH_SIZE = 100
VALIDATE_BATCH_SIZE = 100 # Size of each validation batch
EPOCH_NUMBER = 500
OPTIMIZER = "sgd"
learning_rate = 0.01
STEPS_TO_VALIDATE = 5
REG_CONSTANT = 0
MIN_Y_VAL = 0.01 # For lognormal loss
FEATURE_SIZE = 11
LABEL_SIZE = 1
NUM_LAYERS = 3 # number of hidden layers in dnn (no. of weight matrices and bias vectors will be NUM_LAYERS + 1)
HIDDEN_NODES = [FEATURE_SIZE]*NUM_LAYERS
# Get all FileNames
tfr_tr_filenames = [loaddir+"SynDataset1.tfrecords",loaddir+"SynDataset3.tfrecords"]
tfr_vald_filenames = [loaddir+"SynDataset2.tfrecords",loaddir+"SynDataset4.tfrecords"]
tf.reset_default_graph()
# Get a batch of y and X in tr_features
train_iterator, train_features = get_features(tfr_tr_filenames, BATCH_SIZE)
batch_labels = train_features["label"]
batch_ids = train_features["ids"]
batch_values = train_features["values"]
# Get a batch of y and X in vald_features
validate_iterator,validate_features = get_features(tfr_vald_filenames,VALIDATE_BATCH_SIZE)
validate_batch_labels = validate_features["label"]
validate_batch_ids = validate_features["ids"]
validate_batch_values = validate_features["values"]
# Model stored in inference function
# model_output_tr = dnn_inference(batch_ids,batch_values,HIDDEN_NODES,NUM_LAYERS)
[model_output_tr,reg_loss_tr] = linear_reg_inference(batch_ids,batch_values,HIDDEN_NODES,NUM_LAYERS)
# Define Loss function
# loss = tf.reduce_sum(tf.nn.log_poisson_loss(batch_labels,model_output,name='Poisson_loss')) # Poisson Loss
# loss = tf.reduce_mean(tf.square(tf.log(model_output_tr) - tf.log(tf.add(batch_labels,MIN_Y_VAL)))) # Log Normal Loss
loss = tf.reduce_mean(tf.square(model_output_tr - batch_labels)) + REG_CONSTANT*reg_loss_tr # Regularized MSE
# Setting up optimizer; global_step is used to keep track of the number of batches seen so far
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = get_optimizer(OPTIMIZER, learning_rate)
# Define train_operation
train_op = optimizer.minimize(loss, global_step=global_step)
# Variables of the model are reused in order to compute training and validation error
tf.get_variable_scope().reuse_variables()
# Add more evaluation metrics for training data (TO BE CHANGED...)
# model_output_tr = dnn_inference(batch_ids, batch_values, HIDDEN_NODES, NUM_LAYERS)
[model_output_tr,_] = linear_reg_inference(batch_ids, batch_values, HIDDEN_NODES, NUM_LAYERS)
train_loss = tf.reduce_mean(tf.square(model_output_tr - tf.add(batch_labels,MIN_Y_VAL))) # MSE
wts = tf.get_variable("linear_reg/weights_0", [FEATURE_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64))
biases = tf.get_variable("linear_reg/biases_0", [LABEL_SIZE,], initializer=tf.initializers.random_uniform(minval=-1,maxval=1,dtype=tf.float64))
# Add more evaluation metrics for testing data (TO BE CHANGED...)
# model_output_vald = dnn_inference(batch_ids,batch_values,HIDDEN_NODES,NUM_LAYERS)
[model_output_vald,_] = linear_reg_inference(validate_batch_ids, validate_batch_values, HIDDEN_NODES, NUM_LAYERS)
vald_loss = tf.reduce_mean(tf.square(model_output_vald - tf.add(validate_batch_labels,MIN_Y_VAL))) #MSE
# Define init_op
init_op = [tf.local_variables_initializer(), tf.global_variables_initializer()]
main()
| print("Unknown optimizer, exiting now")
exit(1) | conditional_block |
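# --- Hedged example (not part of the original script) -----------------------
# The script above reads TFRecord files whose Examples carry a float "label",
# int64 "ids" and float "values" (a sparse encoding over FEATURE_SIZE ids).
# The writer below is a minimal sketch for producing such a file; the file
# name, sizes and random data are illustrative assumptions only.
import numpy as np
import tensorflow as tf

def write_synthetic_tfrecords(path, num_examples=1000, feature_size=11, nnz=3):
    with tf.python_io.TFRecordWriter(path) as writer:
        for _ in range(num_examples):
            ids = np.random.choice(feature_size, size=nnz, replace=False)
            values = np.random.rand(nnz)
            label = float(values.sum())
            example = tf.train.Example(features=tf.train.Features(feature={
                "label": tf.train.Feature(float_list=tf.train.FloatList(value=[label])),
                "ids": tf.train.Feature(int64_list=tf.train.Int64List(value=ids.tolist())),
                "values": tf.train.Feature(float_list=tf.train.FloatList(value=values.tolist())),
            }))
            writer.write(example.SerializeToString())

# Example call: write_synthetic_tfrecords("SynDataset1.tfrecords")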
lexer.go | package lexer
import (
"bytes"
"fmt"
"github.com/fadion/aria/reader"
"github.com/fadion/aria/reporter"
"github.com/fadion/aria/token"
)
// Lexer represents the lexer.
type Lexer struct {
reader *reader.Reader
char rune
row int
col int
token token.Token
rewinded bool
symbol *Symbol
}
// New initializes a Lexer.
func New(reader *reader.Reader) *Lexer {
l := &Lexer{
reader: reader,
row: 1,
col: 1,
rewinded: false,
symbol: &Symbol{},
}
// List of valid keywords.
l.symbol.Insert("true", token.BOOLEAN)
l.symbol.Insert("false", token.BOOLEAN)
l.symbol.Insert("nil", token.NIL)
l.symbol.Insert("let", token.LET)
l.symbol.Insert("var", token.VAR)
l.symbol.Insert("func", token.FUNCTION)
l.symbol.Insert("do", token.DO)
l.symbol.Insert("end", token.END)
l.symbol.Insert("if", token.IF)
l.symbol.Insert("else", token.ELSE)
l.symbol.Insert("for", token.FOR)
l.symbol.Insert("in", token.IN)
l.symbol.Insert("is", token.IS)
l.symbol.Insert("as", token.AS)
l.symbol.Insert("return", token.RETURN)
l.symbol.Insert("then", token.THEN)
l.symbol.Insert("switch", token.SWITCH)
l.symbol.Insert("case", token.CASE)
l.symbol.Insert("default", token.DEFAULT)
l.symbol.Insert("break", token.BREAK)
l.symbol.Insert("continue", token.CONTINUE)
l.symbol.Insert("module", token.MODULE)
l.symbol.Insert("import", token.IMPORT)
// Move to the first token.
l.advance()
return l
}
// NextToken returns the next token.
func (l *Lexer) NextToken() token.Token {
// Ignore any number of sequential whitespace.
l.consumeWhitespace()
switch {
case l.char == 0:
l.assignToken(token.EOF, "")
case l.char == '=':
switch l.peek() {
case '=': // ==
l.advance()
l.assignToken(token.EQ, "==")
case '>': // =>
l.advance()
l.assignToken(token.FATARROW, "=>")
default: // =
l.assignToken(token.ASSIGN, string(l.char))
}
case l.char == '>':
switch l.peek() {
case '=': // >=
l.advance()
l.assignToken(token.GTE, ">=")
case '>': // >>
l.advance()
l.assignToken(token.BITSHRIGHT, ">>")
default: // >
l.assignToken(token.GT, string(l.char))
}
case l.char == '<':
switch l.peek() {
case '=': // <=
l.advance()
l.assignToken(token.LTE, "<=")
case '<': // <<
l.advance()
l.assignToken(token.BITSHLEFT, "<<")
default: // <
l.assignToken(token.LT, string(l.char))
}
case l.char == '+':
switch l.peek() {
case '=': // +=
l.advance()
l.assignToken(token.ASSIGNPLUS, "+=")
default: // +
l.assignToken(token.PLUS, string(l.char))
}
case l.char == '-':
switch l.peek() {
case '>': // ->
l.advance()
l.assignToken(token.ARROW, string("->"))
case '=': // -=
l.advance()
l.assignToken(token.ASSIGNMIN, "-=")
default:
l.assignToken(token.MINUS, string(l.char))
}
case l.char == '*':
switch l.peek() {
case '*': // **
l.advance()
l.assignToken(token.POWER, "**")
case '=': // *=
l.advance()
l.assignToken(token.ASSIGNMULT, "*=")
default: // *
l.assignToken(token.ASTERISK, string(l.char))
}
case l.char == '/':
switch l.peek() {
case '/': // single line comment
l.advance()
l.consumeComment()
case '*': // multiline comment
l.advance()
l.consumeMultilineComment()
case '=': // /=
l.advance()
l.assignToken(token.ASSIGNDIV, "/=")
default:
l.assignToken(token.SLASH, string(l.char))
}
case l.char == '%':
l.assignToken(token.MODULO, string(l.char))
case l.char == ',':
l.assignToken(token.COMMA, string(l.char))
case l.char == '.':
switch l.peek() {
case '.':
l.advance()
switch l.peek() {
case '.': // ...
l.advance()
l.assignToken(token.ELLIPSIS, "...")
default: // ..
l.assignToken(token.RANGE, "..")
}
default: // .
l.assignToken(token.DOT, string(l.char))
}
case l.char == '|':
switch l.peek() {
case '|': // ||
l.advance()
l.assignToken(token.OR, "||")
case '>': // |>
l.advance()
l.assignToken(token.PIPE, "|>")
default: // |
l.assignToken(token.BITOR, string(l.char))
}
case l.char == '&':
switch l.peek() {
case '&': // &&
l.advance()
l.assignToken(token.AND, "&&")
default: // &
l.assignToken(token.BITAND, string(l.char))
}
case l.char == '~':
l.assignToken(token.BITNOT, string(l.char))
case l.char == '!':
switch l.peek() {
case '=': // !=
l.advance()
l.assignToken(token.UNEQ, "!=")
default: // !
l.assignToken(token.BANG, string(l.char))
}
case l.char == '(':
l.assignToken(token.LPAREN, "(")
case l.char == ')':
l.assignToken(token.RPAREN, ")")
case l.char == '[':
l.assignToken(token.LBRACK, "[")
case l.char == ']':
l.assignToken(token.RBRACK, "]")
case l.char == '?':
l.assignToken(token.QUESTION, "?")
case l.char == ':':
l.assignToken(token.COLON, ":")
case l.char == '_':
l.assignToken(token.UNDERSCORE, "_")
case l.char == '\n':
l.assignToken(token.NEWLINE, "\\n")
case l.char == '"': // Anything inside double quotes is a string.
l.consumeString()
case l.char == '0' && l.peek() == 'x': // Hex.
l.consumeSpecialInteger(l.isHex)
case l.char == '0' && l.peek() == 'o': // Octal.
l.consumeSpecialInteger(l.isOctal)
case l.char == '0' && l.peek() == 'b': // Binary.
l.consumeSpecialInteger(l.isBinary)
case l.isNumber(l.char): // Numeric literal.
l.consumeNumeric()
default:
// Identifier or keyword.
if l.isName(l.char) {
l.consumeIdent()
} else {
l.reportError(fmt.Sprintf("Unidentified character '%s'", string(l.char)))
}
}
l.advance()
return l.token
}
// Move the cursor ahead.
func (l *Lexer) advance() |
// Check characters ahead but don't move the cursor.
func (l *Lexer) peek() rune {
rn, err := l.reader.Peek()
if err != nil {
l.reportError(fmt.Sprintf("Invalid '%s' character in source file", string(rn)))
}
return rn
}
// Move the cursor to the previous character.
func (l *Lexer) rewind() {
if err := l.reader.Unread(); err != nil {
l.reportError("Unable to move to the previous character. This is an internal Lexer fail.")
}
l.rewinded = true
}
// Move row and column cursor.
func (l *Lexer) moveLocation() {
switch l.char {
case '\n':
l.row += 1
l.col = 2
default:
l.col += 1
}
}
// Pass a token to the active token cursor.
func (l *Lexer) assignToken(toktype token.TokenType, value string) {
l.token = token.Token{
Type: toktype,
Lexeme: value,
Location: token.Location{Row: l.row, Col: l.col},
}
}
// Check if the character makes for a valid identifier
// or keyword.
func (l *Lexer) isName(char rune) bool {
return (char >= 'a' && char <= 'z') || (char >= 'A' && char <= 'Z') ||
(char >= '0' && char <= '9') || char == '_' || char == '!' || char == '?'
}
// Check if the character is a number.
func (l *Lexer) isNumber(char rune) bool {
return char >= '0' && char <= '9'
}
// Check if the character is a hexadecimal digit.
func (l *Lexer) isHex(char rune) bool {
return l.isNumber(char) || (char >= 'a' && char <= 'f' || char >= 'A' && char <= 'F')
}
// Check if the character is an octal.
func (l *Lexer) isOctal(char rune) bool {
return char >= '0' && char <= '7'
}
// Check if the character is a binary.
func (l *Lexer) isBinary(char rune) bool {
return char == '0' || char == '1'
}
// Read all valid characters from an identifier or keyword.
func (l *Lexer) readName() string {
var out bytes.Buffer
out.WriteRune(l.char)
// Read until a non-name character is found.
for l.isName(l.peek()) {
l.advance()
out.WriteRune(l.char)
}
return out.String()
}
// Move the cursor until it exhausts whitespace.
func (l *Lexer) consumeWhitespace() {
for l.char == ' ' || l.char == '\t' || l.char == '\r' {
l.advance()
}
}
// Read a string literal.
func (l *Lexer) consumeString() {
var out bytes.Buffer
// Move past the opening double quote.
l.advance()
loop:
for {
switch l.char {
case '\\': // escape characters
l.advance()
switch l.char {
case '"': // \"
out.WriteRune('\\')
out.WriteRune('"')
case '\\': // \\
out.WriteRune('\\')
case 'n', 't', 'r', 'a', 'b', 'f', 'v':
out.WriteRune('\\')
out.WriteRune(l.char)
default:
l.reportError(fmt.Sprintf("Invalid escape character '%s'", string(l.char)))
}
case 0:
// String should be closed before the end of file.
l.reportError("Unterminated string")
break loop
case '"': // Closing quote.
break loop
default:
out.WriteRune(l.char)
}
l.advance()
}
l.assignToken(token.STRING, out.String())
}
// Read a numeric literal.
func (l *Lexer) consumeNumeric() {
var out bytes.Buffer
// Write the first character, as we're sure
// it's numeric.
out.WriteRune(l.char)
floatFound := false
scientificFound := false
loop:
for {
l.advance()
switch {
case l.isNumber(l.char):
out.WriteRune(l.char)
case l.char == '_': // Thousands separator is ignored.
case l.char == '.' && l.isNumber(l.peek()): // Float.
floatFound = true
out.WriteRune('.')
case l.char == 'e' && (l.isNumber(l.peek()) || l.peek() == '-'): // Scientific notation.
// Numbers in scientific notation are
// treated as floats for ease of use.
floatFound = true
scientificFound = true
out.WriteRune('e')
case l.char == '-' && scientificFound: // Negative scientific notation.
out.WriteRune('-')
case l.char == '.' && l.peek() == '.': // Range operator.
l.rewind()
break loop
case l.char == 0: // Don't rewind on EOF.
break loop
default:
l.rewind()
break loop
}
}
if floatFound {
l.assignToken(token.FLOAT, out.String())
} else {
l.assignToken(token.INTEGER, out.String())
}
}
// Read a binary, octal or hexadecimal literal.
func (l *Lexer) consumeSpecialInteger(fn func(rune) bool) {
var out bytes.Buffer
out.WriteRune(l.char)
out.WriteRune(l.peek())
// Move past the 'x', 'b' or 'o'.
l.advance()
for fn(l.peek()) {
out.WriteRune(l.peek())
l.advance()
}
ret := out.String()
// A starter like '0x' without other characters
// is not enough to make up an Integer.
if len(ret) == 2 {
l.reportError(fmt.Sprintf("Literal sequence '%s' started but not continued", ret))
}
l.assignToken(token.INTEGER, ret)
}
// Read a single line comment.
func (l *Lexer) consumeComment() {
var out bytes.Buffer
l.advance()
loop:
for {
switch l.char {
case '\n', 0: // Comment ends on a line break or EOF
break loop
case '\r': // Or possibly on a \r\n
l.advance()
switch l.char {
case '\n', 0:
break loop
default:
l.reportError("Unexpected comment line ending")
break loop
}
default:
out.WriteRune(l.char)
}
l.advance()
}
l.assignToken(token.COMMENT, out.String())
}
// Read multiline comment.
func (l *Lexer) consumeMultilineComment() {
var out bytes.Buffer
loop:
for {
l.advance()
switch l.char {
case '*':
switch l.peek() {
case '/': // Multiline comments end with */
l.advance()
break loop
}
case 0: // EOF and yet not comment terminator.
l.reportError("Unterminated multiline comment")
break loop
default:
out.WriteRune(l.char)
}
}
l.assignToken(token.COMMENT, out.String())
}
// Read an identifier or keyword.
func (l *Lexer) consumeIdent() {
ident := l.readName()
// Check the symbol table for a known keyword.
// Otherwise call it an Identifier.
if toktype, found := l.symbol.Lookup(ident); found {
l.assignToken(toktype, ident)
} else {
l.assignToken(token.IDENTIFIER, ident)
}
}
// Report an error in the current location.
func (l *Lexer) reportError(message string) {
reporter.Error(reporter.PARSE, token.Location{l.row, l.col}, message)
}
| {
rn, err := l.reader.Advance()
if err != nil {
l.reportError(fmt.Sprintf("Invalid '%s' character in source file", string(rn)))
}
// Don't move the location if it was a
// rewind, or it will report an incorrect
// line and column.
if !l.rewinded {
l.moveLocation()
}
l.rewinded = false
l.char = rn
} | identifier_body |
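// --- Hedged usage sketch (not part of the original lexer.go) ----------------
// Shows how the Lexer above is typically driven: wrap the source in the
// companion reader, then pull tokens until EOF. The reader.New constructor is
// an assumption (that package is not shown here), and lexing errors surface
// through the reporter package as a side effect.
package main

import (
	"fmt"

	"github.com/fadion/aria/lexer"
	"github.com/fadion/aria/reader"
	"github.com/fadion/aria/token"
)

func main() {
	src := []byte(`let x = 1 + 2 // a short program`)
	lex := lexer.New(reader.New(src)) // reader.New([]byte) is assumed
	for {
		t := lex.NextToken()
		if t.Type == token.EOF {
			break
		}
		fmt.Printf("%v %q\n", t.Type, t.Lexeme)
	}
}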
lexer.go | package lexer
import (
"bytes"
"fmt"
"github.com/fadion/aria/reader"
"github.com/fadion/aria/reporter"
"github.com/fadion/aria/token"
)
// Lexer represents the lexer.
type Lexer struct {
reader *reader.Reader
char rune
row int
col int
token token.Token
rewinded bool
symbol *Symbol
}
// New initializes a Lexer.
func New(reader *reader.Reader) *Lexer {
l := &Lexer{
reader: reader,
row: 1,
col: 1,
rewinded: false,
symbol: &Symbol{},
}
// List of valid keywords.
l.symbol.Insert("true", token.BOOLEAN)
l.symbol.Insert("false", token.BOOLEAN)
l.symbol.Insert("nil", token.NIL)
l.symbol.Insert("let", token.LET)
l.symbol.Insert("var", token.VAR)
l.symbol.Insert("func", token.FUNCTION)
l.symbol.Insert("do", token.DO)
l.symbol.Insert("end", token.END)
l.symbol.Insert("if", token.IF)
l.symbol.Insert("else", token.ELSE)
l.symbol.Insert("for", token.FOR)
l.symbol.Insert("in", token.IN)
l.symbol.Insert("is", token.IS)
l.symbol.Insert("as", token.AS)
l.symbol.Insert("return", token.RETURN)
l.symbol.Insert("then", token.THEN)
l.symbol.Insert("switch", token.SWITCH)
l.symbol.Insert("case", token.CASE)
l.symbol.Insert("default", token.DEFAULT)
l.symbol.Insert("break", token.BREAK)
l.symbol.Insert("continue", token.CONTINUE)
l.symbol.Insert("module", token.MODULE)
l.symbol.Insert("import", token.IMPORT)
// Move to the first token.
l.advance()
return l
}
// NextToken returns the next token.
func (l *Lexer) NextToken() token.Token {
// Ignore any number of sequential whitespace.
l.consumeWhitespace()
switch {
case l.char == 0:
l.assignToken(token.EOF, "")
case l.char == '=':
switch l.peek() {
case '=': // ==
l.advance()
l.assignToken(token.EQ, "==")
case '>': // =>
l.advance()
l.assignToken(token.FATARROW, "=>")
default: // =
l.assignToken(token.ASSIGN, string(l.char))
}
case l.char == '>':
switch l.peek() {
case '=': // >=
l.advance()
l.assignToken(token.GTE, ">=")
case '>': // >>
l.advance()
l.assignToken(token.BITSHRIGHT, ">>")
default: // >
l.assignToken(token.GT, string(l.char))
}
case l.char == '<':
switch l.peek() {
case '=': // <=
l.advance()
l.assignToken(token.LTE, "<=")
case '<': // <<
l.advance()
l.assignToken(token.BITSHLEFT, "<<")
default: // <
l.assignToken(token.LT, string(l.char))
}
case l.char == '+':
switch l.peek() {
case '=': // +=
l.advance()
l.assignToken(token.ASSIGNPLUS, "+=")
default: // +
l.assignToken(token.PLUS, string(l.char))
}
case l.char == '-':
switch l.peek() {
case '>': // ->
l.advance()
l.assignToken(token.ARROW, string("->"))
case '=': // -=
l.advance()
l.assignToken(token.ASSIGNMIN, "-=")
default:
l.assignToken(token.MINUS, string(l.char))
}
case l.char == '*':
switch l.peek() {
case '*': // **
l.advance()
l.assignToken(token.POWER, "**")
case '=': // *=
l.advance()
l.assignToken(token.ASSIGNMULT, "*=")
default: // *
l.assignToken(token.ASTERISK, string(l.char))
}
case l.char == '/':
switch l.peek() {
case '/': // single line comment
l.advance()
l.consumeComment()
case '*': // multiline comment
l.advance()
l.consumeMultilineComment()
case '=': // /=
l.advance()
l.assignToken(token.ASSIGNDIV, "/=")
default:
l.assignToken(token.SLASH, string(l.char))
}
case l.char == '%':
l.assignToken(token.MODULO, string(l.char))
case l.char == ',':
l.assignToken(token.COMMA, string(l.char))
case l.char == '.':
switch l.peek() {
case '.':
l.advance()
switch l.peek() {
case '.': // ...
l.advance()
l.assignToken(token.ELLIPSIS, "...")
default: // ..
l.assignToken(token.RANGE, "..")
}
default: // .
l.assignToken(token.DOT, string(l.char))
}
case l.char == '|':
switch l.peek() {
case '|': // ||
l.advance()
l.assignToken(token.OR, "||")
case '>': // |>
l.advance()
l.assignToken(token.PIPE, "|>")
default: // |
l.assignToken(token.BITOR, string(l.char))
}
case l.char == '&':
switch l.peek() {
case '&': // &&
l.advance()
l.assignToken(token.AND, "&&")
default: // &
l.assignToken(token.BITAND, string(l.char))
}
case l.char == '~':
l.assignToken(token.BITNOT, string(l.char))
case l.char == '!':
switch l.peek() {
case '=': // !=
l.advance()
l.assignToken(token.UNEQ, "!=")
default: // !
l.assignToken(token.BANG, string(l.char))
}
case l.char == '(':
l.assignToken(token.LPAREN, "(")
case l.char == ')':
l.assignToken(token.RPAREN, ")")
case l.char == '[':
l.assignToken(token.LBRACK, "[")
case l.char == ']':
l.assignToken(token.RBRACK, "]")
case l.char == '?':
l.assignToken(token.QUESTION, "?")
case l.char == ':':
l.assignToken(token.COLON, ":")
case l.char == '_':
l.assignToken(token.UNDERSCORE, "_")
case l.char == '\n':
l.assignToken(token.NEWLINE, "\\n")
case l.char == '"': // Anything inside double quotes is a string.
l.consumeString()
case l.char == '0' && l.peek() == 'x': // Hex.
l.consumeSpecialInteger(l.isHex)
case l.char == '0' && l.peek() == 'o': // Octal.
l.consumeSpecialInteger(l.isOctal)
case l.char == '0' && l.peek() == 'b': // Binary.
l.consumeSpecialInteger(l.isBinary)
case l.isNumber(l.char): // Numeric literal.
l.consumeNumeric()
default:
// Identifier or keyword.
if l.isName(l.char) {
l.consumeIdent()
} else {
l.reportError(fmt.Sprintf("Unidentified character '%s'", string(l.char)))
}
}
l.advance()
return l.token
}
// Move the cursor ahead.
func (l *Lexer) advance() {
rn, err := l.reader.Advance()
if err != nil {
l.reportError(fmt.Sprintf("Invalid '%s' character in source file", string(rn)))
}
// Don't move the location if it was a
// rewind, or it will report an incorrect
// line and column.
if !l.rewinded {
l.moveLocation()
}
l.rewinded = false
l.char = rn
}
// Check characters ahead but don't move the cursor.
func (l *Lexer) peek() rune {
rn, err := l.reader.Peek()
if err != nil {
l.reportError(fmt.Sprintf("Invalid '%s' character in source file", string(rn)))
}
return rn
}
// Move the cursor to the previous character.
func (l *Lexer) rewind() {
if err := l.reader.Unread(); err != nil {
l.reportError("Unable to move to the previous character. This is an internal Lexer fail.")
}
l.rewinded = true
}
// Move row and column cursor.
func (l *Lexer) | () {
switch l.char {
case '\n':
l.row += 1
l.col = 2
default:
l.col += 1
}
}
// Pass a token to the active token cursor.
func (l *Lexer) assignToken(toktype token.TokenType, value string) {
l.token = token.Token{
Type: toktype,
Lexeme: value,
Location: token.Location{Row: l.row, Col: l.col},
}
}
// Check if the character makes for a valid identifier
// or keyword.
func (l *Lexer) isName(char rune) bool {
return (char >= 'a' && char <= 'z') || (char >= 'A' && char <= 'Z') ||
(char >= '0' && char <= '9') || char == '_' || char == '!' || char == '?'
}
// Check if the character is a number.
func (l *Lexer) isNumber(char rune) bool {
return char >= '0' && char <= '9'
}
// Check if the character is a hexadecimal digit.
func (l *Lexer) isHex(char rune) bool {
return l.isNumber(char) || (char >= 'a' && char <= 'f' || char >= 'A' && char <= 'F')
}
// Check if the character is an octal.
func (l *Lexer) isOctal(char rune) bool {
return char >= '0' && char <= '7'
}
// Check if the character is a binary.
func (l *Lexer) isBinary(char rune) bool {
return char == '0' || char == '1'
}
// Read all valid characters from an identifier or keyword.
func (l *Lexer) readName() string {
var out bytes.Buffer
out.WriteRune(l.char)
// Read until a non-name character is found.
for l.isName(l.peek()) {
l.advance()
out.WriteRune(l.char)
}
return out.String()
}
// Move the cursor until it exhausts whitespace.
func (l *Lexer) consumeWhitespace() {
for l.char == ' ' || l.char == '\t' || l.char == '\r' {
l.advance()
}
}
// Read a string literal.
func (l *Lexer) consumeString() {
var out bytes.Buffer
// Move past the opening double quote.
l.advance()
loop:
for {
switch l.char {
case '\\': // escape characters
l.advance()
switch l.char {
case '"': // \"
out.WriteRune('\\')
out.WriteRune('"')
case '\\': // \\
out.WriteRune('\\')
case 'n', 't', 'r', 'a', 'b', 'f', 'v':
out.WriteRune('\\')
out.WriteRune(l.char)
default:
l.reportError(fmt.Sprintf("Invalid escape character '%s'", string(l.char)))
}
case 0:
// String should be closed before the end of file.
l.reportError("Unterminated string")
break loop
case '"': // Closing quote.
break loop
default:
out.WriteRune(l.char)
}
l.advance()
}
l.assignToken(token.STRING, out.String())
}
// Read a numeric literal.
func (l *Lexer) consumeNumeric() {
var out bytes.Buffer
// Write the first character, as we're sure
// it's numeric.
out.WriteRune(l.char)
floatFound := false
scientificFound := false
loop:
for {
l.advance()
switch {
case l.isNumber(l.char):
out.WriteRune(l.char)
case l.char == '_': // Thousands separator is ignored.
case l.char == '.' && l.isNumber(l.peek()): // Float.
floatFound = true
out.WriteRune('.')
case l.char == 'e' && (l.isNumber(l.peek()) || l.peek() == '-'): // Scientific notation.
// Numbers in scientific notation are
// treated as floats for ease of use.
floatFound = true
scientificFound = true
out.WriteRune('e')
case l.char == '-' && scientificFound: // Negative scientific notation.
out.WriteRune('-')
case l.char == '.' && l.peek() == '.': // Range operator.
l.rewind()
break loop
case l.char == 0: // Don't rewind on EOF.
break loop
default:
l.rewind()
break loop
}
}
if floatFound {
l.assignToken(token.FLOAT, out.String())
} else {
l.assignToken(token.INTEGER, out.String())
}
}
// Read a binary, octal or hexadecimal literal.
func (l *Lexer) consumeSpecialInteger(fn func(rune) bool) {
var out bytes.Buffer
out.WriteRune(l.char)
out.WriteRune(l.peek())
// Move past the 'x', 'b' or 'o'.
l.advance()
for fn(l.peek()) {
out.WriteRune(l.peek())
l.advance()
}
ret := out.String()
// A starter like '0x' without other characters
// is not enough to make up an Integer.
if len(ret) == 2 {
l.reportError(fmt.Sprintf("Literal sequence '%s' started but not continued", ret))
}
l.assignToken(token.INTEGER, ret)
}
// Read a single line comment.
func (l *Lexer) consumeComment() {
var out bytes.Buffer
l.advance()
loop:
for {
switch l.char {
case '\n', 0: // Comment ends on a line break or EOF
break loop
case '\r': // Or possibly on a \r\n
l.advance()
switch l.char {
case '\n', 0:
break loop
default:
l.reportError("Unexpected comment line ending")
break loop
}
default:
out.WriteRune(l.char)
}
l.advance()
}
l.assignToken(token.COMMENT, out.String())
}
// Read multiline comment.
func (l *Lexer) consumeMultilineComment() {
var out bytes.Buffer
loop:
for {
l.advance()
switch l.char {
case '*':
switch l.peek() {
case '/': // Multiline comments end with */
l.advance()
break loop
}
case 0: // EOF and yet not comment terminator.
l.reportError("Unterminated multiline comment")
break loop
default:
out.WriteRune(l.char)
}
}
l.assignToken(token.COMMENT, out.String())
}
// Read an identifier or keyword.
func (l *Lexer) consumeIdent() {
ident := l.readName()
// Check the symbol table for a known keyword.
// Otherwise call it an Identifier.
if toktype, found := l.symbol.Lookup(ident); found {
l.assignToken(toktype, ident)
} else {
l.assignToken(token.IDENTIFIER, ident)
}
}
// Report an error in the current location.
func (l *Lexer) reportError(message string) {
reporter.Error(reporter.PARSE, token.Location{l.row, l.col}, message)
}
| moveLocation | identifier_name |
lexer.go | package lexer
import (
"bytes"
"fmt"
"github.com/fadion/aria/reader"
"github.com/fadion/aria/reporter"
"github.com/fadion/aria/token"
)
// Lexer represents the lexer.
type Lexer struct {
reader *reader.Reader
char rune
row int
col int
token token.Token
rewinded bool
symbol *Symbol
}
// New initializes a Lexer.
func New(reader *reader.Reader) *Lexer {
l := &Lexer{
reader: reader,
row: 1,
col: 1,
rewinded: false,
symbol: &Symbol{},
}
// List of valid keywords.
l.symbol.Insert("true", token.BOOLEAN)
l.symbol.Insert("false", token.BOOLEAN)
l.symbol.Insert("nil", token.NIL)
l.symbol.Insert("let", token.LET)
l.symbol.Insert("var", token.VAR)
l.symbol.Insert("func", token.FUNCTION)
l.symbol.Insert("do", token.DO)
l.symbol.Insert("end", token.END)
l.symbol.Insert("if", token.IF)
l.symbol.Insert("else", token.ELSE)
l.symbol.Insert("for", token.FOR)
l.symbol.Insert("in", token.IN)
l.symbol.Insert("is", token.IS)
l.symbol.Insert("as", token.AS)
l.symbol.Insert("return", token.RETURN)
l.symbol.Insert("then", token.THEN)
l.symbol.Insert("switch", token.SWITCH)
l.symbol.Insert("case", token.CASE)
l.symbol.Insert("default", token.DEFAULT)
l.symbol.Insert("break", token.BREAK)
l.symbol.Insert("continue", token.CONTINUE)
l.symbol.Insert("module", token.MODULE)
l.symbol.Insert("import", token.IMPORT)
// Move to the first token.
l.advance()
return l
}
// NextToken returns the next token.
func (l *Lexer) NextToken() token.Token {
// Ignore any number of sequential whitespace.
l.consumeWhitespace()
switch {
case l.char == 0:
l.assignToken(token.EOF, "")
case l.char == '=':
switch l.peek() {
case '=': // ==
l.advance()
l.assignToken(token.EQ, "==")
case '>': // =>
l.advance()
l.assignToken(token.FATARROW, "=>")
default: // =
l.assignToken(token.ASSIGN, string(l.char))
}
case l.char == '>':
switch l.peek() {
case '=': // >=
l.advance()
l.assignToken(token.GTE, ">=")
case '>': // >>
l.advance()
l.assignToken(token.BITSHRIGHT, ">>")
default: // >
l.assignToken(token.GT, string(l.char))
}
case l.char == '<':
switch l.peek() {
case '=': // <=
l.advance()
l.assignToken(token.LTE, "<=")
case '<': // <<
l.advance()
l.assignToken(token.BITSHLEFT, "<<")
default: // <
l.assignToken(token.LT, string(l.char))
}
case l.char == '+':
switch l.peek() {
case '=': // +=
l.advance()
l.assignToken(token.ASSIGNPLUS, "+=")
default: // +
l.assignToken(token.PLUS, string(l.char))
}
case l.char == '-':
switch l.peek() {
case '>': // ->
l.advance()
l.assignToken(token.ARROW, string("->"))
case '=': // -=
l.advance()
l.assignToken(token.ASSIGNMIN, "-=")
default:
l.assignToken(token.MINUS, string(l.char))
}
case l.char == '*':
switch l.peek() {
case '*': // **
l.advance()
l.assignToken(token.POWER, "**")
case '=': // *=
l.advance()
l.assignToken(token.ASSIGNMULT, "*=")
default: // *
l.assignToken(token.ASTERISK, string(l.char))
}
case l.char == '/':
switch l.peek() {
case '/': // single line comment
l.advance()
l.consumeComment()
case '*': // multiline comment
l.advance()
l.consumeMultilineComment()
case '=': // /=
l.advance()
l.assignToken(token.ASSIGNDIV, "/=")
default:
l.assignToken(token.SLASH, string(l.char))
}
case l.char == '%':
l.assignToken(token.MODULO, string(l.char))
case l.char == ',':
l.assignToken(token.COMMA, string(l.char))
case l.char == '.':
switch l.peek() {
case '.':
l.advance()
switch l.peek() {
case '.': // ...
l.advance()
l.assignToken(token.ELLIPSIS, "...")
default: // ..
l.assignToken(token.RANGE, "..")
}
default: // .
l.assignToken(token.DOT, string(l.char))
}
case l.char == '|':
switch l.peek() {
case '|': // ||
l.advance()
l.assignToken(token.OR, "||")
case '>': // |>
l.advance()
l.assignToken(token.PIPE, "|>")
default: // |
l.assignToken(token.BITOR, string(l.char))
}
case l.char == '&':
switch l.peek() {
case '&': // &&
l.advance()
l.assignToken(token.AND, "&&")
default: // &
l.assignToken(token.BITAND, string(l.char))
}
case l.char == '~':
l.assignToken(token.BITNOT, string(l.char))
case l.char == '!':
switch l.peek() {
case '=': // !=
l.advance()
l.assignToken(token.UNEQ, "!=")
default: // !
l.assignToken(token.BANG, string(l.char))
}
case l.char == '(':
l.assignToken(token.LPAREN, "(")
case l.char == ')':
l.assignToken(token.RPAREN, ")")
case l.char == '[':
l.assignToken(token.LBRACK, "[")
case l.char == ']':
l.assignToken(token.RBRACK, "]")
case l.char == '?':
l.assignToken(token.QUESTION, "?")
case l.char == ':':
l.assignToken(token.COLON, ":")
case l.char == '_':
l.assignToken(token.UNDERSCORE, "_")
case l.char == '\n':
l.assignToken(token.NEWLINE, "\\n")
case l.char == '"': // Anything inside double quotes is a string.
l.consumeString()
case l.char == '0' && l.peek() == 'x': // Hex.
l.consumeSpecialInteger(l.isHex)
case l.char == '0' && l.peek() == 'o': // Octal.
l.consumeSpecialInteger(l.isOctal)
case l.char == '0' && l.peek() == 'b': // Binary.
l.consumeSpecialInteger(l.isBinary)
case l.isNumber(l.char): // Numeric literal.
l.consumeNumeric()
default:
// Identifier or keyword.
if l.isName(l.char) {
l.consumeIdent()
} else {
l.reportError(fmt.Sprintf("Unidentified character '%s'", string(l.char)))
}
}
l.advance()
return l.token
}
// Move the cursor ahead.
func (l *Lexer) advance() {
rn, err := l.reader.Advance()
if err != nil {
l.reportError(fmt.Sprintf("Invalid '%s' character in source file", string(rn)))
}
// Don't move the location if it was a
// rewind, or it will report an incorrect
// line and column.
if !l.rewinded {
l.moveLocation()
}
l.rewinded = false
l.char = rn
}
// Check characters ahead but don't move the cursor.
func (l *Lexer) peek() rune {
rn, err := l.reader.Peek()
if err != nil {
l.reportError(fmt.Sprintf("Invalid '%s' character in source file", string(rn)))
}
return rn
}
// Move the cursor to the previous character.
func (l *Lexer) rewind() {
if err := l.reader.Unread(); err != nil {
l.reportError("Unable to move to the previous character. This is an internal Lexer fail.")
}
l.rewinded = true
}
// Move row and column cursor.
func (l *Lexer) moveLocation() {
switch l.char {
case '\n':
l.row += 1
l.col = 2
default:
l.col += 1
}
}
// Pass a token to the active token cursor.
func (l *Lexer) assignToken(toktype token.TokenType, value string) {
l.token = token.Token{
Type: toktype,
Lexeme: value,
Location: token.Location{Row: l.row, Col: l.col},
}
}
// Check if the character makes for a valid identifier
// or keyword.
func (l *Lexer) isName(char rune) bool {
return (char >= 'a' && char <= 'z') || (char >= 'A' && char <= 'Z') ||
(char >= '0' && char <= '9') || char == '_' || char == '!' || char == '?'
}
// Check if the character is a number.
func (l *Lexer) isNumber(char rune) bool {
return char >= '0' && char <= '9'
}
// Check if the character is a hexadecimal digit.
func (l *Lexer) isHex(char rune) bool {
return l.isNumber(char) || (char >= 'a' && char <= 'f' || char >= 'A' && char <= 'F')
}
// Check if the character is an octal.
func (l *Lexer) isOctal(char rune) bool {
return char >= '0' && char <= '7'
}
// Check if the character is a binary.
func (l *Lexer) isBinary(char rune) bool {
return char == '0' || char == '1'
}
// Read all valid characters from an identifier or keyword.
func (l *Lexer) readName() string {
var out bytes.Buffer
out.WriteRune(l.char)
// Read until a non-name character is found.
for l.isName(l.peek()) {
l.advance()
out.WriteRune(l.char)
}
return out.String()
}
// Move the cursor until it exhausts whitespace.
func (l *Lexer) consumeWhitespace() {
for l.char == ' ' || l.char == '\t' || l.char == '\r' {
l.advance()
}
}
// Read a string literal.
func (l *Lexer) consumeString() {
var out bytes.Buffer
// Move past the opening double quote.
l.advance()
loop:
for {
switch l.char {
case '\\': // escape characters
l.advance()
switch l.char {
case '"': // \"
out.WriteRune('\\')
out.WriteRune('"')
case '\\': // \\
out.WriteRune('\\')
case 'n', 't', 'r', 'a', 'b', 'f', 'v':
out.WriteRune('\\')
out.WriteRune(l.char)
default:
l.reportError(fmt.Sprintf("Invalid escape character '%s'", string(l.char)))
}
case 0:
// String should be closed before the end of file.
l.reportError("Unterminated string")
break loop
case '"': // Closing quote.
break loop
default:
out.WriteRune(l.char)
}
l.advance()
}
l.assignToken(token.STRING, out.String())
}
// Read a numeric literal.
func (l *Lexer) consumeNumeric() {
var out bytes.Buffer
// Write the first character, as we're sure
// it's numeric.
out.WriteRune(l.char)
floatFound := false
scientificFound := false
loop:
for {
l.advance()
switch {
case l.isNumber(l.char):
out.WriteRune(l.char)
case l.char == '_': // Thousands separator is ignored.
case l.char == '.' && l.isNumber(l.peek()): // Float.
floatFound = true
out.WriteRune('.')
case l.char == 'e' && (l.isNumber(l.peek()) || l.peek() == '-'): // Scientific notation.
// Numbers in scientific notation are
// treated as floats for ease of use.
floatFound = true
scientificFound = true
out.WriteRune('e')
case l.char == '-' && scientificFound: // Negative scientific notation.
out.WriteRune('-')
case l.char == '.' && l.peek() == '.': // Range operator.
l.rewind()
break loop
case l.char == 0: // Don't rewind on EOF.
break loop
default:
l.rewind()
break loop
}
}
if floatFound {
l.assignToken(token.FLOAT, out.String())
} else {
l.assignToken(token.INTEGER, out.String())
}
}
// Read a binary, octal or hexadecimal literal.
func (l *Lexer) consumeSpecialInteger(fn func(rune) bool) {
var out bytes.Buffer
out.WriteRune(l.char)
out.WriteRune(l.peek())
// Move past the 'x', 'b' or 'o'.
l.advance()
for fn(l.peek()) {
out.WriteRune(l.peek())
l.advance()
}
ret := out.String()
// A starter like '0x' without other characters
// is not enough to make up an Integer.
if len(ret) == 2 {
l.reportError(fmt.Sprintf("Literal sequence '%s' started but not continued", ret))
}
l.assignToken(token.INTEGER, ret)
}
// Read a single line comment.
func (l *Lexer) consumeComment() {
var out bytes.Buffer
l.advance()
loop:
for {
switch l.char {
case '\n', 0: // Comment ends on a line break or EOF
break loop
case '\r': // Or possibly on a \r\n
l.advance()
switch l.char {
case '\n', 0: | }
default:
out.WriteRune(l.char)
}
l.advance()
}
l.assignToken(token.COMMENT, out.String())
}
// Read multiline comment.
func (l *Lexer) consumeMultilineComment() {
var out bytes.Buffer
loop:
for {
l.advance()
switch l.char {
case '*':
switch l.peek() {
case '/': // Multiline comments end with */
l.advance()
break loop
}
case 0: // EOF and yet not comment terminator.
l.reportError("Unterminated multiline comment")
break loop
default:
out.WriteRune(l.char)
}
}
l.assignToken(token.COMMENT, out.String())
}
// Read an identifier or keyword.
func (l *Lexer) consumeIdent() {
ident := l.readName()
// Check the symbol table for a known keyword.
// Otherwise call it an Identifier.
if toktype, found := l.symbol.Lookup(ident); found {
l.assignToken(toktype, ident)
} else {
l.assignToken(token.IDENTIFIER, ident)
}
}
// Report an error in the current location.
func (l *Lexer) reportError(message string) {
reporter.Error(reporter.PARSE, token.Location{l.row, l.col}, message)
} | break loop
default:
l.reportError("Unexpected comment line ending")
break loop | random_line_split |
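// --- Hedged sketch (not part of the original file) --------------------------
// The lexer relies on a Symbol type with Insert and Lookup methods that is
// defined elsewhere in the package. A minimal map-backed version could look
// like this; the real implementation may differ, so treat it as an assumption.
package lexer

import "github.com/fadion/aria/token"

// Symbol is a small keyword table mapping lexemes to token types.
type Symbol struct {
	entries map[string]token.TokenType
}

// Insert registers a keyword with its token type, allocating lazily so the
// zero value (&Symbol{}) used by New keeps working.
func (s *Symbol) Insert(name string, toktype token.TokenType) {
	if s.entries == nil {
		s.entries = make(map[string]token.TokenType)
	}
	s.entries[name] = toktype
}

// Lookup returns the token type for a keyword and whether it is known.
func (s *Symbol) Lookup(name string) (token.TokenType, bool) {
	toktype, ok := s.entries[name]
	return toktype, ok
}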
lexer.go | package lexer
import (
"bytes"
"fmt"
"github.com/fadion/aria/reader"
"github.com/fadion/aria/reporter"
"github.com/fadion/aria/token"
)
// Lexer represents the lexer.
type Lexer struct {
reader *reader.Reader
char rune
row int
col int
token token.Token
rewinded bool
symbol *Symbol
}
// New initializes a Lexer.
func New(reader *reader.Reader) *Lexer {
l := &Lexer{
reader: reader,
row: 1,
col: 1,
rewinded: false,
symbol: &Symbol{},
}
// List of valid keywords.
l.symbol.Insert("true", token.BOOLEAN)
l.symbol.Insert("false", token.BOOLEAN)
l.symbol.Insert("nil", token.NIL)
l.symbol.Insert("let", token.LET)
l.symbol.Insert("var", token.VAR)
l.symbol.Insert("func", token.FUNCTION)
l.symbol.Insert("do", token.DO)
l.symbol.Insert("end", token.END)
l.symbol.Insert("if", token.IF)
l.symbol.Insert("else", token.ELSE)
l.symbol.Insert("for", token.FOR)
l.symbol.Insert("in", token.IN)
l.symbol.Insert("is", token.IS)
l.symbol.Insert("as", token.AS)
l.symbol.Insert("return", token.RETURN)
l.symbol.Insert("then", token.THEN)
l.symbol.Insert("switch", token.SWITCH)
l.symbol.Insert("case", token.CASE)
l.symbol.Insert("default", token.DEFAULT)
l.symbol.Insert("break", token.BREAK)
l.symbol.Insert("continue", token.CONTINUE)
l.symbol.Insert("module", token.MODULE)
l.symbol.Insert("import", token.IMPORT)
// Move to the first token.
l.advance()
return l
}
// NextToken returns the next token.
func (l *Lexer) NextToken() token.Token {
// Ignore any number of sequential whitespace.
l.consumeWhitespace()
switch {
case l.char == 0:
l.assignToken(token.EOF, "")
case l.char == '=':
switch l.peek() {
case '=': // ==
l.advance()
l.assignToken(token.EQ, "==")
case '>': // =>
l.advance()
l.assignToken(token.FATARROW, "=>")
default: // =
l.assignToken(token.ASSIGN, string(l.char))
}
case l.char == '>':
switch l.peek() {
case '=': // >=
l.advance()
l.assignToken(token.GTE, ">=")
case '>': // >>
l.advance()
l.assignToken(token.BITSHRIGHT, ">>")
default: // >
l.assignToken(token.GT, string(l.char))
}
case l.char == '<':
switch l.peek() {
case '=': // <=
l.advance()
l.assignToken(token.LTE, "<=")
case '<': // <<
l.advance()
l.assignToken(token.BITSHLEFT, "<<")
default: // <
l.assignToken(token.LT, string(l.char))
}
case l.char == '+':
switch l.peek() {
case '=': // +=
l.advance()
l.assignToken(token.ASSIGNPLUS, "+=")
default: // +
l.assignToken(token.PLUS, string(l.char))
}
case l.char == '-':
switch l.peek() {
case '>': // ->
l.advance()
l.assignToken(token.ARROW, string("->"))
case '=': // -=
l.advance()
l.assignToken(token.ASSIGNMIN, "-=")
default:
l.assignToken(token.MINUS, string(l.char))
}
case l.char == '*':
switch l.peek() {
case '*': // **
l.advance()
l.assignToken(token.POWER, "**")
case '=': // *=
l.advance()
l.assignToken(token.ASSIGNMULT, "*=")
default: // *
l.assignToken(token.ASTERISK, string(l.char))
}
case l.char == '/':
switch l.peek() {
case '/': // single line comment
l.advance()
l.consumeComment()
case '*': // multiline comment
l.advance()
l.consumeMultilineComment()
case '=': // /=
l.advance()
l.assignToken(token.ASSIGNDIV, "/=")
default:
l.assignToken(token.SLASH, string(l.char))
}
case l.char == '%':
l.assignToken(token.MODULO, string(l.char))
case l.char == ',':
l.assignToken(token.COMMA, string(l.char))
case l.char == '.':
switch l.peek() {
case '.':
l.advance()
switch l.peek() {
case '.': // ...
l.advance()
l.assignToken(token.ELLIPSIS, "...")
default: // ..
l.assignToken(token.RANGE, "..")
}
default: // .
l.assignToken(token.DOT, string(l.char))
}
case l.char == '|':
switch l.peek() {
case '|': // ||
l.advance()
l.assignToken(token.OR, "||")
case '>': // |>
l.advance()
l.assignToken(token.PIPE, "|>")
default: // |
l.assignToken(token.BITOR, string(l.char))
}
case l.char == '&':
switch l.peek() {
case '&': // &&
l.advance()
l.assignToken(token.AND, "&&")
default: // &
l.assignToken(token.BITAND, string(l.char))
}
case l.char == '~':
l.assignToken(token.BITNOT, string(l.char))
case l.char == '!':
switch l.peek() {
case '=': // !=
l.advance()
l.assignToken(token.UNEQ, "!=")
default: // !
l.assignToken(token.BANG, string(l.char))
}
case l.char == '(':
l.assignToken(token.LPAREN, "(")
case l.char == ')':
l.assignToken(token.RPAREN, ")")
case l.char == '[':
l.assignToken(token.LBRACK, "[")
case l.char == ']':
l.assignToken(token.RBRACK, "]")
case l.char == '?':
l.assignToken(token.QUESTION, "?")
case l.char == ':':
l.assignToken(token.COLON, ":")
case l.char == '_':
l.assignToken(token.UNDERSCORE, "_")
case l.char == '\n':
l.assignToken(token.NEWLINE, "\\n")
case l.char == '"': // Anything inside double quotes is a string.
l.consumeString()
case l.char == '0' && l.peek() == 'x': // Hex.
l.consumeSpecialInteger(l.isHex)
case l.char == '0' && l.peek() == 'o': // Octal.
l.consumeSpecialInteger(l.isOctal)
case l.char == '0' && l.peek() == 'b': // Binary.
l.consumeSpecialInteger(l.isBinary)
case l.isNumber(l.char): // Numeric literal.
l.consumeNumeric()
default:
// Identifier or keyword.
if l.isName(l.char) {
l.consumeIdent()
} else {
l.reportError(fmt.Sprintf("Unidentified character '%s'", string(l.char)))
}
}
l.advance()
return l.token
}
// Move the cursor ahead.
func (l *Lexer) advance() {
rn, err := l.reader.Advance()
if err != nil {
l.reportError(fmt.Sprintf("Invalid '%s' character in source file", string(rn)))
}
// Don't move the location if it was a
// rewind, or it will report an incorrect
// line and column.
if !l.rewinded |
l.rewinded = false
l.char = rn
}
// Check characters ahead but don't move the cursor.
func (l *Lexer) peek() rune {
rn, err := l.reader.Peek()
if err != nil {
l.reportError(fmt.Sprintf("Invalid '%s' character in source file", string(rn)))
}
return rn
}
// Move the cursor to the previous character.
func (l *Lexer) rewind() {
if err := l.reader.Unread(); err != nil {
l.reportError("Unable to move to the previous character. This is an internal Lexer fail.")
}
l.rewinded = true
}
// Move row and column cursor.
func (l *Lexer) moveLocation() {
switch l.char {
case '\n':
l.row += 1
l.col = 2
default:
l.col += 1
}
}
// Pass a token to the active token cursor.
func (l *Lexer) assignToken(toktype token.TokenType, value string) {
l.token = token.Token{
Type: toktype,
Lexeme: value,
Location: token.Location{Row: l.row, Col: l.col},
}
}
// Check if the character makes for a valid identifier
// or keyword.
func (l *Lexer) isName(char rune) bool {
return (char >= 'a' && char <= 'z') || (char >= 'A' && char <= 'Z') ||
(char >= '0' && char <= '9') || char == '_' || char == '!' || char == '?'
}
// Check if the character is a number.
func (l *Lexer) isNumber(char rune) bool {
return char >= '0' && char <= '9'
}
// Check if the character is a hexadecimal digit.
func (l *Lexer) isHex(char rune) bool {
return l.isNumber(char) || (char >= 'a' && char <= 'f' || char >= 'A' && char <= 'F')
}
// Check if the character is an octal.
func (l *Lexer) isOctal(char rune) bool {
return char >= '0' && char <= '7'
}
// Check if the character is a binary.
func (l *Lexer) isBinary(char rune) bool {
return char == '0' || char == '1'
}
// Read all valid characters from an identifier or keyword.
func (l *Lexer) readName() string {
var out bytes.Buffer
out.WriteRune(l.char)
// Read until a non-name character is found.
for l.isName(l.peek()) {
l.advance()
out.WriteRune(l.char)
}
return out.String()
}
// Move the cursor until it exhausts whitespace.
func (l *Lexer) consumeWhitespace() {
for l.char == ' ' || l.char == '\t' || l.char == '\r' {
l.advance()
}
}
// Read a string literal.
func (l *Lexer) consumeString() {
var out bytes.Buffer
// Move past the opening double quote.
l.advance()
loop:
for {
switch l.char {
case '\\': // escape characters
l.advance()
switch l.char {
case '"': // \"
out.WriteRune('\\')
out.WriteRune('"')
case '\\': // \\
out.WriteRune('\\')
case 'n', 't', 'r', 'a', 'b', 'f', 'v':
out.WriteRune('\\')
out.WriteRune(l.char)
default:
l.reportError(fmt.Sprintf("Invalid escape character '%s'", string(l.char)))
}
case 0:
// String should be closed before the end of file.
l.reportError("Unterminated string")
break loop
case '"': // Closing quote.
break loop
default:
out.WriteRune(l.char)
}
l.advance()
}
l.assignToken(token.STRING, out.String())
}
// Read a numeric literal.
func (l *Lexer) consumeNumeric() {
var out bytes.Buffer
// Write the first character, as we're sure
// it's numeric.
out.WriteRune(l.char)
floatFound := false
scientificFound := false
loop:
for {
l.advance()
switch {
case l.isNumber(l.char):
out.WriteRune(l.char)
case l.char == '_': // Thousands separator is ignored.
case l.char == '.' && l.isNumber(l.peek()): // Float.
floatFound = true
out.WriteRune('.')
case l.char == 'e' && (l.isNumber(l.peek()) || l.peek() == '-'): // Scientific notation.
// Numbers in scientific notation are
// treated as floats for ease of use.
floatFound = true
scientificFound = true
out.WriteRune('e')
case l.char == '-' && scientificFound: // Negative scientific notation.
out.WriteRune('-')
case l.char == '.' && l.peek() == '.': // Range operator.
l.rewind()
break loop
case l.char == 0: // Don't rewind on EOF.
break loop
default:
l.rewind()
break loop
}
}
if floatFound {
l.assignToken(token.FLOAT, out.String())
} else {
l.assignToken(token.INTEGER, out.String())
}
}
// Read a binary, octal or hexadecimal literal.
func (l *Lexer) consumeSpecialInteger(fn func(rune) bool) {
var out bytes.Buffer
out.WriteRune(l.char)
out.WriteRune(l.peek())
// Move past the 'x', 'b' or 'o'.
l.advance()
for fn(l.peek()) {
out.WriteRune(l.peek())
l.advance()
}
ret := out.String()
// A starter like '0x' without other characters
// is not enough to make up an Integer.
if len(ret) == 2 {
l.reportError(fmt.Sprintf("Literal sequence '%s' started but not continued", ret))
}
l.assignToken(token.INTEGER, ret)
}
// Read a single line comment.
func (l *Lexer) consumeComment() {
var out bytes.Buffer
l.advance()
loop:
for {
switch l.char {
case '\n', 0: // Comment ends on a line break or EOF
break loop
case '\r': // Or possibly on a \r\n
l.advance()
switch l.char {
case '\n', 0:
break loop
default:
l.reportError("Unexpected comment line ending")
break loop
}
default:
out.WriteRune(l.char)
}
l.advance()
}
l.assignToken(token.COMMENT, out.String())
}
// Read multiline comment.
func (l *Lexer) consumeMultilineComment() {
var out bytes.Buffer
loop:
for {
l.advance()
switch l.char {
case '*':
switch l.peek() {
case '/': // Multiline comments end with */
l.advance()
break loop
}
case 0: // EOF and yet not comment terminator.
l.reportError("Unterminated multiline comment")
break loop
default:
out.WriteRune(l.char)
}
}
l.assignToken(token.COMMENT, out.String())
}
// Read an identifier or keyword.
func (l *Lexer) consumeIdent() {
ident := l.readName()
// Check the symbol table for a known keyword.
// Otherwise call it an Identifier.
if toktype, found := l.symbol.Lookup(ident); found {
l.assignToken(toktype, ident)
} else {
l.assignToken(token.IDENTIFIER, ident)
}
}
// Report an error in the current location.
func (l *Lexer) reportError(message string) {
reporter.Error(reporter.PARSE, token.Location{l.row, l.col}, message)
}
| {
l.moveLocation()
} | conditional_block |
watcher.go | package consul
import (
"crypto/x509"
"sync"
"time"
slog "github.com/go-eden/slf4go"
"github.com/hashicorp/consul/api"
)
const (
errorWaitTime = 5 * time.Second
)
var log = slog.NewLogger("consul-watcher")
type ConsulConfig struct {
// Address is the address of the Consul server
Address string
// Scheme is the URI scheme for the Consul server
Scheme string
// Datacenter to use. If not provided, the default agent datacenter is used.
Datacenter string
// Transport is the Transport to use for the http client.
//Transport *http.Transport
// HttpClient is the client to use. Default will be
// used if not provided.
//HttpClient *http.Client
// HttpAuth is the auth info to use for http access.
//HttpAuth *HttpBasicAuth
// WaitTime limits how long a Watch will block. If not provided,
// the agent default values will be used.
//WaitTime time.Duration
// Token is used to provide a per-request ACL token
// which overrides the agent's default token.
Token string
// TokenFile is a file containing the current token to use for this client.
// If provided it is read once at startup and never again.
//TokenFile string
// Namespace is the name of the namespace to send along for the request
// when no other Namespace is present in the QueryOptions
Namespace string
//TLSConfig TLSConfig
}
type service struct {
name string
instances []*api.ServiceEntry
intentions []*api.Intention
gatewayService *api.GatewayService
leaf *certLeaf
ready sync.WaitGroup
done bool
}
type certLeaf struct {
Cert []byte
Key []byte
done bool
}
//Watcher struct for TG config
type Watcher struct {
settings api.Config
id string
name string
namespace string
address string
port int
consul *api.Client
token string
C chan Config
lock sync.Mutex
ready sync.WaitGroup
services map[string]*service
certCAs [][]byte
certCAPool *x509.CertPool
leaf *certLeaf
update chan struct{}
}
//New Watcher
func New() *Watcher {
log.Info("creating new Consul watcher")
return &Watcher{
C: make(chan Config),
services: make(map[string]*service),
update: make(chan struct{}, 1),
}
}
func (w *Watcher) Init(c ConsulConfig, gatewayName string, namespace string) error {
var err error
log.Infof("initializing Consul watcher for gateway: %+v", gatewayName)
w.name = gatewayName
w.namespace = namespace
w.settings = *api.DefaultConfig()
w.settings.Address = c.Address
w.settings.Scheme = c.Scheme
w.settings.Token = c.Token
w.settings.Namespace = c.Namespace
w.consul, err = api.NewClient(&w.settings)
if err != nil {
return err
}
return nil
}
//Run Watcher
func (w *Watcher) Run() error {
//Debug
log.Debugf("running watcher for gateway: %s\n", w.name)
w.ready.Add(3)
go w.watchService(w.name, true, "terminating-gateway")
go w.watchGateway()
go w.watchCA()
w.ready.Wait()
for range w.update {
w.C <- w.genCfg()
}
return nil
}
//Reload Configuration
func (w *Watcher) Reload() {
w.C <- w.genCfg()
}
func (w *Watcher) watchLeaf(service string, first bool) {
log.Debugf("watching leaf cert for %s", service)
dFirst := true
var lastIndex uint64
for {
if w.services[service] == nil {
return
} else if w.services[service].done {
return
}
cert, meta, err := w.consul.Agent().ConnectCALeaf(service, &api.QueryOptions{
WaitTime: 10 * time.Minute,
WaitIndex: lastIndex,
})
if err != nil {
log.Errorf("consul error fetching leaf cert for service %s: %s", service, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("leaf cert for service %s changed, serial: %s, valid before: %s, valid after: %s", service, cert.SerialNumber, cert.ValidBefore, cert.ValidAfter)
w.lock.Lock()
if w.services[service].leaf == nil {
w.services[service].leaf = &certLeaf{}
}
w.services[service].leaf.Cert = []byte(cert.CertPEM)
w.services[service].leaf.Key = []byte(cert.PrivateKeyPEM)
w.lock.Unlock()
if dFirst {
w.services[service].ready.Done()
dFirst = false
} else {
w.notifyChanged()
}
}
if first {
log.Infof("leaf cert for %s ready", service)
w.ready.Done()
first = false
}
}
}
func (w *Watcher) watchIntention(service string, first bool) {
log.Debugf("watching intentions for %s", service)
dFirst := true
var lastIndex uint64
for {
if w.services[service] == nil {
return
} else if w.services[service].done {
return
}
intentionList, meta, err := w.consul.Connect().Intentions(&api.QueryOptions{
WaitTime: 10 * time.Minute,
WaitIndex: lastIndex,
Filter: "DestinationName==" + service,
})
if err != nil {
log.Errorf("consul error fetching intentions for service %s: %s", service, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("intentions for service %s changed", service)
w.lock.Lock()
w.services[service].intentions = intentionList
w.lock.Unlock()
if dFirst {
w.services[service].ready.Done()
dFirst = false
} else {
w.notifyChanged()
}
}
if first {
log.Infof("intentions for %s ready", service)
w.ready.Done()
first = false
}
}
}
func (w *Watcher) watchGateway() |
func (w *Watcher) watchService(service string, first bool, kind string) {
log.Infof("watching downstream: %s", service)
dFirst := true
var lastIndex uint64
var nSpace string
for {
if kind != "terminating-gateway" {
if w.services[service] == nil {
return
} else if w.services[service].done {
return
}
// Read the namespace only after confirming the service is still tracked.
nSpace = w.services[service].gatewayService.Service.Namespace
} else {
nSpace = w.namespace
}
srv, meta, err := w.consul.Health().Service(service, "", false, &api.QueryOptions{
WaitIndex: lastIndex,
WaitTime: 10 * time.Minute,
Namespace: nSpace,
})
if err != nil {
log.Errorf("error fetching service %s definition: %s", service, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Debugf("service %s changed", service)
if len(srv) == 0 {
log.Infof("no service definition found for: %s", service)
continue
} else if len(srv) > 1 && kind == "terminating-gateway" {
log.Errorf("too many service definitions found for: %s", service)
continue
}
w.lock.Lock()
if kind == "terminating-gateway" {
w.id = srv[0].Service.ID
w.name = srv[0].Service.Service
w.address = srv[0].Service.Address
w.port = srv[0].Service.Port
} else {
w.services[service].instances = srv
}
w.lock.Unlock()
if dFirst && kind != "terminating-gateway" {
w.services[service].ready.Wait()
dFirst = false
}
w.notifyChanged()
}
if first {
log.Infof("service config for %s ready", service)
w.ready.Done()
first = false
}
}
}
func (w *Watcher) handleProxyChange(first bool, gwServices *[]*api.GatewayService) {
keep := make(map[string]bool)
if gwServices != nil {
for _, down := range *gwServices {
keep[down.Service.Name] = true
w.lock.Lock()
_, ok := w.services[down.Service.Name]
w.lock.Unlock()
if !ok {
if first {
w.ready.Add(3)
}
w.startService(down, first)
}
}
}
for name := range w.services {
if !keep[name] {
w.removeService(name)
}
}
}
func (w *Watcher) startService(down *api.GatewayService, first bool) {
d := &service{
name: down.Service.Name,
gatewayService: down,
}
w.lock.Lock()
w.services[down.Service.Name] = d
w.lock.Unlock()
d.ready.Add(2)
go w.watchService(d.name, first, "")
go w.watchLeaf(d.name, first)
go w.watchIntention(d.name, first)
}
func (w *Watcher) removeService(name string) {
log.Infof("removing downstream for service %s", name)
w.lock.Lock()
w.services[name].done = true
delete(w.services, name)
w.lock.Unlock()
w.notifyChanged()
}
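// watchCA blocking-queries the Connect CA roots and rebuilds the trusted CA pool whenever they change.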
func (w *Watcher) watchCA() {
log.Debugf("watching ca certs")
first := true
var lastIndex uint64
for {
caList, meta, err := w.consul.Agent().ConnectCARoots(&api.QueryOptions{
WaitIndex: lastIndex,
WaitTime: 10 * time.Minute,
})
if err != nil {
log.Errorf("error fetching cas: %s", err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("CA certs changed, active root id: %s", caList.ActiveRootID)
w.lock.Lock()
w.certCAs = w.certCAs[:0]
w.certCAPool = x509.NewCertPool()
for _, ca := range caList.Roots {
w.certCAs = append(w.certCAs, []byte(ca.RootCertPEM))
ok := w.certCAPool.AppendCertsFromPEM([]byte(ca.RootCertPEM))
if !ok {
log.Warn("unable to add CA certificate to pool")
}
}
w.lock.Unlock()
w.notifyChanged()
}
if first {
log.Infof("CA certs ready")
w.ready.Done()
first = false
}
}
}
func (w *Watcher) genCfg() Config {
w.lock.Lock()
defer func() {
w.lock.Unlock()
log.Debugf("done generating configuration")
}()
if len(w.services) == 0 {
return Config{}
}
watcherConfig := Config{
GatewayName: w.name,
GatewayID: w.id,
GatewayAddress: w.address,
GatewayPort: w.port,
CAsPool: w.certCAPool,
CAs: w.certCAs,
}
for _, down := range w.services {
downstream := NewService(down)
downstream.TLS.CAs = w.certCAs
watcherConfig.Services = append(watcherConfig.Services, downstream)
}
return watcherConfig
}
func (w *Watcher) notifyChanged() {
select {
case w.update <- struct{}{}:
default:
}
}
| {
var lastIndex uint64
first := true
for {
gwServices, meta, err := w.consul.Catalog().GatewayServices(w.name, &api.QueryOptions{
WaitTime: 10 * time.Minute,
WaitIndex: lastIndex,
})
if err != nil {
log.Errorf("error fetching linked services for gateway %s: %s", w.name, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("linked services changed for gateway %s", w.name)
if first && len(gwServices) == 0 {
log.Infof("no linked services defined for gateway %s", w.name)
continue
}
w.handleProxyChange(first, &gwServices)
}
if first {
log.Infof("linked services for %s ready", w.name)
first = false
w.ready.Done()
}
}
} | identifier_body |
watcher.go | package consul
import (
"crypto/x509"
"sync"
"time"
slog "github.com/go-eden/slf4go"
"github.com/hashicorp/consul/api"
)
const (
errorWaitTime = 5 * time.Second
)
var log = slog.NewLogger("consul-watcher")
type ConsulConfig struct {
// Address is the address of the Consul server
Address string
// Scheme is the URI scheme for the Consul server
Scheme string
// Datacenter to use. If not provided, the default agent datacenter is used.
Datacenter string
// Transport is the Transport to use for the http client.
//Transport *http.Transport
// HttpClient is the client to use. Default will be
// used if not provided.
//HttpClient *http.Client
// HttpAuth is the auth info to use for http access.
//HttpAuth *HttpBasicAuth
// WaitTime limits how long a Watch will block. If not provided,
// the agent default values will be used.
//WaitTime time.Duration
// Token is used to provide a per-request ACL token
// which overrides the agent's default token.
Token string
// TokenFile is a file containing the current token to use for this client.
// If provided it is read once at startup and never again.
//TokenFile string
// Namespace is the name of the namespace to send along for the request
// when no other Namespace is present in the QueryOptions
Namespace string
//TLSConfig TLSConfig
}
type service struct {
name string
instances []*api.ServiceEntry
intentions []*api.Intention
gatewayService *api.GatewayService
leaf *certLeaf
ready sync.WaitGroup
done bool
}
type certLeaf struct {
Cert []byte
Key []byte
done bool
}
//Watcher struct for terminating gateway config
type Watcher struct {
settings api.Config
id string
name string
namespace string
address string
port int
consul *api.Client
token string
C chan Config
lock sync.Mutex
ready sync.WaitGroup
services map[string]*service
certCAs [][]byte
certCAPool *x509.CertPool
leaf *certLeaf
update chan struct{}
}
//New Watcher
func New() *Watcher {
log.Info("creating new Consul watcher")
return &Watcher{
C: make(chan Config),
services: make(map[string]*service),
update: make(chan struct{}, 1),
}
}
func (w *Watcher) Init(c ConsulConfig, gatewayName string, namespace string) error { |
log.Infof("initializing Consul watcher for gateway: %+v", gatewayName)
w.name = gatewayName
w.namespace = namespace
w.settings = *api.DefaultConfig()
w.settings.Address = c.Address
w.settings.Scheme = c.Scheme
w.settings.Token = c.Token
w.settings.Namespace = c.Namespace
w.consul, err = api.NewClient(&w.settings)
if err != nil {
return err
}
return nil
}
//Run Watcher
func (w *Watcher) Run() error {
//Debug
log.Debugf("running watcher for gateway: %s\n", w.name)
w.ready.Add(3)
go w.watchService(w.name, true, "terminating-gateway")
go w.watchGateway()
go w.watchCA()
w.ready.Wait()
for range w.update {
w.C <- w.genCfg()
}
return nil
}
//Reload Configuration
func (w *Watcher) Reload() {
w.C <- w.genCfg()
}
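// watchLeaf blocking-queries Consul for the service's Connect leaf certificate and caches the PEM-encoded cert and key.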
func (w *Watcher) watchLeaf(service string, first bool) {
log.Debugf("watching leaf cert for %s", service)
dFirst := true
var lastIndex uint64
for {
if w.services[service] == nil {
return
} else if w.services[service].done {
return
}
cert, meta, err := w.consul.Agent().ConnectCALeaf(service, &api.QueryOptions{
WaitTime: 10 * time.Minute,
WaitIndex: lastIndex,
})
if err != nil {
log.Errorf("consul error fetching leaf cert for service %s: %s", service, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("leaf cert for service %s changed, serial: %s, valid before: %s, valid after: %s", service, cert.SerialNumber, cert.ValidBefore, cert.ValidAfter)
w.lock.Lock()
if w.services[service].leaf == nil {
w.services[service].leaf = &certLeaf{}
}
w.services[service].leaf.Cert = []byte(cert.CertPEM)
w.services[service].leaf.Key = []byte(cert.PrivateKeyPEM)
w.lock.Unlock()
if dFirst {
w.services[service].ready.Done()
dFirst = false
} else {
w.notifyChanged()
}
}
if first {
log.Infof("leaf cert for %s ready", service)
w.ready.Done()
first = false
}
}
}
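// watchIntention blocking-queries Consul for intentions whose destination is the service and caches the result.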
func (w *Watcher) watchIntention(service string, first bool) {
log.Debugf("watching intentions for %s", service)
dFirst := true
var lastIndex uint64
for {
if w.services[service] == nil {
return
} else if w.services[service].done {
return
}
intentionList, meta, err := w.consul.Connect().Intentions(&api.QueryOptions{
WaitTime: 10 * time.Minute,
WaitIndex: lastIndex,
Filter: "DestinationName==" + service,
})
if err != nil {
log.Errorf("consul error fetching intentions for service %s: %s", service, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("intentions for service %s changed", service)
w.lock.Lock()
w.services[service].intentions = intentionList
w.lock.Unlock()
if dFirst {
w.services[service].ready.Done()
dFirst = false
} else {
w.notifyChanged()
}
}
if first {
log.Infof("intentions for %s ready", service)
w.ready.Done()
first = false
}
}
}
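// watchGateway watches the list of services linked to this terminating gateway and reconciles the per-service watchers when it changes.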
func (w *Watcher) watchGateway() {
var lastIndex uint64
first := true
for {
gwServices, meta, err := w.consul.Catalog().GatewayServices(w.name, &api.QueryOptions{
WaitTime: 10 * time.Minute,
WaitIndex: lastIndex,
})
if err != nil {
log.Errorf("error fetching linked services for gateway %s: %s", w.name, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("linked services changed for gateway %s", w.name)
if first && len(gwServices) == 0 {
log.Infof("no linked services defined for gateway %s", w.name)
continue
}
w.handleProxyChange(first, &gwServices)
}
if first {
log.Infof("linked services for %s ready", w.name)
first = false
w.ready.Done()
}
}
}
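// watchService watches a service's health entries; for the terminating gateway itself it records the gateway's own identity instead.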
func (w *Watcher) watchService(service string, first bool, kind string) {
log.Infof("watching downstream: %s", service)
dFirst := true
var lastIndex uint64
var nSpace string
for {
if kind != "terminating-gateway" {
if w.services[service] == nil {
return
} else if w.services[service].done {
return
}
// Use the linked service's namespace when one is set on the gateway mapping.
if w.services[service].gatewayService.Service.Namespace != "" {
nSpace = w.services[service].gatewayService.Service.Namespace
}
} else {
nSpace = w.namespace
}
srv, meta, err := w.consul.Health().Service(service, "", false, &api.QueryOptions{
WaitIndex: lastIndex,
WaitTime: 10 * time.Minute,
Namespace: nSpace,
})
if err != nil {
log.Errorf("error fetching service %s definition: %s", service, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Debugf("service %s changed", service)
if len(srv) == 0 {
log.Infof("no service definition found for: %s", service)
continue
} else if len(srv) > 1 && kind == "terminating-gateway" {
log.Errorf("too many service definitions found for: %s", service)
continue
}
w.lock.Lock()
if kind == "terminating-gateway" {
w.id = srv[0].Service.ID
w.name = srv[0].Service.Service
w.address = srv[0].Service.Address
w.port = srv[0].Service.Port
} else {
w.services[service].instances = srv
}
w.lock.Unlock()
if dFirst && kind != "terminating-gateway" {
w.services[service].ready.Wait()
dFirst = false
}
w.notifyChanged()
}
if first {
log.Infof("service config for %s ready", service)
w.ready.Done()
first = false
}
}
}
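// handleProxyChange starts watchers for newly linked services and removes watchers for services no longer linked to the gateway.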
func (w *Watcher) handleProxyChange(first bool, gwServices *[]*api.GatewayService) {
keep := make(map[string]bool)
if gwServices != nil {
for _, down := range *gwServices {
keep[down.Service.Name] = true
w.lock.Lock()
_, ok := w.services[down.Service.Name]
w.lock.Unlock()
if !ok {
if first {
w.ready.Add(3)
}
w.startService(down, first)
}
}
}
for name := range w.services {
if !keep[name] {
w.removeService(name)
}
}
}
func (w *Watcher) startService(down *api.GatewayService, first bool) {
d := &service{
name: down.Service.Name,
gatewayService: down,
}
w.lock.Lock()
w.services[down.Service.Name] = d
w.lock.Unlock()
d.ready.Add(2)
go w.watchService(d.name, first, "")
go w.watchLeaf(d.name, first)
go w.watchIntention(d.name, first)
}
func (w *Watcher) removeService(name string) {
log.Infof("removing downstream for service %s", name)
w.lock.Lock()
w.services[name].done = true
delete(w.services, name)
w.lock.Unlock()
w.notifyChanged()
}
func (w *Watcher) watchCA() {
log.Debugf("watching ca certs")
first := true
var lastIndex uint64
for {
caList, meta, err := w.consul.Agent().ConnectCARoots(&api.QueryOptions{
WaitIndex: lastIndex,
WaitTime: 10 * time.Minute,
})
if err != nil {
log.Errorf("error fetching cas: %s", err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("CA certs changed, active root id: %s", caList.ActiveRootID)
w.lock.Lock()
w.certCAs = w.certCAs[:0]
w.certCAPool = x509.NewCertPool()
for _, ca := range caList.Roots {
w.certCAs = append(w.certCAs, []byte(ca.RootCertPEM))
ok := w.certCAPool.AppendCertsFromPEM([]byte(ca.RootCertPEM))
if !ok {
log.Warn("unable to add CA certificate to pool")
}
}
w.lock.Unlock()
w.notifyChanged()
}
if first {
log.Infof("CA certs ready")
w.ready.Done()
first = false
}
}
}
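// genCfg snapshots the current watcher state into a Config for consumers.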
func (w *Watcher) genCfg() Config {
w.lock.Lock()
defer func() {
w.lock.Unlock()
log.Debugf("done generating configuration")
}()
if len(w.services) == 0 {
return Config{}
}
watcherConfig := Config{
GatewayName: w.name,
GatewayID: w.id,
GatewayAddress: w.address,
GatewayPort: w.port,
CAsPool: w.certCAPool,
CAs: w.certCAs,
}
for _, down := range w.services {
downstream := NewService(down)
downstream.TLS.CAs = w.certCAs
watcherConfig.Services = append(watcherConfig.Services, downstream)
}
return watcherConfig
}
func (w *Watcher) notifyChanged() {
select {
case w.update <- struct{}{}:
default:
}
} | var err error | random_line_split |
watcher.go | package consul
import (
"crypto/x509"
"sync"
"time"
slog "github.com/go-eden/slf4go"
"github.com/hashicorp/consul/api"
)
const (
errorWaitTime = 5 * time.Second
)
var log = slog.NewLogger("consul-watcher")
type ConsulConfig struct {
// Address is the address of the Consul server
Address string
// Scheme is the URI scheme for the Consul server
Scheme string
// Datacenter to use. If not provided, the default agent datacenter is used.
Datacenter string
// Transport is the Transport to use for the http client.
//Transport *http.Transport
// HttpClient is the client to use. Default will be
// used if not provided.
//HttpClient *http.Client
// HttpAuth is the auth info to use for http access.
//HttpAuth *HttpBasicAuth
// WaitTime limits how long a Watch will block. If not provided,
// the agent default values will be used.
//WaitTime time.Duration
// Token is used to provide a per-request ACL token
// which overrides the agent's default token.
Token string
// TokenFile is a file containing the current token to use for this client.
// If provided it is read once at startup and never again.
//TokenFile string
// Namespace is the name of the namespace to send along for the request
// when no other Namespace is present in the QueryOptions
Namespace string
//TLSConfig TLSConfig
}
type service struct {
name string
instances []*api.ServiceEntry
intentions []*api.Intention
gatewayService *api.GatewayService
leaf *certLeaf
ready sync.WaitGroup
done bool
}
type certLeaf struct {
Cert []byte
Key []byte
done bool
}
//Watcher struct for terminating gateway config
type Watcher struct {
settings api.Config
id string
name string
namespace string
address string
port int
consul *api.Client
token string
C chan Config
lock sync.Mutex
ready sync.WaitGroup
services map[string]*service
certCAs [][]byte
certCAPool *x509.CertPool
leaf *certLeaf
update chan struct{}
}
//New Watcher
func New() *Watcher {
log.Info("creating new Consul watcher")
return &Watcher{
C: make(chan Config),
services: make(map[string]*service),
update: make(chan struct{}, 1),
}
}
func (w *Watcher) Init(c ConsulConfig, gatewayName string, namespace string) error {
var err error
log.Infof("initializing Consul watcher for gateway: %+v", gatewayName)
w.name = gatewayName
w.namespace = namespace
w.settings = *api.DefaultConfig()
w.settings.Address = c.Address
w.settings.Scheme = c.Scheme
w.settings.Token = c.Token
w.settings.Namespace = c.Namespace
w.consul, err = api.NewClient(&w.settings)
if err != nil {
return err
}
return nil
}
//Run Watcher
func (w *Watcher) Run() error {
//Debug
log.Debugf("running watcher for gateway: %s\n", w.name)
w.ready.Add(3)
go w.watchService(w.name, true, "terminating-gateway")
go w.watchGateway()
go w.watchCA()
w.ready.Wait()
for range w.update {
w.C <- w.genCfg()
}
return nil
}
//Reload Configuration
func (w *Watcher) Reload() {
w.C <- w.genCfg()
}
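// watchLeaf blocking-queries Consul for the service's Connect leaf certificate and caches the PEM-encoded cert and key.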
func (w *Watcher) watchLeaf(service string, first bool) {
log.Debugf("watching leaf cert for %s", service)
dFirst := true
var lastIndex uint64
for {
if w.services[service] == nil {
return
} else if w.services[service].done {
return
}
cert, meta, err := w.consul.Agent().ConnectCALeaf(service, &api.QueryOptions{
WaitTime: 10 * time.Minute,
WaitIndex: lastIndex,
})
if err != nil {
log.Errorf("consul error fetching leaf cert for service %s: %s", service, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("leaf cert for service %s changed, serial: %s, valid before: %s, valid after: %s", service, cert.SerialNumber, cert.ValidBefore, cert.ValidAfter)
w.lock.Lock()
if w.services[service].leaf == nil {
w.services[service].leaf = &certLeaf{}
}
w.services[service].leaf.Cert = []byte(cert.CertPEM)
w.services[service].leaf.Key = []byte(cert.PrivateKeyPEM)
w.lock.Unlock()
if dFirst {
w.services[service].ready.Done()
dFirst = false
} else {
w.notifyChanged()
}
}
if first {
log.Infof("leaf cert for %s ready", service)
w.ready.Done()
first = false
}
}
}
func (w *Watcher) watchIntention(service string, first bool) {
log.Debugf("watching intentions for %s", service)
dFirst := true
var lastIndex uint64
for {
if w.services[service] == nil {
return
} else if w.services[service].done {
return
}
intentionList, meta, err := w.consul.Connect().Intentions(&api.QueryOptions{
WaitTime: 10 * time.Minute,
WaitIndex: lastIndex,
Filter: "DestinationName==" + service,
})
if err != nil {
log.Errorf("consul error fetching intentions for service %s: %s", service, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("intentions for service %s changed", service)
w.lock.Lock()
w.services[service].intentions = intentionList
w.lock.Unlock()
if dFirst {
w.services[service].ready.Done()
dFirst = false
} else {
w.notifyChanged()
}
}
if first {
log.Infof("intentions for %s ready", service)
w.ready.Done()
first = false
}
}
}
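// watchGateway watches the list of services linked to this terminating gateway and reconciles the per-service watchers when it changes.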
func (w *Watcher) watchGateway() {
var lastIndex uint64
first := true
for {
gwServices, meta, err := w.consul.Catalog().GatewayServices(w.name, &api.QueryOptions{
WaitTime: 10 * time.Minute,
WaitIndex: lastIndex,
})
if err != nil {
log.Errorf("error fetching linked services for gateway %s: %s", w.name, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("linked services changed for gateway %s", w.name)
if first && len(gwServices) == 0 {
log.Infof("no linked services defined for gateway %s", w.name)
continue
}
w.handleProxyChange(first, &gwServices)
}
if first {
log.Infof("linked services for %s ready", w.name)
first = false
w.ready.Done()
}
}
}
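// watchService watches a service's health entries; for the terminating gateway itself it records the gateway's own identity instead.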
func (w *Watcher) watchService(service string, first bool, kind string) {
log.Infof("watching downstream: %s", service)
dFirst := true
var lastIndex uint64
var nSpace string
for {
if kind != "terminating-gateway" {
if w.services[service] == nil {
return
} else if w.services[service].done {
return
}
// Use the linked service's namespace when one is set on the gateway mapping.
if w.services[service].gatewayService.Service.Namespace != "" {
nSpace = w.services[service].gatewayService.Service.Namespace
}
} else {
nSpace = w.namespace
}
srv, meta, err := w.consul.Health().Service(service, "", false, &api.QueryOptions{
WaitIndex: lastIndex,
WaitTime: 10 * time.Minute,
Namespace: nSpace,
})
if err != nil {
log.Errorf("error fetching service %s definition: %s", service, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Debugf("service %s changed", service)
if len(srv) == 0 {
log.Infof("no service definition found for: %s", service)
continue
} else if len(srv) > 1 && kind == "terminating-gateway" {
log.Errorf("too many service definitions found for: %s", service)
continue
}
w.lock.Lock()
if kind == "terminating-gateway" {
w.id = srv[0].Service.ID
w.name = srv[0].Service.Service
w.address = srv[0].Service.Address
w.port = srv[0].Service.Port
} else {
w.services[service].instances = srv
}
w.lock.Unlock()
if dFirst && kind != "terminating-gateway" {
w.services[service].ready.Wait()
dFirst = false
}
w.notifyChanged()
}
if first {
log.Infof("service config for %s ready", service)
w.ready.Done()
first = false
}
}
}
func (w *Watcher) handleProxyChange(first bool, gwServices *[]*api.GatewayService) {
keep := make(map[string]bool)
if gwServices != nil |
for name := range w.services {
if !keep[name] {
w.removeService(name)
}
}
}
func (w *Watcher) startService(down *api.GatewayService, first bool) {
d := &service{
name: down.Service.Name,
gatewayService: down,
}
w.lock.Lock()
w.services[down.Service.Name] = d
w.lock.Unlock()
d.ready.Add(2)
go w.watchService(d.name, first, "")
go w.watchLeaf(d.name, first)
go w.watchIntention(d.name, first)
}
func (w *Watcher) removeService(name string) {
log.Infof("removing downstream for service %s", name)
w.lock.Lock()
w.services[name].done = true
delete(w.services, name)
w.lock.Unlock()
w.notifyChanged()
}
func (w *Watcher) watchCA() {
log.Debugf("watching ca certs")
first := true
var lastIndex uint64
for {
caList, meta, err := w.consul.Agent().ConnectCARoots(&api.QueryOptions{
WaitIndex: lastIndex,
WaitTime: 10 * time.Minute,
})
if err != nil {
log.Errorf("error fetching cas: %s", err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("CA certs changed, active root id: %s", caList.ActiveRootID)
w.lock.Lock()
w.certCAs = w.certCAs[:0]
w.certCAPool = x509.NewCertPool()
for _, ca := range caList.Roots {
w.certCAs = append(w.certCAs, []byte(ca.RootCertPEM))
ok := w.certCAPool.AppendCertsFromPEM([]byte(ca.RootCertPEM))
if !ok {
log.Warn("unable to add CA certificate to pool")
}
}
w.lock.Unlock()
w.notifyChanged()
}
if first {
log.Infof("CA certs ready")
w.ready.Done()
first = false
}
}
}
func (w *Watcher) genCfg() Config {
w.lock.Lock()
defer func() {
w.lock.Unlock()
log.Debugf("done generating configuration")
}()
if len(w.services) == 0 {
return Config{}
}
watcherConfig := Config{
GatewayName: w.name,
GatewayID: w.id,
GatewayAddress: w.address,
GatewayPort: w.port,
CAsPool: w.certCAPool,
CAs: w.certCAs,
}
for _, down := range w.services {
downstream := NewService(down)
downstream.TLS.CAs = w.certCAs
watcherConfig.Services = append(watcherConfig.Services, downstream)
}
return watcherConfig
}
func (w *Watcher) notifyChanged() {
select {
case w.update <- struct{}{}:
default:
}
}
| {
for _, down := range *gwServices {
keep[down.Service.Name] = true
w.lock.Lock()
_, ok := w.services[down.Service.Name]
w.lock.Unlock()
if !ok {
if first {
w.ready.Add(3)
}
w.startService(down, first)
}
}
} | conditional_block |
watcher.go | package consul
import (
"crypto/x509"
"sync"
"time"
slog "github.com/go-eden/slf4go"
"github.com/hashicorp/consul/api"
)
const (
errorWaitTime = 5 * time.Second
)
var log = slog.NewLogger("consul-watcher")
type ConsulConfig struct {
// Address is the address of the Consul server
Address string
// Scheme is the URI scheme for the Consul server
Scheme string
// Datacenter to use. If not provided, the default agent datacenter is used.
Datacenter string
// Transport is the Transport to use for the http client.
//Transport *http.Transport
// HttpClient is the client to use. Default will be
// used if not provided.
//HttpClient *http.Client
// HttpAuth is the auth info to use for http access.
//HttpAuth *HttpBasicAuth
// WaitTime limits how long a Watch will block. If not provided,
// the agent default values will be used.
//WaitTime time.Duration
// Token is used to provide a per-request ACL token
// which overrides the agent's default token.
Token string
// TokenFile is a file containing the current token to use for this client.
// If provided it is read once at startup and never again.
//TokenFile string
// Namespace is the name of the namespace to send along for the request
// when no other Namespace is present in the QueryOptions
Namespace string
//TLSConfig TLSConfig
}
type service struct {
name string
instances []*api.ServiceEntry
intentions []*api.Intention
gatewayService *api.GatewayService
leaf *certLeaf
ready sync.WaitGroup
done bool
}
type certLeaf struct {
Cert []byte
Key []byte
done bool
}
//Watcher struct for terminating gateway config
type Watcher struct {
settings api.Config
id string
name string
namespace string
address string
port int
consul *api.Client
token string
C chan Config
lock sync.Mutex
ready sync.WaitGroup
services map[string]*service
certCAs [][]byte
certCAPool *x509.CertPool
leaf *certLeaf
update chan struct{}
}
//New Watcher
func New() *Watcher {
log.Info("creating new Consul watcher")
return &Watcher{
C: make(chan Config),
services: make(map[string]*service),
update: make(chan struct{}, 1),
}
}
func (w *Watcher) Init(c ConsulConfig, gatewayName string, namespace string) error {
var err error
log.Infof("initializing Consul watcher for gateway: %+v", gatewayName)
w.name = gatewayName
w.namespace = namespace
w.settings = *api.DefaultConfig()
w.settings.Address = c.Address
w.settings.Scheme = c.Scheme
w.settings.Token = c.Token
w.settings.Namespace = c.Namespace
w.consul, err = api.NewClient(&w.settings)
if err != nil {
return err
}
return nil
}
//Run Watcher
func (w *Watcher) Run() error {
//Debug
log.Debugf("running watcher for gateway: %s\n", w.name)
w.ready.Add(3)
go w.watchService(w.name, true, "terminating-gateway")
go w.watchGateway()
go w.watchCA()
w.ready.Wait()
for range w.update {
w.C <- w.genCfg()
}
return nil
}
//Reload Configuration
func (w *Watcher) | () {
w.C <- w.genCfg()
}
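// watchLeaf blocking-queries Consul for the service's Connect leaf certificate and caches the PEM-encoded cert and key.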
func (w *Watcher) watchLeaf(service string, first bool) {
log.Debugf("watching leaf cert for %s", service)
dFirst := true
var lastIndex uint64
for {
if w.services[service] == nil {
return
} else if w.services[service].done {
return
}
cert, meta, err := w.consul.Agent().ConnectCALeaf(service, &api.QueryOptions{
WaitTime: 10 * time.Minute,
WaitIndex: lastIndex,
})
if err != nil {
log.Errorf("consul error fetching leaf cert for service %s: %s", service, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("leaf cert for service %s changed, serial: %s, valid before: %s, valid after: %s", service, cert.SerialNumber, cert.ValidBefore, cert.ValidAfter)
w.lock.Lock()
if w.services[service].leaf == nil {
w.services[service].leaf = &certLeaf{}
}
w.services[service].leaf.Cert = []byte(cert.CertPEM)
w.services[service].leaf.Key = []byte(cert.PrivateKeyPEM)
w.lock.Unlock()
if dFirst {
w.services[service].ready.Done()
dFirst = false
} else {
w.notifyChanged()
}
}
if first {
log.Infof("leaf cert for %s ready", service)
w.ready.Done()
first = false
}
}
}
func (w *Watcher) watchIntention(service string, first bool) {
log.Debugf("watching intentions for %s", service)
dFirst := true
var lastIndex uint64
for {
if w.services[service] == nil {
return
} else if w.services[service].done {
return
}
intentionList, meta, err := w.consul.Connect().Intentions(&api.QueryOptions{
WaitTime: 10 * time.Minute,
WaitIndex: lastIndex,
Filter: "DestinationName==" + service,
})
if err != nil {
log.Errorf("consul error fetching intentions for service %s: %s", service, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("intentions for service %s changed", service)
w.lock.Lock()
w.services[service].intentions = intentionList
w.lock.Unlock()
if dFirst {
w.services[service].ready.Done()
dFirst = false
} else {
w.notifyChanged()
}
}
if first {
log.Infof("intentions for %s ready", service)
w.ready.Done()
first = false
}
}
}
func (w *Watcher) watchGateway() {
var lastIndex uint64
first := true
for {
gwServices, meta, err := w.consul.Catalog().GatewayServices(w.name, &api.QueryOptions{
WaitTime: 10 * time.Minute,
WaitIndex: lastIndex,
})
if err != nil {
log.Errorf("error fetching linked services for gateway %s: %s", w.name, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("linked services changed for gateway %s", w.name)
if first && len(gwServices) == 0 {
log.Infof("no linked services defined for gateway %s", w.name)
continue
}
w.handleProxyChange(first, &gwServices)
}
if first {
log.Infof("linked services for %s ready", w.name)
first = false
w.ready.Done()
}
}
}
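// watchService watches a service's health entries; for the terminating gateway itself it records the gateway's own identity instead.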
func (w *Watcher) watchService(service string, first bool, kind string) {
log.Infof("watching downstream: %s", service)
dFirst := true
var lastIndex uint64
var nSpace string
for {
if kind != "terminating-gateway" {
if w.services[service] == nil {
return
} else if w.services[service].done {
return
}
// Use the linked service's namespace when one is set on the gateway mapping.
if w.services[service].gatewayService.Service.Namespace != "" {
nSpace = w.services[service].gatewayService.Service.Namespace
}
} else {
nSpace = w.namespace
}
srv, meta, err := w.consul.Health().Service(service, "", false, &api.QueryOptions{
WaitIndex: lastIndex,
WaitTime: 10 * time.Minute,
Namespace: nSpace,
})
if err != nil {
log.Errorf("error fetching service %s definition: %s", service, err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Debugf("service %s changed", service)
if len(srv) == 0 {
log.Infof("no service definition found for: %s", service)
continue
} else if len(srv) > 1 && kind == "terminating-gateway" {
log.Errorf("too many service definitions found for: %s", service)
continue
}
w.lock.Lock()
if kind == "terminating-gateway" {
w.id = srv[0].Service.ID
w.name = srv[0].Service.Service
w.address = srv[0].Service.Address
w.port = srv[0].Service.Port
} else {
w.services[service].instances = srv
}
w.lock.Unlock()
if dFirst && kind != "terminating-gateway" {
w.services[service].ready.Wait()
dFirst = false
}
w.notifyChanged()
}
if first {
log.Infof("service config for %s ready", service)
w.ready.Done()
first = false
}
}
}
func (w *Watcher) handleProxyChange(first bool, gwServices *[]*api.GatewayService) {
keep := make(map[string]bool)
if gwServices != nil {
for _, down := range *gwServices {
keep[down.Service.Name] = true
w.lock.Lock()
_, ok := w.services[down.Service.Name]
w.lock.Unlock()
if !ok {
if first {
w.ready.Add(3)
}
w.startService(down, first)
}
}
}
for name := range w.services {
if !keep[name] {
w.removeService(name)
}
}
}
func (w *Watcher) startService(down *api.GatewayService, first bool) {
d := &service{
name: down.Service.Name,
gatewayService: down,
}
w.lock.Lock()
w.services[down.Service.Name] = d
w.lock.Unlock()
d.ready.Add(2)
go w.watchService(d.name, first, "")
go w.watchLeaf(d.name, first)
go w.watchIntention(d.name, first)
}
func (w *Watcher) removeService(name string) {
log.Infof("removing downstream for service %s", name)
w.lock.Lock()
w.services[name].done = true
delete(w.services, name)
w.lock.Unlock()
w.notifyChanged()
}
func (w *Watcher) watchCA() {
log.Debugf("watching ca certs")
first := true
var lastIndex uint64
for {
caList, meta, err := w.consul.Agent().ConnectCARoots(&api.QueryOptions{
WaitIndex: lastIndex,
WaitTime: 10 * time.Minute,
})
if err != nil {
log.Errorf("error fetching cas: %s", err)
time.Sleep(errorWaitTime)
if meta != nil {
if meta.LastIndex < lastIndex || meta.LastIndex < 1 {
lastIndex = 0
}
}
continue
}
changed := lastIndex != meta.LastIndex
lastIndex = meta.LastIndex
if changed {
log.Infof("CA certs changed, active root id: %s", caList.ActiveRootID)
w.lock.Lock()
w.certCAs = w.certCAs[:0]
w.certCAPool = x509.NewCertPool()
for _, ca := range caList.Roots {
w.certCAs = append(w.certCAs, []byte(ca.RootCertPEM))
ok := w.certCAPool.AppendCertsFromPEM([]byte(ca.RootCertPEM))
if !ok {
log.Warn("unable to add CA certificate to pool")
}
}
w.lock.Unlock()
w.notifyChanged()
}
if first {
log.Infof("CA certs ready")
w.ready.Done()
first = false
}
}
}
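// genCfg snapshots the current watcher state into a Config for consumers.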
func (w *Watcher) genCfg() Config {
w.lock.Lock()
defer func() {
w.lock.Unlock()
log.Debugf("done generating configuration")
}()
if len(w.services) == 0 {
return Config{}
}
watcherConfig := Config{
GatewayName: w.name,
GatewayID: w.id,
GatewayAddress: w.address,
GatewayPort: w.port,
CAsPool: w.certCAPool,
CAs: w.certCAs,
}
for _, down := range w.services {
downstream := NewService(down)
downstream.TLS.CAs = w.certCAs
watcherConfig.Services = append(watcherConfig.Services, downstream)
}
return watcherConfig
}
func (w *Watcher) notifyChanged() {
select {
case w.update <- struct{}{}:
default:
}
}
| Reload | identifier_name |
practice.js | // -- PRELOAD --
// Note: Waiting for init() call
var preload = new createjs.LoadQueue();
preload.on("progress", handleOverallProgress, this);
preload.on("complete", handleComplete, this);
var manifest = [
{src: 'img/practice_bg.png', id: 'bg'},
{src: 'img/life.png', id: 'life'},
{src: 'img/no_life.png', id: 'nolife'},
{src: 'img/answer.png', id: 'ans'},
{src: 'img/button_back.png', id: 'back'}
];
function handleOverallProgress(event) {
var progressPercent = (preload.progress * 100).toFixed(2) + "%";
$("#loading-indicator").text(progressPercent);
}
function handleComplete(event) {
console.log("All files loaded");
$("#loading-div").hide();
initGame();
}
// -- END PRELOAD --
// EaselJS
var canvas, stage;
var backButton;
// Game Info
var OPERATORS = ["+", "-", "x", "/"];
var questions = [];
var answers = [];
var currentAnswer; // Note: This is an object, to access the answer value use currentAnswer.answer
var correct = 0;
var incorrect = 0;
var correctIndicator, incorrectIndicator;
// Difficulty
var DIFFICULTY_COUNT = 20; // Every 20 correct answers advances the difficulty
var difficultyController = new DifficultyController(); // Controls the difficulty
var termRange = {min: 2, max: 2}; // Only supports 2-3 terms
var operatorRange = { min: 0, max: 1}; // 0 = +, 1 = -, 2 = x, 3 = /
// Layers
var overlayLayer = new createjs.Container();
var foregroundLayer = new createjs.Container();
var midLayer = new createjs.Container(); // Only contains questions
var backgroundLayer = new createjs.Container();
// DisplayObjects
var scoreDisplay;
var timerDisplay;
var livesDisplay;
// Audio
var sfxEnabled, bgmEnabled;
// -- INITIALIZATION --
// Sets up the canvas, stage, and preloads all the assets.
function init() {
// Initialize all base variables and preload assets. Once assets are loaded, handleComplete() calls initGame().
// Canvas info
canvas = document.getElementById("canvas");
fullScreenCanvas(canvas); // Sets width and height to fill screen
// Stage info
stage = new createjs.Stage(canvas); // Creates a EaselJS Stage for drawing
stage.addChild(backgroundLayer, midLayer, foregroundLayer, overlayLayer); // Add layers
// Detection
createjs.Touch.enable(stage);
// Initialize global variables for layout and sizing
initializeVariables(canvas.width, canvas.height);
// Preload all assets (crucial for first rendering)
preload.loadManifest(manifest);
}
function initGame() {
// Audio:
sfxEnabled = (localStorage.getItem("sfx-muted") == "true") ? false : true;
bgmEnabled = (localStorage.getItem("bgm-muted") == "true") ? false : true;
createjs.Sound.initializeDefaultPlugins();
var audiopath ="sound/";
var sounds = [
{src:"game_bgm.wav", id:"bg_music"},
{src:"hitsound1.wav", id:"correct"},
{src:"miss.wav", id:"incorrect"},
{src:"failsound.mp3", id:"gameover"},
{src:"achievement.mp3", id:"achieved"}
];
createjs.Sound.addEventListener("fileload", bgm); // Will call bgm() when loaded
createjs.Sound.registerSounds(sounds, audiopath);
// Initialization:
// Background
var bgImage = preload.getResult("bg");
var background = new createjs.Bitmap(bgImage);
setScaleFactor(background, canvas.width, canvas.height);
backgroundLayer.addChild(background);
// Indicator stuff
backButton = foregroundLayer.addChild(new Button('back'));
correctIndicator = foregroundLayer.addChild(new CorrectIndicator());
incorrectIndicator = foregroundLayer.addChild(new IncorrectIndicator());
// Answers and questions (in this order)
initializeAnswers();
initializeQuestions();
updateCurrentAnswer();
// Initial positions and sizing
initializeAnswerPositions();
initializeQuestionPositions();
// Looper
createjs.Ticker.setFPS(60);
// Handles all the update logic
createjs.Ticker.on("tick", handleTick);
// Achievements
// No condition
checkAchievement(achieve.YOURTRAININGBEGINS_KEY, achieve.YOURTRAININGBEGINS_SRC);
}
// -- HANDLERS --
function handleTick(event) {
if (!event.paused) {
// Render
stage.update();
}
}
// -- METHODS --
// INITIALIZERS
function initializeAnswers() {
for (i = 0; i < 5; i++) {
var nextAnswer = generateNextAnswer();
nextAnswer.index = i; // We need the index so we can replace them properly
answers.push(nextAnswer);
}
}
function initializeQuestions() {
for (i = 0; i < 3; i++) {
questions.push(generateNextQuestion());
}
}
function initializeQuestionPositions() |
function initializeAnswerPositions() {
for (a = 0; a < 5; a++) {
// x and y of the CENTER of the container. (not top left)
answers[a].x = (properties.ANS_SIZE / 2) + (a)*(properties.ANS_SIZE);
console.log("Ans x: " + answers[a].x + " y: " + answers[a].y);
}
}
// AUDIO
function bgm(event){
console.log("Audio loaded");
if(bgmEnabled){
var instance = createjs.Sound.play("bg_music", { loop: -1 });
}
}
function correctSfx() {
if (sfxEnabled) { var instance = createjs.Sound.play("correct"); }
}
function incorrectSfx() {
if (sfxEnabled) { var instance = createjs.Sound.play("incorrect"); }
}
function gameoverSfx() {
if (sfxEnabled) { var instance = createjs.Sound.play("gameover"); }
}
function buttonSound() {
if (sfxEnabled) { var sound = new Audio("buttonSound"); }
}
// GAME LOGIC
// Creates the next answer
function generateNextAnswer() {
var randInt;
// Loop until there is no overlap
outer:
while (true) {
// Get the new value
randInt = getRandomInt(1, 20);
// Check if it exists already
for (j = 0; j < answers.length; j++) {
if (answers[j].answer == randInt) {
continue outer; // Yes - retry
}
}
// No - return the value
break;
}
// Create the next answer
// Finalize setup
var nextAnswer = foregroundLayer.addChild(new Answer(randInt)); // Create the actual object
if (currentAnswer != null) {
nextAnswer.index = currentAnswer.index; // Pass on parent index
nextAnswer.x = currentAnswer.x; // Pass on parent position
}
// Return the displayObject
return nextAnswer;
}
// Gathers all the necessary info before generating the next question
function prepareNextQuestion() {
// Obtain information about the current board
var availableArray = [];
// Note: foreach loop not working very well
for (a=0; a<answers.length; a++) {
if (answers[a].available == true) {
availableArray.push(answers[a]);
}
}
// Select one of the available numbers
var randAvailable = availableArray[getRandomInt(0, availableArray.length - 1)];
// Toggle availibility off
randAvailable.available = false;
// Retrieve the answer
return randAvailable.answer;
}
function generateNextQuestion() {
// Init the question
var question;
// Retrieve the next answer
var answer = prepareNextQuestion();
// Generate a 2 or 3 term question
var randTerm = getRandomInt(termRange["min"], termRange["max"]);
// Generating a 2 term question
if (randTerm == 2) {
// Initialize the pieces (operands, operators, answer)
var fullSet = [];
var numSet = [];
var operatorSet = [];
// Calculate the pieces
operatorSet[0] = OPERATORS[getRandomInt(operatorRange["min"], operatorRange["max"])];
numSet = generateSet(answer, operatorSet[0]);
// Assemble all the pieces
fullSet[0] = numSet;
fullSet[1] = operatorSet;
fullSet[2] = answer;
// Create the question
question = new Question(fullSet);
// Generating a 3 term question
} else {
// Init
var fullSet = [];
var numSet = [];
var operatorSet = [];
// Set up (random) operators
operatorSet[0] = OPERATORS[getRandomInt(operatorRange["min"], operatorRange["max"])];
operatorSet[1] = OPERATORS[getRandomInt(operatorRange["min"], operatorRange["max"])];
// Begin generation logic
var operatorCode = operatorSet[0] + operatorSet[1];
switch (operatorCode) {
// Calculate left to right (normal)
case "++":
case "+-":
case "+x":
case "+/":
case "-x":
case "-/":
case "xx":
case "x/":
var numSetL = generateSet(answer, operatorSet[0]); // #1 OP #2 OP ?
// take middle operator and expand // #1 OP (#2) OP ?
var numSetR = generateSet(numSetL[1], operatorSet[1]); // #1 OP #3 OP #4
// Assemble numSet, nsl[0] OP nsr[0] OP nsr[1]
numSet = [numSetL[0], numSetR[0], numSetR[1]];
break;
// Calculate right to left (reversed)
case "-+":
case "--":
case "x+":
case "x-":
case "/+":
case "/-":
case "/x":
case "//":
// Calculate right to left
var numSetR = generateSet(answer, operatorSet[1]); // ? OP #1 OP #2
// take middle operator and expand // ? OP (#1) OP #2
var numSetL = generateSet(numSetR[0], operatorSet[0]); // #3 OP #4 OP #2
// Assemble, nsl[0] +- nsl[1] x/ nsr[1];
numSet = [numSetL[0], numSetL[1], numSetR[1]];
break;
}
// Assemble numbers and operators
fullSet[0] = numSet;
fullSet[1] = operatorSet;
fullSet[2] = answer; // From above
// Create the question with the set
question = new Question(fullSet);
}
// Return the display object
return midLayer.addChild(question);
}
// Returns the pair of operands that produces the answer under the given operator.
function generateSet(answer, operator) {
switch (operator) {
case "+":
return generateSum(answer);
break;
case "-":
return generateMinus(answer);
break;
case "x":
return generateMultiplication(answer);
break;
case "/":
return generateDivision(answer);
break;
default:
console.log("something went wrong with generateNextQuestion()");
break;
}
}
function generateSum(answer) {
var numA = getRandomInt(0, answer);
var numB = answer - numA;
var numSet = [numA, numB]; // [[nums][operators]answer];
return numSet;
}
function generateMinus(answer) {
// TODO: The difference will always be from 1-20, might want to base it off the answer itself
var numA = getRandomInt(answer, answer + 20);
var numB = numA - answer;
var numSet = [numA, numB];
return numSet;
}
function generateMultiplication(answer) {
do {
var numA = getRandomInt(1,10);
} while (answer%numA != 0)
var numB = answer / numA;
var numSet = [numA, numB];
return numSet;
}
function generateDivision(answer) {
var numA = getRandomInt(1, 10);
var numB = answer * numA;
var numSet = [numB, numA];
return numSet;
}
// Move all objects up one position (overwriting the first)
function advanceRows(newQuestion) {
console.log("advanceRows()");
// Animations: (Individually animate each one)
// Bottom question
questions[0].animateGone();
// 2nd question
questions[1].animate1stPosition();
// 3rd question
questions[2].animate2ndPosition();
// New question
newQuestion.animate3rdPosition();
// Advance the questions internally
questions[0] = questions[1];
questions[1] = questions[2];
questions[2] = newQuestion
}
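// Replace the consumed answer tile with the newly generated one (with animations).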
function advanceAnswers(nextAnswer) {
// Animations:
// Current answer
currentAnswer.animateGone();
// Next answer
nextAnswer.animateNew();
// Advance (replace) the answer internally
answers[nextAnswer.index] = nextAnswer; // Replace parent
}
// ANSWER CHECKING
function checkAnswer(answer) {
return (answer == questions[0].answer);
}
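// Handles a correct pick: updates score and difficulty, then advances the board.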
function answerCorrect() {
// GAME-LOGIC
correct++;
correctIndicator.txt.text = correct;
updateDifficulty();
// Play sound
correctSfx();
// GAME-FUNCTIONS
advanceAnswers(generateNextAnswer()); // Create the next answer, animate, and setup
advanceRows(generateNextQuestion()); // Create the next question, animate, and setup
updateCurrentAnswer();
}
function answerIncorrect() {
// GAME-LOGIC
incorrect++;
incorrectIndicator.txt.text = incorrect;
// Play sound
incorrectSfx();
// GAME-FUNCTIONS
advanceAnswers(generateNextAnswer()); // Create the next answer, animate, and setup
advanceRows(generateNextQuestion()); // Create the next question, animate, and setup
updateCurrentAnswer();
}
// Sets the currentAnswer to the answer object for the bottom most question.
function updateCurrentAnswer() {
for (a = 0; a < answers.length; a++) {
if (checkAnswer(answers[a].answer)) {
currentAnswer = answers[a];
}
}
}
// DIFFICULTY
// Cycles difficulty
function updateDifficulty() {
difficultyController.currentCount++;
if (difficultyController.currentCount >= DIFFICULTY_COUNT) {
difficultyController.currentCount = 0;
difficultyController.nextDifficulty();
}
}
// Object that holds info about difficulty and can control it
function DifficultyController() {
this.currentCount = 0;
// [[term min, term max, op min, op max], ...]
this.difficulties = [
[2, 2, 0, 1],
[2, 2, 0, 3],
[2, 3, 0, 1],
[2, 3, 0, 3],
[3, 3, 0, 3]
]
this.index = 0;
this.nextDifficulty = function() {
// Next difficulty
if (this.index == this.difficulties.length - 1) {
this.index = 0;
} else {
this.index++;
}
// Load difficulty
termRange.min = this.difficulties[this.index][0];
termRange.max = this.difficulties[this.index][1];
operatorRange.min = this.difficulties[this.index][2];
operatorRange.max = this.difficulties[this.index][3];
}
}
// Checks if achievement is unlocked, and creates it if it can
function checkAchievement(key, imageSource) {
// Not unlocked yet, unlock now!
if (localStorage.getItem(key) != "true") {
// Prep the image
var imageFile = new Image();
imageFile.src = imageSource
// Wait until done loading
imageFile.onload = function() {
var achievement = overlayLayer.addChild(new Achievement(imageFile));
achievement.animateAchievement();
if (sfxEnabled) { var instance = createjs.Sound.play("achieved"); }
}
// Unlock the achievement
localStorage.setItem(key, "true");
}
}
| {
for (q=0; q<3; q++) {
switch (q) {
case 0:
questions[q].y = layout.MID3; // Lowest
questions[q].scaleY = 1.66;
questions[q].txt.scaleY = 1.00;
questions[q].txt.scaleX = 1.66;
break;
case 1:
questions[q].y = layout.MID2;
questions[q].txt.scaleX = questions[q].txt.scaleY = 1.00;
break;
case 2:
questions[q].y = layout.MID1; // Most upper
break;
default:
console.log("Something went wrong with loadQuestions()");
break;
}
console.log("Ques x: " + questions[q].x + " y: " + questions[q].y );
}
} | identifier_body |
practice.js | // -- PRELOAD --
// Note: Waiting for init() call
var preload = new createjs.LoadQueue();
preload.on("progress", handleOverallProgress, this);
preload.on("complete", handleComplete, this);
var manifest = [
{src: 'img/practice_bg.png', id: 'bg'},
{src: 'img/life.png', id: 'life'},
{src: 'img/no_life.png', id: 'nolife'},
{src: 'img/answer.png', id: 'ans'},
{src: 'img/button_back.png', id: 'back'}
];
function handleOverallProgress(event) {
var progressPercent = (preload.progress * 100).toFixed(2) + "%";
$("#loading-indicator").text(progressPercent);
}
function handleComplete(event) {
console.log("All files loaded");
$("#loading-div").hide();
initGame();
}
// -- END PRELOAD --
// EaselJS
var canvas, stage;
var backButton;
// Game Info
var OPERATORS = ["+", "-", "x", "/"];
var questions = [];
var answers = [];
var currentAnswer; // Note: This is an object, to access the answer value use currentAnswer.answer
var correct = 0;
var incorrect = 0;
var correctIndicator, incorrectIndicator;
// Difficulty
var DIFFICULTY_COUNT = 20; // Every 20 correct answers advances the difficulty
var difficultyController = new DifficultyController(); // Controls the difficulty
var termRange = {min: 2, max: 2}; // Only supports 2-3 terms
var operatorRange = { min: 0, max: 1}; // 0 = +, 1 = -, 2 = x, 3 = /
// Layers
var overlayLayer = new createjs.Container();
var foregroundLayer = new createjs.Container();
var midLayer = new createjs.Container(); // Only contains questions
var backgroundLayer = new createjs.Container();
// DisplayObjects
var scoreDisplay;
var timerDisplay;
var livesDisplay;
// Audio
var sfxEnabled, bgmEnabled;
// -- INITIALIZATION --
// Sets up the canvas, stage, and preloads all the assets.
function init() {
// Initialize all base variables and preload assets. Once assets are loaded, handleComplete() calls initGame().
// Canvas info
canvas = document.getElementById("canvas");
fullScreenCanvas(canvas); // Sets width and height to fill screen
// Stage info
stage = new createjs.Stage(canvas); // Creates a EaselJS Stage for drawing
stage.addChild(backgroundLayer, midLayer, foregroundLayer, overlayLayer); // Add layers
// Detection
createjs.Touch.enable(stage);
// Initialize global variables for layout and sizing
initializeVariables(canvas.width, canvas.height);
// Preload all assets (crucial for first rendering)
preload.loadManifest(manifest);
}
function initGame() {
// Audio:
sfxEnabled = (localStorage.getItem("sfx-muted") == "true") ? false : true;
bgmEnabled = (localStorage.getItem("bgm-muted") == "true") ? false : true;
createjs.Sound.initializeDefaultPlugins();
var audiopath ="sound/";
var sounds = [
{src:"game_bgm.wav", id:"bg_music"},
{src:"hitsound1.wav", id:"correct"},
{src:"miss.wav", id:"incorrect"},
{src:"failsound.mp3", id:"gameover"},
{src:"achievement.mp3", id:"achieved"}
];
createjs.Sound.addEventListener("fileload", bgm); // Will call bgm() when loaded
createjs.Sound.registerSounds(sounds, audiopath);
// Initialization:
// Background
var bgImage = preload.getResult("bg");
var background = new createjs.Bitmap(bgImage);
setScaleFactor(background, canvas.width, canvas.height);
backgroundLayer.addChild(background);
// Indicator stuff
backButton = foregroundLayer.addChild(new Button('back'));
correctIndicator = foregroundLayer.addChild(new CorrectIndicator());
incorrectIndicator = foregroundLayer.addChild(new IncorrectIndicator());
// Answers and questions (in this order)
initializeAnswers();
initializeQuestions();
updateCurrentAnswer();
// Initial positions and sizing
initializeAnswerPositions();
initializeQuestionPositions();
// Looper
createjs.Ticker.setFPS(60);
// Handles all the update logic
createjs.Ticker.on("tick", handleTick);
// Achievements
// No condition
checkAchievement(achieve.YOURTRAININGBEGINS_KEY, achieve.YOURTRAININGBEGINS_SRC);
}
// -- HANDLERS --
function handleTick(event) {
if (!event.paused) {
// Render
stage.update();
}
}
// -- METHODS --
// INITIALIZERS
function initializeAnswers() {
for (i = 0; i < 5; i++) {
var nextAnswer = generateNextAnswer();
nextAnswer.index = i; // We need the index so we can replace them properly
answers.push(nextAnswer);
}
}
function initializeQuestions() {
for (i = 0; i < 3; i++) {
questions.push(generateNextQuestion());
}
}
function initializeQuestionPositions() {
for (q=0; q<3; q++) {
switch (q) {
case 0:
questions[q].y = layout.MID3; // Lowest
questions[q].scaleY = 1.66;
questions[q].txt.scaleY = 1.00;
questions[q].txt.scaleX = 1.66;
break;
case 1:
questions[q].y = layout.MID2;
questions[q].txt.scaleX = questions[q].txt.scaleY = 1.00;
break;
case 2:
questions[q].y = layout.MID1; // Most upper
break;
default:
console.log("Something went wrong with loadQuestions()");
break;
}
console.log("Ques x: " + questions[q].x + " y: " + questions[q].y );
}
}
function initializeAnswerPositions() {
for (a = 0; a < 5; a++) {
// x and y of the CENTER of the container. (not top left)
answers[a].x = (properties.ANS_SIZE / 2) + (a)*(properties.ANS_SIZE);
console.log("Ans x: " + answers[a].x + " y: " + answers[a].y);
}
}
// AUDIO
function bgm(event){
console.log("Audio loaded");
if(bgmEnabled){
var instance = createjs.Sound.play("bg_music", { loop: -1 });
}
}
function correctSfx() {
if (sfxEnabled) { var instance = createjs.Sound.play("correct"); }
}
function incorrectSfx() {
if (sfxEnabled) { var instance = createjs.Sound.play("incorrect"); }
}
function gameoverSfx() {
if (sfxEnabled) { var instance = createjs.Sound.play("gameover"); }
}
function buttonSound() {
if (sfxEnabled) { var sound = new Audio("buttonSound"); }
}
// GAME LOGIC
// Creates the next answer
function generateNextAnswer() {
var randInt;
// Loop until there is no overlap
outer:
while (true) {
// Get the new value
randInt = getRandomInt(1, 20);
// Check if it exists already
for (j = 0; j < answers.length; j++) {
if (answers[j].answer == randInt) {
continue outer; // Yes - retry
}
}
// No - return the value
break;
}
// Create the next answer
// Finalize setup
var nextAnswer = foregroundLayer.addChild(new Answer(randInt)); // Create the actual object
if (currentAnswer != null) {
nextAnswer.index = currentAnswer.index; // Pass on parent index
nextAnswer.x = currentAnswer.x; // Pass on parent position
}
// Return the displayObject
return nextAnswer;
}
// Gathers all the necessary info before generating the next question
function prepareNextQuestion() {
// Obtain information about the current board
var availableArray = [];
// Note: foreach loop not working very well
for (a=0; a<answers.length; a++) {
if (answers[a].available == true) {
availableArray.push(answers[a]);
}
}
// Select one of the available numbers
var randAvailable = availableArray[getRandomInt(0, availableArray.length - 1)];
// Toggle availibility off
randAvailable.available = false;
// Retrieve the answer
return randAvailable.answer;
}
function generateNextQuestion() {
// Init the question
var question;
// Retrieve the next answer
var answer = prepareNextQuestion();
// Generate a 2 or 3 term question
var randTerm = getRandomInt(termRange["min"], termRange["max"]);
// Generating a 2 term question
if (randTerm == 2) {
// Initialize the pieces (operands, operators, answer)
var fullSet = [];
var numSet = [];
var operatorSet = [];
// Calculate the pieces
operatorSet[0] = OPERATORS[getRandomInt(operatorRange["min"], operatorRange["max"])];
numSet = generateSet(answer, operatorSet[0]);
// Assemble all the pieces
fullSet[0] = numSet;
fullSet[1] = operatorSet;
fullSet[2] = answer;
// Create the question
question = new Question(fullSet);
// Generating a 3 term question
} else {
// Init
var fullSet = [];
var numSet = [];
var operatorSet = [];
// Set up (random) operators
operatorSet[0] = OPERATORS[getRandomInt(operatorRange["min"], operatorRange["max"])];
operatorSet[1] = OPERATORS[getRandomInt(operatorRange["min"], operatorRange["max"])];
// Begin generation logic
var operatorCode = operatorSet[0] + operatorSet[1];
switch (operatorCode) {
// Calculate left to right (normal)
case "++":
case "+-":
case "+x":
case "+/":
case "-x":
case "-/":
case "xx":
case "x/":
var numSetL = generateSet(answer, operatorSet[0]); // #1 OP #2 OP ?
// take middle operator and expand // #1 OP (#2) OP ?
var numSetR = generateSet(numSetL[1], operatorSet[1]); // #1 OP #3 OP #4
// Assemble numSet, nsl[0] OP nsr[0] OP nsr[1]
numSet = [numSetL[0], numSetR[0], numSetR[1]];
break;
// Calculate right to left (reversed)
case "-+":
case "--":
case "x+":
case "x-":
case "/+":
case "/-":
case "/x":
case "//":
// Calculate right to left
var numSetR = generateSet(answer, operatorSet[1]); // ? OP #1 OP #2
// take middle operator and expand // ? OP (#1) OP #2
var numSetL = generateSet(numSetR[0], operatorSet[0]); // #3 OP #4 OP #2
// Assemble, nsl[0] +- nsl[1] x/ nsr[1];
numSet = [numSetL[0], numSetL[1], numSetR[1]];
break;
}
// Assemble numbers and operators
fullSet[0] = numSet;
fullSet[1] = operatorSet;
fullSet[2] = answer; // From above
// Create the question with the set
question = new Question(fullSet);
}
// Return the display object
return midLayer.addChild(question);
}
// Returns the pair of operands that produces the answer under the given operator.
function generateSet(answer, operator) {
switch (operator) {
case "+":
return generateSum(answer);
break;
case "-":
return generateMinus(answer);
break;
case "x":
return generateMultiplication(answer);
break;
case "/":
return generateDivision(answer);
break;
default:
console.log("something went wrong with generateNextQuestion()");
break;
}
}
function generateSum(answer) {
var numA = getRandomInt(0, answer);
var numB = answer - numA;
var numSet = [numA, numB]; // [[nums][operators]answer];
return numSet;
}
function generateMinus(answer) {
// TODO: The difference will always be from 1-20, might want to base it off the answer itself
var numA = getRandomInt(answer, answer + 20);
var numB = numA - answer;
var numSet = [numA, numB];
return numSet;
}
function generateMultiplication(answer) {
do | while (answer%numA != 0)
var numB = answer / numA;
var numSet = [numA, numB];
return numSet;
}
function generateDivision(answer) {
var numA = getRandomInt(1, 10);
var numB = answer * numA;
var numSet = [numB, numA];
return numSet;
}
// Move all objects up one position (overwriting the first)
function advanceRows(newQuestion) {
console.log("advanceRows()");
// Animations: (Individually animate each one)
// Bottom question
questions[0].animateGone();
// 2nd question
questions[1].animate1stPosition();
// 3rd question
questions[2].animate2ndPosition();
// New question
newQuestion.animate3rdPosition();
// Advance the questions internally
questions[0] = questions[1];
questions[1] = questions[2];
questions[2] = newQuestion
}
function advanceAnswers(nextAnswer) {
// Animations:
// Current answer
currentAnswer.animateGone();
// Next answer
nextAnswer.animateNew();
// Advance (replace) the answer internally
answers[nextAnswer.index] = nextAnswer; // Replace parent
}
// ANSWER CHECKING
function checkAnswer(answer) {
return (answer == questions[0].answer);
}
function answerCorrect() {
// GAME-LOGIC
correct++;
correctIndicator.txt.text = correct;
updateDifficulty();
// Play sound
correctSfx();
// GAME-FUNCTIONS
advanceAnswers(generateNextAnswer()); // Create the next answer, animate, and setup
advanceRows(generateNextQuestion()); // Create the next question, animate, and setup
updateCurrentAnswer();
}
function answerIncorrect() {
// GAME-LOGIC
incorrect++;
incorrectIndicator.txt.text = incorrect;
// Play sound
incorrectSfx();
// GAME-FUNCTIONS
advanceAnswers(generateNextAnswer()); // Create the next answer, animate, and setup
advanceRows(generateNextQuestion()); // Create the next question, animate, and setup
updateCurrentAnswer();
}
// Sets the currentAnswer to the answer object for the bottommost question.
function updateCurrentAnswer() {
for (a = 0; a < answers.length; a++) {
if (checkAnswer(answers[a].answer)) {
currentAnswer = answers[a];
}
}
}
// DIFFICULTY
// Cycles difficulty
function updateDifficulty() {
difficultyController.currentCount++;
if (difficultyController.currentCount >= DIFFICULTY_COUNT) {
difficultyController.currentCount = 0;
difficultyController.nextDifficulty();
}
}
// Object that holds info about difficulty and can control it
function DifficultyController() {
this.currentCount = 0;
// [[term min, term max, op min, op max], ...]
this.difficulties = [
[2, 2, 0, 1],
[2, 2, 0, 3],
[2, 3, 0, 1],
[2, 3, 0, 3],
[3, 3, 0, 3]
]
this.index = 0;
this.nextDifficulty = function() {
// Next difficulty
if (this.index == this.difficulties.length - 1) {
this.index = 0;
} else {
this.index++;
}
// Load difficulty
termRange.min = this.difficulties[this.index][0];
termRange.max = this.difficulties[this.index][1];
operatorRange.min = this.difficulties[this.index][2];
operatorRange.max = this.difficulties[this.index][3];
}
}
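// --- Illustrative sketch (not part of the original game code) ---
// How the table above is consumed: every DIFFICULTY_COUNT correct answers,
// updateDifficulty() calls nextDifficulty(), which advances the index, copies
// that row into termRange/operatorRange, and wraps back to row 0 after the
// last row. For example:
function _simulateCorrectAnswers(n) {
    for (var i = 0; i < n; i++) {
        updateDifficulty();
    }
    // With DIFFICULTY_COUNT = 20, _simulateCorrectAnswers(100) advances the
    // controller five times, so it has cycled through every row and is back
    // on difficulties[0] (2-term questions using only + and -).
}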
// Checks if achievement is unlocked, and creates it if it can
function checkAchievement(key, imageSource) {
// Not unlocked yet, unlock now!
if (localStorage.getItem(key) != "true") {
// Prep the image
var imageFile = new Image();
imageFile.src = imageSource
// Wait until done loading
imageFile.onload = function() {
var achievement = overlayLayer.addChild(new Achievement(imageFile));
achievement.animateAchievement();
if (sfxEnabled) { var instance = createjs.Sound.play("achieved"); }
}
// Unlock the achievement
localStorage.setItem(key, "true");
}
}
| {
var numA = getRandomInt(1,10);
} | conditional_block |
practice.js | // -- PRELOAD --
// Note: Waiting for init() call
var preload = new createjs.LoadQueue();
preload.on("progress", handleOverallProgress, this);
preload.on("complete", handleComplete, this);
var manifest = [
{src: 'img/practice_bg.png', id: 'bg'},
{src: 'img/life.png', id: 'life'},
{src: 'img/no_life.png', id: 'nolife'},
{src: 'img/answer.png', id: 'ans'},
{src: 'img/button_back.png', id: 'back'}
];
function handleOverallProgress(event) {
var progressPercent = (preload.progress * 100).toFixed(2) + "%";
$("#loading-indicator").text(progressPercent);
}
function handleComplete(event) {
console.log("All files loaded");
$("#loading-div").hide();
initGame();
}
// -- END PRELOAD --
// EaselJS
var canvas, stage;
var backButton;
// Game Info
var OPERATORS = ["+", "-", "x", "/"];
var questions = [];
var answers = [];
var currentAnswer; // Note: This is an object, to access the answer value use currentAnswer.answer
var correct = 0;
var incorrect = 0;
var correctIndicator, incorrectIndicator;
// Difficulty
var DIFFICULTY_COUNT = 20; // Difficulty progresses every DIFFICULTY_COUNT (20) correct answers
var difficultyController = new DifficultyController(); // Controls the difficulty
var termRange = {min: 2, max: 2}; // Only supports 2-3 terms
var operatorRange = { min: 0, max: 1}; // 0 = +, 1 = -, 2 = x, 3 = /
// Layers
var overlayLayer = new createjs.Container();
var foregroundLayer = new createjs.Container();
var midLayer = new createjs.Container(); // Only contains questions
var backgroundLayer = new createjs.Container();
// DisplayObjects
var scoreDisplay;
var timerDisplay;
var livesDisplay;
// Audio
var sfxEnabled, bgmEnabled;
// -- INITIALIZATION --
// Sets up the canvas, stage, and preloads all the assets.
function init() {
// Initialize all base variables and preload assets. Once assets are loaded it will call initGame().
// Canvas info
canvas = document.getElementById("canvas");
fullScreenCanvas(canvas); // Sets width and height to fill screen
// Stage info
stage = new createjs.Stage(canvas); // Creates an EaselJS Stage for drawing
stage.addChild(backgroundLayer, midLayer, foregroundLayer, overlayLayer); // Add layers
// Detection
createjs.Touch.enable(stage);
// Initialize global variables for layout and sizing
initializeVariables(canvas.width, canvas.height);
// Preload all assets (crucial for first rendering)
preload.loadManifest(manifest);
}
function initGame() {
// Audio:
sfxEnabled = (localStorage.getItem("sfx-muted") == "true") ? false : true;
bgmEnabled = (localStorage.getItem("bgm-muted") == "true") ? false : true;
createjs.Sound.initializeDefaultPlugins();
var audiopath ="sound/";
var sounds = [
{src:"game_bgm.wav", id:"bg_music"},
{src:"hitsound1.wav", id:"correct"},
{src:"miss.wav", id:"incorrect"},
{src:"failsound.mp3", id:"gameover"},
{src:"achievement.mp3", id:"achieved"}
];
createjs.Sound.addEventListener("fileload", bgm); // Will call bgm() when loaded
createjs.Sound.registerSounds(sounds, audiopath);
// Initialization:
// Background
var bgImage = preload.getResult("bg");
var background = new createjs.Bitmap(bgImage);
setScaleFactor(background, canvas.width, canvas.height);
backgroundLayer.addChild(background);
// Indicator stuff
backButton = foregroundLayer.addChild(new Button('back'));
correctIndicator = foregroundLayer.addChild(new CorrectIndicator());
incorrectIndicator = foregroundLayer.addChild(new IncorrectIndicator());
// Answers and questions (in this order)
initializeAnswers();
initializeQuestions();
updateCurrentAnswer();
// Initial positions and sizing
initializeAnswerPositions();
initializeQuestionPositions();
// Looper
createjs.Ticker.setFPS(60);
// Handles all the update logic
createjs.Ticker.on("tick", handleTick);
// Achievements
// No condition
checkAchievement(achieve.YOURTRAININGBEGINS_KEY, achieve.YOURTRAININGBEGINS_SRC);
}
// -- HANDLERS --
function handleTick(event) {
if (!event.paused) {
// Render
stage.update();
}
}
// -- METHODS --
// INITIALIZERS
function initializeAnswers() {
for (i = 0; i < 5; i++) {
var nextAnswer = generateNextAnswer();
nextAnswer.index = i; // We need the index so we can replace them properly
answers.push(nextAnswer);
}
}
function initializeQuestions() {
for (i = 0; i < 3; i++) {
questions.push(generateNextQuestion());
}
}
function initializeQuestionPositions() {
for (q=0; q<3; q++) {
switch (q) {
case 0:
questions[q].y = layout.MID3; // Lowest
questions[q].scaleY = 1.66;
questions[q].txt.scaleY = 1.00;
questions[q].txt.scaleX = 1.66;
break;
case 1:
questions[q].y = layout.MID2;
questions[q].txt.scaleX = questions[q].txt.scaleY = 1.00;
break;
case 2:
questions[q].y = layout.MID1; // Most upper
break;
default:
console.log("Something went wrong with initializeQuestionPositions()");
break;
}
console.log("Ques x: " + questions[q].x + " y: " + questions[q].y );
}
}
function initializeAnswerPositions() {
for (a = 0; a < 5; a++) {
// x and y of the CENTER of the container. (not top left)
answers[a].x = (properties.ANS_SIZE / 2) + (a)*(properties.ANS_SIZE);
console.log("Ans x: " + answers[a].x + " y: " + answers[a].y);
}
}
// AUDIO
function bgm(event){
console.log("Audio loaded");
if(bgmEnabled){
var instance = createjs.Sound.play("bg_music", { loop: -1 });
}
}
function correctSfx() {
if (sfxEnabled) { var instance = createjs.Sound.play("correct"); }
}
function incorrectSfx() {
if (sfxEnabled) { var instance = createjs.Sound.play("incorrect"); }
}
function gameoverSfx() {
if (sfxEnabled) { var instance = createjs.Sound.play("gameover"); }
}
function buttonSound() {
if (sfxEnabled) { var sound = new Audio("buttonSound"); }
}
// GAME LOGIC
// Creates the next answer
function generateNextAnswer() {
var randInt;
// Loop until there is no overlap
outer:
while (true) {
// Get the new value
randInt = getRandomInt(1, 20);
// Check if it exists already
for (j = 0; j < answers.length; j++) {
if (answers[j].answer == randInt) {
continue outer; // Yes - retry
}
}
// No - return the value
break;
}
// Create the next answer
// Finalize setup
var nextAnswer = foregroundLayer.addChild(new Answer(randInt)); // Create the actual object
if (currentAnswer != null) {
nextAnswer.index = currentAnswer.index; // Pass on parent index
nextAnswer.x = currentAnswer.x; // Pass on parent position
}
// Return the displayObject
return nextAnswer;
}
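// --- Illustrative sketch (not part of the original game code) ---
// generateNextAnswer() above is rejection sampling with a labelled continue:
// it keeps drawing a value in 1-20 until the draw collides with none of the
// answers already on the board. The same idea as a self-contained helper:
function _pickUnique(existing, min, max) {
    var candidate;
    do {
        candidate = Math.floor(Math.random() * (max - min + 1)) + min;
    } while (existing.indexOf(candidate) !== -1);
    return candidate;
}
// e.g. _pickUnique([3, 7, 12], 1, 20) never returns 3, 7 or 12.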
// Gathers all the necessary info before generating the next question
function prepareNextQuestion() {
// Obtain information about the current board
var availableArray = [];
// Note: foreach loop not working very well
for (a=0; a<answers.length; a++) {
if (answers[a].available == true) {
availableArray.push(answers[a]);
}
}
// Select one of the available numbers
var randAvailable = availableArray[getRandomInt(0, availableArray.length - 1)];
// Toggle availability off
randAvailable.available = false;
// Retrieve the answer
return randAvailable.answer;
}
function generateNextQuestion() {
// Init the question
var question;
// Retrieve the next answer
var answer = prepareNextQuestion();
// Generate a 2 or 3 term question
var randTerm = getRandomInt(termRange["min"], termRange["max"]);
// Generating a 2 term question
if (randTerm == 2) {
// Initialize the pieces (operands, operators, answer)
var fullSet = [];
var numSet = [];
var operatorSet = [];
// Calculate the pieces
operatorSet[0] = OPERATORS[getRandomInt(operatorRange["min"], operatorRange["max"])];
numSet = generateSet(answer, operatorSet[0]);
// Assemble all the pieces
fullSet[0] = numSet;
fullSet[1] = operatorSet;
fullSet[2] = answer;
// Create the question
question = new Question(fullSet);
// Generating a 3 term question
} else {
// Init
var fullSet = [];
var numSet = [];
var operatorSet = [];
// Set up (random) operators
operatorSet[0] = OPERATORS[getRandomInt(operatorRange["min"], operatorRange["max"])];
operatorSet[1] = OPERATORS[getRandomInt(operatorRange["min"], operatorRange["max"])];
// Begin generation logic
var operatorCode = operatorSet[0] + operatorSet[1];
switch (operatorCode) {
// Calculate left to right (normal)
case "++":
case "+-":
case "+x":
case "+/":
case "-x":
case "-/":
case "xx":
case "x/":
var numSetL = generateSet(answer, operatorSet[0]); // #1 OP #2 OP ?
// take middle operator and expand // #1 OP (#2) OP ?
var numSetR = generateSet(numSetL[1], operatorSet[1]); // #1 OP #3 OP #4
// Assemble numSet, nsl[0] OP nsr[0] OP nsr[1]
numSet = [numSetL[0], numSetR[0], numSetR[1]];
break;
// Calculate right to left (reversed)
case "-+":
case "--":
case "x+":
case "x-":
case "/+":
case "/-":
case "/x":
case "//":
// Calculate right to left
var numSetR = generateSet(answer, operatorSet[1]); // ? OP #1 OP #2
// take middle operator and expand // ? OP (#1) OP #2
var numSetL = generateSet(numSetR[0], operatorSet[0]); // #3 OP #4 OP #2
// Assemble, nsl[0] +- nsl[1] x/ nsr[1];
numSet = [numSetL[0], numSetL[1], numSetR[1]];
break;
}
// Assemble numbers and operators
fullSet[0] = numSet;
fullSet[1] = operatorSet;
fullSet[2] = answer; // From above
// Create the question with the set
question = new Question(fullSet);
}
// Return the display object
return midLayer.addChild(question);
}
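// --- Illustrative worked example (not part of the original game code) ---
// Three-term generation above expands one operand of a two-term split.
// Suppose the target answer is 10 and the drawn operators are "+" and "x"
// (operatorCode "+x", handled in the first case group):
//   generateSet(10, "+") might return [4, 6]   -> 4 + (6) x ?
//   generateSet(6, "x")  might return [3, 2]   -> 4 + 3 x 2
// numSet becomes [4, 3, 2], and the question "4 + 3 x 2" still targets 10
// because the expanded pair (3 x 2) reproduces the operand it replaced (6).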
// Returns the pair of operand numbers for the question, derived from the answer and operator.
function generateSet(answer, operator) {
switch (operator) {
case "+":
return generateSum(answer);
break;
case "-":
return generateMinus(answer);
break;
case "x":
return generateMultiplication(answer);
break;
case "/":
return generateDivision(answer);
break;
default:
console.log("something went wrong with generateSet()");
break;
}
}
function generateSum(answer) {
var numA = getRandomInt(0, answer);
var numB = answer - numA;
var numSet = [numA, numB]; // [[nums][operators]answer];
return numSet;
}
function generateMinus(answer) {
// TODO: The difference will always be from 1-20, might want to base it off the answer itself
var numA = getRandomInt(answer, answer + 20);
var numB = numA - answer;
var numSet = [numA, numB];
return numSet;
}
function generateMultiplication(answer) {
do {
var numA = getRandomInt(1,10);
} while (answer%numA != 0)
var numB = answer / numA;
var numSet = [numA, numB];
return numSet;
}
function generateDivision(answer) {
var numA = getRandomInt(1, 10);
var numB = answer * numA;
var numSet = [numB, numA];
return numSet;
}
// Move all objects up one position (overwriting the first)
function advanceRows(newQuestion) {
console.log("advanceRows()");
// Animations: (Individually animate each one)
// Bottom question
questions[0].animateGone();
// 2nd question
questions[1].animate1stPosition();
// 3rd question
questions[2].animate2ndPosition();
// New question
newQuestion.animate3rdPosition();
// Advance the questions internally
questions[0] = questions[1];
questions[1] = questions[2];
questions[2] = newQuestion
}
function advanceAnswers(nextAnswer) {
// Animations:
// Current answer
currentAnswer.animateGone();
// Next answer
nextAnswer.animateNew();
// Advance (replace) the answer internally
answers[nextAnswer.index] = nextAnswer; // Replace parent
}
// ANSWER CHECKING
function | (answer) {
return (answer == questions[0].answer);
}
function answerCorrect() {
// GAME-LOGIC
correct++;
correctIndicator.txt.text = correct;
updateDifficulty();
// Play sound
correctSfx();
// GAME-FUNCTIONS
advanceAnswers(generateNextAnswer()); // Create the next answer, animate, and setup
advanceRows(generateNextQuestion()); // Create the next question, animate, and setup
updateCurrentAnswer();
}
function answerIncorrect() {
// GAME-LOGIC
incorrect++;
incorrectIndicator.txt.text = incorrect;
// Play sound
incorrectSfx();
// GAME-FUNCTIONS
advanceAnswers(generateNextAnswer()); // Create the next answer, animate, and setup
advanceRows(generateNextQuestion()); // Create the next question, animate, and setup
updateCurrentAnswer();
}
// Sets the currentAnswer to the answer object for the bottommost question.
function updateCurrentAnswer() {
for (a = 0; a < answers.length; a++) {
if (checkAnswer(answers[a].answer)) {
currentAnswer = answers[a];
}
}
}
// DIFFICULTY
// Cycles difficulty
function updateDifficulty() {
difficultyController.currentCount++;
if (difficultyController.currentCount >= DIFFICULTY_COUNT) {
difficultyController.currentCount = 0;
difficultyController.nextDifficulty();
}
}
// Object that holds info about difficulty and can control it
function DifficultyController() {
this.currentCount = 0;
// [[term min, term max, op min, op max], ...]
this.difficulties = [
[2, 2, 0, 1],
[2, 2, 0, 3],
[2, 3, 0, 1],
[2, 3, 0, 3],
[3, 3, 0, 3]
]
this.index = 0;
this.nextDifficulty = function() {
// Next difficulty
if (this.index == this.difficulties.length - 1) {
this.index = 0;
} else {
this.index++;
}
// Load difficulty
termRange.min = this.difficulties[this.index][0];
termRange.max = this.difficulties[this.index][1];
operatorRange.min = this.difficulties[this.index][2];
operatorRange.max = this.difficulties[this.index][3];
}
}
// Checks if achievement is unlocked, and creates it if it can
function checkAchievement(key, imageSource) {
// Not unlocked yet, unlock now!
if (localStorage.getItem(key) != "true") {
// Prep the image
var imageFile = new Image();
imageFile.src = imageSource
// Wait until done loading
imageFile.onload = function() {
var achievement = overlayLayer.addChild(new Achievement(imageFile));
achievement.animateAchievement();
if (sfxEnabled) { var instance = createjs.Sound.play("achieved"); }
}
// Unlock the achievement
localStorage.setItem(key, "true");
}
}
| checkAnswer | identifier_name |
practice.js | // -- PRELOAD --
// Note: Waiting for init() call
var preload = new createjs.LoadQueue();
preload.on("progress", handleOverallProgress, this);
preload.on("complete", handleComplete, this);
var manifest = [
{src: 'img/practice_bg.png', id: 'bg'},
{src: 'img/life.png', id: 'life'},
{src: 'img/no_life.png', id: 'nolife'},
{src: 'img/answer.png', id: 'ans'},
{src: 'img/button_back.png', id: 'back'}
];
function handleOverallProgress(event) {
var progressPercent = (preload.progress * 100).toFixed(2) + "%";
$("#loading-indicator").text(progressPercent);
}
function handleComplete(event) {
console.log("All files loaded");
$("#loading-div").hide();
initGame();
}
// -- END PRELOAD --
// EaselJS
var canvas, stage;
var backButton;
// Game Info
var OPERATORS = ["+", "-", "x", "/"];
var questions = [];
var answers = [];
var currentAnswer; // Note: This is an object, to access the answer value use currentAnswer.answer
var correct = 0;
var incorrect = 0;
var correctIndicator, incorrectIndicator;
// Difficulty
var DIFFICULTY_COUNT = 20; // Difficulty progresses every DIFFICULTY_COUNT (20) correct answers
var difficultyController = new DifficultyController(); // Controls the difficulty
var termRange = {min: 2, max: 2}; // Only supports 2-3 terms
var operatorRange = { min: 0, max: 1}; // 0 = +, 1 = -, 2 = x, 3 = /
// Layers
var overlayLayer = new createjs.Container();
var foregroundLayer = new createjs.Container();
var midLayer = new createjs.Container(); // Only contains questions
var backgroundLayer = new createjs.Container();
// DisplayObjects
var scoreDisplay;
var timerDisplay;
var livesDisplay;
// Audio
var sfxEnabled, bgmEnabled;
// -- INITIALIZATION --
// Sets up the canvas, stage, and preloads all the assets.
function init() {
// Initialize all base variables and preload assets. Once assets are loaded it will call initGame().
// Canvas info
canvas = document.getElementById("canvas");
fullScreenCanvas(canvas); // Sets width and height to fill screen
// Stage info
stage = new createjs.Stage(canvas); // Creates an EaselJS Stage for drawing
stage.addChild(backgroundLayer, midLayer, foregroundLayer, overlayLayer); // Add layers
// Detection
createjs.Touch.enable(stage);
// Initialize global variables for layout and sizing
initializeVariables(canvas.width, canvas.height);
// Preload all assets (crucial for first rendering)
preload.loadManifest(manifest);
}
function initGame() {
// Audio:
sfxEnabled = (localStorage.getItem("sfx-muted") == "true") ? false : true;
bgmEnabled = (localStorage.getItem("bgm-muted") == "true") ? false : true;
createjs.Sound.initializeDefaultPlugins();
var audiopath ="sound/";
var sounds = [
{src:"game_bgm.wav", id:"bg_music"},
{src:"hitsound1.wav", id:"correct"},
{src:"miss.wav", id:"incorrect"},
{src:"failsound.mp3", id:"gameover"},
{src:"achievement.mp3", id:"achieved"}
];
createjs.Sound.addEventListener("fileload", bgm); // Will call bgm() when loaded
createjs.Sound.registerSounds(sounds, audiopath);
// Initialization:
// Background
var bgImage = preload.getResult("bg");
var background = new createjs.Bitmap(bgImage);
setScaleFactor(background, canvas.width, canvas.height);
backgroundLayer.addChild(background);
// Indicator stuff
backButton = foregroundLayer.addChild(new Button('back'));
correctIndicator = foregroundLayer.addChild(new CorrectIndicator());
incorrectIndicator = foregroundLayer.addChild(new IncorrectIndicator());
// Answers and questions (in this order)
initializeAnswers();
initializeQuestions();
updateCurrentAnswer();
// Initial positions and sizing
initializeAnswerPositions();
initializeQuestionPositions();
// Looper
createjs.Ticker.setFPS(60);
// Handles all the update logic
createjs.Ticker.on("tick", handleTick);
// Achievements
// No condition
checkAchievement(achieve.YOURTRAININGBEGINS_KEY, achieve.YOURTRAININGBEGINS_SRC);
}
// -- HANDLERS --
function handleTick(event) {
if (!event.paused) {
// Render
stage.update();
}
}
// -- METHODS --
// INITIALIZERS
function initializeAnswers() {
for (i = 0; i < 5; i++) {
var nextAnswer = generateNextAnswer();
nextAnswer.index = i; // We need the index so we can replace them properly
answers.push(nextAnswer);
}
}
function initializeQuestions() {
for (i = 0; i < 3; i++) {
questions.push(generateNextQuestion());
}
}
function initializeQuestionPositions() {
for (q=0; q<3; q++) {
switch (q) {
case 0:
questions[q].y = layout.MID3; // Lowest
questions[q].scaleY = 1.66;
questions[q].txt.scaleY = 1.00;
questions[q].txt.scaleX = 1.66;
break;
case 1:
questions[q].y = layout.MID2;
questions[q].txt.scaleX = questions[q].txt.scaleY = 1.00;
break;
case 2:
questions[q].y = layout.MID1; // Most upper
break;
default:
console.log("Something went wrong with initializeQuestionPositions()");
break;
}
console.log("Ques x: " + questions[q].x + " y: " + questions[q].y );
}
}
function initializeAnswerPositions() {
for (a = 0; a < 5; a++) {
// x and y of the CENTER of the container. (not top left)
answers[a].x = (properties.ANS_SIZE / 2) + (a)*(properties.ANS_SIZE);
console.log("Ans x: " + answers[a].x + " y: " + answers[a].y);
}
}
// AUDIO
function bgm(event){
console.log("Audio loaded");
if(bgmEnabled){
var instance = createjs.Sound.play("bg_music", { loop: -1 });
}
}
function correctSfx() {
if (sfxEnabled) { var instance = createjs.Sound.play("correct"); }
}
function incorrectSfx() {
if (sfxEnabled) { var instance = createjs.Sound.play("incorrect"); }
}
function gameoverSfx() {
if (sfxEnabled) { var instance = createjs.Sound.play("gameover"); }
}
function buttonSound() {
if (sfxEnabled) { var sound = new Audio("buttonSound"); }
}
// GAME LOGIC
// Creates the next answer
function generateNextAnswer() {
var randInt;
// Loop until there is no overlap
outer:
while (true) {
// Get the new value
randInt = getRandomInt(1, 20);
// Check if it exists already
for (j = 0; j < answers.length; j++) {
if (answers[j].answer == randInt) {
continue outer; // Yes - retry
}
}
// No - return the value
break;
}
// Create the next answer
// Finalize setup
var nextAnswer = foregroundLayer.addChild(new Answer(randInt)); // Create the actual object
if (currentAnswer != null) {
nextAnswer.index = currentAnswer.index; // Pass on parent index
nextAnswer.x = currentAnswer.x; // Pass on parent position
}
// Return the displayObject
return nextAnswer;
}
| function prepareNextQuestion() {
// Obtain information about the current board
var availableArray = [];
// Note: foreach loop not working very well
for (a=0; a<answers.length; a++) {
if (answers[a].available == true) {
availableArray.push(answers[a]);
}
}
// Select one of the available numbers
var randAvailable = availableArray[getRandomInt(0, availableArray.length - 1)];
// Toggle availability off
randAvailable.available = false;
// Retrieve the answer
return randAvailable.answer;
}
function generateNextQuestion() {
// Init the question
var question;
// Retrieve the next answer
var answer = prepareNextQuestion();
// Generate a 2 or 3 term question
var randTerm = getRandomInt(termRange["min"], termRange["max"]);
// Generating a 2 term question
if (randTerm == 2) {
// Initialize the pieces (operands, operators, answer)
var fullSet = [];
var numSet = [];
var operatorSet = [];
// Calculate the pieces
operatorSet[0] = OPERATORS[getRandomInt(operatorRange["min"], operatorRange["max"])];
numSet = generateSet(answer, operatorSet[0]);
// Assemble all the pieces
fullSet[0] = numSet;
fullSet[1] = operatorSet;
fullSet[2] = answer;
// Create the question
question = new Question(fullSet);
// Generating a 3 term question
} else {
// Init
var fullSet = [];
var numSet = [];
var operatorSet = [];
// Set up (random) operators
operatorSet[0] = OPERATORS[getRandomInt(operatorRange["min"], operatorRange["max"])];
operatorSet[1] = OPERATORS[getRandomInt(operatorRange["min"], operatorRange["max"])];
// Begin generation logic
var operatorCode = operatorSet[0] + operatorSet[1];
switch (operatorCode) {
// Calculate left to right (normal)
case "++":
case "+-":
case "+x":
case "+/":
case "-x":
case "-/":
case "xx":
case "x/":
var numSetL = generateSet(answer, operatorSet[0]); // #1 OP #2 OP ?
// take middle operator and expand // #1 OP (#2) OP ?
var numSetR = generateSet(numSetL[1], operatorSet[1]); // #1 OP #3 OP #4
// Assemble numSet, nsl[0] OP nsr[0] OP nsr[1]
numSet = [numSetL[0], numSetR[0], numSetR[1]];
break;
// Calculate right to left (reversed)
case "-+":
case "--":
case "x+":
case "x-":
case "/+":
case "/-":
case "/x":
case "//":
// Calculate right to left
var numSetR = generateSet(answer, operatorSet[1]); // ? OP #1 OP #2
// take middle operator and expand // ? OP (#1) OP #2
var numSetL = generateSet(numSetR[0], operatorSet[0]); // #3 OP #4 OP #2
// Assemble, nsl[0] +- nsl[1] x/ nsr[1];
numSet = [numSetL[0], numSetL[1], numSetR[1]];
break;
}
// Assemble numbers and operators
fullSet[0] = numSet;
fullSet[1] = operatorSet;
fullSet[2] = answer; // From above
// Create the question with the set
question = new Question(fullSet);
}
// Return the display object
return midLayer.addChild(question);
}
// Returns the pair of operand numbers for the question, derived from the answer and operator.
function generateSet(answer, operator) {
switch (operator) {
case "+":
return generateSum(answer);
break;
case "-":
return generateMinus(answer);
break;
case "x":
return generateMultiplication(answer);
break;
case "/":
return generateDivision(answer);
break;
default:
console.log("something went wrong with generateSet()");
break;
}
}
function generateSum(answer) {
var numA = getRandomInt(0, answer);
var numB = answer - numA;
var numSet = [numA, numB]; // [[nums][operators]answer];
return numSet;
}
function generateMinus(answer) {
// TODO: The difference will always be from 1-20, might want to base it off the answer itself
var numA = getRandomInt(answer, answer + 20);
var numB = numA - answer;
var numSet = [numA, numB];
return numSet;
}
function generateMultiplication(answer) {
do {
var numA = getRandomInt(1,10);
} while (answer%numA != 0)
var numB = answer / numA;
var numSet = [numA, numB];
return numSet;
}
function generateDivision(answer) {
var numA = getRandomInt(1, 10);
var numB = answer * numA;
var numSet = [numB, numA];
return numSet;
}
// Move all objects up one position (overwriting the first)
function advanceRows(newQuestion) {
console.log("advanceRows()");
// Animations: (Individually animate each one)
// Bottom question
questions[0].animateGone();
// 2nd question
questions[1].animate1stPosition();
// 3rd question
questions[2].animate2ndPosition();
// New question
newQuestion.animate3rdPosition();
// Advance the questions internally
questions[0] = questions[1];
questions[1] = questions[2];
questions[2] = newQuestion
}
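// --- Illustrative sketch (not part of the original game code) ---
// advanceRows() treats the questions array as a fixed-size queue of three:
// the bottom entry is discarded, the other two shift down a slot, and the
// freshly generated question takes the top slot. The same rotation on plain
// values:
function _rotateIn(queue, newest) {
    queue[0] = queue[1];
    queue[1] = queue[2];
    queue[2] = newest;
    return queue;
}
// _rotateIn(["a", "b", "c"], "d") -> ["b", "c", "d"]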
function advanceAnswers(nextAnswer) {
// Animations:
// Current answer
currentAnswer.animateGone();
// Next answer
nextAnswer.animateNew();
// Advance (replace) the answer internally
answers[nextAnswer.index] = nextAnswer; // Replace parent
}
// ANSWER CHECKING
function checkAnswer(answer) {
return (answer == questions[0].answer);
}
function answerCorrect() {
// GAME-LOGIC
correct++;
correctIndicator.txt.text = correct;
updateDifficulty();
// Play sound
correctSfx();
// GAME-FUNCTIONS
advanceAnswers(generateNextAnswer()); // Create the next answer, animate, and setup
advanceRows(generateNextQuestion()); // Create the next question, animate, and setup
updateCurrentAnswer();
}
function answerIncorrect() {
// GAME-LOGIC
incorrect++;
incorrectIndicator.txt.text = incorrect;
// Play sound
incorrectSfx();
// GAME-FUNCTIONS
advanceAnswers(generateNextAnswer()); // Create the next answer, animate, and setup
advanceRows(generateNextQuestion()); // Create the next question, animate, and setup
updateCurrentAnswer();
}
// Sets the currentAnswer to the answer object for the bottommost question.
function updateCurrentAnswer() {
for (a = 0; a < answers.length; a++) {
if (checkAnswer(answers[a].answer)) {
currentAnswer = answers[a];
}
}
}
// DIFFICULTY
// Cycles difficulty
function updateDifficulty() {
difficultyController.currentCount++;
if (difficultyController.currentCount >= DIFFICULTY_COUNT) {
difficultyController.currentCount = 0;
difficultyController.nextDifficulty();
}
}
// Object that holds info about difficulty and can control it
function DifficultyController() {
this.currentCount = 0;
// [[term min, term max, op min, op max], ...]
this.difficulties = [
[2, 2, 0, 1],
[2, 2, 0, 3],
[2, 3, 0, 1],
[2, 3, 0, 3],
[3, 3, 0, 3]
]
this.index = 0;
this.nextDifficulty = function() {
// Next difficulty
if (this.index == this.difficulties.length - 1) {
this.index = 0;
} else {
this.index++;
}
// Load difficulty
termRange.min = this.difficulties[this.index][0];
termRange.max = this.difficulties[this.index][1];
operatorRange.min = this.difficulties[this.index][2];
operatorRange.max = this.difficulties[this.index][3];
}
}
// Checks if achievement is unlocked, and creates it if it can
function checkAchievement(key, imageSource) {
// Not unlocked yet, unlock now!
if (localStorage.getItem(key) != "true") {
// Prep the image
var imageFile = new Image();
imageFile.src = imageSource
// Wait until done loading
imageFile.onload = function() {
var achievement = overlayLayer.addChild(new Achievement(imageFile));
achievement.animateAchievement();
if (sfxEnabled) { var instance = createjs.Sound.play("achieved"); }
}
// Unlock the achievement
localStorage.setItem(key, "true");
}
} | // Gathers are all the necessary info before generating the next answer | random_line_split |
in_memory.rs | // Copyright (c) SimpleStaking, Viable Systems and Tezedge Contributors
// SPDX-License-Identifier: MIT
//! Implementation of an in-memory repository.
use std::{
borrow::Cow,
collections::{hash_map::DefaultHasher, BTreeMap, VecDeque},
hash::Hasher,
mem::size_of,
sync::{atomic::Ordering, Arc},
thread::JoinHandle,
};
use crossbeam_channel::Sender;
use crypto::hash::ContextHash;
use tezos_timing::RepositoryMemoryUsage;
use crate::{
gc::{
worker::{Command, Cycles, GCThread, GC_PENDING_HASHIDS, PRESERVE_CYCLE_COUNT},
GarbageCollectionError, GarbageCollector,
},
hash::ObjectHash,
persistent::{DBError, Flushable, KeyValueStoreBackend, Persistable},
working_tree::{
shape::{DirectoryShapeId, DirectoryShapes, ShapeStrings},
storage::DirEntryId,
string_interner::{StringId, StringInterner},
},
Map,
};
use tezos_spsc::Consumer;
use super::{index_map::IndexMap, HashIdError};
use super::{HashId, VacantObjectHash};
#[derive(Debug)]
pub struct HashValueStore {
hashes: IndexMap<HashId, ObjectHash>,
values: IndexMap<HashId, Option<Arc<[u8]>>>,
free_ids: Option<Consumer<HashId>>,
new_ids: Vec<HashId>,
values_bytes: usize,
}
impl HashValueStore {
pub(crate) fn new<T>(consumer: T) -> Self
where
T: Into<Option<Consumer<HashId>>>,
{
Self {
hashes: IndexMap::new(),
values: IndexMap::new(),
free_ids: consumer.into(),
new_ids: Vec::with_capacity(1024),
values_bytes: 0,
}
}
pub fn get_memory_usage(&self) -> RepositoryMemoryUsage {
let values_bytes = self.values_bytes;
let values_capacity = self.values.capacity();
let hashes_capacity = self.hashes.capacity();
let total_bytes = values_bytes
.saturating_add(values_capacity * size_of::<Option<Arc<[u8]>>>())
.saturating_add(values_capacity * 16) // Each `Arc` has 16 extra bytes for the counters
.saturating_add(hashes_capacity * size_of::<ObjectHash>());
RepositoryMemoryUsage {
values_bytes,
values_capacity,
values_length: self.values.len(),
hashes_capacity,
hashes_length: self.hashes.len(),
total_bytes,
npending_free_ids: self.free_ids.as_ref().map(|c| c.len()).unwrap_or(0),
gc_npending_free_ids: GC_PENDING_HASHIDS.load(Ordering::Acquire),
nshapes: 0,
}
}
pub(crate) fn clear(&mut self) {
*self = Self {
hashes: IndexMap::new(),
values: IndexMap::new(),
free_ids: self.free_ids.take(),
new_ids: Vec::new(),
values_bytes: 0,
}
}
pub(crate) fn get_vacant_object_hash(&mut self) -> Result<VacantObjectHash, HashIdError> {
let (hash_id, entry) = if let Some(free_id) = self.get_free_id() {
if let Some(old_value) = self.values.set(free_id, None)? {
self.values_bytes = self.values_bytes.saturating_sub(old_value.len());
}
(free_id, self.hashes.get_mut(free_id)?.ok_or(HashIdError)?)
} else {
self.hashes.get_vacant_entry()?
};
self.new_ids.push(hash_id);
Ok(VacantObjectHash {
entry: Some(entry),
hash_id,
})
} | self.free_ids.as_mut()?.pop().ok()
}
pub(crate) fn insert_value_at(
&mut self,
hash_id: HashId,
value: Arc<[u8]>,
) -> Result<(), HashIdError> {
self.values_bytes = self.values_bytes.saturating_add(value.len());
if let Some(old) = self.values.insert_at(hash_id, Some(value))? {
self.values_bytes = self.values_bytes.saturating_sub(old.len());
}
Ok(())
}
pub(crate) fn get_hash(&self, hash_id: HashId) -> Result<Option<&ObjectHash>, HashIdError> {
self.hashes.get(hash_id)
}
pub(crate) fn get_value(&self, hash_id: HashId) -> Result<Option<&[u8]>, HashIdError> {
match self.values.get(hash_id)? {
Some(value) => Ok(value.as_ref().map(|v| v.as_ref())),
None => Ok(None),
}
}
pub(crate) fn contains(&self, hash_id: HashId) -> Result<bool, HashIdError> {
Ok(self.values.get(hash_id)?.unwrap_or(&None).is_some())
}
fn take_new_ids(&mut self) -> Vec<HashId> {
let new_ids = self.new_ids.clone();
self.new_ids.clear();
new_ids
}
}
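// --- Illustrative sketch (not part of the original code) ---
// Typical lifecycle of a hash/value pair in `HashValueStore`: reserve a vacant
// slot, write the `ObjectHash` into it, then attach the serialized value under
// the returned `HashId`. A minimal sketch (the `hash` and `payload` arguments
// are assumed to be supplied by the caller):
#[cfg(test)]
fn _hash_value_store_roundtrip(hash: ObjectHash, payload: Arc<[u8]>) -> Result<(), HashIdError> {
    let mut store = HashValueStore::new(None::<Consumer<HashId>>); // no free-id consumer wired up
    let vacant = store.get_vacant_object_hash()?;
    let hash_id = vacant.write_with(|entry| *entry = hash);
    store.insert_value_at(hash_id, payload)?;
    assert!(store.contains(hash_id)?);
    Ok(())
}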
pub struct InMemory {
current_cycle: BTreeMap<HashId, Option<Arc<[u8]>>>,
pub hashes: HashValueStore,
sender: Option<Sender<Command>>,
pub context_hashes: Map<u64, HashId>,
context_hashes_cycles: VecDeque<Vec<u64>>,
thread_handle: Option<JoinHandle<()>>,
shapes: DirectoryShapes,
string_interner: StringInterner,
}
impl GarbageCollector for InMemory {
fn new_cycle_started(&mut self) -> Result<(), GarbageCollectionError> {
self.new_cycle_started();
Ok(())
}
fn block_applied(
&mut self,
referenced_older_objects: Vec<HashId>,
) -> Result<(), GarbageCollectionError> {
self.block_applied(referenced_older_objects);
Ok(())
}
}
impl Flushable for InMemory {
fn flush(&self) -> Result<(), anyhow::Error> {
Ok(())
}
}
impl Persistable for InMemory {
fn is_persistent(&self) -> bool {
false
}
}
impl KeyValueStoreBackend for InMemory {
fn write_batch(&mut self, batch: Vec<(HashId, Arc<[u8]>)>) -> Result<(), DBError> {
self.write_batch(batch)
}
fn contains(&self, hash_id: HashId) -> Result<bool, DBError> {
self.contains(hash_id)
}
fn put_context_hash(&mut self, hash_id: HashId) -> Result<(), DBError> {
self.put_context_hash_impl(hash_id)
}
fn get_context_hash(&self, context_hash: &ContextHash) -> Result<Option<HashId>, DBError> {
Ok(self.get_context_hash_impl(context_hash))
}
fn get_hash(&self, hash_id: HashId) -> Result<Option<Cow<ObjectHash>>, DBError> {
Ok(self.get_hash(hash_id)?.map(Cow::Borrowed))
}
fn get_value(&self, hash_id: HashId) -> Result<Option<Cow<[u8]>>, DBError> {
Ok(self.get_value(hash_id)?.map(Cow::Borrowed))
}
fn get_vacant_object_hash(&mut self) -> Result<VacantObjectHash, DBError> {
self.get_vacant_entry_hash()
}
fn clear_objects(&mut self) -> Result<(), DBError> {
// `InMemory` has its own garbage collection
Ok(())
}
fn memory_usage(&self) -> RepositoryMemoryUsage {
let mut mem = self.hashes.get_memory_usage();
mem.nshapes = self.shapes.nshapes();
mem
}
fn get_shape(&self, shape_id: DirectoryShapeId) -> Result<ShapeStrings, DBError> {
self.shapes
.get_shape(shape_id)
.map(ShapeStrings::SliceIds)
.map_err(Into::into)
}
fn make_shape(
&mut self,
dir: &[(StringId, DirEntryId)],
) -> Result<Option<DirectoryShapeId>, DBError> {
self.shapes.make_shape(dir).map_err(Into::into)
}
fn synchronize_strings(&mut self, string_interner: &StringInterner) -> Result<(), DBError> {
self.string_interner.extend_from(string_interner);
Ok(())
}
fn get_str(&self, string_id: StringId) -> Option<&str> {
self.string_interner.get(string_id)
}
}
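// --- Illustrative note (not part of the original code) ---
// Directory shapes and interned strings are shared with the working tree:
// make_shape() deduplicates a directory's (StringId, DirEntryId) layout into a
// DirectoryShapeId handle, get_shape() resolves it back to its string ids, and
// synchronize_strings() merges the working tree's StringInterner into the
// repository copy so that get_str() keeps resolving StringId lookups.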
impl InMemory {
pub fn try_new() -> Result<Self, std::io::Error> {
// TODO - TE-210: Remove once we have proper support for history modes.
let garbage_collector_disabled = std::env::var("DISABLE_INMEM_CONTEXT_GC")
.unwrap_or_else(|_| "false".to_string())
.parse::<bool>()
.expect("Provided `DISABLE_INMEM_CONTEXT_GC` value cannot be converted to bool");
let (sender, cons, thread_handle) = if garbage_collector_disabled {
(None, None, None)
} else {
let (sender, recv) = crossbeam_channel::unbounded();
let (prod, cons) = tezos_spsc::bounded(2_000_000);
let thread_handle = std::thread::Builder::new()
.name("ctx-inmem-gc-thread".to_string())
.spawn(move || {
GCThread {
cycles: Cycles::default(),
recv,
free_ids: prod,
pending: Vec::new(),
}
.run()
})?;
(Some(sender), Some(cons), Some(thread_handle))
};
let current_cycle = Default::default();
let hashes = HashValueStore::new(cons);
let context_hashes = Default::default();
let mut context_hashes_cycles = VecDeque::with_capacity(PRESERVE_CYCLE_COUNT);
for _ in 0..PRESERVE_CYCLE_COUNT {
context_hashes_cycles.push_back(Default::default())
}
Ok(Self {
current_cycle,
hashes,
sender,
context_hashes,
context_hashes_cycles,
thread_handle,
shapes: DirectoryShapes::default(),
string_interner: StringInterner::default(),
})
}
pub(crate) fn get_vacant_entry_hash(&mut self) -> Result<VacantObjectHash, DBError> {
self.hashes.get_vacant_object_hash().map_err(Into::into)
}
pub(crate) fn get_hash(&self, hash_id: HashId) -> Result<Option<&ObjectHash>, DBError> {
self.hashes.get_hash(hash_id).map_err(Into::into)
}
pub(crate) fn get_value(&self, hash_id: HashId) -> Result<Option<&[u8]>, DBError> {
self.hashes.get_value(hash_id).map_err(Into::into)
}
fn contains(&self, hash_id: HashId) -> Result<bool, DBError> {
self.hashes.contains(hash_id).map_err(Into::into)
}
pub fn write_batch(&mut self, batch: Vec<(HashId, Arc<[u8]>)>) -> Result<(), DBError> {
for (hash_id, value) in batch {
self.hashes.insert_value_at(hash_id, Arc::clone(&value))?;
self.current_cycle.insert(hash_id, Some(value));
}
Ok(())
}
pub fn new_cycle_started(&mut self) {
if let Some(sender) = &self.sender {
let values_in_cycle = std::mem::take(&mut self.current_cycle);
let new_ids = self.hashes.take_new_ids();
if let Err(e) = sender.try_send(Command::StartNewCycle {
values_in_cycle,
new_ids,
}) {
eprintln!("Fail to send Command::StartNewCycle to GC worker: {:?}", e);
}
if let Some(unused) = self.context_hashes_cycles.pop_front() {
for hash in unused {
self.context_hashes.remove(&hash);
}
}
self.context_hashes_cycles.push_back(Default::default());
}
}
pub fn block_applied(&mut self, reused: Vec<HashId>) {
if let Some(sender) = &self.sender {
if let Err(e) = sender.send(Command::MarkReused { reused }) {
eprintln!("Fail to send Command::MarkReused to GC worker: {:?}", e);
}
}
}
pub fn get_context_hash_impl(&self, context_hash: &ContextHash) -> Option<HashId> {
let mut hasher = DefaultHasher::new();
hasher.write(context_hash.as_ref());
let hashed = hasher.finish();
self.context_hashes.get(&hashed).cloned()
}
pub fn put_context_hash_impl(&mut self, commit_hash_id: HashId) -> Result<(), DBError> {
let commit_hash = self
.hashes
.get_hash(commit_hash_id)?
.ok_or(DBError::MissingObject {
hash_id: commit_hash_id,
})?;
let mut hasher = DefaultHasher::new();
hasher.write(&commit_hash[..]);
let hashed = hasher.finish();
self.context_hashes.insert(hashed, commit_hash_id);
if let Some(back) = self.context_hashes_cycles.back_mut() {
back.push(hashed);
};
Ok(())
}
#[cfg(test)]
pub(crate) fn put_object_hash(&mut self, entry_hash: ObjectHash) -> HashId {
let vacant = self.get_vacant_entry_hash().unwrap();
vacant.write_with(|entry| *entry = entry_hash)
}
}
impl Drop for InMemory {
fn drop(&mut self) {
let sender = match self.sender.take() {
Some(sender) => sender,
None => return,
};
if let Err(e) = sender.send(Command::Close) {
eprintln!("Fail to send Command::Close to GC worker: {:?}", e);
return;
}
let thread_handle = match self.thread_handle.take() {
Some(thread_handle) => thread_handle,
None => return,
};
if let Err(e) = thread_handle.join() {
eprintln!("Fail to join GC worker thread: {:?}", e);
}
}
} |
fn get_free_id(&mut self) -> Option<HashId> { | random_line_split |
in_memory.rs | // Copyright (c) SimpleStaking, Viable Systems and Tezedge Contributors
// SPDX-License-Identifier: MIT
//! Implementation of an in-memory repository.
use std::{
borrow::Cow,
collections::{hash_map::DefaultHasher, BTreeMap, VecDeque},
hash::Hasher,
mem::size_of,
sync::{atomic::Ordering, Arc},
thread::JoinHandle,
};
use crossbeam_channel::Sender;
use crypto::hash::ContextHash;
use tezos_timing::RepositoryMemoryUsage;
use crate::{
gc::{
worker::{Command, Cycles, GCThread, GC_PENDING_HASHIDS, PRESERVE_CYCLE_COUNT},
GarbageCollectionError, GarbageCollector,
},
hash::ObjectHash,
persistent::{DBError, Flushable, KeyValueStoreBackend, Persistable},
working_tree::{
shape::{DirectoryShapeId, DirectoryShapes, ShapeStrings},
storage::DirEntryId,
string_interner::{StringId, StringInterner},
},
Map,
};
use tezos_spsc::Consumer;
use super::{index_map::IndexMap, HashIdError};
use super::{HashId, VacantObjectHash};
#[derive(Debug)]
pub struct HashValueStore {
hashes: IndexMap<HashId, ObjectHash>,
values: IndexMap<HashId, Option<Arc<[u8]>>>,
free_ids: Option<Consumer<HashId>>,
new_ids: Vec<HashId>,
values_bytes: usize,
}
impl HashValueStore {
pub(crate) fn new<T>(consumer: T) -> Self
where
T: Into<Option<Consumer<HashId>>>,
{
Self {
hashes: IndexMap::new(),
values: IndexMap::new(),
free_ids: consumer.into(),
new_ids: Vec::with_capacity(1024),
values_bytes: 0,
}
}
pub fn get_memory_usage(&self) -> RepositoryMemoryUsage {
let values_bytes = self.values_bytes;
let values_capacity = self.values.capacity();
let hashes_capacity = self.hashes.capacity();
let total_bytes = values_bytes
.saturating_add(values_capacity * size_of::<Option<Arc<[u8]>>>())
.saturating_add(values_capacity * 16) // Each `Arc` has 16 extra bytes for the counters
.saturating_add(hashes_capacity * size_of::<ObjectHash>());
RepositoryMemoryUsage {
values_bytes,
values_capacity,
values_length: self.values.len(),
hashes_capacity,
hashes_length: self.hashes.len(),
total_bytes,
npending_free_ids: self.free_ids.as_ref().map(|c| c.len()).unwrap_or(0),
gc_npending_free_ids: GC_PENDING_HASHIDS.load(Ordering::Acquire),
nshapes: 0,
}
}
pub(crate) fn clear(&mut self) {
*self = Self {
hashes: IndexMap::new(),
values: IndexMap::new(),
free_ids: self.free_ids.take(),
new_ids: Vec::new(),
values_bytes: 0,
}
}
pub(crate) fn get_vacant_object_hash(&mut self) -> Result<VacantObjectHash, HashIdError> {
let (hash_id, entry) = if let Some(free_id) = self.get_free_id() {
if let Some(old_value) = self.values.set(free_id, None)? {
self.values_bytes = self.values_bytes.saturating_sub(old_value.len());
}
(free_id, self.hashes.get_mut(free_id)?.ok_or(HashIdError)?)
} else {
self.hashes.get_vacant_entry()?
};
self.new_ids.push(hash_id);
Ok(VacantObjectHash {
entry: Some(entry),
hash_id,
})
}
fn get_free_id(&mut self) -> Option<HashId> {
self.free_ids.as_mut()?.pop().ok()
}
pub(crate) fn insert_value_at(
&mut self,
hash_id: HashId,
value: Arc<[u8]>,
) -> Result<(), HashIdError> {
self.values_bytes = self.values_bytes.saturating_add(value.len());
if let Some(old) = self.values.insert_at(hash_id, Some(value))? {
self.values_bytes = self.values_bytes.saturating_sub(old.len());
}
Ok(())
}
pub(crate) fn get_hash(&self, hash_id: HashId) -> Result<Option<&ObjectHash>, HashIdError> {
self.hashes.get(hash_id)
}
pub(crate) fn get_value(&self, hash_id: HashId) -> Result<Option<&[u8]>, HashIdError> {
match self.values.get(hash_id)? {
Some(value) => Ok(value.as_ref().map(|v| v.as_ref())),
None => Ok(None),
}
}
pub(crate) fn contains(&self, hash_id: HashId) -> Result<bool, HashIdError> {
Ok(self.values.get(hash_id)?.unwrap_or(&None).is_some())
}
fn take_new_ids(&mut self) -> Vec<HashId> {
let new_ids = self.new_ids.clone();
self.new_ids.clear();
new_ids
}
}
pub struct InMemory {
current_cycle: BTreeMap<HashId, Option<Arc<[u8]>>>,
pub hashes: HashValueStore,
sender: Option<Sender<Command>>,
pub context_hashes: Map<u64, HashId>,
context_hashes_cycles: VecDeque<Vec<u64>>,
thread_handle: Option<JoinHandle<()>>,
shapes: DirectoryShapes,
string_interner: StringInterner,
}
impl GarbageCollector for InMemory {
fn new_cycle_started(&mut self) -> Result<(), GarbageCollectionError> {
self.new_cycle_started();
Ok(())
}
fn block_applied(
&mut self,
referenced_older_objects: Vec<HashId>,
) -> Result<(), GarbageCollectionError> {
self.block_applied(referenced_older_objects);
Ok(())
}
}
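// --- Illustrative note (not part of the original code) ---
// The two GarbageCollector hooks above map directly onto messages for the
// background GC thread: new_cycle_started() snapshots the values written in
// the current cycle and sends Command::StartNewCycle, while block_applied()
// forwards the HashIds a freshly applied block still references as
// Command::MarkReused so the worker keeps them out of the free list.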
impl Flushable for InMemory {
fn flush(&self) -> Result<(), anyhow::Error> {
Ok(())
}
}
impl Persistable for InMemory {
fn is_persistent(&self) -> bool {
false
}
}
impl KeyValueStoreBackend for InMemory {
fn write_batch(&mut self, batch: Vec<(HashId, Arc<[u8]>)>) -> Result<(), DBError> {
self.write_batch(batch)
}
fn contains(&self, hash_id: HashId) -> Result<bool, DBError> {
self.contains(hash_id)
}
fn put_context_hash(&mut self, hash_id: HashId) -> Result<(), DBError> {
self.put_context_hash_impl(hash_id)
}
fn get_context_hash(&self, context_hash: &ContextHash) -> Result<Option<HashId>, DBError> {
Ok(self.get_context_hash_impl(context_hash))
}
fn get_hash(&self, hash_id: HashId) -> Result<Option<Cow<ObjectHash>>, DBError> {
Ok(self.get_hash(hash_id)?.map(Cow::Borrowed))
}
fn get_value(&self, hash_id: HashId) -> Result<Option<Cow<[u8]>>, DBError> {
Ok(self.get_value(hash_id)?.map(Cow::Borrowed))
}
fn get_vacant_object_hash(&mut self) -> Result<VacantObjectHash, DBError> {
self.get_vacant_entry_hash()
}
fn clear_objects(&mut self) -> Result<(), DBError> {
// `InMemory` has its own garbage collection
Ok(())
}
fn memory_usage(&self) -> RepositoryMemoryUsage {
let mut mem = self.hashes.get_memory_usage();
mem.nshapes = self.shapes.nshapes();
mem
}
fn get_shape(&self, shape_id: DirectoryShapeId) -> Result<ShapeStrings, DBError> {
self.shapes
.get_shape(shape_id)
.map(ShapeStrings::SliceIds)
.map_err(Into::into)
}
fn make_shape(
&mut self,
dir: &[(StringId, DirEntryId)],
) -> Result<Option<DirectoryShapeId>, DBError> {
self.shapes.make_shape(dir).map_err(Into::into)
}
fn synchronize_strings(&mut self, string_interner: &StringInterner) -> Result<(), DBError> {
self.string_interner.extend_from(string_interner);
Ok(())
}
fn get_str(&self, string_id: StringId) -> Option<&str> {
self.string_interner.get(string_id)
}
}
impl InMemory {
pub fn try_new() -> Result<Self, std::io::Error> {
// TODO - TE-210: Remove once we have proper support for history modes.
let garbage_collector_disabled = std::env::var("DISABLE_INMEM_CONTEXT_GC")
.unwrap_or_else(|_| "false".to_string())
.parse::<bool>()
.expect("Provided `DISABLE_INMEM_CONTEXT_GC` value cannot be converted to bool");
let (sender, cons, thread_handle) = if garbage_collector_disabled {
(None, None, None)
} else {
let (sender, recv) = crossbeam_channel::unbounded();
let (prod, cons) = tezos_spsc::bounded(2_000_000);
let thread_handle = std::thread::Builder::new()
.name("ctx-inmem-gc-thread".to_string())
.spawn(move || {
GCThread {
cycles: Cycles::default(),
recv,
free_ids: prod,
pending: Vec::new(),
}
.run()
})?;
(Some(sender), Some(cons), Some(thread_handle))
};
let current_cycle = Default::default();
let hashes = HashValueStore::new(cons);
let context_hashes = Default::default();
let mut context_hashes_cycles = VecDeque::with_capacity(PRESERVE_CYCLE_COUNT);
for _ in 0..PRESERVE_CYCLE_COUNT {
context_hashes_cycles.push_back(Default::default())
}
Ok(Self {
current_cycle,
hashes,
sender,
context_hashes,
context_hashes_cycles,
thread_handle,
shapes: DirectoryShapes::default(),
string_interner: StringInterner::default(),
})
}
pub(crate) fn get_vacant_entry_hash(&mut self) -> Result<VacantObjectHash, DBError> {
self.hashes.get_vacant_object_hash().map_err(Into::into)
}
pub(crate) fn get_hash(&self, hash_id: HashId) -> Result<Option<&ObjectHash>, DBError> {
self.hashes.get_hash(hash_id).map_err(Into::into)
}
pub(crate) fn get_value(&self, hash_id: HashId) -> Result<Option<&[u8]>, DBError> {
self.hashes.get_value(hash_id).map_err(Into::into)
}
fn contains(&self, hash_id: HashId) -> Result<bool, DBError> {
self.hashes.contains(hash_id).map_err(Into::into)
}
pub fn write_batch(&mut self, batch: Vec<(HashId, Arc<[u8]>)>) -> Result<(), DBError> {
for (hash_id, value) in batch {
self.hashes.insert_value_at(hash_id, Arc::clone(&value))?;
self.current_cycle.insert(hash_id, Some(value));
}
Ok(())
}
pub fn new_cycle_started(&mut self) {
if let Some(sender) = &self.sender {
let values_in_cycle = std::mem::take(&mut self.current_cycle);
let new_ids = self.hashes.take_new_ids();
if let Err(e) = sender.try_send(Command::StartNewCycle {
values_in_cycle,
new_ids,
}) {
eprintln!("Fail to send Command::StartNewCycle to GC worker: {:?}", e);
}
if let Some(unused) = self.context_hashes_cycles.pop_front() {
for hash in unused {
self.context_hashes.remove(&hash);
}
}
self.context_hashes_cycles.push_back(Default::default());
}
}
pub fn block_applied(&mut self, reused: Vec<HashId>) {
if let Some(sender) = &self.sender {
if let Err(e) = sender.send(Command::MarkReused { reused }) {
eprintln!("Fail to send Command::MarkReused to GC worker: {:?}", e);
}
}
}
pub fn get_context_hash_impl(&self, context_hash: &ContextHash) -> Option<HashId> |
pub fn put_context_hash_impl(&mut self, commit_hash_id: HashId) -> Result<(), DBError> {
let commit_hash = self
.hashes
.get_hash(commit_hash_id)?
.ok_or(DBError::MissingObject {
hash_id: commit_hash_id,
})?;
let mut hasher = DefaultHasher::new();
hasher.write(&commit_hash[..]);
let hashed = hasher.finish();
self.context_hashes.insert(hashed, commit_hash_id);
if let Some(back) = self.context_hashes_cycles.back_mut() {
back.push(hashed);
};
Ok(())
}
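// --- Illustrative note (not part of the original code) ---
// Context hashes are not used verbatim as map keys: both get_context_hash_impl
// and put_context_hash_impl above run the ContextHash bytes through a
// DefaultHasher and index `context_hashes` by the resulting u64, trading an
// unlikely collision for a smaller key. The `context_hashes_cycles` queue
// records which u64 keys were added in each cycle so that new_cycle_started()
// can evict the oldest cycle's entries wholesale.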
#[cfg(test)]
pub(crate) fn put_object_hash(&mut self, entry_hash: ObjectHash) -> HashId {
let vacant = self.get_vacant_entry_hash().unwrap();
vacant.write_with(|entry| *entry = entry_hash)
}
}
impl Drop for InMemory {
fn drop(&mut self) {
let sender = match self.sender.take() {
Some(sender) => sender,
None => return,
};
if let Err(e) = sender.send(Command::Close) {
eprintln!("Fail to send Command::Close to GC worker: {:?}", e);
return;
}
let thread_handle = match self.thread_handle.take() {
Some(thread_handle) => thread_handle,
None => return,
};
if let Err(e) = thread_handle.join() {
eprintln!("Fail to join GC worker thread: {:?}", e);
}
}
}
| {
let mut hasher = DefaultHasher::new();
hasher.write(context_hash.as_ref());
let hashed = hasher.finish();
self.context_hashes.get(&hashed).cloned()
} | identifier_body |
in_memory.rs | // Copyright (c) SimpleStaking, Viable Systems and Tezedge Contributors
// SPDX-License-Identifier: MIT
//! Implementation of an in-memory repository.
use std::{
borrow::Cow,
collections::{hash_map::DefaultHasher, BTreeMap, VecDeque},
hash::Hasher,
mem::size_of,
sync::{atomic::Ordering, Arc},
thread::JoinHandle,
};
use crossbeam_channel::Sender;
use crypto::hash::ContextHash;
use tezos_timing::RepositoryMemoryUsage;
use crate::{
gc::{
worker::{Command, Cycles, GCThread, GC_PENDING_HASHIDS, PRESERVE_CYCLE_COUNT},
GarbageCollectionError, GarbageCollector,
},
hash::ObjectHash,
persistent::{DBError, Flushable, KeyValueStoreBackend, Persistable},
working_tree::{
shape::{DirectoryShapeId, DirectoryShapes, ShapeStrings},
storage::DirEntryId,
string_interner::{StringId, StringInterner},
},
Map,
};
use tezos_spsc::Consumer;
use super::{index_map::IndexMap, HashIdError};
use super::{HashId, VacantObjectHash};
#[derive(Debug)]
pub struct HashValueStore {
hashes: IndexMap<HashId, ObjectHash>,
values: IndexMap<HashId, Option<Arc<[u8]>>>,
free_ids: Option<Consumer<HashId>>,
new_ids: Vec<HashId>,
values_bytes: usize,
}
impl HashValueStore {
pub(crate) fn new<T>(consumer: T) -> Self
where
T: Into<Option<Consumer<HashId>>>,
{
Self {
hashes: IndexMap::new(),
values: IndexMap::new(),
free_ids: consumer.into(),
new_ids: Vec::with_capacity(1024),
values_bytes: 0,
}
}
pub fn | (&self) -> RepositoryMemoryUsage {
let values_bytes = self.values_bytes;
let values_capacity = self.values.capacity();
let hashes_capacity = self.hashes.capacity();
let total_bytes = values_bytes
.saturating_add(values_capacity * size_of::<Option<Arc<[u8]>>>())
.saturating_add(values_capacity * 16) // Each `Arc` has 16 extra bytes for the counters
.saturating_add(hashes_capacity * size_of::<ObjectHash>());
RepositoryMemoryUsage {
values_bytes,
values_capacity,
values_length: self.values.len(),
hashes_capacity,
hashes_length: self.hashes.len(),
total_bytes,
npending_free_ids: self.free_ids.as_ref().map(|c| c.len()).unwrap_or(0),
gc_npending_free_ids: GC_PENDING_HASHIDS.load(Ordering::Acquire),
nshapes: 0,
}
}
pub(crate) fn clear(&mut self) {
*self = Self {
hashes: IndexMap::new(),
values: IndexMap::new(),
free_ids: self.free_ids.take(),
new_ids: Vec::new(),
values_bytes: 0,
}
}
pub(crate) fn get_vacant_object_hash(&mut self) -> Result<VacantObjectHash, HashIdError> {
let (hash_id, entry) = if let Some(free_id) = self.get_free_id() {
if let Some(old_value) = self.values.set(free_id, None)? {
self.values_bytes = self.values_bytes.saturating_sub(old_value.len());
}
(free_id, self.hashes.get_mut(free_id)?.ok_or(HashIdError)?)
} else {
self.hashes.get_vacant_entry()?
};
self.new_ids.push(hash_id);
Ok(VacantObjectHash {
entry: Some(entry),
hash_id,
})
}
fn get_free_id(&mut self) -> Option<HashId> {
self.free_ids.as_mut()?.pop().ok()
}
pub(crate) fn insert_value_at(
&mut self,
hash_id: HashId,
value: Arc<[u8]>,
) -> Result<(), HashIdError> {
self.values_bytes = self.values_bytes.saturating_add(value.len());
if let Some(old) = self.values.insert_at(hash_id, Some(value))? {
self.values_bytes = self.values_bytes.saturating_sub(old.len());
}
Ok(())
}
pub(crate) fn get_hash(&self, hash_id: HashId) -> Result<Option<&ObjectHash>, HashIdError> {
self.hashes.get(hash_id)
}
pub(crate) fn get_value(&self, hash_id: HashId) -> Result<Option<&[u8]>, HashIdError> {
match self.values.get(hash_id)? {
Some(value) => Ok(value.as_ref().map(|v| v.as_ref())),
None => Ok(None),
}
}
pub(crate) fn contains(&self, hash_id: HashId) -> Result<bool, HashIdError> {
Ok(self.values.get(hash_id)?.unwrap_or(&None).is_some())
}
fn take_new_ids(&mut self) -> Vec<HashId> {
let new_ids = self.new_ids.clone();
self.new_ids.clear();
new_ids
}
}
pub struct InMemory {
current_cycle: BTreeMap<HashId, Option<Arc<[u8]>>>,
pub hashes: HashValueStore,
sender: Option<Sender<Command>>,
pub context_hashes: Map<u64, HashId>,
context_hashes_cycles: VecDeque<Vec<u64>>,
thread_handle: Option<JoinHandle<()>>,
shapes: DirectoryShapes,
string_interner: StringInterner,
}
impl GarbageCollector for InMemory {
fn new_cycle_started(&mut self) -> Result<(), GarbageCollectionError> {
self.new_cycle_started();
Ok(())
}
fn block_applied(
&mut self,
referenced_older_objects: Vec<HashId>,
) -> Result<(), GarbageCollectionError> {
self.block_applied(referenced_older_objects);
Ok(())
}
}
impl Flushable for InMemory {
fn flush(&self) -> Result<(), anyhow::Error> {
Ok(())
}
}
impl Persistable for InMemory {
fn is_persistent(&self) -> bool {
false
}
}
impl KeyValueStoreBackend for InMemory {
fn write_batch(&mut self, batch: Vec<(HashId, Arc<[u8]>)>) -> Result<(), DBError> {
self.write_batch(batch)
}
fn contains(&self, hash_id: HashId) -> Result<bool, DBError> {
self.contains(hash_id)
}
fn put_context_hash(&mut self, hash_id: HashId) -> Result<(), DBError> {
self.put_context_hash_impl(hash_id)
}
fn get_context_hash(&self, context_hash: &ContextHash) -> Result<Option<HashId>, DBError> {
Ok(self.get_context_hash_impl(context_hash))
}
fn get_hash(&self, hash_id: HashId) -> Result<Option<Cow<ObjectHash>>, DBError> {
Ok(self.get_hash(hash_id)?.map(Cow::Borrowed))
}
fn get_value(&self, hash_id: HashId) -> Result<Option<Cow<[u8]>>, DBError> {
Ok(self.get_value(hash_id)?.map(Cow::Borrowed))
}
fn get_vacant_object_hash(&mut self) -> Result<VacantObjectHash, DBError> {
self.get_vacant_entry_hash()
}
fn clear_objects(&mut self) -> Result<(), DBError> {
// `InMemory` has its own garbage collection
Ok(())
}
fn memory_usage(&self) -> RepositoryMemoryUsage {
let mut mem = self.hashes.get_memory_usage();
mem.nshapes = self.shapes.nshapes();
mem
}
fn get_shape(&self, shape_id: DirectoryShapeId) -> Result<ShapeStrings, DBError> {
self.shapes
.get_shape(shape_id)
.map(ShapeStrings::SliceIds)
.map_err(Into::into)
}
fn make_shape(
&mut self,
dir: &[(StringId, DirEntryId)],
) -> Result<Option<DirectoryShapeId>, DBError> {
self.shapes.make_shape(dir).map_err(Into::into)
}
fn synchronize_strings(&mut self, string_interner: &StringInterner) -> Result<(), DBError> {
self.string_interner.extend_from(string_interner);
Ok(())
}
fn get_str(&self, string_id: StringId) -> Option<&str> {
self.string_interner.get(string_id)
}
}
impl InMemory {
pub fn try_new() -> Result<Self, std::io::Error> {
        // TODO - TE-210: Remove once we have proper support for history modes.
let garbage_collector_disabled = std::env::var("DISABLE_INMEM_CONTEXT_GC")
.unwrap_or_else(|_| "false".to_string())
.parse::<bool>()
.expect("Provided `DISABLE_INMEM_CONTEXT_GC` value cannot be converted to bool");
let (sender, cons, thread_handle) = if garbage_collector_disabled {
(None, None, None)
} else {
let (sender, recv) = crossbeam_channel::unbounded();
let (prod, cons) = tezos_spsc::bounded(2_000_000);
let thread_handle = std::thread::Builder::new()
.name("ctx-inmem-gc-thread".to_string())
.spawn(move || {
GCThread {
cycles: Cycles::default(),
recv,
free_ids: prod,
pending: Vec::new(),
}
.run()
})?;
(Some(sender), Some(cons), Some(thread_handle))
};
let current_cycle = Default::default();
let hashes = HashValueStore::new(cons);
let context_hashes = Default::default();
let mut context_hashes_cycles = VecDeque::with_capacity(PRESERVE_CYCLE_COUNT);
for _ in 0..PRESERVE_CYCLE_COUNT {
context_hashes_cycles.push_back(Default::default())
}
Ok(Self {
current_cycle,
hashes,
sender,
context_hashes,
context_hashes_cycles,
thread_handle,
shapes: DirectoryShapes::default(),
string_interner: StringInterner::default(),
})
}
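    // Usage sketch (illustrative, not part of the original source): the in-memory GC thread
    // can be disabled for archive-style runs by setting the environment variable checked
    // above before the repository is constructed, e.g.
    //
    //     std::env::set_var("DISABLE_INMEM_CONTEXT_GC", "true");
    //     let repo = InMemory::try_new()?;   // no "ctx-inmem-gc-thread" is spawned and `sender` stays None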
pub(crate) fn get_vacant_entry_hash(&mut self) -> Result<VacantObjectHash, DBError> {
self.hashes.get_vacant_object_hash().map_err(Into::into)
}
pub(crate) fn get_hash(&self, hash_id: HashId) -> Result<Option<&ObjectHash>, DBError> {
self.hashes.get_hash(hash_id).map_err(Into::into)
}
pub(crate) fn get_value(&self, hash_id: HashId) -> Result<Option<&[u8]>, DBError> {
self.hashes.get_value(hash_id).map_err(Into::into)
}
fn contains(&self, hash_id: HashId) -> Result<bool, DBError> {
self.hashes.contains(hash_id).map_err(Into::into)
}
pub fn write_batch(&mut self, batch: Vec<(HashId, Arc<[u8]>)>) -> Result<(), DBError> {
for (hash_id, value) in batch {
self.hashes.insert_value_at(hash_id, Arc::clone(&value))?;
self.current_cycle.insert(hash_id, Some(value));
}
Ok(())
}
pub fn new_cycle_started(&mut self) {
if let Some(sender) = &self.sender {
let values_in_cycle = std::mem::take(&mut self.current_cycle);
let new_ids = self.hashes.take_new_ids();
if let Err(e) = sender.try_send(Command::StartNewCycle {
values_in_cycle,
new_ids,
}) {
eprintln!("Fail to send Command::StartNewCycle to GC worker: {:?}", e);
}
if let Some(unused) = self.context_hashes_cycles.pop_front() {
for hash in unused {
self.context_hashes.remove(&hash);
}
}
self.context_hashes_cycles.push_back(Default::default());
}
}
pub fn block_applied(&mut self, reused: Vec<HashId>) {
if let Some(sender) = &self.sender {
if let Err(e) = sender.send(Command::MarkReused { reused }) {
eprintln!("Fail to send Command::MarkReused to GC worker: {:?}", e);
}
}
}
pub fn get_context_hash_impl(&self, context_hash: &ContextHash) -> Option<HashId> {
let mut hasher = DefaultHasher::new();
hasher.write(context_hash.as_ref());
let hashed = hasher.finish();
self.context_hashes.get(&hashed).cloned()
}
pub fn put_context_hash_impl(&mut self, commit_hash_id: HashId) -> Result<(), DBError> {
let commit_hash = self
.hashes
.get_hash(commit_hash_id)?
.ok_or(DBError::MissingObject {
hash_id: commit_hash_id,
})?;
let mut hasher = DefaultHasher::new();
hasher.write(&commit_hash[..]);
let hashed = hasher.finish();
self.context_hashes.insert(hashed, commit_hash_id);
if let Some(back) = self.context_hashes_cycles.back_mut() {
back.push(hashed);
};
Ok(())
}
#[cfg(test)]
pub(crate) fn put_object_hash(&mut self, entry_hash: ObjectHash) -> HashId {
let vacant = self.get_vacant_entry_hash().unwrap();
vacant.write_with(|entry| *entry = entry_hash)
}
}
impl Drop for InMemory {
fn drop(&mut self) {
let sender = match self.sender.take() {
Some(sender) => sender,
None => return,
};
if let Err(e) = sender.send(Command::Close) {
eprintln!("Fail to send Command::Close to GC worker: {:?}", e);
return;
}
let thread_handle = match self.thread_handle.take() {
Some(thread_handle) => thread_handle,
None => return,
};
if let Err(e) = thread_handle.join() {
eprintln!("Fail to join GC worker thread: {:?}", e);
}
}
}
| get_memory_usage | identifier_name |
ghttp_server_router.go | // Copyright 2018 gf Author(https://gitee.com/johng/gf). All Rights Reserved.
//
// This Source Code Form is subject to the terms of the MIT License.
// If a copy of the MIT was not distributed with this file,
// You can obtain one at https://gitee.com/johng/gf.
// Routing control.
package ghttp
import (
"errors"
"strings"
"container/list"
"gitee.com/johng/gf/g/util/gregex"
"gitee.com/johng/gf/g/container/gset"
"fmt"
)
// Look up the handler for a request.
// Internally lock-protected: concurrent reads are allowed but concurrent writes are not; results are cached, keyed by Host, Method and Path.
func (s *Server) getHandlerWithCache(r *Request) *handlerRegisterItem {
var cacheItem *handlerParsedItem
cacheKey := s.handlerKey(r.Method, r.URL.Path, r.GetHost())
if v := s.handlerCache.Get(cacheKey); v == nil {
cacheItem = s.searchHandler(r.Method, r.URL.Path, r.GetHost())
if cacheItem != nil {
s.handlerCache.Set(cacheKey, cacheItem, 0)
}
} else {
cacheItem = v.(*handlerParsedItem)
}
if cacheItem != nil {
if r.Router == nil {
for k, v := range cacheItem.values {
r.routerVars[k] = v
}
r.Router = cacheItem.item.router
}
return cacheItem.item
}
return nil
}
// Parse a registration pattern into domain, method and uri.
func (s *Server)parsePattern(pattern string) (domain, method, uri string, err error) {
uri = pattern
domain = gDEFAULT_DOMAIN
method = gDEFAULT_METHOD
if array, err := gregex.MatchString(`([a-zA-Z]+):(.+)`, pattern); len(array) > 1 && err == nil {
method = array[1]
uri = array[2]
}
if array, err := gregex.MatchString(`(.+)@([\w\.\-]+)`, uri); len(array) > 1 && err == nil {
uri = array[1]
domain = array[2]
}
if uri == "" {
err = errors.New("invalid pattern")
}
	// Strip the trailing "/" so the uri is handled consistently with route matching.
if uri != "/" {
uri = strings.TrimRight(uri, "/")
}
return
}
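// Illustrative examples for parsePattern (added for clarity, not part of the original source):
//
//	"GET:/user/list@johng.cn" -> method "GET", uri "/user/list", domain "johng.cn"
//	"/user/:name"             -> method gDEFAULT_METHOD, uri "/user/:name", domain gDEFAULT_DOMAIN
//	"POST:/order/"            -> method "POST", uri "/order" (trailing "/" stripped), domain gDEFAULT_DOMAIN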
// Route registration handler.
// If a hook argument is passed this registers a callback (hook) handler; otherwise it registers a normal route handler.
func (s *Server) setHandler(pattern string, handler *handlerItem, hook ... string) error {
	// Routes cannot be registered dynamically while the web server is running.
if s.status == gSERVER_STATUS_RUNNING {
return errors.New("cannnot bind handler while server running")
}
var hookName string
if len(hook) > 0 {
hookName = hook[0]
}
domain, method, uri, err := s.parsePattern(pattern)
if err != nil {
return errors.New("invalid pattern")
}
	// Router object
router := &Router {
Uri : uri,
Domain : domain,
Method : method,
Priority : strings.Count(uri[1:], "/"),
}
router.RegRule, router.RegNames = s.patternToRegRule(uri)
	// Registration item
registerItem := &handlerRegisterItem {
handler : handler,
hooks : make(map[string]*list.List),
router : router,
}
if len(hookName) > 0 {
registerItem.handler = nil
registerItem.hooks[hookName] = list.New()
registerItem.hooks[hookName].PushBack(handler)
}
	// Dynamic registration: first check whether this really is a dynamic rule; if not, there is no need to add it to the dynamic-registration records.
	// Non-leaf nodes are hash-table lookup nodes, searched efficiently level by level along the registered URI until a leaf list node is reached;
	// leaf nodes are linked lists sorted by priority (higher priority first) and scanned linearly; after the hash-table descent a leaf list stays small, so lookups remain efficient.
if _, ok := s.handlerTree[domain]; !ok {
s.handlerTree[domain] = make(map[string]interface{})
}
	// Pointer used to walk the tree
p := s.handlerTree[domain]
	// Rule lists collected for the current node
lists := make([]*list.List, 0)
array := ([]string)(nil)
if strings.EqualFold("/", uri) {
array = []string{"/"}
} else {
array = strings.Split(uri[1:], "/")
}
	// The key "*fuzz" marks a fuzzy-match node, which carries a child list;
	// the key "*list" holds a linked list; both leaf nodes and fuzzy-match nodes have it.
for k, v := range array {
if len(v) == 0 {
continue
}
		// Check whether this segment is a fuzzy-match rule
if gregex.IsMatchString(`^[:\*]|{[\w\.\-]+}`, v) {
v = "*fuzz"
			// Since this is a fuzzy rule, a *list is kept here so that subsequent route rules can be added to it;
			// lookups walk from the leaf-node lists back towards the root in priority order.
if v, ok := p.(map[string]interface{})["*list"]; !ok {
p.(map[string]interface{})["*list"] = list.New()
lists = append(lists, p.(map[string]interface{})["*list"].(*list.List))
} else {
lists = append(lists, v.(*list.List))
}
}
		// Write this segment into the current level of the tree
if _, ok := p.(map[string]interface{})[v]; !ok {
p.(map[string]interface{})[v] = make(map[string]interface{})
}
p = p.(map[string]interface{})[v]
		// Reached the leaf node: add the matching rule to its list (the condition v != "*fuzz" is there because for a fuzzy node the *list was already added above)
if k == len(array) - 1 && v != "*fuzz" {
if v, ok := p.(map[string]interface{})["*list"]; !ok {
p.(map[string]interface{})["*list"] = list.New()
lists = append(lists, p.(map[string]interface{})["*list"].(*list.List))
} else {
lists = append(lists, v.(*list.List))
}
}
}
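	// Illustrative shape (added for clarity, not part of the original source): after
	// registering "GET:/user/:name" on the default domain the tree looks roughly like
	//
	//	handlerTree[gDEFAULT_DOMAIN]["user"]["*list"] -> linked list holding this *handlerRegisterItem
	//	handlerTree[gDEFAULT_DOMAIN]["user"]["*fuzz"] -> child map for any segments after the fuzzy one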
	// The resulting lists are the fuzzy-match lists touched along this route's path (note: not every list in the tree);
	// walk each of them from the front and insert this route item, placing higher-priority items first.
item := (*handlerRegisterItem)(nil)
	// Used to mark whether the object a *handlerRegisterItem points to has already been handled, since several nodes may reference the same object
pushedItemSet := gset.NewStringSet()
if len(hookName) == 0 {
		// Normal handler registration: append or overwrite
for _, l := range lists {
pushed := false
address := ""
for e := l.Front(); e != nil; e = e.Next() {
item = e.Value.(*handlerRegisterItem)
address = fmt.Sprintf("%p", item)
if pushedItemSet.Contains(address) {
pushed = true
break
}
				// Check whether an identical route registration already exists
if strings.EqualFold(router.Domain, item.router.Domain) &&
strings.EqualFold(router.Method, item.router.Method) &&
strings.EqualFold(router.Uri, item.router.Uri) {
item.handler = handler
pushed = true
break
}
if s.compareRouterPriority(router, item.router) {
l.InsertBefore(registerItem, e)
pushed = true
break
}
}
if pushed {
if len(address) > 0 {
pushedItemSet.Add(address)
}
} else {
l.PushBack(registerItem)
}
}
} else {
		// Hook handler registration: append the handler to the end of the list
for _, l := range lists {
pushed := false
address := ""
for e := l.Front(); e != nil; e = e.Next() {
item = e.Value.(*handlerRegisterItem)
address = fmt.Sprintf("%p", item)
if pushedItemSet.Contains(address) {
pushed = true
break
}
				// Check whether an identical route registration already exists
if strings.EqualFold(router.Domain, item.router.Domain) &&
strings.EqualFold(router.Method, item.router.Method) &&
strings.EqualFold(router.Uri, item.router.Uri) {
if _, ok := item.hooks[hookName]; !ok {
item.hooks[hookName] = list.New()
}
item.hooks[hookName].PushBack(handler)
pushed = true
break
}
if s.compareRouterPriority(router, item.router) {
l.InsertBefore(registerItem, e)
pushed = true
break
}
}
if pushed {
if len(address) > 0 {
pushedItemSet.Add(address)
}
} else {
l.PushBack(registerItem)
}
}
}
//gutil.Dump(s.handlerTree)
return nil
}
// Compare the priority of two handler items; be very careful about the argument order of the new and old items.
// Priority rules:
// 1. deeper registration levels win (compare the number of "/" segments);
// 2. fuzzy-rule priority: {xxx} > :xxx > *xxx;
func (s *Server) compareRouterPriority(newRouter, oldRouter *Router) bool {
if newRouter.Priority > oldRouter.Priority {
return true
}
if newRouter.Priority < oldRouter.Priority {
return false
}
	// e.g. /{user}/{act} has higher priority than /:user/:act
if strings.Count(newRouter.Uri, "{") > strings.Count(oldRouter.Uri, "{") {
return true
}
	// e.g. /:name/update has higher priority than /:name/:action
if strings.Count(newRouter.Uri, "/:") < strings.Count(oldRouter.Uri, "/:") {
		// e.g. /:name/:action has higher priority than /:name/*any
if strings.Count(newRouter.Uri, "/*") < strings.Count(oldRouter.Uri, "/*") {
return true
}
return false
}
return false
}
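// Worked examples for compareRouterPriority (added for clarity, not part of the original source),
// per the two rules above:
//
//	compareRouterPriority(/user/list/{id}, /user/list)  -> true  (deeper level wins)
//	compareRouterPriority(/{user}/{act},   /:user/:act) -> true  (more "{xxx}" rules at equal depth)
//
// When depth and "{" counts tie, the nested "/:" and "/*" counts above decide between
// ":xxx" and "*xxx" style rules.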
// Look up the handler serving a request
func (s *Server) searchHandler(method, path, domain string) *handlerParsedItem {
domains := []string{ gDEFAULT_DOMAIN }
if !strings.EqualFold(gDEFAULT_DOMAIN, domain) {
domains = append(domains, domain)
}
array := ([]string)(nil)
if strings.EqualFold("/", path) {
array = []string{"/"}
} else {
array = strings.Split(path[1:], "/")
}
for _, domain := range domains {
p, ok := s.handlerTree[domain]
if !ok {
continue
}
		// The per-level lists (every node carries a *list) exist so that when nothing matches at the leaf node, the parent's fuzzy-match rules can still handle the request
lists := make([]*list.List, 0)
for k, v := range array {
if _, ok := p.(map[string]interface{})["*list"]; ok {
lists = append(lists, p.(map[string]interface{})["*list"].(*list.List))
}
if _, ok := p.(map[string]interface{})[v]; ok {
p = p.(map[string]interface{})[v]
if k == len(array) - 1 {
if _, ok := p.(map[string]interface{})["*list"]; ok {
lists = append(lists, p.(map[string]interface{})["*list"].(*list.List))
break
}
}
} else {
if _, ok := p.(map[string]interface{})["*fuzz"]; ok {
p = p.(map[string]interface{})["*fuzz"]
}
}
			// For the leaf node, also check the "*fuzz" key at the current level, so that e.g. /user/*action can match /user
if k == len(array) - 1 {
if _, ok := p.(map[string]interface{})["*fuzz"]; ok {
p = p.(map[string]interface{})["*fuzz"]
}
if _, ok := p.(map[string]interface{})["*list"]; ok {
lists = append(lists, p.(map[string]interface{})["*list"].(*list.List))
}
}
}
		// Walk the collected lists from the end of the slice backwards: lists near the end are deeper and therefore higher priority
for i := len(lists) - 1; i >= 0; i-- {
for e := lists[i].Front(); e != nil; e = e.Next() {
item := e.Value.(*handlerRegisterItem)
				// Dynamic rules registered with gDEFAULT_METHOD are not expanded into every HTTP method for storage the way static rules are
if strings.EqualFold(item.router.Method, gDEFAULT_METHOD) || strings.EqualFold(item.router.Method, method) {
					// Note: when the rule carries no dynamic parts, len(match) == 1
if match, err := gregex.MatchString(item.router.RegRule, path); err == nil && len(match) > 0 {
//gutil.Dump(match)
//gutil.Dump(names)
handlerItem := &handlerParsedItem{item, nil}
						// If query-style matching is needed, the URL must be re-parsed with the regular expression
if len(item.router.RegNames) > 0 {
if len(match) > len(item.router.RegNames) {
handlerItem.values = make(map[string][]string)
								// If the same route parameter name appears more than once, append to its value slice
for i, name := range item.router.RegNames {
if _, ok := handlerItem.values[name]; ok {
handlerItem.values[name] = append(handlerItem.values[name], match[i + 1])
} else {
handlerItem.values[name] = []string{match[i + 1]}
}
}
}
}
return handlerItem
}
}
}
}
}
return nil
}
// Convert a pattern (without method and domain) into its matching regular expression plus the corresponding parameter names
func (s *Server) patternToRegRule(rule string) (regrule string, names []string) {
if len(rule) < 2 {
return rule, nil
}
regrule = "^"
array := strings.Split(rule[1:], "/")
for _, v := range array {
if len(v) == 0 {
continue
}
switch v[0] {
case ':':
regrule += `/([\w\.\-]+)`
names = append(names, v[1:])
case '*':
regrule += `/{0,1}(.*)`
names = append(names, v[1:])
default:
s, _ := gregex.ReplaceStringFunc(`{[\w\.\-]+}`, v, func(s string) string {
names = append(names, s[1 : len(s) - 1])
return `([\w\.\-]+)`
})
if strings.EqualFold(s, v) {
regrule += "/" + v
} else {
regrule += "/" + s
}
}
}
regrule += `$`
return
}
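// Illustrative mappings for patternToRegRule (added for clarity, not part of the original source),
// following the three cases above:
//
//	"/user/:name"       -> regrule "^/user/([\w\.\-]+)$",       names ["name"]
//	"/user/{uid}/edit"  -> regrule "^/user/([\w\.\-]+)/edit$",  names ["uid"]
//	"/static/*filepath" -> regrule "^/static/{0,1}(.*)$",       names ["filepath"]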
// Build the key used for handler lookups
func (s *Server) handlerKey(method, path, domain string) string {
return strings.ToUpper(method) + ":" + path + "@" + strings.ToLower(domain)
}
| 如:/user/*acti | identifier_name |
licensePlateDetectorOptimized.py | print("\n\nLOADING PROGRAM\n\n")
import cv2 as cv
print("Loaded CV")
import numpy as np
print("Loaded NP")
import tensorflow as tf
print("Loaded TF")
import imutils
print("Loaded IMUTILS")
import os
print("Loaded OS")
'''
SOME NOTES ABOUT THE PROGRAM:
1) Make sure to change the paths at the top of the file to reflect the correct paths to your files
2) The program is slow right now. I am working on improvements
3) All togglable settings are at the top of the file and in the __init__ / settings_init functions of the FindPlate class
Have fun!! :)
'''
### CHANGE THESE PATHS ###
#the paths below are the paths that work on my machine!!
video_path = "/Users/tristanbrigham/Downloads/BostonVid.mp4"
folder_path = os.getcwd() + "/MrCrutcherProjects/LicensePlateProject/Tristans_Code/"
training_data_path = "/Users/tristanbrigham/GithubProjects/AI_Training_Data/LicensePlateProject/"
### GLOBAL VARIABLES FOR THE PROGRAM ###
show_images_bool = True #if true, shows the images that are being processed
collect_data = False #if true, asks the user for data on what letter is detected. input nothing if image is not a letter or contains more than one letter
get_chars = True #if true, applies the algorithm model to the characters that are detected to get what the plate says
optimize = True #checks to see whether the user only wants the program to analyze the bottom portion of the vid/image
debug = False #if true, shows the gray ROI's and the license plate ROI's
checkIndividual = False #if true, this waits for user input before showing each contour on each image
start_frame_number = 0 #where does the user want the video to start?
frames_skipped = 30 #how many frames pass before the frame is analyzed (for instance, analyze every 20th frame if this value is 20)
letter_dict = {} #init the letter dictionary to get the letters that are put through the NN
model = tf.keras.models.load_model(folder_path + "kerasModelandData/model.h5") #getting the model and loading it
########################################################################################
#################################### GENERAL SETUP #####################################
########################################################################################
def skip_forward(): #a function to skip forward in the video
frame_count = cap.get(cv.CAP_PROP_POS_FRAMES)
cap.set(cv.CAP_PROP_POS_FRAMES, frame_count + 1000)
def setup_dictionary(): #a function to set up the dictionary mapping values to letters for the NN
alphabet = open(folder_path + "kerasModelandData/alphabet.txt")
for count, line in enumerate(alphabet.readlines()):
letter_dict[count] = line[0]
print(letter_dict)
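# Usage sketch (illustrative only; the real capture loop lives elsewhere in this file, and
# cap/video_path/start_frame_number/frames_skipped are the globals defined above): FindPlate is
# meant to be fed one frame at a time, doing the heavy work only when its counter argument is 0.
#
#   cap = cv.VideoCapture(video_path)
#   cap.set(cv.CAP_PROP_POS_FRAMES, start_frame_number)
#   count = 0
#   while cap.isOpened():
#       ok, frame = cap.read()
#       if not ok:
#           break
#       FindPlate(count % frames_skipped, img=cv.resize(frame, (860, 480)))
#       count += 1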
### Class for plate detection
########################################################################################
#################################### SETUP PROGRAM! ####################################
########################################################################################
class FindPlate:
#should maybe make the parameters global variables or controlled by the command line
# Have to adjust so that the min and max are larger when analyzing the images and smaller when looking at the vids
def __init__(self, counter, check_wait = False, imgAddress = None, img = None):
self.check_wait = check_wait #initializing whether we need to wait between drawing contours for debugging
if imgAddress is None and img is not None: #getting the image from the video
self.img = img
elif imgAddress is not None and img is None:
self.img = cv.resize(cv.imread(imgAddress), (860, 480))
else:
print("-----------------------ERROR FINDING IMAGE-----------------------")
exit(0)
if(counter == 0):
self.setup_exec() #execute the program every certain amount of frames
elif show_images_bool:
self.show_images() #Show the images if that option is on
self.check_keys()
def setup_exec(self):
self.settings_init() #initializing all of the settings
if optimize: #Currently, optimize makes it so that only the bottom portion of the screen is analyzed
self.offset = int(self.img.shape[0] * (self.divideArea - 1) / self.divideArea) #How many pixels in the y direction are not analyzed from the top
self.top_img = self.img[ : self.offset ] #Taking the top potion of the image and saving it for later
self.img = self.img[self.offset : ] #reassigning the image to the portion being analyed
self.x = self.img.shape[1] #getting the width of the image
self.img_copy = self.img.copy() #getting copies for analysis/blurring
self.img_rects = self.img.copy() #the copy that will be used for bounding rectangles
self.Canny = None #Initializing variable to hold Canny image
self.run()
if optimize: #After being run, rejoin the images if optimize is on
self.img = np.append(self.top_img, self.img, axis=0)
self.img_rects = np.append(self.top_img, self.img_rects, axis=0)
if show_images_bool: #if show the images, show the images
self.show_images_exec()
def settings_init(self):
self.divideArea = 2.5 #This is the denominator for how much of the screen is analyzed (analyzes the [1/(variable)] portion of the image/vid)
#For example, if the bottom half should be analyzed, put in '2'
self.amt_digits = 6 #defining the amount of characters and digits that should be found on the license plate
self.ratio_max = 3 #This is the maximum width to height ratio that a license plate can be in the program (for the US, about 4 is good while for EU plates, about 6-7 is good)
self.ratio_min = 1.5 #This is the minimum width to height ratio
self.angle_min = 84 #After the angle of the cv.areaMinRect has a modulo 90 applied to it, the angle either needs to be close to upright (this value or above)
self.angle_max = 6 # or close to horizontal (this value or below) in degrees
self.img_size = self.img.shape[0] * self.img.shape[1]
#current size: about 240,000 pixels
self.area_min = int(self.img_size / 5000) #minimum area of the accepted bounding boxes -- it recognizes plates with smaller values but there is no way that characters can be picked out. No use to have smaller
self.area_max = int(self.img_size / 600) #max area of the accepted bounding boxes
self.lower_canny = 110 #upper value for canny thresholding
self.upper_canny = 120 #Lower value for canny thresholding
#ASPECT variables are not used:
self.aspect_max = 1 #the max amount of area that the license plate can cover within a bounding box to be considered
self.aspect_min = 0.3 #the minimum amount of area that a license plate can cover within a bounding box to be considered
self.img_dilate = 40 #specifying the value that the pixels which are being brightened will be increased by
self.blur = (9, 9) #initializing the size component of the gaussian blur that is applied to the image
self.offset = 0 #initializing the variable which keeps track of how far from the top of the image the program begins to analyze
self.top_img = None #initializing the variable which may hold the top part of the image for later
self.element_structure = cv.getStructuringElement(shape=cv.MORPH_RECT, ksize=(5, 5)) #basic elem structure for blurring
self.letter_contour_min = 2000 #The minimum size a contour has to be for it to be considered for letter analysis
self.roi_array = [] #array for holding the ROI's
def run(self): #master run function for the program
_ = self.contour_manipulation(self.preprocess_canny_contours())
license_plate_arr = self.analyze_image()
########################################################################################
#################################### EXECUTE PROGRAM ###################################
########################################################################################
    def contour_manipulation(self, contours):
        global checkIndividual                                      #declare the module-level flag; assigning to it below would otherwise make it a local and the first read would raise UnboundLocalError
        ret = []                                                    #init the ret array
        for c in contours:
            boolRect = self.check_min_rect(c)                       #checking whether the bounding rect encapsulates a valid license plate region
            if boolRect:                                            #if the region is valid, then append it to the return array
                ret.append(c)
            if checkIndividual:                                     #if the check individual option is on, then go through the contours one-by-one, write them to the image, and show the image
                checkIndividual = self.check_indiv_contour(c)
        return ret
def show_images_exec(self, height = 300): #shows the important images that are being used for execution
# cv.imshow("Contours", imutils.resize(self.img_copy, height = height))
cv.imshow("Bounding Rects", self.img_rects)
cv.imshow("Canny", imutils.resize(self.Canny, height = height))
self.show_images()
self.check_keys()
def check_indiv_contour(self, c): #show the image and block for a key input to figure out what to do next
print("\n\nCONTOUR: {}".format(cv.contourArea(c)))
cv.imshow("Bounding Rects", self.img_rects)
if cv.waitKey(0) & 0xFF == ord('c'): #This cycles through to the next contour
return True
elif cv.waitKey(0) & 0xFF == ord('f'): #This makes it so that the rest of the contours are drawn in an instant
return False
elif cv.waitKey(0) & 0xFF == ord('q'): #quits the program
exit()
########################################################################################
############################# ANALYZING/ANNOTATING IMAGES ##############################
########################################################################################
def preprocess_canny_contours(self):
gray = cv.cvtColor(self.img, cv.COLOR_BGR2GRAY) #get grayscale image
gray = cv.GaussianBlur(gray, self.blur, 0) #Apply a blur
edged = cv.Canny(gray, self.lower_canny, self.upper_canny) #Getting the canny contours
self.Canny = edged #assign the variable
contours = cv.findContours(edged.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE) #Get the contours of the Canny image [remember that this will return more contours than we need
#Because Canny just returns lines]
contours = imutils.grab_contours(contours) #Get the contours using imutils
return contours
def analyze_image(self): # getting an array of the potential letters from each license plate ROI
str_arr = []
#SHOWING THE ROI's
for count, (regionOfInterest, x, y, w, h) in enumerate(self.roi_array): #iterate through the ROI array
data = self.process_ROI(regionOfInterest, count) #process the ROI for letter regions
if data is not None and get_chars: #check if the data is found and if the get_chars option is on
str_arr.append(self.show_bounds_and_text(data, x, y, w, h)) #if get_chars, run the plate through the NN for the characters
return str_arr #return an array of the strings that were found on the screen
def show_bounds_and_text(self, data, x, y, w, h):
cv.rectangle(self.img_rects, (x, y), (x + w, y + h), (0, 255, 0), thickness=4) #draw a bounding rectangle on the img_rects image
neuralNet = NeuralNetwork() #init a NN (neural network)
ret_str = neuralNet.get_chars_string(data) #get the character string from the plate
cv.putText(self.img_rects, ret_str, (x, y - 5), cv.FONT_HERSHEY_DUPLEX, 1.7, (0, 255, 0), thickness=2) #write those characters to the image
return ret_str #return the string
########################################################################################
################################### CHECKING CONTOURS ##################################
########################################################################################
def process_ROI(self, roi, counter): #takes a license plate ROI as an input with a counter (for showing images) and segments the character ROI's
regionOfInterest = roi[int(roi.shape[0] / 4) : roi.shape[0] - int(roi.shape[0] / 5), int(roi.shape[1] / 18) : roi.shape[1] - int(roi.shape[1] / 18)]
#^cut down on the region of interest that is passed (values determined through trial and error)
name = "ROI {}".format(counter) #format the name
regionOfInterest = cv.cvtColor(regionOfInterest, cv.COLOR_BGR2GRAY) #convert the color to grayscale
regionOfInterest[: int(regionOfInterest.shape[0] / 6), :] += self.img_dilate #Increasing the brightness of the top of the image (BREAKS WITH HIGH VALUES because of overflow)
regionOfInterest = imutils.resize(regionOfInterest, height=200, inter=cv.INTER_AREA) #resize the region of interest bigger
image = cv.GaussianBlur(regionOfInterest, (0, 0), 3) #try to sharpen the image using a blur of 0, 0
image = cv.addWeighted(image, 1.5, regionOfInterest, -0.5, 0) #continue to try to sharpen the image
_, thresh = cv.threshold(image, 0, 255, cv.THRESH_BINARY+cv.THRESH_OTSU) #threshold the image for the characters
thresh = cv.bitwise_not(thresh) #convert to bitwise not (so that the chars are now the white parts)
thresh = cv.erode(thresh, (81, 61), iterations = 15) #erode to try to break the erroneous connections that the computer picks up between the chars
contours, _ = cv.findContours(thresh, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE) #find the contours
contours = self.sort_contours_left(contours) #sort the contours from left to right
image = cv.cvtColor(image, cv.COLOR_GRAY2BGR) #convert the image colors back
letters = []
for contour in contours:
if cv.contourArea(contour) > self.letter_contour_min: #check if each contour is too small or not (makes sure we are analyzing potential chars)
                x, y, w, h = cv.boundingRect(contour)               #bounding rect around the supposed character
letterInterest = thresh[0 : y + h, x : x + w] #making sure that the top of the letter is not cut off when getting the ROI
cv.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0)) #bounding box on the passed image
letterImage = cv.resize(letterInterest, (60, 80)) #resize the char ROI
letters.append(letterImage) #append the char ROI
if debug: #if debugging, show all relevant images
cv.imshow("GRAY {}".format(counter), imutils.resize(thresh, height=200))
cv.imshow(name, image) #showing and resizing image
cv.moveWindow(name, 0, 110 * counter - 50) #Moving the ROI windows into the right spot on the screen
if len(letters) > 4: #if atleast four letters are detected, then return the array
if collect_data:
NeuralNetwork.label_letter(letters) #if we are collecting data, then do what is necessary there to create the label
return letters
else: return None
def check_min_rect(self, contour): #function for getting the min-area rectangle and validating whether it is ok
rect = cv.minAreaRect(contour) #get the min area rect
rx, ry, rw, rh = cv.boundingRect(contour) #get the bounding rectangle coordinates
if self.validateRatio(rect, rw, rh): #check the ratios of the ROI
brect = self.img[ry : ry + rh, rx : rx + rw] #get the ROI
self.roi_array.append((brect, rx, ry, rw, rh)) #append this ROI to the ROI array
return True #if everything is right, then return true to show that it is valid
else:
return False #else, return false
def validateRatio(self, rect, rw, rh): #more checking that the contour could be a license plate
(x, y), (width, height), angle = rect #get all of the data about the minarea bounding rectangle
if width == 0 or height == 0: #guard against degenerate rectangles that would cause a divide-by-zero later
return False
angle = angle % 90 #getting the angle in the most basic form
area = width * height #calc the area
if not ((angle < self.angle_max or angle > self.angle_min) and (area > self.area_min and area < self.area_max)):
return False #if something is off, then return false (check that the angle is almost 90 or almost 0 and that the area is ok)
if rw < rh: #if the width is less than the height, return false
return False
return self.rat_check(width, height) #check the ratios
def rat_check(self, width, height):
ratio = float(width) / float(height) #compute the width-to-height ratio
if ratio < 1:
ratio = 1 / ratio #flip it so that the ratio is always at least 1
return not (ratio < self.ratio_min or ratio > self.ratio_max) #return False if the ratio falls outside the allowed range
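# Worked example (values assumed, actual limits come from ratio_min/ratio_max): a plate-shaped
# 520x110 region gives ratio 520/110 ≈ 4.7 and passes as long as ratio_max >= 4.7; a 200x200
# square gives ratio 1.0 and is rejected whenever ratio_min > 1.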
########################################################################################
#################################### SHOWING IMAGES ####################################
########################################################################################
def show_images(self, height = 200): #show the original frame (called every iteration)
cv.imshow("Original", imutils.resize(self.img, height = height))
def check_keys(self):
if self.check_wait: #if going through the contours, check if q is pressed
key = cv.waitKey(0) & 0xFF
print("NEXT IMAGE")
if key == ord('q'): #exit button
exit(0)
else:
key = cv.waitKey(1)
if key & 0xFF == ord('q'): #exit button
exit(0)
elif key == ord('s'): #skip forward in the video
skip_forward()
elif key & 0xFF == ord('p'): # this creates a pause button for the video, in essence
print("VIDEO PAUSED @ FRAME {}".format(cap.get(cv.CAP_PROP_POS_FRAMES)))
while True:
    key = cv.waitKey(25) & 0xFF
    if key == ord('p'): #unpause
        break
    elif key == ord('q'): #quit the program button
        exit(0)
    elif key == ord('s'): #skip forward
        skip_forward()
########################################################################################
#################################### CONTOUR SORTING ###################################
########################################################################################
def sort_contours_left(self, contours): #sorts the contours from left to right based on the x-position of each bounding box
retContourMapping = []
for i, contour in enumerate(contours): #for every contour, grab the x-position of its bounding box
x, _, _, _ = cv.boundingRect(contour)
retContourMapping.append((contour, x, i))
retContourMapping.sort(key=lambda tup: tup[1]) #sort by x-position so the leftmost character comes first
contours = []
for contour, _, _ in retContourMapping:
contours.append(contour)
return contours
############# NOT BEING USED #############
def sort_contours_middle(self, contours): #This sorts the contours based on how far the contours are from the middle of the screen (only looks at the x-pos)
rects = []
for c in contours:
rects.append((cv.boundingRect(c), c)) #build a list of (bounding rect, contour) tuples
retContourMapping = []
for i in range(len(rects)): #for every contour, take the x-position of the middle of its bounding box
rect, contour = rects[i] #Then we are going to subtract that value from the middle of the screen
x, _, w, _ = rect #then we take the abs. value of that and sort those values in increasing fashion
x = int(self.x / 2) - x #If necessary, this would allow us to put a cap on processing and only look at contours in the middle of the screen
x = abs(x + int(w/2))
retContourMapping.append((i, x, rects[i], contour))
retContourMapping.sort(key=lambda tup: tup[1]) # sorts by distance from the vertical center line of the frame
keys = []
for index, _, _, _ in retContourMapping:
keys.append(index)
return keys
########################################################################################
#################################### NEURAL NETWORK ####################################
########################################################################################
imageNumber = 0
training_file_keys = training_data_path + "training_data.txt"
class NeuralNetwork:
def __init__(self):
self.model = model #initializing all of the pertinent settings
self.plate_ret = ""
################ TRAINING THE MODEL ################
def label_letter(self, imagearr):
for image in imagearr:
print("FRAME COUNT: {}".format(cap.get(cv.CAP_PROP_POS_FRAMES))) #printing the frame count incase my progress is lost
global imageNumber #getting the global variable names
global training_file_keys
cv.imshow("POSSIBLE LETTER", image) #show the ROI to get the character
cv.waitKey(1) #wait for a key for one millis
imageNumber = imageNumber + 1 #increasing the image number
letter = input("Please input the letter: ").upper() #turning the input to Upper case
hexval = ":".join("{:02x}".format(ord(c)) for c in letter) #hex value of each character that was typed
if len(letter) < 1 or hexval == "0c": #if nothing was entered, or the key was a control character (hex 0c), label the character as unknown
letter = '_'
else:
letter = letter[0] #make the input letter the first character which is input
with open(training_data_path + str(imageNumber) + ".txt", "w") as file: #write the char ROI out as a txt file (closed automatically)
    for row in image: #save the image rows to the txt file
        np.savetxt(file, row)
print("Letter passed: " + letter) #print the letter passed and append it to the keys file (used as training labels)
with open(training_file_keys, "a") as training_file:
    training_file.write("\n" + str(letter))
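# Assumption about the data layout: each character ROI is written to <imageNumber>.txt and one
# label line is appended to training_data.txt, so the training script presumably pairs the
# N-th saved image file with the N-th label line when building the dataset.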
################ PREDICTING WITH THE MODEL ################
def get_chars(self, array): #getting the character string
ret = ""
for image in array: #for each character...
ret += self.predict_char(image) #run it through the NN to get the character
return ret
def predict_char(self, image): #predict the input character using the neural network
image_formatted = self.setup_array(image) #setup the array
pred = model.predict(image_formatted) #predict the character
return letter_dict[int(np.argmax(pred))] #return the letter using the dict which was set up earlier
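# Example (assuming letter_dict maps class indices to characters, e.g. {0: 'A', 1: 'B', ...}):
# for pred = [[0.01, 0.95, 0.04]], np.argmax(pred) == 1, so the character returned is letter_dict[1].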
def setup_array(self, image): #formatting the input image to make sure it plays nice with the Neural Network
number_array = np.zeros((1, 80, 60, 1), dtype="float32")
number_array[0] = image.reshape(80, 60, 1) #sizing and inputting the array
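# Assumption: the model was trained on raw 0-255 pixel intensities. If it was instead trained
# on inputs scaled to [0, 1], the array should be normalized here, e.g.:
# number_array[0] = image.reshape(80, 60, 1) / 255.0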
return number_array
################ MODEL FUNCTIONS ################
def network_summary(self): #get a summary of the model (not used)
return self.model.summary()
########################################################################################
############################### VID PROCESSING AND SETUP ###############################
########################################################################################
if __name__ == "__main__":
setup_dictionary()
#addresses for testing my code on still images:
imageAddresses = [
"kerasModelandData/licensePlate.jpeg",
"kerasModelandData/licensePlate2.jpeg",
"kerasModelandData/licensePlate3.jpeg",
"kerasModelandData/licensePlate4.jpeg",
"kerasModelandData/licensePlate5.jpeg"
]
print("\n\nWelcome\nPlease press q to quit the program\nPlease press p to pause and unpause during the video\nPlease press s to skip forward within the video\nPlease press anything else to continue through the images when analyzing individual images")
print("\nOnce you have looked at all of the still images, the video will begin\n\n")
print("Green boxes signify possible license plate regions \nwhile red ones show other ROI's which were picked up and discarded")
print("\nUnderscore characters ('_') represent characters which the Neural Network could not decipher with a high confidence")
cap = cv.VideoCapture(video_path)
print("Starting Video @ frame " + str(start_frame_number))
cap.set(cv.CAP_PROP_POS_FRAMES, start_frame_number) #setting the starting frame number to the correct number
if collect_data:
with open(training_file_keys, "r") as file_keys: #read the last used image index so labeling can resume
    imageNumber = int(file_keys.readline().rstrip())
print("INDEX: " + str(imageNumber))
counter = 0
while(cap.isOpened()): #reading and analyzing the video as it runs
counter = counter + 1
counter = counter % frames_skipped
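# Assumption: FindPlate uses this counter (e.g. running the full detection only when it hits a
# particular value) so that just one in every frames_skipped frames gets the heavy pipeline.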
ret, img = cap.read()
if ret:
FindPlate(counter=counter, img = img)
else:
break
cap.release()
cv.destroyAllWindows()
| print("VIDEO PAUSED @ FRAME {}".format(cap.get(cv.CAP_PROP_POS_FRAMES)))
while True:
key = cv.waitKey(25) & 0xFF
if key == ord('p'): #unpause
break
elif key == ord('q'): #quit the program button
exit(0)
elif key == ord('s'): #skip forward
skip_forward() | conditional_block |