repo (string, 5–54 chars) | path (string, 4–155 chars) | func_name (string, 1–118 chars) | original_string (string, 52–85.5k chars) | language (string, 1 distinct value) | code (string, 52–85.5k chars) | code_tokens (sequence) | docstring (string, 6–2.61k chars) | docstring_tokens (sequence) | sha (string, 40 chars) | url (string, 85–252 chars) | partition (string, 1 distinct value) |
---|---|---|---|---|---|---|---|---|---|---|---|
nats-io/nats-streaming-server | server/partitions.go | initPartitions | func (s *StanServer) initPartitions() error {
// The option says that the server should only use the pre-defined channels,
// but none was specified. Don't see the point in continuing...
if len(s.opts.StoreLimits.PerChannel) == 0 {
return ErrNoChannel
}
nc, err := s.createNatsClientConn("pc")
if err != nil {
return err
}
p := &partitions{
s: s,
nc: nc,
}
	// Now that the connection is created, we need to set s.partitions to p
// so that server shutdown can properly close this connection.
s.partitions = p
p.createChannelsMapAndSublist(s.opts.StoreLimits.PerChannel)
p.sendListSubject = partitionsPrefix + "." + s.opts.ID
// Use the partitions' own connection for channels list requests
p.processChanSub, err = p.nc.Subscribe(p.sendListSubject, p.processChannelsListRequests)
if err != nil {
return fmt.Errorf("unable to subscribe: %v", err)
}
p.processChanSub.SetPendingLimits(-1, -1)
p.inboxSub, err = p.nc.SubscribeSync(nats.NewInbox())
if err != nil {
return fmt.Errorf("unable to subscribe: %v", err)
}
p.Lock()
// Set this before the first attempt so we don't miss any notification
// of a change in topology. Since we hold the lock, and even if there
// was a notification happening now, the callback will execute only
// after we are done with the initial check.
nc.SetDiscoveredServersHandler(p.topologyChanged)
// Now send our list and check if any server is complaining
// about having one channel in common.
if err := p.checkChannelsUniqueInCluster(); err != nil {
p.Unlock()
return err
}
p.Unlock()
return nil
} | go | // Initialize the channels partitions objects and issue the first
// request to check if other servers in the cluster incorrectly have
// any of the channels that this server is supposed to handle. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/partitions.go#L64-L107 | train |
nats-io/nats-streaming-server | server/partitions.go | createChannelsMapAndSublist | func (p *partitions) createChannelsMapAndSublist(storeChannels map[string]*stores.ChannelLimits) {
p.channels = make([]string, 0, len(storeChannels))
p.sl = util.NewSublist()
for c := range storeChannels {
p.channels = append(p.channels, c)
// When creating the store, we have already checked that channel names
// were valid. So this call cannot fail.
p.sl.Insert(c, channelInterest)
}
} | go | // Creates the channels map based on the store's PerChannel map that was given. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/partitions.go#L110-L119 | train |
nats-io/nats-streaming-server | server/partitions.go | topologyChanged | func (p *partitions) topologyChanged(_ *nats.Conn) {
p.Lock()
defer p.Unlock()
if p.isShutdown {
return
}
// Let's wait before checking (sending the list and waiting for a reply)
// so that the new NATS Server has a chance to send its local
// subscriptions to the rest of the cluster. That will reduce the risk
// of missing the reply from the new server.
time.Sleep(partitionsWaitOnChange)
if err := p.checkChannelsUniqueInCluster(); err != nil {
// If server is started from command line, the Fatalf
// call will cause the process to exit. If the server
// is run programmatically and no logger has been set
// we need to exit with the panic.
p.s.log.Fatalf("Partitioning error: %v", err)
// For tests
if partitionsNoPanic {
p.s.setLastError(err)
return
}
panic(err)
}
} | go | // Topology changed. Sends the list of channels. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/partitions.go#L122-L146 | train |
nats-io/nats-streaming-server | server/partitions.go | initSubscriptions | func (p *partitions) initSubscriptions() error {
// NOTE: Use the server's nc connection here, not the partitions' one.
for _, channelName := range p.channels {
pubSubject := fmt.Sprintf("%s.%s", p.s.info.Publish, channelName)
if _, err := p.s.nc.Subscribe(pubSubject, p.s.processClientPublish); err != nil {
return fmt.Errorf("could not subscribe to publish subject %q, %v", channelName, err)
}
}
return nil
} | go | // Create the internal subscriptions on the list of channels. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/partitions.go#L149-L158 | train |
nats-io/nats-streaming-server | server/partitions.go | processChannelsListRequests | func (p *partitions) processChannelsListRequests(m *nats.Msg) {
// Message cannot be empty, we are supposed to receive
// a spb.CtrlMsg_Partitioning protocol. We should also
	// have a reply subject.
if len(m.Data) == 0 || m.Reply == "" {
return
}
req := spb.CtrlMsg{}
if err := req.Unmarshal(m.Data); err != nil {
p.s.log.Errorf("Error processing partitioning request: %v", err)
return
}
// If this is our own request, ignore
if req.ServerID == p.s.serverID {
return
}
channels, err := util.DecodeChannels(req.Data)
if err != nil {
p.s.log.Errorf("Error processing partitioning request: %v", err)
return
}
// Check that we don't have any of these channels defined.
// If we do, send a reply with simply the name of the offending
// channel in reply.Data
reply := spb.CtrlMsg{
ServerID: p.s.serverID,
MsgType: spb.CtrlMsg_Partitioning,
}
gotError := false
sl := util.NewSublist()
for _, c := range channels {
if r := p.sl.Match(c); len(r) > 0 {
reply.Data = []byte(c)
gotError = true
break
}
sl.Insert(c, channelInterest)
}
if !gotError {
// Go over our channels and check with the other server sublist
for _, c := range p.channels {
if r := sl.Match(c); len(r) > 0 {
reply.Data = []byte(c)
break
}
}
}
replyBytes, _ := reply.Marshal()
// If there is no duplicate, reply.Data will be empty, which means
// that there was no conflict.
if err := p.nc.Publish(m.Reply, replyBytes); err != nil {
p.s.log.Errorf("Error sending reply to partitioning request: %v", err)
}
} | go | // Decode the incoming partitioning protocol message.
// It can be an HB, in which case, if it is from a new server
// we send our list to the cluster, or it can be a request
// from another server. If so, we reply to the given inbox
// with either an empty Data field or the name of the first
// channel we have in common. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/partitions.go#L196-L249 | train |
nats-io/nats-streaming-server | server/partitions.go | shutdown | func (p *partitions) shutdown() {
p.Lock()
defer p.Unlock()
if p.isShutdown {
return
}
p.isShutdown = true
if p.nc != nil {
p.nc.Close()
}
} | go | // Notifies all go-routines used by partitioning code that the
// server is shutting down and closes the internal NATS connection. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/partitions.go#L253-L263 | train |
nats-io/nats-streaming-server | server/conf.go | checkType | func checkType(name string, kind reflect.Kind, v interface{}) error {
actualKind := reflect.TypeOf(v).Kind()
if actualKind != kind {
return fmt.Errorf("parameter %q value is expected to be %v, got %v",
name, kind.String(), actualKind.String())
}
return nil
} | go | // checkType returns a formatted error if `v` is not of the expected kind. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/conf.go#L179-L186 | train |
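The checkType record above guards type assertions on values decoded from a configuration file by first comparing their reflect.Kind. Below is a minimal, self-contained sketch of the same reflect-based check; the function name, parameter name, and sample value are illustrative only and are not part of the repository.

```go
package main

import (
	"fmt"
	"reflect"
)

// expectKind mirrors the checkType pattern above: verify the dynamic kind of
// an interface{} value before performing a type assertion on it.
func expectKind(name string, kind reflect.Kind, v interface{}) error {
	if actual := reflect.TypeOf(v).Kind(); actual != kind {
		return fmt.Errorf("parameter %q value is expected to be %v, got %v",
			name, kind, actual)
	}
	return nil
}

func main() {
	// Config parsers of this style typically decode integers as int64.
	var decoded interface{} = int64(1024)
	if err := expectKind("max_bytes", reflect.Int64, decoded); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("max_bytes =", decoded.(int64)) // assertion is safe after the check
}
```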
nats-io/nats-streaming-server | server/conf.go | parseTLS | func parseTLS(itf interface{}, opts *Options) error {
m, ok := itf.(map[string]interface{})
if !ok {
return fmt.Errorf("expected TLS to be a map/struct, got %v", itf)
}
for k, v := range m {
name := strings.ToLower(k)
switch name {
case "client_cert":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
opts.ClientCert = v.(string)
case "client_key":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
opts.ClientKey = v.(string)
case "client_ca", "client_cacert":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
opts.ClientCA = v.(string)
}
}
return nil
} | go | // parseTLS updates `opts` with TLS config | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/conf.go#L189-L215 | train |
nats-io/nats-streaming-server | server/conf.go | parseStoreLimits | func parseStoreLimits(itf interface{}, opts *Options) error {
m, ok := itf.(map[string]interface{})
if !ok {
return fmt.Errorf("expected store limits to be a map/struct, got %v", itf)
}
for k, v := range m {
name := strings.ToLower(k)
switch name {
case "mc", "max_channels", "maxchannels":
if err := checkType(k, reflect.Int64, v); err != nil {
return err
}
opts.MaxChannels = int(v.(int64))
case "channels", "channels_limits", "channelslimits", "per_channel", "per_channel_limits":
if err := parsePerChannelLimits(v, opts); err != nil {
return err
}
default:
// Check for the global limits (MaxMsgs, MaxBytes, etc..)
if err := parseChannelLimits(&opts.ChannelLimits, k, name, v, true); err != nil {
return err
}
}
}
return nil
} | go | // parseStoreLimits updates `opts` with store limits | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/conf.go#L306-L331 | train |
nats-io/nats-streaming-server | server/conf.go | parseChannelLimits | func parseChannelLimits(cl *stores.ChannelLimits, k, name string, v interface{}, isGlobal bool) error {
switch name {
case "msu", "max_subs", "max_subscriptions", "maxsubscriptions":
if err := checkType(k, reflect.Int64, v); err != nil {
return err
}
cl.MaxSubscriptions = int(v.(int64))
if !isGlobal && cl.MaxSubscriptions == 0 {
cl.MaxSubscriptions = -1
}
case "mm", "max_msgs", "maxmsgs", "max_count", "maxcount":
if err := checkType(k, reflect.Int64, v); err != nil {
return err
}
cl.MaxMsgs = int(v.(int64))
if !isGlobal && cl.MaxMsgs == 0 {
cl.MaxMsgs = -1
}
case "mb", "max_bytes", "maxbytes":
if err := checkType(k, reflect.Int64, v); err != nil {
return err
}
cl.MaxBytes = v.(int64)
if !isGlobal && cl.MaxBytes == 0 {
cl.MaxBytes = -1
}
case "ma", "max_age", "maxage":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
dur, err := time.ParseDuration(v.(string))
if err != nil {
return err
}
cl.MaxAge = dur
if !isGlobal && cl.MaxAge == 0 {
cl.MaxAge = -1
}
case "mi", "max_inactivity", "maxinactivity":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
dur, err := time.ParseDuration(v.(string))
if err != nil {
return err
}
cl.MaxInactivity = dur
if !isGlobal && cl.MaxInactivity == 0 {
cl.MaxInactivity = -1
}
}
return nil
} | go | // parseChannelLimits updates `cl` with channel limits. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/conf.go#L334-L386 | train |
nats-io/nats-streaming-server | server/conf.go | parsePerChannelLimits | func parsePerChannelLimits(itf interface{}, opts *Options) error {
m, ok := itf.(map[string]interface{})
if !ok {
return fmt.Errorf("expected per channel limits to be a map/struct, got %v", itf)
}
for channelName, limits := range m {
limitsMap, ok := limits.(map[string]interface{})
if !ok {
return fmt.Errorf("expected channel limits to be a map/struct, got %v", limits)
}
if !util.IsChannelNameValid(channelName, true) {
return fmt.Errorf("invalid channel name %q", channelName)
}
cl := &stores.ChannelLimits{}
for k, v := range limitsMap {
name := strings.ToLower(k)
if err := parseChannelLimits(cl, k, name, v, false); err != nil {
return err
}
}
sl := &opts.StoreLimits
sl.AddPerChannel(channelName, cl)
}
return nil
} | go | // parsePerChannelLimits updates `opts` with per channel limits. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/conf.go#L389-L413 | train |
nats-io/nats-streaming-server | server/conf.go | getBytes | func getBytes(f *flag.Flag) (int64, error) {
var res map[string]interface{}
// Use NATS parser to do the conversion for us.
res, err := conf.Parse(fmt.Sprintf("bytes: %v", f.Value.String()))
if err != nil {
return 0, err
}
resVal := res["bytes"]
if resVal == nil || reflect.TypeOf(resVal).Kind() != reflect.Int64 {
return 0, fmt.Errorf("%v should be a size, got '%v'", f.Name, resVal)
}
return resVal.(int64), nil
} | go | // getBytes returns the number of bytes from the flag's String size.
// For instance, 1KB would return 1024. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/conf.go#L703-L715 | train |
nats-io/nats-streaming-server | util/sublist.go | addToCache | func (s *Sublist) addToCache(subject string, element interface{}) {
for k, r := range s.cache {
if matchLiteral(k, subject) {
// Copy since others may have a reference.
nr := append([]interface{}(nil), r...)
nr = append(nr, element)
s.cache[k] = nr
}
}
} | go | // addToCache will add the new entry to existing cache
// entries if needed. Assumes write lock is held. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L139-L148 | train |
nats-io/nats-streaming-server | util/sublist.go | removeFromCache | func (s *Sublist) removeFromCache(subject string) {
for k := range s.cache {
if !matchLiteral(k, subject) {
continue
}
// Since someone else may be referencing, can't modify the list
// safely, just let it re-populate.
delete(s.cache, k)
}
} | go | // removeFromCache will remove any active cache entries on that subject.
// Assumes write lock is held. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L152-L161 | train |
nats-io/nats-streaming-server | util/sublist.go | Match | func (s *Sublist) Match(subject string) []interface{} {
s.RLock()
rc, ok := s.cache[subject]
s.RUnlock()
if ok {
return rc
}
tsa := [32]string{}
tokens := tsa[:0]
start := 0
for i := 0; i < len(subject); i++ {
if subject[i] == btsep {
tokens = append(tokens, subject[start:i])
start = i + 1
}
}
tokens = append(tokens, subject[start:])
result := make([]interface{}, 0, 4)
s.Lock()
matchLevel(s.root, tokens, &result)
// Add to our cache
s.cache[subject] = result
// Bound the number of entries to sublistMaxCache
if len(s.cache) > slCacheMax {
for k := range s.cache {
delete(s.cache, k)
break
}
}
s.Unlock()
return result
} | go | // Match will match all entries to the literal subject.
// It will return a set of results. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L165-L200 | train |
nats-io/nats-streaming-server | util/sublist.go | Remove | func (s *Sublist) Remove(subject string, element interface{}) error {
tsa := [32]string{}
tokens := tsa[:0]
start := 0
for i := 0; i < len(subject); i++ {
if subject[i] == btsep {
tokens = append(tokens, subject[start:i])
start = i + 1
}
}
tokens = append(tokens, subject[start:])
s.Lock()
defer s.Unlock()
sfwc := false
l := s.root
var n *node
// Track levels for pruning
var lnts [32]lnt
levels := lnts[:0]
for _, t := range tokens {
if len(t) == 0 || sfwc {
return ErrInvalidSubject
}
if l == nil {
return ErrNotFound
}
switch t[0] {
case pwc:
n = l.pwc
case fwc:
n = l.fwc
sfwc = true
default:
n = l.nodes[t]
}
if n != nil {
levels = append(levels, lnt{l, n, t})
l = n.next
} else {
l = nil
}
}
if !s.removeFromNode(n, element) {
return ErrNotFound
}
s.count--
for i := len(levels) - 1; i >= 0; i-- {
l, n, t := levels[i].l, levels[i].n, levels[i].t
if n.isEmpty() {
l.pruneNode(n, t)
}
}
s.removeFromCache(subject)
return nil
} | go | // Remove will remove an element from the sublist. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L238-L296 | train |
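The util/sublist.go records above (createChannelsMapAndSublist's use of Insert, plus Match and Remove) describe a wildcard-aware subject index. The sketch below shows how such a sublist could be used; the import path and the Insert signature are inferred from the calls shown in these records rather than confirmed by this excerpt, and the subjects and element values are made up.

```go
package main

import (
	"fmt"

	"github.com/nats-io/nats-streaming-server/util" // assumed import path for the util package
)

func main() {
	sl := util.NewSublist()

	// Insert associates an arbitrary element with a subject; the records
	// above call Insert(subject, element) and ignore its result for
	// already-validated channel names. Wildcard subjects are supported.
	sl.Insert("orders.created", "literal-handler")
	sl.Insert("orders.*", "wildcard-handler")

	// Match takes a literal subject and returns every element whose
	// inserted subject matches it; results are cached internally.
	for _, e := range sl.Match("orders.created") {
		fmt.Println("matched:", e)
	}

	// Remove deletes a single element, prunes empty nodes, and invalidates
	// any cached results for that subject.
	if err := sl.Remove("orders.created", "literal-handler"); err != nil {
		fmt.Println("remove failed:", err)
	}
}
```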
nats-io/nats-streaming-server | util/sublist.go | removeFromList | func removeFromList(element interface{}, l []interface{}) ([]interface{}, bool) {
for i := 0; i < len(l); i++ {
if l[i] == element {
last := len(l) - 1
l[i] = l[last]
l[last] = nil
l = l[:last]
return shrinkAsNeeded(l), true
}
}
return l, false
} | go | // Removes an element from a list. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L336-L347 | train |
nats-io/nats-streaming-server | util/sublist.go | shrinkAsNeeded | func shrinkAsNeeded(l []interface{}) []interface{} {
ll := len(l)
cl := cap(l)
// Don't bother if list not too big
if cl <= 8 {
return l
}
pFree := float32(cl-ll) / float32(cl)
if pFree > 0.50 {
return append([]interface{}(nil), l...)
}
return l
} | go | // Checks if we need to do a resize. This is for very large growth then
// subsequent return to a more normal size from unsubscribe. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L360-L372 | train |
nats-io/nats-streaming-server | util/sublist.go | CacheCount | func (s *Sublist) CacheCount() int {
s.RLock()
defer s.RUnlock()
return len(s.cache)
} | go | // CacheCount returns the number of result sets in the cache. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L382-L386 | train |
nats-io/nats-streaming-server | server/server.go | startDeleteTimer | func (c *channel) startDeleteTimer() {
c.activity.last = time.Now()
c.resetDeleteTimer(c.activity.maxInactivity)
} | go | // Starts the delete timer that when firing will post
// a channel delete request to the ioLoop.
// The channelStore's mutex must be held on entry. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L453-L456 | train |
nats-io/nats-streaming-server | server/server.go | stopDeleteTimer | func (c *channel) stopDeleteTimer() {
if c.activity.timer != nil {
c.activity.timer.Stop()
c.activity.timerSet = false
if c.stan.debug {
c.stan.log.Debugf("Channel %q delete timer stopped", c.name)
}
}
} | go | // Stops the delete timer.
// The channelStore's mutex must be held on entry. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L460-L468 | train |
nats-io/nats-streaming-server | server/server.go | resetDeleteTimer | func (c *channel) resetDeleteTimer(newDuration time.Duration) {
a := c.activity
if a.timer == nil {
a.timer = time.AfterFunc(newDuration, func() {
c.stan.sendDeleteChannelRequest(c)
})
} else {
a.timer.Reset(newDuration)
}
if c.stan.debug {
c.stan.log.Debugf("Channel %q delete timer set to fire in %v", c.name, newDuration)
}
a.timerSet = true
} | go | // Resets the delete timer to the given duration.
// If the timer was not created, this call will create it.
// The channelStore's mutex must be held on entry. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L473-L486 | train |
nats-io/nats-streaming-server | server/server.go | pubMsgToMsgProto | func (c *channel) pubMsgToMsgProto(pm *pb.PubMsg, seq uint64) *pb.MsgProto {
m := &pb.MsgProto{
Sequence: seq,
Subject: pm.Subject,
Reply: pm.Reply,
Data: pm.Data,
Timestamp: time.Now().UnixNano(),
}
if c.lTimestamp > 0 && m.Timestamp < c.lTimestamp {
m.Timestamp = c.lTimestamp
}
c.lTimestamp = m.Timestamp
return m
} | go | // pubMsgToMsgProto converts a PubMsg to a MsgProto and assigns a timestamp
// which is monotonic with respect to the channel. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L490-L503 | train |
var (
msgBuf []byte
buf []byte
snapshotRestorePrefix = fmt.Sprintf("%s.%s.", defaultSnapshotPrefix, s.info.ClusterID)
prefixLen = len(snapshotRestorePrefix)
)
sub, err := s.ncsr.Subscribe(snapshotRestorePrefix+">", func(m *nats.Msg) {
if len(m.Data) != 16 {
s.log.Errorf("Invalid snapshot request, data len=%v", len(m.Data))
return
}
cname := m.Subject[prefixLen:]
c := s.channels.getIfNotAboutToBeDeleted(cname)
if c == nil {
s.ncsr.Publish(m.Reply, nil)
return
}
start := util.ByteOrder.Uint64(m.Data[:8])
end := util.ByteOrder.Uint64(m.Data[8:])
for seq := start; seq <= end; seq++ {
msg, err := c.store.Msgs.Lookup(seq)
if err != nil {
s.log.Errorf("Snapshot restore request error for channel %q, error looking up message %v: %v", c.name, seq, err)
return
}
if msg == nil {
// We don't have this message because of channel limits.
// Return nil to caller to signal this state.
buf = nil
} else {
msgBuf = util.EnsureBufBigEnough(msgBuf, msg.Size())
n, err := msg.MarshalTo(msgBuf)
if err != nil {
panic(err)
}
buf = msgBuf[:n]
}
if err := s.ncsr.Publish(m.Reply, buf); err != nil {
s.log.Errorf("Snapshot restore request error for channel %q, unable to send response for seq %v: %v", c.name, seq, err)
}
if buf == nil {
return
}
select {
case <-s.shutdownCh:
return
default:
}
}
})
if err != nil {
return err
}
sub.SetPendingLimits(-1, -1)
s.snapReqSub = sub
return nil
} | go | // Sets a subscription that will handle snapshot restore requests from followers. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L506-L564 | train |
nats-io/nats-streaming-server | server/server.go | lookupOrCreateChannel | func (s *StanServer) lookupOrCreateChannel(name string) (*channel, error) {
cs := s.channels
cs.RLock()
c := cs.channels[name]
if c != nil {
if c.activity != nil && c.activity.deleteInProgress {
cs.RUnlock()
return nil, ErrChanDelInProgress
}
cs.RUnlock()
return c, nil
}
cs.RUnlock()
return cs.createChannel(s, name)
} | go | // Looks up, or create a new channel if it does not exist | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L746-L760 | train |
nats-io/nats-streaming-server | server/server.go | createSubStore | func (s *StanServer) createSubStore() *subStore {
subs := &subStore{
psubs: make([]*subState, 0, 4),
qsubs: make(map[string]*queueState),
durables: make(map[string]*subState),
acks: make(map[string]*subState),
stan: s,
}
return subs
} | go | // createSubStore creates a new instance of `subStore`. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L788-L797 | train |
nats-io/nats-streaming-server | server/server.go | Store | func (ss *subStore) Store(sub *subState) error {
if sub == nil {
return nil
}
// Adds to storage.
// Use sub lock to avoid race with waitForAcks in some tests
sub.Lock()
err := sub.store.CreateSub(&sub.SubState)
sub.Unlock()
if err == nil {
err = sub.store.Flush()
}
if err != nil {
ss.stan.log.Errorf("Unable to store subscription [%v:%v] on [%s]: %v", sub.ClientID, sub.Inbox, sub.subject, err)
return err
}
ss.Lock()
ss.updateState(sub)
ss.Unlock()
return nil
} | go | // Store adds this subscription to the server's `subStore` and also in storage | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L800-L823 | train |
nats-io/nats-streaming-server | server/server.go | hasActiveSubs | func (ss *subStore) hasActiveSubs() bool {
ss.RLock()
defer ss.RUnlock()
if len(ss.psubs) > 0 {
return true
}
for _, qsub := range ss.qsubs {
// For a durable queue group, when the group is offline,
// qsub.shadow is not nil, but the qsub.subs array should be
// empty.
if len(qsub.subs) > 0 {
return true
}
}
return false
} | go | // hasSubs returns true if there is any active subscription for this subStore.
// That is, offline durable subscriptions are ignored. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L907-L922 | train |
nats-io/nats-streaming-server | server/server.go | LookupByDurable | func (ss *subStore) LookupByDurable(durableName string) *subState {
ss.RLock()
sub := ss.durables[durableName]
ss.RUnlock()
return sub
} | go | // Lookup by durable name. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L1138-L1143 | train |
nats-io/nats-streaming-server | server/server.go | LookupByAckInbox | func (ss *subStore) LookupByAckInbox(ackInbox string) *subState {
ss.RLock()
sub := ss.acks[ackInbox]
ss.RUnlock()
return sub
} | go | // Lookup by ackInbox name. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L1146-L1151 | train |
nats-io/nats-streaming-server | server/server.go | Clone | func (o *Options) Clone() *Options {
// A simple copy covers pretty much everything
clone := *o
// But we have the problem of the PerChannel map that needs
// to be copied.
clone.PerChannel = (&o.StoreLimits).ClonePerChannelMap()
// Make a copy of the clustering peers
if len(o.Clustering.Peers) > 0 {
clone.Clustering.Peers = make([]string, 0, len(o.Clustering.Peers))
clone.Clustering.Peers = append(clone.Clustering.Peers, o.Clustering.Peers...)
}
return &clone
} | go | // Clone returns a deep copy of the Options object. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L1188-L1200 | train |
nats-io/nats-streaming-server | server/server.go | GetDefaultOptions | func GetDefaultOptions() (o *Options) {
opts := defaultOptions
opts.StoreLimits = stores.DefaultStoreLimits
return &opts
} | go | // GetDefaultOptions returns default options for the NATS Streaming Server | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L1217-L1221 | train |
nats-io/nats-streaming-server | server/server.go | RunServer | func RunServer(ID string) (*StanServer, error) {
sOpts := GetDefaultOptions()
sOpts.ID = ID
nOpts := DefaultNatsServerOptions
return RunServerWithOpts(sOpts, &nOpts)
} | go | // RunServer will startup an embedded NATS Streaming Server and a nats-server to support it. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L1411-L1416 | train |
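A minimal embedding sketch for the entry points documented in the preceding records (GetDefaultOptions, DefaultNatsServerOptions, RunServer, RunServerWithOpts). It is illustrative only: the import alias and the Shutdown call follow the project's conventional embedding pattern and are assumptions here, not fields of these records.

package main

import (
	"log"

	stand "github.com/nats-io/nats-streaming-server/server"
)

func main() {
	// Build options from the defaults shown above, then start the embedded server.
	sOpts := stand.GetDefaultOptions()
	sOpts.ID = "my-cluster"
	nOpts := stand.DefaultNatsServerOptions

	s, err := stand.RunServerWithOpts(sOpts, &nOpts)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Shutdown() // Shutdown is assumed here; it is not shown in the records above.

	// stand.RunServer("my-cluster") is the one-call equivalent with all defaults.
}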
nats-io/nats-streaming-server | server/server.go | startRaftNode | func (s *StanServer) startRaftNode(hasStreamingState bool) error {
if err := s.createServerRaftNode(hasStreamingState); err != nil {
return err
}
node := s.raft
leaderWait := make(chan struct{}, 1)
leaderReady := func() {
select {
case leaderWait <- struct{}{}:
default:
}
}
if node.State() != raft.Leader {
leaderReady()
}
s.wg.Add(1)
go func() {
defer s.wg.Done()
for {
select {
case isLeader := <-node.notifyCh:
if isLeader {
err := s.leadershipAcquired()
leaderReady()
if err != nil {
s.log.Errorf("Error on leadership acquired: %v", err)
switch {
case err == raft.ErrRaftShutdown:
// Node shutdown, just return.
return
case err == raft.ErrLeadershipLost:
case err == raft.ErrNotLeader:
// Node lost leadership, continue loop.
continue
default:
// TODO: probably step down as leader?
panic(err)
}
}
} else {
s.leadershipLost()
}
case <-s.shutdownCh:
// Signal channel here to handle edge case where we might
// otherwise block forever on the channel when shutdown.
leaderReady()
return
}
}
}()
<-leaderWait
return nil
} | go | // startRaftNode creates and starts the Raft group.
// This should only be called if the server is running in clustered mode. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L1878-L1933 | train |
nats-io/nats-streaming-server | server/server.go | leadershipAcquired | func (s *StanServer) leadershipAcquired() error {
s.log.Noticef("server became leader, performing leader promotion actions")
defer s.log.Noticef("finished leader promotion actions")
// If we were not the leader, there should be nothing in the ioChannel
// (processing of client publishes). However, since a node could go
// from leader to follower to leader again, let's make sure that we
// synchronize with the ioLoop before we touch the channels' nextSequence.
sc, sdc := s.sendSynchronziationRequest()
// Wait for the ioLoop to reach that special iopm and notifies us (or
// give up if server is shutting down).
select {
case <-sc:
case <-s.ioChannelQuit:
close(sdc)
return nil
}
// Then, we will notify it back to unlock it when were are done here.
defer close(sdc)
// Start listening to snapshot restore requests here...
if err := s.subToSnapshotRestoreRequests(); err != nil {
return err
}
// Use a barrier to ensure all preceding operations are applied to the FSM
if err := s.raft.Barrier(0).Error(); err != nil {
return err
}
channels := s.channels.getAll()
for _, c := range channels {
// Update next sequence to assign.
lastSequence, err := c.store.Msgs.LastSequence()
if err != nil {
return err
}
// It is possible that nextSequence be set when restoring
// from snapshots. Set it to the max value.
if c.nextSequence <= lastSequence {
c.nextSequence = lastSequence + 1
}
}
// Setup client heartbeats and subscribe to acks for each sub.
for _, client := range s.clients.getClients() {
client.RLock()
cID := client.info.ID
for _, sub := range client.subs {
if err := sub.startAckSub(s.nca, s.processAckMsg); err != nil {
client.RUnlock()
return err
}
}
client.RUnlock()
s.clients.setClientHB(cID, s.opts.ClientHBInterval, func() {
s.checkClientHealth(cID)
})
}
// Start the internal subscriptions so we receive protocols from clients.
if err := s.initInternalSubs(true); err != nil {
return err
}
var allSubs []*subState
for _, c := range channels {
subs := c.ss.getAllSubs()
if len(subs) > 0 {
allSubs = append(allSubs, subs...)
}
if c.activity != nil {
s.channels.maybeStartChannelDeleteTimer(c.name, c)
}
}
if len(allSubs) > 0 {
s.startGoRoutine(func() {
s.performRedeliveryOnStartup(allSubs)
s.wg.Done()
})
}
if err := s.nc.Flush(); err != nil {
return err
}
if err := s.nca.Flush(); err != nil {
return err
}
atomic.StoreInt64(&s.raft.leader, 1)
return nil
} | go | // leadershipAcquired should be called when this node is elected leader.
// This should only be called when the server is running in clustered mode. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L1945-L2037 | train |
nats-io/nats-streaming-server | server/server.go | leadershipLost | func (s *StanServer) leadershipLost() {
s.log.Noticef("server lost leadership, performing leader stepdown actions")
defer s.log.Noticef("finished leader stepdown actions")
// Cancel outstanding client heartbeats. We aren't concerned about races
// where new clients might be connecting because at this point, the server
// will no longer accept new client connections, but even if it did, the
// heartbeat would be automatically removed when it fires.
for _, client := range s.clients.getClients() {
s.clients.removeClientHB(client)
// Ensure subs ackTimer is stopped
subs := client.getSubsCopy()
for _, sub := range subs {
sub.Lock()
sub.stopAckSub()
sub.clearAckTimer()
s.clearSentAndAck(sub)
sub.Unlock()
}
}
// Unsubscribe to the snapshot request per channel since we are no longer
// leader.
for _, c := range s.channels.getAll() {
if c.activity != nil {
s.channels.stopDeleteTimer(c)
}
}
// Only the leader will receive protocols from clients
s.unsubscribeInternalSubs()
atomic.StoreInt64(&s.raft.leader, 0)
} | go | // leadershipLost should be called when this node loses leadership.
// This should only be called when the server is running in clustered mode. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2041-L2074 | train |
nats-io/nats-streaming-server | server/server.go | ensureRunningStandAlone | func (s *StanServer) ensureRunningStandAlone() error {
clusterID := s.info.ClusterID
hbInbox := nats.NewInbox()
timeout := time.Millisecond * 250
// We cannot use the client's API here as it will create a dependency
// cycle in the streaming client, so build our request and see if we
// get a response.
req := &pb.ConnectRequest{ClientID: clusterID, HeartbeatInbox: hbInbox}
b, _ := req.Marshal()
reply, err := s.nc.Request(s.info.Discovery, b, timeout)
if err == nats.ErrTimeout {
s.log.Debugf("Did not detect another server instance")
return nil
}
if err != nil {
return fmt.Errorf("request error detecting another server instance: %v", err)
}
// See if the response is valid and can be unmarshalled.
cr := &pb.ConnectResponse{}
err = cr.Unmarshal(reply.Data)
if err != nil {
// Something other than a compatible streaming server responded.
// This may cause other problems in the long run, so better fail
// the startup early.
return fmt.Errorf("unmarshall error while detecting another server instance: %v", err)
}
// Another streaming server was found, cleanup then return error.
clreq := &pb.CloseRequest{ClientID: clusterID}
b, _ = clreq.Marshal()
s.nc.Request(cr.CloseRequests, b, timeout)
return fmt.Errorf("discovered another streaming server with cluster ID %q", clusterID)
} | go | // ensureRunningStandAlone prevents this streaming server from starting
// if another is found using the same cluster ID - a possibility when
// routing is enabled.
// This runs under sever's lock so nothing should grab the server lock here. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2200-L2232 | train |
nats-io/nats-streaming-server | server/server.go | processRecoveredClients | func (s *StanServer) processRecoveredClients(clients []*stores.Client) {
if !s.isClustered {
s.clients.recoverClients(clients)
}
} | go | // Binds server's view of a client with stored Client objects. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2235-L2239 | train |
nats-io/nats-streaming-server | server/server.go | processRecoveredChannels | func (s *StanServer) processRecoveredChannels(channels map[string]*stores.RecoveredChannel) ([]*subState, error) {
allSubs := make([]*subState, 0, 16)
for channelName, recoveredChannel := range channels {
channel, err := s.channels.create(s, channelName, recoveredChannel.Channel)
if err != nil {
return nil, err
}
if !s.isClustered {
// Get the recovered subscriptions for this channel.
for _, recSub := range recoveredChannel.Subscriptions {
sub := s.recoverOneSub(channel, recSub.Sub, recSub.Pending, nil)
if sub != nil {
// Subscribe to subscription ACKs
if err := sub.startAckSub(s.nca, s.processAckMsg); err != nil {
return nil, err
}
allSubs = append(allSubs, sub)
}
}
// Now that we have recovered possible subscriptions for this channel,
// check if we should start the delete timer.
if channel.activity != nil {
s.channels.maybeStartChannelDeleteTimer(channelName, channel)
}
}
}
return allSubs, nil
} | go | // Reconstruct the subscription state on restart. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2242-L2270 | train |
nats-io/nats-streaming-server | server/server.go | performRedeliveryOnStartup | func (s *StanServer) performRedeliveryOnStartup(recoveredSubs []*subState) {
queues := make(map[*queueState]*channel)
for _, sub := range recoveredSubs {
// Ignore subs that did not have any ack pendings on startup.
sub.Lock()
// Consider this subscription ready to receive messages
sub.initialized = true
// If this is a durable and it is offline, then skip the rest.
if sub.isOfflineDurableSubscriber() {
sub.newOnHold = false
sub.Unlock()
continue
}
// Unlock in order to call function below
sub.Unlock()
// Send old messages (lock is acquired in that function)
s.performAckExpirationRedelivery(sub, true)
// Regrab lock
sub.Lock()
// Allow new messages to be delivered
sub.newOnHold = false
subject := sub.subject
qs := sub.qstate
sub.Unlock()
c := s.channels.get(subject)
if c == nil {
continue
}
// Kick delivery of (possible) new messages
if qs != nil {
queues[qs] = c
} else {
s.sendAvailableMessages(c, sub)
}
}
// Kick delivery for queues that had members with newOnHold
for qs, c := range queues {
qs.Lock()
qs.newOnHold = false
qs.Unlock()
s.sendAvailableMessagesToQueue(c, qs)
}
} | go | // Redelivers unacknowledged messages, releases the hold for new messages delivery,
// and kicks delivery of available messages. | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2389-L2432 | train |
nats-io/nats-streaming-server | server/server.go | initSubscriptions | func (s *StanServer) initSubscriptions() error {
// Do not create internal subscriptions in clustered mode,
// the leader will when it gets elected.
if !s.isClustered {
createSubOnClientPublish := true
if s.partitions != nil {
// Receive published messages from clients, but only on the list
// of static channels.
if err := s.partitions.initSubscriptions(); err != nil {
return err
}
// Since we create a subscription per channel, do not create
// the internal subscription on the > wildcard
createSubOnClientPublish = false
}
if err := s.initInternalSubs(createSubOnClientPublish); err != nil {
return err
}
}
s.log.Debugf("Discover subject: %s", s.info.Discovery)
// For partitions, we actually print the list of channels
// in the startup banner, so we don't need to repeat them here.
if s.partitions != nil {
s.log.Debugf("Publish subjects root: %s", s.info.Publish)
} else {
s.log.Debugf("Publish subject: %s.>", s.info.Publish)
}
s.log.Debugf("Subscribe subject: %s", s.info.Subscribe)
s.log.Debugf("Subscription Close subject: %s", s.info.SubClose)
s.log.Debugf("Unsubscribe subject: %s", s.info.Unsubscribe)
s.log.Debugf("Close subject: %s", s.info.Close)
return nil
} | go | func (s *StanServer) initSubscriptions() error {
// Do not create internal subscriptions in clustered mode,
// the leader will when it gets elected.
if !s.isClustered {
createSubOnClientPublish := true
if s.partitions != nil {
// Receive published messages from clients, but only on the list
// of static channels.
if err := s.partitions.initSubscriptions(); err != nil {
return err
}
// Since we create a subscription per channel, do not create
// the internal subscription on the > wildcard
createSubOnClientPublish = false
}
if err := s.initInternalSubs(createSubOnClientPublish); err != nil {
return err
}
}
s.log.Debugf("Discover subject: %s", s.info.Discovery)
// For partitions, we actually print the list of channels
// in the startup banner, so we don't need to repeat them here.
if s.partitions != nil {
s.log.Debugf("Publish subjects root: %s", s.info.Publish)
} else {
s.log.Debugf("Publish subject: %s.>", s.info.Publish)
}
s.log.Debugf("Subscribe subject: %s", s.info.Subscribe)
s.log.Debugf("Subscription Close subject: %s", s.info.SubClose)
s.log.Debugf("Unsubscribe subject: %s", s.info.Unsubscribe)
s.log.Debugf("Close subject: %s", s.info.Close)
return nil
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"initSubscriptions",
"(",
")",
"error",
"{",
"// Do not create internal subscriptions in clustered mode,",
"// the leader will when it gets elected.",
"if",
"!",
"s",
".",
"isClustered",
"{",
"createSubOnClientPublish",
":=",
"true",
"\n\n",
"if",
"s",
".",
"partitions",
"!=",
"nil",
"{",
"// Receive published messages from clients, but only on the list",
"// of static channels.",
"if",
"err",
":=",
"s",
".",
"partitions",
".",
"initSubscriptions",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"// Since we create a subscription per channel, do not create",
"// the internal subscription on the > wildcard",
"createSubOnClientPublish",
"=",
"false",
"\n",
"}",
"\n\n",
"if",
"err",
":=",
"s",
".",
"initInternalSubs",
"(",
"createSubOnClientPublish",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n\n",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"s",
".",
"info",
".",
"Discovery",
")",
"\n",
"// For partitions, we actually print the list of channels",
"// in the startup banner, so we don't need to repeat them here.",
"if",
"s",
".",
"partitions",
"!=",
"nil",
"{",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"s",
".",
"info",
".",
"Publish",
")",
"\n",
"}",
"else",
"{",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"s",
".",
"info",
".",
"Publish",
")",
"\n",
"}",
"\n",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"s",
".",
"info",
".",
"Subscribe",
")",
"\n",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"s",
".",
"info",
".",
"SubClose",
")",
"\n",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"s",
".",
"info",
".",
"Unsubscribe",
")",
"\n",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"s",
".",
"info",
".",
"Close",
")",
"\n",
"return",
"nil",
"\n",
"}"
] | // initSubscriptions will set up initial subscriptions for discovery etc. | [
"initSubscriptions",
"will",
"setup",
"initial",
"subscriptions",
"for",
"discovery",
"etc",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2435-L2471 | train |
nats-io/nats-streaming-server | server/server.go | connectCB | func (s *StanServer) connectCB(m *nats.Msg) {
req := &pb.ConnectRequest{}
err := req.Unmarshal(m.Data)
if err != nil || req.HeartbeatInbox == "" {
s.log.Errorf("[Client:?] Invalid conn request: ClientID=%s, Inbox=%s, err=%v",
req.ClientID, req.HeartbeatInbox, err)
s.sendConnectErr(m.Reply, ErrInvalidConnReq.Error())
return
}
if !clientIDRegEx.MatchString(req.ClientID) {
s.log.Errorf("[Client:%s] Invalid ClientID, only alphanumeric and `-` or `_` characters allowed", req.ClientID)
s.sendConnectErr(m.Reply, ErrInvalidClientID.Error())
return
}
// If the client ID is already registered, check to see if it's the case
// that the client refreshed (e.g. it crashed and came back) or if the
// connection is a duplicate. If it refreshed, we will close the old
// client and open a new one.
client := s.clients.lookup(req.ClientID)
if client != nil {
// When detecting a duplicate, the processing of the connect request
// is going to be processed in a go-routine. We need however to keep
// track and fail another request on the same client ID until the
// current one has finished.
s.cliDupCIDsMu.Lock()
if _, exists := s.cliDipCIDsMap[req.ClientID]; exists {
s.cliDupCIDsMu.Unlock()
s.log.Debugf("[Client:%s] Connect failed; already connected", req.ClientID)
s.sendConnectErr(m.Reply, ErrInvalidClient.Error())
return
}
s.cliDipCIDsMap[req.ClientID] = struct{}{}
s.cliDupCIDsMu.Unlock()
s.startGoRoutine(func() {
defer s.wg.Done()
isDup := false
if s.isDuplicateConnect(client) {
s.log.Debugf("[Client:%s] Connect failed; already connected", req.ClientID)
s.sendConnectErr(m.Reply, ErrInvalidClient.Error())
isDup = true
}
s.cliDupCIDsMu.Lock()
if !isDup {
s.handleConnect(req, m, true)
}
delete(s.cliDipCIDsMap, req.ClientID)
s.cliDupCIDsMu.Unlock()
})
return
}
s.cliDupCIDsMu.Lock()
s.handleConnect(req, m, false)
s.cliDupCIDsMu.Unlock()
} | go | func (s *StanServer) connectCB(m *nats.Msg) {
req := &pb.ConnectRequest{}
err := req.Unmarshal(m.Data)
if err != nil || req.HeartbeatInbox == "" {
s.log.Errorf("[Client:?] Invalid conn request: ClientID=%s, Inbox=%s, err=%v",
req.ClientID, req.HeartbeatInbox, err)
s.sendConnectErr(m.Reply, ErrInvalidConnReq.Error())
return
}
if !clientIDRegEx.MatchString(req.ClientID) {
s.log.Errorf("[Client:%s] Invalid ClientID, only alphanumeric and `-` or `_` characters allowed", req.ClientID)
s.sendConnectErr(m.Reply, ErrInvalidClientID.Error())
return
}
// If the client ID is already registered, check to see if it's the case
// that the client refreshed (e.g. it crashed and came back) or if the
// connection is a duplicate. If it refreshed, we will close the old
// client and open a new one.
client := s.clients.lookup(req.ClientID)
if client != nil {
// When detecting a duplicate, the processing of the connect request
// is going to be processed in a go-routine. We need however to keep
// track and fail another request on the same client ID until the
// current one has finished.
s.cliDupCIDsMu.Lock()
if _, exists := s.cliDipCIDsMap[req.ClientID]; exists {
s.cliDupCIDsMu.Unlock()
s.log.Debugf("[Client:%s] Connect failed; already connected", req.ClientID)
s.sendConnectErr(m.Reply, ErrInvalidClient.Error())
return
}
s.cliDipCIDsMap[req.ClientID] = struct{}{}
s.cliDupCIDsMu.Unlock()
s.startGoRoutine(func() {
defer s.wg.Done()
isDup := false
if s.isDuplicateConnect(client) {
s.log.Debugf("[Client:%s] Connect failed; already connected", req.ClientID)
s.sendConnectErr(m.Reply, ErrInvalidClient.Error())
isDup = true
}
s.cliDupCIDsMu.Lock()
if !isDup {
s.handleConnect(req, m, true)
}
delete(s.cliDipCIDsMap, req.ClientID)
s.cliDupCIDsMu.Unlock()
})
return
}
s.cliDupCIDsMu.Lock()
s.handleConnect(req, m, false)
s.cliDupCIDsMu.Unlock()
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"connectCB",
"(",
"m",
"*",
"nats",
".",
"Msg",
")",
"{",
"req",
":=",
"&",
"pb",
".",
"ConnectRequest",
"{",
"}",
"\n",
"err",
":=",
"req",
".",
"Unmarshal",
"(",
"m",
".",
"Data",
")",
"\n",
"if",
"err",
"!=",
"nil",
"||",
"req",
".",
"HeartbeatInbox",
"==",
"\"",
"\"",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"req",
".",
"ClientID",
",",
"req",
".",
"HeartbeatInbox",
",",
"err",
")",
"\n",
"s",
".",
"sendConnectErr",
"(",
"m",
".",
"Reply",
",",
"ErrInvalidConnReq",
".",
"Error",
"(",
")",
")",
"\n",
"return",
"\n",
"}",
"\n",
"if",
"!",
"clientIDRegEx",
".",
"MatchString",
"(",
"req",
".",
"ClientID",
")",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"req",
".",
"ClientID",
")",
"\n",
"s",
".",
"sendConnectErr",
"(",
"m",
".",
"Reply",
",",
"ErrInvalidClientID",
".",
"Error",
"(",
")",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"// If the client ID is already registered, check to see if it's the case",
"// that the client refreshed (e.g. it crashed and came back) or if the",
"// connection is a duplicate. If it refreshed, we will close the old",
"// client and open a new one.",
"client",
":=",
"s",
".",
"clients",
".",
"lookup",
"(",
"req",
".",
"ClientID",
")",
"\n",
"if",
"client",
"!=",
"nil",
"{",
"// When detecting a duplicate, the processing of the connect request",
"// is going to be processed in a go-routine. We need however to keep",
"// track and fail another request on the same client ID until the",
"// current one has finished.",
"s",
".",
"cliDupCIDsMu",
".",
"Lock",
"(",
")",
"\n",
"if",
"_",
",",
"exists",
":=",
"s",
".",
"cliDipCIDsMap",
"[",
"req",
".",
"ClientID",
"]",
";",
"exists",
"{",
"s",
".",
"cliDupCIDsMu",
".",
"Unlock",
"(",
")",
"\n",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"req",
".",
"ClientID",
")",
"\n",
"s",
".",
"sendConnectErr",
"(",
"m",
".",
"Reply",
",",
"ErrInvalidClient",
".",
"Error",
"(",
")",
")",
"\n",
"return",
"\n",
"}",
"\n",
"s",
".",
"cliDipCIDsMap",
"[",
"req",
".",
"ClientID",
"]",
"=",
"struct",
"{",
"}",
"{",
"}",
"\n",
"s",
".",
"cliDupCIDsMu",
".",
"Unlock",
"(",
")",
"\n\n",
"s",
".",
"startGoRoutine",
"(",
"func",
"(",
")",
"{",
"defer",
"s",
".",
"wg",
".",
"Done",
"(",
")",
"\n",
"isDup",
":=",
"false",
"\n",
"if",
"s",
".",
"isDuplicateConnect",
"(",
"client",
")",
"{",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"req",
".",
"ClientID",
")",
"\n",
"s",
".",
"sendConnectErr",
"(",
"m",
".",
"Reply",
",",
"ErrInvalidClient",
".",
"Error",
"(",
")",
")",
"\n",
"isDup",
"=",
"true",
"\n",
"}",
"\n",
"s",
".",
"cliDupCIDsMu",
".",
"Lock",
"(",
")",
"\n",
"if",
"!",
"isDup",
"{",
"s",
".",
"handleConnect",
"(",
"req",
",",
"m",
",",
"true",
")",
"\n",
"}",
"\n",
"delete",
"(",
"s",
".",
"cliDipCIDsMap",
",",
"req",
".",
"ClientID",
")",
"\n",
"s",
".",
"cliDupCIDsMu",
".",
"Unlock",
"(",
")",
"\n",
"}",
")",
"\n",
"return",
"\n",
"}",
"\n",
"s",
".",
"cliDupCIDsMu",
".",
"Lock",
"(",
")",
"\n",
"s",
".",
"handleConnect",
"(",
"req",
",",
"m",
",",
"false",
")",
"\n",
"s",
".",
"cliDupCIDsMu",
".",
"Unlock",
"(",
")",
"\n",
"}"
] | // Process a client connect request | [
"Process",
"a",
"client",
"connect",
"request"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2558-L2613 | train |
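The connect handler above rejects a second connect request for a client ID while an earlier one is still being resolved, by parking the ID in a mutex-protected map and removing it once the duplicate check finishes. A self-contained sketch of that in-flight guard; the type and method names here are illustrative, not the server's:

package main

import (
	"fmt"
	"sync"
)

type inflightGuard struct {
	mu  sync.Mutex
	ids map[string]struct{}
}

// begin reports whether work for id may start. When it returns true,
// the caller must call done(id) once the request has been resolved.
func (g *inflightGuard) begin(id string) bool {
	g.mu.Lock()
	defer g.mu.Unlock()
	if _, busy := g.ids[id]; busy {
		return false
	}
	g.ids[id] = struct{}{}
	return true
}

func (g *inflightGuard) done(id string) {
	g.mu.Lock()
	delete(g.ids, id)
	g.mu.Unlock()
}

func main() {
	g := &inflightGuard{ids: make(map[string]struct{})}
	fmt.Println(g.begin("client-1")) // true: first request proceeds
	fmt.Println(g.begin("client-1")) // false: duplicate while still in flight
	g.done("client-1")
	fmt.Println(g.begin("client-1")) // true again once the first one resolved
}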
nats-io/nats-streaming-server | server/server.go | isDuplicateConnect | func (s *StanServer) isDuplicateConnect(client *client) bool {
client.RLock()
hbInbox := client.info.HbInbox
client.RUnlock()
// This is the HbInbox from the "old" client. See if it is up and
// running by sending a ping to that inbox.
_, err := s.nc.Request(hbInbox, nil, s.dupCIDTimeout)
// If err is nil, the currently registered client responded, so this is a
// duplicate.
return err == nil
} | go | func (s *StanServer) isDuplicateConnect(client *client) bool {
client.RLock()
hbInbox := client.info.HbInbox
client.RUnlock()
// This is the HbInbox from the "old" client. See if it is up and
// running by sending a ping to that inbox.
_, err := s.nc.Request(hbInbox, nil, s.dupCIDTimeout)
// If err is nil, the currently registered client responded, so this is a
// duplicate.
return err == nil
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"isDuplicateConnect",
"(",
"client",
"*",
"client",
")",
"bool",
"{",
"client",
".",
"RLock",
"(",
")",
"\n",
"hbInbox",
":=",
"client",
".",
"info",
".",
"HbInbox",
"\n",
"client",
".",
"RUnlock",
"(",
")",
"\n\n",
"// This is the HbInbox from the \"old\" client. See if it is up and",
"// running by sending a ping to that inbox.",
"_",
",",
"err",
":=",
"s",
".",
"nc",
".",
"Request",
"(",
"hbInbox",
",",
"nil",
",",
"s",
".",
"dupCIDTimeout",
")",
"\n\n",
"// If err is nil, the currently registered client responded, so this is a",
"// duplicate.",
"return",
"err",
"==",
"nil",
"\n",
"}"
] | // isDuplicateConnect determines if the given client ID is a duplicate
// connection by pinging the old client's heartbeat inbox and checking if it
// responds. If it does, it's a duplicate connection. | [
"isDuplicateConnect",
"determines",
"if",
"the",
"given",
"client",
"ID",
"is",
"a",
"duplicate",
"connection",
"by",
"pinging",
"the",
"old",
"client",
"s",
"heartbeat",
"inbox",
"and",
"checking",
"if",
"it",
"responds",
".",
"If",
"it",
"does",
"it",
"s",
"a",
"duplicate",
"connection",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2638-L2650 | train |
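The probe above treats "the previously registered client still answers a request on its heartbeat inbox" as proof that the new connect is a duplicate. A stripped-down version of that check, assuming the github.com/nats-io/nats.go client (older code imports it as github.com/nats-io/go-nats) and a locally running NATS server; the inbox subject below is made up for illustration:

package main

import (
	"fmt"
	"time"

	"github.com/nats-io/nats.go"
)

// stillAlive pings the old client's heartbeat inbox. Any reply within
// the timeout means the old connection is alive, so a new connect with
// the same client ID should be rejected as a duplicate.
func stillAlive(nc *nats.Conn, hbInbox string, timeout time.Duration) bool {
	_, err := nc.Request(hbInbox, nil, timeout)
	return err == nil
}

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		fmt.Println("connect:", err)
		return
	}
	defer nc.Close()
	// With nobody listening on this inbox the request times out,
	// which the caller interprets as "not a duplicate".
	fmt.Println("duplicate?", stillAlive(nc, "_INBOX.example.hb", 250*time.Millisecond))
}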
nats-io/nats-streaming-server | server/server.go | replicateDeleteChannel | func (s *StanServer) replicateDeleteChannel(channel string) {
op := &spb.RaftOperation{
OpType: spb.RaftOperation_DeleteChannel,
Channel: channel,
}
data, err := op.Marshal()
if err != nil {
panic(err)
}
// Wait on result of replication.
if err = s.raft.Apply(data, 0).Error(); err != nil {
// If we have lost leadership, clear the deleteInProgress flag.
cs := s.channels
cs.Lock()
c := cs.channels[channel]
if c != nil && c.activity != nil {
c.activity.deleteInProgress = false
}
cs.Unlock()
}
} | go | func (s *StanServer) replicateDeleteChannel(channel string) {
op := &spb.RaftOperation{
OpType: spb.RaftOperation_DeleteChannel,
Channel: channel,
}
data, err := op.Marshal()
if err != nil {
panic(err)
}
// Wait on result of replication.
if err = s.raft.Apply(data, 0).Error(); err != nil {
// If we have lost leadership, clear the deleteInProgress flag.
cs := s.channels
cs.Lock()
c := cs.channels[channel]
if c != nil && c.activity != nil {
c.activity.deleteInProgress = false
}
cs.Unlock()
}
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"replicateDeleteChannel",
"(",
"channel",
"string",
")",
"{",
"op",
":=",
"&",
"spb",
".",
"RaftOperation",
"{",
"OpType",
":",
"spb",
".",
"RaftOperation_DeleteChannel",
",",
"Channel",
":",
"channel",
",",
"}",
"\n",
"data",
",",
"err",
":=",
"op",
".",
"Marshal",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"panic",
"(",
"err",
")",
"\n",
"}",
"\n",
"// Wait on result of replication.",
"if",
"err",
"=",
"s",
".",
"raft",
".",
"Apply",
"(",
"data",
",",
"0",
")",
".",
"Error",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"// If we have lost leadership, clear the deleteInProgress flag.",
"cs",
":=",
"s",
".",
"channels",
"\n",
"cs",
".",
"Lock",
"(",
")",
"\n",
"c",
":=",
"cs",
".",
"channels",
"[",
"channel",
"]",
"\n",
"if",
"c",
"!=",
"nil",
"&&",
"c",
".",
"activity",
"!=",
"nil",
"{",
"c",
".",
"activity",
".",
"deleteInProgress",
"=",
"false",
"\n",
"}",
"\n",
"cs",
".",
"Unlock",
"(",
")",
"\n",
"}",
"\n",
"}"
] | // Leader invokes this to replicate the command to delete a channel. | [
"Leader",
"invokes",
"this",
"to",
"replicate",
"the",
"command",
"to",
"delete",
"a",
"channel",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2672-L2692 | train |
nats-io/nats-streaming-server | server/server.go | handleChannelDelete | func (s *StanServer) handleChannelDelete(c *channel) {
delete := false
cs := s.channels
cs.Lock()
a := c.activity
if a.preventDelete || a.deleteInProgress || c.ss.hasActiveSubs() {
if s.debug {
s.log.Debugf("Channel %q cannot be deleted: preventDelete=%v inProgress=%v hasActiveSubs=%v",
c.name, a.preventDelete, a.deleteInProgress, c.ss.hasActiveSubs())
}
c.stopDeleteTimer()
} else {
elapsed := time.Since(a.last)
if elapsed >= a.maxInactivity {
if s.debug {
s.log.Debugf("Channel %q is being deleted", c.name)
}
c.stopDeleteTimer()
// Leave in map for now, but mark as deleted. If we removed before
// completion of the removal, a new lookup could re-create while
// in the process of deleting it.
a.deleteInProgress = true
delete = true
} else {
var next time.Duration
if elapsed < 0 {
next = a.maxInactivity
} else {
// elapsed < a.maxInactivity
next = a.maxInactivity - elapsed
}
if s.debug {
s.log.Debugf("Channel %q cannot be deleted now, reset timer to fire in %v",
c.name, next)
}
c.resetDeleteTimer(next)
}
}
cs.Unlock()
if delete {
if testDeleteChannel {
time.Sleep(time.Second)
}
if s.isClustered {
s.replicateDeleteChannel(c.name)
} else {
s.processDeleteChannel(c.name)
}
}
} | go | func (s *StanServer) handleChannelDelete(c *channel) {
delete := false
cs := s.channels
cs.Lock()
a := c.activity
if a.preventDelete || a.deleteInProgress || c.ss.hasActiveSubs() {
if s.debug {
s.log.Debugf("Channel %q cannot be deleted: preventDelete=%v inProgress=%v hasActiveSubs=%v",
c.name, a.preventDelete, a.deleteInProgress, c.ss.hasActiveSubs())
}
c.stopDeleteTimer()
} else {
elapsed := time.Since(a.last)
if elapsed >= a.maxInactivity {
if s.debug {
s.log.Debugf("Channel %q is being deleted", c.name)
}
c.stopDeleteTimer()
// Leave in map for now, but mark as deleted. If we removed before
// completion of the removal, a new lookup could re-create while
// in the process of deleting it.
a.deleteInProgress = true
delete = true
} else {
var next time.Duration
if elapsed < 0 {
next = a.maxInactivity
} else {
// elapsed < a.maxInactivity
next = a.maxInactivity - elapsed
}
if s.debug {
s.log.Debugf("Channel %q cannot be deleted now, reset timer to fire in %v",
c.name, next)
}
c.resetDeleteTimer(next)
}
}
cs.Unlock()
if delete {
if testDeleteChannel {
time.Sleep(time.Second)
}
if s.isClustered {
s.replicateDeleteChannel(c.name)
} else {
s.processDeleteChannel(c.name)
}
}
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"handleChannelDelete",
"(",
"c",
"*",
"channel",
")",
"{",
"delete",
":=",
"false",
"\n",
"cs",
":=",
"s",
".",
"channels",
"\n",
"cs",
".",
"Lock",
"(",
")",
"\n",
"a",
":=",
"c",
".",
"activity",
"\n",
"if",
"a",
".",
"preventDelete",
"||",
"a",
".",
"deleteInProgress",
"||",
"c",
".",
"ss",
".",
"hasActiveSubs",
"(",
")",
"{",
"if",
"s",
".",
"debug",
"{",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"c",
".",
"name",
",",
"a",
".",
"preventDelete",
",",
"a",
".",
"deleteInProgress",
",",
"c",
".",
"ss",
".",
"hasActiveSubs",
"(",
")",
")",
"\n",
"}",
"\n",
"c",
".",
"stopDeleteTimer",
"(",
")",
"\n",
"}",
"else",
"{",
"elapsed",
":=",
"time",
".",
"Since",
"(",
"a",
".",
"last",
")",
"\n",
"if",
"elapsed",
">=",
"a",
".",
"maxInactivity",
"{",
"if",
"s",
".",
"debug",
"{",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"c",
".",
"name",
")",
"\n",
"}",
"\n",
"c",
".",
"stopDeleteTimer",
"(",
")",
"\n",
"// Leave in map for now, but mark as deleted. If we removed before",
"// completion of the removal, a new lookup could re-create while",
"// in the process of deleting it.",
"a",
".",
"deleteInProgress",
"=",
"true",
"\n",
"delete",
"=",
"true",
"\n",
"}",
"else",
"{",
"var",
"next",
"time",
".",
"Duration",
"\n",
"if",
"elapsed",
"<",
"0",
"{",
"next",
"=",
"a",
".",
"maxInactivity",
"\n",
"}",
"else",
"{",
"// elapsed < a.maxInactivity",
"next",
"=",
"a",
".",
"maxInactivity",
"-",
"elapsed",
"\n",
"}",
"\n",
"if",
"s",
".",
"debug",
"{",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"c",
".",
"name",
",",
"next",
")",
"\n",
"}",
"\n",
"c",
".",
"resetDeleteTimer",
"(",
"next",
")",
"\n",
"}",
"\n",
"}",
"\n",
"cs",
".",
"Unlock",
"(",
")",
"\n",
"if",
"delete",
"{",
"if",
"testDeleteChannel",
"{",
"time",
".",
"Sleep",
"(",
"time",
".",
"Second",
")",
"\n",
"}",
"\n",
"if",
"s",
".",
"isClustered",
"{",
"s",
".",
"replicateDeleteChannel",
"(",
"c",
".",
"name",
")",
"\n",
"}",
"else",
"{",
"s",
".",
"processDeleteChannel",
"(",
"c",
".",
"name",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}"
] | // Check if the channel can be deleted. If so, do it in place.
// This is called from the ioLoop by the leader or a standalone server. | [
"Check",
"if",
"the",
"channel",
"can",
"be",
"deleted",
".",
"If",
"so",
"do",
"it",
"in",
"place",
".",
"This",
"is",
"called",
"from",
"the",
"ioLoop",
"by",
"the",
"leader",
"or",
"a",
"standlone",
"server",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2696-L2745 | train |
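When the channel has seen activity more recently than maxInactivity, the handler above reschedules itself for the remaining time (maxInactivity - elapsed), falling back to the full interval if the clock appears to have gone backwards. That timer arithmetic in isolation, as a runnable sketch (note the real code deletes the channel immediately once elapsed >= maxInactivity rather than rescheduling):

package main

import (
	"fmt"
	"time"
)

// nextDeleteCheck returns how long to wait before re-checking a channel
// for inactivity, given when it was last active.
func nextDeleteCheck(last time.Time, maxInactivity time.Duration) time.Duration {
	elapsed := time.Since(last)
	if elapsed < 0 {
		// Clock skew: start a full interval over again.
		return maxInactivity
	}
	return maxInactivity - elapsed
}

func main() {
	last := time.Now().Add(-40 * time.Second)
	fmt.Println(nextDeleteCheck(last, time.Minute)) // roughly 20s remain
}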
nats-io/nats-streaming-server | server/server.go | processDeleteChannel | func (s *StanServer) processDeleteChannel(channel string) {
cs := s.channels
cs.Lock()
defer cs.Unlock()
c := cs.channels[channel]
if c == nil {
s.log.Errorf("Error deleting channel %q: not found", channel)
return
}
if c.activity != nil && c.activity.preventDelete {
s.log.Errorf("The channel %q cannot be deleted at this time since a subscription has been created", channel)
return
}
// Delete from store
if err := cs.store.DeleteChannel(channel); err != nil {
s.log.Errorf("Error deleting channel %q: %v", channel, err)
if c.activity != nil {
c.activity.deleteInProgress = false
c.startDeleteTimer()
}
return
}
delete(s.channels.channels, channel)
s.log.Noticef("Channel %q has been deleted", channel)
} | go | func (s *StanServer) processDeleteChannel(channel string) {
cs := s.channels
cs.Lock()
defer cs.Unlock()
c := cs.channels[channel]
if c == nil {
s.log.Errorf("Error deleting channel %q: not found", channel)
return
}
if c.activity != nil && c.activity.preventDelete {
s.log.Errorf("The channel %q cannot be deleted at this time since a subscription has been created", channel)
return
}
// Delete from store
if err := cs.store.DeleteChannel(channel); err != nil {
s.log.Errorf("Error deleting channel %q: %v", channel, err)
if c.activity != nil {
c.activity.deleteInProgress = false
c.startDeleteTimer()
}
return
}
delete(s.channels.channels, channel)
s.log.Noticef("Channel %q has been deleted", channel)
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"processDeleteChannel",
"(",
"channel",
"string",
")",
"{",
"cs",
":=",
"s",
".",
"channels",
"\n",
"cs",
".",
"Lock",
"(",
")",
"\n",
"defer",
"cs",
".",
"Unlock",
"(",
")",
"\n",
"c",
":=",
"cs",
".",
"channels",
"[",
"channel",
"]",
"\n",
"if",
"c",
"==",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"channel",
")",
"\n",
"return",
"\n",
"}",
"\n",
"if",
"c",
".",
"activity",
"!=",
"nil",
"&&",
"c",
".",
"activity",
".",
"preventDelete",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"channel",
")",
"\n",
"return",
"\n",
"}",
"\n",
"// Delete from store",
"if",
"err",
":=",
"cs",
".",
"store",
".",
"DeleteChannel",
"(",
"channel",
")",
";",
"err",
"!=",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"channel",
",",
"err",
")",
"\n",
"if",
"c",
".",
"activity",
"!=",
"nil",
"{",
"c",
".",
"activity",
".",
"deleteInProgress",
"=",
"false",
"\n",
"c",
".",
"startDeleteTimer",
"(",
")",
"\n",
"}",
"\n",
"return",
"\n",
"}",
"\n",
"delete",
"(",
"s",
".",
"channels",
".",
"channels",
",",
"channel",
")",
"\n",
"s",
".",
"log",
".",
"Noticef",
"(",
"\"",
"\"",
",",
"channel",
")",
"\n",
"}"
] | // Actual deletion of the channel. | [
"Actual",
"deletetion",
"of",
"the",
"channel",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2748-L2772 | train |
nats-io/nats-streaming-server | server/server.go | checkClientHealth | func (s *StanServer) checkClientHealth(clientID string) {
client := s.clients.lookup(clientID)
if client == nil {
return
}
// If clustered and we lost leadership, we should stop
// heartbeating as the new leader will take over.
if s.isClustered && !s.isLeader() {
// Do not remove client HB here. We do that in
// leadershipLost. We could be here because the
// callback fired while we are not yet finished
// acquiring leadership.
client.Lock()
if client.hbt != nil {
client.hbt.Reset(s.opts.ClientHBInterval)
}
client.Unlock()
return
}
client.RLock()
hbInbox := client.info.HbInbox
client.RUnlock()
// Sends the HB request. This call blocks for ClientHBTimeout,
// do not hold the lock for that long!
_, err := s.nc.Request(hbInbox, nil, s.opts.ClientHBTimeout)
// Grab the lock now.
client.Lock()
// Client could have been unregistered, in which case
// client.hbt will be nil.
if client.hbt == nil {
client.Unlock()
return
}
hadFailed := client.fhb > 0
// If we did not get the reply, increase the number of
// failed heartbeats.
if err != nil {
client.fhb++
// If we have reached the max number of failures
if client.fhb > s.opts.ClientHBFailCount {
s.log.Debugf("[Client:%s] Timed out on heartbeats", clientID)
// close the client (connection). This locks the
// client object internally so unlock here.
client.Unlock()
// If clustered, thread operations through Raft.
if s.isClustered {
s.barrier(func() {
if err := s.replicateConnClose(&pb.CloseRequest{ClientID: clientID}); err != nil {
s.log.Errorf("[Client:%s] Failed to replicate disconnect on heartbeat expiration: %v",
clientID, err)
}
})
} else {
s.closeClient(clientID)
}
return
}
} else {
// We got the reply, reset the number of failed heartbeats.
client.fhb = 0
}
// Reset the timer to fire again.
client.hbt.Reset(s.opts.ClientHBInterval)
var (
subs []*subState
hasFailedHB = client.fhb > 0
)
if (hadFailed && !hasFailedHB) || (!hadFailed && hasFailedHB) {
// Get a copy of subscribers and client.fhb while under lock
subs = client.getSubsCopy()
}
client.Unlock()
if len(subs) > 0 {
// Push the info about presence of failed heartbeats down to
// subscribers, so they have easier access to that info in
// the redelivery attempt code.
for _, sub := range subs {
sub.Lock()
sub.hasFailedHB = hasFailedHB
sub.Unlock()
}
}
} | go | func (s *StanServer) checkClientHealth(clientID string) {
client := s.clients.lookup(clientID)
if client == nil {
return
}
// If clustered and we lost leadership, we should stop
// heartbeating as the new leader will take over.
if s.isClustered && !s.isLeader() {
// Do not remove client HB here. We do that in
// leadershipLost. We could be here because the
// callback fired while we are not yet finished
// acquiring leadership.
client.Lock()
if client.hbt != nil {
client.hbt.Reset(s.opts.ClientHBInterval)
}
client.Unlock()
return
}
client.RLock()
hbInbox := client.info.HbInbox
client.RUnlock()
// Sends the HB request. This call blocks for ClientHBTimeout,
// do not hold the lock for that long!
_, err := s.nc.Request(hbInbox, nil, s.opts.ClientHBTimeout)
// Grab the lock now.
client.Lock()
// Client could have been unregistered, in which case
// client.hbt will be nil.
if client.hbt == nil {
client.Unlock()
return
}
hadFailed := client.fhb > 0
// If we did not get the reply, increase the number of
// failed heartbeats.
if err != nil {
client.fhb++
// If we have reached the max number of failures
if client.fhb > s.opts.ClientHBFailCount {
s.log.Debugf("[Client:%s] Timed out on heartbeats", clientID)
// close the client (connection). This locks the
// client object internally so unlock here.
client.Unlock()
// If clustered, thread operations through Raft.
if s.isClustered {
s.barrier(func() {
if err := s.replicateConnClose(&pb.CloseRequest{ClientID: clientID}); err != nil {
s.log.Errorf("[Client:%s] Failed to replicate disconnect on heartbeat expiration: %v",
clientID, err)
}
})
} else {
s.closeClient(clientID)
}
return
}
} else {
// We got the reply, reset the number of failed heartbeats.
client.fhb = 0
}
// Reset the timer to fire again.
client.hbt.Reset(s.opts.ClientHBInterval)
var (
subs []*subState
hasFailedHB = client.fhb > 0
)
if (hadFailed && !hasFailedHB) || (!hadFailed && hasFailedHB) {
// Get a copy of subscribers and client.fhb while under lock
subs = client.getSubsCopy()
}
client.Unlock()
if len(subs) > 0 {
// Push the info about presence of failed heartbeats down to
// subscribers, so they have easier access to that info in
// the redelivery attempt code.
for _, sub := range subs {
sub.Lock()
sub.hasFailedHB = hasFailedHB
sub.Unlock()
}
}
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"checkClientHealth",
"(",
"clientID",
"string",
")",
"{",
"client",
":=",
"s",
".",
"clients",
".",
"lookup",
"(",
"clientID",
")",
"\n",
"if",
"client",
"==",
"nil",
"{",
"return",
"\n",
"}",
"\n\n",
"// If clustered and we lost leadership, we should stop",
"// heartbeating as the new leader will take over.",
"if",
"s",
".",
"isClustered",
"&&",
"!",
"s",
".",
"isLeader",
"(",
")",
"{",
"// Do not remove client HB here. We do that in",
"// leadershipLost. We could be here because the",
"// callback fired while we are not yet finished",
"// acquiring leadership.",
"client",
".",
"Lock",
"(",
")",
"\n",
"if",
"client",
".",
"hbt",
"!=",
"nil",
"{",
"client",
".",
"hbt",
".",
"Reset",
"(",
"s",
".",
"opts",
".",
"ClientHBInterval",
")",
"\n",
"}",
"\n",
"client",
".",
"Unlock",
"(",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"client",
".",
"RLock",
"(",
")",
"\n",
"hbInbox",
":=",
"client",
".",
"info",
".",
"HbInbox",
"\n",
"client",
".",
"RUnlock",
"(",
")",
"\n\n",
"// Sends the HB request. This call blocks for ClientHBTimeout,",
"// do not hold the lock for that long!",
"_",
",",
"err",
":=",
"s",
".",
"nc",
".",
"Request",
"(",
"hbInbox",
",",
"nil",
",",
"s",
".",
"opts",
".",
"ClientHBTimeout",
")",
"\n",
"// Grab the lock now.",
"client",
".",
"Lock",
"(",
")",
"\n",
"// Client could have been unregistered, in which case",
"// client.hbt will be nil.",
"if",
"client",
".",
"hbt",
"==",
"nil",
"{",
"client",
".",
"Unlock",
"(",
")",
"\n",
"return",
"\n",
"}",
"\n",
"hadFailed",
":=",
"client",
".",
"fhb",
">",
"0",
"\n",
"// If we did not get the reply, increase the number of",
"// failed heartbeats.",
"if",
"err",
"!=",
"nil",
"{",
"client",
".",
"fhb",
"++",
"\n",
"// If we have reached the max number of failures",
"if",
"client",
".",
"fhb",
">",
"s",
".",
"opts",
".",
"ClientHBFailCount",
"{",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"clientID",
")",
"\n",
"// close the client (connection). This locks the",
"// client object internally so unlock here.",
"client",
".",
"Unlock",
"(",
")",
"\n",
"// If clustered, thread operations through Raft.",
"if",
"s",
".",
"isClustered",
"{",
"s",
".",
"barrier",
"(",
"func",
"(",
")",
"{",
"if",
"err",
":=",
"s",
".",
"replicateConnClose",
"(",
"&",
"pb",
".",
"CloseRequest",
"{",
"ClientID",
":",
"clientID",
"}",
")",
";",
"err",
"!=",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"clientID",
",",
"err",
")",
"\n",
"}",
"\n",
"}",
")",
"\n",
"}",
"else",
"{",
"s",
".",
"closeClient",
"(",
"clientID",
")",
"\n",
"}",
"\n",
"return",
"\n",
"}",
"\n",
"}",
"else",
"{",
"// We got the reply, reset the number of failed heartbeats.",
"client",
".",
"fhb",
"=",
"0",
"\n",
"}",
"\n",
"// Reset the timer to fire again.",
"client",
".",
"hbt",
".",
"Reset",
"(",
"s",
".",
"opts",
".",
"ClientHBInterval",
")",
"\n",
"var",
"(",
"subs",
"[",
"]",
"*",
"subState",
"\n",
"hasFailedHB",
"=",
"client",
".",
"fhb",
">",
"0",
"\n",
")",
"\n",
"if",
"(",
"hadFailed",
"&&",
"!",
"hasFailedHB",
")",
"||",
"(",
"!",
"hadFailed",
"&&",
"hasFailedHB",
")",
"{",
"// Get a copy of subscribers and client.fhb while under lock",
"subs",
"=",
"client",
".",
"getSubsCopy",
"(",
")",
"\n",
"}",
"\n",
"client",
".",
"Unlock",
"(",
")",
"\n",
"if",
"len",
"(",
"subs",
")",
">",
"0",
"{",
"// Push the info about presence of failed heartbeats down to",
"// subscribers, so they have easier access to that info in",
"// the redelivery attempt code.",
"for",
"_",
",",
"sub",
":=",
"range",
"subs",
"{",
"sub",
".",
"Lock",
"(",
")",
"\n",
"sub",
".",
"hasFailedHB",
"=",
"hasFailedHB",
"\n",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}"
] | // Send a heartbeat call to the client. | [
"Send",
"a",
"heartbeat",
"call",
"to",
"the",
"client",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2859-L2944 | train |
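The heartbeat check above keeps a per-client count of consecutive missed replies, closes the client only once that count exceeds the configured limit, and resets the count on any successful reply. A compact model of just that counter, with hypothetical names:

package main

import "fmt"

type hbState struct {
	failed    int // consecutive missed heartbeats
	failLimit int // allowed misses before the client is closed
}

// onHeartbeatResult updates the counter and reports whether the client
// should now be closed.
func (h *hbState) onHeartbeatResult(replied bool) bool {
	if replied {
		h.failed = 0
		return false
	}
	h.failed++
	return h.failed > h.failLimit
}

func main() {
	h := &hbState{failLimit: 3}
	for i, replied := range []bool{false, false, true, false, false, false, false} {
		closeClient := h.onHeartbeatResult(replied)
		fmt.Printf("attempt %d: failed=%d close=%v\n", i, h.failed, closeClient)
	}
}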
nats-io/nats-streaming-server | server/server.go | closeClient | func (s *StanServer) closeClient(clientID string) error {
s.closeMu.Lock()
defer s.closeMu.Unlock()
// Lookup client first, will unregister only after removing its subscriptions
client := s.clients.lookup(clientID)
if client == nil {
s.log.Errorf("Unknown client %q in close request", clientID)
return ErrUnknownClient
}
// Remove all non-durable subscribers.
s.removeAllNonDurableSubscribers(client)
// Remove from our clientStore.
if _, err := s.clients.unregister(clientID); err != nil {
s.log.Errorf("Error unregistering client %q: %v", clientID, err)
}
if s.debug {
client.RLock()
hbInbox := client.info.HbInbox
client.RUnlock()
s.log.Debugf("[Client:%s] Closed (Inbox=%v)", clientID, hbInbox)
}
return nil
} | go | func (s *StanServer) closeClient(clientID string) error {
s.closeMu.Lock()
defer s.closeMu.Unlock()
// Lookup client first, will unregister only after removing its subscriptions
client := s.clients.lookup(clientID)
if client == nil {
s.log.Errorf("Unknown client %q in close request", clientID)
return ErrUnknownClient
}
// Remove all non-durable subscribers.
s.removeAllNonDurableSubscribers(client)
// Remove from our clientStore.
if _, err := s.clients.unregister(clientID); err != nil {
s.log.Errorf("Error unregistering client %q: %v", clientID, err)
}
if s.debug {
client.RLock()
hbInbox := client.info.HbInbox
client.RUnlock()
s.log.Debugf("[Client:%s] Closed (Inbox=%v)", clientID, hbInbox)
}
return nil
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"closeClient",
"(",
"clientID",
"string",
")",
"error",
"{",
"s",
".",
"closeMu",
".",
"Lock",
"(",
")",
"\n",
"defer",
"s",
".",
"closeMu",
".",
"Unlock",
"(",
")",
"\n",
"// Lookup client first, will unregister only after removing its subscriptions",
"client",
":=",
"s",
".",
"clients",
".",
"lookup",
"(",
"clientID",
")",
"\n",
"if",
"client",
"==",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"clientID",
")",
"\n",
"return",
"ErrUnknownClient",
"\n",
"}",
"\n\n",
"// Remove all non-durable subscribers.",
"s",
".",
"removeAllNonDurableSubscribers",
"(",
"client",
")",
"\n\n",
"// Remove from our clientStore.",
"if",
"_",
",",
"err",
":=",
"s",
".",
"clients",
".",
"unregister",
"(",
"clientID",
")",
";",
"err",
"!=",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"clientID",
",",
"err",
")",
"\n",
"}",
"\n\n",
"if",
"s",
".",
"debug",
"{",
"client",
".",
"RLock",
"(",
")",
"\n",
"hbInbox",
":=",
"client",
".",
"info",
".",
"HbInbox",
"\n",
"client",
".",
"RUnlock",
"(",
")",
"\n",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"clientID",
",",
"hbInbox",
")",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // Close a client | [
"Close",
"a",
"client"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2947-L2972 | train |
nats-io/nats-streaming-server | server/server.go | processCloseRequest | func (s *StanServer) processCloseRequest(m *nats.Msg) {
req := &pb.CloseRequest{}
err := req.Unmarshal(m.Data)
if err != nil {
s.log.Errorf("Received invalid close request, subject=%s", m.Subject)
s.sendCloseResponse(m.Reply, ErrInvalidCloseReq)
return
}
s.barrier(func() {
var err error
// If clustered, thread operations through Raft.
if s.isClustered {
err = s.replicateConnClose(req)
} else {
err = s.closeClient(req.ClientID)
}
// If there was an error, it has been already logged.
// Send response, if err is nil, will be a success response.
s.sendCloseResponse(m.Reply, err)
})
} | go | func (s *StanServer) processCloseRequest(m *nats.Msg) {
req := &pb.CloseRequest{}
err := req.Unmarshal(m.Data)
if err != nil {
s.log.Errorf("Received invalid close request, subject=%s", m.Subject)
s.sendCloseResponse(m.Reply, ErrInvalidCloseReq)
return
}
s.barrier(func() {
var err error
// If clustered, thread operations through Raft.
if s.isClustered {
err = s.replicateConnClose(req)
} else {
err = s.closeClient(req.ClientID)
}
// If there was an error, it has been already logged.
// Send response, if err is nil, will be a success response.
s.sendCloseResponse(m.Reply, err)
})
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"processCloseRequest",
"(",
"m",
"*",
"nats",
".",
"Msg",
")",
"{",
"req",
":=",
"&",
"pb",
".",
"CloseRequest",
"{",
"}",
"\n",
"err",
":=",
"req",
".",
"Unmarshal",
"(",
"m",
".",
"Data",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"m",
".",
"Subject",
")",
"\n",
"s",
".",
"sendCloseResponse",
"(",
"m",
".",
"Reply",
",",
"ErrInvalidCloseReq",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"s",
".",
"barrier",
"(",
"func",
"(",
")",
"{",
"var",
"err",
"error",
"\n",
"// If clustered, thread operations through Raft.",
"if",
"s",
".",
"isClustered",
"{",
"err",
"=",
"s",
".",
"replicateConnClose",
"(",
"req",
")",
"\n",
"}",
"else",
"{",
"err",
"=",
"s",
".",
"closeClient",
"(",
"req",
".",
"ClientID",
")",
"\n",
"}",
"\n",
"// If there was an error, it has been already logged.",
"// Send response, if err is nil, will be a success response.",
"s",
".",
"sendCloseResponse",
"(",
"m",
".",
"Reply",
",",
"err",
")",
"\n",
"}",
")",
"\n",
"}"
] | // processCloseRequest will process connection close requests from clients. | [
"processCloseRequest",
"will",
"process",
"connection",
"close",
"requests",
"from",
"clients",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2975-L2997 | train |
nats-io/nats-streaming-server | server/server.go | processClientPublish | func (s *StanServer) processClientPublish(m *nats.Msg) {
iopm := &ioPendingMsg{m: m}
pm := &iopm.pm
if pm.Unmarshal(m.Data) != nil {
if s.processCtrlMsg(m) {
return
}
// else we will report an error below...
}
// Make sure we have a guid and valid channel name.
if pm.Guid == "" || !util.IsChannelNameValid(pm.Subject, false) {
s.log.Errorf("Received invalid client publish message %v", pm)
s.sendPublishErr(m.Reply, pm.Guid, ErrInvalidPubReq)
return
}
if s.debug {
s.log.Tracef("[Client:%s] Received message from publisher subj=%s guid=%s", pm.ClientID, pm.Subject, pm.Guid)
}
// Check if the client is valid. We do this after the clustered check so
// that only the leader performs this check.
valid := false
if s.partitions != nil {
// In partitioning mode it is possible that we get there
// before the connect request is processed. If so, make sure we wait
// for conn request to be processed first. Check clientCheckTimeout
// doc for details.
valid = s.clients.isValidWithTimeout(pm.ClientID, pm.ConnID, clientCheckTimeout)
} else {
valid = s.clients.isValid(pm.ClientID, pm.ConnID)
}
if !valid {
s.log.Errorf("Received invalid client publish message %v", pm)
s.sendPublishErr(m.Reply, pm.Guid, ErrInvalidPubReq)
return
}
s.ioChannel <- iopm
} | go | func (s *StanServer) processClientPublish(m *nats.Msg) {
iopm := &ioPendingMsg{m: m}
pm := &iopm.pm
if pm.Unmarshal(m.Data) != nil {
if s.processCtrlMsg(m) {
return
}
// else we will report an error below...
}
// Make sure we have a guid and valid channel name.
if pm.Guid == "" || !util.IsChannelNameValid(pm.Subject, false) {
s.log.Errorf("Received invalid client publish message %v", pm)
s.sendPublishErr(m.Reply, pm.Guid, ErrInvalidPubReq)
return
}
if s.debug {
s.log.Tracef("[Client:%s] Received message from publisher subj=%s guid=%s", pm.ClientID, pm.Subject, pm.Guid)
}
// Check if the client is valid. We do this after the clustered check so
// that only the leader performs this check.
valid := false
if s.partitions != nil {
// In partitioning mode it is possible that we get there
// before the connect request is processed. If so, make sure we wait
// for conn request to be processed first. Check clientCheckTimeout
// doc for details.
valid = s.clients.isValidWithTimeout(pm.ClientID, pm.ConnID, clientCheckTimeout)
} else {
valid = s.clients.isValid(pm.ClientID, pm.ConnID)
}
if !valid {
s.log.Errorf("Received invalid client publish message %v", pm)
s.sendPublishErr(m.Reply, pm.Guid, ErrInvalidPubReq)
return
}
s.ioChannel <- iopm
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"processClientPublish",
"(",
"m",
"*",
"nats",
".",
"Msg",
")",
"{",
"iopm",
":=",
"&",
"ioPendingMsg",
"{",
"m",
":",
"m",
"}",
"\n",
"pm",
":=",
"&",
"iopm",
".",
"pm",
"\n",
"if",
"pm",
".",
"Unmarshal",
"(",
"m",
".",
"Data",
")",
"!=",
"nil",
"{",
"if",
"s",
".",
"processCtrlMsg",
"(",
"m",
")",
"{",
"return",
"\n",
"}",
"\n",
"// else we will report an error below...",
"}",
"\n\n",
"// Make sure we have a guid and valid channel name.",
"if",
"pm",
".",
"Guid",
"==",
"\"",
"\"",
"||",
"!",
"util",
".",
"IsChannelNameValid",
"(",
"pm",
".",
"Subject",
",",
"false",
")",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"pm",
")",
"\n",
"s",
".",
"sendPublishErr",
"(",
"m",
".",
"Reply",
",",
"pm",
".",
"Guid",
",",
"ErrInvalidPubReq",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"if",
"s",
".",
"debug",
"{",
"s",
".",
"log",
".",
"Tracef",
"(",
"\"",
"\"",
",",
"pm",
".",
"ClientID",
",",
"pm",
".",
"Subject",
",",
"pm",
".",
"Guid",
")",
"\n",
"}",
"\n\n",
"// Check if the client is valid. We do this after the clustered check so",
"// that only the leader performs this check.",
"valid",
":=",
"false",
"\n",
"if",
"s",
".",
"partitions",
"!=",
"nil",
"{",
"// In partitioning mode it is possible that we get there",
"// before the connect request is processed. If so, make sure we wait",
"// for conn request\tto be processed first. Check clientCheckTimeout",
"// doc for details.",
"valid",
"=",
"s",
".",
"clients",
".",
"isValidWithTimeout",
"(",
"pm",
".",
"ClientID",
",",
"pm",
".",
"ConnID",
",",
"clientCheckTimeout",
")",
"\n",
"}",
"else",
"{",
"valid",
"=",
"s",
".",
"clients",
".",
"isValid",
"(",
"pm",
".",
"ClientID",
",",
"pm",
".",
"ConnID",
")",
"\n",
"}",
"\n",
"if",
"!",
"valid",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"pm",
")",
"\n",
"s",
".",
"sendPublishErr",
"(",
"m",
".",
"Reply",
",",
"pm",
".",
"Guid",
",",
"ErrInvalidPubReq",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"s",
".",
"ioChannel",
"<-",
"iopm",
"\n",
"}"
] | // processClientPublish processes inbound messages from clients. | [
"processClientPublish",
"process",
"inbound",
"messages",
"from",
"clients",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3030-L3070 | train |
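In partitioning mode the publish path above tolerates a race with the connect request by validating the client with a timeout instead of a single check. The real isValidWithTimeout lives in the server's client store and is not part of this record; a schematic poll-until-deadline version of the same idea looks like:

package main

import (
	"fmt"
	"sync"
	"time"
)

type registry struct {
	mu      sync.Mutex
	clients map[string]bool
}

func (r *registry) isValid(id string) bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.clients[id]
}

// isValidWithTimeout keeps re-checking until the deadline, giving an
// in-flight connect request a chance to be processed first.
func (r *registry) isValidWithTimeout(id string, timeout time.Duration) bool {
	deadline := time.Now().Add(timeout)
	for {
		if r.isValid(id) {
			return true
		}
		if time.Now().After(deadline) {
			return false
		}
		time.Sleep(10 * time.Millisecond)
	}
}

func main() {
	r := &registry{clients: make(map[string]bool)}
	go func() { // simulate the connect request landing slightly later
		time.Sleep(30 * time.Millisecond)
		r.mu.Lock()
		r.clients["pub-client"] = true
		r.mu.Unlock()
	}()
	fmt.Println(r.isValidWithTimeout("pub-client", 200*time.Millisecond)) // true
}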
nats-io/nats-streaming-server | server/server.go | processClientPings | func (s *StanServer) processClientPings(m *nats.Msg) {
if len(m.Data) == 0 {
return
}
ping := &pb.Ping{}
if err := ping.Unmarshal(m.Data); err != nil {
return
}
var reply []byte
client := s.clients.lookupByConnID(ping.ConnID)
if client != nil {
// If the client has failed heartbeats and since the
// server just received a PING from the client, reset
// the server-to-client HB timer so that a PING is
// sent soon and the client's subscriptions failedHB
// is cleared.
client.RLock()
hasFailedHBs := client.fhb > 0
client.RUnlock()
if hasFailedHBs {
client.Lock()
client.hbt.Reset(time.Millisecond)
client.Unlock()
}
if s.pingResponseOKBytes == nil {
s.pingResponseOKBytes, _ = (&pb.PingResponse{}).Marshal()
}
reply = s.pingResponseOKBytes
} else {
if s.pingResponseInvalidClientBytes == nil {
pingError := &pb.PingResponse{
Error: "client has been replaced or is no longer registered",
}
s.pingResponseInvalidClientBytes, _ = pingError.Marshal()
}
reply = s.pingResponseInvalidClientBytes
}
s.ncs.Publish(m.Reply, reply)
} | go | func (s *StanServer) processClientPings(m *nats.Msg) {
if len(m.Data) == 0 {
return
}
ping := &pb.Ping{}
if err := ping.Unmarshal(m.Data); err != nil {
return
}
var reply []byte
client := s.clients.lookupByConnID(ping.ConnID)
if client != nil {
// If the client has failed heartbeats and since the
// server just received a PING from the client, reset
// the server-to-client HB timer so that a PING is
// sent soon and the client's subscriptions failedHB
// is cleared.
client.RLock()
hasFailedHBs := client.fhb > 0
client.RUnlock()
if hasFailedHBs {
client.Lock()
client.hbt.Reset(time.Millisecond)
client.Unlock()
}
if s.pingResponseOKBytes == nil {
s.pingResponseOKBytes, _ = (&pb.PingResponse{}).Marshal()
}
reply = s.pingResponseOKBytes
} else {
if s.pingResponseInvalidClientBytes == nil {
pingError := &pb.PingResponse{
Error: "client has been replaced or is no longer registered",
}
s.pingResponseInvalidClientBytes, _ = pingError.Marshal()
}
reply = s.pingResponseInvalidClientBytes
}
s.ncs.Publish(m.Reply, reply)
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"processClientPings",
"(",
"m",
"*",
"nats",
".",
"Msg",
")",
"{",
"if",
"len",
"(",
"m",
".",
"Data",
")",
"==",
"0",
"{",
"return",
"\n",
"}",
"\n",
"ping",
":=",
"&",
"pb",
".",
"Ping",
"{",
"}",
"\n",
"if",
"err",
":=",
"ping",
".",
"Unmarshal",
"(",
"m",
".",
"Data",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"\n",
"}",
"\n",
"var",
"reply",
"[",
"]",
"byte",
"\n",
"client",
":=",
"s",
".",
"clients",
".",
"lookupByConnID",
"(",
"ping",
".",
"ConnID",
")",
"\n",
"if",
"client",
"!=",
"nil",
"{",
"// If the client has failed heartbeats and since the",
"// server just received a PING from the client, reset",
"// the server-to-client HB timer so that a PING is",
"// sent soon and the client's subscriptions failedHB",
"// is cleared.",
"client",
".",
"RLock",
"(",
")",
"\n",
"hasFailedHBs",
":=",
"client",
".",
"fhb",
">",
"0",
"\n",
"client",
".",
"RUnlock",
"(",
")",
"\n",
"if",
"hasFailedHBs",
"{",
"client",
".",
"Lock",
"(",
")",
"\n",
"client",
".",
"hbt",
".",
"Reset",
"(",
"time",
".",
"Millisecond",
")",
"\n",
"client",
".",
"Unlock",
"(",
")",
"\n",
"}",
"\n",
"if",
"s",
".",
"pingResponseOKBytes",
"==",
"nil",
"{",
"s",
".",
"pingResponseOKBytes",
",",
"_",
"=",
"(",
"&",
"pb",
".",
"PingResponse",
"{",
"}",
")",
".",
"Marshal",
"(",
")",
"\n",
"}",
"\n",
"reply",
"=",
"s",
".",
"pingResponseOKBytes",
"\n",
"}",
"else",
"{",
"if",
"s",
".",
"pingResponseInvalidClientBytes",
"==",
"nil",
"{",
"pingError",
":=",
"&",
"pb",
".",
"PingResponse",
"{",
"Error",
":",
"\"",
"\"",
",",
"}",
"\n",
"s",
".",
"pingResponseInvalidClientBytes",
",",
"_",
"=",
"pingError",
".",
"Marshal",
"(",
")",
"\n",
"}",
"\n",
"reply",
"=",
"s",
".",
"pingResponseInvalidClientBytes",
"\n",
"}",
"\n",
"s",
".",
"ncs",
".",
"Publish",
"(",
"m",
".",
"Reply",
",",
"reply",
")",
"\n",
"}"
] | // processClientPings receives a PING from a client. The payload is the client's UID.
// If the client is present, a response with nil payload is sent back to indicate
// success, otherwise the payload contains an error message. | [
"processClientPings",
"receives",
"a",
"PING",
"from",
"a",
"client",
".",
"The",
"payload",
"is",
"the",
"client",
"s",
"UID",
".",
"If",
"the",
"client",
"is",
"present",
"a",
"response",
"with",
"nil",
"payload",
"is",
"sent",
"back",
"to",
"indicate",
"success",
"otherwise",
"the",
"payload",
"contains",
"an",
"error",
"message",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3075-L3113 | train |
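The ping handler above marshals each of its two possible responses once and then reuses the cached byte slices for every later ping, keeping encoding work off the hot path. The same lazy-cache idea with encoding/json standing in for the protobuf types (which are not reproduced here); like the original, this assumes a single goroutine touches the cache:

package main

import (
	"encoding/json"
	"fmt"
)

type pingResponse struct {
	Error string `json:"error,omitempty"`
}

var okBytes, invalidBytes []byte

// pingReply returns a cached, pre-marshaled response so repeated pings
// do not re-encode the same payload.
func pingReply(clientKnown bool) []byte {
	if clientKnown {
		if okBytes == nil {
			okBytes, _ = json.Marshal(&pingResponse{})
		}
		return okBytes
	}
	if invalidBytes == nil {
		invalidBytes, _ = json.Marshal(&pingResponse{Error: "client has been replaced or is no longer registered"})
	}
	return invalidBytes
}

func main() {
	fmt.Println(string(pingReply(true)))
	fmt.Println(string(pingReply(false)))
}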
nats-io/nats-streaming-server | server/server.go | sendMsgToQueueGroup | func (s *StanServer) sendMsgToQueueGroup(qs *queueState, m *pb.MsgProto, force bool) (*subState, bool, bool) {
sub := findBestQueueSub(qs.subs)
if sub == nil {
return nil, false, false
}
sub.Lock()
wasStalled := sub.stalled
didSend, sendMore := s.sendMsgToSub(sub, m, force)
// If this is not a redelivery and the sub was not stalled, but now is,
// bump the number of stalled members.
if !force && !wasStalled && sub.stalled {
qs.stalledSubCount++
}
if didSend && sub.LastSent > qs.lastSent {
qs.lastSent = sub.LastSent
}
sub.Unlock()
return sub, didSend, sendMore
} | go | func (s *StanServer) sendMsgToQueueGroup(qs *queueState, m *pb.MsgProto, force bool) (*subState, bool, bool) {
sub := findBestQueueSub(qs.subs)
if sub == nil {
return nil, false, false
}
sub.Lock()
wasStalled := sub.stalled
didSend, sendMore := s.sendMsgToSub(sub, m, force)
// If this is not a redelivery and the sub was not stalled, but now is,
// bump the number of stalled members.
if !force && !wasStalled && sub.stalled {
qs.stalledSubCount++
}
if didSend && sub.LastSent > qs.lastSent {
qs.lastSent = sub.LastSent
}
sub.Unlock()
return sub, didSend, sendMore
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"sendMsgToQueueGroup",
"(",
"qs",
"*",
"queueState",
",",
"m",
"*",
"pb",
".",
"MsgProto",
",",
"force",
"bool",
")",
"(",
"*",
"subState",
",",
"bool",
",",
"bool",
")",
"{",
"sub",
":=",
"findBestQueueSub",
"(",
"qs",
".",
"subs",
")",
"\n",
"if",
"sub",
"==",
"nil",
"{",
"return",
"nil",
",",
"false",
",",
"false",
"\n",
"}",
"\n",
"sub",
".",
"Lock",
"(",
")",
"\n",
"wasStalled",
":=",
"sub",
".",
"stalled",
"\n",
"didSend",
",",
"sendMore",
":=",
"s",
".",
"sendMsgToSub",
"(",
"sub",
",",
"m",
",",
"force",
")",
"\n",
"// If this is not a redelivery and the sub was not stalled, but now is,",
"// bump the number of stalled members.",
"if",
"!",
"force",
"&&",
"!",
"wasStalled",
"&&",
"sub",
".",
"stalled",
"{",
"qs",
".",
"stalledSubCount",
"++",
"\n",
"}",
"\n",
"if",
"didSend",
"&&",
"sub",
".",
"LastSent",
">",
"qs",
".",
"lastSent",
"{",
"qs",
".",
"lastSent",
"=",
"sub",
".",
"LastSent",
"\n",
"}",
"\n",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"return",
"sub",
",",
"didSend",
",",
"sendMore",
"\n",
"}"
] | // Send a message to the queue group
// Assumes qs lock held for write | [
"Send",
"a",
"message",
"to",
"the",
"queue",
"group",
"Assumes",
"qs",
"lock",
"held",
"for",
"write"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3174-L3192 | train |
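sendMsgToQueueGroup above leans on a findBestQueueSub helper that is defined elsewhere in the file and not shown in this record; conceptually it picks the member best able to take the next message. A hypothetical stand-in that chooses the non-stalled member with the smallest backlog:

package main

import "fmt"

type member struct {
	id      int
	pending int  // messages delivered but not yet acknowledged
	stalled bool // member has reached its max-inflight limit
}

// pickBest returns the non-stalled member with the fewest pending
// messages, or nil if every member is stalled.
func pickBest(members []*member) *member {
	var best *member
	for _, m := range members {
		if m.stalled {
			continue
		}
		if best == nil || m.pending < best.pending {
			best = m
		}
	}
	return best
}

func main() {
	ms := []*member{{id: 1, pending: 5}, {id: 2, pending: 2}, {id: 3, stalled: true}}
	fmt.Println("deliver to member", pickBest(ms).id) // member 2
}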
nats-io/nats-streaming-server | server/server.go | processMsg | func (s *StanServer) processMsg(c *channel) {
ss := c.ss
// Since we iterate through them all.
ss.RLock()
// Walk the plain subscribers and deliver to each one
for _, sub := range ss.psubs {
s.sendAvailableMessages(c, sub)
}
// Check the queue subscribers
for _, qs := range ss.qsubs {
s.sendAvailableMessagesToQueue(c, qs)
}
ss.RUnlock()
} | go | func (s *StanServer) processMsg(c *channel) {
ss := c.ss
// Since we iterate through them all.
ss.RLock()
// Walk the plain subscribers and deliver to each one
for _, sub := range ss.psubs {
s.sendAvailableMessages(c, sub)
}
// Check the queue subscribers
for _, qs := range ss.qsubs {
s.sendAvailableMessagesToQueue(c, qs)
}
ss.RUnlock()
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"processMsg",
"(",
"c",
"*",
"channel",
")",
"{",
"ss",
":=",
"c",
".",
"ss",
"\n\n",
"// Since we iterate through them all.",
"ss",
".",
"RLock",
"(",
")",
"\n",
"// Walk the plain subscribers and deliver to each one",
"for",
"_",
",",
"sub",
":=",
"range",
"ss",
".",
"psubs",
"{",
"s",
".",
"sendAvailableMessages",
"(",
"c",
",",
"sub",
")",
"\n",
"}",
"\n\n",
"// Check the queue subscribers",
"for",
"_",
",",
"qs",
":=",
"range",
"ss",
".",
"qsubs",
"{",
"s",
".",
"sendAvailableMessagesToQueue",
"(",
"c",
",",
"qs",
")",
"\n",
"}",
"\n",
"ss",
".",
"RUnlock",
"(",
")",
"\n",
"}"
] | // processMsg will process a message, and possibly send to clients, etc. | [
"processMsg",
"will",
"process",
"a",
"message",
"and",
"possibly",
"send",
"to",
"clients",
"etc",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3195-L3210 | train |
nats-io/nats-streaming-server | server/server.go | makeSortedSequences | func makeSortedSequences(sequences map[uint64]int64) []uint64 {
results := make([]uint64, 0, len(sequences))
for seq := range sequences {
results = append(results, seq)
}
sort.Sort(bySeq(results))
return results
} | go | func makeSortedSequences(sequences map[uint64]int64) []uint64 {
results := make([]uint64, 0, len(sequences))
for seq := range sequences {
results = append(results, seq)
}
sort.Sort(bySeq(results))
return results
} | [
"func",
"makeSortedSequences",
"(",
"sequences",
"map",
"[",
"uint64",
"]",
"int64",
")",
"[",
"]",
"uint64",
"{",
"results",
":=",
"make",
"(",
"[",
"]",
"uint64",
",",
"0",
",",
"len",
"(",
"sequences",
")",
")",
"\n",
"for",
"seq",
":=",
"range",
"sequences",
"{",
"results",
"=",
"append",
"(",
"results",
",",
"seq",
")",
"\n",
"}",
"\n",
"sort",
".",
"Sort",
"(",
"bySeq",
"(",
"results",
")",
")",
"\n",
"return",
"results",
"\n",
"}"
] | // Returns an array of message sequence numbers ordered by sequence. | [
"Returns",
"an",
"array",
"of",
"message",
"sequence",
"numbers",
"ordered",
"by",
"sequence",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3220-L3227 | train |
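makeSortedSequences relies on a bySeq sort helper defined elsewhere in the file. As a usage note, the same result can be obtained with the standard library's sort.Slice, shown here on a small pending-acks map:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Pending acks keyed by sequence number (the value is an expiration time).
	acksPending := map[uint64]int64{42: 0, 7: 0, 19: 0}

	seqs := make([]uint64, 0, len(acksPending))
	for seq := range acksPending {
		seqs = append(seqs, seq)
	}
	sort.Slice(seqs, func(i, j int) bool { return seqs[i] < seqs[j] })

	fmt.Println(seqs) // [7 19 42]
}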
nats-io/nats-streaming-server | server/server.go | performDurableRedelivery | func (s *StanServer) performDurableRedelivery(c *channel, sub *subState) {
// Sort our messages outstanding from acksPending, grab some state and unlock.
sub.RLock()
sortedSeqs := makeSortedSequences(sub.acksPending)
clientID := sub.ClientID
newOnHold := sub.newOnHold
subID := sub.ID
sub.RUnlock()
if s.debug && len(sortedSeqs) > 0 {
sub.RLock()
durName := sub.DurableName
if durName == "" {
durName = sub.QGroup
}
sub.RUnlock()
s.log.Debugf("[Client:%s] Redelivering to subid=%d, durable=%s", clientID, subID, durName)
}
// If we don't find the client, we are done.
if s.clients.lookup(clientID) != nil {
// Go through all messages
for _, seq := range sortedSeqs {
m := s.getMsgForRedelivery(c, sub, seq)
if m == nil {
continue
}
if s.trace {
s.log.Tracef("[Client:%s] Redelivering to subid=%d, seq=%d", clientID, subID, m.Sequence)
}
// Flag as redelivered.
m.Redelivered = true
sub.Lock()
// Force delivery
s.sendMsgToSub(sub, m, forceDelivery)
sub.Unlock()
}
}
// Release newOnHold if needed.
if newOnHold {
sub.Lock()
sub.newOnHold = false
sub.Unlock()
}
} | go | func (s *StanServer) performDurableRedelivery(c *channel, sub *subState) {
// Sort our messages outstanding from acksPending, grab some state and unlock.
sub.RLock()
sortedSeqs := makeSortedSequences(sub.acksPending)
clientID := sub.ClientID
newOnHold := sub.newOnHold
subID := sub.ID
sub.RUnlock()
if s.debug && len(sortedSeqs) > 0 {
sub.RLock()
durName := sub.DurableName
if durName == "" {
durName = sub.QGroup
}
sub.RUnlock()
s.log.Debugf("[Client:%s] Redelivering to subid=%d, durable=%s", clientID, subID, durName)
}
// If we don't find the client, we are done.
if s.clients.lookup(clientID) != nil {
// Go through all messages
for _, seq := range sortedSeqs {
m := s.getMsgForRedelivery(c, sub, seq)
if m == nil {
continue
}
if s.trace {
s.log.Tracef("[Client:%s] Redelivering to subid=%d, seq=%d", clientID, subID, m.Sequence)
}
// Flag as redelivered.
m.Redelivered = true
sub.Lock()
// Force delivery
s.sendMsgToSub(sub, m, forceDelivery)
sub.Unlock()
}
}
// Release newOnHold if needed.
if newOnHold {
sub.Lock()
sub.newOnHold = false
sub.Unlock()
}
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"performDurableRedelivery",
"(",
"c",
"*",
"channel",
",",
"sub",
"*",
"subState",
")",
"{",
"// Sort our messages outstanding from acksPending, grab some state and unlock.",
"sub",
".",
"RLock",
"(",
")",
"\n",
"sortedSeqs",
":=",
"makeSortedSequences",
"(",
"sub",
".",
"acksPending",
")",
"\n",
"clientID",
":=",
"sub",
".",
"ClientID",
"\n",
"newOnHold",
":=",
"sub",
".",
"newOnHold",
"\n",
"subID",
":=",
"sub",
".",
"ID",
"\n",
"sub",
".",
"RUnlock",
"(",
")",
"\n\n",
"if",
"s",
".",
"debug",
"&&",
"len",
"(",
"sortedSeqs",
")",
">",
"0",
"{",
"sub",
".",
"RLock",
"(",
")",
"\n",
"durName",
":=",
"sub",
".",
"DurableName",
"\n",
"if",
"durName",
"==",
"\"",
"\"",
"{",
"durName",
"=",
"sub",
".",
"QGroup",
"\n",
"}",
"\n",
"sub",
".",
"RUnlock",
"(",
")",
"\n",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"",
"\"",
",",
"clientID",
",",
"subID",
",",
"durName",
")",
"\n",
"}",
"\n\n",
"// If we don't find the client, we are done.",
"if",
"s",
".",
"clients",
".",
"lookup",
"(",
"clientID",
")",
"!=",
"nil",
"{",
"// Go through all messages",
"for",
"_",
",",
"seq",
":=",
"range",
"sortedSeqs",
"{",
"m",
":=",
"s",
".",
"getMsgForRedelivery",
"(",
"c",
",",
"sub",
",",
"seq",
")",
"\n",
"if",
"m",
"==",
"nil",
"{",
"continue",
"\n",
"}",
"\n\n",
"if",
"s",
".",
"trace",
"{",
"s",
".",
"log",
".",
"Tracef",
"(",
"\"",
"\"",
",",
"clientID",
",",
"subID",
",",
"m",
".",
"Sequence",
")",
"\n",
"}",
"\n\n",
"// Flag as redelivered.",
"m",
".",
"Redelivered",
"=",
"true",
"\n\n",
"sub",
".",
"Lock",
"(",
")",
"\n",
"// Force delivery",
"s",
".",
"sendMsgToSub",
"(",
"sub",
",",
"m",
",",
"forceDelivery",
")",
"\n",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"}",
"\n",
"}",
"\n",
"// Release newOnHold if needed.",
"if",
"newOnHold",
"{",
"sub",
".",
"Lock",
"(",
")",
"\n",
"sub",
".",
"newOnHold",
"=",
"false",
"\n",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"}",
"\n",
"}"
] | // Redeliver all outstanding messages to a durable subscriber, used on resubscribe. | [
"Redeliver",
"all",
"outstanding",
"messages",
"to",
"a",
"durable",
"subscriber",
"used",
"on",
"resubscribe",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3256-L3303 | train |
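The redelivery loop in performDurableRedelivery walks the pending acks oldest-first via makeSortedSequences. As a hedged illustration only (the helper's real body lives elsewhere in server.go and is not shown here), sorting the keys of an acksPending-style map could look like this:

```go
package main

import (
	"fmt"
	"sort"
)

// makeSortedSequences returns the keys of an acksPending-style map
// (sequence -> expiration time) in ascending order, so redelivery can
// proceed oldest-first. Illustrative sketch, not the repository's code.
func makeSortedSequences(acksPending map[uint64]int64) []uint64 {
	results := make([]uint64, 0, len(acksPending))
	for seq := range acksPending {
		results = append(results, seq)
	}
	sort.Slice(results, func(i, j int) bool { return results[i] < results[j] })
	return results
}

func main() {
	pending := map[uint64]int64{7: 0, 3: 0, 5: 0}
	fmt.Println(makeSortedSequences(pending)) // [3 5 7]
}
```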
nats-io/nats-streaming-server | server/server.go | collectSentOrAck | func (s *StanServer) collectSentOrAck(sub *subState, sent bool, sequence uint64) {
sr := s.ssarepl
if sub.replicate == nil {
sub.replicate = &subSentAndAck{
sent: make([]uint64, 0, 100),
ack: make([]uint64, 0, 100),
}
}
r := sub.replicate
if sent {
r.sent = append(r.sent, sequence)
} else {
r.ack = append(r.ack, sequence)
}
// This function is called with exactly one event at a time.
	// Use the exact count to decide when to add to the given map. This
	// avoids the need for booleans to prevent adding more than once.
l := len(r.sent) + len(r.ack)
if l == 1 {
sr.waiting.Store(sub, struct{}{})
} else if l == 100 {
sr.waiting.Delete(sub)
sr.ready.Store(sub, struct{}{})
signalCh(sr.notifyCh)
}
} | go | func (s *StanServer) collectSentOrAck(sub *subState, sent bool, sequence uint64) {
sr := s.ssarepl
if sub.replicate == nil {
sub.replicate = &subSentAndAck{
sent: make([]uint64, 0, 100),
ack: make([]uint64, 0, 100),
}
}
r := sub.replicate
if sent {
r.sent = append(r.sent, sequence)
} else {
r.ack = append(r.ack, sequence)
}
// This function is called with exactly one event at a time.
	// Use the exact count to decide when to add to the given map. This
	// avoids the need for booleans to prevent adding more than once.
l := len(r.sent) + len(r.ack)
if l == 1 {
sr.waiting.Store(sub, struct{}{})
} else if l == 100 {
sr.waiting.Delete(sub)
sr.ready.Store(sub, struct{}{})
signalCh(sr.notifyCh)
}
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"collectSentOrAck",
"(",
"sub",
"*",
"subState",
",",
"sent",
"bool",
",",
"sequence",
"uint64",
")",
"{",
"sr",
":=",
"s",
".",
"ssarepl",
"\n",
"if",
"sub",
".",
"replicate",
"==",
"nil",
"{",
"sub",
".",
"replicate",
"=",
"&",
"subSentAndAck",
"{",
"sent",
":",
"make",
"(",
"[",
"]",
"uint64",
",",
"0",
",",
"100",
")",
",",
"ack",
":",
"make",
"(",
"[",
"]",
"uint64",
",",
"0",
",",
"100",
")",
",",
"}",
"\n",
"}",
"\n",
"r",
":=",
"sub",
".",
"replicate",
"\n",
"if",
"sent",
"{",
"r",
".",
"sent",
"=",
"append",
"(",
"r",
".",
"sent",
",",
"sequence",
")",
"\n",
"}",
"else",
"{",
"r",
".",
"ack",
"=",
"append",
"(",
"r",
".",
"ack",
",",
"sequence",
")",
"\n",
"}",
"\n",
"// This function is called with exactly one event at a time.",
"// Use exact count to decide when to add to given map. This",
"// avoid the need for booleans to not add more than once.",
"l",
":=",
"len",
"(",
"r",
".",
"sent",
")",
"+",
"len",
"(",
"r",
".",
"ack",
")",
"\n",
"if",
"l",
"==",
"1",
"{",
"sr",
".",
"waiting",
".",
"Store",
"(",
"sub",
",",
"struct",
"{",
"}",
"{",
"}",
")",
"\n",
"}",
"else",
"if",
"l",
"==",
"100",
"{",
"sr",
".",
"waiting",
".",
"Delete",
"(",
"sub",
")",
"\n",
"sr",
".",
"ready",
".",
"Store",
"(",
"sub",
",",
"struct",
"{",
"}",
"{",
"}",
")",
"\n",
"signalCh",
"(",
"sr",
".",
"notifyCh",
")",
"\n",
"}",
"\n",
"}"
] | // Keep track of sent or acked messages.
// If the number of operations reaches a certain threshold,
// the sub is added to the list of subs that should be flushed asap.
// This call does not do actual RAFT replication and should not block.
// Caller holds the sub's Lock. | [
"Keep",
"track",
"of",
"sent",
"or",
"ack",
"messages",
".",
"If",
"the",
"number",
"of",
"operations",
"reach",
"a",
"certain",
"threshold",
"the",
"sub",
"is",
"added",
"to",
"list",
"of",
"subs",
"that",
"should",
"be",
"flushed",
"asap",
".",
"This",
"call",
"does",
"not",
"do",
"actual",
"RAFT",
"replication",
"and",
"should",
"not",
"block",
".",
"Caller",
"holds",
"the",
"sub",
"s",
"Lock",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3466-L3491 | train |
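collectSentOrAck buffers sent/ack events per subscription and only signals the flusher once a threshold is reached: the first buffered event registers the sub as waiting, the 100th promotes it to ready. A minimal standalone sketch of that batching pattern, with invented types and a deliberately small threshold for the demo:

```go
package main

import "fmt"

const flushThreshold = 4 // the server uses 100; kept small for this demo

// subBuffer mimics, for a single subscription, the buffered sent/ack events.
type subBuffer struct {
	events   []uint64
	waiting  bool
	ready    bool
	notifyCh chan struct{}
}

// add buffers one sequence and mirrors the server's bookkeeping: the first
// buffered event marks the sub as waiting, the threshold-th promotes it to
// ready and signals the flusher without blocking.
func (b *subBuffer) add(seq uint64) {
	b.events = append(b.events, seq)
	switch len(b.events) {
	case 1:
		b.waiting = true
	case flushThreshold:
		b.waiting = false
		b.ready = true
		select { // non-blocking signal, like signalCh in the server
		case b.notifyCh <- struct{}{}:
		default:
		}
	}
}

func main() {
	b := &subBuffer{notifyCh: make(chan struct{}, 1)}
	for seq := uint64(1); seq <= flushThreshold; seq++ {
		b.add(seq)
	}
	fmt.Println(b.waiting, b.ready, len(b.notifyCh)) // false true 1
}
```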
nats-io/nats-streaming-server | server/server.go | replicateSubSentAndAck | func (s *StanServer) replicateSubSentAndAck(sub *subState) {
var data []byte
sr := s.ssarepl
sub.Lock()
r := sub.replicate
if r != nil && len(r.sent)+len(r.ack) > 0 {
data = createSubSentAndAckProto(sub, r)
r.sent = r.sent[:0]
r.ack = r.ack[:0]
r.applying = true
}
sub.Unlock()
if data != nil {
if testSubSentAndAckSlowApply {
time.Sleep(100 * time.Millisecond)
}
s.raft.Apply(data, 0)
sub.Lock()
r = sub.replicate
		// If r is nil it means either that the leader lost leadership,
// in which case we don't do anything, or the sub/conn is being
// closed and endSubSentAndAckReplication() is waiting on a
// channel stored in "gates" map. If we find it, signal.
if r == nil {
if c, ok := sr.gates.Load(sub); ok {
sr.gates.Delete(sub)
signalCh(c.(chan struct{}))
}
} else {
r.applying = false
}
sub.Unlock()
}
} | go | func (s *StanServer) replicateSubSentAndAck(sub *subState) {
var data []byte
sr := s.ssarepl
sub.Lock()
r := sub.replicate
if r != nil && len(r.sent)+len(r.ack) > 0 {
data = createSubSentAndAckProto(sub, r)
r.sent = r.sent[:0]
r.ack = r.ack[:0]
r.applying = true
}
sub.Unlock()
if data != nil {
if testSubSentAndAckSlowApply {
time.Sleep(100 * time.Millisecond)
}
s.raft.Apply(data, 0)
sub.Lock()
r = sub.replicate
		// If r is nil it means either that the leader lost leadership,
// in which case we don't do anything, or the sub/conn is being
// closed and endSubSentAndAckReplication() is waiting on a
// channel stored in "gates" map. If we find it, signal.
if r == nil {
if c, ok := sr.gates.Load(sub); ok {
sr.gates.Delete(sub)
signalCh(c.(chan struct{}))
}
} else {
r.applying = false
}
sub.Unlock()
}
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"replicateSubSentAndAck",
"(",
"sub",
"*",
"subState",
")",
"{",
"var",
"data",
"[",
"]",
"byte",
"\n\n",
"sr",
":=",
"s",
".",
"ssarepl",
"\n",
"sub",
".",
"Lock",
"(",
")",
"\n",
"r",
":=",
"sub",
".",
"replicate",
"\n",
"if",
"r",
"!=",
"nil",
"&&",
"len",
"(",
"r",
".",
"sent",
")",
"+",
"len",
"(",
"r",
".",
"ack",
")",
">",
"0",
"{",
"data",
"=",
"createSubSentAndAckProto",
"(",
"sub",
",",
"r",
")",
"\n",
"r",
".",
"sent",
"=",
"r",
".",
"sent",
"[",
":",
"0",
"]",
"\n",
"r",
".",
"ack",
"=",
"r",
".",
"ack",
"[",
":",
"0",
"]",
"\n",
"r",
".",
"applying",
"=",
"true",
"\n",
"}",
"\n",
"sub",
".",
"Unlock",
"(",
")",
"\n\n",
"if",
"data",
"!=",
"nil",
"{",
"if",
"testSubSentAndAckSlowApply",
"{",
"time",
".",
"Sleep",
"(",
"100",
"*",
"time",
".",
"Millisecond",
")",
"\n",
"}",
"\n",
"s",
".",
"raft",
".",
"Apply",
"(",
"data",
",",
"0",
")",
"\n\n",
"sub",
".",
"Lock",
"(",
")",
"\n",
"r",
"=",
"sub",
".",
"replicate",
"\n",
"// If r is nil it means either that the leader lost leadrship,",
"// in which case we don't do anything, or the sub/conn is being",
"// closed and endSubSentAndAckReplication() is waiting on a",
"// channel stored in \"gates\" map. If we find it, signal.",
"if",
"r",
"==",
"nil",
"{",
"if",
"c",
",",
"ok",
":=",
"sr",
".",
"gates",
".",
"Load",
"(",
"sub",
")",
";",
"ok",
"{",
"sr",
".",
"gates",
".",
"Delete",
"(",
"sub",
")",
"\n",
"signalCh",
"(",
"c",
".",
"(",
"chan",
"struct",
"{",
"}",
")",
")",
"\n",
"}",
"\n",
"}",
"else",
"{",
"r",
".",
"applying",
"=",
"false",
"\n",
"}",
"\n",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"}",
"\n",
"}"
] | // Replicates through RAFT | [
"Replicates",
"through",
"RAFT"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3494-L3530 | train |
nats-io/nats-streaming-server | server/server.go | createSubSentAndAckProto | func createSubSentAndAckProto(sub *subState, r *subSentAndAck) []byte {
op := &spb.RaftOperation{
OpType: spb.RaftOperation_SendAndAck,
SubSentAck: &spb.SubSentAndAck{
Channel: sub.subject,
AckInbox: sub.AckInbox,
Sent: r.sent,
Ack: r.ack,
},
}
data, err := op.Marshal()
if err != nil {
panic(err)
}
return data
} | go | func createSubSentAndAckProto(sub *subState, r *subSentAndAck) []byte {
op := &spb.RaftOperation{
OpType: spb.RaftOperation_SendAndAck,
SubSentAck: &spb.SubSentAndAck{
Channel: sub.subject,
AckInbox: sub.AckInbox,
Sent: r.sent,
Ack: r.ack,
},
}
data, err := op.Marshal()
if err != nil {
panic(err)
}
return data
} | [
"func",
"createSubSentAndAckProto",
"(",
"sub",
"*",
"subState",
",",
"r",
"*",
"subSentAndAck",
")",
"[",
"]",
"byte",
"{",
"op",
":=",
"&",
"spb",
".",
"RaftOperation",
"{",
"OpType",
":",
"spb",
".",
"RaftOperation_SendAndAck",
",",
"SubSentAck",
":",
"&",
"spb",
".",
"SubSentAndAck",
"{",
"Channel",
":",
"sub",
".",
"subject",
",",
"AckInbox",
":",
"sub",
".",
"AckInbox",
",",
"Sent",
":",
"r",
".",
"sent",
",",
"Ack",
":",
"r",
".",
"ack",
",",
"}",
",",
"}",
"\n",
"data",
",",
"err",
":=",
"op",
".",
"Marshal",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"panic",
"(",
"err",
")",
"\n",
"}",
"\n",
"return",
"data",
"\n",
"}"
] | // Little helper function to create a RaftOperation_SendAndAck protocol
// and serialize it. | [
"Little",
"helper",
"function",
"to",
"create",
"a",
"RaftOperation_SendAndAck",
"protocol",
"and",
"serialize",
"it",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3534-L3549 | train |
nats-io/nats-streaming-server | server/server.go | clearSentAndAck | func (s *StanServer) clearSentAndAck(sub *subState) {
sr := s.ssarepl
sr.waiting.Delete(sub)
sr.ready.Delete(sub)
sub.replicate = nil
} | go | func (s *StanServer) clearSentAndAck(sub *subState) {
sr := s.ssarepl
sr.waiting.Delete(sub)
sr.ready.Delete(sub)
sub.replicate = nil
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"clearSentAndAck",
"(",
"sub",
"*",
"subState",
")",
"{",
"sr",
":=",
"s",
".",
"ssarepl",
"\n",
"sr",
".",
"waiting",
".",
"Delete",
"(",
"sub",
")",
"\n",
"sr",
".",
"ready",
".",
"Delete",
"(",
"sub",
")",
"\n",
"sub",
".",
"replicate",
"=",
"nil",
"\n",
"}"
] | // Sub lock is held on entry | [
"Sub",
"lock",
"is",
"held",
"on",
"entry"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3588-L3593 | train |
nats-io/nats-streaming-server | server/server.go | sendMsgToSub | func (s *StanServer) sendMsgToSub(sub *subState, m *pb.MsgProto, force bool) (bool, bool) {
if sub == nil || m == nil || !sub.initialized || (sub.newOnHold && !m.Redelivered) {
return false, false
}
// Don't send if we have too many outstanding already, unless forced to send.
ap := int32(len(sub.acksPending))
if !force && (ap >= sub.MaxInFlight) {
sub.stalled = true
return false, false
}
if s.trace {
var action string
if m.Redelivered {
action = "Redelivering"
} else {
action = "Delivering"
}
s.log.Tracef("[Client:%s] %s msg to subid=%d, subject=%s, seq=%d",
sub.ClientID, action, sub.ID, m.Subject, m.Sequence)
}
// Marshal of a pb.MsgProto cannot fail
b, _ := m.Marshal()
// but protect against a store implementation that may incorrectly
// return an empty message.
if len(b) == 0 {
panic("store implementation returned an empty message")
}
if err := s.ncs.Publish(sub.Inbox, b); err != nil {
s.log.Errorf("[Client:%s] Failed sending to subid=%d, subject=%s, seq=%d, err=%v",
sub.ClientID, sub.ID, m.Subject, m.Sequence, err)
return false, false
}
// Setup the ackTimer as needed now. I don't want to use defer in this
// function, and want to make sure that if we exit before the end, the
// timer is set. It will be adjusted/stopped as needed.
if sub.ackTimer == nil {
s.setupAckTimer(sub, sub.ackWait)
}
// If this message is already pending, do not add it again to the store.
if expTime, present := sub.acksPending[m.Sequence]; present {
// However, update the next expiration time.
if expTime == 0 {
// That can happen after a server restart, so need to use
// the current time.
expTime = time.Now().UnixNano()
}
// bump the next expiration time with the sub's ackWait.
expTime += int64(sub.ackWait)
sub.acksPending[m.Sequence] = expTime
return true, true
}
// If in cluster mode, schedule replication of the sent event.
if s.isClustered {
s.collectSentOrAck(sub, replicateSent, m.Sequence)
}
// Store in storage
if err := sub.store.AddSeqPending(sub.ID, m.Sequence); err != nil {
s.log.Errorf("[Client:%s] Unable to add pending message to subid=%d, subject=%s, seq=%d, err=%v",
sub.ClientID, sub.ID, sub.subject, m.Sequence, err)
return false, false
}
// Update LastSent if applicable
if m.Sequence > sub.LastSent {
sub.LastSent = m.Sequence
}
// Store in ackPending.
// Use current time to compute expiration time instead of m.Timestamp.
	// A message can be persisted in the log and sent much later to a
// new subscriber. Basing expiration time on m.Timestamp would
// likely set the expiration time in the past!
sub.acksPending[m.Sequence] = time.Now().UnixNano() + int64(sub.ackWait)
// Now that we have added to acksPending, check again if we
// have reached the max and tell the caller that it should not
// be sending more at this time.
if !force && (ap+1 == sub.MaxInFlight) {
sub.stalled = true
return true, false
}
return true, true
} | go | func (s *StanServer) sendMsgToSub(sub *subState, m *pb.MsgProto, force bool) (bool, bool) {
if sub == nil || m == nil || !sub.initialized || (sub.newOnHold && !m.Redelivered) {
return false, false
}
// Don't send if we have too many outstanding already, unless forced to send.
ap := int32(len(sub.acksPending))
if !force && (ap >= sub.MaxInFlight) {
sub.stalled = true
return false, false
}
if s.trace {
var action string
if m.Redelivered {
action = "Redelivering"
} else {
action = "Delivering"
}
s.log.Tracef("[Client:%s] %s msg to subid=%d, subject=%s, seq=%d",
sub.ClientID, action, sub.ID, m.Subject, m.Sequence)
}
// Marshal of a pb.MsgProto cannot fail
b, _ := m.Marshal()
// but protect against a store implementation that may incorrectly
// return an empty message.
if len(b) == 0 {
panic("store implementation returned an empty message")
}
if err := s.ncs.Publish(sub.Inbox, b); err != nil {
s.log.Errorf("[Client:%s] Failed sending to subid=%d, subject=%s, seq=%d, err=%v",
sub.ClientID, sub.ID, m.Subject, m.Sequence, err)
return false, false
}
// Setup the ackTimer as needed now. I don't want to use defer in this
// function, and want to make sure that if we exit before the end, the
// timer is set. It will be adjusted/stopped as needed.
if sub.ackTimer == nil {
s.setupAckTimer(sub, sub.ackWait)
}
// If this message is already pending, do not add it again to the store.
if expTime, present := sub.acksPending[m.Sequence]; present {
// However, update the next expiration time.
if expTime == 0 {
// That can happen after a server restart, so need to use
// the current time.
expTime = time.Now().UnixNano()
}
// bump the next expiration time with the sub's ackWait.
expTime += int64(sub.ackWait)
sub.acksPending[m.Sequence] = expTime
return true, true
}
// If in cluster mode, schedule replication of the sent event.
if s.isClustered {
s.collectSentOrAck(sub, replicateSent, m.Sequence)
}
// Store in storage
if err := sub.store.AddSeqPending(sub.ID, m.Sequence); err != nil {
s.log.Errorf("[Client:%s] Unable to add pending message to subid=%d, subject=%s, seq=%d, err=%v",
sub.ClientID, sub.ID, sub.subject, m.Sequence, err)
return false, false
}
// Update LastSent if applicable
if m.Sequence > sub.LastSent {
sub.LastSent = m.Sequence
}
// Store in ackPending.
// Use current time to compute expiration time instead of m.Timestamp.
	// A message can be persisted in the log and sent much later to a
// new subscriber. Basing expiration time on m.Timestamp would
// likely set the expiration time in the past!
sub.acksPending[m.Sequence] = time.Now().UnixNano() + int64(sub.ackWait)
// Now that we have added to acksPending, check again if we
// have reached the max and tell the caller that it should not
// be sending more at this time.
if !force && (ap+1 == sub.MaxInFlight) {
sub.stalled = true
return true, false
}
return true, true
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"sendMsgToSub",
"(",
"sub",
"*",
"subState",
",",
"m",
"*",
"pb",
".",
"MsgProto",
",",
"force",
"bool",
")",
"(",
"bool",
",",
"bool",
")",
"{",
"if",
"sub",
"==",
"nil",
"||",
"m",
"==",
"nil",
"||",
"!",
"sub",
".",
"initialized",
"||",
"(",
"sub",
".",
"newOnHold",
"&&",
"!",
"m",
".",
"Redelivered",
")",
"{",
"return",
"false",
",",
"false",
"\n",
"}",
"\n\n",
"// Don't send if we have too many outstanding already, unless forced to send.",
"ap",
":=",
"int32",
"(",
"len",
"(",
"sub",
".",
"acksPending",
")",
")",
"\n",
"if",
"!",
"force",
"&&",
"(",
"ap",
">=",
"sub",
".",
"MaxInFlight",
")",
"{",
"sub",
".",
"stalled",
"=",
"true",
"\n",
"return",
"false",
",",
"false",
"\n",
"}",
"\n\n",
"if",
"s",
".",
"trace",
"{",
"var",
"action",
"string",
"\n",
"if",
"m",
".",
"Redelivered",
"{",
"action",
"=",
"\"",
"\"",
"\n",
"}",
"else",
"{",
"action",
"=",
"\"",
"\"",
"\n",
"}",
"\n",
"s",
".",
"log",
".",
"Tracef",
"(",
"\"",
"\"",
",",
"sub",
".",
"ClientID",
",",
"action",
",",
"sub",
".",
"ID",
",",
"m",
".",
"Subject",
",",
"m",
".",
"Sequence",
")",
"\n",
"}",
"\n\n",
"// Marshal of a pb.MsgProto cannot fail",
"b",
",",
"_",
":=",
"m",
".",
"Marshal",
"(",
")",
"\n",
"// but protect against a store implementation that may incorrectly",
"// return an empty message.",
"if",
"len",
"(",
"b",
")",
"==",
"0",
"{",
"panic",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"if",
"err",
":=",
"s",
".",
"ncs",
".",
"Publish",
"(",
"sub",
".",
"Inbox",
",",
"b",
")",
";",
"err",
"!=",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"sub",
".",
"ClientID",
",",
"sub",
".",
"ID",
",",
"m",
".",
"Subject",
",",
"m",
".",
"Sequence",
",",
"err",
")",
"\n",
"return",
"false",
",",
"false",
"\n",
"}",
"\n\n",
"// Setup the ackTimer as needed now. I don't want to use defer in this",
"// function, and want to make sure that if we exit before the end, the",
"// timer is set. It will be adjusted/stopped as needed.",
"if",
"sub",
".",
"ackTimer",
"==",
"nil",
"{",
"s",
".",
"setupAckTimer",
"(",
"sub",
",",
"sub",
".",
"ackWait",
")",
"\n",
"}",
"\n\n",
"// If this message is already pending, do not add it again to the store.",
"if",
"expTime",
",",
"present",
":=",
"sub",
".",
"acksPending",
"[",
"m",
".",
"Sequence",
"]",
";",
"present",
"{",
"// However, update the next expiration time.",
"if",
"expTime",
"==",
"0",
"{",
"// That can happen after a server restart, so need to use",
"// the current time.",
"expTime",
"=",
"time",
".",
"Now",
"(",
")",
".",
"UnixNano",
"(",
")",
"\n",
"}",
"\n",
"// bump the next expiration time with the sub's ackWait.",
"expTime",
"+=",
"int64",
"(",
"sub",
".",
"ackWait",
")",
"\n",
"sub",
".",
"acksPending",
"[",
"m",
".",
"Sequence",
"]",
"=",
"expTime",
"\n",
"return",
"true",
",",
"true",
"\n",
"}",
"\n\n",
"// If in cluster mode, schedule replication of the sent event.",
"if",
"s",
".",
"isClustered",
"{",
"s",
".",
"collectSentOrAck",
"(",
"sub",
",",
"replicateSent",
",",
"m",
".",
"Sequence",
")",
"\n",
"}",
"\n\n",
"// Store in storage",
"if",
"err",
":=",
"sub",
".",
"store",
".",
"AddSeqPending",
"(",
"sub",
".",
"ID",
",",
"m",
".",
"Sequence",
")",
";",
"err",
"!=",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"sub",
".",
"ClientID",
",",
"sub",
".",
"ID",
",",
"sub",
".",
"subject",
",",
"m",
".",
"Sequence",
",",
"err",
")",
"\n",
"return",
"false",
",",
"false",
"\n",
"}",
"\n\n",
"// Update LastSent if applicable",
"if",
"m",
".",
"Sequence",
">",
"sub",
".",
"LastSent",
"{",
"sub",
".",
"LastSent",
"=",
"m",
".",
"Sequence",
"\n",
"}",
"\n\n",
"// Store in ackPending.",
"// Use current time to compute expiration time instead of m.Timestamp.",
"// A message can be persisted in the log and send much later to a",
"// new subscriber. Basing expiration time on m.Timestamp would",
"// likely set the expiration time in the past!",
"sub",
".",
"acksPending",
"[",
"m",
".",
"Sequence",
"]",
"=",
"time",
".",
"Now",
"(",
")",
".",
"UnixNano",
"(",
")",
"+",
"int64",
"(",
"sub",
".",
"ackWait",
")",
"\n\n",
"// Now that we have added to acksPending, check again if we",
"// have reached the max and tell the caller that it should not",
"// be sending more at this time.",
"if",
"!",
"force",
"&&",
"(",
"ap",
"+",
"1",
"==",
"sub",
".",
"MaxInFlight",
")",
"{",
"sub",
".",
"stalled",
"=",
"true",
"\n",
"return",
"true",
",",
"false",
"\n",
"}",
"\n\n",
"return",
"true",
",",
"true",
"\n",
"}"
] | // Sends the message to the subscriber.
// If the number of acksPending is greater than or equal to the sub's MaxInFlight
// limit, the message is not sent and the subscriber is marked as stalled,
// unless `force` is true, in which case the message is always sent.
// Sub lock should be held before calling. | [
"Sends",
"the",
"message",
"to",
"the",
"subscriber",
"Unless",
"force",
"is",
"true",
"in",
"which",
"case",
"message",
"is",
"always",
"sent",
"if",
"the",
"number",
"of",
"acksPending",
"is",
"greater",
"or",
"equal",
"to",
"the",
"sub",
"s",
"MaxInFlight",
"limit",
"messages",
"are",
"not",
"sent",
"and",
"subscriber",
"is",
"marked",
"as",
"stalled",
".",
"Sub",
"lock",
"should",
"be",
"held",
"before",
"calling",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3688-L3778 | train |
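Two details of sendMsgToSub's bookkeeping are easy to isolate: ack deadlines are always derived from the current time (never from the message timestamp, which may be far in the past), and a message that is already pending only has its deadline pushed back by ackWait. A hedged sketch of that logic with a made-up helper name:

```go
package main

import (
	"fmt"
	"time"
)

// trackPending records or extends the ack deadline for a sequence in an
// acksPending-style map (sequence -> expiration in UnixNano). It returns
// true if the sequence was already pending. Illustrative only.
func trackPending(acksPending map[uint64]int64, seq uint64, ackWait time.Duration) bool {
	now := time.Now().UnixNano()
	if exp, present := acksPending[seq]; present {
		if exp == 0 {
			// Can happen after a server restart: no recorded deadline yet.
			exp = now
		}
		acksPending[seq] = exp + int64(ackWait) // push the deadline back
		return true
	}
	// New pending message: the deadline is based on "now", not on the
	// message timestamp, which may be much older than the delivery.
	acksPending[seq] = now + int64(ackWait)
	return false
}

func main() {
	pending := map[uint64]int64{}
	ackWait := 30 * time.Second
	fmt.Println(trackPending(pending, 42, ackWait)) // false: newly tracked
	fmt.Println(trackPending(pending, 42, ackWait)) // true: deadline extended
}
```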
nats-io/nats-streaming-server | server/server.go | setupAckTimer | func (s *StanServer) setupAckTimer(sub *subState, d time.Duration) {
sub.ackTimer = time.AfterFunc(d, func() {
s.performAckExpirationRedelivery(sub, false)
})
} | go | func (s *StanServer) setupAckTimer(sub *subState, d time.Duration) {
sub.ackTimer = time.AfterFunc(d, func() {
s.performAckExpirationRedelivery(sub, false)
})
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"setupAckTimer",
"(",
"sub",
"*",
"subState",
",",
"d",
"time",
".",
"Duration",
")",
"{",
"sub",
".",
"ackTimer",
"=",
"time",
".",
"AfterFunc",
"(",
"d",
",",
"func",
"(",
")",
"{",
"s",
".",
"performAckExpirationRedelivery",
"(",
"sub",
",",
"false",
")",
"\n",
"}",
")",
"\n",
"}"
] | // Sets up the ackTimer to fire at the given duration.
// sub's lock held on entry. | [
"Sets",
"up",
"the",
"ackTimer",
"to",
"fire",
"at",
"the",
"given",
"duration",
".",
"sub",
"s",
"lock",
"held",
"on",
"entry",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3782-L3786 | train |
nats-io/nats-streaming-server | server/server.go | sendDeleteChannelRequest | func (s *StanServer) sendDeleteChannelRequest(c *channel) {
iopm := &ioPendingMsg{c: c, dc: true}
s.ioChannel <- iopm
} | go | func (s *StanServer) sendDeleteChannelRequest(c *channel) {
iopm := &ioPendingMsg{c: c, dc: true}
s.ioChannel <- iopm
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"sendDeleteChannelRequest",
"(",
"c",
"*",
"channel",
")",
"{",
"iopm",
":=",
"&",
"ioPendingMsg",
"{",
"c",
":",
"c",
",",
"dc",
":",
"true",
"}",
"\n",
"s",
".",
"ioChannel",
"<-",
"iopm",
"\n",
"}"
] | // Sends a special ioPendingMsg to indicate that we should attempt
// to delete the given channel. | [
"Sends",
"a",
"special",
"ioPendingMsg",
"to",
"indicate",
"that",
"we",
"should",
"attempt",
"to",
"delete",
"the",
"given",
"channel",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4007-L4010 | train |
nats-io/nats-streaming-server | server/server.go | ackPublisher | func (s *StanServer) ackPublisher(iopm *ioPendingMsg) {
msgAck := &iopm.pa
msgAck.Guid = iopm.pm.Guid
needed := msgAck.Size()
s.tmpBuf = util.EnsureBufBigEnough(s.tmpBuf, needed)
n, _ := msgAck.MarshalTo(s.tmpBuf)
if s.trace {
pm := &iopm.pm
s.log.Tracef("[Client:%s] Acking Publisher subj=%s guid=%s", pm.ClientID, pm.Subject, pm.Guid)
}
s.ncs.Publish(iopm.m.Reply, s.tmpBuf[:n])
} | go | func (s *StanServer) ackPublisher(iopm *ioPendingMsg) {
msgAck := &iopm.pa
msgAck.Guid = iopm.pm.Guid
needed := msgAck.Size()
s.tmpBuf = util.EnsureBufBigEnough(s.tmpBuf, needed)
n, _ := msgAck.MarshalTo(s.tmpBuf)
if s.trace {
pm := &iopm.pm
s.log.Tracef("[Client:%s] Acking Publisher subj=%s guid=%s", pm.ClientID, pm.Subject, pm.Guid)
}
s.ncs.Publish(iopm.m.Reply, s.tmpBuf[:n])
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"ackPublisher",
"(",
"iopm",
"*",
"ioPendingMsg",
")",
"{",
"msgAck",
":=",
"&",
"iopm",
".",
"pa",
"\n",
"msgAck",
".",
"Guid",
"=",
"iopm",
".",
"pm",
".",
"Guid",
"\n",
"needed",
":=",
"msgAck",
".",
"Size",
"(",
")",
"\n",
"s",
".",
"tmpBuf",
"=",
"util",
".",
"EnsureBufBigEnough",
"(",
"s",
".",
"tmpBuf",
",",
"needed",
")",
"\n",
"n",
",",
"_",
":=",
"msgAck",
".",
"MarshalTo",
"(",
"s",
".",
"tmpBuf",
")",
"\n",
"if",
"s",
".",
"trace",
"{",
"pm",
":=",
"&",
"iopm",
".",
"pm",
"\n",
"s",
".",
"log",
".",
"Tracef",
"(",
"\"",
"\"",
",",
"pm",
".",
"ClientID",
",",
"pm",
".",
"Subject",
",",
"pm",
".",
"Guid",
")",
"\n",
"}",
"\n",
"s",
".",
"ncs",
".",
"Publish",
"(",
"iopm",
".",
"m",
".",
"Reply",
",",
"s",
".",
"tmpBuf",
"[",
":",
"n",
"]",
")",
"\n",
"}"
] | // ackPublisher sends the ack for a message. | [
"ackPublisher",
"sends",
"the",
"ack",
"for",
"a",
"message",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4052-L4063 | train |
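ackPublisher avoids allocating per ack by growing a scratch buffer only when needed, marshaling into it, and publishing just the first n bytes. The sketch below imitates that buffer-reuse pattern; ensureBufBigEnough here is a simplified stand-in for util.EnsureBufBigEnough, and a plain copy stands in for the protobuf MarshalTo call:

```go
package main

import "fmt"

// ensureBufBigEnough grows buf only when needed (simplified stand-in).
func ensureBufBigEnough(buf []byte, needed int) []byte {
	if cap(buf) < needed {
		return make([]byte, needed)
	}
	return buf[:cap(buf)]
}

func main() {
	var scratch []byte
	for _, payload := range []string{"ack-1", "longer-ack-2", "a3"} {
		needed := len(payload)
		scratch = ensureBufBigEnough(scratch, needed)
		n := copy(scratch, payload) // stand-in for msgAck.MarshalTo(scratch)
		fmt.Printf("publish %q (buf cap %d)\n", scratch[:n], cap(scratch))
	}
}
```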
nats-io/nats-streaming-server | server/server.go | deleteFromList | func (sub *subState) deleteFromList(sl []*subState) ([]*subState, bool) {
for i := 0; i < len(sl); i++ {
if sl[i] == sub {
sl[i] = sl[len(sl)-1]
sl[len(sl)-1] = nil
sl = sl[:len(sl)-1]
return shrinkSubListIfNeeded(sl), true
}
}
return sl, false
} | go | func (sub *subState) deleteFromList(sl []*subState) ([]*subState, bool) {
for i := 0; i < len(sl); i++ {
if sl[i] == sub {
sl[i] = sl[len(sl)-1]
sl[len(sl)-1] = nil
sl = sl[:len(sl)-1]
return shrinkSubListIfNeeded(sl), true
}
}
return sl, false
} | [
"func",
"(",
"sub",
"*",
"subState",
")",
"deleteFromList",
"(",
"sl",
"[",
"]",
"*",
"subState",
")",
"(",
"[",
"]",
"*",
"subState",
",",
"bool",
")",
"{",
"for",
"i",
":=",
"0",
";",
"i",
"<",
"len",
"(",
"sl",
")",
";",
"i",
"++",
"{",
"if",
"sl",
"[",
"i",
"]",
"==",
"sub",
"{",
"sl",
"[",
"i",
"]",
"=",
"sl",
"[",
"len",
"(",
"sl",
")",
"-",
"1",
"]",
"\n",
"sl",
"[",
"len",
"(",
"sl",
")",
"-",
"1",
"]",
"=",
"nil",
"\n",
"sl",
"=",
"sl",
"[",
":",
"len",
"(",
"sl",
")",
"-",
"1",
"]",
"\n",
"return",
"shrinkSubListIfNeeded",
"(",
"sl",
")",
",",
"true",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"sl",
",",
"false",
"\n",
"}"
] | // Delete a sub from a given list. | [
"Delete",
"a",
"sub",
"from",
"a",
"given",
"list",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4066-L4076 | train |
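deleteFromList removes an element with the swap-with-last idiom: overwrite the target with the last element, nil the last slot so it can be garbage collected, and shorten the slice by one; order is not preserved. A standalone illustration:

```go
package main

import "fmt"

// removeAt deletes the element at index i from a slice of pointers in O(1),
// sacrificing order: the last element takes its place.
func removeAt(sl []*int, i int) []*int {
	sl[i] = sl[len(sl)-1]
	sl[len(sl)-1] = nil // drop the reference so it can be collected
	return sl[:len(sl)-1]
}

func main() {
	a, b, c := 1, 2, 3
	sl := []*int{&a, &b, &c}
	sl = removeAt(sl, 0)                 // &c moved into slot 0
	fmt.Println(*sl[0], *sl[1], len(sl)) // 3 2 2
}
```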
nats-io/nats-streaming-server | server/server.go | shrinkSubListIfNeeded | func shrinkSubListIfNeeded(sl []*subState) []*subState {
lsl := len(sl)
csl := cap(sl)
// Don't bother if list not too big
if csl <= 8 {
return sl
}
pFree := float32(csl-lsl) / float32(csl)
if pFree > 0.50 {
return append([]*subState(nil), sl...)
}
return sl
} | go | func shrinkSubListIfNeeded(sl []*subState) []*subState {
lsl := len(sl)
csl := cap(sl)
// Don't bother if list not too big
if csl <= 8 {
return sl
}
pFree := float32(csl-lsl) / float32(csl)
if pFree > 0.50 {
return append([]*subState(nil), sl...)
}
return sl
} | [
"func",
"shrinkSubListIfNeeded",
"(",
"sl",
"[",
"]",
"*",
"subState",
")",
"[",
"]",
"*",
"subState",
"{",
"lsl",
":=",
"len",
"(",
"sl",
")",
"\n",
"csl",
":=",
"cap",
"(",
"sl",
")",
"\n",
"// Don't bother if list not too big",
"if",
"csl",
"<=",
"8",
"{",
"return",
"sl",
"\n",
"}",
"\n",
"pFree",
":=",
"float32",
"(",
"csl",
"-",
"lsl",
")",
"/",
"float32",
"(",
"csl",
")",
"\n",
"if",
"pFree",
">",
"0.50",
"{",
"return",
"append",
"(",
"[",
"]",
"*",
"subState",
"(",
"nil",
")",
",",
"sl",
"...",
")",
"\n",
"}",
"\n",
"return",
"sl",
"\n",
"}"
] | // Checks if we need to do a resize. This is for very large growth followed
// by a subsequent return to a more normal size. | [
"Checks",
"if",
"we",
"need",
"to",
"do",
"a",
"resize",
".",
"This",
"is",
"for",
"very",
"large",
"growth",
"then",
"subsequent",
"return",
"to",
"a",
"more",
"normal",
"size",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4080-L4092 | train |
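The shrink heuristic only kicks in once capacity exceeds 8 and more than half of it is unused; the copy into a fresh slice is what actually releases the oversized backing array. A small demonstration with made-up numbers:

```go
package main

import "fmt"

type sub struct{} // stand-in for a subscription entry

func main() {
	// Simulate a list that grew large and then shrank back down.
	big := make([]*sub, 3, 64) // len 3, cap 64: ~95% of the capacity unused
	pFree := float32(cap(big)-len(big)) / float32(cap(big))
	fmt.Printf("before: len=%d cap=%d free=%.2f\n", len(big), cap(big), pFree)

	if cap(big) > 8 && pFree > 0.50 {
		// Same trick as shrinkSubListIfNeeded: copying into a fresh slice
		// lets the oversized backing array be garbage collected.
		big = append([]*sub(nil), big...)
	}
	fmt.Printf("after:  len=%d cap=%d\n", len(big), cap(big))
}
```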
nats-io/nats-streaming-server | server/server.go | removeAllNonDurableSubscribers | func (s *StanServer) removeAllNonDurableSubscribers(client *client) {
// client has been unregistered and no other routine can add/remove
// subscriptions, so it is safe to use the original.
client.RLock()
subs := client.subs
clientID := client.info.ID
client.RUnlock()
var (
storesToFlush = map[string]stores.SubStore{}
channels = map[string]struct{}{}
)
for _, sub := range subs {
sub.RLock()
subject := sub.subject
isDurable := sub.IsDurable
subStore := sub.store
sub.RUnlock()
// Get the channel
c := s.channels.get(subject)
if c == nil {
continue
}
// Don't remove durables
c.ss.Remove(c, sub, false)
// If the sub is a durable, there may have been an update to storage,
// so we will want to flush the store. In clustering, during replay,
// subStore may be nil.
if isDurable && subStore != nil {
storesToFlush[subject] = subStore
}
channels[subject] = struct{}{}
}
if len(storesToFlush) > 0 {
for subject, subStore := range storesToFlush {
if err := subStore.Flush(); err != nil {
s.log.Errorf("[Client:%s] Error flushing store while removing subscriptions: subject=%s, err=%v", clientID, subject, err)
}
}
}
for channel := range channels {
s.channels.maybeStartChannelDeleteTimer(channel, nil)
}
} | go | func (s *StanServer) removeAllNonDurableSubscribers(client *client) {
// client has been unregistered and no other routine can add/remove
// subscriptions, so it is safe to use the original.
client.RLock()
subs := client.subs
clientID := client.info.ID
client.RUnlock()
var (
storesToFlush = map[string]stores.SubStore{}
channels = map[string]struct{}{}
)
for _, sub := range subs {
sub.RLock()
subject := sub.subject
isDurable := sub.IsDurable
subStore := sub.store
sub.RUnlock()
// Get the channel
c := s.channels.get(subject)
if c == nil {
continue
}
// Don't remove durables
c.ss.Remove(c, sub, false)
// If the sub is a durable, there may have been an update to storage,
// so we will want to flush the store. In clustering, during replay,
// subStore may be nil.
if isDurable && subStore != nil {
storesToFlush[subject] = subStore
}
channels[subject] = struct{}{}
}
if len(storesToFlush) > 0 {
for subject, subStore := range storesToFlush {
if err := subStore.Flush(); err != nil {
s.log.Errorf("[Client:%s] Error flushing store while removing subscriptions: subject=%s, err=%v", clientID, subject, err)
}
}
}
for channel := range channels {
s.channels.maybeStartChannelDeleteTimer(channel, nil)
}
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"removeAllNonDurableSubscribers",
"(",
"client",
"*",
"client",
")",
"{",
"// client has been unregistered and no other routine can add/remove",
"// subscriptions, so it is safe to use the original.",
"client",
".",
"RLock",
"(",
")",
"\n",
"subs",
":=",
"client",
".",
"subs",
"\n",
"clientID",
":=",
"client",
".",
"info",
".",
"ID",
"\n",
"client",
".",
"RUnlock",
"(",
")",
"\n",
"var",
"(",
"storesToFlush",
"=",
"map",
"[",
"string",
"]",
"stores",
".",
"SubStore",
"{",
"}",
"\n",
"channels",
"=",
"map",
"[",
"string",
"]",
"struct",
"{",
"}",
"{",
"}",
"\n",
")",
"\n",
"for",
"_",
",",
"sub",
":=",
"range",
"subs",
"{",
"sub",
".",
"RLock",
"(",
")",
"\n",
"subject",
":=",
"sub",
".",
"subject",
"\n",
"isDurable",
":=",
"sub",
".",
"IsDurable",
"\n",
"subStore",
":=",
"sub",
".",
"store",
"\n",
"sub",
".",
"RUnlock",
"(",
")",
"\n",
"// Get the channel",
"c",
":=",
"s",
".",
"channels",
".",
"get",
"(",
"subject",
")",
"\n",
"if",
"c",
"==",
"nil",
"{",
"continue",
"\n",
"}",
"\n",
"// Don't remove durables",
"c",
".",
"ss",
".",
"Remove",
"(",
"c",
",",
"sub",
",",
"false",
")",
"\n",
"// If the sub is a durable, there may have been an update to storage,",
"// so we will want to flush the store. In clustering, during replay,",
"// subStore may be nil.",
"if",
"isDurable",
"&&",
"subStore",
"!=",
"nil",
"{",
"storesToFlush",
"[",
"subject",
"]",
"=",
"subStore",
"\n",
"}",
"\n",
"channels",
"[",
"subject",
"]",
"=",
"struct",
"{",
"}",
"{",
"}",
"\n",
"}",
"\n",
"if",
"len",
"(",
"storesToFlush",
")",
">",
"0",
"{",
"for",
"subject",
",",
"subStore",
":=",
"range",
"storesToFlush",
"{",
"if",
"err",
":=",
"subStore",
".",
"Flush",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"clientID",
",",
"subject",
",",
"err",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"for",
"channel",
":=",
"range",
"channels",
"{",
"s",
".",
"channels",
".",
"maybeStartChannelDeleteTimer",
"(",
"channel",
",",
"nil",
")",
"\n",
"}",
"\n",
"}"
] | // removeAllNonDurableSubscribers will remove all non-durable subscribers for the client. | [
"removeAllNonDurableSubscribers",
"will",
"remove",
"all",
"non",
"-",
"durable",
"subscribers",
"for",
"the",
"client",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4095-L4137 | train |
nats-io/nats-streaming-server | server/server.go | processUnsubscribeRequest | func (s *StanServer) processUnsubscribeRequest(m *nats.Msg) {
req := &pb.UnsubscribeRequest{}
err := req.Unmarshal(m.Data)
if err != nil {
s.log.Errorf("Invalid unsub request from %s", m.Subject)
s.sendSubscriptionResponseErr(m.Reply, ErrInvalidUnsubReq)
return
}
s.performmUnsubOrCloseSubscription(m, req, false)
} | go | func (s *StanServer) processUnsubscribeRequest(m *nats.Msg) {
req := &pb.UnsubscribeRequest{}
err := req.Unmarshal(m.Data)
if err != nil {
s.log.Errorf("Invalid unsub request from %s", m.Subject)
s.sendSubscriptionResponseErr(m.Reply, ErrInvalidUnsubReq)
return
}
s.performmUnsubOrCloseSubscription(m, req, false)
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"processUnsubscribeRequest",
"(",
"m",
"*",
"nats",
".",
"Msg",
")",
"{",
"req",
":=",
"&",
"pb",
".",
"UnsubscribeRequest",
"{",
"}",
"\n",
"err",
":=",
"req",
".",
"Unmarshal",
"(",
"m",
".",
"Data",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"m",
".",
"Subject",
")",
"\n",
"s",
".",
"sendSubscriptionResponseErr",
"(",
"m",
".",
"Reply",
",",
"ErrInvalidUnsubReq",
")",
"\n",
"return",
"\n",
"}",
"\n",
"s",
".",
"performmUnsubOrCloseSubscription",
"(",
"m",
",",
"req",
",",
"false",
")",
"\n",
"}"
] | // processUnsubscribeRequest will process an unsubscribe request. | [
"processUnsubscribeRequest",
"will",
"process",
"a",
"unsubscribe",
"request",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4140-L4149 | train |
nats-io/nats-streaming-server | server/server.go | performmUnsubOrCloseSubscription | func (s *StanServer) performmUnsubOrCloseSubscription(m *nats.Msg, req *pb.UnsubscribeRequest, isSubClose bool) {
// With partitioning, first verify that this server is handling this
// channel. If not, do not return an error, since another server will
// handle it. If no other server is, the client will get a timeout.
if s.partitions != nil {
if r := s.partitions.sl.Match(req.Subject); len(r) == 0 {
return
}
}
s.barrier(func() {
var err error
if s.isClustered {
if isSubClose {
err = s.replicateCloseSubscription(req)
} else {
err = s.replicateRemoveSubscription(req)
}
} else {
s.closeMu.Lock()
err = s.unsubscribe(req, isSubClose)
s.closeMu.Unlock()
}
// If there was an error, it has been already logged.
if err == nil {
// This will check if the channel has MaxInactivity defined,
// if so and there is no active subscription, it will start the
// delete timer.
s.channels.maybeStartChannelDeleteTimer(req.Subject, nil)
}
// If err is nil, it will be a non-error response
s.sendSubscriptionResponseErr(m.Reply, err)
})
} | go | func (s *StanServer) performmUnsubOrCloseSubscription(m *nats.Msg, req *pb.UnsubscribeRequest, isSubClose bool) {
// With partitioning, first verify that this server is handling this
// channel. If not, do not return an error, since another server will
// handle it. If no other server is, the client will get a timeout.
if s.partitions != nil {
if r := s.partitions.sl.Match(req.Subject); len(r) == 0 {
return
}
}
s.barrier(func() {
var err error
if s.isClustered {
if isSubClose {
err = s.replicateCloseSubscription(req)
} else {
err = s.replicateRemoveSubscription(req)
}
} else {
s.closeMu.Lock()
err = s.unsubscribe(req, isSubClose)
s.closeMu.Unlock()
}
// If there was an error, it has been already logged.
if err == nil {
// This will check if the channel has MaxInactivity defined,
// if so and there is no active subscription, it will start the
// delete timer.
s.channels.maybeStartChannelDeleteTimer(req.Subject, nil)
}
// If err is nil, it will be a non-error response
s.sendSubscriptionResponseErr(m.Reply, err)
})
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"performmUnsubOrCloseSubscription",
"(",
"m",
"*",
"nats",
".",
"Msg",
",",
"req",
"*",
"pb",
".",
"UnsubscribeRequest",
",",
"isSubClose",
"bool",
")",
"{",
"// With partitioning, first verify that this server is handling this",
"// channel. If not, do not return an error, since another server will",
"// handle it. If no other server is, the client will get a timeout.",
"if",
"s",
".",
"partitions",
"!=",
"nil",
"{",
"if",
"r",
":=",
"s",
".",
"partitions",
".",
"sl",
".",
"Match",
"(",
"req",
".",
"Subject",
")",
";",
"len",
"(",
"r",
")",
"==",
"0",
"{",
"return",
"\n",
"}",
"\n",
"}",
"\n\n",
"s",
".",
"barrier",
"(",
"func",
"(",
")",
"{",
"var",
"err",
"error",
"\n",
"if",
"s",
".",
"isClustered",
"{",
"if",
"isSubClose",
"{",
"err",
"=",
"s",
".",
"replicateCloseSubscription",
"(",
"req",
")",
"\n",
"}",
"else",
"{",
"err",
"=",
"s",
".",
"replicateRemoveSubscription",
"(",
"req",
")",
"\n",
"}",
"\n",
"}",
"else",
"{",
"s",
".",
"closeMu",
".",
"Lock",
"(",
")",
"\n",
"err",
"=",
"s",
".",
"unsubscribe",
"(",
"req",
",",
"isSubClose",
")",
"\n",
"s",
".",
"closeMu",
".",
"Unlock",
"(",
")",
"\n",
"}",
"\n",
"// If there was an error, it has been already logged.",
"if",
"err",
"==",
"nil",
"{",
"// This will check if the channel has MaxInactivity defined,",
"// if so and there is no active subscription, it will start the",
"// delete timer.",
"s",
".",
"channels",
".",
"maybeStartChannelDeleteTimer",
"(",
"req",
".",
"Subject",
",",
"nil",
")",
"\n",
"}",
"\n\n",
"// If err is nil, it will be a non-error response",
"s",
".",
"sendSubscriptionResponseErr",
"(",
"m",
".",
"Reply",
",",
"err",
")",
"\n",
"}",
")",
"\n",
"}"
] | // performmUnsubOrCloseSubscription processes the unsub or close subscription
// request. | [
"performmUnsubOrCloseSubscription",
"processes",
"the",
"unsub",
"or",
"close",
"subscription",
"request",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4184-L4219 | train |
nats-io/nats-streaming-server | server/server.go | clearAckTimer | func (sub *subState) clearAckTimer() {
if sub.ackTimer != nil {
sub.ackTimer.Stop()
sub.ackTimer = nil
}
} | go | func (sub *subState) clearAckTimer() {
if sub.ackTimer != nil {
sub.ackTimer.Stop()
sub.ackTimer = nil
}
} | [
"func",
"(",
"sub",
"*",
"subState",
")",
"clearAckTimer",
"(",
")",
"{",
"if",
"sub",
".",
"ackTimer",
"!=",
"nil",
"{",
"sub",
".",
"ackTimer",
".",
"Stop",
"(",
")",
"\n",
"sub",
".",
"ackTimer",
"=",
"nil",
"\n",
"}",
"\n",
"}"
] | // Clear the ackTimer.
// sub Lock held on entry. | [
"Clear",
"the",
"ackTimer",
".",
"sub",
"Lock",
"held",
"in",
"entry",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4305-L4310 | train |
nats-io/nats-streaming-server | server/server.go | adjustAckTimer | func (sub *subState) adjustAckTimer(nextExpirationTime int64) {
sub.Lock()
defer sub.Unlock()
// Possible that the subscriber has been destroyed, and timer cleared
if sub.ackTimer == nil {
return
}
// Check if there are still pending acks
if len(sub.acksPending) > 0 {
// Capture time
now := time.Now().UnixNano()
// If the next expiration time is 0 or less than now,
// use the default ackWait
if nextExpirationTime <= now {
sub.ackTimer.Reset(sub.ackWait)
} else {
// Compute the time the ackTimer should fire, based
// on the given next expiration time and now.
fireIn := (nextExpirationTime - now)
sub.ackTimer.Reset(time.Duration(fireIn))
}
} else {
// No more pending acks, clear the timer.
sub.clearAckTimer()
}
} | go | func (sub *subState) adjustAckTimer(nextExpirationTime int64) {
sub.Lock()
defer sub.Unlock()
// Possible that the subscriber has been destroyed, and timer cleared
if sub.ackTimer == nil {
return
}
// Check if there are still pending acks
if len(sub.acksPending) > 0 {
// Capture time
now := time.Now().UnixNano()
// If the next expiration time is 0 or less than now,
// use the default ackWait
if nextExpirationTime <= now {
sub.ackTimer.Reset(sub.ackWait)
} else {
// Compute the time the ackTimer should fire, based
// on the given next expiration time and now.
fireIn := (nextExpirationTime - now)
sub.ackTimer.Reset(time.Duration(fireIn))
}
} else {
// No more pending acks, clear the timer.
sub.clearAckTimer()
}
} | [
"func",
"(",
"sub",
"*",
"subState",
")",
"adjustAckTimer",
"(",
"nextExpirationTime",
"int64",
")",
"{",
"sub",
".",
"Lock",
"(",
")",
"\n",
"defer",
"sub",
".",
"Unlock",
"(",
")",
"\n\n",
"// Possible that the subscriber has been destroyed, and timer cleared",
"if",
"sub",
".",
"ackTimer",
"==",
"nil",
"{",
"return",
"\n",
"}",
"\n\n",
"// Check if there are still pending acks",
"if",
"len",
"(",
"sub",
".",
"acksPending",
")",
">",
"0",
"{",
"// Capture time",
"now",
":=",
"time",
".",
"Now",
"(",
")",
".",
"UnixNano",
"(",
")",
"\n\n",
"// If the next expiration time is 0 or less than now,",
"// use the default ackWait",
"if",
"nextExpirationTime",
"<=",
"now",
"{",
"sub",
".",
"ackTimer",
".",
"Reset",
"(",
"sub",
".",
"ackWait",
")",
"\n",
"}",
"else",
"{",
"// Compute the time the ackTimer should fire, based",
"// on the given next expiration time and now.",
"fireIn",
":=",
"(",
"nextExpirationTime",
"-",
"now",
")",
"\n",
"sub",
".",
"ackTimer",
".",
"Reset",
"(",
"time",
".",
"Duration",
"(",
"fireIn",
")",
")",
"\n",
"}",
"\n",
"}",
"else",
"{",
"// No more pending acks, clear the timer.",
"sub",
".",
"clearAckTimer",
"(",
")",
"\n",
"}",
"\n",
"}"
] | // adjustAckTimer adjusts the timer based on a given next
// expiration time.
// The timer will be stopped if there is no more pending ack.
// If there are pending acks, the timer will be reset to the
// default sub.ackWait value if the given expiration time is
// 0 or in the past. Otherwise, it is set to the remaining time
// between the given expiration time and now. | [
"adjustAckTimer",
"adjusts",
"the",
"timer",
"based",
"on",
"a",
"given",
"next",
"expiration",
"time",
".",
"The",
"timer",
"will",
"be",
"stopped",
"if",
"there",
"is",
"no",
"more",
"pending",
"ack",
".",
"If",
"there",
"are",
"pending",
"acks",
"the",
"timer",
"will",
"be",
"reset",
"to",
"the",
"default",
"sub",
".",
"ackWait",
"value",
"if",
"the",
"given",
"expiration",
"time",
"is",
"0",
"or",
"in",
"the",
"past",
".",
"Otherwise",
"it",
"is",
"set",
"to",
"the",
"remaining",
"time",
"between",
"the",
"given",
"expiration",
"time",
"and",
"now",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4319-L4347 | train |
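adjustAckTimer's only arithmetic is converting an absolute next-expiration timestamp into a relative duration for timer.Reset, falling back to the full ackWait when the target is zero or already in the past. A hedged standalone sketch of that computation:

```go
package main

import (
	"fmt"
	"time"
)

// nextFireIn converts an absolute next-expiration time (UnixNano) into the
// duration a timer should be reset to. Zero or past timestamps fall back to
// the default ackWait, mirroring adjustAckTimer's logic. Illustration only.
func nextFireIn(nextExpiration int64, now time.Time, ackWait time.Duration) time.Duration {
	if nextExpiration <= now.UnixNano() {
		return ackWait
	}
	return time.Duration(nextExpiration - now.UnixNano())
}

func main() {
	now := time.Now()
	ackWait := 30 * time.Second

	// Expiration 5s from now: fire in roughly 5s.
	fmt.Println(nextFireIn(now.Add(5*time.Second).UnixNano(), now, ackWait))
	// Expiration already passed (or zero): fall back to ackWait.
	fmt.Println(nextFireIn(0, now, ackWait))
}
```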
nats-io/nats-streaming-server | server/server.go | startAckSub | func (sub *subState) startAckSub(nc *nats.Conn, cb nats.MsgHandler) error {
ackSub, err := nc.Subscribe(sub.AckInbox, cb)
if err != nil {
return err
}
sub.Lock()
// Should not occur, but if it was already set,
// unsubscribe old and replace.
sub.stopAckSub()
sub.ackSub = ackSub
sub.ackSub.SetPendingLimits(-1, -1)
sub.Unlock()
return nil
} | go | func (sub *subState) startAckSub(nc *nats.Conn, cb nats.MsgHandler) error {
ackSub, err := nc.Subscribe(sub.AckInbox, cb)
if err != nil {
return err
}
sub.Lock()
// Should not occur, but if it was already set,
// unsubscribe old and replace.
sub.stopAckSub()
sub.ackSub = ackSub
sub.ackSub.SetPendingLimits(-1, -1)
sub.Unlock()
return nil
} | [
"func",
"(",
"sub",
"*",
"subState",
")",
"startAckSub",
"(",
"nc",
"*",
"nats",
".",
"Conn",
",",
"cb",
"nats",
".",
"MsgHandler",
")",
"error",
"{",
"ackSub",
",",
"err",
":=",
"nc",
".",
"Subscribe",
"(",
"sub",
".",
"AckInbox",
",",
"cb",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"sub",
".",
"Lock",
"(",
")",
"\n",
"// Should not occur, but if it was already set,",
"// unsubscribe old and replace.",
"sub",
".",
"stopAckSub",
"(",
")",
"\n",
"sub",
".",
"ackSub",
"=",
"ackSub",
"\n",
"sub",
".",
"ackSub",
".",
"SetPendingLimits",
"(",
"-",
"1",
",",
"-",
"1",
")",
"\n",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"return",
"nil",
"\n",
"}"
] | // Subscribes to the AckInbox subject in order to process subscription's acks
// if not already done.
// This function grabs and releases the sub's lock. | [
"Subscribes",
"to",
"the",
"AckInbox",
"subject",
"in",
"order",
"to",
"process",
"subscription",
"s",
"acks",
"if",
"not",
"already",
"done",
".",
"This",
"function",
"grabs",
"and",
"releases",
"the",
"sub",
"s",
"lock",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4352-L4365 | train |
nats-io/nats-streaming-server | server/server.go | stopAckSub | func (sub *subState) stopAckSub() {
if sub.ackSub != nil {
sub.ackSub.Unsubscribe()
sub.ackSub = nil
}
} | go | func (sub *subState) stopAckSub() {
if sub.ackSub != nil {
sub.ackSub.Unsubscribe()
sub.ackSub = nil
}
} | [
"func",
"(",
"sub",
"*",
"subState",
")",
"stopAckSub",
"(",
")",
"{",
"if",
"sub",
".",
"ackSub",
"!=",
"nil",
"{",
"sub",
".",
"ackSub",
".",
"Unsubscribe",
"(",
")",
"\n",
"sub",
".",
"ackSub",
"=",
"nil",
"\n",
"}",
"\n",
"}"
] | // Stops subscribing to AckInbox.
// Lock assumed held on entry. | [
"Stops",
"subscribing",
"to",
"AckInbox",
".",
"Lock",
"assumed",
"held",
"on",
"entry",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4369-L4374 | train |
nats-io/nats-streaming-server | server/server.go | isShadowQueueDurable | func (sub *subState) isShadowQueueDurable() bool {
return sub.IsDurable && sub.QGroup != "" && sub.ClientID == ""
} | go | func (sub *subState) isShadowQueueDurable() bool {
return sub.IsDurable && sub.QGroup != "" && sub.ClientID == ""
} | [
"func",
"(",
"sub",
"*",
"subState",
")",
"isShadowQueueDurable",
"(",
")",
"bool",
"{",
"return",
"sub",
".",
"IsDurable",
"&&",
"sub",
".",
"QGroup",
"!=",
"\"",
"\"",
"&&",
"sub",
".",
"ClientID",
"==",
"\"",
"\"",
"\n",
"}"
] | // Returns true if this is a "shadow" durable queue subscriber | [
"Returns",
"true",
"if",
"this",
"is",
"a",
"shadow",
"durable",
"queue",
"subscriber"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4395-L4397 | train |
nats-io/nats-streaming-server | server/server.go | replicateSub | func (s *StanServer) replicateSub(sr *pb.SubscriptionRequest, ackInbox string, subID uint64) (*subState, error) {
op := &spb.RaftOperation{
OpType: spb.RaftOperation_Subscribe,
Sub: &spb.AddSubscription{
Request: sr,
AckInbox: ackInbox,
ID: subID,
},
}
data, err := op.Marshal()
if err != nil {
panic(err)
}
// Replicate operation and wait on result.
future := s.raft.Apply(data, 0)
if err := future.Error(); err != nil {
return nil, err
}
rs := future.Response().(*replicatedSub)
return rs.sub, rs.err
} | go | func (s *StanServer) replicateSub(sr *pb.SubscriptionRequest, ackInbox string, subID uint64) (*subState, error) {
op := &spb.RaftOperation{
OpType: spb.RaftOperation_Subscribe,
Sub: &spb.AddSubscription{
Request: sr,
AckInbox: ackInbox,
ID: subID,
},
}
data, err := op.Marshal()
if err != nil {
panic(err)
}
// Replicate operation and wait on result.
future := s.raft.Apply(data, 0)
if err := future.Error(); err != nil {
return nil, err
}
rs := future.Response().(*replicatedSub)
return rs.sub, rs.err
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"replicateSub",
"(",
"sr",
"*",
"pb",
".",
"SubscriptionRequest",
",",
"ackInbox",
"string",
",",
"subID",
"uint64",
")",
"(",
"*",
"subState",
",",
"error",
")",
"{",
"op",
":=",
"&",
"spb",
".",
"RaftOperation",
"{",
"OpType",
":",
"spb",
".",
"RaftOperation_Subscribe",
",",
"Sub",
":",
"&",
"spb",
".",
"AddSubscription",
"{",
"Request",
":",
"sr",
",",
"AckInbox",
":",
"ackInbox",
",",
"ID",
":",
"subID",
",",
"}",
",",
"}",
"\n",
"data",
",",
"err",
":=",
"op",
".",
"Marshal",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"panic",
"(",
"err",
")",
"\n",
"}",
"\n",
"// Replicate operation and wait on result.",
"future",
":=",
"s",
".",
"raft",
".",
"Apply",
"(",
"data",
",",
"0",
")",
"\n",
"if",
"err",
":=",
"future",
".",
"Error",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"rs",
":=",
"future",
".",
"Response",
"(",
")",
".",
"(",
"*",
"replicatedSub",
")",
"\n",
"return",
"rs",
".",
"sub",
",",
"rs",
".",
"err",
"\n",
"}"
] | // replicateSub replicates the SubscriptionRequest to nodes in the cluster via
// Raft. | [
"replicateSub",
"replicates",
"the",
"SubscriptionRequest",
"to",
"nodes",
"in",
"the",
"cluster",
"via",
"Raft",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4419-L4439 | train |
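replicateSub follows a common Raft client pattern: marshal the operation, submit it with Apply, block on the returned future, then cast the FSM's response back to a typed result. The sketch below shows only that flow; the applyFuture interface and stub are invented stand-ins for hashicorp/raft's ApplyFuture, not the server's actual types:

```go
package main

import (
	"errors"
	"fmt"
)

// applyFuture mimics the subset of raft.ApplyFuture used by replicateSub.
type applyFuture interface {
	Error() error
	Response() interface{}
}

type stubFuture struct {
	err  error
	resp interface{}
}

func (f *stubFuture) Error() error          { return f.err }
func (f *stubFuture) Response() interface{} { return f.resp }

// replicatedResult plays the role of the server's replicatedSub response.
type replicatedResult struct {
	value string
	err   error
}

// applyAndWait submits pre-marshaled data, waits for consensus, and converts
// the FSM response back into a typed result, as replicateSub does.
func applyAndWait(apply func([]byte) applyFuture, data []byte) (string, error) {
	future := apply(data)
	if err := future.Error(); err != nil {
		return "", err // replication itself failed (e.g. lost leadership)
	}
	res := future.Response().(*replicatedResult)
	return res.value, res.err
}

func main() {
	ok := func(data []byte) applyFuture {
		return &stubFuture{resp: &replicatedResult{value: "sub created"}}
	}
	fail := func(data []byte) applyFuture {
		return &stubFuture{err: errors.New("node is not the leader")}
	}
	fmt.Println(applyAndWait(ok, []byte("op")))
	fmt.Println(applyAndWait(fail, []byte("op")))
}
```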
nats-io/nats-streaming-server | server/server.go | addSubscription | func (s *StanServer) addSubscription(ss *subStore, sub *subState) error {
// Store in client
if !s.clients.addSub(sub.ClientID, sub) {
return fmt.Errorf("can't find clientID: %v", sub.ClientID)
}
// Store this subscription in subStore
if err := ss.Store(sub); err != nil {
s.clients.removeSub(sub.ClientID, sub)
return err
}
return nil
} | go | func (s *StanServer) addSubscription(ss *subStore, sub *subState) error {
// Store in client
if !s.clients.addSub(sub.ClientID, sub) {
return fmt.Errorf("can't find clientID: %v", sub.ClientID)
}
// Store this subscription in subStore
if err := ss.Store(sub); err != nil {
s.clients.removeSub(sub.ClientID, sub)
return err
}
return nil
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"addSubscription",
"(",
"ss",
"*",
"subStore",
",",
"sub",
"*",
"subState",
")",
"error",
"{",
"// Store in client",
"if",
"!",
"s",
".",
"clients",
".",
"addSub",
"(",
"sub",
".",
"ClientID",
",",
"sub",
")",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"sub",
".",
"ClientID",
")",
"\n",
"}",
"\n",
"// Store this subscription in subStore",
"if",
"err",
":=",
"ss",
".",
"Store",
"(",
"sub",
")",
";",
"err",
"!=",
"nil",
"{",
"s",
".",
"clients",
".",
"removeSub",
"(",
"sub",
".",
"ClientID",
",",
"sub",
")",
"\n",
"return",
"err",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // addSubscription adds `sub` to the client and store. | [
"addSubscription",
"adds",
"sub",
"to",
"the",
"client",
"and",
"store",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4442-L4453 | train |
nats-io/nats-streaming-server | server/server.go | updateDurable | func (s *StanServer) updateDurable(ss *subStore, sub *subState) error {
// Reset the hasFailedHB boolean since it may have been set
// if the client previously crashed and server set this
// flag to its subs.
sub.hasFailedHB = false
// Store in the client
if !s.clients.addSub(sub.ClientID, sub) {
return fmt.Errorf("can't find clientID: %v", sub.ClientID)
}
// Update this subscription in the store
if err := sub.store.UpdateSub(&sub.SubState); err != nil {
return err
}
ss.Lock()
// Do this only for durable subscribers (not durable queue subscribers).
if sub.isDurableSubscriber() {
// Add back into plain subscribers
ss.psubs = append(ss.psubs, sub)
}
// And in ackInbox lookup map.
ss.acks[sub.AckInbox] = sub
ss.Unlock()
return nil
} | go | func (s *StanServer) updateDurable(ss *subStore, sub *subState) error {
// Reset the hasFailedHB boolean since it may have been set
// if the client previously crashed and server set this
// flag to its subs.
sub.hasFailedHB = false
// Store in the client
if !s.clients.addSub(sub.ClientID, sub) {
return fmt.Errorf("can't find clientID: %v", sub.ClientID)
}
// Update this subscription in the store
if err := sub.store.UpdateSub(&sub.SubState); err != nil {
return err
}
ss.Lock()
// Do this only for durable subscribers (not durable queue subscribers).
if sub.isDurableSubscriber() {
// Add back into plain subscribers
ss.psubs = append(ss.psubs, sub)
}
// And in ackInbox lookup map.
ss.acks[sub.AckInbox] = sub
ss.Unlock()
return nil
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"updateDurable",
"(",
"ss",
"*",
"subStore",
",",
"sub",
"*",
"subState",
")",
"error",
"{",
"// Reset the hasFailedHB boolean since it may have been set",
"// if the client previously crashed and server set this",
"// flag to its subs.",
"sub",
".",
"hasFailedHB",
"=",
"false",
"\n",
"// Store in the client",
"if",
"!",
"s",
".",
"clients",
".",
"addSub",
"(",
"sub",
".",
"ClientID",
",",
"sub",
")",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"sub",
".",
"ClientID",
")",
"\n",
"}",
"\n",
"// Update this subscription in the store",
"if",
"err",
":=",
"sub",
".",
"store",
".",
"UpdateSub",
"(",
"&",
"sub",
".",
"SubState",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"ss",
".",
"Lock",
"(",
")",
"\n",
"// Do this only for durable subscribers (not durable queue subscribers).",
"if",
"sub",
".",
"isDurableSubscriber",
"(",
")",
"{",
"// Add back into plain subscribers",
"ss",
".",
"psubs",
"=",
"append",
"(",
"ss",
".",
"psubs",
",",
"sub",
")",
"\n",
"}",
"\n",
"// And in ackInbox lookup map.",
"ss",
".",
"acks",
"[",
"sub",
".",
"AckInbox",
"]",
"=",
"sub",
"\n",
"ss",
".",
"Unlock",
"(",
")",
"\n\n",
"return",
"nil",
"\n",
"}"
] | // updateDurable adds back `sub` to the client and updates the store.
// No lock is needed for `sub` since it has just been created. | [
"updateDurable",
"adds",
"back",
"sub",
"to",
"the",
"client",
"and",
"updates",
"the",
"store",
".",
"No",
"lock",
"is",
"needed",
"for",
"sub",
"since",
"it",
"has",
"just",
"been",
"created",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4457-L4481 | train |
nats-io/nats-streaming-server | server/server.go | processAckMsg | func (s *StanServer) processAckMsg(m *nats.Msg) {
ack := &pb.Ack{}
if ack.Unmarshal(m.Data) != nil {
if s.processCtrlMsg(m) {
return
}
}
c := s.channels.get(ack.Subject)
if c == nil {
s.log.Errorf("Unable to process ack seq=%d, channel %s not found", ack.Sequence, ack.Subject)
return
}
sub := c.ss.LookupByAckInbox(m.Subject)
if sub == nil {
return
}
s.processAck(c, sub, ack.Sequence, true)
} | go | func (s *StanServer) processAckMsg(m *nats.Msg) {
ack := &pb.Ack{}
if ack.Unmarshal(m.Data) != nil {
if s.processCtrlMsg(m) {
return
}
}
c := s.channels.get(ack.Subject)
if c == nil {
s.log.Errorf("Unable to process ack seq=%d, channel %s not found", ack.Sequence, ack.Subject)
return
}
sub := c.ss.LookupByAckInbox(m.Subject)
if sub == nil {
return
}
s.processAck(c, sub, ack.Sequence, true)
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"processAckMsg",
"(",
"m",
"*",
"nats",
".",
"Msg",
")",
"{",
"ack",
":=",
"&",
"pb",
".",
"Ack",
"{",
"}",
"\n",
"if",
"ack",
".",
"Unmarshal",
"(",
"m",
".",
"Data",
")",
"!=",
"nil",
"{",
"if",
"s",
".",
"processCtrlMsg",
"(",
"m",
")",
"{",
"return",
"\n",
"}",
"\n",
"}",
"\n",
"c",
":=",
"s",
".",
"channels",
".",
"get",
"(",
"ack",
".",
"Subject",
")",
"\n",
"if",
"c",
"==",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"ack",
".",
"Sequence",
",",
"ack",
".",
"Subject",
")",
"\n",
"return",
"\n",
"}",
"\n",
"sub",
":=",
"c",
".",
"ss",
".",
"LookupByAckInbox",
"(",
"m",
".",
"Subject",
")",
"\n",
"if",
"sub",
"==",
"nil",
"{",
"return",
"\n",
"}",
"\n",
"s",
".",
"processAck",
"(",
"c",
",",
"sub",
",",
"ack",
".",
"Sequence",
",",
"true",
")",
"\n",
"}"
] | // processAckMsg processes inbound acks from clients for delivered messages. | [
"processAckMsg",
"processes",
"inbound",
"acks",
"from",
"clients",
"for",
"delivered",
"messages",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4879-L4896 | train |
nats-io/nats-streaming-server | server/server.go | processAck | func (s *StanServer) processAck(c *channel, sub *subState, sequence uint64, fromUser bool) {
var stalled bool
// This is immutable, so can grab outside of sub's lock.
// If we have a queue group, we want to grab queue's lock before
// sub's lock.
qs := sub.qstate
if qs != nil {
qs.Lock()
}
sub.Lock()
persistAck := func(aSub *subState) bool {
if err := aSub.store.AckSeqPending(aSub.ID, sequence); err != nil {
s.log.Errorf("[Client:%s] Unable to persist ack for subid=%d, subject=%s, seq=%d, err=%v",
aSub.ClientID, aSub.ID, aSub.subject, sequence, err)
return false
}
return true
}
if _, found := sub.acksPending[sequence]; found {
// If in cluster mode, schedule replication of the ack.
if s.isClustered {
s.collectSentOrAck(sub, replicateAck, sequence)
}
if s.trace && fromUser {
s.log.Tracef("[Client:%s] Processing ack for subid=%d, subject=%s, seq=%d",
sub.ClientID, sub.ID, sub.subject, sequence)
}
if !persistAck(sub) {
sub.Unlock()
if qs != nil {
qs.Unlock()
}
return
}
delete(sub.acksPending, sequence)
} else if qs != nil && fromUser {
// For queue members, if this is not an internally generated ACK
// and we don't find the sequence in this sub's pending, we are
// going to look for it in other members and process it if found.
sub.Unlock()
for _, qsub := range qs.subs {
if qsub == sub {
continue
}
qsub.Lock()
if _, found := qsub.acksPending[sequence]; found {
delete(qsub.acksPending, sequence)
persistAck(qsub)
qsub.Unlock()
break
}
qsub.Unlock()
}
sub.Lock()
// Proceed with original sub (regardless if member was found
// or not) so that server sends more messages if needed.
}
if sub.stalled && int32(len(sub.acksPending)) < sub.MaxInFlight {
// For queue, we must not check the queue stalled count here. The queue
// as a whole may not be stalled, yet, if this sub was stalled, it is
// not now since the pending acks is below MaxInflight. The server should
// try to send available messages.
// It works also if the queue *was* stalled (all members were stalled),
// then this member is no longer stalled, which release the queue.
// Trigger send of available messages by setting this to true.
stalled = true
// Clear the stalled flag from this sub
sub.stalled = false
// .. and update the queue's stalled members count if this is a queue sub.
if qs != nil && qs.stalledSubCount > 0 {
qs.stalledSubCount--
}
}
sub.Unlock()
if qs != nil {
qs.Unlock()
}
// Leave the reset/cancel of the ackTimer to the redelivery cb.
if !stalled {
return
}
if sub.qstate != nil {
s.sendAvailableMessagesToQueue(c, sub.qstate)
} else {
s.sendAvailableMessages(c, sub)
}
} | go | func (s *StanServer) processAck(c *channel, sub *subState, sequence uint64, fromUser bool) {
var stalled bool
// This is immutable, so can grab outside of sub's lock.
// If we have a queue group, we want to grab queue's lock before
// sub's lock.
qs := sub.qstate
if qs != nil {
qs.Lock()
}
sub.Lock()
persistAck := func(aSub *subState) bool {
if err := aSub.store.AckSeqPending(aSub.ID, sequence); err != nil {
s.log.Errorf("[Client:%s] Unable to persist ack for subid=%d, subject=%s, seq=%d, err=%v",
aSub.ClientID, aSub.ID, aSub.subject, sequence, err)
return false
}
return true
}
if _, found := sub.acksPending[sequence]; found {
// If in cluster mode, schedule replication of the ack.
if s.isClustered {
s.collectSentOrAck(sub, replicateAck, sequence)
}
if s.trace && fromUser {
s.log.Tracef("[Client:%s] Processing ack for subid=%d, subject=%s, seq=%d",
sub.ClientID, sub.ID, sub.subject, sequence)
}
if !persistAck(sub) {
sub.Unlock()
if qs != nil {
qs.Unlock()
}
return
}
delete(sub.acksPending, sequence)
} else if qs != nil && fromUser {
// For queue members, if this is not an internally generated ACK
// and we don't find the sequence in this sub's pending, we are
// going to look for it in other members and process it if found.
sub.Unlock()
for _, qsub := range qs.subs {
if qsub == sub {
continue
}
qsub.Lock()
if _, found := qsub.acksPending[sequence]; found {
delete(qsub.acksPending, sequence)
persistAck(qsub)
qsub.Unlock()
break
}
qsub.Unlock()
}
sub.Lock()
// Proceed with original sub (regardless if member was found
// or not) so that server sends more messages if needed.
}
if sub.stalled && int32(len(sub.acksPending)) < sub.MaxInFlight {
// For queue, we must not check the queue stalled count here. The queue
// as a whole may not be stalled, yet, if this sub was stalled, it is
// not now since the pending acks is below MaxInflight. The server should
// try to send available messages.
// It works also if the queue *was* stalled (all members were stalled),
// then this member is no longer stalled, which release the queue.
// Trigger send of available messages by setting this to true.
stalled = true
// Clear the stalled flag from this sub
sub.stalled = false
// .. and update the queue's stalled members count if this is a queue sub.
if qs != nil && qs.stalledSubCount > 0 {
qs.stalledSubCount--
}
}
sub.Unlock()
if qs != nil {
qs.Unlock()
}
// Leave the reset/cancel of the ackTimer to the redelivery cb.
if !stalled {
return
}
if sub.qstate != nil {
s.sendAvailableMessagesToQueue(c, sub.qstate)
} else {
s.sendAvailableMessages(c, sub)
}
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"processAck",
"(",
"c",
"*",
"channel",
",",
"sub",
"*",
"subState",
",",
"sequence",
"uint64",
",",
"fromUser",
"bool",
")",
"{",
"var",
"stalled",
"bool",
"\n\n",
"// This is immutable, so can grab outside of sub's lock.",
"// If we have a queue group, we want to grab queue's lock before",
"// sub's lock.",
"qs",
":=",
"sub",
".",
"qstate",
"\n",
"if",
"qs",
"!=",
"nil",
"{",
"qs",
".",
"Lock",
"(",
")",
"\n",
"}",
"\n\n",
"sub",
".",
"Lock",
"(",
")",
"\n\n",
"persistAck",
":=",
"func",
"(",
"aSub",
"*",
"subState",
")",
"bool",
"{",
"if",
"err",
":=",
"aSub",
".",
"store",
".",
"AckSeqPending",
"(",
"aSub",
".",
"ID",
",",
"sequence",
")",
";",
"err",
"!=",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"aSub",
".",
"ClientID",
",",
"aSub",
".",
"ID",
",",
"aSub",
".",
"subject",
",",
"sequence",
",",
"err",
")",
"\n",
"return",
"false",
"\n",
"}",
"\n",
"return",
"true",
"\n",
"}",
"\n\n",
"if",
"_",
",",
"found",
":=",
"sub",
".",
"acksPending",
"[",
"sequence",
"]",
";",
"found",
"{",
"// If in cluster mode, schedule replication of the ack.",
"if",
"s",
".",
"isClustered",
"{",
"s",
".",
"collectSentOrAck",
"(",
"sub",
",",
"replicateAck",
",",
"sequence",
")",
"\n",
"}",
"\n",
"if",
"s",
".",
"trace",
"&&",
"fromUser",
"{",
"s",
".",
"log",
".",
"Tracef",
"(",
"\"",
"\"",
",",
"sub",
".",
"ClientID",
",",
"sub",
".",
"ID",
",",
"sub",
".",
"subject",
",",
"sequence",
")",
"\n",
"}",
"\n",
"if",
"!",
"persistAck",
"(",
"sub",
")",
"{",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"if",
"qs",
"!=",
"nil",
"{",
"qs",
".",
"Unlock",
"(",
")",
"\n",
"}",
"\n",
"return",
"\n",
"}",
"\n",
"delete",
"(",
"sub",
".",
"acksPending",
",",
"sequence",
")",
"\n",
"}",
"else",
"if",
"qs",
"!=",
"nil",
"&&",
"fromUser",
"{",
"// For queue members, if this is not an internally generated ACK",
"// and we don't find the sequence in this sub's pending, we are",
"// going to look for it in other members and process it if found.",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"for",
"_",
",",
"qsub",
":=",
"range",
"qs",
".",
"subs",
"{",
"if",
"qsub",
"==",
"sub",
"{",
"continue",
"\n",
"}",
"\n",
"qsub",
".",
"Lock",
"(",
")",
"\n",
"if",
"_",
",",
"found",
":=",
"qsub",
".",
"acksPending",
"[",
"sequence",
"]",
";",
"found",
"{",
"delete",
"(",
"qsub",
".",
"acksPending",
",",
"sequence",
")",
"\n",
"persistAck",
"(",
"qsub",
")",
"\n",
"qsub",
".",
"Unlock",
"(",
")",
"\n",
"break",
"\n",
"}",
"\n",
"qsub",
".",
"Unlock",
"(",
")",
"\n",
"}",
"\n",
"sub",
".",
"Lock",
"(",
")",
"\n",
"// Proceed with original sub (regardless if member was found",
"// or not) so that server sends more messages if needed.",
"}",
"\n",
"if",
"sub",
".",
"stalled",
"&&",
"int32",
"(",
"len",
"(",
"sub",
".",
"acksPending",
")",
")",
"<",
"sub",
".",
"MaxInFlight",
"{",
"// For queue, we must not check the queue stalled count here. The queue",
"// as a whole may not be stalled, yet, if this sub was stalled, it is",
"// not now since the pending acks is below MaxInflight. The server should",
"// try to send available messages.",
"// It works also if the queue *was* stalled (all members were stalled),",
"// then this member is no longer stalled, which release the queue.",
"// Trigger send of available messages by setting this to true.",
"stalled",
"=",
"true",
"\n\n",
"// Clear the stalled flag from this sub",
"sub",
".",
"stalled",
"=",
"false",
"\n",
"// .. and update the queue's stalled members count if this is a queue sub.",
"if",
"qs",
"!=",
"nil",
"&&",
"qs",
".",
"stalledSubCount",
">",
"0",
"{",
"qs",
".",
"stalledSubCount",
"--",
"\n",
"}",
"\n",
"}",
"\n",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"if",
"qs",
"!=",
"nil",
"{",
"qs",
".",
"Unlock",
"(",
")",
"\n",
"}",
"\n\n",
"// Leave the reset/cancel of the ackTimer to the redelivery cb.",
"if",
"!",
"stalled",
"{",
"return",
"\n",
"}",
"\n\n",
"if",
"sub",
".",
"qstate",
"!=",
"nil",
"{",
"s",
".",
"sendAvailableMessagesToQueue",
"(",
"c",
",",
"sub",
".",
"qstate",
")",
"\n",
"}",
"else",
"{",
"s",
".",
"sendAvailableMessages",
"(",
"c",
",",
"sub",
")",
"\n",
"}",
"\n",
"}"
] | // processAck processes an ack and if needed sends more messages. | [
"processAck",
"processes",
"an",
"ack",
"and",
"if",
"needed",
"sends",
"more",
"messages",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4899-L4994 | train |
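A self-contained sketch of the stall bookkeeping described above: once the number of pending acks drops back under MaxInFlight, the subscriber is unstalled and, for queue members, the group's stalled counter is decremented. The types and field names below are local stand-ins invented for the example, not the server's actual structs.

package main

import "fmt"

// queueState and sub are reduced stand-ins for the queue-group and subscriber
// state, keeping only the fields involved in stall bookkeeping.
type queueState struct {
	stalledSubCount int
}

type sub struct {
	stalled     bool
	maxInFlight int
	acksPending map[uint64]struct{}
	qs          *queueState
}

// ackOne removes one pending ack and reports whether this subscriber just
// became unstalled, which is the condition that triggers sending more messages.
func (s *sub) ackOne(seq uint64) bool {
	delete(s.acksPending, seq)
	if s.stalled && len(s.acksPending) < s.maxInFlight {
		s.stalled = false
		// For a queue member, the group's stalled counter is decremented too.
		if s.qs != nil && s.qs.stalledSubCount > 0 {
			s.qs.stalledSubCount--
		}
		return true
	}
	return false
}

func main() {
	qs := &queueState{stalledSubCount: 1}
	s := &sub{
		stalled:     true,
		maxInFlight: 2,
		acksPending: map[uint64]struct{}{1: {}, 2: {}},
		qs:          qs,
	}
	fmt.Println("unstalled:", s.ackOne(1))                    // true: 1 pending < MaxInFlight of 2
	fmt.Println("queue stalled members:", qs.stalledSubCount) // 0
}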
nats-io/nats-streaming-server | server/server.go | sendAvailableMessagesToQueue | func (s *StanServer) sendAvailableMessagesToQueue(c *channel, qs *queueState) {
if c == nil || qs == nil {
return
}
qs.Lock()
// Short circuit if no active members
if len(qs.subs) == 0 {
qs.Unlock()
return
}
// If redelivery at startup in progress, don't attempt to deliver new messages
if qs.newOnHold {
qs.Unlock()
return
}
for nextSeq := qs.lastSent + 1; qs.stalledSubCount < len(qs.subs); nextSeq++ {
nextMsg := s.getNextMsg(c, &nextSeq, &qs.lastSent)
if nextMsg == nil {
break
}
if _, sent, sendMore := s.sendMsgToQueueGroup(qs, nextMsg, honorMaxInFlight); !sent || !sendMore {
break
}
}
qs.Unlock()
} | go | func (s *StanServer) sendAvailableMessagesToQueue(c *channel, qs *queueState) {
if c == nil || qs == nil {
return
}
qs.Lock()
// Short circuit if no active members
if len(qs.subs) == 0 {
qs.Unlock()
return
}
// If redelivery at startup in progress, don't attempt to deliver new messages
if qs.newOnHold {
qs.Unlock()
return
}
for nextSeq := qs.lastSent + 1; qs.stalledSubCount < len(qs.subs); nextSeq++ {
nextMsg := s.getNextMsg(c, &nextSeq, &qs.lastSent)
if nextMsg == nil {
break
}
if _, sent, sendMore := s.sendMsgToQueueGroup(qs, nextMsg, honorMaxInFlight); !sent || !sendMore {
break
}
}
qs.Unlock()
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"sendAvailableMessagesToQueue",
"(",
"c",
"*",
"channel",
",",
"qs",
"*",
"queueState",
")",
"{",
"if",
"c",
"==",
"nil",
"||",
"qs",
"==",
"nil",
"{",
"return",
"\n",
"}",
"\n\n",
"qs",
".",
"Lock",
"(",
")",
"\n",
"// Short circuit if no active members",
"if",
"len",
"(",
"qs",
".",
"subs",
")",
"==",
"0",
"{",
"qs",
".",
"Unlock",
"(",
")",
"\n",
"return",
"\n",
"}",
"\n",
"// If redelivery at startup in progress, don't attempt to deliver new messages",
"if",
"qs",
".",
"newOnHold",
"{",
"qs",
".",
"Unlock",
"(",
")",
"\n",
"return",
"\n",
"}",
"\n",
"for",
"nextSeq",
":=",
"qs",
".",
"lastSent",
"+",
"1",
";",
"qs",
".",
"stalledSubCount",
"<",
"len",
"(",
"qs",
".",
"subs",
")",
";",
"nextSeq",
"++",
"{",
"nextMsg",
":=",
"s",
".",
"getNextMsg",
"(",
"c",
",",
"&",
"nextSeq",
",",
"&",
"qs",
".",
"lastSent",
")",
"\n",
"if",
"nextMsg",
"==",
"nil",
"{",
"break",
"\n",
"}",
"\n",
"if",
"_",
",",
"sent",
",",
"sendMore",
":=",
"s",
".",
"sendMsgToQueueGroup",
"(",
"qs",
",",
"nextMsg",
",",
"honorMaxInFlight",
")",
";",
"!",
"sent",
"||",
"!",
"sendMore",
"{",
"break",
"\n",
"}",
"\n",
"}",
"\n",
"qs",
".",
"Unlock",
"(",
")",
"\n",
"}"
] | // Send any messages that are ready to be sent that have been queued to the group. | [
"Send",
"any",
"messages",
"that",
"are",
"ready",
"to",
"be",
"sent",
"that",
"have",
"been",
"queued",
"to",
"the",
"group",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4997-L5023 | train |
nats-io/nats-streaming-server | server/server.go | sendAvailableMessages | func (s *StanServer) sendAvailableMessages(c *channel, sub *subState) {
sub.Lock()
for nextSeq := sub.LastSent + 1; !sub.stalled; nextSeq++ {
nextMsg := s.getNextMsg(c, &nextSeq, &sub.LastSent)
if nextMsg == nil {
break
}
if sent, sendMore := s.sendMsgToSub(sub, nextMsg, honorMaxInFlight); !sent || !sendMore {
break
}
}
sub.Unlock()
} | go | func (s *StanServer) sendAvailableMessages(c *channel, sub *subState) {
sub.Lock()
for nextSeq := sub.LastSent + 1; !sub.stalled; nextSeq++ {
nextMsg := s.getNextMsg(c, &nextSeq, &sub.LastSent)
if nextMsg == nil {
break
}
if sent, sendMore := s.sendMsgToSub(sub, nextMsg, honorMaxInFlight); !sent || !sendMore {
break
}
}
sub.Unlock()
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"sendAvailableMessages",
"(",
"c",
"*",
"channel",
",",
"sub",
"*",
"subState",
")",
"{",
"sub",
".",
"Lock",
"(",
")",
"\n",
"for",
"nextSeq",
":=",
"sub",
".",
"LastSent",
"+",
"1",
";",
"!",
"sub",
".",
"stalled",
";",
"nextSeq",
"++",
"{",
"nextMsg",
":=",
"s",
".",
"getNextMsg",
"(",
"c",
",",
"&",
"nextSeq",
",",
"&",
"sub",
".",
"LastSent",
")",
"\n",
"if",
"nextMsg",
"==",
"nil",
"{",
"break",
"\n",
"}",
"\n",
"if",
"sent",
",",
"sendMore",
":=",
"s",
".",
"sendMsgToSub",
"(",
"sub",
",",
"nextMsg",
",",
"honorMaxInFlight",
")",
";",
"!",
"sent",
"||",
"!",
"sendMore",
"{",
"break",
"\n",
"}",
"\n",
"}",
"\n",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"}"
] | // Send any messages that are ready to be sent that have been queued. | [
"Send",
"any",
"messages",
"that",
"are",
"ready",
"to",
"be",
"sent",
"that",
"have",
"been",
"queued",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L5026-L5038 | train |
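Both delivery loops above (the queue-group and the plain-subscriber variants) share the same shape: start at lastSent+1, fetch the next stored message, and stop when the store is exhausted or the send reports that the subscriber has stalled. Below is a standalone sketch of that loop, with a plain map standing in for the message store; the function and callback are illustrative only, not the server's API.

package main

import "fmt"

// deliverPending walks sequences starting after lastSent and stops when the
// store has no more messages or the send callback says to stop (stalled).
func deliverPending(lastSent uint64, store map[uint64]string, send func(seq uint64, data string) (sent, sendMore bool)) uint64 {
	for nextSeq := lastSent + 1; ; nextSeq++ {
		data, ok := store[nextSeq]
		if !ok {
			break // no more messages available
		}
		sent, sendMore := send(nextSeq, data)
		if sent {
			lastSent = nextSeq
		}
		if !sent || !sendMore {
			break
		}
	}
	return lastSent
}

func main() {
	store := map[uint64]string{1: "a", 2: "b", 3: "c"}
	inFlight, limit := 0, 2
	last := deliverPending(0, store, func(seq uint64, data string) (bool, bool) {
		inFlight++
		fmt.Println("sent", seq, data)
		return true, inFlight < limit // stop once the in-flight limit is reached
	})
	fmt.Println("lastSent =", last) // 2: the third message waits for acks
}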
nats-io/nats-streaming-server | server/server.go | setSubStartSequence | func (s *StanServer) setSubStartSequence(c *channel, sr *pb.SubscriptionRequest) (string, uint64, error) {
lastSent := uint64(0)
debugTrace := ""
// In all start position cases, if there is no message, ensure
// lastSent stays at 0.
switch sr.StartPosition {
case pb.StartPosition_NewOnly:
var err error
lastSent, err = c.store.Msgs.LastSequence()
if err != nil {
return "", 0, err
}
if s.debug {
debugTrace = fmt.Sprintf("new-only, seq=%d", lastSent+1)
}
case pb.StartPosition_LastReceived:
lastSeq, err := c.store.Msgs.LastSequence()
if err != nil {
return "", 0, err
}
if lastSeq > 0 {
lastSent = lastSeq - 1
}
if s.debug {
debugTrace = fmt.Sprintf("last message, seq=%d", lastSent+1)
}
case pb.StartPosition_TimeDeltaStart:
startTime := time.Now().UnixNano() - sr.StartTimeDelta
// If there is no message, seq will be 0.
seq, err := c.store.Msgs.GetSequenceFromTimestamp(startTime)
if err != nil {
return "", 0, err
}
if seq > 0 {
// If the time delta is in the future relative to the last
// message in the log, 'seq' will be equal to last sequence + 1,
// so this would translate to "new only" semantic.
lastSent = seq - 1
}
if s.debug {
debugTrace = fmt.Sprintf("from time time='%v' seq=%d", time.Unix(0, startTime), lastSent+1)
}
case pb.StartPosition_SequenceStart:
// If there is no message, firstSeq and lastSeq will be equal to 0.
firstSeq, lastSeq, err := c.store.Msgs.FirstAndLastSequence()
if err != nil {
return "", 0, err
}
// StartSequence is an uint64, so can't be lower than 0.
if sr.StartSequence < firstSeq {
// That translates to sending the first message available.
lastSent = firstSeq - 1
} else if sr.StartSequence > lastSeq {
// That translates to "new only"
lastSent = lastSeq
} else if sr.StartSequence > 0 {
// That translates to sending the message with StartSequence
// sequence number.
lastSent = sr.StartSequence - 1
}
if s.debug {
debugTrace = fmt.Sprintf("from sequence, asked_seq=%d actual_seq=%d", sr.StartSequence, lastSent+1)
}
case pb.StartPosition_First:
firstSeq, err := c.store.Msgs.FirstSequence()
if err != nil {
return "", 0, err
}
if firstSeq > 0 {
lastSent = firstSeq - 1
}
if s.debug {
debugTrace = fmt.Sprintf("from beginning, seq=%d", lastSent+1)
}
}
return debugTrace, lastSent, nil
} | go | func (s *StanServer) setSubStartSequence(c *channel, sr *pb.SubscriptionRequest) (string, uint64, error) {
lastSent := uint64(0)
debugTrace := ""
// In all start position cases, if there is no message, ensure
// lastSent stays at 0.
switch sr.StartPosition {
case pb.StartPosition_NewOnly:
var err error
lastSent, err = c.store.Msgs.LastSequence()
if err != nil {
return "", 0, err
}
if s.debug {
debugTrace = fmt.Sprintf("new-only, seq=%d", lastSent+1)
}
case pb.StartPosition_LastReceived:
lastSeq, err := c.store.Msgs.LastSequence()
if err != nil {
return "", 0, err
}
if lastSeq > 0 {
lastSent = lastSeq - 1
}
if s.debug {
debugTrace = fmt.Sprintf("last message, seq=%d", lastSent+1)
}
case pb.StartPosition_TimeDeltaStart:
startTime := time.Now().UnixNano() - sr.StartTimeDelta
// If there is no message, seq will be 0.
seq, err := c.store.Msgs.GetSequenceFromTimestamp(startTime)
if err != nil {
return "", 0, err
}
if seq > 0 {
// If the time delta is in the future relative to the last
// message in the log, 'seq' will be equal to last sequence + 1,
// so this would translate to "new only" semantic.
lastSent = seq - 1
}
if s.debug {
debugTrace = fmt.Sprintf("from time time='%v' seq=%d", time.Unix(0, startTime), lastSent+1)
}
case pb.StartPosition_SequenceStart:
// If there is no message, firstSeq and lastSeq will be equal to 0.
firstSeq, lastSeq, err := c.store.Msgs.FirstAndLastSequence()
if err != nil {
return "", 0, err
}
// StartSequence is an uint64, so can't be lower than 0.
if sr.StartSequence < firstSeq {
// That translates to sending the first message available.
lastSent = firstSeq - 1
} else if sr.StartSequence > lastSeq {
// That translates to "new only"
lastSent = lastSeq
} else if sr.StartSequence > 0 {
// That translates to sending the message with StartSequence
// sequence number.
lastSent = sr.StartSequence - 1
}
if s.debug {
debugTrace = fmt.Sprintf("from sequence, asked_seq=%d actual_seq=%d", sr.StartSequence, lastSent+1)
}
case pb.StartPosition_First:
firstSeq, err := c.store.Msgs.FirstSequence()
if err != nil {
return "", 0, err
}
if firstSeq > 0 {
lastSent = firstSeq - 1
}
if s.debug {
debugTrace = fmt.Sprintf("from beginning, seq=%d", lastSent+1)
}
}
return debugTrace, lastSent, nil
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"setSubStartSequence",
"(",
"c",
"*",
"channel",
",",
"sr",
"*",
"pb",
".",
"SubscriptionRequest",
")",
"(",
"string",
",",
"uint64",
",",
"error",
")",
"{",
"lastSent",
":=",
"uint64",
"(",
"0",
")",
"\n",
"debugTrace",
":=",
"\"",
"\"",
"\n\n",
"// In all start position cases, if there is no message, ensure",
"// lastSent stays at 0.",
"switch",
"sr",
".",
"StartPosition",
"{",
"case",
"pb",
".",
"StartPosition_NewOnly",
":",
"var",
"err",
"error",
"\n",
"lastSent",
",",
"err",
"=",
"c",
".",
"store",
".",
"Msgs",
".",
"LastSequence",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"\"",
"\"",
",",
"0",
",",
"err",
"\n",
"}",
"\n",
"if",
"s",
".",
"debug",
"{",
"debugTrace",
"=",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"lastSent",
"+",
"1",
")",
"\n",
"}",
"\n",
"case",
"pb",
".",
"StartPosition_LastReceived",
":",
"lastSeq",
",",
"err",
":=",
"c",
".",
"store",
".",
"Msgs",
".",
"LastSequence",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"\"",
"\"",
",",
"0",
",",
"err",
"\n",
"}",
"\n",
"if",
"lastSeq",
">",
"0",
"{",
"lastSent",
"=",
"lastSeq",
"-",
"1",
"\n",
"}",
"\n",
"if",
"s",
".",
"debug",
"{",
"debugTrace",
"=",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"lastSent",
"+",
"1",
")",
"\n",
"}",
"\n",
"case",
"pb",
".",
"StartPosition_TimeDeltaStart",
":",
"startTime",
":=",
"time",
".",
"Now",
"(",
")",
".",
"UnixNano",
"(",
")",
"-",
"sr",
".",
"StartTimeDelta",
"\n",
"// If there is no message, seq will be 0.",
"seq",
",",
"err",
":=",
"c",
".",
"store",
".",
"Msgs",
".",
"GetSequenceFromTimestamp",
"(",
"startTime",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"\"",
"\"",
",",
"0",
",",
"err",
"\n",
"}",
"\n",
"if",
"seq",
">",
"0",
"{",
"// If the time delta is in the future relative to the last",
"// message in the log, 'seq' will be equal to last sequence + 1,",
"// so this would translate to \"new only\" semantic.",
"lastSent",
"=",
"seq",
"-",
"1",
"\n",
"}",
"\n",
"if",
"s",
".",
"debug",
"{",
"debugTrace",
"=",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"time",
".",
"Unix",
"(",
"0",
",",
"startTime",
")",
",",
"lastSent",
"+",
"1",
")",
"\n",
"}",
"\n",
"case",
"pb",
".",
"StartPosition_SequenceStart",
":",
"// If there is no message, firstSeq and lastSeq will be equal to 0.",
"firstSeq",
",",
"lastSeq",
",",
"err",
":=",
"c",
".",
"store",
".",
"Msgs",
".",
"FirstAndLastSequence",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"\"",
"\"",
",",
"0",
",",
"err",
"\n",
"}",
"\n",
"// StartSequence is an uint64, so can't be lower than 0.",
"if",
"sr",
".",
"StartSequence",
"<",
"firstSeq",
"{",
"// That translates to sending the first message available.",
"lastSent",
"=",
"firstSeq",
"-",
"1",
"\n",
"}",
"else",
"if",
"sr",
".",
"StartSequence",
">",
"lastSeq",
"{",
"// That translates to \"new only\"",
"lastSent",
"=",
"lastSeq",
"\n",
"}",
"else",
"if",
"sr",
".",
"StartSequence",
">",
"0",
"{",
"// That translates to sending the message with StartSequence",
"// sequence number.",
"lastSent",
"=",
"sr",
".",
"StartSequence",
"-",
"1",
"\n",
"}",
"\n",
"if",
"s",
".",
"debug",
"{",
"debugTrace",
"=",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"sr",
".",
"StartSequence",
",",
"lastSent",
"+",
"1",
")",
"\n",
"}",
"\n",
"case",
"pb",
".",
"StartPosition_First",
":",
"firstSeq",
",",
"err",
":=",
"c",
".",
"store",
".",
"Msgs",
".",
"FirstSequence",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"\"",
"\"",
",",
"0",
",",
"err",
"\n",
"}",
"\n",
"if",
"firstSeq",
">",
"0",
"{",
"lastSent",
"=",
"firstSeq",
"-",
"1",
"\n",
"}",
"\n",
"if",
"s",
".",
"debug",
"{",
"debugTrace",
"=",
"fmt",
".",
"Sprintf",
"(",
"\"",
"\"",
",",
"lastSent",
"+",
"1",
")",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"debugTrace",
",",
"lastSent",
",",
"nil",
"\n",
"}"
] | // Setup the start position for the subscriber. | [
"Setup",
"the",
"start",
"position",
"for",
"the",
"subscriber",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L5070-L5148 | train |
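A minimal sketch of how the StartPosition_SequenceStart branch above clamps a requested start sequence into the range the channel still holds and converts it into a lastSent value. The helper and the example sequence range are illustrative only, not part of the server's API.

package main

import "fmt"

// lastSentForSequenceStart mirrors the SequenceStart logic: deliver the first
// available message if the request is too old, behave like "new only" if it is
// in the future, otherwise start exactly at the requested sequence.
func lastSentForSequenceStart(start, firstSeq, lastSeq uint64) uint64 {
	lastSent := uint64(0)
	switch {
	case start < firstSeq:
		// Requested message is gone: deliver the first available one.
		// (start < firstSeq implies firstSeq >= 1, so no underflow.)
		lastSent = firstSeq - 1
	case start > lastSeq:
		// Requested message does not exist yet: behaves like "new only".
		lastSent = lastSeq
	case start > 0:
		// Deliver starting exactly at the requested sequence.
		lastSent = start - 1
	}
	return lastSent
}

func main() {
	// Channel currently holds sequences 5..9.
	for _, start := range []uint64{3, 7, 12} {
		lastSent := lastSentForSequenceStart(start, 5, 9)
		fmt.Printf("requested %d -> delivery starts at %d\n", start, lastSent+1)
	}
}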
nats-io/nats-streaming-server | server/server.go | startGoRoutine | func (s *StanServer) startGoRoutine(f func()) {
s.mu.Lock()
if !s.shutdown {
s.wg.Add(1)
go f()
}
s.mu.Unlock()
} | go | func (s *StanServer) startGoRoutine(f func()) {
s.mu.Lock()
if !s.shutdown {
s.wg.Add(1)
go f()
}
s.mu.Unlock()
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"startGoRoutine",
"(",
"f",
"func",
"(",
")",
")",
"{",
"s",
".",
"mu",
".",
"Lock",
"(",
")",
"\n",
"if",
"!",
"s",
".",
"shutdown",
"{",
"s",
".",
"wg",
".",
"Add",
"(",
"1",
")",
"\n",
"go",
"f",
"(",
")",
"\n",
"}",
"\n",
"s",
".",
"mu",
".",
"Unlock",
"(",
")",
"\n",
"}"
] | // startGoRoutine starts the given function as a go routine if and only if
// the server was not shutdown at that time. This is required because
// we cannot increment the wait group after the shutdown process has started. | [
"startGoRoutine",
"starts",
"the",
"given",
"function",
"as",
"a",
"go",
"routine",
"if",
"and",
"only",
"if",
"the",
"server",
"was",
"not",
"shutdown",
"at",
"that",
"time",
".",
"This",
"is",
"required",
"because",
"we",
"cannot",
"increment",
"the",
"wait",
"group",
"after",
"the",
"shutdown",
"process",
"has",
"started",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L5153-L5160 | train |
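The docstring above points at a common shutdown hazard: sync.WaitGroup.Add must not race with Wait once shutdown has begun, so the goroutine is only started while the shutdown flag is still false. A small self-contained sketch of the same guard follows; the server type is local to the example, and unlike the original the wrapper calls Done itself for convenience.

package main

import (
	"fmt"
	"sync"
)

// server holds a mutex-guarded shutdown flag and a WaitGroup that is only
// incremented while the server is still running.
type server struct {
	mu       sync.Mutex
	shutdown bool
	wg       sync.WaitGroup
}

// startGoRoutine launches f only if shutdown has not started, so wg.Add is
// never called concurrently with (or after) wg.Wait.
func (s *server) startGoRoutine(f func()) {
	s.mu.Lock()
	if !s.shutdown {
		s.wg.Add(1)
		go func() {
			defer s.wg.Done()
			f()
		}()
	}
	s.mu.Unlock()
}

func (s *server) stop() {
	s.mu.Lock()
	s.shutdown = true
	s.mu.Unlock()
	s.wg.Wait()
}

func main() {
	s := &server{}
	s.startGoRoutine(func() { fmt.Println("worker ran") })
	s.stop()
	// Goroutines requested after shutdown are silently ignored.
	s.startGoRoutine(func() { fmt.Println("never runs") })
}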
nats-io/nats-streaming-server | server/server.go | ClusterID | func (s *StanServer) ClusterID() string {
s.mu.RLock()
defer s.mu.RUnlock()
return s.info.ClusterID
} | go | func (s *StanServer) ClusterID() string {
s.mu.RLock()
defer s.mu.RUnlock()
return s.info.ClusterID
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"ClusterID",
"(",
")",
"string",
"{",
"s",
".",
"mu",
".",
"RLock",
"(",
")",
"\n",
"defer",
"s",
".",
"mu",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"s",
".",
"info",
".",
"ClusterID",
"\n",
"}"
] | // ClusterID returns the NATS Streaming Server's ID. | [
"ClusterID",
"returns",
"the",
"NATS",
"Streaming",
"Server",
"s",
"ID",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L5163-L5167 | train |
nats-io/nats-streaming-server | server/server.go | State | func (s *StanServer) State() State {
s.mu.RLock()
defer s.mu.RUnlock()
return s.state
} | go | func (s *StanServer) State() State {
s.mu.RLock()
defer s.mu.RUnlock()
return s.state
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"State",
"(",
")",
"State",
"{",
"s",
".",
"mu",
".",
"RLock",
"(",
")",
"\n",
"defer",
"s",
".",
"mu",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"s",
".",
"state",
"\n",
"}"
] | // State returns the state of this server. | [
"State",
"returns",
"the",
"state",
"of",
"this",
"server",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L5170-L5174 | train |
nats-io/nats-streaming-server | server/server.go | setLastError | func (s *StanServer) setLastError(err error) {
s.mu.Lock()
s.lastError = err
s.state = Failed
s.mu.Unlock()
s.log.Fatalf("%v", err)
} | go | func (s *StanServer) setLastError(err error) {
s.mu.Lock()
s.lastError = err
s.state = Failed
s.mu.Unlock()
s.log.Fatalf("%v", err)
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"setLastError",
"(",
"err",
"error",
")",
"{",
"s",
".",
"mu",
".",
"Lock",
"(",
")",
"\n",
"s",
".",
"lastError",
"=",
"err",
"\n",
"s",
".",
"state",
"=",
"Failed",
"\n",
"s",
".",
"mu",
".",
"Unlock",
"(",
")",
"\n",
"s",
".",
"log",
".",
"Fatalf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"}"
] | // setLastError sets the last fatal error that occurred. This is
// used in case of an async error that cannot directly be reported
// to the user. | [
"setLastError",
"sets",
"the",
"last",
"fatal",
"error",
"that",
"occurred",
".",
"This",
"is",
"used",
"in",
"case",
"of",
"an",
"async",
"error",
"that",
"cannot",
"directly",
"be",
"reported",
"to",
"the",
"user",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L5179-L5185 | train |
nats-io/nats-streaming-server | server/server.go | LastError | func (s *StanServer) LastError() error {
s.mu.RLock()
defer s.mu.RUnlock()
return s.lastError
} | go | func (s *StanServer) LastError() error {
s.mu.RLock()
defer s.mu.RUnlock()
return s.lastError
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"LastError",
"(",
")",
"error",
"{",
"s",
".",
"mu",
".",
"RLock",
"(",
")",
"\n",
"defer",
"s",
".",
"mu",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"s",
".",
"lastError",
"\n",
"}"
] | // LastError returns the last fatal error the server experienced. | [
"LastError",
"returns",
"the",
"last",
"fatal",
"error",
"the",
"server",
"experienced",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L5188-L5192 | train |
nats-io/nats-streaming-server | server/server.go | Shutdown | func (s *StanServer) Shutdown() {
s.log.Noticef("Shutting down.")
s.mu.Lock()
if s.shutdown {
s.mu.Unlock()
return
}
close(s.shutdownCh)
// Allows Shutdown() to be idempotent
s.shutdown = true
// Change the state too
s.state = Shutdown
// We need to make sure that the storeIOLoop returns before
// closing the Store
waitForIOStoreLoop := true
// Capture under lock
store := s.store
ns := s.natsServer
// Do not close and nil the connections here, they are used in many places
// without locking. Once closed, s.nc.xxx() calls will simply fail, but
// we won't panic.
ncs := s.ncs
ncr := s.ncr
ncsr := s.ncsr
nc := s.nc
ftnc := s.ftnc
nca := s.nca
// Stop processing subscriptions start requests
s.subStartQuit <- struct{}{}
if s.ioChannel != nil {
// Notify the IO channel that we are shutting down
close(s.ioChannelQuit)
} else {
waitForIOStoreLoop = false
}
// In case we are running in FT mode.
if s.ftQuit != nil {
s.ftQuit <- struct{}{}
}
// In case we are running in Partitioning mode
if s.partitions != nil {
s.partitions.shutdown()
}
s.mu.Unlock()
// Make sure the StoreIOLoop returns before closing the Store
if waitForIOStoreLoop {
s.ioChannelWG.Wait()
}
// Close Raft group before closing store.
if s.raft != nil {
if err := s.raft.shutdown(); err != nil {
s.log.Errorf("Failed to stop Raft node: %v", err)
}
}
// Close/Shutdown resources. Note that unless one instantiates StanServer
// directly (instead of calling RunServer() and the like), these should
// not be nil.
if store != nil {
store.Close()
}
if ncs != nil {
ncs.Close()
}
if ncr != nil {
ncr.Close()
}
if ncsr != nil {
ncsr.Close()
}
if nc != nil {
nc.Close()
}
if ftnc != nil {
ftnc.Close()
}
if nca != nil {
nca.Close()
}
if ns != nil {
ns.Shutdown()
}
// Wait for go-routines to return
s.wg.Wait()
} | go | func (s *StanServer) Shutdown() {
s.log.Noticef("Shutting down.")
s.mu.Lock()
if s.shutdown {
s.mu.Unlock()
return
}
close(s.shutdownCh)
// Allows Shutdown() to be idempotent
s.shutdown = true
// Change the state too
s.state = Shutdown
// We need to make sure that the storeIOLoop returns before
// closing the Store
waitForIOStoreLoop := true
// Capture under lock
store := s.store
ns := s.natsServer
// Do not close and nil the connections here, they are used in many places
// without locking. Once closed, s.nc.xxx() calls will simply fail, but
// we won't panic.
ncs := s.ncs
ncr := s.ncr
ncsr := s.ncsr
nc := s.nc
ftnc := s.ftnc
nca := s.nca
// Stop processing subscriptions start requests
s.subStartQuit <- struct{}{}
if s.ioChannel != nil {
// Notify the IO channel that we are shutting down
close(s.ioChannelQuit)
} else {
waitForIOStoreLoop = false
}
// In case we are running in FT mode.
if s.ftQuit != nil {
s.ftQuit <- struct{}{}
}
// In case we are running in Partitioning mode
if s.partitions != nil {
s.partitions.shutdown()
}
s.mu.Unlock()
// Make sure the StoreIOLoop returns before closing the Store
if waitForIOStoreLoop {
s.ioChannelWG.Wait()
}
// Close Raft group before closing store.
if s.raft != nil {
if err := s.raft.shutdown(); err != nil {
s.log.Errorf("Failed to stop Raft node: %v", err)
}
}
// Close/Shutdown resources. Note that unless one instantiates StanServer
// directly (instead of calling RunServer() and the like), these should
// not be nil.
if store != nil {
store.Close()
}
if ncs != nil {
ncs.Close()
}
if ncr != nil {
ncr.Close()
}
if ncsr != nil {
ncsr.Close()
}
if nc != nil {
nc.Close()
}
if ftnc != nil {
ftnc.Close()
}
if nca != nil {
nca.Close()
}
if ns != nil {
ns.Shutdown()
}
// Wait for go-routines to return
s.wg.Wait()
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"Shutdown",
"(",
")",
"{",
"s",
".",
"log",
".",
"Noticef",
"(",
"\"",
"\"",
")",
"\n\n",
"s",
".",
"mu",
".",
"Lock",
"(",
")",
"\n",
"if",
"s",
".",
"shutdown",
"{",
"s",
".",
"mu",
".",
"Unlock",
"(",
")",
"\n",
"return",
"\n",
"}",
"\n\n",
"close",
"(",
"s",
".",
"shutdownCh",
")",
"\n\n",
"// Allows Shutdown() to be idempotent",
"s",
".",
"shutdown",
"=",
"true",
"\n",
"// Change the state too",
"s",
".",
"state",
"=",
"Shutdown",
"\n\n",
"// We need to make sure that the storeIOLoop returns before",
"// closing the Store",
"waitForIOStoreLoop",
":=",
"true",
"\n\n",
"// Capture under lock",
"store",
":=",
"s",
".",
"store",
"\n",
"ns",
":=",
"s",
".",
"natsServer",
"\n",
"// Do not close and nil the connections here, they are used in many places",
"// without locking. Once closed, s.nc.xxx() calls will simply fail, but",
"// we won't panic.",
"ncs",
":=",
"s",
".",
"ncs",
"\n",
"ncr",
":=",
"s",
".",
"ncr",
"\n",
"ncsr",
":=",
"s",
".",
"ncsr",
"\n",
"nc",
":=",
"s",
".",
"nc",
"\n",
"ftnc",
":=",
"s",
".",
"ftnc",
"\n",
"nca",
":=",
"s",
".",
"nca",
"\n\n",
"// Stop processing subscriptions start requests",
"s",
".",
"subStartQuit",
"<-",
"struct",
"{",
"}",
"{",
"}",
"\n\n",
"if",
"s",
".",
"ioChannel",
"!=",
"nil",
"{",
"// Notify the IO channel that we are shutting down",
"close",
"(",
"s",
".",
"ioChannelQuit",
")",
"\n",
"}",
"else",
"{",
"waitForIOStoreLoop",
"=",
"false",
"\n",
"}",
"\n",
"// In case we are running in FT mode.",
"if",
"s",
".",
"ftQuit",
"!=",
"nil",
"{",
"s",
".",
"ftQuit",
"<-",
"struct",
"{",
"}",
"{",
"}",
"\n",
"}",
"\n",
"// In case we are running in Partitioning mode",
"if",
"s",
".",
"partitions",
"!=",
"nil",
"{",
"s",
".",
"partitions",
".",
"shutdown",
"(",
")",
"\n",
"}",
"\n",
"s",
".",
"mu",
".",
"Unlock",
"(",
")",
"\n\n",
"// Make sure the StoreIOLoop returns before closing the Store",
"if",
"waitForIOStoreLoop",
"{",
"s",
".",
"ioChannelWG",
".",
"Wait",
"(",
")",
"\n",
"}",
"\n\n",
"// Close Raft group before closing store.",
"if",
"s",
".",
"raft",
"!=",
"nil",
"{",
"if",
"err",
":=",
"s",
".",
"raft",
".",
"shutdown",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"}",
"\n",
"}",
"\n\n",
"// Close/Shutdown resources. Note that unless one instantiates StanServer",
"// directly (instead of calling RunServer() and the like), these should",
"// not be nil.",
"if",
"store",
"!=",
"nil",
"{",
"store",
".",
"Close",
"(",
")",
"\n",
"}",
"\n",
"if",
"ncs",
"!=",
"nil",
"{",
"ncs",
".",
"Close",
"(",
")",
"\n",
"}",
"\n",
"if",
"ncr",
"!=",
"nil",
"{",
"ncr",
".",
"Close",
"(",
")",
"\n",
"}",
"\n",
"if",
"ncsr",
"!=",
"nil",
"{",
"ncsr",
".",
"Close",
"(",
")",
"\n",
"}",
"\n",
"if",
"nc",
"!=",
"nil",
"{",
"nc",
".",
"Close",
"(",
")",
"\n",
"}",
"\n",
"if",
"ftnc",
"!=",
"nil",
"{",
"ftnc",
".",
"Close",
"(",
")",
"\n",
"}",
"\n",
"if",
"nca",
"!=",
"nil",
"{",
"nca",
".",
"Close",
"(",
")",
"\n",
"}",
"\n",
"if",
"ns",
"!=",
"nil",
"{",
"ns",
".",
"Shutdown",
"(",
")",
"\n",
"}",
"\n\n",
"// Wait for go-routines to return",
"s",
".",
"wg",
".",
"Wait",
"(",
")",
"\n",
"}"
] | // Shutdown will close our NATS connection and shutdown any embedded NATS server. | [
"Shutdown",
"will",
"close",
"our",
"NATS",
"connection",
"and",
"shutdown",
"any",
"embedded",
"NATS",
"server",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L5195-L5289 | train |
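A reduced sketch of the idempotent-shutdown pattern used above: flip the flag and capture resource references under the lock, then perform the actual closing after the lock is released so close calls never run while the mutex is held. The service and fakeConn types are invented for the example.

package main

import (
	"fmt"
	"sync"
)

type service struct {
	mu       sync.Mutex
	shutdown bool
	conns    []interface{ Close() }
}

// Shutdown is safe to call more than once: only the first call does any work.
func (s *service) Shutdown() {
	s.mu.Lock()
	if s.shutdown {
		s.mu.Unlock()
		return // already shut down: calling again is a no-op
	}
	s.shutdown = true
	conns := s.conns // capture under lock
	s.mu.Unlock()

	// Close outside the lock; closed resources simply fail on later use.
	for _, c := range conns {
		if c != nil {
			c.Close()
		}
	}
}

type fakeConn struct{ name string }

func (f *fakeConn) Close() { fmt.Println("closed", f.name) }

func main() {
	s := &service{conns: []interface{ Close() }{&fakeConn{"client"}, &fakeConn{"raft"}}}
	s.Shutdown()
	s.Shutdown() // second call returns immediately
}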
nats-io/nats-streaming-server | logger/logger.go | SetLogger | func (s *StanLogger) SetLogger(log Logger, logtime, debug, trace bool, logfile string) {
s.mu.Lock()
s.log = log
s.ltime = logtime
s.debug = debug
s.trace = trace
s.lfile = logfile
s.mu.Unlock()
} | go | func (s *StanLogger) SetLogger(log Logger, logtime, debug, trace bool, logfile string) {
s.mu.Lock()
s.log = log
s.ltime = logtime
s.debug = debug
s.trace = trace
s.lfile = logfile
s.mu.Unlock()
} | [
"func",
"(",
"s",
"*",
"StanLogger",
")",
"SetLogger",
"(",
"log",
"Logger",
",",
"logtime",
",",
"debug",
",",
"trace",
"bool",
",",
"logfile",
"string",
")",
"{",
"s",
".",
"mu",
".",
"Lock",
"(",
")",
"\n",
"s",
".",
"log",
"=",
"log",
"\n",
"s",
".",
"ltime",
"=",
"logtime",
"\n",
"s",
".",
"debug",
"=",
"debug",
"\n",
"s",
".",
"trace",
"=",
"trace",
"\n",
"s",
".",
"lfile",
"=",
"logfile",
"\n",
"s",
".",
"mu",
".",
"Unlock",
"(",
")",
"\n",
"}"
] | // SetLogger sets the logger, debug and trace | [
"SetLogger",
"sets",
"the",
"logger",
"debug",
"and",
"trace"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/logger/logger.go#L48-L56 | train |
nats-io/nats-streaming-server | logger/logger.go | GetLogger | func (s *StanLogger) GetLogger() Logger {
s.mu.RLock()
l := s.log
s.mu.RUnlock()
return l
} | go | func (s *StanLogger) GetLogger() Logger {
s.mu.RLock()
l := s.log
s.mu.RUnlock()
return l
} | [
"func",
"(",
"s",
"*",
"StanLogger",
")",
"GetLogger",
"(",
")",
"Logger",
"{",
"s",
".",
"mu",
".",
"RLock",
"(",
")",
"\n",
"l",
":=",
"s",
".",
"log",
"\n",
"s",
".",
"mu",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"l",
"\n",
"}"
] | // GetLogger returns the logger | [
"GetLogger",
"returns",
"the",
"logger"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/logger/logger.go#L59-L64 | train |
nats-io/nats-streaming-server | logger/logger.go | ReopenLogFile | func (s *StanLogger) ReopenLogFile() {
s.mu.Lock()
if s.lfile == "" {
s.mu.Unlock()
s.Noticef("File log re-open ignored, not a file logger")
return
}
if l, ok := s.log.(io.Closer); ok {
if err := l.Close(); err != nil {
s.mu.Unlock()
s.Errorf("Unable to close logger: %v", err)
return
}
}
fileLog := natsdLogger.NewFileLogger(s.lfile, s.ltime, s.debug, s.trace, true)
s.log = fileLog
s.mu.Unlock()
s.Noticef("File log re-opened")
} | go | func (s *StanLogger) ReopenLogFile() {
s.mu.Lock()
if s.lfile == "" {
s.mu.Unlock()
s.Noticef("File log re-open ignored, not a file logger")
return
}
if l, ok := s.log.(io.Closer); ok {
if err := l.Close(); err != nil {
s.mu.Unlock()
s.Errorf("Unable to close logger: %v", err)
return
}
}
fileLog := natsdLogger.NewFileLogger(s.lfile, s.ltime, s.debug, s.trace, true)
s.log = fileLog
s.mu.Unlock()
s.Noticef("File log re-opened")
} | [
"func",
"(",
"s",
"*",
"StanLogger",
")",
"ReopenLogFile",
"(",
")",
"{",
"s",
".",
"mu",
".",
"Lock",
"(",
")",
"\n",
"if",
"s",
".",
"lfile",
"==",
"\"",
"\"",
"{",
"s",
".",
"mu",
".",
"Unlock",
"(",
")",
"\n",
"s",
".",
"Noticef",
"(",
"\"",
"\"",
")",
"\n",
"return",
"\n",
"}",
"\n",
"if",
"l",
",",
"ok",
":=",
"s",
".",
"log",
".",
"(",
"io",
".",
"Closer",
")",
";",
"ok",
"{",
"if",
"err",
":=",
"l",
".",
"Close",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"s",
".",
"mu",
".",
"Unlock",
"(",
")",
"\n",
"s",
".",
"Errorf",
"(",
"\"",
"\"",
",",
"err",
")",
"\n",
"return",
"\n",
"}",
"\n",
"}",
"\n",
"fileLog",
":=",
"natsdLogger",
".",
"NewFileLogger",
"(",
"s",
".",
"lfile",
",",
"s",
".",
"ltime",
",",
"s",
".",
"debug",
",",
"s",
".",
"trace",
",",
"true",
")",
"\n",
"s",
".",
"log",
"=",
"fileLog",
"\n",
"s",
".",
"mu",
".",
"Unlock",
"(",
")",
"\n",
"s",
".",
"Noticef",
"(",
"\"",
"\"",
")",
"\n",
"}"
] | // ReopenLogFile closes and reopens the logfile.
// Does nothing if the logger is not file based. | [
"ReopenLogFile",
"closes",
"and",
"reopen",
"the",
"logfile",
".",
"Does",
"nothing",
"if",
"the",
"logger",
"is",
"not",
"a",
"file",
"based",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/logger/logger.go#L68-L86 | train |
nats-io/nats-streaming-server | logger/logger.go | Close | func (s *StanLogger) Close() error {
s.mu.Lock()
defer s.mu.Unlock()
if l, ok := s.log.(io.Closer); ok {
return l.Close()
}
return nil
} | go | func (s *StanLogger) Close() error {
s.mu.Lock()
defer s.mu.Unlock()
if l, ok := s.log.(io.Closer); ok {
return l.Close()
}
return nil
} | [
"func",
"(",
"s",
"*",
"StanLogger",
")",
"Close",
"(",
")",
"error",
"{",
"s",
".",
"mu",
".",
"Lock",
"(",
")",
"\n",
"defer",
"s",
".",
"mu",
".",
"Unlock",
"(",
")",
"\n",
"if",
"l",
",",
"ok",
":=",
"s",
".",
"log",
".",
"(",
"io",
".",
"Closer",
")",
";",
"ok",
"{",
"return",
"l",
".",
"Close",
"(",
")",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // Close closes this logger, releasing possible held resources. | [
"Close",
"closes",
"this",
"logger",
"releasing",
"possible",
"held",
"resources",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/logger/logger.go#L89-L96 | train |
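Both Close above and ReopenLogFile earlier rely on the same idiom: the underlying logger is only closed when its concrete type actually implements io.Closer. A tiny standalone illustration of that type assertion, using a temporary file in place of a file-based logger:

package main

import (
	"fmt"
	"io"
	"os"
)

// closeIfCloser closes v only when it implements io.Closer; anything else is
// left untouched and reported as a no-op.
func closeIfCloser(v interface{}) error {
	if c, ok := v.(io.Closer); ok {
		return c.Close()
	}
	return nil
}

func main() {
	f, err := os.CreateTemp("", "log")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	fmt.Println(closeIfCloser(f))        // *os.File implements io.Closer, so it is closed
	fmt.Println(closeIfCloser("stdout")) // a string does not; nothing to close
}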
nats-io/nats-streaming-server | logger/logger.go | Errorf | func (s *StanLogger) Errorf(format string, v ...interface{}) {
s.executeLogCall(func(log Logger, format string, v ...interface{}) {
log.Errorf(format, v...)
}, format, v...)
} | go | func (s *StanLogger) Errorf(format string, v ...interface{}) {
s.executeLogCall(func(log Logger, format string, v ...interface{}) {
log.Errorf(format, v...)
}, format, v...)
} | [
"func",
"(",
"s",
"*",
"StanLogger",
")",
"Errorf",
"(",
"format",
"string",
",",
"v",
"...",
"interface",
"{",
"}",
")",
"{",
"s",
".",
"executeLogCall",
"(",
"func",
"(",
"log",
"Logger",
",",
"format",
"string",
",",
"v",
"...",
"interface",
"{",
"}",
")",
"{",
"log",
".",
"Errorf",
"(",
"format",
",",
"v",
"...",
")",
"\n",
"}",
",",
"format",
",",
"v",
"...",
")",
"\n",
"}"
] | // Errorf logs an error | [
"Errorf",
"logs",
"an",
"error"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/logger/logger.go#L106-L110 | train |
nats-io/nats-streaming-server | logger/logger.go | Warnf | func (s *StanLogger) Warnf(format string, v ...interface{}) {
s.executeLogCall(func(logger Logger, format string, v ...interface{}) {
logger.Warnf(format, v...)
}, format, v...)
} | go | func (s *StanLogger) Warnf(format string, v ...interface{}) {
s.executeLogCall(func(logger Logger, format string, v ...interface{}) {
logger.Warnf(format, v...)
}, format, v...)
} | [
"func",
"(",
"s",
"*",
"StanLogger",
")",
"Warnf",
"(",
"format",
"string",
",",
"v",
"...",
"interface",
"{",
"}",
")",
"{",
"s",
".",
"executeLogCall",
"(",
"func",
"(",
"logger",
"Logger",
",",
"format",
"string",
",",
"v",
"...",
"interface",
"{",
"}",
")",
"{",
"logger",
".",
"Warnf",
"(",
"format",
",",
"v",
"...",
")",
"\n",
"}",
",",
"format",
",",
"v",
"...",
")",
"\n",
"}"
] | // Warnf logs a warning statement | [
"Warnf",
"logs",
"a",
"warning",
"statement"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/logger/logger.go#L139-L143 | train |
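Errorf and Warnf above delegate through an executeLogCall helper that is not shown in these records. The sketch below is only a guess at what such a wrapper typically does (read-lock the logger and skip the call when none is set); the interfaces and types are local stand-ins, not the package's real Logger API.

package main

import (
	"log"
	"sync"
)

// logger is a minimal stand-in for the Logger interface used above.
type logger interface {
	Errorf(format string, v ...interface{})
	Warnf(format string, v ...interface{})
}

// stdLogger adapts the standard library logger to that interface.
type stdLogger struct{}

func (stdLogger) Errorf(format string, v ...interface{}) { log.Printf("[ERR] "+format, v...) }
func (stdLogger) Warnf(format string, v ...interface{})  { log.Printf("[WRN] "+format, v...) }

// stanLogger mirrors the wrapper pattern: the underlying logger is read under
// a lock and every call is skipped when no logger has been set.
type stanLogger struct {
	mu  sync.RWMutex
	log logger
}

// executeLogCall (assumed behavior) grabs the logger under the read lock and
// invokes the supplied closure on it.
func (s *stanLogger) executeLogCall(f func(l logger, format string, v ...interface{}), format string, v ...interface{}) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if s.log == nil {
		return
	}
	f(s.log, format, v...)
}

func (s *stanLogger) Warnf(format string, v ...interface{}) {
	s.executeLogCall(func(l logger, format string, v ...interface{}) {
		l.Warnf(format, v...)
	}, format, v...)
}

func main() {
	sl := &stanLogger{}
	sl.Warnf("ignored, no logger set")
	sl.log = stdLogger{}
	sl.Warnf("disk usage at %d%%", 87)
}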
nats-io/nats-streaming-server | stores/filestore.go | BufferSize | func BufferSize(size int) FileStoreOption {
return func(o *FileStoreOptions) error {
if size < 0 {
return fmt.Errorf("buffer size value must be a positive number")
}
o.BufferSize = size
return nil
}
} | go | func BufferSize(size int) FileStoreOption {
return func(o *FileStoreOptions) error {
if size < 0 {
return fmt.Errorf("buffer size value must be a positive number")
}
o.BufferSize = size
return nil
}
} | [
"func",
"BufferSize",
"(",
"size",
"int",
")",
"FileStoreOption",
"{",
"return",
"func",
"(",
"o",
"*",
"FileStoreOptions",
")",
"error",
"{",
"if",
"size",
"<",
"0",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"o",
".",
"BufferSize",
"=",
"size",
"\n",
"return",
"nil",
"\n",
"}",
"\n",
"}"
] | // BufferSize is a FileStore option that sets the size of the buffer used
// during store writes. This can help improve write performance. | [
"BufferSize",
"is",
"a",
"FileStore",
"option",
"that",
"sets",
"the",
"size",
"of",
"the",
"buffer",
"used",
"during",
"store",
"writes",
".",
"This",
"can",
"help",
"improve",
"write",
"performance",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L210-L218 | train |
nats-io/nats-streaming-server | stores/filestore.go | CompactEnabled | func CompactEnabled(enabled bool) FileStoreOption {
return func(o *FileStoreOptions) error {
o.CompactEnabled = enabled
return nil
}
} | go | func CompactEnabled(enabled bool) FileStoreOption {
return func(o *FileStoreOptions) error {
o.CompactEnabled = enabled
return nil
}
} | [
"func",
"CompactEnabled",
"(",
"enabled",
"bool",
")",
"FileStoreOption",
"{",
"return",
"func",
"(",
"o",
"*",
"FileStoreOptions",
")",
"error",
"{",
"o",
".",
"CompactEnabled",
"=",
"enabled",
"\n",
"return",
"nil",
"\n",
"}",
"\n",
"}"
] | // CompactEnabled is a FileStore option that enables or disables file compaction.
// The value false will disable compaction. | [
"CompactEnabled",
"is",
"a",
"FileStore",
"option",
"that",
"enables",
"or",
"disables",
"file",
"compaction",
".",
"The",
"value",
"false",
"will",
"disable",
"compaction",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L222-L227 | train |
nats-io/nats-streaming-server | stores/filestore.go | CompactInterval | func CompactInterval(seconds int) FileStoreOption {
return func(o *FileStoreOptions) error {
if seconds <= 0 {
return fmt.Errorf("compact interval value must at least be 1 seconds")
}
o.CompactInterval = seconds
return nil
}
} | go | func CompactInterval(seconds int) FileStoreOption {
return func(o *FileStoreOptions) error {
if seconds <= 0 {
return fmt.Errorf("compact interval value must at least be 1 seconds")
}
o.CompactInterval = seconds
return nil
}
} | [
"func",
"CompactInterval",
"(",
"seconds",
"int",
")",
"FileStoreOption",
"{",
"return",
"func",
"(",
"o",
"*",
"FileStoreOptions",
")",
"error",
"{",
"if",
"seconds",
"<=",
"0",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"o",
".",
"CompactInterval",
"=",
"seconds",
"\n",
"return",
"nil",
"\n",
"}",
"\n",
"}"
] | // CompactInterval is a FileStore option that defines the minimum compaction interval.
// Compaction is not timer based, but instead when things get "deleted". This value
// prevents compaction to happen too often. | [
"CompactInterval",
"is",
"a",
"FileStore",
"option",
"that",
"defines",
"the",
"minimum",
"compaction",
"interval",
".",
"Compaction",
"is",
"not",
"timer",
"based",
"but",
"instead",
"when",
"things",
"get",
"deleted",
".",
"This",
"value",
"prevents",
"compaction",
"to",
"happen",
"too",
"often",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L232-L240 | train |
nats-io/nats-streaming-server | stores/filestore.go | CompactFragmentation | func CompactFragmentation(fragmentation int) FileStoreOption {
return func(o *FileStoreOptions) error {
if fragmentation <= 0 {
return fmt.Errorf("compact fragmentation value must at least be 1")
}
o.CompactFragmentation = fragmentation
return nil
}
} | go | func CompactFragmentation(fragmentation int) FileStoreOption {
return func(o *FileStoreOptions) error {
if fragmentation <= 0 {
return fmt.Errorf("compact fragmentation value must at least be 1")
}
o.CompactFragmentation = fragmentation
return nil
}
} | [
"func",
"CompactFragmentation",
"(",
"fragmentation",
"int",
")",
"FileStoreOption",
"{",
"return",
"func",
"(",
"o",
"*",
"FileStoreOptions",
")",
"error",
"{",
"if",
"fragmentation",
"<=",
"0",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"o",
".",
"CompactFragmentation",
"=",
"fragmentation",
"\n",
"return",
"nil",
"\n",
"}",
"\n",
"}"
] | // CompactFragmentation is a FileStore option that defines the fragmentation ratio
// below which compaction would not occur. For instance, specifying 50 means that
// if other variables would allow for compaction, the compaction would occur only
// after 50% of the file has data that is no longer valid. | [
"CompactFragmentation",
"is",
"a",
"FileStore",
"option",
"that",
"defines",
"the",
"fragmentation",
"ratio",
"below",
"which",
"compaction",
"would",
"not",
"occur",
".",
"For",
"instance",
"specifying",
"50",
"means",
"that",
"if",
"other",
"variables",
"would",
"allow",
"for",
"compaction",
"the",
"compaction",
"would",
"occur",
"only",
"after",
"50%",
"of",
"the",
"file",
"has",
"data",
"that",
"is",
"no",
"longer",
"valid",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L246-L254 | train |
nats-io/nats-streaming-server | stores/filestore.go | CompactMinFileSize | func CompactMinFileSize(fileSize int64) FileStoreOption {
return func(o *FileStoreOptions) error {
if fileSize < 0 {
return fmt.Errorf("compact minimum file size value must be a positive number")
}
o.CompactMinFileSize = fileSize
return nil
}
} | go | func CompactMinFileSize(fileSize int64) FileStoreOption {
return func(o *FileStoreOptions) error {
if fileSize < 0 {
return fmt.Errorf("compact minimum file size value must be a positive number")
}
o.CompactMinFileSize = fileSize
return nil
}
} | [
"func",
"CompactMinFileSize",
"(",
"fileSize",
"int64",
")",
"FileStoreOption",
"{",
"return",
"func",
"(",
"o",
"*",
"FileStoreOptions",
")",
"error",
"{",
"if",
"fileSize",
"<",
"0",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"o",
".",
"CompactMinFileSize",
"=",
"fileSize",
"\n",
"return",
"nil",
"\n",
"}",
"\n",
"}"
] | // CompactMinFileSize is a FileStore option that defines the minimum file size below
// which compaction would not occur. Specify `0` if you don't want any minimum. | [
"CompactMinFileSize",
"is",
"a",
"FileStore",
"option",
"that",
"defines",
"the",
"minimum",
"file",
"size",
"below",
"which",
"compaction",
"would",
"not",
"occur",
".",
"Specify",
"0",
"if",
"you",
"don",
"t",
"want",
"any",
"minimum",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L258-L266 | train |
nats-io/nats-streaming-server | stores/filestore.go | DoCRC | func DoCRC(enableCRC bool) FileStoreOption {
return func(o *FileStoreOptions) error {
o.DoCRC = enableCRC
return nil
}
} | go | func DoCRC(enableCRC bool) FileStoreOption {
return func(o *FileStoreOptions) error {
o.DoCRC = enableCRC
return nil
}
} | [
"func",
"DoCRC",
"(",
"enableCRC",
"bool",
")",
"FileStoreOption",
"{",
"return",
"func",
"(",
"o",
"*",
"FileStoreOptions",
")",
"error",
"{",
"o",
".",
"DoCRC",
"=",
"enableCRC",
"\n",
"return",
"nil",
"\n",
"}",
"\n",
"}"
] | // DoCRC is a FileStore option that defines if a CRC checksum verification should
// be performed when records are read from disk. | [
"DoCRC",
"is",
"a",
"FileStore",
"option",
"that",
"defines",
"if",
"a",
"CRC",
"checksum",
"verification",
"should",
"be",
"performed",
"when",
"records",
"are",
"read",
"from",
"disk",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L270-L275 | train |
nats-io/nats-streaming-server | stores/filestore.go | SliceConfig | func SliceConfig(maxMsgs int, maxBytes int64, maxAge time.Duration, script string) FileStoreOption {
return func(o *FileStoreOptions) error {
if maxMsgs < 0 || maxBytes < 0 || maxAge < 0 {
return fmt.Errorf("slice max values must be positive numbers")
}
o.SliceMaxMsgs = maxMsgs
o.SliceMaxBytes = maxBytes
o.SliceMaxAge = maxAge
o.SliceArchiveScript = script
return nil
}
} | go | func SliceConfig(maxMsgs int, maxBytes int64, maxAge time.Duration, script string) FileStoreOption {
return func(o *FileStoreOptions) error {
if maxMsgs < 0 || maxBytes < 0 || maxAge < 0 {
return fmt.Errorf("slice max values must be positive numbers")
}
o.SliceMaxMsgs = maxMsgs
o.SliceMaxBytes = maxBytes
o.SliceMaxAge = maxAge
o.SliceArchiveScript = script
return nil
}
} | [
"func",
"SliceConfig",
"(",
"maxMsgs",
"int",
",",
"maxBytes",
"int64",
",",
"maxAge",
"time",
".",
"Duration",
",",
"script",
"string",
")",
"FileStoreOption",
"{",
"return",
"func",
"(",
"o",
"*",
"FileStoreOptions",
")",
"error",
"{",
"if",
"maxMsgs",
"<",
"0",
"||",
"maxBytes",
"<",
"0",
"||",
"maxAge",
"<",
"0",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"",
"\"",
")",
"\n",
"}",
"\n",
"o",
".",
"SliceMaxMsgs",
"=",
"maxMsgs",
"\n",
"o",
".",
"SliceMaxBytes",
"=",
"maxBytes",
"\n",
"o",
".",
"SliceMaxAge",
"=",
"maxAge",
"\n",
"o",
".",
"SliceArchiveScript",
"=",
"script",
"\n",
"return",
"nil",
"\n",
"}",
"\n",
"}"
] | // SliceConfig is a FileStore option that allows the configuration of
// file slice limits and optional archive script file name. | [
"SliceConfig",
"is",
"a",
"FileStore",
"option",
"that",
"allows",
"the",
"configuration",
"of",
"file",
"slice",
"limits",
"and",
"optional",
"archive",
"script",
"file",
"name",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/filestore.go#L301-L312 | train |
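The BufferSize, CompactEnabled, CompactInterval, CompactFragmentation, CompactMinFileSize, DoCRC, and SliceConfig entries above all follow Go's functional-option pattern: each constructor returns a closure that validates and applies one setting to a FileStoreOptions value. Below is a minimal, self-contained sketch of how such options are typically composed; the lower-case types and the apply helper are local to the example, not part of the stores package API.

package main

import "fmt"

// fileStoreOptions is a local stand-in for the package's FileStoreOptions
// struct, holding only the fields exercised by this sketch.
type fileStoreOptions struct {
	BufferSize     int
	CompactEnabled bool
	DoCRC          bool
}

// fileStoreOption mirrors the FileStoreOption signature used above: a function
// that validates and applies a single setting.
type fileStoreOption func(o *fileStoreOptions) error

func bufferSize(size int) fileStoreOption {
	return func(o *fileStoreOptions) error {
		if size < 0 {
			return fmt.Errorf("buffer size value must be a positive number")
		}
		o.BufferSize = size
		return nil
	}
}

func compactEnabled(enabled bool) fileStoreOption {
	return func(o *fileStoreOptions) error {
		o.CompactEnabled = enabled
		return nil
	}
}

func doCRC(enabled bool) fileStoreOption {
	return func(o *fileStoreOptions) error {
		o.DoCRC = enabled
		return nil
	}
}

// apply runs each option in order and stops at the first validation error,
// which is how variadic option lists are typically consumed by a constructor.
func apply(opts *fileStoreOptions, options ...fileStoreOption) error {
	for _, opt := range options {
		if err := opt(opts); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	opts := &fileStoreOptions{}
	if err := apply(opts, bufferSize(2*1024*1024), compactEnabled(true), doCRC(true)); err != nil {
		fmt.Println("invalid option:", err)
		return
	}
	fmt.Printf("%+v\n", *opts)
}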