This article collects typical usage examples of the engine.NewQueryEngine function in Golang. If you are wondering what NewQueryEngine does, how to call it, or what real-world usage looks like, the curated examples below should help.
Seven code examples of the NewQueryEngine function are shown below, sorted by popularity by default. You can upvote the examples you find useful; that feedback helps surface better Golang code examples.
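Before the examples, note that NewQueryEngine appears in two shapes across the InfluxDB revisions sampled here: an early form that takes only the coordinator (examples 1 and 6), and a later per-query form that takes the parsed SELECT query plus a channel of *protocol.Response and, in most revisions, also returns an error (examples 2-5 and 7). The fragment below is a minimal sketch of the per-query form assembled from those examples, not a verbatim API reference; it assumes the pre-0.9 InfluxDB source layout in which engine, parser, and protocol resolve as top-level packages, so check the import paths and exact signatures against the revision you are using.

// Sketch only: adapted from the examples below, not a verbatim API reference.
package example

import (
	"engine" // assumption: pre-0.9 InfluxDB package layout
	"parser"
	"protocol"
)

// runSelect builds a query engine for a SELECT query. The engine streams its
// results as *protocol.Response values on responseChan; a real caller drains
// that channel in a goroutine (see examples 2 and 7) and closes the processor
// once all points have been yielded.
func runSelect(spec *parser.QuerySpec) error {
	responseChan := make(chan *protocol.Response)
	processor, err := engine.NewQueryEngine(spec.SelectQuery(), responseChan)
	if err != nil {
		return err
	}
	// ... yield points into processor and consume responseChan here ...
	processor.Close()
	return nil
}

// Older revisions (examples 1 and 6) instead build a single engine from the
// coordinator at server startup:
//
//	eng, err := engine.NewQueryEngine(coord)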
Example 1: NewServer
func NewServer(config *configuration.Configuration) (*Server, error) {
	log.Info("Opening database at %s", config.DataDir)
	db, err := datastore.NewLevelDbDatastore(config.DataDir, config.LevelDbMaxOpenFiles)
	if err != nil {
		return nil, err
	}
	clusterConfig := coordinator.NewClusterConfiguration(config)
	raftServer := coordinator.NewRaftServer(config, clusterConfig)
	coord := coordinator.NewCoordinatorImpl(db, raftServer, clusterConfig)
	go coord.SyncLogs()
	requestHandler := coordinator.NewProtobufRequestHandler(db, coord, clusterConfig)
	protobufServer := coordinator.NewProtobufServer(config.ProtobufPortString(), requestHandler)
	eng, err := engine.NewQueryEngine(coord)
	if err != nil {
		return nil, err
	}
	raftServer.AssignEngineAndCoordinator(eng, coord)
	httpApi := http.NewHttpServer(config.ApiHttpPortString(), config.AdminAssetsDir, eng, coord, coord)
	httpApi.EnableSsl(config.ApiHttpSslPortString(), config.ApiHttpCertPath)
	adminServer := admin.NewHttpServer(config.AdminAssetsDir, config.AdminHttpPortString())
	return &Server{
		RaftServer:     raftServer,
		Db:             db,
		ProtobufServer: protobufServer,
		ClusterConfig:  clusterConfig,
		HttpApi:        httpApi,
		Coordinator:    coord,
		AdminServer:    adminServer,
		Config:         config,
		RequestHandler: requestHandler}, nil
}
Developer: johann8384, Project: influxdb, Lines: 35, Source file: server.go
Example 2: getShardsAndProcessor
func (self *CoordinatorImpl) getShardsAndProcessor(querySpec *parser.QuerySpec, writer SeriesWriter) ([]*cluster.ShardData, cluster.QueryProcessor, chan bool, error) {
	shards := self.clusterConfiguration.GetShards(querySpec)
	shouldAggregateLocally := self.shouldAggregateLocally(shards, querySpec)
	var err error
	var processor cluster.QueryProcessor
	responseChan := make(chan *protocol.Response)
	seriesClosed := make(chan bool)
	selectQuery := querySpec.SelectQuery()
	if selectQuery != nil {
		if !shouldAggregateLocally {
			// if we should aggregate in the coordinator (i.e. aggregation
			// isn't happening locally at the shard level), create an engine
			processor, err = engine.NewQueryEngine(querySpec.SelectQuery(), responseChan)
		} else {
			// if we have a query with limit, then create an engine, or we can
			// make the passthrough limit aware
			processor = engine.NewPassthroughEngineWithLimit(responseChan, 100, selectQuery.Limit)
		}
	} else if !shouldAggregateLocally {
		processor = engine.NewPassthroughEngine(responseChan, 100)
	}
	if err != nil {
		return nil, nil, nil, err
	}
	if processor == nil {
		return shards, nil, nil, nil
	}
	go func() {
		for {
			response := <-responseChan
			if *response.Type == endStreamResponse || *response.Type == accessDeniedResponse {
				writer.Close()
				seriesClosed <- true
				return
			}
			if !(*response.Type == queryResponse && querySpec.IsExplainQuery()) {
				if response.Series != nil && len(response.Series.Points) > 0 {
					writer.Write(response.Series)
				}
			}
		}
	}()
	return shards, processor, seriesClosed, nil
}
Developer: qz267, Project: influxdb, Lines: 52, Source file: coordinator.go
Example 3: Query
func (self *ShardData) Query(querySpec *parser.QuerySpec, response chan *protocol.Response) error {
	// This is only for queries that are deletes or drops. They need to be sent everywhere as opposed to just the local or one of the remote shards.
	// But this boolean should only be set to true on the server that receives the initial query.
	if querySpec.RunAgainstAllServersInShard {
		if querySpec.IsDeleteFromSeriesQuery() {
			return self.logAndHandleDeleteQuery(querySpec, response)
		} else if querySpec.IsDropSeriesQuery() {
			return self.logAndHandleDropSeriesQuery(querySpec, response)
		}
	}
	if self.localShard != nil {
		var processor QueryProcessor
		if querySpec.IsListSeriesQuery() {
			processor = engine.NewListSeriesEngine(response)
		} else if querySpec.IsDeleteFromSeriesQuery() || querySpec.IsDropSeriesQuery() || querySpec.IsSinglePointQuery() {
			maxDeleteResults := 10000
			processor = engine.NewPassthroughEngine(response, maxDeleteResults)
		} else {
			if self.ShouldAggregateLocally(querySpec) {
				processor = engine.NewQueryEngine(querySpec.SelectQuery(), response)
			} else {
				maxPointsToBufferBeforeSending := 1000
				processor = engine.NewPassthroughEngine(response, maxPointsToBufferBeforeSending)
			}
		}
		err := self.localShard.Query(querySpec, processor)
		processor.Close()
		return err
	}
	healthyServers := make([]*ClusterServer, 0, len(self.clusterServers))
	for _, s := range self.clusterServers {
		if !s.IsUp() {
			continue
		}
		healthyServers = append(healthyServers, s)
	}
	healthyCount := len(healthyServers)
	if healthyCount == 0 {
		message := fmt.Sprintf("No servers up to query shard %d", self.id)
		response <- &protocol.Response{Type: &endStreamResponse, ErrorMessage: &message}
		return errors.New(message)
	}
	randServerIndex := int(time.Now().UnixNano() % int64(healthyCount))
	server := healthyServers[randServerIndex]
	request := self.createRequest(querySpec)
	return server.MakeRequest(request, response)
}
Developer: rramos, Project: influxdb, Lines: 50, Source file: shard.go
Example 4: Query
func (self *ShardData) Query(querySpec *parser.QuerySpec, response chan *p.Response) {
	log.Debug("QUERY: shard %d, query '%s'", self.Id(), querySpec.GetQueryString())
	defer common.RecoverFunc(querySpec.Database(), querySpec.GetQueryString(), func(err interface{}) {
		response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(fmt.Sprintf("%s", err))}
	})
	// This is only for queries that are deletes or drops. They need to be sent everywhere as opposed to just the local or one of the remote shards.
	// But this boolean should only be set to true on the server that receives the initial query.
	if querySpec.RunAgainstAllServersInShard {
		if querySpec.IsDeleteFromSeriesQuery() {
			self.logAndHandleDeleteQuery(querySpec, response)
		} else if querySpec.IsDropSeriesQuery() {
			self.logAndHandleDropSeriesQuery(querySpec, response)
		}
	}
	if self.IsLocal {
		var processor QueryProcessor
		var err error
		if querySpec.IsListSeriesQuery() {
			processor = engine.NewListSeriesEngine(response)
		} else if querySpec.IsDeleteFromSeriesQuery() || querySpec.IsDropSeriesQuery() || querySpec.IsSinglePointQuery() {
			maxDeleteResults := 10000
			processor = engine.NewPassthroughEngine(response, maxDeleteResults)
		} else {
			query := querySpec.SelectQuery()
			if self.ShouldAggregateLocally(querySpec) {
				log.Debug("creating a query engine")
				processor, err = engine.NewQueryEngine(query, response)
				if err != nil {
					response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(err.Error())}
					log.Error("Error while creating engine: %s", err)
					return
				}
				processor.SetShardInfo(int(self.Id()), self.IsLocal)
			} else if query.HasAggregates() {
				maxPointsToBufferBeforeSending := 1000
				log.Debug("creating a passthrough engine")
				processor = engine.NewPassthroughEngine(response, maxPointsToBufferBeforeSending)
			} else {
				maxPointsToBufferBeforeSending := 1000
				log.Debug("creating a passthrough engine with limit")
				processor = engine.NewPassthroughEngineWithLimit(response, maxPointsToBufferBeforeSending, query.Limit)
			}
			if query.GetFromClause().Type != parser.FromClauseInnerJoin {
				// Joins do their own filtering since we need to get all
				// points before filtering. This is due to the fact that some
				// where expressions will be difficult to compute before the
				// points are joined together, think where clause with
				// left.column = 'something' or right.column =
				// 'something_else'. We can't filter the individual series
				// separately. The filtering happens in merge.go:55
				processor = engine.NewFilteringEngine(query, processor)
			}
		}
		shard, err := self.store.GetOrCreateShard(self.id)
		if err != nil {
			response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(err.Error())}
			log.Error("Error while getting shards: %s", err)
			return
		}
		defer self.store.ReturnShard(self.id)
		err = shard.Query(querySpec, processor)
		// if we call Close() in case of an error it will mask the error
		if err != nil {
			response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(err.Error())}
			return
		}
		processor.Close()
		response <- &p.Response{Type: &endStreamResponse}
		return
	}
	if server := self.randomHealthyServer(); server != nil {
		log.Debug("Querying server %d for shard %d", server.GetId(), self.Id())
		request := self.createRequest(querySpec)
		server.MakeRequest(request, response)
		return
	}
	message := fmt.Sprintf("No servers up to query shard %d", self.id)
	response <- &p.Response{Type: &endStreamResponse, ErrorMessage: &message}
	log.Error(message)
}
Developer: jhermann, Project: influxdb, Lines: 87, Source file: shard.go
Example 5: Query
func (self *ShardData) Query(querySpec *parser.QuerySpec, response chan *p.Response) {
	// This is only for queries that are deletes or drops. They need to be sent everywhere as opposed to just the local or one of the remote shards.
	// But this boolean should only be set to true on the server that receives the initial query.
	if querySpec.RunAgainstAllServersInShard {
		if querySpec.IsDeleteFromSeriesQuery() {
			self.logAndHandleDeleteQuery(querySpec, response)
		} else if querySpec.IsDropSeriesQuery() {
			self.logAndHandleDropSeriesQuery(querySpec, response)
		}
	}
	if self.IsLocal {
		var processor QueryProcessor
		var err error
		if querySpec.IsListSeriesQuery() {
			processor = engine.NewListSeriesEngine(response)
		} else if querySpec.IsDeleteFromSeriesQuery() || querySpec.IsDropSeriesQuery() || querySpec.IsSinglePointQuery() {
			maxDeleteResults := 10000
			processor = engine.NewPassthroughEngine(response, maxDeleteResults)
		} else {
			query := querySpec.SelectQuery()
			if self.ShouldAggregateLocally(querySpec) {
				log.Debug("creating a query engine\n")
				processor, err = engine.NewQueryEngine(query, response)
				if err != nil {
					response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(err.Error())}
					log.Error("Error while creating engine: %s", err)
					return
				}
				processor.SetShardInfo(int(self.Id()), self.IsLocal)
			} else if query.HasAggregates() {
				maxPointsToBufferBeforeSending := 1000
				log.Debug("creating a passthrough engine\n")
				processor = engine.NewPassthroughEngine(response, maxPointsToBufferBeforeSending)
			} else {
				maxPointsToBufferBeforeSending := 1000
				log.Debug("creating a passthrough engine with limit\n")
				processor = engine.NewPassthroughEngineWithLimit(response, maxPointsToBufferBeforeSending, query.Limit)
			}
			processor = engine.NewFilteringEngine(query, processor)
		}
		shard, err := self.store.GetOrCreateShard(self.id)
		if err != nil {
			response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(err.Error())}
			log.Error("Error while getting shards: %s", err)
			return
		}
		defer self.store.ReturnShard(self.id)
		err = shard.Query(querySpec, processor)
		processor.Close()
		if err != nil {
			response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(err.Error())}
		}
		response <- &p.Response{Type: &endStreamResponse}
		return
	}
	healthyServers := make([]*ClusterServer, 0, len(self.clusterServers))
	for _, s := range self.clusterServers {
		if !s.IsUp() {
			continue
		}
		healthyServers = append(healthyServers, s)
	}
	healthyCount := len(healthyServers)
	if healthyCount == 0 {
		message := fmt.Sprintf("No servers up to query shard %d", self.id)
		response <- &p.Response{Type: &endStreamResponse, ErrorMessage: &message}
		log.Error(message)
		return
	}
	randServerIndex := int(time.Now().UnixNano() % int64(healthyCount))
	server := healthyServers[randServerIndex]
	request := self.createRequest(querySpec)
	server.MakeRequest(request, response)
}
Developer: qq101, Project: influxdb, Lines: 78, Source file: shard.go
Example 6: main
func main() {
	fileName := flag.String("config", "config.json.sample", "Config file")
	wantsVersion := flag.Bool("v", false, "Get version number")
	resetRootPassword := flag.Bool("reset-root", false, "Reset root password")
	pidFile := flag.String("pidfile", "", "the pid file")
	cpuProfiler := flag.String("cpuprofile", "", "filename where cpu profile data will be written")
	runtime.GOMAXPROCS(runtime.NumCPU())
	flag.Parse()
	startProfiler(cpuProfiler)
	if wantsVersion != nil && *wantsVersion {
		fmt.Printf("InfluxDB v%s (git: %s)\n", version, gitSha)
		return
	}
	config := configuration.LoadConfiguration(*fileName)
	if pidFile != nil && *pidFile != "" {
		pid := strconv.Itoa(os.Getpid())
		if err := ioutil.WriteFile(*pidFile, []byte(pid), 0644); err != nil {
			panic(err)
		}
	}
	log.Println("Starting Influx Server...")
	clusterConfig := coordinator.NewClusterConfiguration()
	os.MkdirAll(config.RaftDir, 0744)
	raftServer := coordinator.NewRaftServer(config.RaftDir, "localhost", config.RaftServerPort, clusterConfig)
	go func() {
		raftServer.ListenAndServe(config.SeedServers, false)
	}()
	if *resetRootPassword {
		time.Sleep(2 * time.Second) // wait for the raft server to join the cluster
		fmt.Printf("Resetting root's password to %s", coordinator.DEFAULT_ROOT_PWD)
		if err := raftServer.CreateRootUser(); err != nil {
			panic(err)
		}
	}
	os.MkdirAll(config.DataDir, 0744)
	log.Println("Opening database at ", config.DataDir)
	db, err := datastore.NewLevelDbDatastore(config.DataDir)
	if err != nil {
		panic(err)
	}
	coord := coordinator.NewCoordinatorImpl(db, raftServer, clusterConfig)
	eng, err := engine.NewQueryEngine(coord)
	if err != nil {
		panic(err)
	}
	log.Println()
	adminServer := admin.NewHttpServer(config.AdminAssetsDir, config.AdminHttpPortString())
	log.Println("Starting admin interface on port", config.AdminHttpPort)
	go func() {
		adminServer.ListenAndServe()
	}()
	log.Println("Starting Http Api server on port", config.ApiHttpPort)
	server := http.NewHttpServer(config.ApiHttpPortString(), eng, coord, coord)
	server.ListenAndServe()
}
Developer: nvdnkpr, Project: influxdb, Lines: 63, Source file: server.go
Example 7: runQuerySpec
func (self *CoordinatorImpl) runQuerySpec(querySpec *parser.QuerySpec, seriesWriter SeriesWriter) error {
	shards := self.clusterConfiguration.GetShards(querySpec)
	shouldAggregateLocally := true
	var processor cluster.QueryProcessor
	var responseChan chan *protocol.Response
	var seriesClosed chan bool
	for _, s := range shards {
		// If the aggregation is done at the shard level, we don't need to
		// do it here at the coordinator level.
		if !s.ShouldAggregateLocally(querySpec) {
			seriesClosed = make(chan bool)
			shouldAggregateLocally = false
			responseChan = make(chan *protocol.Response)
			if querySpec.SelectQuery() != nil {
				processor = engine.NewQueryEngine(querySpec.SelectQuery(), responseChan)
			} else {
				bufferSize := 100
				processor = engine.NewPassthroughEngine(responseChan, bufferSize)
			}
			go func() {
				for {
					res := <-responseChan
					if *res.Type == endStreamResponse || *res.Type == accessDeniedResponse {
						seriesWriter.Close()
						seriesClosed <- true
						return
					}
					if res.Series != nil && len(res.Series.Points) > 0 {
						seriesWriter.Write(res.Series)
					}
				}
			}()
			break
		}
	}
	responses := make([]chan *protocol.Response, 0)
	for _, shard := range shards {
		responseChan := make(chan *protocol.Response, self.config.QueryShardBufferSize)
		go shard.Query(querySpec, responseChan)
		responses = append(responses, responseChan)
	}
	for i, responseChan := range responses {
		log.Debug("READING: shard: ", shards[i].String())
		for {
			response := <-responseChan
			log.Debug("GOT RESPONSE: ", response.Type, response.Series)
			if *response.Type == endStreamResponse || *response.Type == accessDeniedResponse {
				break
			}
			if shouldAggregateLocally {
				log.Debug("WRITING: ", len(response.Series.Points))
				seriesWriter.Write(response.Series)
				log.Debug("WRITING (done)")
				continue
			}
			// if the data wasn't aggregated at the shard level, aggregate
			// the data here
			log.Debug("YIELDING: ", len(response.Series.Points))
			if response.Series != nil {
				for _, p := range response.Series.Points {
					processor.YieldPoint(response.Series.Name, response.Series.Fields, p)
				}
			}
		}
		log.Debug("DONE: shard: ", shards[i].String())
	}
	if !shouldAggregateLocally {
		processor.Close()
		<-seriesClosed
		return nil
	}
	seriesWriter.Close()
	return nil
}
Developer: rramos, Project: influxdb, Lines: 79, Source file: coordinator.go
Note: The engine.NewQueryEngine examples in this article were collected from source-code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective authors, and copyright remains with those authors; consult each project's license before redistributing or reusing the code, and do not republish without permission.