This article collects typical usage examples of the Golang function vulcan/kubernetes/pkg/util.HandleCrash. If you have been wondering what exactly HandleCrash does, how to use it, or what real-world calls to it look like, the hand-picked code examples below should help.
20 code examples of HandleCrash are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Golang examples.
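Before diving into the examples, it helps to know what HandleCrash is for: deferred at the top of a goroutine, it recovers from panics and logs them so that one misbehaving goroutine does not take down the whole process. The following is a minimal, self-contained sketch of that idea — the names are illustrative, not the actual vulcan/kubernetes source (which also supports additional panic handlers and a crash-for-real escape hatch):
package main

import (
	"log"
	"runtime"
)

// handleCrash mimics the shape of util.HandleCrash: it must be deferred,
// because recover() only intercepts a panic inside a deferred call.
func handleCrash() {
	if r := recover(); r != nil {
		buf := make([]byte, 4096)
		n := runtime.Stack(buf, false)
		log.Printf("recovered from panic: %v\n%s", r, buf[:n])
	}
}

func main() {
	done := make(chan struct{})
	go func() {
		defer close(done)
		defer handleCrash()
		panic("boom") // without handleCrash this would kill the process
	}()
	<-done
	log.Println("process survived the goroutine panic")
}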
Example 1: Run
// Runs e; will not return until stopCh is closed. workers determines how many
// endpoints will be handled in parallel.
func (e *endpointController) Run(workers int, stopCh <-chan struct{}) {
	defer util.HandleCrash()
	go e.serviceController.Run(stopCh)
	go e.podController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go util.Until(e.worker, time.Second, stopCh)
	}
	go func() {
		defer util.HandleCrash()
		time.Sleep(5 * time.Minute) // give time for our cache to fill
		e.checkLeftoverEndpoints()
	}()
	<-stopCh
	e.queue.ShutDown()
}
Developer ID: qinguoan, Project: vulcan, Lines: 17, Source: endpoints_controller.go
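Examples like this one pair a deferred HandleCrash with util.Until to run worker goroutines until a stop channel closes. As a rough sketch of the assumed Until semantics (call f, wait a period, repeat until stopCh closes — an illustration, not the exact upstream implementation):
package main

import (
	"fmt"
	"time"
)

// until approximates util.Until: run f, wait period, repeat, returning once
// stopCh is closed. (Assumed semantics, for illustration only.)
func until(f func(), period time.Duration, stopCh <-chan struct{}) {
	for {
		select {
		case <-stopCh:
			return
		default:
		}
		f()
		select {
		case <-stopCh:
			return
		case <-time.After(period):
		}
	}
}

func main() {
	stopCh := make(chan struct{})
	go until(func() { fmt.Println("worker tick") }, 100*time.Millisecond, stopCh)
	time.Sleep(350 * time.Millisecond)
	close(stopCh)
}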
Example 2: translate
// translate pulls stuff from etcd, converts, and pushes out the outgoing channel. Meant to be
// called as a goroutine.
func (w *etcdWatcher) translate() {
	defer close(w.outgoing)
	defer util.HandleCrash()
	for {
		select {
		case err := <-w.etcdError:
			if err != nil {
				w.emit(watch.Event{
					Type: watch.Error,
					Object: &unversioned.Status{
						Status:  unversioned.StatusFailure,
						Message: err.Error(),
					},
				})
			}
			return
		case <-w.userStop:
			w.etcdStop <- true
			return
		case res, ok := <-w.etcdIncoming:
			if ok {
				if curLen := int64(len(w.etcdIncoming)); watchChannelHWM.Check(curLen) {
					// Monitor if this gets backed up, and how much.
					glog.V(2).Infof("watch: %v objects queued in channel.", curLen)
				}
				w.sendResult(res)
			}
			// If !ok, don't return here-- must wait for etcdError channel
			// to give an error or be closed.
		}
	}
}
Developer ID: qinguoan, Project: vulcan, Lines: 35, Source: etcd_watcher.go
Example 3: Copy
// Copy the reader to the response. The created WebSocket is closed after this
// method completes.
func (r *Reader) Copy(w http.ResponseWriter, req *http.Request) error {
	go func() {
		defer util.HandleCrash()
		websocket.Server{Handshake: r.handshake, Handler: r.handle}.ServeHTTP(w, req)
	}()
	return <-r.err
}
Developer ID: qinguoan, Project: vulcan, Lines: 9, Source: stream.go
Example 4: finishRequest
// finishRequest makes a given resultFunc asynchronous and handles errors returned by the response.
// Any api.Status object returned is considered an "error", which interrupts the normal response flow.
func finishRequest(timeout time.Duration, fn resultFunc) (result runtime.Object, err error) {
	// these channels need to be buffered to prevent the goroutine below from hanging indefinitely
	// when the select statement reads something other than the one the goroutine sends on.
	ch := make(chan runtime.Object, 1)
	errCh := make(chan error, 1)
	panicCh := make(chan interface{}, 1)
	go func() {
		// panics don't cross goroutine boundaries, so we have to handle ourselves
		defer util.HandleCrash(func(panicReason interface{}) {
			// Propagate to parent goroutine
			panicCh <- panicReason
		})
		if result, err := fn(); err != nil {
			errCh <- err
		} else {
			ch <- result
		}
	}()
	select {
	case result = <-ch:
		if status, ok := result.(*unversioned.Status); ok {
			return nil, errors.FromObject(status)
		}
		return result, nil
	case err = <-errCh:
		return nil, err
	case p := <-panicCh:
		panic(p)
	case <-time.After(timeout):
		return nil, errors.NewTimeoutError("request did not complete within allowed duration", 0)
	}
}
Developer ID: qinguoan, Project: vulcan, Lines: 36, Source: resthandler.go
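The interesting trick in this example is the additional-handler argument to HandleCrash, used to forward a panic across the goroutine boundary and re-raise it in the caller. A stripped-down sketch of that pattern (handleCrashWith and work are hypothetical stand-ins, not the real util API):
package main

import "fmt"

// handleCrashWith stands in for util.HandleCrash with extra handlers:
// on panic, each handler receives the recovered value.
func handleCrashWith(handlers ...func(interface{})) {
	if r := recover(); r != nil {
		for _, fn := range handlers {
			fn(r)
		}
	}
}

func work() (string, error) {
	panic("worker failed") // simulate a panicking resultFunc
}

func main() {
	ch := make(chan string, 1)
	errCh := make(chan error, 1)
	panicCh := make(chan interface{}, 1) // buffered so the goroutine never blocks

	go func() {
		// panics don't cross goroutine boundaries; forward them explicitly
		defer handleCrashWith(func(reason interface{}) { panicCh <- reason })
		if res, err := work(); err != nil {
			errCh <- err
		} else {
			ch <- res
		}
	}()

	select {
	case res := <-ch:
		fmt.Println(res)
	case err := <-errCh:
		fmt.Println("error:", err)
	case p := <-panicCh:
		panic(p) // re-raise in the caller's goroutine, as finishRequest does
	}
}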
Example 5: getBackendConn
func (udp *udpProxySocket) getBackendConn(activeClients *clientCache, cliAddr net.Addr, proxier *Proxier, service proxy.ServicePortName, timeout time.Duration) (net.Conn, error) {
	activeClients.mu.Lock()
	defer activeClients.mu.Unlock()
	svrConn, found := activeClients.clients[cliAddr.String()]
	if !found {
		// TODO: This could spin up a new goroutine to make the outbound connection,
		// and keep accepting inbound traffic.
		glog.V(3).Infof("New UDP connection from %s", cliAddr)
		var err error
		svrConn, err = tryConnect(service, cliAddr, "udp", proxier)
		if err != nil {
			return nil, err
		}
		if err = svrConn.SetDeadline(time.Now().Add(timeout)); err != nil {
			glog.Errorf("SetDeadline failed: %v", err)
			return nil, err
		}
		activeClients.clients[cliAddr.String()] = svrConn
		go func(cliAddr net.Addr, svrConn net.Conn, activeClients *clientCache, timeout time.Duration) {
			defer util.HandleCrash()
			udp.proxyClient(cliAddr, svrConn, activeClients, timeout)
		}(cliAddr, svrConn, activeClients, timeout)
	}
	return svrConn, nil
}
Developer ID: qinguoan, Project: vulcan, Lines: 26, Source: proxysocket.go
Example 6: receive
// receive reads result from the decoder in a loop and sends down the result channel.
func (sw *StreamWatcher) receive() {
	defer close(sw.result)
	defer sw.Stop()
	defer util.HandleCrash()
	for {
		action, obj, err := sw.source.Decode()
		if err != nil {
			// Ignore expected error.
			if sw.stopping() {
				return
			}
			switch err {
			case io.EOF:
				// watch closed normally
			case io.ErrUnexpectedEOF:
				glog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err)
			default:
				msg := "Unable to decode an event from the watch stream: %v"
				if util.IsProbableEOF(err) {
					glog.V(5).Infof(msg, err)
				} else {
					glog.Errorf(msg, err)
				}
			}
			return
		}
		sw.result <- Event{
			Type:   action,
			Object: obj,
		}
	}
}
Developer ID: qinguoan, Project: vulcan, Lines: 33, Source: iowatcher.go
Example 7: handleSchedulingError
// implementation of scheduling plugin's Error func; see plugin/pkg/scheduler
func (k *errorHandler) handleSchedulingError(pod *api.Pod, schedulingErr error) {
	if schedulingErr == noSuchPodErr {
		log.V(2).Infof("Not rescheduling non-existent pod %v", pod.Name)
		return
	}
	log.Infof("Error scheduling %v: %v; retrying", pod.Name, schedulingErr)
	defer util.HandleCrash()
	// default upstream scheduler passes pod.Name as binding.PodID
	ctx := api.WithNamespace(api.NewDefaultContext(), pod.Namespace)
	podKey, err := podtask.MakePodKey(ctx, pod.Name)
	if err != nil {
		log.Errorf("Failed to construct pod key, aborting scheduling for pod %v: %v", pod.Name, err)
		return
	}
	k.backoff.GC()
	k.api.Lock()
	defer k.api.Unlock()
	switch task, state := k.api.tasks().ForPod(podKey); state {
	case podtask.StateUnknown:
		// if we don't have a mapping here any more then someone deleted the pod
		log.V(2).Infof("Could not resolve pod to task, aborting pod reschedule: %s", podKey)
		return
	case podtask.StatePending:
		if task.Has(podtask.Launched) {
			log.V(2).Infof("Skipping re-scheduling for already-launched pod %v", podKey)
			return
		}
		breakoutEarly := queue.BreakChan(nil)
		if schedulingErr == noSuitableOffersErr {
			log.V(3).Infof("adding backoff breakout handler for pod %v", podKey)
			breakoutEarly = queue.BreakChan(k.api.offers().Listen(podKey, func(offer *mesos.Offer) bool {
				k.api.Lock()
				defer k.api.Unlock()
				switch task, state := k.api.tasks().Get(task.ID); state {
				case podtask.StatePending:
					return !task.Has(podtask.Launched) && k.api.algorithm().FitPredicate()(task, offer)
				default:
					// no point in continuing to check for matching offers
					return true
				}
			}))
		}
		delay := k.backoff.Get(podKey)
		log.V(3).Infof("requeuing pod %v with delay %v", podKey, delay)
		k.qr.requeue(&Pod{Pod: pod, delay: &delay, notify: breakoutEarly})
	default:
		log.V(2).Infof("Task is no longer pending, aborting reschedule for pod %v", podKey)
	}
}
Developer ID: qinguoan, Project: vulcan, Lines: 57, Source: plugin.go
Example 8: ignoreReceives
// ignoreReceives reads from a WebSocket until it is closed, then returns. If timeout is set, the
// read and write deadlines are pushed every time a new message is received.
func ignoreReceives(ws *websocket.Conn, timeout time.Duration) {
	defer util.HandleCrash()
	var data []byte
	for {
		resetTimeout(ws, timeout)
		if err := websocket.Message.Receive(ws, &data); err != nil {
			return
		}
	}
}
Developer ID: qinguoan, Project: vulcan, Lines: 12, Source: conn.go
Example 9: detachDiskAndVerify
// Detaches the specified persistent disk device from node, verifies that it is detached, and retries if it fails.
// This function is intended to be called asynchronously as a go routine.
func detachDiskAndVerify(c *gcePersistentDiskCleaner) {
	glog.V(5).Infof("detachDiskAndVerify(...) for pd %q. Will block for pending operations", c.pdName)
	defer util.HandleCrash()
	// Block execution until any pending attach/detach operations for this PD have completed
	attachDetachMutex.LockKey(c.pdName)
	defer attachDetachMutex.UnlockKey(c.pdName)
	glog.V(5).Infof("detachDiskAndVerify(...) for pd %q. Awake and ready to execute.", c.pdName)
	devicePaths := getDiskByIdPaths(c.gcePersistentDisk)
	var gceCloud *gce_cloud.GCECloud
	for numRetries := 0; numRetries < maxRetries; numRetries++ {
		var err error
		if gceCloud == nil {
			gceCloud, err = getCloudProvider()
			if err != nil || gceCloud == nil {
				// Retry on error. See issue #11321
				glog.Errorf("Error getting GCECloudProvider while detaching PD %q: %v", c.pdName, err)
				time.Sleep(errorSleepDuration)
				continue
			}
		}
		if numRetries > 0 {
			glog.Warningf("Retrying detach for GCE PD %q (retry count=%v).", c.pdName, numRetries)
		}
		if err := gceCloud.DetachDisk(c.pdName); err != nil {
			glog.Errorf("Error detaching PD %q: %v", c.pdName, err)
			time.Sleep(errorSleepDuration)
			continue
		}
		for numChecks := 0; numChecks < maxChecks; numChecks++ {
			allPathsRemoved, err := verifyAllPathsRemoved(devicePaths)
			if err != nil {
				// Log error, if any, and continue checking periodically.
				glog.Errorf("Error verifying GCE PD (%q) is detached: %v", c.pdName, err)
			} else if allPathsRemoved {
				// All paths to the PD have been successfully removed
				unmountPDAndRemoveGlobalPath(c)
				glog.Infof("Successfully detached GCE PD %q.", c.pdName)
				return
			}
			// Sleep then check again
			glog.V(3).Infof("Waiting for GCE PD %q to detach.", c.pdName)
			time.Sleep(checkSleepDuration)
		}
	}
	glog.Errorf("Failed to detach GCE PD %q. One or more mount paths was not removed.", c.pdName)
}
Developer ID: qinguoan, Project: vulcan, Lines: 57, Source: gce_util.go
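The detach logic above follows a common retry shape: a bounded outer retry loop with sleeps between failed attempts, plus a bounded inner verification loop. A generic sketch of that shape (the helper, names, and durations are illustrative, not part of the packages shown above):
package main

import (
	"errors"
	"fmt"
	"time"
)

// retry runs op up to maxRetries times, sleeping between failed attempts,
// and returns the last error if every attempt fails.
func retry(maxRetries int, sleep time.Duration, op func() error) error {
	var err error
	for i := 0; i < maxRetries; i++ {
		if i > 0 {
			fmt.Printf("retrying (attempt %d)\n", i+1)
			time.Sleep(sleep)
		}
		if err = op(); err == nil {
			return nil
		}
	}
	return err
}

func main() {
	attempts := 0
	err := retry(5, 10*time.Millisecond, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("not detached yet")
		}
		return nil
	})
	fmt.Println("result:", err, "after", attempts, "attempts")
}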
Example 10: Run
// Run the main goroutine responsible for watching and syncing jobs.
func (jm *JobController) Run(workers int, stopCh <-chan struct{}) {
	defer util.HandleCrash()
	go jm.jobController.Run(stopCh)
	go jm.podController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go util.Until(jm.worker, time.Second, stopCh)
	}
	<-stopCh
	glog.Infof("Shutting down Job Manager")
	jm.queue.ShutDown()
}
Developer ID: qinguoan, Project: vulcan, Lines: 12, Source: controller.go
Example 11: Run
// Run begins watching and syncing.
func (rm *ReplicationManager) Run(workers int, stopCh <-chan struct{}) {
	defer util.HandleCrash()
	go rm.rcController.Run(stopCh)
	go rm.podController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go util.Until(rm.worker, time.Second, stopCh)
	}
	<-stopCh
	glog.Infof("Shutting down RC Manager")
	rm.queue.ShutDown()
}
Developer ID: qinguoan, Project: vulcan, Lines: 12, Source: replication_controller.go
Example 12: Open
// Open the connection and create channels for reading and writing.
func (conn *Conn) Open(w http.ResponseWriter, req *http.Request) ([]io.ReadWriteCloser, error) {
	go func() {
		defer util.HandleCrash()
		defer conn.Close()
		websocket.Server{Handshake: conn.handshake, Handler: conn.handle}.ServeHTTP(w, req)
	}()
	<-conn.ready
	rwc := make([]io.ReadWriteCloser, len(conn.channels))
	for i := range conn.channels {
		rwc[i] = conn.channels[i]
	}
	return rwc, nil
}
Developer ID: qinguoan, Project: vulcan, Lines: 14, Source: conn.go
Example 13: UpdatePod
// Apply the new setting to the specified pod. updateComplete is called when the update is completed.
func (p *podWorkers) UpdatePod(pod *api.Pod, mirrorPod *api.Pod, updateComplete func()) {
	uid := pod.UID
	var podUpdates chan workUpdate
	var exists bool
	// TODO: Pipe this through from the kubelet. Currently kubelets operating with
	// snapshot updates (PodConfigNotificationSnapshot) will send updates, creates
	// and deletes as SET operations, which makes updates indistinguishable from
	// creates. The intent here is to communicate to the pod worker that it can take
	// certain liberties, like skipping status generation, when it receives a create
	// event for a pod.
	updateType := SyncPodUpdate
	p.podLock.Lock()
	defer p.podLock.Unlock()
	if podUpdates, exists = p.podUpdates[uid]; !exists {
		// We need to have a buffer here, because checkForUpdates() method that
		// puts an update into channel is called from the same goroutine where
		// the channel is consumed. However, it is guaranteed that in such case
		// the channel is empty, so buffer of size 1 is enough.
		podUpdates = make(chan workUpdate, 1)
		p.podUpdates[uid] = podUpdates
		// Creating a new pod worker either means this is a new pod, or that the
		// kubelet just restarted. In either case the kubelet is willing to believe
		// the status of the pod for the first pod worker sync. See corresponding
		// comment in syncPod.
		updateType = SyncPodCreate
		go func() {
			defer util.HandleCrash()
			p.managePodLoop(podUpdates)
		}()
	}
	if !p.isWorking[pod.UID] {
		p.isWorking[pod.UID] = true
		podUpdates <- workUpdate{
			pod:              pod,
			mirrorPod:        mirrorPod,
			updateCompleteFn: updateComplete,
			updateType:       updateType,
		}
	} else {
		p.lastUndeliveredWorkUpdate[pod.UID] = workUpdate{
			pod:              pod,
			mirrorPod:        mirrorPod,
			updateCompleteFn: updateComplete,
			updateType:       updateType,
		}
	}
}
Developer ID: qinguoan, Project: vulcan, Lines: 51, Source: pod_workers.go
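Two details here are worth calling out: the per-pod update channel has buffer size 1, and while the worker is busy only the most recent undelivered update is kept, so bursts of updates coalesce. A compacted, single-worker sketch of that pattern (all names are hypothetical, not the kubelet's types):
package main

import (
	"fmt"
	"sync"
	"time"
)

// worker models the pod-worker trick: a buffer-1 channel plus a single
// "pending" slot that coalesces bursts into at most one queued update.
type worker struct {
	mu      sync.Mutex
	updates chan string
	busy    bool
	pending string
	hasPend bool
}

func (w *worker) send(u string) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if !w.busy {
		w.busy = true
		w.updates <- u // never blocks: buffer 1 and the worker is idle
	} else {
		w.pending, w.hasPend = u, true // overwrite older undelivered work
	}
}

func (w *worker) loop() {
	for u := range w.updates {
		fmt.Println("syncing:", u)
		time.Sleep(10 * time.Millisecond)
		w.mu.Lock()
		if w.hasPend {
			w.updates <- w.pending // forward the coalesced update
			w.hasPend = false
		} else {
			w.busy = false
		}
		w.mu.Unlock()
	}
}

func main() {
	w := &worker{updates: make(chan string, 1)}
	go w.loop()
	for i := 1; i <= 5; i++ {
		w.send(fmt.Sprintf("update-%d", i)) // updates 2-4 are coalesced away
	}
	time.Sleep(100 * time.Millisecond)
}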
Example 14: doShutdown
// assumes that caller has obtained state lock
func (k *KubernetesExecutor) doShutdown(driver bindings.ExecutorDriver) {
	defer func() {
		log.Errorf("exiting with unclean shutdown: %v", recover())
		if k.exitFunc != nil {
			k.exitFunc(1)
		}
	}()
	(&k.state).transitionTo(terminalState)
	// signal to all listeners that this KubeletExecutor is done!
	close(k.done)
	if k.shutdownAlert != nil {
		func() {
			// note: HandleCrash only recovers a panic when deferred; called
			// directly like this it is a no-op, and a panicking shutdownAlert
			// is instead caught by the deferred recover() at the top.
			util.HandleCrash()
			k.shutdownAlert()
		}()
	}
	log.Infoln("Stopping executor driver")
	_, err := driver.Stop()
	if err != nil {
		log.Warningf("failed to stop executor driver: %v", err)
	}
	log.Infoln("Shutdown the executor")
	// according to docs, mesos will generate TASK_LOST updates for us
	// if needed, so don't take extra time to do that here.
	k.tasks = map[string]*kuberTask{}
	select {
	// the main Run() func may still be running... wait for it to finish: it will
	// clear the pod configuration cleanly, telling k8s "there are no pods" and
	// clean up resources (pods, volumes, etc).
	case <-k.kubeletFinished:
		//TODO(jdef) attempt to wait for events to propagate to API server?
		// TODO(jdef) extract constant, should be smaller than whatever the
		// slave graceful shutdown timeout period is.
	case <-time.After(15 * time.Second):
		log.Errorf("timed out waiting for kubelet Run() to die")
	}
	log.Infoln("exiting")
	if k.exitFunc != nil {
		k.exitFunc(0)
	}
}
Developer ID: qinguoan, Project: vulcan, Lines: 51, Source: executor.go
Example 15: stateRun
func stateRun(ps *processState, a *scheduledAction) stateFn {
	// it's only possible to ever receive this once because we transition
	// to state "shutdown", permanently
	if a == nil {
		ps.shutdown()
		return stateShutdown
	}
	close(a.errCh) // signal that action was scheduled
	func() {
		// we don't trust clients of this package
		defer util.HandleCrash()
		a.action()
	}()
	return stateRun
}
Developer ID: qinguoan, Project: vulcan, Lines: 16, Source: proc.go
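The anonymous func()/defer pair here is the standard way to sandbox a single untrusted callback: the deferred crash handler confines any panic to that one call without aborting the surrounding state machine. A minimal, self-contained sketch of the same idea (using a plain recover in place of util.HandleCrash):
package main

import "log"

// safely runs an untrusted callback, confining its panic to this call.
func safely(action func()) {
	defer func() {
		if r := recover(); r != nil {
			log.Printf("callback panicked: %v", r)
		}
	}()
	action()
}

func main() {
	safely(func() { panic("untrusted code blew up") })
	log.Println("state machine keeps running")
}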
Example 16: etcdWatch
// etcdWatch calls etcd's Watch function, and handles any errors. Meant to be called
// as a goroutine.
func (w *etcdWatcher) etcdWatch(client tools.EtcdClient, key string, resourceVersion uint64) {
	defer util.HandleCrash()
	defer close(w.etcdError)
	if resourceVersion == 0 {
		latest, err := etcdGetInitialWatchState(client, key, w.list, w.etcdIncoming)
		if err != nil {
			w.etcdError <- err
			return
		}
		resourceVersion = latest + 1
	}
	_, err := client.Watch(key, resourceVersion, w.list, w.etcdIncoming, w.etcdStop)
	if err != nil && err != etcd.ErrWatchStoppedByUser {
		w.etcdError <- err
	}
}
Developer ID: qinguoan, Project: vulcan, Lines: 18, Source: etcd_watcher.go
Example 17: exportHTTP
func (cc *cadvisorClient) exportHTTP(port uint) error {
	// Register the handlers regardless as this registers the prometheus
	// collector properly.
	mux := http.NewServeMux()
	err := cadvisorHttp.RegisterHandlers(mux, cc, "", "", "", "")
	if err != nil {
		return err
	}
	re := regexp.MustCompile(`^k8s_(?P<kubernetes_container_name>[^_\.]+)[^_]+_(?P<kubernetes_pod_name>[^_]+)_(?P<kubernetes_namespace>[^_]+)`)
	reCaptureNames := re.SubexpNames()
	cadvisorHttp.RegisterPrometheusHandler(mux, cc, "/metrics", func(name string) map[string]string {
		extraLabels := map[string]string{}
		matches := re.FindStringSubmatch(name)
		for i, match := range matches {
			if len(reCaptureNames[i]) > 0 {
				extraLabels[re.SubexpNames()[i]] = match
			}
		}
		return extraLabels
	})
	// Only start the http server if port > 0
	if port > 0 {
		serv := &http.Server{
			Addr:    fmt.Sprintf(":%d", port),
			Handler: mux,
		}
		// TODO(vmarmol): Remove this when the cAdvisor port is once again free.
		// If export failed, retry in the background until we are able to bind.
		// This allows an existing cAdvisor to be killed before this one registers.
		go func() {
			defer util.HandleCrash()
			err := serv.ListenAndServe()
			for err != nil {
				glog.Infof("Failed to register cAdvisor on port %d, retrying. Error: %v", port, err)
				time.Sleep(time.Minute)
				err = serv.ListenAndServe()
			}
		}()
	}
	return nil
}
Developer ID: qinguoan, Project: vulcan, Lines: 46, Source: cadvisor_linux.go
Example 18: Run
// runs the main kubelet loop, closing the kubeletFinished chan when the loop exits.
// never returns.
func (kl *kubeletExecutor) Run(updates <-chan kubelet.PodUpdate) {
	defer func() {
		close(kl.kubeletFinished)
		util.HandleCrash()
		log.Infoln("kubelet run terminated") //TODO(jdef) turn down verbosity
		// important: never return! this is in our contract
		select {}
	}()
	// push updates through a closable pipe. when the executor indicates shutdown
	// via Done() we want to stop the Kubelet from processing updates.
	pipe := make(chan kubelet.PodUpdate)
	go func() {
		// closing pipe will cause our patched kubelet's syncLoop() to exit
		defer close(pipe)
	pipeLoop:
		for {
			select {
			case <-kl.executorDone:
				break pipeLoop
			default:
				select {
				case u := <-updates:
					select {
					case pipe <- u: // noop
					case <-kl.executorDone:
						break pipeLoop
					}
				case <-kl.executorDone:
					break pipeLoop
				}
			}
		}
	}()
	// we expect that Run() will complete after the pipe is closed and the
	// kubelet's syncLoop() has finished processing its backlog, which hopefully
	// will not take very long. Peeking into the future (current k8s master) it
	// seems that the backlog has grown from 1 to 50 -- this may negatively impact
	// us going forward, time will tell.
	util.Until(func() { kl.Kubelet.Run(pipe) }, 0, kl.executorDone)
	//TODO(jdef) revisit this if/when executor failover lands
	// Force kubelet to delete all pods.
	kl.HandlePodDeletions(kl.GetPods())
}
Developer ID: qinguoan, Project: vulcan, Lines: 48, Source: service.go
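The nested selects in the pipe goroutine implement a cancellable forwarder: every receive and every send is raced against a done channel, so shutdown can never deadlock on a blocked channel operation. A generic sketch of the same shape (names illustrative):
package main

import "fmt"

// forward copies values from in to out until done closes or in is exhausted,
// racing each receive and send against done so it can always be cancelled.
func forward(in <-chan int, out chan<- int, done <-chan struct{}) {
	defer close(out) // unblocks the downstream consumer, like close(pipe) above
	for {
		select {
		case <-done:
			return
		case v, ok := <-in:
			if !ok {
				return
			}
			select {
			case out <- v:
			case <-done:
				return
			}
		}
	}
}

func main() {
	in := make(chan int)
	out := make(chan int)
	done := make(chan struct{})
	go forward(in, out, done)
	go func() {
		for i := 1; i <= 3; i++ {
			in <- i
		}
		close(in)
	}()
	for v := range out {
		fmt.Println("forwarded:", v)
	}
	close(done) // harmless here; in real code this is the shutdown signal
}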
Example 19: Run
// Run begins processing items, and will continue until a value is sent down stopCh.
// It's an error to call Run more than once.
// Run blocks; call via go.
func (c *Controller) Run(stopCh <-chan struct{}) {
	defer util.HandleCrash()
	r := cache.NewReflector(
		c.config.ListerWatcher,
		c.config.ObjectType,
		c.config.Queue,
		c.config.FullResyncPeriod,
	)
	c.reflectorMutex.Lock()
	c.reflector = r
	c.reflectorMutex.Unlock()
	r.RunUntil(stopCh)
	util.Until(c.processLoop, time.Second, stopCh)
}
Developer ID: qinguoan, Project: vulcan, Lines: 20, Source: controller.go
Example 20: getOrExpire
// getOrExpire retrieves the object from the timestampedEntry if and only if it hasn't
// already expired. It kicks-off a go routine to delete expired objects from
// the store and sets exists=false.
func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) {
	timestampedItem, exists := c.getTimestampedEntry(key)
	if !exists {
		return nil, false
	}
	if c.expirationPolicy.IsExpired(timestampedItem) {
		glog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj)
		// Since expiration happens lazily on read, don't hold up
		// the reader trying to acquire a write lock for the delete.
		// The next reader will retry the delete even if this one
		// fails; as long as we only return un-expired entries a
		// reader doesn't need to wait for the result of the delete.
		go func() {
			defer util.HandleCrash()
			c.cacheStorage.Delete(key)
		}()
		return nil, false
	}
	return timestampedItem.obj, true
}
Developer ID: qinguoan, Project: vulcan, Lines: 23, Source: expiration_cache.go
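The pattern here — check expiry lazily on read, and hand the actual delete off to a goroutine so the reader never waits on a write lock — generalizes to any read-mostly TTL cache. A minimal sketch under that assumption (a simple time-stamped map, not the actual ExpirationCache internals):
package main

import (
	"fmt"
	"sync"
	"time"
)

type entry struct {
	obj     interface{}
	created time.Time
}

type ttlCache struct {
	mu  sync.RWMutex
	ttl time.Duration
	m   map[string]entry
}

// get returns the value only if it hasn't expired; expired entries are
// deleted in the background so readers never block on the write lock.
func (c *ttlCache) get(key string) (interface{}, bool) {
	c.mu.RLock()
	e, ok := c.m[key]
	c.mu.RUnlock()
	if !ok {
		return nil, false
	}
	if time.Since(e.created) > c.ttl {
		go func() { // lazy delete, off the reader's critical path
			c.mu.Lock()
			defer c.mu.Unlock()
			delete(c.m, key)
		}()
		return nil, false
	}
	return e.obj, true
}

func (c *ttlCache) set(key string, obj interface{}) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.m[key] = entry{obj: obj, created: time.Now()}
}

func main() {
	c := &ttlCache{ttl: 50 * time.Millisecond, m: map[string]entry{}}
	c.set("a", 1)
	fmt.Println(c.get("a")) // 1 true
	time.Sleep(60 * time.Millisecond)
	fmt.Println(c.get("a")) // <nil> false (delete happens in the background)
}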
Note: the vulcan/kubernetes/pkg/util.HandleCrash examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to its original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.