Python pylab.waitforbuttonpress Function Code Examples


This article collects typical usage examples of the pylab.waitforbuttonpress function in Python. If you are wondering how waitforbuttonpress works, how to call it, or what real-world uses look like, the selected code examples below should help.



Twenty code examples of the waitforbuttonpress function are shown below, sorted by popularity by default. You can upvote the examples you find useful; this feedback helps surface better Python code samples.
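
Before diving into the examples, a quick note on the function itself: pylab.waitforbuttonpress(timeout=-1) blocks until a key or mouse button is pressed and returns True for a key press, False for a mouse click, and None if the timeout expires first. The snippet below is a minimal illustration of both calling styles (it assumes an interactive matplotlib backend; the plotted data is just a placeholder):

import numpy as np
import pylab

# Placeholder figure so there is something to interact with.
pylab.plot(np.sin(np.linspace(0, 10, 200)))
pylab.title("Press a key or click the mouse")
pylab.show(block=False)

# Blocking call: waits indefinitely (timeout=-1 is the default)
# and reports what kind of input ended the wait.
pressed_key = pylab.waitforbuttonpress()
print("key pressed" if pressed_key else "mouse button pressed")

# Polling call: a short timeout gives the GUI event loop time to run
# and returns None when no input arrives before it expires.
if pylab.waitforbuttonpress(timeout=0.05) is None:
    print("no input within 0.05 seconds")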

Example 1: getCoordinate

def getCoordinate(direction='both',axh=None,fig=None):
  """Tool for selecting a coordinate, functionality similar to ginput for a single point. Finish with right mouse button."""
  if not axh:
    axh = pl.gca()
  if not fig: fig = pl.gcf()
  hor = direction in ('horizontal', 'hor', 'both')
  ver = direction in ('vertical', 'ver', 'both')

  finished = [False]  # list so the nested callback can set the flag seen by the loop below
  def button_press_callback(event):
    if event.inaxes and event.button == 3:
      finished[0] = True
  fig.canvas.mpl_connect('button_press_event', button_press_callback)
  print("Select a coordinate, finish with right click.")
  linh = []
  while not finished[0]:
    for tlinh in linh:
      tlinh.remove()
    linh = []
    pl.draw()
    pos = pl.ginput(1)[0]
    if hor:
      linh.append(pl.axvline(pos[0]))
    if ver:
      linh.append(pl.axhline(pos[1]))
    pl.draw()
    pl.waitforbuttonpress()

  
  fig.canvas.draw()
  return pos
Developer: htlemke, Project: ixppy, Lines of code: 35, Source file: toolsPlot.py


Example 2: once

def once():
    global depth, rgb
    preview.canvas.SetCurrent()

    opennpy.sync_update()
    depth,_ = opennpy.sync_get_depth()
    rgb,_ = opennpy.sync_get_video()

    main.update_frame(depth, rgb)

    blockdraw.clear()
    #blockdraw.show_grid('o1', main.occvac.occ, color=np.array([1,1,0,1]))
    if 'RGB' in stencil.__dict__:
        blockdraw.show_grid('occ', grid.occ, color=grid.color)
    else:
        blockdraw.show_grid('occ', grid.occ, color=np.array([1,0.6,0.6,1]))

    preview.clearcolor=[0,0,0,0]
    preview.flag_drawgrid = True

    if 'R_correct' in main.__dict__:
        preview.modelmat = main.R_display
    else:
        preview.modelmat = main.R_aligned

    preview.Refresh()
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
Developer: amiller, Project: projective_stereo, Lines of code: 28, Source file: demo_blockprojector.py


Example 3: once

def once():
    global depth, rgb
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
        rgb = dataset.rgb
    else:
        opennpy.sync_update()
        depth,_ = opennpy.sync_get_depth()
        rgb,_ = opennpy.sync_get_video()

    def from_rect(m,rect):
        (l,t),(r,b) = rect
        return m[t:b,l:r]

    global mask, rect, modelmat

    try:
        (mask,rect) = preprocess.threshold_and_mask(depth,config.bg)
    except IndexError:
        return

    cv.ShowImage('mask',mask.astype('u1')*255)

    global label_image
    label_image = classify.predict(depth)
    cv.ShowImage('label_image', ((label_image[0]+1)*100*mask).astype('u1'))
    pylab.waitforbuttonpress(0.03)
Developer: theY4Kman, Project: blockplayer, Lines of code: 28, Source file: demo_classify.py


Example 4: once

def once():
    global depth, rgb
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
        rgb = dataset.rgb
    else:
        opennpy.sync_update()
        depth,_ = opennpy.sync_get_depth()
        rgb,_ = opennpy.sync_get_video()

    main.update_frame(depth, rgb)

    blockdraw.clear()
    if 'RGB' in stencil.__dict__:
        blockdraw.show_grid('occ', grid.occ, color=grid.color)
    else:
        blockdraw.show_grid('occ', grid.occ, color=np.array([1,0.6,0.6,1]))

    window.clearcolor=[0,0,0,0]
    window.flag_drawgrid = True

    if 'R_correct' in main.__dict__:
        window.modelmat = main.R_display
    
    g = blockcraft.translated_rotated(main.R_correct, grid.occ)
    talk_to_minecraft(g)
    
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
    import sys
    sys.stdout.flush()
Developer: theY4Kman, Project: blockplayer, Lines of code: 32, Source file: demo_blockcraft.py


Example 5: animate_random

def animate_random(max_iters=1000, mod=100):
    global pnew, points_range
    # Apply a perturb to points_p
    obj.RT = np.eye(4, dtype='f')
    obj.RT[:3,3] = -obj.vertices[:,:3].mean(0)
    obj.RT[:3,3] += [0,0,-3.0]
    RT = obj.RT

    prev_rimg = obj.range_render(camera.kinect_camera())
    window.canvas.SetCurrent()
    pnew = prev_rimg.point_model(True)
    points_range = pnew

    if 0:
        obj.RT = np.dot(RT, M)
        rimg = obj.range_render(camera.kinect_camera())
        window.canvas.SetCurrent()
        pm = rimg.point_model(True)
        points_range = pm

        for iters in range(max_iters):
            pnew, err, npairs, uv = fasticp.fast_icp(rimg, pnew, 1000, dist=0.005)
            if iters % mod == 0:
                # print '%d iterations, [%d] RMS: %.3f' % (iters, npairs, np.sqrt(err))
                window.Refresh()
                pylab.waitforbuttonpress(0.02)

        pnew = pm

        window.Refresh()
        pylab.waitforbuttonpress(0.02)        
Developer: amiller, Project: rtmodel, Lines of code: 31, Source file: demo_icp.py


Example 6: perturb

def perturb(max_iters=100, mod=10):
    global pnew, uv, err, points_range, rimg, range_image

    # Apply a perturb to points_p
    obj.RT = np.eye(4, dtype='f')
    obj.RT[:3,3] = -obj.vertices[:,:3].mean(0)
    obj.RT[:3,3] += [0,0,-3.0]

    # Rotated object view
    RT = obj.RT
    rp = random_perturbation().astype('f')
    obj.RT = np.dot(rp, obj.RT)
    range_image = obj.range_render(camera.kinect_camera())
    obj.RT = RT

    points_range = range_image.point_model(True)

    # Original object view
    rimg = obj.range_render(camera.kinect_camera())
    pnew = rimg.point_model()

    # Estimate the transformation rp

    for iters in range(max_iters):
        npairs, pnew = model.align(range_image, pnew, rtmodel.RATE1, rtmodel.DIST1, 6)
        #pnew, err, npairs, uv = fasticp.fast_icp(range_image, pnew, 0.1, dist=0.05)
        if iters % mod == 0 or 1:
            #print '%d iterations, [%d] RMS: %.3f' % (iters, npairs, np.sqrt(err))
            window.Refresh()
            pylab.waitforbuttonpress(0.02)
            break

    window.Refresh()
Developer: amiller, Project: rtmodel, Lines of code: 33, Source file: demo_icp.py


Example 7: error

 def error(x):
     theta, dist = x
     line = middle_offset(theta, dist, size)
     s =  1./(d.score(line, True) + 1e-5)
     clf()
     imshow(d.debug * d.image)
     pylab.waitforbuttonpress(0.01)
     return s
Developer: amiller, Project: dividingline, Lines of code: 8, Source file: demo_dividingline.py


Example 8: show_depth

def show_depth(name, depth):
    #im = cv.CvreateImage((depth.shape[1],depth.shape[0]), 8, 3)
    #cv.SetData(im, colormap.color_map(depth/2))
    #cv.ShowImage(name, im)
    #cv2.imshow(name, colormap.color_map(depth/2))
    cv2.imshow(name, 1024./depth)
    #pylab.imshow(colormap.color_map(depth))
    pylab.waitforbuttonpress(0.005)
Developer: amiller, Project: quartet, Lines of code: 8, Source file: record.py


Example 9: testing

    def testing(self, testFace, visualiseInfo=None):
        # Returns the predictive mean, the predictive variance and the axis (pp) of the latent space backwards mapping.            
        ret = self.SAMObject.pattern_completion(testFace, visualiseInfo=visualiseInfo)
        mm = ret[0]
        vv = ret[1]
        post = ret[3]        
        # find nearest neighbour of mm and SAMObject.model.X
        dists = numpy.zeros((self.SAMObject.model.X.shape[0],1))

        facePredictionBottle = yarp.Bottle()
    
        for j in range(dists.shape[0]):
            dists[j,:] = distance.euclidean(self.SAMObject.model.X.mean[j,:], mm[0].values)
            print "Dist: " + str(testFace.shape)
        nn, min_value = min(enumerate(dists), key=operator.itemgetter(1))
        if self.SAMObject.type == 'mrd':
            ret_y = self.SAMObject.model.bgplvms[1]._raw_predict(post.X)
            vv_y = ret_y[1]
            print "With " + str(vv.mean()) + "(" + str(vv_y) + ")" +" prob. error the new image is " + self.participant_index[int(self.SAMObject.model.bgplvms[1].Y[nn,:])]
            textStringOut=self.participant_index[int(self.SAMObject.model.bgplvms[1].Y[nn,:])]

        elif self.SAMObject.type == 'bgplvm':
            print "With " + str(vv.mean()) +" prob. error the new image is " + self.participant_index[int(self.L[nn,:])]
            textStringOut=self.participant_index[int(self.L[nn,:])]
        if (vv.mean()<0.00012):
            choice=numpy.random.randint(4)
            if (choice==0):
                 facePredictionBottle.addString("Hello " + textStringOut)
            elif(choice==1):
                 facePredictionBottle.addString("I am watching you " + textStringOut)
            elif(choice==2):
                 facePredictionBottle.addString(textStringOut + " could you move a little you are blocking my view of the outside")
            else:
                 facePredictionBottle.addString(textStringOut + " will you be my friend")                  
            # Otherwise ask for updated name... (TODO: add in updated name)
        else:
            facePredictionBottle.addString("I think you are " + textStringOut + " but I am not sure, please confirm?")        
     
        # Plot the training NN of the test image (the NN is found in the INTERNAl, compressed (latent) memory space!!!)
        if visualiseInfo is not None:
            fig_nn = visualiseInfo['fig_nn']
            fig_nn = pb.figure(11)
            pb.title('Training NN')
            fig_nn.clf()
            pl_nn = fig_nn.add_subplot(111)
            pl_nn.imshow(numpy.reshape(self.SAMObject.recall(nn),(self.imgHeightNew, self.imgWidthNew)), cmap=plt.cm.Greys_r)
            pb.title('Training NN')
            pb.show()
            pb.draw()
            pb.waitforbuttonpress(0.1)
            
        self.speakStatusPort.write(self.speakStatusOutBottle, self.speakStatusInBottle)

        if( self.speakStatusInBottle.get(0).asString() == "quiet"):
            self.outputFacePrection.write(facePredictionBottle)

        facePredictionBottle.clear()
Developer: GunnyPong, Project: wysiwyd, Lines of code: 57, Source file: SAMDriver_faces.py


Example 10: once

def once():
    dataset.advance()
    depthL, depthR = dataset.depthL, dataset.depthR
    maskL, rectL = preprocess.threshold_and_mask(depthL, config.bgL)
    maskR, rectR = preprocess.threshold_and_mask(depthR, config.bgR)
    show_mask("maskL", maskL.astype("f"), rectL)
    show_mask("maskR", maskR.astype("f"), rectR)

    pylab.waitforbuttonpress(0.01)
Developer: amiller, Project: blockplayer, Lines of code: 9, Source file: demo_preprocess.py


Example 11: check_dataset

def check_dataset(dataset, labels, label_map, index):
    data = np.uint8(dataset[index]).reshape((32, 32))
    i = np.argwhere(labels[index] == 1)[0][0]
    import matplotlib.pyplot as plt  # im.show may not be implemented
                                     #  in opencv-python on Tk GUI (such as Linux)
    import pylab
    plt.ion()
    plt.imshow(data)
    pylab.waitforbuttonpress(timeout=5)
    print("label:", label_map[i])
Developer: nladuo, Project: captcha-break, Lines of code: 10, Source file: check_dataset.py


Example 12: animate

def animate():
    while True:

        line = random_middle_line()
        d = DividingLine(synthetic_image(line=line))
        d.traverse(line, True)
        #d.traverse_np(line, True)
        pylab.clf()
        pylab.imshow(d.debug)
        pylab.waitforbuttonpress(0.01)
Developer: amiller, Project: dividingline, Lines of code: 10, Source file: demo_dividingline.py


Example 13: sample_rays

def sample_rays(n_rays=10000, reset=False):
    global paths
    global total_rays
    global line_verts, line_colors

    if reset or not 'line_verts' in globals():
        paths = []
        total_rays = 0
        line_verts = np.empty((0,3),'f')
        line_colors = np.empty((0,3),'f')
    total_rays += n_rays
    line_verts_ = []
    line_colors_ = []

    ps = mycybvh.sample_rays(source, sink, sinkrad, n_rays, ROULETTE)
    keys = ['source','sink','diverge','scaflect']

    for path in ps:
        p_ = []
        x1 = path[0]['origin']
        x1 = x1['x'], x1['y'], x1['z']
        orgn = True
        for p in path[1:]:
            o = p['origin']
            d = p['direction']
            ntype = keys[p['ntype']]
            cumdist = p['cumdist']
            origin = o['x'],o['y'],o['z']
            direction = d['x'],d['y'],d['z']
            x2 = origin
            if ntype == 'sink':
                line_colors_ += 2*((1,.6,.6),)
            elif orgn:
                line_colors_ += 2*((.6,.6,1),)
                orgn=False
            else:
                line_colors_ += 2*((1,1,1),)
            x2 = origin
            line_verts_.append(x1)
            line_verts_.append(x2)
            x1 = x2
            p_.append((origin, direction, ntype, cumdist))
        paths.append(p_)
    if line_colors_:
        line_colors = np.vstack((line_colors, np.array(line_colors_,'f')))
        line_verts = np.vstack((line_verts, np.array(line_verts_,'f')))
    window.Refresh()

    pylab.clf();
    times, pressure = energy_contributions()
    pylab.hist(times,weights=pressure,bins=100, range=(0,0.2))
    pylab.waitforbuttonpress(0.03)
    update_filter()
Developer: amiller, Project: mesh, Lines of code: 53, Source file: demo_walking.py


Example 14: testing

    def testing(self, testFace, choice, visualiseInfo=None):
        # Returns the predictive mean, the predictive variance and the axis (pp) of the latent space backwards mapping.            
        #mm,vv,pp=self.SAMObject.pattern_completion(testFace, visualiseInfo=visualiseInfo)

        ret=self.SAMObject.pattern_completion(testFace, visualiseInfo=visualiseInfo)
         
        mm = ret[0]
        vv = ret[1]
        post = ret[3]        

        # find nearest neighbour of mm and SAMObject.model.X
        dists = numpy.zeros((self.SAMObject.model.X.shape[0],1))

        facePredictionBottle = yarp.Bottle()
    
        for j in range(dists.shape[0]):
            dists[j,:] = distance.euclidean(self.SAMObject.model.X.mean[j,:], mm[0].values)
        nn, min_value = min(enumerate(dists), key=operator.itemgetter(1))
        if self.SAMObject.type == 'mrd':
            print "With " + str(vv.mean()) +" prob. error the new image is " + self.participant_index[int(self.SAMObject.model.bgplvms[1].Y[nn,:])]
            textStringOut=self.participant_index[int(self.SAMObject.model.bgplvms[1].Y[nn,:])]

        elif self.SAMObject.type == 'bgplvm':
            print "With " + str(vv.mean()) +" prob. error the new image is " + self.participant_index[int(self.L[nn,:])]
            textStringOut=self.participant_index[int(self.L[nn,:])]
        if(choice.get(0).asInt() == 16 and vv.mean()<0.00012):            
            facePredictionBottle.addString("You are " + textStringOut)
        elif(choice.get(0).asInt() == 16 and vv.mean()>0.00012):
            facePredictionBottle.addString("I think you are " + textStringOut + " but I am not sure, please confirm?")        
     
        # Plot the training NN of the test image (the NN is found in the INTERNAl, compressed (latent) memory space!!!)
        if visualiseInfo is not None:
            fig_nn = visualiseInfo['fig_nn']
            fig_nn = pb.figure(11)
            pb.title('Training NN')
            fig_nn.clf()
            pl_nn = fig_nn.add_subplot(111)
            pl_nn.imshow(numpy.reshape(self.SAMObject.recall(nn),(self.imgHeightNew, self.imgWidthNew)), cmap=plt.cm.Greys_r)
            pb.title('Training NN')
            pb.show()
            pb.draw()
            pb.waitforbuttonpress(0.1)
            
        self.speakStatusPort.write(self.speakStatusOutBottle, self.speakStatusInBottle)

        if( self.speakStatusInBottle.get(0).asString() == "quiet"):
            self.outputFacePrection.write(facePredictionBottle)

        facePredictionBottle.clear()
        #return pp

        return ret[2]
Developer: jgqysu, Project: wysiwyd, Lines of code: 52, Source file: SAMDriver_interaction.py


Example 15: click_point

def click_point(im):
    fig = pylab.figure(1);
    pylab.imshow(im)
    #pylab.xlim(xlim)
    #pylab.ylim(ylim)
    point = []
    def pick(event): 
        point.append((event.xdata, event.ydata))
    cid = fig.canvas.mpl_connect('button_press_event', pick)
    print("Click a point")
    while not point: pylab.waitforbuttonpress()
    print "Ok!", point
    return point[0]
Developer: wearscript, Project: wearscript-ar, Lines of code: 13, Source file: big_to_glass_calibration.py


Example 16: getRectangleCoordinates

def getRectangleCoordinates(axh=None,fig=None,autozoom=True):
  """Tool for selecting a rectangle, functionality similar to ginput. Finish with right mouse button."""
  if not axh:
    axh = pl.gca()
  if not fig: fig = pl.gcf()
  class ROI:
    def __init__(self,fig,axh):
      self.fig = fig
      self.axh = axh
      self.lims = list(axh.axis())
      self.boxh = None
      self.finished = False

    def coo(self,eclick,erelease):

      self.lims = [min([eclick.xdata,erelease.xdata]),
       max([eclick.xdata,erelease.xdata]),
       min([eclick.ydata,erelease.ydata]),
       max([eclick.ydata,erelease.ydata])]

      if autozoom:
        ll = np.asarray(self.lims).reshape(2,2)
        ll[0] = np.mean(ll[0]) + np.array([-1,1])*np.diff(ll[0])*0.7
        ll[1] = np.mean(ll[1]) + np.array([-1,1])*np.diff(ll[1])*0.7
        self.axh.axis(ll.ravel())
       
      if not self.boxh is None:  self.boxh.remove()
      ptch = pl.Rectangle([self.lims[0],self.lims[2]],self.lims[1]-self.lims[0],self.lims[3]-self.lims[2],facecolor='r',alpha=0.5,ec='k')
      self.boxh = self.axh.add_patch(ptch)
      fig.canvas.draw()
    
    def button_press_callback(self,event):
      if event.inaxes:
        if event.button == 3:
          self.finished = True
          if self.boxh is None:
            self.lims = list(axh.axis())

  roi = ROI(fig,axh)
  selector = pl.matplotlib.widgets.RectangleSelector(axh,roi.coo)
  fig.canvas.mpl_connect('button_press_event', roi.button_press_callback)
  print("Select rectangular region of interest, finish with right click.")
  while not roi.finished:
    pl.waitforbuttonpress()
  if not roi.boxh is None: 
    roi.boxh.remove()
    axh.patches[-1].remove()
  del selector
  fig.canvas.draw()
  return roi.lims
Developer: htlemke, Project: ixppy, Lines of code: 51, Source file: toolsPlot.py


Example 17: once

def once():
    global depth, rgb
    if not FOR_REAL:
        dataset.advance()
        depth = dataset.depth
        rgb = dataset.rgb
    else:
        opennpy.sync_update()
        depth,_ = opennpy.sync_get_depth()
        rgb,_ = opennpy.sync_get_video()

    main.update_frame(depth, rgb)
    blockdraw.clear()
    try:
        c,_ = hashalign.find_best_alignment(grid.occ,0*grid.occ,
                                        target_model,~target_model)
    except ValueError:
        pass
    else:
        tm = hashalign.apply_correction(target_model, *c)
        tm = np.ascontiguousarray(tm)

        not_filled = tm & ~grid.occ
        correct = tm & grid.occ
        incorrect = ~tm & grid.occ

        try:
            next_layer = np.min(np.nonzero(not_filled)[1])
        except ValueError:
            blockdraw.show_grid('0', grid.occ, color=np.array([0.2,1,0.2,1]))
        else:
            blockdraw.show_grid('1', incorrect,
                                color=np.array([1,1,0.1,1]))
            nf = not_filled*0
            nf[:,next_layer,:] = 1
            nf = nf & not_filled
            blockdraw.show_grid('2', nf,
                                color=np.array([1,0.2,1.0,1]))
            blockdraw.show_grid('3', correct, color=np.array([0.1,0.3,0.1,1]))

    window.clearcolor=[0,0,0,0]
    window.flag_drawgrid = False

    if 'R_correct' in main.__dict__:
        window.modelmat = main.R_display

    #show_rgb(rgb)
    window.Refresh()
    pylab.waitforbuttonpress(0.005)
    sys.stdout.flush()
Developer: theY4Kman, Project: blockplayer, Lines of code: 50, Source file: demo_collab.py


Example 18: main

def main():

    f = "tsp-medium.tsp"
    j = File.read(f)  # to generate data, use the R_File.read access
    t = TwoOpt(j)  # returns an object ready to sort

    pylab.show()
    pylab.waitforbuttonpress()

    t.sort()  # one round of sorting

    # finally, once it's finished, just wait for button press
    pylab.waitforbuttonpress()
    pylab.close()
Developer: mark-ross, Project: two-opt, Lines of code: 14, Source file: main.py


Example 19: get_terminus

def get_terminus():
    from matplotlib.widgets import Cursor
    def tellme(s):
        print s
        plt.title(s,fontsize=16)
        plt.draw()

    plt.setp(plt.gca(),autoscale_on=False)

    cursor = Cursor(plt.axes(), useblit=True, color='white', linewidth=1 )

    happy = False
    while not happy:
        pts = []
        while len(pts) < 4:
            tellme('Select 4 corners of the terminus region')
            pts = np.asarray( plt.ginput(4, timeout=-1) )
            if len(pts) < 4:
                tellme('Too few points, starting over')
                time.sleep(1) # Wait a second

        ph = plt.fill(pts[:,0], pts[:,1], 'white', lw = 2, alpha=0.5)

        tellme('Done? Press any key if yes, mouse click to reset')

        happy = plt.waitforbuttonpress()

        # Get rid of fill
        if not happy:
            for p in ph: p.remove()

    return pts
Developer: ckhroulev, Project: dbg-playground, Lines of code: 32, Source file: test.py


Example 20: waitKey

def waitKey(fig):
	from pylab import waitforbuttonpress
	r = [None]
	fig.canvas.mpl_connect('key_press_event', lambda e: r.__setitem__(0, e.key))
	while not waitforbuttonpress():
		pass
	return r[0]
Developer: caomw, Project: voc-classification, Lines of code: 7, Source file: util.py
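
Many of the demos above (e.g. Examples 2, 8 and 12) use the same idiom: waitforbuttonpress is called with a small timeout inside a redraw loop, which keeps the figure window responsive and doubles as a crude frame-rate limiter. Below is a hedged, standalone sketch of that pattern with made-up data, again assuming an interactive matplotlib backend:

import numpy as np
import pylab

x = np.linspace(0, 2 * np.pi, 200)
for frame in range(100):
    pylab.clf()
    pylab.plot(x, np.sin(x + 0.1 * frame))
    pylab.title("frame %d (click or press a key to stop)" % frame)
    # The short timeout yields to the GUI event loop between frames;
    # waitforbuttonpress returns non-None as soon as the user interacts.
    if pylab.waitforbuttonpress(timeout=0.03) is not None:
        break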



Note: The pylab.waitforbuttonpress examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's license before distributing or reusing the code; do not reproduce this article without permission.

