Python multiprocessing.Array Class Code Examples


This article collects and organizes typical usage examples of the Python multiprocessing.Array class. If you are asking yourself how the Array class works, how to use it, or what real-world usage looks like, the curated class code examples below should help.



The following presents 20 code examples of the Array class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
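
Before the collected examples, here is a minimal, self-contained sketch of the pattern most of them share: allocate a multiprocessing.Array, wrap the shared buffer as a NumPy array, and let a child process write into it while the parent reads the result. The helper name fill_squares and the constant SIZE are illustrative assumptions, not taken from any of the projects below.

import ctypes
from multiprocessing import Array, Process

import numpy as np

SIZE = 8  # illustrative buffer length


def fill_squares(shared, n):
    # Re-wrap the shared ctypes buffer as a NumPy array inside the child process.
    arr = np.frombuffer(shared.get_obj())
    arr[:n] = np.arange(n, dtype=np.float64) ** 2


if __name__ == "__main__":
    shared = Array(ctypes.c_double, SIZE)  # synchronized shared memory
    worker = Process(target=fill_squares, args=(shared, SIZE))
    worker.start()
    worker.join()
    # The parent sees the child's writes because both map the same memory.
    print(np.frombuffer(shared.get_obj()))

Several of the examples below use np.ctypeslib.as_array(shared.get_obj()) instead of np.frombuffer(shared.get_obj()); for a c_double buffer these are interchangeable ways to obtain a view of the same shared memory.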

Example 1: steps_multiprocessing

    def steps_multiprocessing(self, number_of_steps, plot, plot_every_n):
        """ 
        Equivalent to take_steps, but using multiprocessing.

        Parameters
        ----------
        number_of_steps : float
                 Total number of time steps.
        plot : object
                 make_plot Object.
        plot_every_n : float
                 Update the plot every plot_every_n time steps.
        """
        
        shared_array_base = Array(ctypes.c_double, len(self.bodies)*2)
        shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())
        shared_array = shared_array.reshape(2, len(self.bodies))

        counter = Value(ctypes.c_int64, 0)
        end_plot = Value(ctypes.c_int8, 1)

        old_counter = 1
        rk_fun = Process(target = self.rk_fun_task, args=(number_of_steps, plot_every_n, shared_array, end_plot, counter))
        plot_fun = Process(target = self.plot_fun_task, args=(old_counter, shared_array, end_plot, counter, plot))

        rk_fun.start()
        plot_fun.start()

        rk_fun.join()
        plot_fun.join()
Author: fnbellomo, Project: GProject, Lines: 30, Source: Gravitation.py


Example 2: get_predict

def get_predict(args, ortho, model):
    xp = cuda.cupy if args.gpu >= 0 else np
    args.h_limit, args.w_limit = ortho.shape[0], ortho.shape[1]
    args.canvas_h = args.h_limit
    args.canvas_w = args.w_limit

    # to share 'canvas' between different threads
    canvas_ = Array(ctypes.c_float, args.canvas_h * args.canvas_w * args.channels)
    canvas = np.ctypeslib.as_array(canvas_.get_obj())
    canvas = canvas.reshape((args.canvas_h, args.canvas_w, args.channels))

    # prepare queues and threads
    patch_queue = Queue(maxsize=5)
    preds_queue = Queue()
    patch_worker = Process(target=create_minibatch, args=(args, ortho, patch_queue))
    canvas_worker = Process(target=tile_patches, args=(args, canvas, preds_queue))
    patch_worker.start()
    canvas_worker.start()

    while True:
        minibatch = patch_queue.get()
        if minibatch is None:
            break
        minibatch = Variable(xp.asarray(minibatch, dtype=xp.float32), volatile=True)
        preds = model(minibatch, None).data
        if args.gpu >= 0:
            preds = xp.asnumpy(preds)
        [preds_queue.put(pred) for pred in preds]

    preds_queue.put(None)
    patch_worker.join()
    canvas_worker.join()

    return canvas
Author: lucmichalski, Project: ssai-cnn, Lines: 34, Source: predict_offset.py


Example 3: conv_single_image

def conv_single_image(image):
    shared_array_base = Array(ctypes.c_double, image.size)
    shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())
    shared_array = shared_array.reshape(image.shape)
    shared_array[:] = image

    return shared_array
Author: CellProfiler, Project: cellstar, Lines: 7, Source: snake_grow.py


Example 4: run

def run(args):
    # create dummy environment to be able to create model
    env = gym.make(args.environment)
    assert isinstance(env.observation_space, Box)
    assert isinstance(env.action_space, Discrete)
    print("Observation space:", env.observation_space)
    print("Action space:", env.action_space)

    # create main model
    model = create_model(env, args)
    model.summary()
    env.close()

    # for better compatibility with Theano and Tensorflow
    multiprocessing.set_start_method('spawn')

    # create shared buffer for sharing weights
    blob = pickle.dumps(model.get_weights(), pickle.HIGHEST_PROTOCOL)
    shared_buffer = Array('c', len(blob))
    shared_buffer.raw = blob

    # force runner processes to use cpu
    os.environ["CUDA_VISIBLE_DEVICES"] = ""

    # create fifos and threads for all runners
    fifos = []
    for i in range(args.num_runners):
        fifo = Queue(args.queue_length)
        fifos.append(fifo)
        process = Process(target=runner, args=(shared_buffer, fifo, args))
        process.start()

    # start trainer in main thread
    trainer(model, fifos, shared_buffer, args)
Author: tambetm, Project: gymexperiments, Lines: 34, Source: a2c.py


Example 5: shared_np_array

def shared_np_array(shape, data=None, integer=False):
    """Create a new numpy array that resides in shared memory.

    Parameters
    ----------
    shape : tuple of ints
        The shape of the new array.
    data : numpy.array
        Data to copy to the new array. Has to have the same shape.
    integer : boolean
        Whether to use an integer array. Defaults to False which means
        float array.
    """
    size = np.prod(shape)
    if integer:
        array = Array(ctypes.c_int64, int(size))
        np_array = np.frombuffer(array.get_obj(), dtype="int64")
    else:
        array = Array(ctypes.c_double, int(size))
        np_array = np.frombuffer(array.get_obj())
    np_array = np_array.reshape(shape)

    if data is not None:
        if len(shape) != len(data.shape):
            raise ValueError("`data` must have the same dimensions"
                             "as the created array.")
        same = all(x == y for x, y in zip(shape, data.shape))
        if not same:
            raise ValueError("`data` must have the same shape"
                             "as the created array.")
        np_array[:] = data

    return np_array
Author: cdiener, Project: cobrapy, Lines: 33, Source: sampling.py


Example 6: initialize

    def initialize(self):
        # Create thread safe arrays.
        self.prev_values = Array('d', self.prev_values, lock=False)
        self.next_values = Array('d', self.next_values, lock=False)

        for key in self.records:
            self.records[key] = manager.list()
        for key in self.spikes:
            self.spikes[key] = manager.list()
Author: vicariousgreg, Project: neurotransmissionModel, Lines: 9, Source: environment.py


Example 7: __init__

class SynapseEnvironment:
    def __init__(self, noise=0.0):
        def beta(maximum, rate=1.0):
            return betav(maximum, noise=noise, rate=rate)
        self.beta = beta

        self.prev_concentrations = []
        self.next_concentrations = []

    def initialize(self):
        # Create thread safe arrays.
        self.prev_concentrations = Array('d', self.prev_concentrations, lock=False)
        self.next_concentrations = Array('d', self.next_concentrations, lock=False)
        self.dirty = Value('b', True, lock=False)
        
    def register(self, baseline_concentration):
        pool_id = len(self.prev_concentrations)
        self.prev_concentrations.append(baseline_concentration)
        self.next_concentrations.append(baseline_concentration)
        return pool_id

    def get_concentration(self, pool_id):
        try: self.dirty
        except: self.initialize()
        return self.prev_concentrations[pool_id]

    def set_concentration(self, pool_id, new_concentration):
        try: self.dirty.value = True
        except: self.initialize()
        self.next_concentrations[pool_id] = new_concentration

    def add_concentration(self, pool_id, molecules):
        try: self.dirty.value = True
        except: self.initialize()
        self.next_concentrations[pool_id] += molecules

    def remove_concentration(self, pool_id, molecules):
        try: self.dirty.value = True
        except: self.initialize()
        self.next_concentrations[pool_id] -= molecules
        self.next_concentrations[pool_id] = \
            max(0.0, self.next_concentrations[pool_id])

    def step(self):
        """
        Cycles the environment.
        Returns whether the environment is stable (not dirty, no changes)
        """
        try: self.dirty
        except: self.initialize()
        if self.dirty.value:
            self.dirty.value = False
            for i in xrange(len(self.prev_concentrations)):
                self.prev_concentrations[i]=self.next_concentrations[i]
            return False
        else: return True
Author: vicariousgreg, Project: neurotransmissionModel, Lines: 56, Source: environment.py


Example 8: __init__

 def __init__(self, size=1000, data_type="int32"):
     self.data_type = data_type
     self.head = Value("i", 0)
     self.ring_buffer = Array(data_type[0], range(size))
     self.size = size
     for i in range(size):
         self.ring_buffer[i] = 0  # probably really slow but not done often
Author: wenlintan, Project: trap-control, Lines: 7, Source: counters.py


Example 9: calculatePearsonCorrelationMatrixMultiprocessing

def calculatePearsonCorrelationMatrixMultiprocessing(matrix, axis=0, symmetrical=True, getpvalmat=False):

    if axis == 1:
        matrix = matrix.T

    nRows = matrix.shape[0]

    # create shared array that can be used from multiple processes
    output_r_arr = Array(ctypes.c_double, matrix.shape[0] * matrix.shape[0])
    # then in each new process create a new numpy array using:
    output_r = np.frombuffer(output_r_arr.get_obj())  # mp_arr and arr share the same memory
    # make it two-dimensional
    output_r = output_r.reshape((matrix.shape[0], matrix.shape[0]))  # b and arr share the same memory
    # output_r = np.zeros((nRows,nRows))  # old version

    output_p_arr = Array(ctypes.c_double, matrix.shape[0] * matrix.shape[0])
    output_p = np.frombuffer(output_p_arr.get_obj())
    output_p = output_p.reshape((matrix.shape[0], matrix.shape[0]))

    print 'Calculating Pearson R for each row, multithreaded'
    print mp.cpu_count(), 'processes in pool'

    pool = None
    try:
        pool = mp.Pool(mp.cpu_count(),
                       initializer=_init_pool,
                       initargs=(matrix, output_r_arr, output_p_arr,
                                 nRows, symmetrical))

        # bar = tqdm(total=nRows*nRows/2)
        # tqdm.write('Calculating Pearson R for each row, multithreaded')
        for result in tqdm(pool.imap_unordered(_f, range(0, nRows)), total=nRows):
            # bar.update(result)
            pass
        # bar.close()
    finally:  # To make sure processes are closed in the end, even if errors happen
        pool.close()
        pool.join()

    print output_r

    if getpvalmat:
        return output_r, output_p
    else:
        return output_r
Author: themantalope, Project: maakclusterutils, Lines: 45, Source: cluster_utils.py


Example 10: initialize

 def initialize(self, nChannels, nSamples, windowSize=1, nptype='float32'):
     '''
     Initializes the buffer with a new raw array
     
     Parameters
     ----------
     nChannels : int
         dimensionality of a single sample
     nSamples : int
         the buffer capacity in samples
     windowSize : int, optional
         the size of the window to be used for reading the
         data. A pocket of this size will be created
     nptype : string, optional
         the type of the data to be stored
                        
     '''
     self.__initialized = True
     
     # checking parameters
     if nChannels < 1:
         self.logger.warning('nChannels must be a positive integer, setting to 1')
         nChannels = 1
     if nSamples < 1:
         self.logger.warning('nSamples must be a positive integer, setting to 1')
         nSamples = 1
     if windowSize < 1:
          self.logger.warning('windowSize must be a positive integer, setting to 1')
         windowSize = 1
     
     # initializing
     sizeBytes = c.sizeof(BufferHeader) + \
                 (nSamples + windowSize) * nChannels * np.dtype(nptype).itemsize
     
     raw = Array('c', sizeBytes)
     hdr = BufferHeader.from_buffer(raw.get_obj())
     
     hdr.bufSizeBytes = nSamples * nChannels * np.dtype(nptype).itemsize
     hdr.pocketSizeBytes = windowSize * nChannels * np.dtype(nptype).itemsize
     hdr.dataType = datatypes.get_code(nptype)
     hdr.nChannels = nChannels
     hdr.nSamplesWritten = 0
     
     self.initialize_from_raw(raw.get_obj())
Author: belevtsoff, Project: rdaclient.py, Lines: 44, Source: ringbuffer.py


Example 11: _calculate_phi

    def _calculate_phi(self, x):
        C = self.workers
        neurons = self.neurons
        mu = self.mu
        sigmas = self.sigmas
        phi = self.phi = None
        n = self.n


        def heavy_lifting(c, phi):
            s = jobs[c][1] - jobs[c][0]
            for k, i in enumerate(xrange(jobs[c][0], jobs[c][1])):
                for j in xrange(neurons):
                    # phi[i, j] = metrics(x[i,:], mu[j])**3)
                    # phi[i, j] = plateSpine(x[i,:], mu[j]))
                    # phi[i, j] = invMultiQuadric(x[i,:], mu[j], sigmas[j]))
                    phi[i, j] = multiQuadric(x[i,:], mu[j], sigmas[j])
                    # phi[i, j] = gaussian(x[i,:], mu[j], sigmas[j]))
                if k % 1000 == 0:
                    percent = true_divide(k, s)*100
                    print c, ': {:2.2f}%'.format(percent)
            print c, ': Done'
        
        # distributing the work between 4 workers
        shared_array = Array(c_double, n * neurons)
        phi = frombuffer(shared_array.get_obj())
        phi = phi.reshape((n, neurons))

        jobs = []
        workers = []

        p = n / C
        m = n % C
        for c in range(C):
            jobs.append((c*p, (c+1)*p + (m if c == C-1 else 0)))
            worker = Process(target = heavy_lifting, args = (c, phi))
            workers.append(worker)
            worker.start()

        for worker in workers:
            worker.join()

        return phi
Author: eugeniashurko, Project: rbfnnpy, Lines: 43, Source: rbfnnpy.py


Example 12: initialize

    def initialize(self, n_channels, n_samples, np_dtype='float32'):
        """Initializes the buffer with a new array."""
        logger.debug('Initializing {}x{} {} buffer.'.format(n_channels, n_samples, np_dtype))

        # check parameters
        if n_channels < 1 or n_samples < 1:
            logger.error('n_channels and n_samples must be a positive integer')
            raise SharedBufferError(1)

        size_bytes = ct.sizeof(SharedBufferHeader) + n_samples * n_channels * np.dtype(np_dtype).itemsize
        raw = Array('c', size_bytes)
        hdr = SharedBufferHeader.from_buffer(raw.get_obj())

        hdr.bufSizeBytes = size_bytes - ct.sizeof(SharedBufferHeader)
        hdr.dataType = DataTypes.get_code(np_dtype)
        hdr.nChannels = n_channels
        hdr.nSamples = n_samples
        hdr.position = 0

        self.initialize_from_raw(raw.get_obj())
Author: wonkoderverstaendige, Project: ophys_io, Lines: 20, Source: SharedBuffer.py


Example 13: load_data

def load_data(args, input_q, minibatch_q):
    c = args.channel
    s = args.size
    d = args.joint_num * 2

    input_data_base = Array(ctypes.c_float, args.batchsize * c * s * s)
    input_data = np.ctypeslib.as_array(input_data_base.get_obj())
    input_data = input_data.reshape((args.batchsize, c, s, s))

    label_base = Array(ctypes.c_float, args.batchsize * d)
    label = np.ctypeslib.as_array(label_base.get_obj())
    label = label.reshape((args.batchsize, d))

    x_queue, o_queue = Queue(), Queue()
    workers = [Process(target=transform,
                       args=(args, x_queue, args.datadir, args.fname_index,
                             args.joint_index, o_queue))
               for _ in range(args.batchsize)]
    for w in workers:
        w.start()

    while True:
        x_batch = input_q.get()
        if x_batch is None:
            break

        # data augmentation
        for x in x_batch:
            x_queue.put(x)
        j = 0
        while j != len(x_batch):
            a, b = o_queue.get()
            input_data[j] = a
            label[j] = b
            j += 1
        minibatch_q.put([input_data, label])

    for _ in range(args.batchsize):
        x_queue.put(None)
    for w in workers:
        w.join()
Author: cybermatt, Project: deeppose, Lines: 41, Source: train.py


Example 14: __init__

    def __init__(self):

        self.all_curves = Listing.index_all_curves()
        index_file = open ("outputs/index_file.txt","w")
        for index, item in enumerate(self.all_curves):
              index_file.write("%i,%s" % (index, str(item)))
        index_file.close()
        self.n = len(self.all_curves)

        self.total_costs_matrix_base = Array(ctypes.c_double, self.n*self.n)
        self.total_costs_matrix = numpy.ctypeslib.as_array(
                             self.total_costs_matrix_base.get_obj())
        self.total_costs_matrix = self.total_costs_matrix.reshape(self.n,self.n)
Author: tomasyany, Project: spectra, Lines: 13, Source: cost_computation.py


Example 15: test_continuous_send_dialog

    def test_continuous_send_dialog(self):
        self.add_signal_to_form("esaver.coco")
        self.__add_first_signal_to_generator()

        port = self.get_free_port()

        gframe = self.form.generator_tab_controller
        expected = np.zeros(gframe.total_modulated_samples, dtype=np.complex64)
        expected = gframe.modulate_data(expected)
        current_index = Value("L", 0)
        buffer = Array("f", 4 * len(expected))

        process = Process(target=receive, args=(port, current_index, 2 * len(expected), buffer))
        process.daemon = True
        process.start()
        time.sleep(1)  # ensure server is up

        ContinuousModulator.BUFFER_SIZE_MB = 10

        continuous_send_dialog = self.__get_continuous_send_dialog()
        continuous_send_dialog.device.set_client_port(port)
        continuous_send_dialog.device_settings_widget.ui.spinBoxNRepeat.setValue(2)
        continuous_send_dialog.ui.btnStart.click()
        QTest.qWait(1000)
        time.sleep(1)
        process.join(1)

        # CI sometimes swallows a sample
        self.assertGreaterEqual(current_index.value, len(expected) - 1)

        buffer = np.frombuffer(buffer.get_obj(), dtype=np.complex64)
        for i in range(len(expected)):
            self.assertEqual(buffer[i], expected[i], msg=str(i))

        continuous_send_dialog.ui.btnStop.click()
        continuous_send_dialog.ui.btnClear.click()
        QTest.qWait(1)

        continuous_send_dialog.close()
Author: jopohl, Project: urh, Lines: 39, Source: test_send_recv_dialog_gui.py


Example 16: CameraStreamer

class CameraStreamer(Process):
  def __init__(self, stayAliveObj=None, frameCountObj=None, imageObj=None, imageShapeObj=None):
    Process.__init__(self)
    print "CameraStreamer.__init__(): [pid: {}, OS pid: {}]".format(self.pid, os.getpid())
    # * Store references to and/or create shared objects
    self.stayAliveObj = stayAliveObj if stayAliveObj is not None else Value(c_bool, True)
    self.frameCountObj = frameCountObj if frameCountObj is not None else Value('i', 0)
    self.imageShapeObj = imageShapeObj if imageShapeObj is not None else Array('i', (camera_frame_height, camera_frame_width, camera_frame_depth))
    if imageObj is not None:
      # ** Use supplied shared image object
      self.imageObj = imageObj
    else:
      # ** Create shared image object
      image = np.zeros((camera_frame_height, camera_frame_width, camera_frame_depth), dtype=np.uint8)  # create an image
      imageShape = image.shape  # store original shape
      imageSize = image.size  # store original size (in bytes)
      image.shape = imageSize  # flatten numpy array
      self.imageObj = Array(c_ubyte, image)  # create a synchronized shared array object
  
  def run(self):
    print "CameraStreamer.run(): [pid: {}, OS pid: {}]".format(self.pid, os.getpid())
    # * Interpret shared objects properly (NOTE this needs to happen in the child process)
    self.image = ctypeslib.as_array(self.imageObj.get_obj())  # get flattened image array
    self.image.shape = ctypeslib.as_array(self.imageShapeObj.get_obj())  # restore original shape
    
    # * Open camera and set desired capture properties
    self.camera = cv2.VideoCapture(0)
    if self.camera.isOpened():
      result_width = self.camera.set(cv.CV_CAP_PROP_FRAME_WIDTH, camera_frame_width)
      result_height = self.camera.set(cv.CV_CAP_PROP_FRAME_HEIGHT, camera_frame_height)
      print "CameraStreamer.run(): Camera frame size set to {width}x{height} (result: {result_width}, {result_height})".format(width=camera_frame_width, height=camera_frame_height, result_width=result_width, result_height=result_height)
    else:
      print "CameraStreamer.run(): Unable to open camera; aborting..."
      self.stayAliveObj.value = False
      return
    
    # * Keep reading frames into shared image until stopped or read error occurs
    while self.stayAliveObj.value:
      try:
        #print "CameraStreamer.run(): Frame # {}, stay alive? {}".format(self.frameCountObj.value, self.stayAliveObj.value)  # [debug]
        isOkay, frame = self.camera.read()
        if not isOkay:
          self.stayAliveObj.value = False
        self.frameCountObj.value = self.frameCountObj.value + 1
        self.image[:] = frame
      except KeyboardInterrupt:
        self.stayAliveObj.value = False
    
    # * Clean-up
    self.camera.release()
Author: napratin, Project: lumos, Lines: 50, Source: camstream.py


Example 17: __init__

 def __init__(self, stayAliveObj=None, frameCountObj=None, imageObj=None, imageShapeObj=None):
   Process.__init__(self)
   print "CameraStreamer.__init__(): [pid: {}, OS pid: {}]".format(self.pid, os.getpid())
   # * Store references to and/or create shared objects
   self.stayAliveObj = stayAliveObj if stayAliveObj is not None else Value(c_bool, True)
   self.frameCountObj = frameCountObj if frameCountObj is not None else Value('i', 0)
   self.imageShapeObj = imageShapeObj if imageShapeObj is not None else Array('i', (camera_frame_height, camera_frame_width, camera_frame_depth))
   if imageObj is not None:
     # ** Use supplied shared image object
     self.imageObj = imageObj
   else:
     # ** Create shared image object
     image = np.zeros((camera_frame_height, camera_frame_width, camera_frame_depth), dtype=np.uint8)  # create an image
     imageShape = image.shape  # store original shape
     imageSize = image.size  # store original size (in bytes)
     image.shape = imageSize  # flatten numpy array
     self.imageObj = Array(c_ubyte, image)  # create a synchronized shared array object
Author: napratin, Project: lumos, Lines: 17, Source: camstream.py


Example 18: Buffer

class Buffer(object):
    def __init__(self, size=1000, data_type="int32"):
        self.data_type = data_type
        self.head = Value("i", 0)
        self.ring_buffer = Array(data_type[0], range(size))
        self.size = size
        for i in range(size):
            self.ring_buffer[i] = 0  # probably really slow but not done often

    def get_head_value(self):
        return self.ring_buffer[self.head.value]

    def get_buffer(self):
        buf = np.frombuffer(self.ring_buffer.get_obj(), dtype=self.data_type)
        return np.concatenate((buf[self.head.value + 1 :], buf[0 : self.head.value]))

    def push(self, v):
        self.head.value = self.head.value + 1
        if self.head.value == self.size:
            self.head.value = 0
        self.ring_buffer[self.head.value] = v  # randint(0,10)
Author: wenlintan, Project: trap-control, Lines: 21, Source: counters.py


Example 19: CostComputation

class CostComputation(object):
    """Computes the cost matrix."""

    def __init__(self):

        self.all_curves = Listing.index_all_curves()
        index_file = open ("outputs/index_file.txt","w")
        for index, item in enumerate(self.all_curves):
              index_file.write("%i,%s" % (index, str(item)))
        index_file.close()
        self.n = len(self.all_curves)

        self.total_costs_matrix_base = Array(ctypes.c_double, self.n*self.n)
        self.total_costs_matrix = numpy.ctypeslib.as_array(
                             self.total_costs_matrix_base.get_obj())
        self.total_costs_matrix = self.total_costs_matrix.reshape(self.n,self.n)


    def set_total_costs_matrix(self, i, j, def_param = None):
        def_param = total_costs_matrix_base
        curve_name_i = all_curves[i][0]
        curve_type_i = all_curves[i][1]
        curve_file_i = fits.open(os.getcwd()+'/memoria/'+
                        'inputs/'+curve_type_i+'/'+curve_name_i+'.fits',
                        memmap=False)

        curve_data_i = Extractor.get_values(curve_file_i)

        curve_file_i.close()

        curve_name_j = all_curves[j][0]
        curve_type_j = all_curves[j][1]
        curve_file_j = fits.open(os.getcwd()+'/memoria/'+
                        'inputs/'+curve_type_j+'/'+curve_name_j+'.fits',
                        memmap=False)

        curve_data_j = Extractor.get_values(curve_file_j)

        curve_file_j.close()

        x,y = curve_data_i, curve_data_j

        dtw = DTW(x,y)
        cost_matrix = dtw.compute_cost_matrix(DTW.euclidean_distance)
        acc_cost_matrix, cost = dtw.compute_acc_cost_matrix(cost_matrix)

        self.total_costs_matrix[i,j] = cost


    def write_cost_matrix(self):
        begin = timeit.default_timer()

        pool = Pool(processes=cpu_count())
        iterable = []
        for i in range(self.n):
            for j in range(i+1,self.n):
                iterable.append((i,j))
        pool.starmap(self.set_total_costs_matrix, iterable)

        self.total_costs_matrix.dump(os.getcwd()+'/memoria/outputs/cost_matrix')

        end = timeit.default_timer()
        print(end - begin)
Author: tomasyany, Project: spectra, Lines: 63, Source: cost_computation.py


Example 20: World

pygame.init()
clock = pygame.time.Clock()
screen = pygame.display.set_mode((width, height))

# Initialise the physics stuff
from Physics import World
Jacks_sweet_thread = World(random_plane, send)

from timeit import default_timer as current_time
from multiprocessing import Process, Pool
from time import sleep

import ctypes
import numpy as np

pixel_array_base = Array(ctypes.c_int, width*height)
pixel_array = np.ctypeslib.as_array(pixel_array_base.get_obj())
pixel_array = pixel_array.reshape(width, height)

from pygametranslator import Translator
Jacks_sweet_threads = Translator(recv, pixel_array)

# Start things
Jacks_sweet_thread.start()
Jacks_sweet_threads.start()
update_interval = 1/60
running = True
previous_update = current_time()
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
Author: PoolFeast6969, Project: Integer-Particles, Lines: 31, Source: Main.py



Note: The multiprocessing.Array class examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution or use should follow the corresponding project's license. Do not reproduce without permission.

