• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python numpy.correlate函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中numpy.correlate函数的典型用法代码示例。如果您正苦于以下问题:Python correlate函数的具体用法?Python correlate怎么用?Python correlate使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了correlate函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: check_preamble_properties

def check_preamble_properties(preamble, x_preamble):
    x_1st = x_preamble[0:len(x_preamble) // 2]
    x_2nd = x_preamble[-len(x_preamble) // 2:]
    if not np.all(np.abs(x_1st - x_2nd) < 1e-12):
        print np.abs(x_1st - x_2nd)
        raise ValueError('preamble timeslots do not repeat!')
    from correlation import cross_correlate_naive, auto_correlate_halfs
    from utils import calculate_signal_energy
    x_ampl = np.sqrt(calculate_signal_energy(x_preamble))
    preamble *= x_ampl
    x_preamble *= x_ampl
    x_energy = calculate_signal_energy(x_preamble)
    if np.abs(2. * auto_correlate_halfs(x_preamble) / x_energy) -1. > 1e-10:
        raise ValueError('auto correlating halfs of preamble fails!')

    print 'normalized preamble xcorr val: ', np.correlate(x_preamble, x_preamble) / x_energy
    print 'windowed normalized preamble: ', np.correlate(preamble[-len(x_preamble):], x_preamble) / x_energy
    fxc = np.correlate(preamble, x_preamble, 'full') / x_energy
    vxc = np.correlate(preamble, x_preamble, 'valid') / x_energy
    nxc = cross_correlate_naive(preamble, x_preamble) / x_energy
    import matplotlib.pyplot as plt

    plt.plot(np.abs(fxc))
    plt.plot(np.abs(vxc))
    plt.plot(np.abs(nxc))
    plt.show()
开发者ID:jdemel,项目名称:gr-gfdm,代码行数:26,代码来源:preamble.py


示例2: find

 def find(self, target):
     """Return True when *target* matches one of the stored patterns.

     Dispatches on the number of rows: 4 rows -> pattern 'd', 3 rows of
     length 4 -> pattern 'c', 3 rows of length 3 -> the common pattern
     list ``self.threshold`` of (pattern, threshold) pairs. A match means
     the summed per-row correlation reaches the pattern's threshold.
     Falls through (implicitly returning None) for any other shape,
     matching the original behaviour.
     """
     rows = len(target)
     if rows == 4:
         # candidate for expanded pattern 'd'
         total = 0
         for r in range(rows):
             total += np.correlate(target[r], self.pd[r])[0]
         if total >= self.threshold_expand['pd']:
             return True
         else:
             return False
     elif rows == 3:
         if len(target[0]) == 4:
             # candidate for expanded pattern 'c'
             total = 0
             for r in range(rows):
                 total += np.correlate(target[r], self.pc[r])[0]
             if total >= self.threshold_expand['pc']:
                 return True
             else:
                 return False
         elif len(target[0]) == 3:
             # common 3x3 case: scan every (pattern, threshold) pair
             for entry in self.threshold:
                 pattern = entry[0]
                 limit = entry[1]
                 total = 0
                 for r in range(rows):
                     total += np.correlate(target[r], pattern[r])[0]
                 if total >= limit:
                     return True
             return False
开发者ID:lucaschenex,项目名称:PatternRecognition,代码行数:32,代码来源:PatternRecognition.py


示例3: ccovf

def ccovf(x, y, unbiased=True, demean=True):
    '''Cross-covariance of two 1D series at non-negative lags.

    Parameters
    ----------
    x, y : arrays
       time series data
    unbiased : boolean
       if True, lag k is normalized by n - k; otherwise by n
    demean : boolean
       if True, subtract the sample means before correlating

    Returns
    -------
    ccovf : array
        cross-covariance function, length n

    Notes
    -----
    This uses np.correlate, which does a full convolution. For very long
    time series an FFT-based convolution is recommended instead.
    '''
    n = len(x)
    xo = x - x.mean() if demean else x
    yo = y - y.mean() if demean else y
    if unbiased:
        # correlating a ones-vector with itself yields the n-k counts
        taper = np.ones(n)
        divisor = np.correlate(taper, taper, 'full')
    else:
        divisor = n
    return (np.correlate(xo, yo, 'full') / divisor)[n - 1:]
开发者ID:philippmuller,项目名称:statsmodels,代码行数:33,代码来源:stattools.py


示例4: plot_acorr

def plot_acorr(x, ax=None, title="", xlabel="Shift", ylabel="",
               append_analysis=True):
    """Plot the autocorrelation of *x* against shift.

    When the signal is numerically constant (no variation after rounding
    to 12 decimals) the autocorrelation is undefined, so the plot falls
    back to the autocovariance instead.
    """
    centered = x - np.mean(x)
    variance = np.var(x)
    half_sample = centered[:int(len(x) // 2)]
    has_variation = len(np.unique(x.round(decimals=12))) > 1
    if has_variation:
        # ordinary autocorrelation: covariance normalized by the variance
        acorr = np.correlate(centered, half_sample, 'valid') / variance
        analysis_mode = "Autocorrelation"
    else:
        # deterministic process: skip the (zero) variance normalization
        acorr = np.correlate(centered, half_sample, 'valid')
        analysis_mode = "Autocovariance"

    if ax is None:
        fig, ax = plt.subplots(nrows=1, figsize=(12,3))
    # only the first 100 shifts are displayed
    ax.plot(acorr[:100], 'o')
    ax.set_title(title + analysis_mode if append_analysis else title)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    limit_ylim(ax)
    return ax
开发者ID:fragapanagos,项目名称:notebooks,代码行数:32,代码来源:exp.py


示例5: findInserts

    def findInserts(self):
        """Locate the phantom inserts in this acquisition.

        Resamples this object's slice profile onto the base phantom's slice
        spacing, cross-correlates it (forward and mirrored) against the base
        profile to find the acquisition direction and shift, then maps each
        named base insert range back into this acquisition's slice indices,
        storing the result in ``self.inserts``.
        """
        #interpolate to the base phantom slice thickness
        #X=numpy.linspace(0,self.phBase.slicethk*(self.slices*self.slicethk/self.phBase.slicethk),(self.slices*self.slicethk)/self.phBase.slicethk+1)
        X=numpy.arange(0,self.slices*self.slicethk,self.phBase.slicethk)
        Xp=numpy.linspace(0,(self.slices-1)*self.slicethk,self.slices)

        profileResc=numpy.interp(X,Xp,self.profile)
        # fliplr on a 2-row stack, keeping row 0: reverses the rescaled profile
        profileRescMirror=numpy.fliplr([profileResc,numpy.zeros(len(profileResc))])[0,:]

        #find order of acquisition: correlate base profile against the
        #rescaled profile and its mirror; the stronger peak wins
        fwdcor=numpy.correlate(self.phBase.profile[:,1],profileResc,'full')
        rwdcor=numpy.correlate(self.phBase.profile[:,1],profileRescMirror,'full')

        reverse=False
        if numpy.amax(fwdcor)>=numpy.amax(rwdcor):
            shift=numpy.argmax(fwdcor)
        else:
            reverse=True
            shift=numpy.argmax(rwdcor)

        #align profile and base profile
        #get index of slices (in base-phantom slice units, shifted by the
        #correlation lag)
        Xcor=(X/self.phBase.slicethk)-len(X)+1+shift

        #find phantom slice nearest to base inserts
        # NOTE(review): assumes self.phBase.inserts[name] is a (start, end)
        # pair of base slice indices — confirm against the base phantom class.
        Inserts=["resolution","sliceThk","uniform","dgp"]
        for insert in Inserts:
            if (Xcor==self.phBase.inserts[insert][0]).any() or (Xcor==self.phBase.inserts[insert][1]).any():
                # clip the insert range to the slices actually acquired
                f=max(self.phBase.inserts[insert][0],Xcor[0])
                s=min(self.phBase.inserts[insert][1],Xcor[len(Xcor)-1])
                # map back from base slice units to this acquisition's indices
                self.inserts[insert]=numpy.round(((numpy.array([f,s])+len(X)-1-shift)*float(self.phBase.slicethk))/float(self.slicethk))
                if reverse:
                    # acquisition ran in the opposite direction: flip indices
                    self.inserts[insert]=numpy.abs(self.inserts[insert]-self.slices+1)
                    (self.inserts[insert]).sort()
开发者ID:gina-belmonte,项目名称:SlicerExtension-QASuite,代码行数:34,代码来源:philipsMR.py


示例6: dbpsk_demod

def dbpsk_demod(rx_data, sample_rate, L):
    print "Demodulating [email protected]", sample_rate
    time_seq = np.arange(0, len(rx_data), 1, dtype=float) / sample_rate
    two_pi_fc_t = 2 * np.pi * CenterFreq * time_seq
    # Filter out-of-band noise
    rx_inband = np.convolve(rx_data, bp_filt)
    N = len(rx_inband)
    # Downconvert I/Q channels into baseband signals
    rx_bb_i = np.multiply(rx_inband[SamplesPerSymbol / 2 : N - SamplesPerSymbol / 2], np.cos(two_pi_fc_t))
    rx_bb_q = np.multiply(rx_inband[SamplesPerSymbol / 2 : N - SamplesPerSymbol / 2], np.sin(two_pi_fc_t))
    # Filter any high frequency remnants
    audio_bb_i = np.convolve(rx_bb_i, lp_filt)[: L * SamplesPerSymbol * BitsPerChar]
    audio_bb_q = np.convolve(rx_bb_q, lp_filt)[: L * SamplesPerSymbol * BitsPerChar]
    decoded_bits = np.zeros(L * BitsPerChar)
    # Previous Phase and decode bit
    pp = 0
    pb = 0
    detected_bitstream = np.zeros(L * BitsPerChar, dtype=int)
    T = SamplesPerSymbol
    # Matched filter is just a rectangular pulse
    rect_pulse = np.ones(T)
    for demod in np.arange(L * BitsPerChar):
        sym_i = np.correlate(audio_bb_i[demod * T : (demod + 1) * T], rect_pulse, "full")[T]
        sym_q = np.correlate(audio_bb_q[demod * T : (demod + 1) * T], rect_pulse, "full")[T]
        cp = np.arctan(sym_q / sym_i)
        # print "Phase Diff:", cp-pp
        if np.abs(cp - pp) > 0.1:
            detected_bitstream[demod] = pb ^ 1
        else:
            detected_bitstream[demod] = detected_bitstream[demod - 1]
        pb = detected_bitstream[demod]
        pp = cp

    return detected_bitstream
开发者ID:fsheikh,项目名称:sample_code,代码行数:34,代码来源:data_sound.py


示例7: determineDelay

def determineDelay(source, target, maxdel=2**16, ax=None):
    '''
    Determine the delay between two signals
    (based on correlation extrema)

    Parameters:
    * Signals
      - source
      - target
    * maxdel: maximum delay to look for (in both directions)
    * ax: optional axis to plot the correlations on

    Returns the lag (in samples) of target relative to source.
    '''
    seg_start = 0
    src_seg = source[seg_start:seg_start + maxdel]
    tgt_seg = target[seg_start:seg_start + maxdel]
    # auto-correlation peak gives the zero-lag reference position;
    # cross-correlation peak gives the shifted position
    auto_corr = np.correlate(src_seg, src_seg, 'full')
    cross_corr = np.correlate(tgt_seg, src_seg, 'full')
    auto_peak = np.argmax(np.abs(auto_corr))
    cross_peak = np.argmax(np.abs(cross_corr))
    if ax:
        try:
            ax.plot(auto_corr)
        except AttributeError:
            fig, ax = pl.subplots(1)
            ax.plot(auto_corr)
        ax.plot(cross_corr)
        ax.axvline(auto_peak, color='red')
        ax.plot(cross_peak, cross_corr[cross_peak], 'o')

    return cross_peak - auto_peak
开发者ID:goiosunsw,项目名称:PyPeVoc,代码行数:30,代码来源:TransferFunctions.py


示例8: get_best_time_window

def get_best_time_window(data, samplerate, fundamental_frequency, eod_cycles):
    """Select the stretch of *data* with the most stable EOD peak amplitude.

    The window length is eod_cycles periods of fundamental_frequency (capped
    at 20% of the recording). A sliding mean and std of the detected peak
    amplitudes are computed via correlation; the window starting at the
    peak with the smallest coefficient of variation (CV) wins.

    Returns (time_axis, data_slice) for the chosen window, with the time
    axis shifted to start at zero.
    """
    eod_peaks1, eod_peak_idx1, _, _ = peakdet(data)

    max_time = len(data) / samplerate
    time_for_eod_cycles_in_window = eod_cycles / fundamental_frequency

    if time_for_eod_cycles_in_window > max_time * .2:
        time_for_eod_cycles_in_window = max_time * .2
        warnings.warn("You are reqeusting a window that is too long. Using T=%f" % (time_for_eod_cycles_in_window,))

    sample_points_in_window = int(fundamental_frequency * time_for_eod_cycles_in_window)

    tApp = np.arange(len(data)) / samplerate
    w1 = np.ones(sample_points_in_window) / sample_points_in_window

    # sliding mean/std of the peak amplitudes: E[x] and sqrt(E[x^2]-E[x]^2)
    local_mean = np.correlate(eod_peaks1, w1, mode='valid')
    local_std = np.sqrt(np.correlate(eod_peaks1 ** 2., w1, mode='valid') - local_mean ** 2.)
    COV = local_std / local_mean

    # IDIOM FIX: the original scanned COV with a Python loop, overwriting v on
    # every element equal to the minimum. Select the *last* minimum position
    # directly, preserving that tie-breaking behavior.
    v = eod_peak_idx1[np.nonzero(COV == np.min(COV))[0][-1]]

    idx = (tApp >= tApp[v]) & (tApp < tApp[v] + time_for_eod_cycles_in_window)
    tApp = tApp[idx]
    dat_app = data[idx]
    tApp = tApp - tApp[0]

    return tApp, dat_app
开发者ID:fabiansinz,项目名称:efish_locking,代码行数:30,代码来源:modelling.py


示例9: _updateBuffer

 def _updateBuffer(self, v):
     """
     Append sample *v* to the running buffer, and once the buffer is full,
     count a detection whenever a peak (or valley, depending on
     ``self._isPeak``) sits at the window center and passes the cutoff —
     optionally gated by template correlation. Returns the running count.
     """
     self._rtData.append(v)
     wndwCenter = int(np.floor(self._window / 2.0))
     # pop the end of the buffer once it exceeds the window length
     # NOTE(review): self._rtData appears to be a custom sequence type with
     # findPeaks/findValleys/normalize methods — confirm its class.
     if len(self._rtData) > self._window:
         self._rtData = self._rtData[1:]
         if self._isPeak:
             lm = self._rtData.findPeaks()
             for l in lm:
                 # only a local max exactly at the window center, above cutoff
                 if l[0] == wndwCenter and l[1] > self._cutoff:
                     if self.doCorr:
                         # optional second gate: correlation against template
                         # must beat mean - corrStdMult * std of the template fit
                         corrVal = np.correlate(self._rtData.normalize(), self._template)
                         thresh = self.corrThresh[0] - self.corrStdMult * self.corrThresh[1]
                         if corrVal[0] > thresh:
                             self.count += 1
                     else:
                         self.count += 1
         else:
             # valley mode: mirror of the peak branch with inverted cutoff test
             lm = self._rtData.findValleys()
             for l in lm:
                 if l[0] == wndwCenter and l[1] < self._cutoff:
                     if self.doCorr:
                         corrVal = np.correlate(self._rtData.normalize(), self._template)
                         thresh = self.corrThresh[0] - self.corrStdMult * self.corrThresh[1]
                         if corrVal[0] > thresh:
                             self.count += 1
                     else:
                         self.count += 1
     return self.count
开发者ID:RoGoSo,项目名称:SimpleCV,代码行数:33,代码来源:TemporalColorTracker.py


示例10: correlation

def correlation (results = [], bin_size = 100, N =1000):
	"""Average the normalized autocorrelation over all datasets in *results*.

	results: dict-like with 'max_ind' (dataset count) and string keys
	'0'..'max_ind-1' holding the raw data; only datasets longer than
	2*N+1 contribute. Returns (t_corr2, avg_corr): lag axis and the
	averaged, peak-normalized correlation (zero-lag forced to 0).

	NOTE(review): Python 2 code — print statement and integer division
	(nn/2) are relied on; do not run under Python 3 unmodified.
	NOTE(review): if no dataset is long enough, avg_corr is never bound
	and the line `avg_corr[N] = 0` raises NameError; t_corr2 likewise.
	"""
	wait_time=0.
	print 'N = ',N
	nr_datasets = results ['max_ind']
	ind = 0
	for counter in np.arange(nr_datasets):
		dati = results [str(counter)]
		if (len(dati)>2*N+1):
			# optionally coarse-grain the data before correlating
			# NOTE(review): bin_data is defined elsewhere in this module
			if (bin_size>1 ):
				b = bin_data (data = dati, bin_size = bin_size)
			else:
				b = dati

			# time axis: 20 us per raw sample plus the (zero) wait time
			t = np.arange(len(b))*bin_size*(20e-6+wait_time*1e-6)
			mu = np.mean(b)
			sigma = np.std(b)
			# full autocorrelation normalized by the zero-lag value
			corr = np.correlate (b-mu, b-mu, 'full')/(np.correlate(b-mu, b-mu)+0.)
			t_corr = (np.arange (len(corr))-len(corr)/2.)*(wait_time+20.)*1e-6*bin_size

			# keep only +/- N lags around the center
			nn = len(corr)
			corr2 = corr [nn/2-N:nn/2+N]
			t_corr2 = t_corr [nn/2-N:nn/2+N]

			# accumulate across datasets
			if (ind == 0):
				avg_corr = corr2
			else:
				avg_corr = avg_corr+corr2
			ind = ind + 1

	# suppress the zero-lag spike, then normalize to the remaining maximum
	avg_corr[N] = 0
	avg_corr = avg_corr/max(avg_corr)
	return t_corr2, avg_corr
开发者ID:machielblok,项目名称:analysis,代码行数:32,代码来源:quick_analysis_ramseys.py


示例11: calculate_maxcrosscorrelation

def calculate_maxcrosscorrelation(reference_signal, unknown_signal):

    '''
    function:
    ---------
    Score the similarity of two signals with the peak of their normalized
    cross correlation. Higher scores mean more similar signals; the score
    is used downstream to identify events.

    parameters:
    -----------
    @reference_signal: 150 unit numpy array, representing reference signal.
    @unknown_signal: 150 unit numpy array

    returns:
    --------
    @score: int between [0,1]; represents similarity between two curves.
    '''

    # https://stackoverflow.com/questions/1289415/what-is-a-good-r-value-when-comparing-2-signals-using-cross-correlation
    # normalize the cross-correlation peak by each signal's autocorrelation peak
    ref_peak = max(np.correlate(reference_signal, reference_signal, 'full'))
    unk_peak = max(np.correlate(unknown_signal, unknown_signal, 'full'))
    cross_peak = max(np.correlate(reference_signal, unknown_signal, 'full'))

    return (cross_peak ** 2) / float(ref_peak * unk_peak)
开发者ID:mynameisvinn,项目名称:gesture-recognition,代码行数:27,代码来源:tools.py


示例12: chickling_corr

def chickling_corr(shotno, date=None, bandwidth=40000):
	"""Plot the mean-phase traces of the CO2 and HeNe channels of a shot and
	their normalized cross-correlation.

	shotno: shot number passed to file_finder; date: YYYYMMDD string,
	defaulting to today; bandwidth: samples per averaging segment.

	BUG FIX: the original default was `date=time.strftime("%Y%m%d")`, which
	is evaluated ONCE at import time — a long-running session would keep
	using the import day's date. A None sentinel now defers the call.
	"""
	if date is None:
		date = time.strftime("%Y%m%d")

	fname, data = file_finder(shotno,date)

	samplesize = int(np.unwrap(data[0]['phasediff_co2']).size/bandwidth)
	phase_avr_co2 = np.zeros(samplesize)
	phase_avr_hene = np.zeros(samplesize)

	#reshape the array of x points (20M for 1s) into a 2d array each with 40k segments.
	phasediff_co2 = np.reshape(np.unwrap(data[0]['phasediff_co2'][0:(samplesize*bandwidth)]),(samplesize,bandwidth))
	phasediff_hene = np.reshape(np.unwrap(data[0]['phasediff_hene'][0:(samplesize*bandwidth)]),(samplesize,bandwidth))

	#for each horizontal column perform an average
	for i in range(0,samplesize):
		phase_avr_co2[i] = np.mean(phasediff_co2[i])
		phase_avr_hene[i] = np.mean(phasediff_hene[i])

	x = np.linspace(0,1,samplesize)
	plt.figure("2 Channels | Blue = Scene | Orange = Reference | Green = Cross-Correlation | shot " + str(shotno) +  " Date " + str(date))
	plt.xlabel("Time, s")
	plt.ylabel("Phase Difference, Radians")
	plt.plot(x,phase_avr_co2-np.average(phase_avr_co2))
	plt.plot(x,phase_avr_hene-np.average(phase_avr_hene))

	# normalized cross-correlation (Pearson-style scaling of the two traces)
	a = (phase_avr_co2 - np.mean(phase_avr_co2)) / (np.std(phase_avr_co2) * len(phase_avr_co2))
	b = (phase_avr_hene - np.mean(phase_avr_hene)) / (np.std(phase_avr_hene))
	yc = np.correlate(a, b, 'full')
	print(np.correlate(a, b, 'valid'))
	xc = np.linspace(0,1,yc.size)
	plt.plot(xc,yc)#,'o',ms=0.4)


示例13: linearCouplingCoeff2

def linearCouplingCoeff2(dataH, dataX, timeH, timeX, transFnXtoH, segStartTime,
			segEndTime, timeShift, samplFreq, logFid, debugLevel):
  # LINEARCOUPLINGCOEFF - normalized cross-correlation coefficient between the
  # gravitational-wave channel H and the "projected" instrumental channel X
  # over [segStartTime, segEndTime), with X's time axis shifted by timeShift.
  # Returns [rXH, rMaxXH] (both 1-element arrays) on success; on an input
  # error it only logs to logFid and implicitly returns None.
  rXH = np.asarray([])
  rMaxXH = np.asarray([])
  if (len(dataH) == 0) | (len(dataX) == 0):
    logFid.write('Error: One or more data vectors are empty..\n')
    logFid.write('Error: len(dataH) = %d len(dataX) = %d..\n' %(len(dataH), len(dataX[0])))
  elif len(dataH) != len(dataX[0]):
    logFid.write('Error: Different lengths. len(dataH) = %d len(dataX) = %d..\n'%(len(dataH), len(dataX[0])))
  else:
    # X arrives wrapped in a sequence; use its first row
    hSeg = dataH
    xSeg = dataX[0]

    # restrict both channels to the analysis segment (X shifted by timeShift)
    hMask = (timeH >= segStartTime) & (timeH < segEndTime)
    hSeg = hSeg[np.nonzero(hMask)[0]]

    xShifted = timeX + timeShift
    xMask = (xShifted >= segStartTime) & (xShifted < segEndTime)
    xSeg = xSeg[np.nonzero(xMask)[0]]

    # zero-lag correlation, normalized by the geometric mean of the energies
    numerator = np.correlate(hSeg, xSeg)
    denominator = np.sqrt(np.correlate(hSeg, hSeg)*np.correlate(xSeg, xSeg))
    a = numerator/denominator
    rXH = np.append(rXH, a)
    rMaxXH = np.append(rMaxXH, a)
    return [rXH, rMaxXH]
开发者ID:sudughonge,项目名称:bcv,代码行数:32,代码来源:bcv.py


示例14: aligndata

def aligndata(baselineremoved, brightest, pulsar):
    """Rotationally align pulse profiles to a common template.

    baselineremoved: (nbins, nprofiles) array of baseline-subtracted
    profiles; brightest: column index of the template profile; pulsar:
    name used for the diagnostic plot path './<pulsar>/<pulsar>_brightest.png'.

    Two passes: first align everything to the brightest profile (peak
    rotated to nbins/4), then re-align against the median profile using a
    doubled copy of each profile. Returns (aligned, template) arrays.
    """
    nbins = baselineremoved.shape[0]
    nprofiles = baselineremoved.shape[1]
    # pass 1 template: brightest profile with its peak rotated to 1/4
    template = baselineremoved[:,brightest]
    rotation = int(nbins/4) - np.argmax(template)
    template = np.roll(template, rotation)
    aligned = np.zeros((nbins,nprofiles))
    plt.plot(template)
    plt.savefig('./{0}/{0}_brightest.png' .format(pulsar))
    plt.clf()
    for prof in range(nprofiles):
        shift = np.argmax(np.correlate(template,baselineremoved[:,prof],"full"))
        aligned[:,prof] = np.roll(baselineremoved[:,prof],shift)
    # pass 2: refine against the median profile, peak again forced to 1/4;
    # correlating against a doubled profile avoids wrap-around edge effects
    template = np.median(aligned,1)
    rotation = int(nbins/4) - np.argmax(template)
    double = np.zeros(2*nbins)
    for prof in range(nprofiles):
        double[0:nbins] = baselineremoved[:,prof]
        double[nbins:2*nbins] = baselineremoved[:,prof]
        shift = np.argmax(np.correlate(template,double,"full")) + rotation
        aligned[:,prof] = np.roll(baselineremoved[:,prof],shift)
        refined = np.median(aligned,1)
    return np.array(aligned), np.array(refined)
开发者ID:ArisKarastergiou,项目名称:Vpsr,代码行数:31,代码来源:Vfunctions.py


示例15: correlationIndividual

def correlationIndividual(data, idx = (0,1), cls = -1, delay = (-100, 100)):
  """Calculate corrs and auto correlation in time between the various measures"""

  n = len(idx);  
  means = np.mean(data[:,:-1], axis = 0);
  
  nd = delay[1] - delay[0] + 1;
  
  cc = np.zeros((nd,n,n))
  for i in range(n):
    for j in range(n):
        if delay[0] < 0:
          cm = np.correlate(data[:, i] - means[i], data[-delay[0]:, j] - means[j]);
        else:
          cm = [0];
        
        if delay[1] > 0:
          cp = np.correlate(data[:, j] - means[j], data[delay[1]:, i] - means[i]);
        else:
          cp = [0];
        
        ca = np.concatenate((cm[1:], cp[::-1]));
        
        if delay[0] > 0:
          cc[:,i,j] = ca[delay[0]:];
        elif delay[1] < 0:
          cc[:,i,j] = ca[:-delay[1]];
        else:
          cc[:,i,j] = ca;
  
  return cc;
开发者ID:ChristophKirst,项目名称:CElegansBehaviour,代码行数:31,代码来源:tools.py


示例16: _corr_ax1

def _corr_ax1(input_image):
    """
    Internal helper function that finds the best estimate for the
    location of the vertical mirror plane.  For each row the maximum
    of the correlating with it's mirror is found.  The most common value
    is reported back as the location of the mirror plane.

    Parameters
    ----------
    input_image : ndarray
        The input image

    Returns
    -------
    vals : ndarray
        histogram of what pixel has the highest correlation

    bins : ndarray
        Bin edges for the vals histogram
    """
    dim = input_image.shape[1]
    m_ones = np.ones(dim)
    norm_mask = np.correlate(m_ones, m_ones, mode='full')
    # not sure that the /2 is the correct correction
    est_by_row = [np.argmax(np.correlate(v, v[::-1],
                                         mode='full')/norm_mask) / 2
             for v in input_image]
    return np.histogram(est_by_row, bins=np.arange(0, dim + 1))
开发者ID:giltis,项目名称:scikit-xray,代码行数:28,代码来源:image.py


示例17: msd_fast

def msd_fast(trajs):
    """Mean squared displacement of 2D trajectories, averaged over particles.

    trajs: array of shape (N, T, 2) — N particles, T time steps, (x, y).
    Uses the identity MSD(tau) = <r(t+tau)^2> - 2<r(t)·r(t+tau)> + <r(t)^2>,
    with the cross term computed as an autocorrelation, so each particle
    costs O(T^2) instead of the naive O(T^2) per-lag pair loop per axis.
    Returns an array of length T (lag 0 .. T-1).

    COMPATIBILITY FIX: the original used Python-2-only `xrange`; `range`
    is behaviorally identical here and runs on both versions.
    """
    T = trajs.shape[1]
    N = trajs.shape[0]

    trajs2 = trajs ** 2

    msd = np.zeros((T))

    for n in range(N):
        r2 = np.zeros((T))
        rtau2 = np.zeros((T))

        # compute sums over squares of positions for r(t) and r(t+tau)
        for tau in range(T):
            r2[tau] = np.sum(trajs2[n][: (T - tau), 0] + trajs2[n][: (T - tau), 1])
            rtau2[tau] = np.sum(trajs2[n][tau:, 0] + trajs2[n][tau:, 1])

        # cross term <r(t)·r(t+tau)> from the positional autocorrelation
        corx = np.correlate(trajs[n][:, 0], trajs[n][:, 0], mode="full")[T - 1 :]
        cory = np.correlate(trajs[n][:, 1], trajs[n][:, 1], mode="full")[T - 1 :]
        cor = corx + cory

        # per-lag average: lag tau has T - tau contributing pairs
        msd += (rtau2 - 2 * cor + r2) / np.arange(T, 0, -1)

    msd = msd / N

    return msd


示例18: plot_wavenvelope

    def plot_wavenvelope(self, ax, w_start, w_end):

        """ Plot the amplitude envelope of the recording with the analysis
        window marked.

        The envelope is the sliding standard deviation of the signal over a
        50 ms window (computed as sqrt(E[x^2] - E[x]^2) via correlation with
        a box kernel), scaled by sqrt(2).

        :param ax: The axis in which you wish to plot.
        :param w_start: Start of the best window (same units as self._time).
        :param w_end: End of the best window.
        """
        window_size = int(0.05 * self._sample_rate)  # 0.050 are 50 milliseconds for the envelope window!
        # box kernel for the sliding mean / mean-square
        w = 1.0 * np.ones(window_size) / window_size
        envelope = (np.sqrt((np.correlate(self._eod ** 2, w, mode='same') -
                    np.correlate(self._eod, w, mode='same') ** 2)).ravel()) * np.sqrt(2.)
        upper_bound = np.max(envelope) + np.percentile(envelope, 1)
        # decimate by 500 for plotting speed
        ax.fill_between(self._time[::500], y1=-envelope[::500], y2=envelope[::500], color='purple', alpha=0.5)
        # dashed verticals delimit the analysis window
        ax.plot((w_start, w_start), (-upper_bound, upper_bound), 'k--', linewidth=2)
        ax.plot((w_end, w_end), (-upper_bound, upper_bound), 'k--', linewidth=2)
        ax.text((w_start + w_end) / 2., upper_bound - np.percentile(envelope, 10), 'Analysis Window',
                rotation='horizontal', horizontalalignment='center', verticalalignment='center', fontsize=14)

        ax.set_ylim(-upper_bound, upper_bound)
        ax.set_xlabel('Time [s]', fontsize=16)
        ax.set_ylabel('Signal Amplitude [au]', fontsize=16)
        ax.tick_params(axis='both', which='major', labelsize=14)

        pass
开发者ID:fabiansinz,项目名称:thunderfish,代码行数:25,代码来源:FishRecording.py


示例19: correlateData

	def correlateData(self,frameLimit):
		"""Read frameLimit DRX frames from self.fh and accumulate the
		correlation of each frame's I/Q data against self.template into the
		per-tuning real/imag result lists, printing a simple progress bar.

		Advances self.startFrame by frameLimit so successive calls continue
		where the last one stopped.

		NOTE(review): Python 2 code (print statement, xrange). `sample` and
		`startTime` are assigned but never used. 4128 is presumably the DRX
		frame size in bytes — confirm against the drx module.
		"""
		sample = []
		self.fh.seek((self.startFrame)*4128,0)
		steps = frameLimit/10
		totalTime = datetime.now()
		print 'Correlating [          ]',
		print '\b'*12,
		sys.stdout.flush()
		for p in xrange(frameLimit):
			startTime = datetime.now()
			frame = drx.readFrame(self.fh)

			# frames are routed by tuning ID (parseID()[1])
			if frame.parseID()[1] == 1:
				self.realTune1 = self.realTune1 + numpy.correlate(frame.data.iq.real,self.template).tolist()
				self.imagTune1 = self.imagTune1 + numpy.correlate(frame.data.iq.imag,self.template).tolist()
			else:
				self.realTune2 = self.realTune2 + numpy.correlate(frame.data.iq.real,self.template).tolist()
				self.imagTune2 = self.imagTune2 + numpy.correlate(frame.data.iq.imag,self.template).tolist()
			# advance the 10-segment progress bar
			if p%steps == 0:
				print '\b=',
				sys.stdout.flush()
		print '\b] Done'
		self.startFrame += frameLimit
		#self.fh.close()
		print 'Read time: ' + str(datetime.now() - totalTime)


示例20: adjust_for_phase_and_lag

 def adjust_for_phase_and_lag(self):
     """Rotate and shift self.cur_signal_up to best match the reference CIR.

     For every candidate phase in self.phases_vec: apply the rotation, find
     the lag that maximizes the correlation with the (filtered or raw)
     reference CIR, shift by that lag, and record the resulting l2-norm of
     the residual. The phase/lag pair with the smallest residual is then
     applied to self.cur_signal_up in place.
     """
     # store a list of the l2-norms and the lags for each phase value
     norm_list = np.zeros((self.phases_vec.size,2))
     
     # loop through each phase value
     for pp in range(self.phases_vec.size):
         y = self.cur_signal_up.copy()
         y = y*np.exp(1j*self.phases_vec[pp])
         
         # Adjust for magnitude setting ('m' -> compare magnitudes only)
         if self.sig_type == 'm':
             y = self.sig_mag(y)
         
         # compute correlation against the averaged or reference CIR
         if self.filter_on:
             cur_corr = np.correlate(y,self.avg_complex_cir,mode='full')
         else:
             cur_corr = np.correlate(y,self.ref_complex_cir,mode='full')
         # best lag = position of the correlation magnitude peak, negated
         # via the precomputed lag axis self.lag_vec
         opt_lag = -self.lag_vec[np.argmax(self.sig_mag(cur_corr)).flatten()[0]]
         norm_list[pp,0] = opt_lag
         
         # Shift the signal to adjust for any lag (zero-pad the vacated end)
         if opt_lag > 0:
             y = np.array(((0.+1j*0.)*np.ones(opt_lag)).tolist() + y[0:-opt_lag].tolist())
         elif opt_lag < 0:
             y = np.array(y[-opt_lag:].tolist() + ((0.+1j*0.)*np.ones(-opt_lag)).tolist())
         
         # Adjust for magnitude setting
         if self.sig_type == 'm':
             y = self.sig_mag(y)
         
         # Compute the l2-norm of the residual against the same reference
         if self.filter_on:
             tmp = y - self.avg_complex_cir
         else:
             tmp = y - self.ref_complex_cir
         
         # Save l2-norm to list
         norm_list[pp,1] = self.sig_mag(tmp).sum()
     
     # Get the index of the smallest l2-norm
     min_idx = np.argmin(norm_list[:,1]).flatten()[0]
     
     # Re-apply the winning phase and lag to a fresh copy of the signal
     y = self.cur_signal_up.copy()
     y = y*np.exp(1j*self.phases_vec[min_idx])
     opt_lag = norm_list[min_idx,0]
     # NOTE(review): opt_lag read back from the float norm_list is a float;
     # np.ones(opt_lag) and y[0:-opt_lag] rely on older NumPy/Python
     # accepting float counts and slice bounds — confirm on the target stack.
     
     # Shift the signal to adjust for any lag
     if opt_lag > 0:
         self.cur_signal_up = np.array(((0+1j*0)*np.ones(opt_lag)).tolist() + y[0:-opt_lag].tolist())
     elif opt_lag < 0:
         self.cur_signal_up = np.array(y[-opt_lag:].tolist() + ((0+1j*0)*np.ones(-opt_lag)).tolist())
     else:
         self.cur_signal_up = y.copy()
         
     # Adjust for magnitude setting
     if self.sig_type == 'm':
         self.cur_signal_up = self.sig_mag(self.cur_signal_up)



注:本文中的numpy.correlate函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python numpy.cos函数代码示例发布时间:2022-05-27
下一篇:
Python numpy.corrcoef函数代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap