The following code examples, extracted from open-source Python projects, illustrate how to use matplotlib.pyplot.stem().
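Before the extracted examples, here is a minimal, self-contained sketch of plt.stem; the signal and styling choices are illustrative and not taken from any of the projects below:

import numpy as np
import matplotlib.pyplot as plt

# Sample a damped sine at discrete points and draw it as a stem plot.
n = np.arange(32)
x = np.exp(-0.05 * n) * np.sin(0.4 * np.pi * n)

# stem() returns the marker line, the stem lines, and the baseline;
# linefmt/markerfmt/basefmt control their colors and line styles.
markerline, stemlines, baseline = plt.stem(n, x, linefmt='b-', markerfmt='bo', basefmt='k-')

plt.xlabel('n')
plt.ylabel('x[n]')
plt.title('Discrete signal drawn with plt.stem')
plt.show()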
def _periodogram_plot(title, column, data, trend, peaks):
    """display periodogram results using matplotlib"""
    periods, power = periodogram(data)

    plt.figure(1)
    plt.subplot(311)
    plt.title(title)
    plt.plot(data, label=column)
    if trend is not None:
        plt.plot(trend, linewidth=3, label="broad trend")
        plt.legend()
        plt.subplot(312)
        plt.title("detrended")
        plt.plot(data - trend)
    else:
        plt.legend()
        plt.subplot(312)
        plt.title("(no detrending specified)")
    plt.subplot(313)
    plt.title("periodogram")
    plt.stem(periods, power)
    for peak in peaks:
        period, score, pmin, pmax = peak
        plt.axvline(period, linestyle='dashed', linewidth=2)
        plt.axvspan(pmin, pmax, alpha=0.2, color='b')
        plt.annotate("{}".format(period), (period, score * 0.8))
        plt.annotate("{}...{}".format(pmin, pmax), (pmin, score * 0.5))
    plt.tight_layout()
    plt.show()
def plot_temporal_configuration(self, t_start=None, t_end=None):
    if t_start is None:
        t_start = 0.0  # s
    if t_end is None:
        t_end = self.duration  # s
    plt.figure()
    plt.subplot(1, 1, 1)
    # Plot spike times of each cell
    for cell in self.cells.itervalues():
        m = np.logical_and(t_start <= cell.times, cell.times < t_end)
        x = cell.times[m]
        y = (float(cell.id) + 0.8) * np.ones_like(x)
        bottom = cell.id
        x = np.insert(x, [0, len(x)], [t_start, t_end])
        y = np.insert(y, [0, len(y)], [bottom] * 2)
        markerline, stemlines, baseline = plt.stem(x, y, bottom=bottom)
        plt.setp(markerline, 'marker', None)
        plt.setp(markerline, 'color', cell.color)
        plt.setp(stemlines, 'color', cell.color)
        plt.setp(baseline, 'color', cell.color)
    yticks = [cell.id for cell in self.cells.itervalues()]
    ylabels = [str(cell.id) for cell in self.cells.itervalues()]
    plt.yticks(yticks, ylabels)
    plt.xlim(t_start, t_end)
    plt.ylim(-0.1, float(len(self.cells)) - 0.1)
    plt.xlabel(r"time $(s)$")
    plt.ylabel(r"cell")
    plt.title(r"Temporal configuration")
    plt.tight_layout()
    return
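The example above, like the training() example at the end of this page, restyles the objects returned by plt.stem rather than relying on format strings. A minimal sketch of that pattern, with made-up data, might look like this:

import numpy as np
import matplotlib.pyplot as plt

values = np.random.rand(20)

# plt.stem returns the marker line, the stem lines, and the baseline,
# which can be restyled after the fact with plt.setp.
markerline, stemlines, baseline = plt.stem(values)
plt.setp(markerline, 'markerfacecolor', 'r')  # red marker faces
plt.setp(stemlines, 'color', 'gray')          # gray vertical stems
plt.setp(baseline, 'color', 'k')              # black baseline
plt.show()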
def getmeasurements(a, x, noisetype, var=1, outlierproportion=0):
    import numpy as np
    import pickle
    import sys  # to be able to exit
    import matplotlib.pyplot as plt
    # import statistics as st

    measurementsize = a.shape[0]  # number of measurements
    y = np.dot(a, x)  # noiseless measurements

    if noisetype == 'none':  # noiseless case
        n = np.zeros((measurementsize, 1))  # zero noise
    elif noisetype == 'gaussian':  # gaussian noise
        n = var * np.random.randn(measurementsize, 1)
    elif noisetype == 'outliers':  # gaussian noise
        # additive Gaussian noise
        n = var * np.random.randn(measurementsize, 1)
        p = np.random.permutation(measurementsize)
        # how many measurements are outliers
        noutliers = int(np.round(outlierproportion * measurementsize))
        outindex = p[0:noutliers]  # getting random indexes for the outliers
        # the outliers have a variance ten times larger than clean data
        n[outindex] = np.var(y) * 10 * np.random.randn(noutliers, 1)
    else:  # unknown type of additive noise
        sys.exit('unknown noise type %s' % noisetype)  # die gracefully

    yn = y + n  # get the measurements
    yn = np.asarray(yn)

    # plt.stem(n, 'b')
    # plt.show(block=True)
    # plt.stem(y, 'kd-')
    # plt.stem(yn, 'rs--')
    # plt.show()  # show figure

    return yn  # what we were asked to deliver


# -------------------------------------------------------------------
# Score functions for the robust regressors
# -------------------------------------------------------------------
def compare_arrays(array1, array2, decimal_tolerance=7, plot_key_errors=False):
    """
    Compare two arrays and return indexes and values with differences.
    Arrays to compare must have the same length.

    Parameters
    ----------
    array1: numpy array
        Numpy array to compare (real and complex data).
    array2: numpy array
        Numpy array to compare (real and complex data).
    decimal_tolerance: int
        Number of digits of decimal part to be compared. DEFAULT: 7.
    plot_key_errors: boolean
        If true: plot results over generic 1D plot. If false, plot is not done.
        DEFAULT: False.

    Returns
    -------
    diff_dict: dict
        Dictionary containing as keys the indexes with errors, and as values
        the values with errors.
    """
    # Dictionary with encountered differences.
    diff_dict = {}

    # Iterate over arrays and find differences.
    for i in range(np.size(array1)):
        # If there is an error, save the values.
        if np.around(array1[i] - array2[i], decimal_tolerance) != 0 + 0j:
            diff_dict[i] = [array1[i], array2[i]]

    # Not used, uncomment to use ->
    # if plot_key_errors:
    #     plt.stem(np.arange(len(diff_dict.keys())), diff_dict.keys())
    #     plt.show()

    return diff_dict
def diagnostics(self, tmin=None, tmax=None, show=True):
    innovations = self.ml.innovations(tmin, tmax)
    fig = self._get_figure()
    gs = plt.GridSpec(2, 3, wspace=0.2)

    plt.subplot(gs[0, :2])
    plt.title('Autocorrelation')
    # plt.axhline(0.2, '--')
    r = ps.stats.acf(innovations)
    plt.stem(r)

    plt.subplot(gs[1, :2])
    plt.title('Partial Autocorrelation')
    # plt.axhline(0.2, '--')
    # plt.stem(self.ml.stats.pacf())

    plt.subplot(gs[0, 2])
    innovations.hist(bins=20)

    plt.subplot(gs[1, 2])
    probplot(innovations, plot=plt)

    if show:
        plt.show()

    return fig.axes
def plot_auto_corr(s, lags=range(1, 200)):
    """Plot the autocorrelation coefficients of s for the given lags."""
    corr_coefs = auto_corr(s, lags)
    plt.figure()
    plt.stem(corr_coefs)
    plt.title('Auto Correlation')
    return plt
def plot_partial_corr(s, lags=range(1, 100)):
    """Plot the partial autocorrelation coefficients of s for the given lags."""
    partial_coefs = partial_corrs(s, lags)
    plt.figure()
    plt.stem(partial_coefs)
    plt.title('Partial Correlation')
    return plt
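The last three examples all pass a sequence of correlation coefficients to plt.stem. A self-contained sketch of the same idea, using only numpy to compute a sample autocorrelation (the helper and the toy series below are illustrative, not from any of these projects):

import numpy as np
import matplotlib.pyplot as plt

def sample_acf(s, nlags):
    """Sample autocorrelation of a 1-D array for lags 1..nlags."""
    s = np.asarray(s, dtype=float)
    s = s - s.mean()
    denom = np.sum(s * s)
    return np.array([np.sum(s[k:] * s[:-k]) / denom for k in range(1, nlags + 1)])

# AR(1)-like toy series: each value carries over part of the previous one.
rng = np.random.default_rng(0)
x = np.zeros(500)
for t in range(1, len(x)):
    x[t] = 0.7 * x[t - 1] + rng.normal()

plt.stem(np.arange(1, 41), sample_acf(x, 40))
plt.title('Sample autocorrelation')
plt.xlabel('lag')
plt.show()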
def show_both(self, c):
    X1part = self.X1part
    X2part = self.X2part
    y = self.y
    cell = self.cell
    X1_ylim = self.X1_ylim
    X2_ylim = self.X2_ylim
    cmethod = self.cmethod
    cparam_d = self.cparam_d

    # print("Cluster:", c)
    X3_int = X2part[np.where(y == c)[0], :]
    X3_vel = X1part[np.where(y == c)[0], :]

    # km = cluster.KMeans(2)
    # km = getattr(cluster, cmethod)(2)
    km = getattr(cluster, cmethod)(**cparam_d)
    y3 = km.fit_predict(X3_int)

    plt.figure(figsize=(9, 4))
    plt.subplot(1, 2, 1)
    # print("Intensity")
    n_0 = X3_int[np.where(y3 == 0)[0]].shape[0]
    n_1 = X3_int[np.where(y3 == 1)[0]].shape[0]
    sns.tsplot(X3_int[np.where(y3 == 0)[0], :], color="blue")
    sns.tsplot(X3_int[np.where(y3 == 1)[0], :], color="green")
    plt.ylim(X2_ylim)
    plt.title("Cluster{0}:X2 {1}:{2}".format(c, n_0, n_1))
    # plt.show()

    plt.subplot(1, 2, 2)
    # print("Velocity")
    sns.tsplot(X3_vel[np.where(y3 == 0)[0], :], color="blue")
    sns.tsplot(X3_vel[np.where(y3 == 1)[0], :], color="green")
    plt.ylim(X1_ylim)
    plt.title("Cluster{0}:X1 {1}:{2}".format(c, n_0, n_1))
    plt.show()

    cell3 = cell[np.where(y == c)[0]]
    plt.subplot(1, 2, 1)
    plt.stem(cell3[np.where(y3 == 0)[0]], linefmt='b-', markerfmt='bo')
    plt.title("Cell Index - Subcluster 1")
    plt.subplot(1, 2, 2)
    plt.stem(cell3[np.where(y3 == 1)[0]], linefmt='g-', markerfmt='go')
    plt.title("Cell Index - Subcluster 2")
    plt.show()

    return y3
def training(nfiltbank, orderLPC):
    nSpeaker = 8
    nCentroid = 16
    codebooks_mfcc = np.empty((nSpeaker, nfiltbank, nCentroid))
    codebooks_lpc = np.empty((nSpeaker, orderLPC, nCentroid))
    directory = os.getcwd() + '/train'
    fname = str()

    for i in range(nSpeaker):
        fname = '/s' + str(i + 1) + '.wav'
        print('Now speaker', str(i + 1), 'features are being trained')
        (fs, s) = read(directory + fname)
        mel_coeff = mfcc(s, fs, nfiltbank)
        lpc_coeff = lpc(s, fs, orderLPC)
        codebooks_mfcc[i, :, :] = lbg(mel_coeff, nCentroid)
        codebooks_lpc[i, :, :] = lbg(lpc_coeff, nCentroid)

        plt.figure(i)
        plt.title('Codebook for speaker ' + str(i + 1) + ' with ' + str(nCentroid) + ' centroids')
        for j in range(nCentroid):
            plt.subplot(211)
            plt.stem(codebooks_mfcc[i, :, j])
            plt.ylabel('MFCC')
            plt.subplot(212)
            markerline, stemlines, baseline = plt.stem(codebooks_lpc[i, :, j])
            plt.setp(markerline, 'markerfacecolor', 'r')
            plt.setp(baseline, 'color', 'k')
            plt.ylabel('LPC')
            plt.axis(ymin=-1, ymax=1)
            plt.xlabel('Number of features')
    plt.show()
    print('Training complete')

    # plotting 5th and 6th dimension MFCC features on a 2D plane
    # comment lines 54 to 71 if you don't want to see codebook
    codebooks = np.empty((2, nfiltbank, nCentroid))
    mel_coeff = np.empty((2, nfiltbank, 68))
    for i in range(2):
        fname = '/s' + str(i + 2) + '.wav'
        (fs, s) = read(directory + fname)
        mel_coeff[i, :, :] = mfcc(s, fs, nfiltbank)[:, 0:68]
        codebooks[i, :, :] = lbg(mel_coeff[i, :, :], nCentroid)

    plt.figure(nSpeaker + 1)
    s1 = plt.scatter(mel_coeff[0, 6, :], mel_coeff[0, 4, :], s=100, color='r', marker='o')
    c1 = plt.scatter(codebooks[0, 6, :], codebooks[0, 4, :], s=100, color='r', marker='+')
    s2 = plt.scatter(mel_coeff[1, 6, :], mel_coeff[1, 4, :], s=100, color='b', marker='o')
    c2 = plt.scatter(codebooks[1, 6, :], codebooks[1, 4, :], s=100, color='b', marker='+')
    plt.grid()
    plt.legend((s1, s2, c1, c2), ('Sp1', 'Sp2', 'Sp1 centroids', 'Sp2 centroids'),
               scatterpoints=1, loc='upper left')
    plt.show()

    return (codebooks_mfcc, codebooks_lpc)