Python matplotlib.pylab module, xlabel() example source code
We extracted the following 50 code examples from open-source Python projects to illustrate how to use matplotlib.pylab.xlabel().
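As a quick orientation before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing the basic xlabel()/ylabel() pattern they all share:
import numpy as np
import matplotlib.pylab as pl

# minimal sketch: label the axes of a simple sine plot
x = np.linspace(0, 2 * np.pi, 200)
pl.plot(x, np.sin(x))
pl.xlabel('x (radians)')   # sets the x-axis label of the current axes
pl.ylabel('sin(x)')
pl.show()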
def hrd_key(self, key_str):
"""
plot an HR diagram
Parameters
----------
key_str : string
A label string
"""
pyl.plot(self.data[:,self.cols['log_Teff']-1],\
self.data[:,self.cols['log_L']-1],label = key_str)
pyl.legend()
pyl.xlabel('log Teff')
pyl.ylabel('log L')
x1,x2=pl.xlim()
if x2 > x1:
self._xlimrev()
def compare_images(path = '.'):
S_limit = 10.
file_list = glob.glob(os.path.join(path, 'Abu*'))
file_list_master = glob.glob(os.path.join(path, 'MasterAbu*'))
file_list.sort()
file_list_master.sort()
S=[]
print("Identifying images with rmq > "+'%3.1f'%S_limit)
ierr_count = 0
for i in range(len(file_list)):
this_S,fimg1,fimg2 = compare_entropy(file_list[i],file_list_master[i])
if this_S > S_limit:
warnings.warn(file_list[i]+" and "+file_list_master[i]+" differ by "+'%6.3f'%this_S)
ierr_count += 1
S.append(this_S)
if ierr_count > 0:
print("Error: at least one image differs by more than S_limit")
sys.exit(1)
#print ("S: ",S)
#plb.plot(S,'o')
#plb.xlabel("image number")
#plb.ylabel("modified log KL-divergence to previous image")
#plb.show()
def plot_volcano(logFC,p_val,sample_name,saveName,logFC_thresh):
fig=pl.figure()
## To plot and save
pl.scatter(logFC[(p_val>0.05)|(abs(logFC)<logFC_thresh)],-np.log10(p_val[(p_val>0.05)|(abs(logFC)<logFC_thresh)]),color='blue',alpha=0.5);
pl.scatter(logFC[(p_val<0.05)&(abs(logFC)>logFC_thresh)],-np.log10(p_val[(p_val<0.05)&(abs(logFC)>logFC_thresh)]),color='red');
pl.hlines(-np.log10(0.05),min(logFC),max(logFC))
pl.vlines(-logFC_thresh,min(-np.log10(p_val)),max(-np.log10(p_val)))
pl.vlines(logFC_thresh,min(-np.log10(p_val)),max(-np.log10(p_val)))
pl.xlim(-3,3)
pl.xlabel('Log Fold Change')
pl.ylabel('-log10(p-value)')
pl.savefig(saveName)
pl.close(fig)
# def plot_histograms(df_peaks,pntr_list):
#
# for pntr in pntr_list:
# colName =pntr[2]+'_Intragenic_position'
# pl.hist(df_peaks[colName])
# pl.xlabel(colName)
# pl.ylabel()
# pl.show()
def plot(l, x1, x2, y, e):
# Plot
time_range = numpy.arange(0, l)
pl.figure(1)
pl.subplot(221)
pl.plot(time_range, x1)
pl.title("Input signal")
pl.subplot(222)
pl.plot(time_range, x2, c="r")
pl.plot(time_range, y, c="b")
pl.title("Reference signal")
pl.subplot(223)
pl.plot(time_range, e, c="r")
pl.title("Noise")
pl.xlabel("time")
pl.show()
def make_fft_graph(fft, corre):
fft_np = numpy.array(fft).swapaxes(0, 1).swapaxes(1, 2)
channel_N, freq_N, sample_N = fft_np.shape
if (channel_N > 6): # We don't have space for more than 6 channels
return
fig, axes = plt.subplots(2, 3)
fig.subplots_adjust(hspace=0.3, wspace=0.05)
for ax, mat, i in zip(axes.flat, fft_np, range(1, channel_N + 1)):
fft_abs = numpy.abs(mat)
fft_less_row = fft_abs[0::20]
n = freq_N // 20
fft_sqr = numpy.repeat(fft_less_row, int(n / sample_N)).reshape([n, n])
ax.matshow(fft_sqr, cmap='viridis')
plt.xlabel('time')
plt.ylabel('freq')
ax.set_title('Channel {0}'.format(i))
plt.show()
print("Plotted.")
def plot(traj, x, y, **kwargs):
""" Create a matplotlib plot of property x against property y
Args:
traj: object whose attributes hold the property arrays
x,y (str): names of the properties
**kwargs (dict): kwargs for :meth:`matplotlib.pylab.plot`
Returns:
List[matplotlib.lines.Line2D]: the lines that were plotted
"""
from matplotlib import pylab
xl = yl = None
if type(x) is str:
strx = x
x = getattr(traj, x)
xl = '%s / %s' % (strx, getattr(x, 'units', 'dimensionless'))
if type(y) is str:
stry = y
y = getattr(traj, y)
yl = '%s / %s' % (stry, getattr(y, 'units', 'dimensionless'))
plt = pylab.plot(x, y, **kwargs)
pylab.xlabel(xl); pylab.ylabel(yl); pylab.grid()
return plt
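A hypothetical call to the helper above could look like this; the SimpleNamespace stand-in and the property names time and kinetic_energy are invented for illustration and are not part of the original project:
from types import SimpleNamespace
import numpy as np

# stand-in for a trajectory object: two array-valued properties
traj = SimpleNamespace(time=np.linspace(0.0, 10.0, 50),
                       kinetic_energy=np.random.rand(50))
# axis labels fall back to 'dimensionless' because the arrays carry no units attribute
plot(traj, 'time', 'kinetic_energy', color='k')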
def plot_confusion_matrix(cm, label_list, title='Confusion matrix', cmap=None):
from matplotlib import pylab
cm = np.asarray(cm, dtype=np.float32)
for i, row in enumerate(cm):
cm[i] = cm[i] / np.sum(cm[i])
#import matplotlib.pyplot as plt
#plt.ion()
pylab.clf()
pylab.matshow(cm, fignum=False, cmap='Blues', vmin=0, vmax=1.0)
ax = pylab.axes()
ax.set_xticks(range(len(label_list)))
ax.set_xticklabels(label_list, rotation='vertical')
ax.xaxis.set_ticks_position('bottom')
ax.set_yticks(range(len(label_list)))
ax.set_yticklabels(label_list)
pylab.title(title)
pylab.colorbar()
pylab.grid(False)
pylab.xlabel('Predicted class')
pylab.ylabel('True class')
pylab.grid(False)
pylab.savefig('test.jpg')
pylab.show()
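A hypothetical call to the function above, with made-up class labels and counts (the row normalization and the hard-coded 'test.jpg' output happen inside the function itself):
import numpy as np

# invented 3-class confusion matrix: rows are true classes, columns predicted
cm = np.array([[8, 1, 1],
               [2, 7, 1],
               [0, 2, 8]])
plot_confusion_matrix(cm, ['cat', 'dog', 'bird'], title='Demo confusion matrix')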
def plot_position(self, pos_true, pos_est):
N = pos_est.shape[1]
pos_true = pos_true[:, :N]
pos_est = pos_est[:, :N]
# Figure
plt.figure()
plt.suptitle("Position")
# Ground truth
plt.plot(pos_true[0, :], pos_true[1, :],
color="red", marker="o", label="Grouth truth")
# Estimated
plt.plot(pos_est[0, :], pos_est[1, :],
color="blue", marker="o", label="Estimated")
# Plot labels and legends
plt.xlabel("East (m)")
plt.ylabel("North (m)")
plt.axis("equal")
plt.legend(loc=0)
def plotLine(self, x_vals, y_vals, x_label, y_label, title, filename=None):
plt.clf()
plt.xlabel(x_label)
plt.xlim(((min(x_vals) - 0.5), (max(x_vals) + 0.5)))
plt.ylabel(y_label)
plt.ylim(((min(y_vals) - 0.5), (max(y_vals) + 0.5)))
plt.title(title)
plt.plot(x_vals, y_vals, c='k', lw=2)
#plt.plot(x_vals, len(x_vals) * y_vals[0], c='r', lw=2)
if filename is None:
plt.show()
else:
plt.savefig(self.outputPath + filename)
def plot_entropy():
pylab.clf()
pylab.figure(num=None, figsize=(5, 4))
title = "Entropy $H(X)$"
pylab.title(title)
pylab.xlabel("$P(X=$coin will show heads up$)$")
pylab.ylabel("$H(X)$")
pylab.xlim(xmin=0, xmax=1.1)
x = np.arange(0.001, 1, 0.001)
y = -x * np.log2(x) - (1 - x) * np.log2(1 - x)
pylab.plot(x, y)
# pylab.xticks([w*7*24 for w in [0,1,2,3,4]], ['week %i'%(w+1) for w in
# [0,1,2,3,4]])
pylab.autoscale(tight=True)
pylab.grid(True)
filename = "entropy_demo.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_clustering(x, y, title, mx=None, ymax=None, xmin=None, km=None):
pylab.figure(num=None, figsize=(8, 6))
if km:
pylab.scatter(x, y, s=50, c=km.predict(list(zip(x, y))))
else:
pylab.scatter(x, y, s=50)
pylab.title(title)
pylab.xlabel("Occurrence word 1")
pylab.ylabel("Occurrence word 2")
pylab.autoscale(tight=True)
pylab.ylim(ymin=0, ymax=1)
pylab.xlim(xmin=0, xmax=1)
pylab.grid(True, linestyle='-', color='0.75')
return pylab
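A hypothetical usage sketch, assuming scikit-learn is installed and that pylab is imported at module level as in the original project; the occurrence data are random placeholders:
import numpy as np
from sklearn.cluster import KMeans

# random word-occurrence frequencies in [0, 1]
rng = np.random.RandomState(0)
x, y = rng.rand(30), rng.rand(30)
km = KMeans(n_clusters=3, n_init=10).fit(list(zip(x, y)))
chart = plot_clustering(x, y, "Demo clustering", km=km)
chart.show()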
def plot_confusion_matrix(cm, genre_list, name, title):
pylab.clf()
pylab.matshow(cm, fignum=False, cmap='Blues', vmin=0, vmax=1.0)
ax = pylab.axes()
ax.set_xticks(range(len(genre_list)))
ax.set_xticklabels(genre_list)
ax.xaxis.set_ticks_position("bottom")
ax.set_yticks(range(len(genre_list)))
ax.set_yticklabels(genre_list)
pylab.title(title)
pylab.colorbar()
pylab.grid(False)
pylab.show()
pylab.xlabel('Predicted class')
pylab.ylabel('True class')
pylab.grid(False)
pylab.savefig(
os.path.join(CHART_DIR, "confusion_matrix_%s.png" % name), bbox_inches="tight")
def plot_roc(auc_score, name, tpr, fpr, label=None):
pylab.clf()
pylab.figure(num=None, figsize=(5, 4))
pylab.grid(True)
pylab.plot([0, 1], [0, 1], 'k--')
pylab.plot(fpr, tpr)
pylab.fill_between(fpr, tpr, alpha=0.5)
pylab.xlim([0.0, 1.0])
pylab.ylim([0.0, 1.0])
pylab.xlabel('False Positive Rate')
pylab.ylabel('True Positive Rate')
pylab.title('ROC curve (AUC = %0.2f) / %s' %
(auc_score, label), verticalalignment="bottom")
pylab.legend(loc="lower right")
filename = name.replace(" ", "_")
pylab.savefig(
os.path.join(CHART_DIR, "roc_" + filename + ".png"), bbox_inches="tight")
def plotKChart(self, misClassDict, saveFigPath):
kList = []
misRateList = []
for k, misClassNum in misClassDict.items():
kList.append(k)
misRateList.append(1.0 - 1.0/k*misClassNum)
fig = plt.figure(saveFigPath)
plt.plot(kList, misRateList, 'r--')
plt.title(saveFigPath)
plt.xlabel('k Num.')
plt.ylabel('Misclassified Rate')
plt.legend([saveFigPath])
plt.grid(True)
plt.savefig(saveFigPath)
plt.show()
################################### PART3 TEST ########################################
def show_feature_importance(gbdt, feature_names=None):
importance = gbdt.get_fscore(fmap='xgb.fmap')
importance = sorted(importance.items(), key=operator.itemgetter(1))
df = pd.DataFrame(importance, columns=['feature', 'fscore'])
df['fscore'] = df['fscore'] / df['fscore'].sum()
print "feature importance", df
if feature_names is not None:
used_features = df['feature']
unused_features = [f for f in feature_names if f not in used_features]
print "[IDF]Unused features:", str(unused_features)
plt.figure()
df.plot()
df.plot(kind='barh', x='feature', y='fscore', legend=False, figsize=(6, 10))
plt.title('XGBoost Feature Importance')
plt.xlabel('relative importance')
plt.gcf().savefig('feature_importance_xgb.png')
def backtest(config_file, day_trade):
cfg = config.Config(config_file)
cfg.day_trade = day_trade
dfs = load_data(config_file)
trender = strategies[cfg.strategy](**cfg.strategy_parameters)
res = []
for df in dfs:
res.append(trender.backtest(data_frame=df))
final_panel = pd.Panel({os.path.basename(p['path']): df for p, df in
zip(cfg.data_path, res)})
profit_series = final_panel.sum(axis=0)['total_profit'].cumsum()
final_panel.to_excel(cfg.output_file)
if cfg.show:
profit_series.plot()
plt.xlabel('Time')
plt.ylabel('Profit')
plt.legend(['Profit'])
plt.show()
def fit_data():
data=np.loadtxt('data.dat')
print(data)
params = dict()
params["c"] = {"min" : -np.inf,"max" : np.inf}
result = qudi_fitting.make_lorentzian_fit(axis=data[:,0], data=data[:,3], add_parameters=params)
print(result.fit_report())
plt.plot(data[:,0],-data[:,3]+2,"b-o",label="data mean")
# plt.plot(data[:,0],data[:,1],label="data")
# plt.plot(data[:,0],data[:,2],label="data")
plt.plot(data[:,0],-result.best_fit+2,"r-",linewidth=2.,label="fit")
# plt.plot(data[:,0],result.init_fit,label="init")
plt.xlabel("time (ns)")
plt.ylabel("polarization transfer (arb. u.)")
plt.legend(loc=1)
# plt.savefig("pol20_24repetition_pol.pdf")
# plt.savefig("pol20_24repetition_pol.png")
plt.show()
savedata=[[data[ii,0],-data[ii,3]+2,-result.best_fit[ii]+2] for ii in range(len(data[:,0]))]
np.savetxt("pol_data_fit.csv",savedata)
# print(result.params)
print(result.params)
def plot_penalty_vl(debug, tag, fold_exp):
plt.close("all")
vl = np.array(debug["penalty"])
fig = plt.figure(figsize=(15, 10.8), dpi=300)
names = debug["names"]
for i in range(vl.shape[1]):
if vl.shape[1] > 1:
plt.plot(vl[:, i], label="layer_"+str(names[i]))
else:
plt.plot(vl[:], label="layer_"+str(names[i]))
plt.xlabel("mini-batchs")
plt.ylabel("value of penlaty")
plt.title(
"Penalty value over layers:" + "_".join([str(k) for k in names]) +
". tag:" + tag)
plt.legend(loc='upper right', fancybox=True, shadow=True, prop={'size': 8})
plt.grid(True)
fig.savefig(fold_exp+"/penalty.png", bbox_inches='tight')
plt.close('all')
del fig
def plot_roc(y_test, y_pred, label=''):
"""Compute ROC curve and ROC area"""
fpr, tpr, _ = roc_curve(y_test, y_pred)
roc_auc = auc(fpr, tpr)
# Plot of a ROC curve for a specific class
plt.figure()
plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic' + label)
plt.legend(loc="lower right")
plt.show()
def plot_confusion_matrix(cm, plot_title, filename, genres=None):
if not genres:
genres = GENRES
pylab.clf()
pylab.matshow(cm, fignum=False, cmap='Blues', vmin=0, vmax=100.0)
axes = pylab.axes()
axes.set_xticks(range(len(genres)))
axes.set_xticklabels(genres, rotation=45)
axes.set_yticks(range(len(genres)))
axes.set_yticklabels(genres)
axes.xaxis.set_ticks_position("bottom")
pylab.title(plot_title, fontsize=14)
pylab.colorbar()
pylab.xlabel('Predicted class', fontsize=12)
pylab.ylabel('Correct class', fontsize=12)
pylab.grid(False)
#pylab.show()
pylab.savefig(os.path.join(PLOTS_DIR, "cm_%s.eps" % filename), bbox_inches="tight")
def plot_confusion_matrix(cm, label_list, title='Confusion matrix', cmap=None):
from matplotlib import pylab
cm = np.asarray(cm, dtype=np.float32)
for i, row in enumerate(cm):
cm[i] = cm[i] / np.sum(cm[i])
#import matplotlib.pyplot as plt
#plt.ion()
pylab.clf()
pylab.matshow(cm, fignum=False, cmap='Blues', vmin=0, vmax=1.0)
ax = pylab.axes()
ax.set_xticks(range(len(label_list)))
ax.set_xticklabels(label_list, rotation='vertical')
ax.xaxis.set_ticks_position('bottom')
ax.set_yticks(range(len(label_list)))
ax.set_yticklabels(label_list)
pylab.title(title)
pylab.colorbar()
pylab.grid(False)
pylab.xlabel('Predicted class')
pylab.ylabel('True class')
pylab.grid(False)
pylab.savefig('test.jpg')
pylab.show()
def plotSpeedupFigure(AllInfo, maxWorker=1, **kwargs):
pylab.figure(2)
xs = AllInfo['nWorker']
ts_mono = AllInfo['t_monolithic']
xgrid = np.linspace(0, maxWorker + 0.1, 100)
pylab.plot(xgrid, xgrid, 'y--', label='ideal parallel')
for method in getMethodNames(**kwargs):
speedupRatio = ts_mono / AllInfo['t_' + method]
pylab.plot(xs, speedupRatio, 'o-',
label=method,
color=ColorMap[method],
markeredgecolor=ColorMap[method])
pylab.xlim([-0.2, maxWorker + 0.5])
pylab.ylim([0, maxWorker + 0.5])
pylab.legend(loc='upper left')
pylab.xlabel('Number of Workers')
pylab.ylabel('Speedup over Monolithic')
def plotBoundVsAlph(alphaVals=np.linspace(.001, 3, 1000),
beta1=0.5):
exactVals = cD_exact(alphaVals, beta1)
boundVals = cD_bound(alphaVals, beta1)
assert np.all(exactVals >= boundVals)
pylab.plot(alphaVals, exactVals, 'k-', linewidth=LINEWIDTH)
pylab.plot(alphaVals, boundVals, 'r--', linewidth=LINEWIDTH)
pylab.xlabel("alpha", fontsize=FONTSIZE)
pylab.ylabel(" ", fontsize=FONTSIZE)
pylab.xlim([np.min(alphaVals) - 0.1, np.max(alphaVals) + 0.1])
pylab.ylim([np.min(exactVals) - 0.05, np.max(exactVals) + 0.05])
pylab.xticks(np.arange(np.max(alphaVals) + 1))
pylab.legend(['c_D exact',
'c_D surrogate'],
fontsize=LEGENDSIZE,
loc='lower right')
pylab.tick_params(axis='both', which='major', labelsize=TICKSIZE)
def nmf(fdoc, fvocab):
T = 100
nmf = NMF(fdoc, fvocab)
nmf.train(T)
nmf.get_words()
# print(mf.R)
plt.figure()
plt.plot(range(1,T+1),nmf.objective)
plt.xticks(np.linspace(1,T,10))
plt.xlabel('Iterations')
plt.ylabel('Objective')
plt.title('Variation of objective with iterations')
plt.savefig('hw5_2a.png')
plt.show()
def gp_partd(Xtrain,ytrain,Xtest,ytest):
gp = gaussian_process(Xtrain[:,3],ytrain,Xtrain[:,3],ytrain)
gp.init_kernel_matrices(b=5,var=2)
gp.predict_test()
x = np.asarray(Xtrain[:,3]).flatten()
xsortind = np.argsort(x)
y1 = np.asarray(ytrain).flatten()
y2 = np.asarray(gp.test_predictions).flatten()
plt.figure()
plt.scatter(x[xsortind],y1[xsortind])
plt.plot(x[xsortind],y2[xsortind],'b-')
plt.xlabel('Car Weight (Dimension 4)')
plt.ylabel('Outcome')
plt.title('Visualizing model through single dimension')
plt.savefig('hw3_gaussian_dim4_viz')
plt.show()
def energy_profile(self,ixaxis):
"""
Plot radial profile of key energy generations eps_nuc,
eps_neu etc.
Parameters
----------
ixaxis : 'mass' or 'radius'
"""
mass = self.get('mass')
radius = self.get('radius') * ast.rsun_cm
eps_nuc = self.get('eps_nuc')
eps_neu = self.get('non_nuc_neu')
if ixaxis == 'mass':
xaxis = mass
xlab = 'Mass / M$_\odot$'
else:
xaxis = old_div(radius, 1.e8) # Mm
xlab = 'radius / Mm'
pl.plot(xaxis, np.log10(eps_nuc),
'k-',
label='$\epsilon_\mathrm{nuc}>0$')
pl.plot(xaxis, np.log10(-eps_nuc),
'k--',
label='$\epsilon_\mathrm{nuc}<0$')
pl.plot(xaxis, np.log10(eps_neu),
'r-',
label='$\epsilon_\\nu$')
pl.xlabel(xlab)
pl.ylabel('$\log(\epsilon_\mathrm{nuc},\epsilon_\\nu)$')
pl.legend(loc='best').draw_frame(False)
def hrd_new(self, input_label="", skip=0):
"""
plot an HR diagram with options to skip the first N lines and
add a label string
Parameters
----------
input_label : string, optional
Diagram label. The default is "".
skip : integer, optional
Skip the first n lines. The default is 0.
"""
xl_old=pyl.gca().get_xlim()
if input_label == "":
my_label="M="+str(self.header_attr['initial_mass'])+", Z="+str(self.header_attr['initial_z'])
else:
my_label="M="+str(self.header_attr['initial_mass'])+", Z="+str(self.header_attr['initial_z'])+"; "+str(input_label)
pyl.plot(self.data[skip:,self.cols['log_Teff']-1],self.data[skip:,self.cols['log_L']-1],label = my_label)
pyl.legend(loc=0)
xl_new=pyl.gca().get_xlim()
pyl.xlabel('log Teff')
pyl.ylabel('log L')
if any(array(xl_old)==0):
pyl.gca().set_xlim(max(xl_new),min(xl_new))
elif any(array(xl_new)==0):
pyl.gca().set_xlim(max(xl_old),min(xl_old))
else:
pyl.gca().set_xlim([max(xl_old+xl_new),min(xl_old+xl_new)])
def t_lumi(self,num_frame,xax):
"""
Luminosity evolution as a function of time or model.
Parameters
----------
num_frame : integer
Number of frame to plot this plot into.
xax : string
Either model or time to indicate what is to be used on the
x-axis
"""
pyl.figure(num_frame)
if xax == 'time':
xaxisarray = self.get('star_age')
elif xax == 'model':
xaxisarray = self.get('model_number')
else:
print('kippenhahn_error: invalid string for x-axis selection. needs to be "time" or "model"')
logLH = self.get('log_LH')
logLHe = self.get('log_LHe')
pyl.plot(xaxisarray,logLH,label='L_(H)')
pyl.plot(xaxisarray,logLHe,label='L(He)')
pyl.ylabel('log L')
pyl.legend(loc=2)
if xax == 'time':
pyl.xlabel('t / yrs')
elif xax == 'model':
pyl.xlabel('model number')
def t_surf_parameter(self, num_frame, xax):
"""
Surface parameter evolution as a function of time or model.
Parameters
----------
num_frame : integer
Number of frame to plot this plot into.
xax : string
Either model or time to indicate what is to be used on the
x-axis
"""
pyl.figure(num_frame)
if xax == 'time':
xaxisarray = self.get('star_age')
elif xax == 'model':
xaxisarray = self.get('model_number')
else:
print('kippenhahn_error: invalid string for x-axis selection. needs to be "time" or "model"')
logL = self.get('log_L')
logTeff = self.get('log_Teff')
pyl.plot(xaxisarray,logL,'-k',label='log L')
pyl.plot(xaxisarray,logTeff,'-k',label='log Teff')
pyl.ylabel('log L, log Teff')
pyl.legend(loc=2)
if xax == 'time':
pyl.xlabel('t / yrs')
elif xax == 'model':
pyl.xlabel('model number')
def test_abu_evolution(self):
from nugridpy import ppn, utils
import matplotlib
matplotlib.use('agg')
import matplotlib.pylab as mpy
import os
# Perform tests within temporary directory
with TemporaryDirectory() as tdir:
# wget the data for a ppn run from the CADC VOspace
os.system("wget -q --content-disposition --directory '" + tdir + "' "\
+ "'http://www.canfar.phys.uvic.ca/vospace/synctrans?TARGET="\
+ "vos%3A%2F%2Fcadc.nrc.ca%21vospace%2Fnugrid%2Fdata%2Fprojects%2Fppn%2Fexamples%2F"\
+ "ppn_Hburn_simple%2Fx-time.dat&DIRECTION=pullFromVoSpace&PROTOCOL"\
+ "=ivo%3A%2F%2Fivoa.net%2Fvospace%2Fcore%23httpget'")
#nugrid_dir= os.path.dirname(os.path.dirname(ppn.__file__))
#NuPPN_dir= nugrid_dir + "/NuPPN"
#test_data_dir= NuPPN_dir + "/examples/ppn_Hburn_simple/RUN_MASTER"
symbs=utils.symbol_list('lines2')
x=ppn.xtime(tdir)
specs=['PROT','HE 4','C 12','N 14','O 16']
i=0
for spec in specs:
x.plot('time',spec,logy=True,logx=True,shape=utils.linestyle(i)[0],show=False,title='')
i += 1
mpy.ylim(-5,0.2)
mpy.legend(loc=0)
mpy.xlabel('$\log t / \mathrm{min}$')
mpy.ylabel('$\log X \mathrm{[mass fraction]}$')
abu_evol_file = 'abu_evolution.png'
mpy.savefig(abu_evol_file)
self.assertTrue(os.path.exists(abu_evol_file))
def plot(l, samp, w1, w2, cor):
time_range = numpy.arange(0, l) * (1.0 / samp)
pl.figure(1)
pl.subplot(211)
pl.plot(time_range, w1)
pl.subplot(212)
pl.plot(time_range, w2, c="r")
pl.xlabel("time")
pl.figure(2)
pl.plot(time_range, cor)
pl.show()
def main():
sampling, maxvalue, wave_data = record.record()
# Pick out two channels for our study.
w1, w2 = wave_data[1:3]
nframes = w1.shape[0]
# Cut one channel in the tail, while the other in the head,
# to guarantee same length and first delays second.
cut_time_len = 0.2 # second
cut_len = int(cut_time_len * sampling)
wp1 = w1[:-cut_len]
wp2 = w2[cut_len:]
# Get their reduced (amplitude) version, and
# calculate correlation.
a = numpy.array(wp1, dtype=numpy.double) / maxvalue
b = numpy.array(wp2, dtype=numpy.double) / maxvalue
delay_time = delay.fst_delay_snd(a, b, sampling)
# Plot the channels, also the correlation.
time_range = numpy.arange(0, nframes - cut_len)*(1.0/sampling)
# Still shows the original signal
pl.figure(1)
pl.subplot(211)
pl.plot(time_range, wp1)
pl.subplot(212)
pl.plot(time_range, wp2, c="r")
pl.xlabel("time")
pl.show()
# Print delay
print("Chan 1 delay chan 2 by {0}".format(delay_time))
def main():
sampling, maxvalue, wave_data = record.record()
# Pick out two channels for our study.
w1, w2 = wave_data[0:2]
nframes = w1.shape[0]
# Pad one channel in the head, while the other in the tail,
# to guarantee same length.
pad_time_len = 0.01 # second
pad_len = int(pad_time_len * sampling)
pad_arr = numpy.zeros(pad_len)
wp1 = numpy.concatenate((pad_arr, w1))
wp2 = numpy.concatenate((w2, pad_arr))
# Get their reduced (amplitude) version, and
# calculate correlation.
a = numpy.array(wp1, dtype=numpy.double) / maxvalue
b = numpy.array(wp2, dtype=numpy.double) / maxvalue
delay_time = delay.fst_delay_snd(a, b, sampling)
# Plot the channels, also the correlation.
time_range = numpy.arange(0, nframes + pad_len)*(1.0/sampling)
# Still shows the original signal
pl.figure(1)
pl.subplot(211)
pl.plot(time_range, wp1)
pl.subplot(212)
pl.plot(time_range, wp2, c="r")
pl.xlabel("time")
pl.show()
# Print delay
print("Chan 1 delay chan 2 by {0}".format(delay_time))
def plot_channel(audio, sampling):
channels, nframes = audio.shape[0], audio.shape[1]
time_range = numpy.arange(0, nframes) * (1.0 / sampling)
for i in range(1, channels + 1):
pl.figure(i)
pl.plot(time_range, audio[i - 1])
pl.xlabel("time{0}".format(i))
pl.show()
def onehist(x,xlabel='',fontsize=12):
"""
Script that plots the histogram of x with the corresponding xlabel.
"""
pylab.clf()
pylab.rcParams.update({'font.size': fontsize})
pylab.hist(x,histtype='stepfilled')
pylab.legend()
#### Change the X-axis appropriately ####
pylab.xlabel(xlabel)
pylab.ylabel('Number')
pylab.draw()
pylab.show()
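A hypothetical call, assuming numpy is available; the data are random placeholders:
import numpy as np

# 1000 normally distributed measurements, plotted as a single histogram
onehist(np.random.randn(1000), xlabel='Measured value', fontsize=10)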
def plot_graphs(df, trending_daily, day_from, day_to, limit, country_code, folder_out=None):
days = pd.date_range(start=day_from, end=day_to, freq='D')
for day in days:
fig = plt.figure()
ax = fig.add_subplot(111)
plt.rc('lines', linewidth=2)
data = trending_daily.get_group(str(day.date()))
places, clusters = top_trending(data, limit)
for cluster in clusters:
places.add(max_from_cluster(cluster, data))
ax.set_prop_cycle(plt.cycler('color', ['r', 'b', 'yellow'] + [plt.cm.Accent(i) for i in np.linspace(0, 1, limit-3)]
) + plt.cycler('linestyle', ['-', '-', '-', '-', '-', '--', '--', '--', '--', '--']))
frame = export(places, clusters, data)
frame.sort_values('trending_rank', ascending=False, inplace=True)
for i in range(len(frame)):
item = frame.index[i]
lat, lon, country = item
result_items = ReverseGeoCode().get_address_attributes(lat, lon, 10, 'city', 'country_code')
if 'city' not in result_items.keys():
mark = "%s (%s)" % (manipulate_display_name(result_items['display_name']),
result_items['country_code'].upper() if 'country_code' in result_items.keys() else country)
else:
if check_eng(result_items['city']):
mark = "%s (%s)" % (result_items['city'], result_items['country_code'].upper())
else:
mark = "%.2f %.2f (%s)" % (lat, lon, result_items['country_code'].upper())
gp = df.loc[item].plot(ax=ax, x='date', y='count', label=mark)
ax.tick_params(axis='both', which='major', labelsize=10)
ax.set_yscale("log", nonposy='clip')
plt.xlabel('Date', fontsize='small', verticalalignment='baseline', horizontalalignment='right')
plt.ylabel('Total number of views (log)', fontsize='small', verticalalignment='center', horizontalalignment='center', labelpad=6)
gp.legend(loc='best', fontsize='xx-small', ncol=2)
gp.set_title('Top 10 OSM trending places on ' + str(day.date()), {'fontsize': 'large', 'verticalalignment': 'bottom'})
plt.tight_layout()
db = TrendingDb()
db.update_table_img(plt, str(day.date()), region=country_code)
plt.close()
def dispersion_plot(text, words, ignore_case=False, title="Lexical Dispersion Plot"):
"""
Generate a lexical dispersion plot.
:param text: The source text
:type text: list(str) or enum(str)
:param words: The target words
:type words: list of str
:param ignore_case: flag to set if case should be ignored when searching text
:type ignore_case: bool
"""
try:
from matplotlib import pylab
except ImportError:
raise ValueError('The plot function requires matplotlib to be installed. '
'See http://matplotlib.org/')
text = list(text)
words.reverse()
if ignore_case:
words_to_comp = list(map(str.lower, words))
text_to_comp = list(map(str.lower, text))
else:
words_to_comp = words
text_to_comp = text
points = [(x,y) for x in range(len(text_to_comp))
for y in range(len(words_to_comp))
if text_to_comp[x] == words_to_comp[y]]
if points:
x, y = list(zip(*points))
else:
x = y = ()
pylab.plot(x, y, "b|", scalex=.1)
pylab.yticks(list(range(len(words))), words, color="b")
pylab.ylim(-1, len(words))
pylab.title(title)
pylab.xlabel("Word Offset")
pylab.show()
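A hypothetical call with a made-up token list:
# tokenized text and the target words whose offsets should be marked
tokens = "the cat sat on the mat and the dog chased the cat".split()
dispersion_plot(tokens, ["cat", "dog", "mat"])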
def plot_word_freq_dist(text):
fd = text.vocab()
samples = [item for item, _ in fd.most_common(50)]
values = [fd[sample] for sample in samples]
values = [sum(values[:i+1]) * 100.0/fd.N() for i in range(len(values))]
pylab.title(text.name)
pylab.xlabel("Samples")
pylab.ylabel("Cumulative Percentage")
pylab.plot(values)
pylab.xticks(range(len(samples)), [str(s) for s in samples], rotation=90)
pylab.show()
def demo(text=None):
from nltk.corpus import brown
from matplotlib import pylab
tt = TextTilingTokenizer(demo_mode=True)
if text is None: text = brown.raw()[:10000]
s, ss, d, b = tt.tokenize(text)
pylab.xlabel("Sentence Gap index")
pylab.ylabel("Gap Scores")
pylab.plot(range(len(s)), s, label="Gap Scores")
pylab.plot(range(len(ss)), ss, label="Smoothed Gap scores")
pylab.plot(range(len(d)), d, label="Depth scores")
pylab.stem(range(len(b)), b)
pylab.legend()
pylab.show()
def plot_position(self, pos_true, pos_est, cam_states):
N = pos_est.shape[1]
pos_true = pos_true[:, :N]
pos_est = pos_est[:, :N]
# Figure
plt.figure()
plt.suptitle("Position")
# Ground truth
plt.plot(pos_true[0, :], pos_true[1, :],
color="red", label="Grouth truth")
# color="red", marker="x", label="Grouth truth")
# Estimated
plt.plot(pos_est[0, :], pos_est[1, :],
color="blue", label="Estimated")
# color="blue", marker="o", label="Estimated")
# Sliding window
cam_pos = []
for cam_state in cam_states:
cam_pos.append(cam_state.p_G)
cam_pos = np.array(cam_pos).reshape((len(cam_pos), 3)).T
plt.plot(cam_pos[0, :], cam_pos[1, :],
color="green", label="Camera Poses")
# color="green", marker="o", label="Camera Poses")
# Plot labels and legends
plt.xlabel("East (m)")
plt.ylabel("North (m)")
plt.axis("equal")
plt.legend(loc=0)
def plot_velocity(self, timestamps, vel_true, vel_est):
N = vel_est.shape[1]
t = timestamps[:N]
vel_true = vel_true[:, :N]
vel_est = vel_est[:, :N]
# Figure
plt.figure()
plt.suptitle("Velocity")
# X axis
plt.subplot(311)
plt.plot(t, vel_true[0, :], color="red", label="Ground_truth")
plt.plot(t, vel_est[0, :], color="blue", label="Estimate")
plt.title("x-axis")
plt.xlabel("Date Time")
plt.ylabel("ms^-1")
plt.legend(loc=0)
# Y axis
plt.subplot(312)
plt.plot(t, vel_true[1, :], color="red", label="Ground_truth")
plt.plot(t, vel_est[1, :], color="blue", label="Estimate")
plt.title("y-axis")
plt.xlabel("Date Time")
plt.ylabel("ms^-1")
plt.legend(loc=0)
# Z axis
plt.subplot(313)
plt.plot(t, vel_true[2, :], color="red", label="Ground_truth")
plt.plot(t, vel_est[2, :], color="blue", label="Estimate")
plt.title("z-axis")
plt.xlabel("Date Time")
plt.ylabel("ms^-1")
plt.legend(loc=0)
def plot_attitude(self, timestamps, att_true, att_est):
# Setup
N = att_est.shape[1]
t = timestamps[:N]
att_true = att_true[:, :N]
att_est = att_est[:, :N]
# Figure
plt.figure()
plt.suptitle("Attitude")
# X axis
plt.subplot(311)
plt.plot(t, att_true[0, :], color="red", label="Ground_truth")
plt.plot(t, att_est[0, :], color="blue", label="Estimate")
plt.title("x-axis")
plt.legend(loc=0)
plt.xlabel("Date Time")
plt.ylabel("rad s^-1")
# Y axis
plt.subplot(312)
plt.plot(t, att_true[1, :], color="red", label="Ground_truth")
plt.plot(t, att_est[1, :], color="blue", label="Estimate")
plt.title("y-axis")
plt.legend(loc=0)
plt.xlabel("Date Time")
plt.ylabel("rad s^-1")
# Z axis
plt.subplot(313)
plt.plot(t, att_true[2, :], color="red", label="Ground_truth")
plt.plot(t, att_est[2, :], color="blue", label="Estimate")
plt.title("z-axis")
plt.legend(loc=0)
plt.xlabel("Date Time")
plt.ylabel("rad s^-1")
def plot_velocity(self, timestamps, vel_true, vel_est):
N = vel_est.shape[1]
t = timestamps[:N]
vel_true = vel_true[:, :N]
vel_est = vel_est[:, :N]
# Figure
plt.figure()
plt.suptitle("Velocity")
# X axis
plt.subplot(311)
plt.plot(t, vel_true[0, :], color="red", label="Ground_truth")
plt.plot(t, vel_est[0, :], color="blue", label="Estimate")
plt.title("x-axis")
plt.xlabel("Date Time")
plt.ylabel("ms^-1")
plt.legend(loc=0)
# Y axis
plt.subplot(312)
plt.plot(t, vel_true[1, :], color="red", label="Ground_truth")
plt.plot(t, vel_est[1, :], color="blue", label="Estimate")
plt.title("y-axis")
plt.xlabel("Date Time")
plt.ylabel("ms^-1")
plt.legend(loc=0)
# Z axis
plt.subplot(313)
plt.plot(t, vel_true[2, :], color="red", label="Ground_truth")
plt.plot(t, vel_est[2, :], color="blue", label="Estimate")
plt.title("z-axis")
plt.xlabel("Date Time")
plt.ylabel("ms^-1")
plt.legend(loc=0)
def plot_storage(self, storage):
plt.figure()
plt.plot(range(len(storage)), storage)
plt.title("Num of tracks over time")
plt.xlabel("Frame No.")
plt.ylabel("Num of Tracks")
def plot_tracked(self, tracked):
plt.figure()
plt.plot(range(len(tracked)), tracked)
plt.title("Matches per Frame")
plt.xlabel("Frame No.")
plt.ylabel("Num of Tracks")
def plot_1d_model(self):
plt.subplot(131)
plt.plot(self.rho_bg,self.radius)
plt.xlabel('density (kg/m3)')
plt.ylabel('radius (km)')
plt.subplot(132)
plt.plot(self.vp_bg,self.radius)
plt.xlabel('Vp (km/s)')
plt.ylabel('radius (km)')
plt.subplot(133)
plt.plot(self.vs_bg,self.radius)
plt.xlabel('Vs (km/s)')
plt.ylabel('radius (km)')
plt.show()
def plot_q(model='cem', r_min=0.0, r_max=6371.0, dr=1.0):
"""
Plot a radially symmetric Q model.
plot_q(model='cem', r_min=0.0, r_max=6371.0, dr=1.0):
r_min=minimum radius [km], r_max=maximum radius [km], dr=radius increment [km]
Currently available models (model): cem, prem, ql6
"""
r = np.arange(r_min, r_max+dr, dr)
q = np.zeros(len(r))
for k in range(len(r)):
if model=='cem':
q[k]=q_cem(r[k])
elif model=='ql6':
q[k]=q_ql6(r[k])
elif model=='prem':
q[k]=q_prem(r[k])
plt.plot(r,q,'k')
plt.xlim((0.0,r_max))
plt.xlabel('radius [km]')
plt.ylabel('Q')
plt.show()
###################################################################################################
#- CEM, EUMOD
###################################################################################################
def xlabel(s, *args, **kwargs):
print "Warning! Failed to import matplotlib so no axes will be labeled"
def _plot_mi_func(x, y):
mi = mutual_info(x, y)
title = "NI($X_1$, $X_2$) = %.3f" % mi
pylab.scatter(x, y)
pylab.title(title)
pylab.xlabel("$X_1$")
pylab.ylabel("$X_2$")
def _plot_correlation_func(x, y):
r, p = pearsonr(x, y)
title = "Cor($X_1$, $X_2$) = %.3f" % r
pylab.scatter(x, y)
pylab.title(title)
pylab.xlabel("$X_1$")
pylab.ylabel("$X_2$")
f1 = scipy.poly1d(scipy.polyfit(x, y, 1))
pylab.plot(x, f1(x), "r--", linewidth=2)
# pylab.xticks([w*7*24 for w in [0,1,2,3,4]], ['week %i'%(w+1) for w in
# [0,1,2,3,4]])