Python matplotlib.pylab module: ylabel() example source code
The following 50 code examples, extracted from open-source Python projects, demonstrate how to use matplotlib.pylab.ylabel().
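Before the project snippets, here is a minimal, self-contained sketch (not taken from any of the projects below) showing the basic call: pylab.ylabel() sets the y-axis label of the current axes, just as pylab.xlabel() sets the x-axis label.
import numpy as np
from matplotlib import pylab

x = np.linspace(0, 2 * np.pi, 200)
pylab.plot(x, np.sin(x))
pylab.xlabel('x (radians)')   # x-axis label
pylab.ylabel('sin(x)')        # y-axis label of the current axes
pylab.title('Minimal pylab.ylabel() example')
pylab.show()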
def hrd_key(self, key_str):
"""
plot an HR diagram
Parameters
----------
key_str : string
A label string
"""
pyl.plot(self.data[:,self.cols['log_Teff']-1],\
self.data[:,self.cols['log_L']-1],label = key_str)
pyl.legend()
pyl.xlabel('log Teff')
pyl.ylabel('log L')
x1,x2=pl.xlim()
if x2 > x1:
self._xlimrev()
def compare_images(path = '.'):
S_limit = 10.
file_list = glob.glob(os.path.join(path, 'Abu*'))
file_list_master = glob.glob(os.path.join(path, 'MasterAbu*'))
file_list.sort()
file_list_master.sort()
S=[]
print("Identifying images with rmq > "+'%3.1f'%S_limit)
ierr_count = 0
for i in range(len(file_list)):
this_S,fimg1,fimg2 = compare_entropy(file_list[i],file_list_master[i])
if this_S > S_limit:
warnings.warn(file_list[i]+" and "+file_list_master[i]+" differ by "+'%6.3f'%this_S)
ierr_count += 1
S.append(this_S)
if ierr_count > 0:
print("Error: at least one image differs by more than S_limit")
sys.exit(1)
#print ("S: ",S)
#plb.plot(S,'o')
#plb.xlabel("image number")
#plb.ylabel("modified log KL-divergence to previous image")
#plb.show()
def plot_volcano(logFC,p_val,sample_name,saveName,logFC_thresh):
fig=pl.figure()
## To plot and save
pl.scatter(logFC[(p_val>0.05)|(abs(logFC)<logFC_thresh)],-np.log10(p_val[(p_val>0.05)|(abs(logFC)<logFC_thresh)]),color='blue',alpha=0.5);
pl.scatter(logFC[(p_val<0.05)&(abs(logFC)>logFC_thresh)],-np.log10(p_val[(p_val<0.05)&(abs(logFC)>logFC_thresh)]),color='red');
pl.hlines(-np.log10(0.05),min(logFC),max(logFC))
pl.vlines(-logFC_thresh,min(-np.log10(p_val)),max(-np.log10(p_val)))
pl.vlines(logFC_thresh,min(-np.log10(p_val)),max(-np.log10(p_val)))
pl.xlim(-3,3)
pl.xlabel('Log Fold Change')
pl.ylabel('-log10(p-value)')
pl.savefig(saveName)
pl.close(fig)
# def plot_histograms(df_peaks,pntr_list):
#
# for pntr in pntr_list:
# colName =pntr[2]+'_Intragenic_position'
# pl.hist(df_peaks[colName])
# pl.xlabel(colName)
# pl.ylabel()
# pl.show()
def plot(traj, x, y, **kwargs):
""" Create a matplotlib plot of property x against property y
Args:
x,y (str): names of the properties
**kwargs (dict): kwargs for :meth:`matplotlib.pylab.plot`
Returns:
List[matplotlib.lines.Lines2D]: the lines that were plotted
"""
from matplotlib import pylab
xl = yl = None
if type(x) is str:
strx = x
x = getattr(traj, x)
xl = '%s / %s' % (strx, getattr(x, 'units', 'dimensionless'))
if type(y) is str:
stry = y
y = getattr(traj, y)
yl = '%s / %s' % (stry, getattr(y, 'units', 'dimensionless'))
plt = pylab.plot(x, y, **kwargs)
pylab.xlabel(xl); pylab.ylabel(yl); pylab.grid()
return plt
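A hypothetical call to this helper could look like the sketch below; the trajectory object traj and its attribute names time and kinetic_energy are assumptions for illustration, not part of the snippet.
from types import SimpleNamespace
import numpy as np
from matplotlib import pylab

# Hypothetical trajectory stand-in; a real `traj` would come from the surrounding library.
traj = SimpleNamespace(time=np.linspace(0.0, 10.0, 100),
                       kinetic_energy=np.random.rand(100))
lines = plot(traj, 'time', 'kinetic_energy', color='k', lw=1.5)
pylab.show()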
def plot_confusion_matrix(cm, label_list, title='Confusion matrix', cmap=None):
from matplotlib import pylab
cm = np.asarray(cm, dtype=np.float32)
for i, row in enumerate(cm):
cm[i] = cm[i] / np.sum(cm[i])
#import matplotlib.pyplot as plt
#plt.ion()
pylab.clf()
pylab.matshow(cm, fignum=False, cmap='Blues', vmin=0, vmax=1.0)
ax = pylab.axes()
ax.set_xticks(range(len(label_list)))
ax.set_xticklabels(label_list, rotation='vertical')
ax.xaxis.set_ticks_position('bottom')
ax.set_yticks(range(len(label_list)))
ax.set_yticklabels(label_list)
pylab.title(title)
pylab.colorbar()
pylab.grid(False)
pylab.xlabel('Predicted class')
pylab.ylabel('True class')
pylab.grid(False)
pylab.savefig('test.jpg')
pylab.show()
def plot_position(self, pos_true, pos_est):
N = pos_est.shape[1]
pos_true = pos_true[:, :N]
pos_est = pos_est[:, :N]
# Figure
plt.figure()
plt.suptitle("Position")
# Ground truth
plt.plot(pos_true[0, :], pos_true[1, :],
color="red", marker="o", label="Grouth truth")
# Estimated
plt.plot(pos_est[0, :], pos_est[1, :],
color="blue", marker="o", label="Estimated")
# Plot labels and legends
plt.xlabel("East (m)")
plt.ylabel("North (m)")
plt.axis("equal")
plt.legend(loc=0)
def plot_1d(dataset, nbins, data):
with sns.axes_style('white'):
plt.rc('font', weight='bold')
plt.rc('grid', lw=2)
plt.rc('lines', lw=3)
plt.figure(1)
plt.hist(data, bins=np.arange(nbins+1), color='blue')
plt.ylabel('Count', weight='bold', fontsize=24)
xticks = list(plt.gca().get_xticks())
while (nbins-1) / float(xticks[-1]) < 1.1:
xticks = xticks[:-1]
while xticks[0] < 0:
xticks = xticks[1:]
xticks.append(nbins-1)
xticks = list(sorted(xticks))
plt.gca().set_xticks(xticks)
plt.xlim([int(np.ceil(-0.05*nbins)),int(np.ceil(nbins*1.05))])
plt.legend(loc='upper right')
plt.savefig('plots/marginals-{0}.pdf'.format(dataset.replace('_','-')), bbox_inches='tight')
plt.clf()
plt.close()
def plot_1d(dataset, nbins):
data = np.loadtxt('experiments/uci/data/splits/{0}_all.csv'.format(dataset), skiprows=1, delimiter=',')[:,-1]
with sns.axes_style('white'):
plt.rc('font', weight='bold')
plt.rc('grid', lw=2)
plt.rc('lines', lw=3)
plt.figure(1)
plt.hist(data, bins=np.arange(nbins+1), color='blue')
plt.ylabel('Count', weight='bold', fontsize=24)
xticks = list(plt.gca().get_xticks())
while (nbins-1) / float(xticks[-1]) < 1.1:
xticks = xticks[:-1]
while xticks[0] < 0:
xticks = xticks[1:]
xticks.append(nbins-1)
xticks = list(sorted(xticks))
plt.gca().set_xticks(xticks)
plt.xlim([int(np.ceil(-0.05*nbins)),int(np.ceil(nbins*1.05))])
plt.legend(loc='upper right')
plt.savefig('plots/marginals-{0}.pdf'.format(dataset.replace('_','-')), bbox_inches='tight')
plt.clf()
plt.close()
def plotLine(self, x_vals, y_vals, x_label, y_label, title, filename=None):
plt.clf()
plt.xlabel(x_label)
plt.xlim(((min(x_vals) - 0.5), (max(x_vals) + 0.5)))
plt.ylabel(y_label)
plt.ylim(((min(y_vals) - 0.5), (max(y_vals) + 0.5)))
plt.title(title)
plt.plot(x_vals, y_vals, c='k', lw=2)
#plt.plot(x_vals, len(x_vals) * y_vals[0], c='r', lw=2)
if filename == None:
plt.show()
else:
plt.savefig(self.outputPath + filename)
def plot_entropy():
pylab.clf()
pylab.figure(num=None, figsize=(5, 4))
title = "Entropy $H(X)$"
pylab.title(title)
pylab.xlabel("$P(X=$coin will show heads up$)$")
pylab.ylabel("$H(X)$")
pylab.xlim(xmin=0, xmax=1.1)
x = np.arange(0.001, 1, 0.001)
y = -x * np.log2(x) - (1 - x) * np.log2(1 - x)
pylab.plot(x, y)
# pylab.xticks([w*7*24 for w in [0,1,2,3,4]], ['week %i'%(w+1) for w in
# [0,1,2,3,4]])
pylab.autoscale(tight=True)
pylab.grid(True)
filename = "entropy_demo.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_clustering(x, y, title, mx=None, ymax=None, xmin=None, km=None):
pylab.figure(num=None, figsize=(8, 6))
if km:
pylab.scatter(x, y, s=50, c=km.predict(list(zip(x, y))))
else:
pylab.scatter(x, y, s=50)
pylab.title(title)
pylab.xlabel("Occurrence word 1")
pylab.ylabel("Occurrence word 2")
pylab.autoscale(tight=True)
pylab.ylim(ymin=0, ymax=1)
pylab.xlim(xmin=0, xmax=1)
pylab.grid(True, linestyle='-', color='0.75')
return pylab
def plot_confusion_matrix(cm, genre_list, name, title):
pylab.clf()
pylab.matshow(cm, fignum=False, cmap='Blues', vmin=0, vmax=1.0)
ax = pylab.axes()
ax.set_xticks(range(len(genre_list)))
ax.set_xticklabels(genre_list)
ax.xaxis.set_ticks_position("bottom")
ax.set_yticks(range(len(genre_list)))
ax.set_yticklabels(genre_list)
pylab.title(title)
pylab.colorbar()
pylab.grid(False)
pylab.show()
pylab.xlabel('Predicted class')
pylab.ylabel('True class')
pylab.grid(False)
pylab.savefig(
os.path.join(CHART_DIR, "confusion_matrix_%s.png" % name), bbox_inches="tight")
def plot_roc(auc_score, name, tpr, fpr, label=None):
pylab.clf()
pylab.figure(num=None, figsize=(5, 4))
pylab.grid(True)
pylab.plot([0, 1], [0, 1], 'k--')
pylab.plot(fpr, tpr)
pylab.fill_between(fpr, tpr, alpha=0.5)
pylab.xlim([0.0, 1.0])
pylab.ylim([0.0, 1.0])
pylab.xlabel('False Positive Rate')
pylab.ylabel('True Positive Rate')
pylab.title('ROC curve (AUC = %0.2f) / %s' %
(auc_score, label), verticalalignment="bottom")
pylab.legend(loc="lower right")
filename = name.replace(" ", "_")
pylab.savefig(
os.path.join(CHART_DIR, "roc_" + filename + ".png"), bbox_inches="tight")
def plotKChart(self, misClassDict, saveFigPath):
kList = []
misRateList = []
for k, misClassNum in misClassDict.items():
kList.append(k)
misRateList.append(1.0 - 1.0/k*misClassNum)
fig = plt.figure(saveFigPath)
plt.plot(kList, misRateList, 'r--')
plt.title(saveFigPath)
plt.xlabel('k Num.')
plt.ylabel('Misclassified Rate')
plt.legend(saveFigPath)
plt.grid(True)
plt.savefig(saveFigPath)
plt.show()
################################### PART3 TEST ########################################
def backtest(config_file, day_trade):
cfg = config.Config(config_file)
cfg.day_trade = day_trade
dfs = load_data(config_file)
trender = strategies[cfg.strategy](**cfg.strategy_parameters)
res = []
for df in dfs:
res.append(trender.backtest(data_frame=df))
final_panel = pd.Panel({os.path.basename(p['path']): df for p, df in
zip(cfg.data_path, res)})
profit_series = final_panel.sum(axis=0)['total_profit'].cumsum()
final_panel.to_excel(cfg.output_file)
if cfg.show:
profit_series.plot()
plt.xlabel('Time')
plt.ylabel('Profit')
plt.legend('Profit')
plt.show()
def fit_data():
data=np.loadtxt('data.dat')
print(data)
params = dict()
params["c"] = {"min" : -np.inf,"max" : np.inf}
result = qudi_fitting.make_lorentzian_fit(axis=data[:,0], data=data[:,3], add_parameters=params)
print(result.fit_report())
plt.plot(data[:,0],-data[:,3]+2,"b-o",label="data mean")
# plt.plot(data[:,0],data[:,1],label="data")
# plt.plot(data[:,0],data[:,2],label="data")
plt.plot(data[:,0],-result.best_fit+2,"r-",linewidth=2.,label="fit")
# plt.plot(data[:,0],result.init_fit,label="init")
plt.xlabel("time (ns)")
plt.ylabel("polarization transfer (arb. u.)")
plt.legend(loc=1)
# plt.savefig("pol20_24repetition_pol.pdf")
# plt.savefig("pol20_24repetition_pol.png")
plt.show()
savedata=[[data[ii,0],-data[ii,3]+2,-result.best_fit[ii]+2] for ii in range(len(data[:,0]))]
np.savetxt("pol_data_fit.csv",savedata)
# print(result.params)
print(result.params)
def plot_penalty_vl(debug, tag, fold_exp):
plt.close("all")
vl = np.array(debug["penalty"])
fig = plt.figure(figsize=(15, 10.8), dpi=300)
names = debug["names"]
for i in range(vl.shape[1]):
if vl.shape[1] > 1:
plt.plot(vl[:, i], label="layer_"+str(names[i]))
else:
plt.plot(vl[:], label="layer_"+str(names[i]))
plt.xlabel("mini-batchs")
plt.ylabel("value of penlaty")
plt.title(
"Penalty value over layers:" + "_".join([str(k) for k in names]) +
". tag:" + tag)
plt.legend(loc='upper right', fancybox=True, shadow=True, prop={'size': 8})
plt.grid(True)
fig.savefig(fold_exp+"/penalty.png", bbox_inches='tight')
plt.close('all')
del fig
def plot_roc(y_test, y_pred, label=''):
"""Compute ROC curve and ROC area"""
fpr, tpr, _ = roc_curve(y_test, y_pred)
roc_auc = auc(fpr, tpr)
# Plot of a ROC curve for a specific class
plt.figure()
plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic' + label)
plt.legend(loc="lower right")
plt.show()
def modelfit(alg, dtrain, predictors, useTrainCV=True, cv_folds=5, early_stopping_rounds=50):
if useTrainCV:
xgb_param = alg.get_xgb_params()
xgtrain = xgb.DMatrix(dtrain[predictors].values, label=dtrain['label'].values)
cvresult = xgb.cv(xgb_param, xgtrain, num_boost_round=alg.get_params()['n_estimators'], nfold=cv_folds,
metrics='auc', early_stopping_rounds=early_stopping_rounds, show_progress=False)
alg.set_params(n_estimators=cvresult.shape[0])
# Fit the algorithm on the data
alg.fit(dtrain[predictors], dtrain['label'], eval_metric='auc')
# Predict training set:
dtrain_predictions = alg.predict(dtrain[predictors])
dtrain_predprob = alg.predict_proba(dtrain[predictors])[:, 1]
# Print model report:
print "\nModel Report"
print "Accuracy : %.4g" % metrics.accuracy_score(dtrain['Disbursed'].values, dtrain_predictions)
print "AUC Score (Train): %f" % metrics.roc_auc_score(dtrain['Disbursed'], dtrain_predprob)
feat_imp = pd.Series(alg.booster().get_fscore()).sort_values(ascending=False)
feat_imp.plot(kind='bar', title='Feature Importances')
plt.ylabel('Feature Importance Score')
def plot_confusion_matrix(cm, plot_title, filename, genres=None):
if not genres:
genres = GENRES
pylab.clf()
pylab.matshow(cm, fignum=False, cmap='Blues', vmin=0, vmax=100.0)
axes = pylab.axes()
axes.set_xticks(range(len(genres)))
axes.set_xticklabels(genres, rotation=45)
axes.set_yticks(range(len(genres)))
axes.set_yticklabels(genres)
axes.xaxis.set_ticks_position("bottom")
pylab.title(plot_title, fontsize=14)
pylab.colorbar()
pylab.xlabel('Predicted class', fontsize=12)
pylab.ylabel('Correct class', fontsize=12)
pylab.grid(False)
#pylab.show()
pylab.savefig(os.path.join(PLOTS_DIR, "cm_%s.eps" % filename), bbox_inches="tight")
def plot_confusion_matrix(cm, label_list, title='Confusion matrix', cmap=None):
from matplotlib import pylab
cm = np.asarray(cm, dtype=np.float32)
for i, row in enumerate(cm):
cm[i] = cm[i] / np.sum(cm[i])
#import matplotlib.pyplot as plt
#plt.ion()
pylab.clf()
pylab.matshow(cm, fignum=False, cmap='Blues', vmin=0, vmax=1.0)
ax = pylab.axes()
ax.set_xticks(range(len(label_list)))
ax.set_xticklabels(label_list, rotation='vertical')
ax.xaxis.set_ticks_position('bottom')
ax.set_yticks(range(len(label_list)))
ax.set_yticklabels(label_list)
pylab.title(title)
pylab.colorbar()
pylab.grid(False)
pylab.xlabel('Predicted class')
pylab.ylabel('True class')
pylab.grid(False)
pylab.savefig('test.jpg')
pylab.show()
def plotSpeedupFigure(AllInfo, maxWorker=1, **kwargs):
pylab.figure(2)
xs = AllInfo['nWorker']
ts_mono = AllInfo['t_monolithic']
xgrid = np.linspace(0, maxWorker + 0.1, 100)
pylab.plot(xgrid, xgrid, 'y--', label='ideal parallel')
for method in getMethodNames(**kwargs):
speedupRatio = ts_mono / AllInfo['t_' + method]
pylab.plot(xs, speedupRatio, 'o-',
label=method,
color=ColorMap[method],
markeredgecolor=ColorMap[method])
pylab.xlim([-0.2, maxWorker + 0.5])
pylab.ylim([0, maxWorker + 0.5])
pylab.legend(loc='upper left')
pylab.xlabel('Number of Workers')
pylab.ylabel('Speedup over Monolithic')
def plotBoundVsAlph(alphaVals=np.linspace(.001, 3, 1000),
beta1=0.5):
exactVals = cD_exact(alphaVals, beta1)
boundVals = cD_bound(alphaVals, beta1)
assert np.all(exactVals >= boundVals)
pylab.plot(alphaVals, exactVals, 'k-', linewidth=LINEWIDTH)
pylab.plot(alphaVals, boundVals, 'r--', linewidth=LINEWIDTH)
pylab.xlabel("alpha", fontsize=FONTSIZE)
pylab.ylabel(" ", fontsize=FONTSIZE)
pylab.xlim([np.min(alphaVals) - 0.1, np.max(alphaVals) + 0.1])
pylab.ylim([np.min(exactVals) - 0.05, np.max(exactVals) + 0.05])
pylab.xticks(np.arange(np.max(alphaVals) + 1))
pylab.legend(['c_D exact',
'c_D surrogate'],
fontsize=LEGENDSIZE,
loc='lower right')
pylab.tick_params(axis='both', which='major', labelsize=TICKSIZE)
def nmf(fdoc, fvocab):
T = 100
nmf = NMF(fdoc, fvocab)
nmf.train(T)
nmf.get_words()
# print(mf.R)
plt.figure()
plt.plot(range(1,T+1),nmf.objective)
plt.xticks(np.linspace(1,T,10))
plt.xlabel('Iterations')
plt.ylabel('Objective')
plt.title('Variation of objective with iterations')
plt.savefig('hw5_2a.png')
plt.show()
def gp_partd(Xtrain,ytrain,Xtest,ytest):
gp = gaussian_process(Xtrain[:,3],ytrain,Xtrain[:,3],ytrain)
gp.init_kernel_matrices(b=5,var=2)
gp.predict_test()
x = np.asarray(Xtrain[:,3]).flatten()
xsortind = np.argsort(x)
y1 = np.asarray(ytrain).flatten()
y2 = np.asarray(gp.test_predictions).flatten()
plt.figure()
plt.scatter(x[xsortind],y1[xsortind])
plt.plot(x[xsortind],y2[xsortind],'b-')
plt.xlabel('Car Weight (Dimension 4)')
plt.ylabel('Outcome')
plt.title('Visualizing model through single dimension')
plt.savefig('hw3_gaussian_dim4_viz')
plt.show()
def energy_profile(self,ixaxis):
"""
Plot radial profile of key energy generations eps_nuc,
eps_neu etc.
Parameters
----------
ixaxis : 'mass' or 'radius'
"""
mass = self.get('mass')
radius = self.get('radius') * ast.rsun_cm
eps_nuc = self.get('eps_nuc')
eps_neu = self.get('non_nuc_neu')
if ixaxis == 'mass':
xaxis = mass
xlab = 'Mass / M$_\odot$'
else:
xaxis = old_div(radius, 1.e8) # Mm
xlab = 'radius / Mm'
pl.plot(xaxis, np.log10(eps_nuc),
'k-',
label='$\epsilon_\mathrm{nuc}>0$')
pl.plot(xaxis, np.log10(-eps_nuc),
'k--',
label='$\epsilon_\mathrm{nuc}<0$')
pl.plot(xaxis, np.log10(eps_neu),
'r-',
label='$\epsilon_\\nu$')
pl.xlabel(xlab)
pl.ylabel('$\log(\epsilon_\mathrm{nuc},\epsilon_\\nu)$')
pl.legend(loc='best').draw_frame(False)
def hrd_new(self, input_label="", skip=0):
"""
plot an HR diagram with options to skip the first N lines and
add a label string
Parameters
----------
input_label : string, optional
Diagram label. The default is "".
skip : integer, optional
Skip the first n lines. The default is 0.
"""
xl_old=pyl.gca().get_xlim()
if input_label == "":
my_label="M="+str(self.header_attr['initial_mass'])+", Z="+str(self.header_attr['initial_z'])
else:
my_label="M="+str(self.header_attr['initial_mass'])+", Z="+str(self.header_attr['initial_z'])+"; "+str(input_label)
pyl.plot(self.data[skip:,self.cols['log_Teff']-1],self.data[skip:,self.cols['log_L']-1],label = my_label)
pyl.legend(loc=0)
xl_new=pyl.gca().get_xlim()
pyl.xlabel('log Teff')
pyl.ylabel('log L')
if any(array(xl_old)==0):
pyl.gca().set_xlim(max(xl_new),min(xl_new))
elif any(array(xl_new)==0):
pyl.gca().set_xlim(max(xl_old),min(xl_old))
else:
pyl.gca().set_xlim([max(xl_old+xl_new),min(xl_old+xl_new)])
def t_lumi(self,num_frame,xax):
"""
Luminosity evolution as a function of time or model.
Parameters
----------
num_frame : integer
Number of frame to plot this plot into.
xax : string
Either model or time to indicate what is to be used on the
x-axis
"""
pyl.figure(num_frame)
if xax == 'time':
xaxisarray = self.get('star_age')
elif xax == 'model':
xaxisarray = self.get('model_number')
else:
print('kippenhahn_error: invalid string for x-axis selection. needs to be "time" or "model"')
logLH = self.get('log_LH')
logLHe = self.get('log_LHe')
pyl.plot(xaxisarray,logLH,label='L_(H)')
pyl.plot(xaxisarray,logLHe,label='L(He)')
pyl.ylabel('log L')
pyl.legend(loc=2)
if xax == 'time':
pyl.xlabel('t / yrs')
elif xax == 'model':
pyl.xlabel('model number')
def t_surf_parameter(self, num_frame, xax):
"""
Surface parameter evolution as a function of time or model.
Parameters
----------
num_frame : integer
Number of frame to plot this plot into.
xax : string
Either model or time to indicate what is to be used on the
x-axis
"""
pyl.figure(num_frame)
if xax == 'time':
xaxisarray = self.get('star_age')
elif xax == 'model':
xaxisarray = self.get('model_number')
else:
print('kippenhahn_error: invalid string for x-axis selection. needs to be "time" or "model"')
logL = self.get('log_L')
logTeff = self.get('log_Teff')
pyl.plot(xaxisarray,logL,'-k',label='log L')
pyl.plot(xaxisarray,logTeff,'-k',label='log Teff')
pyl.ylabel('log L, log Teff')
pyl.legend(loc=2)
if xax == 'time':
pyl.xlabel('t / yrs')
elif xax == 'model':
pyl.xlabel('model number')
def test_abu_evolution(self):
from nugridpy import ppn, utils
import matplotlib
matplotlib.use('agg')
import matplotlib.pylab as mpy
import os
# Perform tests within temporary directory
with TemporaryDirectory() as tdir:
# wget the data for a ppn run from the CADC VOspace
os.system("wget -q --content-disposition --directory '" + tdir + "' "\
+ "'http://www.canfar.phys.uvic.ca/vospace/synctrans?TARGET="\
+ "vos%3A%2F%2Fcadc.nrc.ca%21vospace%2Fnugrid%2Fdata%2Fprojects%2Fppn%2Fexamples%2F"\
+ "ppn_Hburn_simple%2Fx-time.dat&DIRECTION=pullFromVoSpace&PROTOCOL"\
+ "=ivo%3A%2F%2Fivoa.net%2Fvospace%2Fcore%23httpget'")
#nugrid_dir= os.path.dirname(os.path.dirname(ppn.__file__))
#NuPPN_dir= nugrid_dir + "/NuPPN"
#test_data_dir= NuPPN_dir + "/examples/ppn_Hburn_simple/RUN_MASTER"
symbs=utils.symbol_list('lines2')
x=ppn.xtime(tdir)
specs=['PROT','HE 4','C 12','N 14','O 16']
i=0
for spec in specs:
x.plot('time',spec,logy=True,logx=True,shape=utils.linestyle(i)[0],show=False,title='')
i += 1
mpy.ylim(-5,0.2)
mpy.legend(loc=0)
mpy.xlabel('$\log t / \mathrm{min}$')
mpy.ylabel('$\log X \mathrm{[mass fraction]}$')
abu_evol_file = 'abu_evolution.png'
mpy.savefig(abu_evol_file)
self.assertTrue(os.path.exists(abu_evol_file))
def modelfit(alg, predictors, target, useTrainCV=True, cv_folds=5, early_stopping_rounds=50):
if useTrainCV:
xgb_param = alg.get_xgb_params()
xgtrain = xgb.DMatrix(predictors.values, label=target.values)
cvresult = xgb.cv(xgb_param, xgtrain, num_boost_round=alg.get_params()['n_estimators'], nfold=cv_folds,\
metrics=['auc'], early_stopping_rounds=early_stopping_rounds, show_progress=False)
alg.set_params(n_estimators=cvresult.shape[0])
#Fit the algorithm on the data
alg.fit(predictors, target, eval_metric='auc')
#Predict training set:
dtrain_predictions = alg.predict(predictors)
dtrain_predprob = alg.predict_proba(predictors)[:, 1]
#Print model report:
print("\nModel Report")
print("Accuracy : %.4g" % metrics.accuracy_score(target.values, dtrain_predictions))
print("AUC Score (Train): %f" % metrics.roc_auc_score(target, dtrain_predprob))
feat_imp = pd.Series(alg.booster().get_fscore()).sort_values(ascending=False)
feat_imp.plot(kind='bar', title='Feature Importances')
plt.ylabel('Feature Importance Score')
# examples of usage
# 1
def hackathon_GBC_model(clf, train, features):
clf.fit(train[features], train["Class"])
probab_of_predict = clf.predict_proba(train[features])[:,1]
predict_train = clf.predict(train[features])
cv_score = cross_val_score(clf, train[features], train["Class"], cv=5, scoring="roc_auc")
print("----------------------Model performance-----------------------")
print("Accuracy score: ", accuracy_score(train["Class"].values, predict_train))
print("AUC: ", roc_auc_score(train["Class"],probab_of_predict) )
print("CV score: Mean - {}, Max - {}, Min - {}, Std - {}".format(np.mean(cv_score), np.max(cv_score),
np.min(cv_score), np.std(cv_score)))
Relative_Feature_importance = pd.Series(clf.feature_importances_, features).sort_values(ascending=False)
Relative_Feature_importance.plot(kind='bar', title='Order of Feature Importance')
plt.ylabel('Feature Importance')
plt.show()
def onehist(x,xlabel='',fontsize=12):
"""
Script that plots the histogram of x with the corresponding xlabel.
"""
pylab.clf()
pylab.rcParams.update({'font.size': fontsize})
pylab.hist(x,histtype='stepfilled')
pylab.legend()
#### Change the X-axis appropriately ####
pylab.xlabel(xlabel)
pylab.ylabel('Number')
pylab.draw()
pylab.show()
def plot_graphs(df, trending_daily, day_from, day_to, limit, country_code, folder_out=None):
days = pd.DatetimeIndex(start=day_from, end=day_to, freq='D')
for day in days:
fig = plt.figure()
ax = fig.add_subplot(111)
plt.rc('lines', linewidth=2)
data = trending_daily.get_group(str(day.date()))
places, clusters = top_trending(data, limit)
for cluster in clusters:
places.add(max_from_cluster(cluster, data))
ax.set_prop_cycle(plt.cycler('color', ['r', 'b', 'yellow'] + [plt.cm.Accent(i) for i in np.linspace(0, 1, limit-3)]
) + plt.cycler('linestyle', ['-', '-', '-', '-', '-', '--', '--', '--', '--', '--']))
frame = export(places, clusters, data)
frame.sort_values('trending_rank', ascending=False, inplace=True)
for i in range(len(frame)):
item = frame.index[i]
lat, lon, country = item
result_items = ReverseGeoCode().get_address_attributes(lat, lon, 10, 'city', 'country_code')
if 'city' not in result_items.keys():
mark = "%s (%s)" % (manipulate_display_name(result_items['display_name']),
result_items['country_code'].upper() if 'country_code' in result_items.keys() else country)
else:
if check_eng(result_items['city']):
mark = "%s (%s)" % (result_items['city'], result_items['country_code'].upper())
else:
mark = "%.2f %.2f (%s)" % (lat, lon, result_items['country_code'].upper())
gp = df.loc[item].plot(ax=ax, x='date', y='count', label=mark)
ax.tick_params(axis='both', which='major', labelsize=10)
ax.set_yscale("log", nonposy='clip')
plt.xlabel('Date', fontsize='small', verticalalignment='baseline', horizontalalignment='right')
plt.ylabel('Total number of views (log)', fontsize='small', verticalalignment='center', horizontalalignment='center', labelpad=6)
gp.legend(loc='best', fontsize='xx-small', ncol=2)
gp.set_title('Top 10 OSM trending places on ' + str(day.date()), {'fontsize': 'large', 'verticalalignment': 'bottom'})
plt.tight_layout()
db = TrendingDb()
db.update_table_img(plt, str(day.date()), region=country_code)
plt.close()
def plot_word_freq_dist(text):
fd = text.vocab()
samples = [item for item, _ in fd.most_common(50)]
values = [fd[sample] for sample in samples]
values = [sum(values[:i+1]) * 100.0/fd.N() for i in range(len(values))]
pylab.title(text.name)
pylab.xlabel("Samples")
pylab.ylabel("Cumulative Percentage")
pylab.plot(values)
pylab.xticks(range(len(samples)), [str(s) for s in samples], rotation=90)
pylab.show()
def demo(text=None):
from nltk.corpus import brown
from matplotlib import pylab
tt = TextTilingTokenizer(demo_mode=True)
if text is None: text = brown.raw()[:10000]
s, ss, d, b = tt.tokenize(text)
pylab.xlabel("Sentence Gap index")
pylab.ylabel("Gap Scores")
pylab.plot(range(len(s)), s, label="Gap Scores")
pylab.plot(range(len(ss)), ss, label="Smoothed Gap scores")
pylab.plot(range(len(d)), d, label="Depth scores")
pylab.stem(range(len(b)), b)
pylab.legend()
pylab.show()
def plot_position(self, pos_true, pos_est, cam_states):
N = pos_est.shape[1]
pos_true = pos_true[:, :N]
pos_est = pos_est[:, :N]
# Figure
plt.figure()
plt.suptitle("Position")
# Ground truth
plt.plot(pos_true[0, :], pos_true[1, :],
color="red", label="Grouth truth")
# color="red", marker="x", label="Grouth truth")
# Estimated
plt.plot(pos_est[0, :], pos_est[1, :],
color="blue", label="Estimated")
# color="blue", marker="o", label="Estimated")
# Sliding window
cam_pos = []
for cam_state in cam_states:
cam_pos.append(cam_state.p_G)
cam_pos = np.array(cam_pos).reshape((len(cam_pos), 3)).T
plt.plot(cam_pos[0, :], cam_pos[1, :],
color="green", label="Camera Poses")
# color="green", marker="o", label="Camera Poses")
# Plot labels and legends
plt.xlabel("East (m)")
plt.ylabel("North (m)")
plt.axis("equal")
plt.legend(loc=0)
def plot_velocity(self, timestamps, vel_true, vel_est):
N = vel_est.shape[1]
t = timestamps[:N]
vel_true = vel_true[:, :N]
vel_est = vel_est[:, :N]
# Figure
plt.figure()
plt.suptitle("Velocity")
# X axis
plt.subplot(311)
plt.plot(t, vel_true[0, :], color="red", label="Ground_truth")
plt.plot(t, vel_est[0, :], color="blue", label="Estimate")
plt.title("x-axis")
plt.xlabel("Date Time")
plt.ylabel("ms^-1")
plt.legend(loc=0)
# Y axis
plt.subplot(312)
plt.plot(t, vel_true[1, :], color="red", label="Ground_truth")
plt.plot(t, vel_est[1, :], color="blue", label="Estimate")
plt.title("y-axis")
plt.xlabel("Date Time")
plt.ylabel("ms^-1")
plt.legend(loc=0)
# Z axis
plt.subplot(313)
plt.plot(t, vel_true[2, :], color="red", label="Ground_truth")
plt.plot(t, vel_est[2, :], color="blue", label="Estimate")
plt.title("z-axis")
plt.xlabel("Date Time")
plt.ylabel("ms^-1")
plt.legend(loc=0)
def plot_attitude(self, timestamps, att_true, att_est):
# Setup
N = att_est.shape[1]
t = timestamps[:N]
att_true = att_true[:, :N]
att_est = att_est[:, :N]
# Figure
plt.figure()
plt.suptitle("Attitude")
# X axis
plt.subplot(311)
plt.plot(t, att_true[0, :], color="red", label="Ground_truth")
plt.plot(t, att_est[0, :], color="blue", label="Estimate")
plt.title("x-axis")
plt.legend(loc=0)
plt.xlabel("Date Time")
plt.ylabel("rad s^-1")
# Y axis
plt.subplot(312)
plt.plot(t, att_true[1, :], color="red", label="Ground_truth")
plt.plot(t, att_est[1, :], color="blue", label="Estimate")
plt.title("y-axis")
plt.legend(loc=0)
plt.xlabel("Date Time")
plt.ylabel("rad s^-1")
# Z axis
plt.subplot(313)
plt.plot(t, att_true[2, :], color="red", label="Ground_truth")
plt.plot(t, att_est[2, :], color="blue", label="Estimate")
plt.title("z-axis")
plt.legend(loc=0)
plt.xlabel("Date Time")
plt.ylabel("rad s^-1")
def plot_velocity(self, timestamps, vel_true, vel_est):
N = vel_est.shape[1]
t = timestamps[:N]
vel_true = vel_true[:, :N]
vel_est = vel_est[:, :N]
# Figure
plt.figure()
plt.suptitle("Velocity")
# X axis
plt.subplot(311)
plt.plot(t, vel_true[0, :], color="red", label="Ground_truth")
plt.plot(t, vel_est[0, :], color="blue", label="Estimate")
plt.title("x-axis")
plt.xlabel("Date Time")
plt.ylabel("ms^-1")
plt.legend(loc=0)
# Y axis
plt.subplot(312)
plt.plot(t, vel_true[1, :], color="red", label="Ground_truth")
plt.plot(t, vel_est[1, :], color="blue", label="Estimate")
plt.title("y-axis")
plt.xlabel("Date Time")
plt.ylabel("ms^-1")
plt.legend(loc=0)
# Z axis
plt.subplot(313)
plt.plot(t, vel_true[2, :], color="red", label="Ground_truth")
plt.plot(t, vel_est[2, :], color="blue", label="Estimate")
plt.title("z-axis")
plt.xlabel("Date Time")
plt.ylabel("ms^-1")
plt.legend(loc=0)
def plot_storage(self, storage):
plt.figure()
plt.plot(range(len(storage)), storage)
plt.title("Num of tracks over time")
plt.xlabel("Frame No.")
plt.ylabel("Num of Tracks")
def plot_tracked(self, tracked):
plt.figure()
plt.plot(range(len(tracked)), tracked)
plt.title("Matches per Frame")
plt.xlabel("Frame No.")
plt.ylabel("Num of Tracks")
def plot_1d_model(self):
plt.subplot(131)
plt.plot(self.rho_bg,self.radius)
plt.xlabel('density (kg/m3)')
plt.ylabel('radius (km)')
plt.subplot(132)
plt.plot(self.vp_bg,self.radius)
plt.xlabel('Vp (km/s)')
plt.ylabel('radius (km)')
plt.subplot(133)
plt.plot(self.vs_bg,self.radius)
plt.xlabel('Vs (km/s)')
plt.ylabel('radius (km)')
plt.show()
def plot_q(model='cem', r_min=0.0, r_max=6371.0, dr=1.0):
"""
Plot a radially symmetric Q model.
plot_q(model='cem', r_min=0.0, r_max=6371.0, dr=1.0):
r_min=minimum radius [km], r_max=maximum radius [km], dr=radius increment [km]
Currently available models (model): cem, prem, ql6
"""
r = np.arange(r_min, r_max+dr, dr)
q = np.zeros(len(r))
for k in range(len(r)):
if model=='cem':
q[k]=q_cem(r[k])
elif model=='ql6':
q[k]=q_ql6(r[k])
elif model=='prem':
q[k]=q_prem(r[k])
plt.plot(r,q,'k')
plt.xlim((0.0,r_max))
plt.xlabel('radius [km]')
plt.ylabel('Q')
plt.show()
###################################################################################################
#- CEM, EUMOD
###################################################################################################
def ylabel(s, *args, **kwargs):
print "Warning! Failed to import matplotlib so no axes will be labeled"
def _plot_mi_func(x, y):
mi = mutual_info(x, y)
title = "NI($X_1$, $X_2$) = %.3f" % mi
pylab.scatter(x, y)
pylab.title(title)
pylab.xlabel("$X_1$")
pylab.ylabel("$X_2$")
def _plot_correlation_func(x, y):
r, p = pearsonr(x, y)
title = "Cor($X_1$, $X_2$) = %.3f" % r
pylab.scatter(x, y)
pylab.title(title)
pylab.xlabel("$X_1$")
pylab.ylabel("$X_2$")
f1 = scipy.poly1d(scipy.polyfit(x, y, 1))
pylab.plot(x, f1(x), "r--", linewidth=2)
# pylab.xticks([w*7*24 for w in [0,1,2,3,4]], ['week %i'%(w+1) for w in
# [0,1,2,3,4]])
def plot_pr(auc_score, name, phase, precision, recall, label=None):
pylab.clf()
pylab.figure(num=None, figsize=(5, 4))
pylab.grid(True)
pylab.fill_between(recall, precision, alpha=0.5)
pylab.plot(recall, precision, lw=1)
pylab.xlim([0.0, 1.0])
pylab.ylim([0.0, 1.0])
pylab.xlabel('Recall')
pylab.ylabel('Precision')
pylab.title('P/R curve (AUC=%0.2f) / %s' % (auc_score, label))
filename = name.replace(" ", "_")
pylab.savefig(os.path.join(CHART_DIR, "pr_%s_%s.png" %
(filename, phase)), bbox_inches="tight")
def plot_log():
pylab.clf()
pylab.figure(num=None, figsize=(6, 5))
x = np.arange(0.001, 1, 0.001)
y = np.log(x)
pylab.title('Relationship between probabilities and their logarithm')
pylab.plot(x, y)
pylab.grid(True)
pylab.xlabel('P')
pylab.ylabel('log(P)')
filename = 'log_probs.png'
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_feat_hist(data_name_list, filename=None):
pylab.clf()
num_rows = 1 + (len(data_name_list) - 1) // 2
num_cols = 1 if len(data_name_list) == 1 else 2
pylab.figure(figsize=(5 * num_cols, 4 * num_rows))
for i in range(num_rows):
for j in range(num_cols):
pylab.subplot(num_rows, num_cols, 1 + i * num_cols + j)
x, name = data_name_list[i * num_cols + j]
pylab.title(name)
pylab.xlabel('Value')
pylab.ylabel('Density')
# the histogram of the data
max_val = np.max(x)
if max_val <= 1.0:
bins = 50
elif max_val > 50:
bins = 50
else:
bins = max_val
n, bins, patches = pylab.hist(
x, bins=bins, normed=1, facecolor='green', alpha=0.75)
pylab.grid(True)
if not filename:
filename = "feat_hist_%s.png" % name
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")