Python matplotlib.pylab module: clf() example source code
We extracted the following 46 code examples from open-source Python projects to illustrate how to use matplotlib.pylab.clf().
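As a quick orientation before the project examples (this minimal sketch is not taken from any of the projects below; the Agg backend and the output file names are illustrative assumptions only): clf() clears the current figure so the same figure can be reused for the next plot.

import matplotlib
matplotlib.use("Agg")  # assumed non-interactive backend so the sketch runs headless
import matplotlib.pylab as pylab
import numpy as np

x = np.linspace(0, 2 * np.pi, 200)
pylab.plot(x, np.sin(x))
pylab.savefig("sine.png")    # hypothetical output path

pylab.clf()                  # clear the current figure before reusing it
pylab.plot(x, np.cos(x))
pylab.savefig("cosine.png")  # this figure no longer contains the sine curve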
def prec_ets(n_trees, X_train, y_train, X_test, y_test, random_state=None):
    """
    ExtraTrees
    """
    from sklearn.ensemble import ExtraTreesClassifier
    if not issparse(X_train):
        X_train = X_train.reshape((X_train.shape[0], -1))
    if not issparse(X_test):
        X_test = X_test.reshape((X_test.shape[0], -1))
    LOGGER.info('start predict: n_trees={},X_train.shape={},y_train.shape={},X_test.shape={},y_test.shape={}'.format(
        n_trees, X_train.shape, y_train.shape, X_test.shape, y_test.shape))
    clf = ExtraTreesClassifier(n_estimators=n_trees, max_depth=None, n_jobs=-1, verbose=1, random_state=random_state)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    prec = float(np.sum(y_pred == y_test)) / len(y_test)
    LOGGER.info('prec_ets{}={:.6f}%'.format(n_trees, prec*100.0))
    return clf, y_pred
def prec_rf(n_trees, X_train, y_train, X_test, y_test):
    """
    RandomForest
    """
    from sklearn.ensemble import RandomForestClassifier
    if not issparse(X_train):
        X_train = X_train.reshape((X_train.shape[0], -1))
    if not issparse(X_test):
        X_test = X_test.reshape((X_test.shape[0], -1))
    LOGGER.info('start predict: n_trees={},X_train.shape={},y_train.shape={},X_test.shape={},y_test.shape={}'.format(
        n_trees, X_train.shape, y_train.shape, X_test.shape, y_test.shape))
    clf = RandomForestClassifier(n_estimators=n_trees, max_depth=None, n_jobs=-1, verbose=1)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    prec = float(np.sum(y_pred == y_test)) / len(y_test)
    LOGGER.info('prec_rf{}={:.6f}%'.format(n_trees, prec*100.0))
    return clf, y_pred
def prec_xgb(n_trees, max_depth, X_train, y_train, X_test, y_test, learning_rate=0.1):
    """
    XGBoost
    """
    import xgboost as xgb
    X_train = X_train.reshape((X_train.shape[0], -1))
    X_test = X_test.reshape((X_test.shape[0], -1))
    LOGGER.info('start predict: n_trees={},X_train.shape={},y_train.shape={},X_test.shape={},y_test.shape={}'.format(
        n_trees, X_train.shape, y_train.shape, X_test.shape, y_test.shape))
    clf = xgb.XGBClassifier(n_estimators=n_trees, max_depth=max_depth, objective='multi:softprob',
                            seed=0, silent=True, nthread=-1, learning_rate=learning_rate)
    eval_set = [(X_test, y_test)]
    clf.fit(X_train, y_train, eval_set=eval_set, eval_metric="merror")
    y_pred = clf.predict(X_test)
    prec = float(np.sum(y_pred == y_test)) / len(y_test)
    LOGGER.info('prec_xgb_{}={:.6f}%'.format(n_trees, prec*100.0))
    return clf, y_pred
def prec_log(X_train, y_train, X_test, y_test):
    from sklearn.linear_model import LogisticRegression
    if not issparse(X_train):
        X_train = X_train.reshape((X_train.shape[0], -1))
    if not issparse(X_test):
        X_test = X_test.reshape((X_test.shape[0], -1))
    LOGGER.info('start predict: X_train.shape={},y_train.shape={},X_test.shape={},y_test.shape={}'.format(
        X_train.shape, y_train.shape, X_test.shape, y_test.shape))
    clf = LogisticRegression(solver='sag', n_jobs=-1, verbose=1)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    prec = float(np.sum(y_pred == y_test)) / len(y_test)
    LOGGER.info('prec_log={:.6f}%'.format(prec*100.0))
    return clf, y_pred
def show_mpl(self, im, enhance=True, clear_fig=True):
    if self._pylab is None:
        import pylab
        self._pylab = pylab
    if self._render_figure is None:
        self._render_figure = self._pylab.figure(1)
    if clear_fig: self._render_figure.clf()
    if enhance:
        nz = im[im > 0.0]
        nim = im / (nz.mean() + 6.0 * np.std(nz))
        nim[nim > 1.0] = 1.0
        nim[nim < 0.0] = 0.0
        del nz
    else:
        nim = im
    ax = self._pylab.imshow(nim[:,:,:3]/nim[:,:,:3].max(), origin='upper')
    return ax
def test_plot_error_ellipse(self):
    # Generate random data
    x = np.random.normal(0, 1, 300)
    s = np.array([2.0, 2.0])
    y1 = np.random.normal(s[0] * x)
    y2 = np.random.normal(s[1] * x)
    data = np.array([y1, y2])
    # Calculate covariance and plot error ellipse
    cov = np.cov(data)
    plot_error_ellipse([0.0, 0.0], cov)
    debug = False
    if debug:
        plt.scatter(data[0, :], data[1, :])
        plt.xlim([-8, 8])
        plt.ylim([-8, 8])
        plt.show()
        plt.clf()
def plot_1d(dataset, nbins, data):
    with sns.axes_style('white'):
        plt.rc('font', weight='bold')
        plt.rc('grid', lw=2)
        plt.rc('lines', lw=3)
        plt.figure(1)
        plt.hist(data, bins=np.arange(nbins+1), color='blue')
        plt.ylabel('Count', weight='bold', fontsize=24)
        xticks = list(plt.gca().get_xticks())
        while (nbins-1) / float(xticks[-1]) < 1.1:
            xticks = xticks[:-1]
        while xticks[0] < 0:
            xticks = xticks[1:]
        xticks.append(nbins-1)
        xticks = list(sorted(xticks))
        plt.gca().set_xticks(xticks)
        plt.xlim([int(np.ceil(-0.05*nbins)), int(np.ceil(nbins*1.05))])
        plt.legend(loc='upper right')
        plt.savefig('plots/marginals-{0}.pdf'.format(dataset.replace('_','-')), bbox_inches='tight')
        plt.clf()
        plt.close()
def plot_2d(dataset, nbins, data, extra=None):
    with sns.axes_style('white'):
        plt.rc('font', weight='bold')
        plt.rc('grid', lw=2)
        plt.rc('lines', lw=2)
        rows, cols = nbins
        im = np.zeros(nbins)
        for i in xrange(rows):
            for j in xrange(cols):
                im[i,j] = ((data[:,0] == i) & (data[:,1] == j)).sum()
        plt.imshow(im, cmap='gray_r', interpolation='none')
        if extra is not None:
            dataset += extra
        plt.savefig('plots/marginals-{0}.pdf'.format(dataset.replace('_','-')), bbox_inches='tight')
        plt.clf()
        plt.close()
def plot_1d(dataset, nbins):
    data = np.loadtxt('experiments/uci/data/splits/{0}_all.csv'.format(dataset), skiprows=1, delimiter=',')[:,-1]
    with sns.axes_style('white'):
        plt.rc('font', weight='bold')
        plt.rc('grid', lw=2)
        plt.rc('lines', lw=3)
        plt.figure(1)
        plt.hist(data, bins=np.arange(nbins+1), color='blue')
        plt.ylabel('Count', weight='bold', fontsize=24)
        xticks = list(plt.gca().get_xticks())
        while (nbins-1) / float(xticks[-1]) < 1.1:
            xticks = xticks[:-1]
        while xticks[0] < 0:
            xticks = xticks[1:]
        xticks.append(nbins-1)
        xticks = list(sorted(xticks))
        plt.gca().set_xticks(xticks)
        plt.xlim([int(np.ceil(-0.05*nbins)), int(np.ceil(nbins*1.05))])
        plt.legend(loc='upper right')
        plt.savefig('plots/marginals-{0}.pdf'.format(dataset.replace('_','-')), bbox_inches='tight')
        plt.clf()
        plt.close()
def plot_2d(dataset, nbins, data=None, extra=None):
    if data is None:
        data = np.loadtxt('experiments/uci/data/splits/{0}_all.csv'.format(dataset), skiprows=1, delimiter=',')[:,-2:]
    with sns.axes_style('white'):
        plt.rc('font', weight='bold')
        plt.rc('grid', lw=2)
        plt.rc('lines', lw=2)
        rows, cols = nbins
        im = np.zeros(nbins)
        for i in xrange(rows):
            for j in xrange(cols):
                im[i,j] = ((data[:,0] == i) & (data[:,1] == j)).sum()
        plt.imshow(im, cmap='gray_r', interpolation='none')
        if extra is not None:
            dataset += extra
        plt.savefig('plots/marginals-{0}.pdf'.format(dataset.replace('_','-')), bbox_inches='tight')
        plt.clf()
        plt.close()
def plotLine(self, x_vals, y_vals, x_label, y_label, title, filename=None):
    plt.clf()
    plt.xlabel(x_label)
    plt.xlim(((min(x_vals) - 0.5), (max(x_vals) + 0.5)))
    plt.ylabel(y_label)
    plt.ylim(((min(y_vals) - 0.5), (max(y_vals) + 0.5)))
    plt.title(title)
    plt.plot(x_vals, y_vals, c='k', lw=2)
    #plt.plot(x_vals, len(x_vals) * y_vals[0], c='r', lw=2)
    if filename is None:
        plt.show()
    else:
        plt.savefig(self.outputPath + filename)
def plot_entropy():
    pylab.clf()
    pylab.figure(num=None, figsize=(5, 4))
    title = "Entropy $H(X)$"
    pylab.title(title)
    pylab.xlabel("$P(X=$coin will show heads up$)$")
    pylab.ylabel("$H(X)$")
    pylab.xlim(xmin=0, xmax=1.1)
    x = np.arange(0.001, 1, 0.001)
    y = -x * np.log2(x) - (1 - x) * np.log2(1 - x)
    pylab.plot(x, y)
    # pylab.xticks([w*7*24 for w in [0,1,2,3,4]], ['week %i'%(w+1) for w in
    # [0,1,2,3,4]])
    pylab.autoscale(tight=True)
    pylab.grid(True)
    filename = "entropy_demo.png"
    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_feat_importance(feature_names, clf, name):
    pylab.clf()
    coef_ = clf.coef_
    important = np.argsort(np.absolute(coef_.ravel()))
    f_imp = feature_names[important]
    coef = coef_.ravel()[important]
    inds = np.argsort(coef)
    f_imp = f_imp[inds]
    coef = coef[inds]
    xpos = np.array(range(len(coef)))
    pylab.bar(xpos, coef, width=1)
    pylab.title('Feature importance for %s' % (name))
    ax = pylab.gca()
    ax.set_xticks(np.arange(len(coef)))
    labels = ax.set_xticklabels(f_imp)
    for label in labels:
        label.set_rotation(90)
    filename = name.replace(" ", "_")
    pylab.savefig(os.path.join(
        CHART_DIR, "feat_imp_%s.png" % filename), bbox_inches="tight")
def plot_confusion_matrix(cm, genre_list, name, title):
    pylab.clf()
    pylab.matshow(cm, fignum=False, cmap='Blues', vmin=0, vmax=1.0)
    ax = pylab.axes()
    ax.set_xticks(range(len(genre_list)))
    ax.set_xticklabels(genre_list)
    ax.xaxis.set_ticks_position("bottom")
    ax.set_yticks(range(len(genre_list)))
    ax.set_yticklabels(genre_list)
    pylab.title(title)
    pylab.colorbar()
    pylab.grid(False)
    pylab.show()
    pylab.xlabel('Predicted class')
    pylab.ylabel('True class')
    pylab.grid(False)
    pylab.savefig(
        os.path.join(CHART_DIR, "confusion_matrix_%s.png" % name), bbox_inches="tight")
def plot_roc(auc_score, name, tpr, fpr, label=None):
    pylab.clf()
    pylab.figure(num=None, figsize=(5, 4))
    pylab.grid(True)
    pylab.plot([0, 1], [0, 1], 'k--')
    pylab.plot(fpr, tpr)
    pylab.fill_between(fpr, tpr, alpha=0.5)
    pylab.xlim([0.0, 1.0])
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('False Positive Rate')
    pylab.ylabel('True Positive Rate')
    pylab.title('ROC curve (AUC = %0.2f) / %s' %
                (auc_score, label), verticalalignment="bottom")
    pylab.legend(loc="lower right")
    filename = name.replace(" ", "_")
    pylab.savefig(
        os.path.join(CHART_DIR, "roc_" + filename + ".png"), bbox_inches="tight")
def plot_confusion_matrix(cm, plot_title, filename, genres=None):
    if not genres:
        genres = GENRES
    pylab.clf()
    pylab.matshow(cm, fignum=False, cmap='Blues', vmin=0, vmax=100.0)
    axes = pylab.axes()
    axes.set_xticks(range(len(genres)))
    axes.set_xticklabels(genres, rotation=45)
    axes.set_yticks(range(len(genres)))
    axes.set_yticklabels(genres)
    axes.xaxis.set_ticks_position("bottom")
    pylab.title(plot_title, fontsize=14)
    pylab.colorbar()
    pylab.xlabel('Predicted class', fontsize=12)
    pylab.ylabel('Correct class', fontsize=12)
    pylab.grid(False)
    #pylab.show()
    pylab.savefig(os.path.join(PLOTS_DIR, "cm_%s.eps" % filename), bbox_inches="tight")
def plot_true_and_augmented_data(sample, noised_sample, label, n_examples):
    output_dir = os.path.split(FLAGS.output)[0]
    # Save augmented data
    plt.clf()
    fig, ax = plt.subplots(3, 1)
    for t in range(noised_sample.shape[1]):
        ax[t].plot(noised_sample[:, t])
        ax[t].set_xlabel('time (samples)')
        ax[t].set_ylabel('amplitude')
    ax[0].set_title('window {:03d}, cluster_id: {}'.format(n_examples, label))
    plt.savefig(os.path.join(output_dir, "augmented_data",
                             'augmented_{:03d}.pdf'.format(n_examples)))
    plt.close()
    # Save true data
    plt.clf()
    fig, ax = plt.subplots(3, 1)
    for t in range(sample.shape[1]):
        ax[t].plot(sample[:, t])
        ax[t].set_xlabel('time (samples)')
        ax[t].set_ylabel('amplitude')
    ax[0].set_title('window {:03d}, cluster_id: {}'.format(n_examples, label))
    plt.savefig(os.path.join(output_dir, "true_data",
                             'true__{:03d}.pdf'.format(n_examples)))
    plt.close()
def save(self, out_path):
    '''Saves a figure for the monitor

    Args:
        out_path: str
    '''
    plt.clf()
    np.set_printoptions(precision=4)
    font = {
        'size': 7
    }
    matplotlib.rc('font', **font)
    y = 2
    x = ((len(self.d) - 1) // y) + 1
    fig, axes = plt.subplots(y, x)
    fig.set_size_inches(20, 8)
    for j, (k, v) in enumerate(self.d.iteritems()):
        ax = axes[j // x, j % x]
        ax.plot(v, label=k)
        if k in self.d_valid.keys():
            ax.plot(self.d_valid[k], label=k + '(valid)')
        ax.set_title(k)
        ax.legend()
    plt.tight_layout()
    plt.savefig(out_path, facecolor=(1, 1, 1))
    plt.close()
def onehist(x, xlabel='', fontsize=12):
    """
    Script that plots the histogram of x with the corresponding xlabel.
    """
    pylab.clf()
    pylab.rcParams.update({'font.size': fontsize})
    pylab.hist(x, histtype='stepfilled')
    pylab.legend()
    #### Change the X-axis appropriately ####
    pylab.xlabel(xlabel)
    pylab.ylabel('Number')
    pylab.draw()
    pylab.show()
def threehistsx(x1, x2, x3, x1leg='$x_1$', x2leg='$x_2$', x3leg='$x_3$', fig=1, fontsize=12, bins1=10, bins2=10, bins3=10):
    """
    Script that pretty-plots three histograms of quantities x1, x2 and x3.

    Arguments:
    :param x1,x2,x3: arrays with data to be plotted
    :param x1leg, x2leg, x3leg: legends for each histogram
    :param fig: which plot window should I use?

    Example:
    x1=Lbol(AD), x2=Lbol(JD), x3=Lbol(EHF10)
    >>> threehists(x1,x2,x3,38,44,'AD','JD','EHF10','$\log L_{\\rm bol}$ (erg s$^{-1}$)')

    Inspired by http://www.scipy.org/Cookbook/Matplotlib/Multiple_Subplots_with_One_Axis_Label.
    """
    pylab.rcParams.update({'font.size': fontsize})
    pylab.figure(fig)
    pylab.clf()
    pylab.subplot(3, 1, 1)
    pylab.hist(x1, label=x1leg, color='b', bins=bins1)
    pylab.legend(loc='best', frameon=False)
    pylab.subplot(3, 1, 2)
    pylab.hist(x2, label=x2leg, color='r', bins=bins2)
    pylab.legend(loc='best', frameon=False)
    pylab.subplot(3, 1, 3)
    pylab.hist(x3, label=x3leg, color='y', bins=bins3)
    pylab.legend(loc='best', frameon=False)
    pylab.minorticks_on()
    pylab.subplots_adjust(hspace=0.15)
    pylab.draw()
    pylab.show()
def plot_forest_all_proba(y_proba_all, y_gt):
    from matplotlib import pylab
    N = len(y_gt)
    num_tree = len(y_proba_all)
    pylab.clf()
    mat = np.zeros((num_tree, N))
    LOGGER.info('mat.shape={}'.format(mat.shape))
    for i in range(num_tree):
        mat[i,:] = y_proba_all[i][(range(N), y_gt)]
    pylab.matshow(mat, fignum=False, cmap='Blues', vmin=0, vmax=1.0)
    pylab.grid(False)
    pylab.show()
def save_annotated(self, fn, image, enhance=True, dpi=100, clear_fig=True,
                   label_fmt=None):
    """
    Save an image with the transfer function represented as a colorbar.

    Parameters
    ----------
    fn : str
        The output filename
    image : ImageArray
        The image to annotate
    enhance : bool, optional
        Enhance the contrast (default: True)
    dpi : int, optional
        Dots per inch in the output image (default: 100)
    clear_fig : bool, optional
        Reset the figure (through pylab.clf()) before drawing. Setting
        this to false can allow us to overlay the image onto an
        existing figure
    label_fmt : str, optional
        A format specifier (e.g., label_fmt="%.2g") to use in formatting
        the data values that label the transfer function colorbar.
    """
    image = image.swapaxes(0, 1)
    ax = self.show_mpl(image, enhance=enhance, clear_fig=clear_fig)
    self.annotate(ax.axes, enhance, label_fmt=label_fmt)
    self._pylab.savefig(fn, bbox_inches='tight', facecolor='black', dpi=dpi)
def test_update(self):
    debug = False
    tracks_tracked = []
    # Loop through images
    index = 0
    while index < len(self.img):
        # Index out of bounds guard
        index = 0 if index < 0 else index
        # Feature tracker update
        self.tracker.update(self.img[index], debug)
        tracks_tracked.append(len(self.tracker.tracks_tracking))
        # Display image
        if debug:
            cv2.imshow("VO Sequence " + self.data.sequence, self.img[index])
            key = cv2.waitKey(0)
            if key == ord('q'):  # Quit
                sys.exit(1)
            elif key == ord('p'):  # Previous image
                index -= 1
            else:
                index += 1
        else:
            index += 1
    if debug:
        import matplotlib.pylab as plt
        plt.plot(range(len(tracks_tracked)), tracks_tracked)
        plt.show()
        plt.clf()
def plot_pr(auc_score, name, phase, precision, recall, label=None):
    pylab.clf()
    pylab.figure(num=None, figsize=(5, 4))
    pylab.grid(True)
    pylab.fill_between(recall, precision, alpha=0.5)
    pylab.plot(recall, precision, lw=1)
    pylab.xlim([0.0, 1.0])
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('Recall')
    pylab.ylabel('Precision')
    pylab.title('P/R curve (AUC=%0.2f) / %s' % (auc_score, label))
    filename = name.replace(" ", "_")
    pylab.savefig(os.path.join(CHART_DIR, "pr_%s_%s.png" %
                               (filename, phase)), bbox_inches="tight")
def show_most_informative_features(vectorizer, clf, n=20):
    c_f = sorted(zip(clf.coef_[0], vectorizer.get_feature_names()))
    top = zip(c_f[:n], c_f[:-(n + 1):-1])
    for (c1, f1), (c2, f2) in top:
        print "\t%.4f\t%-15s\t\t%.4f\t%-15s" % (c1, f1, c2, f2)
def plot_log():
    pylab.clf()
    pylab.figure(num=None, figsize=(6, 5))
    x = np.arange(0.001, 1, 0.001)
    y = np.log(x)
    pylab.title('Relationship between probabilities and their logarithm')
    pylab.plot(x, y)
    pylab.grid(True)
    pylab.xlabel('P')
    pylab.ylabel('log(P)')
    filename = 'log_probs.png'
    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_feat_hist(data_name_list, filename=None):
    pylab.clf()
    num_rows = 1 + (len(data_name_list) - 1) / 2
    num_cols = 1 if len(data_name_list) == 1 else 2
    pylab.figure(figsize=(5 * num_cols, 4 * num_rows))
    for i in range(num_rows):
        for j in range(num_cols):
            pylab.subplot(num_rows, num_cols, 1 + i * num_cols + j)
            x, name = data_name_list[i * num_cols + j]
            pylab.title(name)
            pylab.xlabel('Value')
            pylab.ylabel('Density')
            # the histogram of the data
            max_val = np.max(x)
            if max_val <= 1.0:
                bins = 50
            elif max_val > 50:
                bins = 50
            else:
                bins = max_val
            n, bins, patches = pylab.hist(
                x, bins=bins, normed=1, facecolor='green', alpha=0.75)
            pylab.grid(True)
    if not filename:
        filename = "feat_hist_%s.png" % name
    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def log_false_positives(clf, X, y, name):
    with open("FP_" + name.replace(" ", "_") + ".tsv", "w") as f:
        false_positive = clf.predict(X) != y
        for tweet, false_class in zip(X[false_positive], y[false_positive]):
            f.write("%s\t%s\n" %
                    (false_class, tweet.encode("ascii", "ignore")))
def plot_pr(auc_score, name, precision, recall, label=None):
    pylab.clf()
    pylab.figure(num=None, figsize=(5, 4))
    pylab.grid(True)
    pylab.fill_between(recall, precision, alpha=0.5)
    pylab.plot(recall, precision, lw=1)
    pylab.xlim([0.0, 1.0])
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('Recall')
    pylab.ylabel('Precision')
    pylab.title('P/R curve (AUC = %0.2f) / %s' % (auc_score, label))
    filename = name.replace(" ", "_")
    pylab.savefig(
        os.path.join(CHART_DIR, "pr_" + filename + ".png"), bbox_inches="tight")
def show_most_informative_features(vectorizer, clf, n=20):
    c_f = sorted(zip(clf.coef_[0], vectorizer.get_feature_names()))
    top = zip(c_f[:n], c_f[:-(n + 1):-1])
    for (c1, f1), (c2, f2) in top:
        print("\t%.4f\t%-15s\t\t%.4f\t%-15s" % (c1, f1, c2, f2))
def plot_bias_variance(data_sizes, train_errors, test_errors, name):
    pylab.clf()
    pylab.ylim([0.0, 1.0])
    pylab.xlabel('Data set size')
    pylab.ylabel('Error')
    pylab.title("Bias-Variance for '%s'" % name)
    pylab.plot(
        data_sizes, train_errors, "-", data_sizes, test_errors, "--", lw=1)
    pylab.legend(["train error", "test error"], loc="upper right")
    pylab.grid(True)
    pylab.savefig(os.path.join(CHART_DIR, "bv_" + name + ".png"))
def twohists(x1, x2, xmin, xmax, range=None, x1leg='$x_1$', x2leg='$x_2$', xlabel='', fig=1, sharey=False, fontsize=12, bins1=10, bins2=10):
    """
    Script that plots two histograms of quantities x1 and x2
    sharing the same X-axis.

    :param x1,x2: arrays with data to be plotted
    :param xmin,xmax: lower and upper range of plotted values, will be used to set a consistent x-range
        for both histograms.
    :param x1leg, x2leg: legends for each histogram
    :param xlabel: self-explanatory.
    :param bins1,bins2: number of bins in each histogram
    :param fig: which plot window should I use?
    :param range: in the form (xmin,xmax), same as range argument for hist and applied to both
        histograms.

    Inspired by `Scipy <http://www.scipy.org/Cookbook/Matplotlib/Multiple_Subplots_with_One_Axis_Label>`_.
    """
    pylab.rcParams.update({'font.size': fontsize})
    fig = pylab.figure(fig)
    pylab.clf()
    a = fig.add_subplot(2, 1, 1)
    if sharey == True:
        b = fig.add_subplot(2, 1, 2, sharex=a, sharey=a)
    else:
        b = fig.add_subplot(2, 1, 2, sharex=a)
    a.hist(x1, bins1, label=x1leg, color='b', histtype='stepfilled', range=range)
    a.legend(loc='best', frameon=False)
    a.set_xlim(xmin, xmax)
    b.hist(x2, bins2, label=x2leg, color='r', histtype='stepfilled', range=range)
    b.legend(loc='best', frameon=False)
    pylab.setp(a.get_xticklabels(), visible=False)
    b.set_xlabel(xlabel)
    b.set_ylabel('Number', verticalalignment='bottom')
    pylab.minorticks_on()
    pylab.subplots_adjust(hspace=0.15)
    pylab.draw()
    pylab.show()
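The twohists docstring above describes the arguments but, unlike threehists below, gives no usage example. A minimal illustrative call might look like this (the data and legend strings are made up for the sketch; numpy is assumed to be imported as np, and pylab as in the rest of the module):

x1 = np.random.normal(0.0, 1.0, 1000)   # illustrative samples, not from the original project
x2 = np.random.normal(2.0, 1.0, 1000)
twohists(x1, x2, xmin=-4, xmax=6, x1leg='sample 1', x2leg='sample 2',
         xlabel='x', sharey=True, bins1=30, bins2=30)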
def threehists(x1, x2, x3, xmin, xmax, x1leg='$x_1$', x2leg='$x_2$', x3leg='$x_3$', xlabel='', fig=1, sharey=False, fontsize=12):
    """
    Script that plots three histograms of quantities x1, x2 and x3
    sharing the same X-axis.

    Arguments:
    - x1,x2,x3: arrays with data to be plotted
    - xmin,xmax: lower and upper range of plotted values, will be used to set a consistent x-range for both histograms.
    - x1leg, x2leg, x3leg: legends for each histogram
    - xlabel: self-explanatory.
    - sharey: sharing the Y-axis among the histograms?
    - fig: which plot window should I use?

    Example:
    x1=Lbol(AD), x2=Lbol(JD), x3=Lbol(EHF10)
    >>> threehists(x1,x2,x3,38,44,'AD','JD','EHF10','$\log L_{\\rm bol}$ (erg s$^{-1}$)',sharey=True)

    Inspired by `Scipy <http://www.scipy.org/Cookbook/Matplotlib/Multiple_Subplots_with_One_Axis_Label>`_.
    """
    pylab.rcParams.update({'font.size': fontsize})
    fig = pylab.figure(fig)
    pylab.clf()
    a = fig.add_subplot(3, 1, 1)
    if sharey == True:
        b = fig.add_subplot(3, 1, 2, sharex=a, sharey=a)
        c = fig.add_subplot(3, 1, 3, sharex=a, sharey=a)
    else:
        b = fig.add_subplot(3, 1, 2, sharex=a)
        c = fig.add_subplot(3, 1, 3, sharex=a)
    a.hist(x1, label=x1leg, color='b', histtype='stepfilled')
    a.legend(loc='best', frameon=False)
    a.set_xlim(xmin, xmax)
    b.hist(x2, label=x2leg, color='r', histtype='stepfilled')
    b.legend(loc='best', frameon=False)
    c.hist(x3, label=x3leg, color='y', histtype='stepfilled')
    c.legend(loc='best', frameon=False)
    pylab.setp(a.get_xticklabels(), visible=False)
    pylab.setp(b.get_xticklabels(), visible=False)
    c.set_xlabel(xlabel)
    b.set_ylabel('Number')
    pylab.minorticks_on()
    pylab.subplots_adjust(hspace=0.15)
    pylab.draw()
    pylab.show()
def fourcumplot(x1, x2, x3, x4, xmin, xmax, x1leg='$x_1$', x2leg='$x_2$', x3leg='$x_3$', x4leg='$x_4$', xlabel='', ylabel='$N(x>x\')$', fig=1, sharey=False, fontsize=12, bins1=50, bins2=50, bins3=50, bins4=50):
    """
    Script that plots the cumulative histograms of four variables x1, x2, x3 and x4
    sharing the same X-axis. For each bin, Y is the fraction of the sample
    with values above X.

    Arguments:
    - x1,x2,x3,x4: arrays with data to be plotted
    - xmin,xmax: lower and upper range of plotted values, will be used to set a consistent x-range
        for both histograms.
    - x1leg, x2leg, x3leg, x4leg: legends for each histogram
    - xlabel: self-explanatory.
    - sharey: sharing the Y-axis among the histograms?
    - bins1,bins2,...: number of bins in each histogram
    - fig: which plot window should I use?

    Inspired by `Scipy <http://www.scipy.org/Cookbook/Matplotlib/Multiple_Subplots_with_One_Axis_Label>`_.

    v1 Jun. 2012: inherited from fourhists.
    """
    pylab.rcParams.update({'font.size': fontsize})
    fig = pylab.figure(fig)
    pylab.clf()
    a = fig.add_subplot(4, 1, 1)
    if sharey == True:
        b = fig.add_subplot(4, 1, 2, sharex=a, sharey=a)
        c = fig.add_subplot(4, 1, 3, sharex=a, sharey=a)
        d = fig.add_subplot(4, 1, 4, sharex=a, sharey=a)
    else:
        b = fig.add_subplot(4, 1, 2, sharex=a)
        c = fig.add_subplot(4, 1, 3, sharex=a)
        d = fig.add_subplot(4, 1, 4, sharex=a)
    a.hist(x1, bins1, label=x1leg, color='b', cumulative=-True, normed=True, histtype='stepfilled')
    a.legend(loc='best', frameon=False)
    a.set_xlim(xmin, xmax)
    b.hist(x2, bins2, label=x2leg, color='r', cumulative=-True, normed=True, histtype='stepfilled')
    b.legend(loc='best', frameon=False)
    c.hist(x3, bins3, label=x3leg, color='y', cumulative=-True, normed=True, histtype='stepfilled')
    c.legend(loc='best', frameon=False)
    d.hist(x4, bins4, label=x4leg, color='g', cumulative=-True, normed=True, histtype='stepfilled')
    d.legend(loc='best', frameon=False)
    pylab.setp(a.get_xticklabels(), visible=False)
    pylab.setp(b.get_xticklabels(), visible=False)
    pylab.setp(c.get_xticklabels(), visible=False)
    d.set_xlabel(xlabel)
    c.set_ylabel(ylabel)
    pylab.minorticks_on()
    pylab.subplots_adjust(hspace=0.15)
    pylab.draw()
    pylab.show()
def plotPolicy(self, policy, prefix):
    plt.clf()
    for idx in xrange(len(policy)):
        i, j = self.env.getStateXY(idx)
        dx = 0
        dy = 0
        if policy[idx] == 0:  # up
            dy = 0.35
        elif policy[idx] == 1:  # right
            dx = 0.35
        elif policy[idx] == 2:  # down
            dy = -0.35
        elif policy[idx] == 3:  # left
            dx = -0.35
        elif self.matrixMDP[i][j] != -1 and policy[idx] == 4:  # termination
            circle = plt.Circle(
                (j + 0.5, self.numRows - i + 0.5 - 1), 0.025, color='k')
            plt.gca().add_artist(circle)
        if self.matrixMDP[i][j] != -1:
            plt.arrow(j + 0.5, self.numRows - i + 0.5 - 1, dx, dy,
                      head_width=0.05, head_length=0.05, fc='k', ec='k')
        else:
            plt.gca().add_patch(
                patches.Rectangle(
                    (j, self.numRows - i - 1),  # (x,y)
                    1.0,  # width
                    1.0,  # height
                    facecolor="gray"
                )
            )
    plt.xlim([0, self.numCols])
    plt.ylim([0, self.numRows])
    for i in xrange(self.numCols):
        plt.axvline(i, color='k', linestyle=':')
    plt.axvline(self.numCols, color='k', linestyle=':')
    for j in xrange(self.numRows):
        plt.axhline(j, color='k', linestyle=':')
    plt.axhline(self.numRows, color='k', linestyle=':')
    plt.savefig(self.outputPath + prefix + 'policy.png')
    plt.close()
def plot_mi_demo():
    np.random.seed(0)  # to reproduce the data later on
    pylab.clf()
    pylab.figure(num=None, figsize=(8, 8))
    x = np.arange(0, 10, 0.2)
    pylab.subplot(221)
    y = 0.5 * x + norm.rvs(1, scale=.01, size=len(x))
    _plot_mi_func(x, y)
    pylab.subplot(222)
    y = 0.5 * x + norm.rvs(1, scale=.1, size=len(x))
    _plot_mi_func(x, y)
    pylab.subplot(223)
    y = 0.5 * x + norm.rvs(1, scale=1, size=len(x))
    _plot_mi_func(x, y)
    pylab.subplot(224)
    y = norm.rvs(1, scale=10, size=len(x))
    _plot_mi_func(x, y)
    pylab.autoscale(tight=True)
    pylab.grid(True)
    filename = "mi_demo_1.png"
    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
    pylab.clf()
    pylab.figure(num=None, figsize=(8, 8))
    x = np.arange(-5, 5, 0.2)
    pylab.subplot(221)
    y = 0.5 * x ** 2 + norm.rvs(1, scale=.01, size=len(x))
    _plot_mi_func(x, y)
    pylab.subplot(222)
    y = 0.5 * x ** 2 + norm.rvs(1, scale=.1, size=len(x))
    _plot_mi_func(x, y)
    pylab.subplot(223)
    y = 0.5 * x ** 2 + norm.rvs(1, scale=1, size=len(x))
    _plot_mi_func(x, y)
    pylab.subplot(224)
    y = 0.5 * x ** 2 + norm.rvs(1, scale=10, size=len(x))
    _plot_mi_func(x, y)
    pylab.autoscale(tight=True)
    pylab.grid(True)
    filename = "mi_demo_2.png"
    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_correlation_demo():
    np.random.seed(0)  # to reproduce the data later on
    pylab.clf()
    pylab.figure(num=None, figsize=(8, 8))
    x = np.arange(0, 10, 0.2)
    pylab.subplot(221)
    y = 0.5 * x + norm.rvs(1, scale=.01, size=len(x))
    _plot_correlation_func(x, y)
    pylab.subplot(222)
    y = 0.5 * x + norm.rvs(1, scale=.1, size=len(x))
    _plot_correlation_func(x, y)
    pylab.subplot(223)
    y = 0.5 * x + norm.rvs(1, scale=1, size=len(x))
    _plot_correlation_func(x, y)
    pylab.subplot(224)
    y = norm.rvs(1, scale=10, size=len(x))
    _plot_correlation_func(x, y)
    pylab.autoscale(tight=True)
    pylab.grid(True)
    filename = "corr_demo_1.png"
    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
    pylab.clf()
    pylab.figure(num=None, figsize=(8, 8))
    x = np.arange(-5, 5, 0.2)
    pylab.subplot(221)
    y = 0.5 * x ** 2 + norm.rvs(1, scale=.01, size=len(x))
    _plot_correlation_func(x, y)
    pylab.subplot(222)
    y = 0.5 * x ** 2 + norm.rvs(1, scale=.1, size=len(x))
    _plot_correlation_func(x, y)
    pylab.subplot(223)
    y = 0.5 * x ** 2 + norm.rvs(1, scale=1, size=len(x))
    _plot_correlation_func(x, y)
    pylab.subplot(224)
    y = 0.5 * x ** 2 + norm.rvs(1, scale=10, size=len(x))
    _plot_correlation_func(x, y)
    pylab.autoscale(tight=True)
    pylab.grid(True)
    filename = "corr_demo_2.png"
    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_simple_demo_1():
    pylab.clf()
    fig = pylab.figure(num=None, figsize=(10, 4))
    pylab.subplot(121)
    title = "Original feature space"
    pylab.title(title)
    pylab.xlabel("$X_1$")
    pylab.ylabel("$X_2$")
    x1 = np.arange(0, 10, .2)
    x2 = x1 + np.random.normal(scale=1, size=len(x1))
    good = (x1 > 5) | (x2 > 5)
    bad = ~good
    x1g = x1[good]
    x2g = x2[good]
    pylab.scatter(x1g, x2g, edgecolor="blue", facecolor="blue")
    x1b = x1[bad]
    x2b = x2[bad]
    pylab.scatter(x1b, x2b, edgecolor="red", facecolor="white")
    pylab.grid(True)
    pylab.subplot(122)
    X = np.c_[(x1, x2)]
    pca = decomposition.PCA(n_components=1)
    Xtrans = pca.fit_transform(X)
    Xg = Xtrans[good]
    Xb = Xtrans[bad]
    pylab.scatter(
        Xg[:, 0], np.zeros(len(Xg)), edgecolor="blue", facecolor="blue")
    pylab.scatter(
        Xb[:, 0], np.zeros(len(Xb)), edgecolor="red", facecolor="white")
    title = "Transformed feature space"
    pylab.title(title)
    pylab.xlabel("$X'$")
    fig.axes[1].get_yaxis().set_visible(False)
    print(pca.explained_variance_ratio_)
    pylab.grid(True)
    pylab.autoscale(tight=True)
    filename = "pca_demo_1.png"
    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_simple_demo_2():
    pylab.clf()
    fig = pylab.figure(num=None, figsize=(10, 4))
    pylab.subplot(121)
    title = "Original feature space"
    pylab.title(title)
    pylab.xlabel("$X_1$")
    pylab.ylabel("$X_2$")
    x1 = np.arange(0, 10, .2)
    x2 = x1 + np.random.normal(scale=1, size=len(x1))
    good = x1 > x2
    bad = ~good
    x1g = x1[good]
    x2g = x2[good]
    pylab.scatter(x1g, x2g, edgecolor="blue", facecolor="blue")
    x1b = x1[bad]
    x2b = x2[bad]
    pylab.scatter(x1b, x2b, edgecolor="red", facecolor="white")
    pylab.grid(True)
    pylab.subplot(122)
    X = np.c_[(x1, x2)]
    pca = decomposition.PCA(n_components=1)
    Xtrans = pca.fit_transform(X)
    Xg = Xtrans[good]
    Xb = Xtrans[bad]
    pylab.scatter(
        Xg[:, 0], np.zeros(len(Xg)), edgecolor="blue", facecolor="blue")
    pylab.scatter(
        Xb[:, 0], np.zeros(len(Xb)), edgecolor="red", facecolor="white")
    title = "Transformed feature space"
    pylab.title(title)
    pylab.xlabel("$X'$")
    fig.axes[1].get_yaxis().set_visible(False)
    print(pca.explained_variance_ratio_)
    pylab.grid(True)
    pylab.autoscale(tight=True)
    filename = "pca_demo_2.png"
    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")