We extracted the following 50 code examples from open-source Python projects to illustrate how to use matplotlib.pyplot.tight_layout().
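Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the output file name is purely illustrative) showing the typical call pattern: build the subplots, call plt.tight_layout() once the artists and labels are in place, then save or show the figure.

import numpy as np
import matplotlib.pyplot as plt

# Two stacked subplots whose axis labels would overlap without tight_layout().
x = np.linspace(0, 2 * np.pi, 200)
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(6, 4))
ax1.plot(x, np.sin(x))
ax1.set_ylabel('sin(x)')
ax2.plot(x, np.cos(x))
ax2.set_xlabel('x')
ax2.set_ylabel('cos(x)')
plt.tight_layout()  # recompute subplot spacing so labels and titles fit
plt.savefig('tight_layout_demo.png')  # hypothetical output path

tight_layout() also accepts pad, w_pad, and h_pad keyword arguments to control the padding, as several of the examples below demonstrate.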
def visualizeLayer(model, img, input_image, layerIndex):
    layer = model.layers[layerIndex]
    get_activations = K.function([model.layers[0].input, K.learning_phase()], [layer.output,])
    activations = get_activations([input_image, 0])[0]
    output_image = activations

    ## If 4 dimensional then take the last dimension value as it would be no of filters
    if output_image.ndim == 4:
        # Rearrange dimension so we can plot the result
        o1 = np.rollaxis(output_image, 3, 1)
        output_image = np.rollaxis(o1, 3, 1)

        print("Dumping filter data of layer{} - {}".format(layerIndex, layer.__class__.__name__))
        filters = len(output_image[0, 0, 0, :])
        fig = plt.figure(figsize=(8, 8))
        # This loop will plot the 32 filter data for the input image
        for i in range(filters):
            ax = fig.add_subplot(6, 6, i + 1)
            #ax.imshow(output_image[img,:,:,i], interpolation='none')  # to see the first filter
            ax.imshow(output_image[0, :, :, i], 'gray')
            #ax.set_title("Feature map of layer#{} \ncalled '{}' \nof type {} ".format(layerIndex,
            #              layer.name, layer.__class__.__name__))
            plt.xticks(np.array([]))
            plt.yticks(np.array([]))
        plt.tight_layout()
        #plt.show()
        fig.savefig("img_" + str(img) + "_layer" + str(layerIndex) + "_" + layer.__class__.__name__ + ".png")
        #plt.close(fig)
    else:
        print("Can't dump data of this layer{}- {}".format(layerIndex, layer.__class__.__name__))
def get_feature_importance(list_of_features):
    n_estimators = 10000
    random_state = 0
    n_jobs = 4
    x_train = data_frame[list_of_features]
    y_train = data_frame.iloc[:, -1]
    feat_labels = data_frame.columns[1:]
    forest = BaggingRegressor(n_estimators=n_estimators, random_state=random_state, n_jobs=n_jobs)
    forest.fit(x_train, y_train)
    importances = forest.feature_importances_
    indices = np.argsort(importances)[::-1]

    for f in range(x_train.shape[1]):
        print("%2d) %-*s %f" % (f + 1, 30, feat_labels[indices[f]], importances[indices[f]]))

    plt.title("Feature Importance")
    plt.bar(range(x_train.shape[1]), importances[indices], color='lightblue', align='center')
    plt.xticks(range(x_train.shape[1]), feat_labels[indices], rotation=90)
    plt.xlim([-1, x_train.shape[1]])
    plt.tight_layout()
    plt.show()
def different_training_sets():
    # base+author -> +paraphrasing -> +ifttt -> +generated
    train = [84.7, 93.2, 90.4, 91.99]
    test = [3.6, 37.4, 50.94, 55.4]
    train_recall = [66.6, 88.43, 92.63, 91.21]
    test_recall = [0.066, 49.05, 50.94, 75.47]

    #plt.newfigure()
    X = 1 + np.arange(4)
    plt.plot(X, train_recall, '--', color='#85c1e5')
    plt.plot(X, train, '-x', color='#6182a6')
    plt.plot(X, test_recall, '-o', color='#6182a6')
    plt.plot(X, test, '-', color='#052548')
    plt.ylim(0, 100)
    plt.xlim(0.5, 4.5)
    plt.xticks(X, ["Base + Author", "+ Paraphrasing", "+ IFTTT", "+ Generated"])
    plt.tight_layout()
    plt.legend(["Train recall", "Train accuracy", "Test recall", "Test accuracy"], loc='lower right')
    plt.savefig('./figures/training-sets.pdf')
def plot_line_graph_multiple_lines(x, label_to_values, title, x_label, y_label):
    if not all(len(x) == len(values) for values in label_to_values.values()):
        raise ValueError('values of label_to_values must have length len(x)')
    colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    line_styles = ['-', '--', ':']
    for (i, label) in enumerate(sorted(label_to_values.keys())):
        color = colors[i % len(colors)]
        line_style = line_styles[(i // len(colors)) % len(line_styles)]
        plt.plot(x, label_to_values[label], label=label, color=color, linestyle=line_style)
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop={'size': 9})
    plt.tight_layout(pad=9)
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.show()

# x_min, x_max for example proportion_initiated_by_user
def make_python_fig(self, code: str,
                    exts: Tuple[str, ...] = ('pdf', 'svg'),
                    tight_layout=True) -> str:
    hashsum = hashlib.md5(code.encode('utf8')).hexdigest()
    prefix = hashsum[:2]
    path = os.path.join(self.figures_dir, prefix, hashsum)
    needfigure = False
    for ext in exts:
        if not os.path.isfile(os.path.join(path, self.default_figname + "." + ext)):
            needfigure = True
            break

    if needfigure:
        make_sure_path_exists(path)
        gl = self.pythonfigure_globals
        plt.close()
        exec(code, gl)
        if tight_layout:
            plt.tight_layout()
        for ext in exts:
            plt.savefig(os.path.join(path, self.default_figname + "." + ext))

    return os.path.join(prefix, hashsum)
def plot_events_with_event_scores(gt_event_scores, detected_event_scores, ground_truth_events, detected_events, show=True):
    fig = plt.figure(figsize=(10, 3))
    for i in range(len(detected_events)):
        d = detected_events[i]
        plt.axvspan(d[0], d[1], 0, 0.5)
        plt.text((d[1] + d[0]) / 2, 0.2, detected_event_scores[i],
                 horizontalalignment='center', verticalalignment='center')
    for i in range(len(ground_truth_events)):
        gt = ground_truth_events[i]
        plt.axvspan(gt[0], gt[1], 0.5, 1)
        plt.text((gt[1] + gt[0]) / 2, 0.8, gt_event_scores[i],
                 horizontalalignment='center', verticalalignment='center')
    plt.tight_layout()
    if show:
        plt.show()
    else:
        plt.draw()
def plot_hist(baseline_samples, target_samples, true_x, true_y):
    baseline_samples = baseline_samples.squeeze()
    target_samples = target_samples.squeeze()
    bmin, bmax = baseline_samples.min(), baseline_samples.max()
    ax = sns.kdeplot(baseline_samples, shade=True, color=(0.6, 0.1, 0.1, 0.2))
    ax = sns.kdeplot(target_samples, shade=True, color=(0.1, 0.1, 0.6, 0.2))
    ax.set_xlim(bmin, bmax)
    y0, y1 = ax.get_ylim()
    plt.plot([true_y, true_y], [0, y1 - (y1 - y0) * 0.01], linewidth=1, color='r')
    plt.title('Predictive' + (f' at {true_x:.2f}' if true_x is not None else ''))
    fig = plt.gcf()
    fig.set_size_inches(9, 9)
    # plt.tight_layout()  # pad=0.4, w_pad=0.5, h_pad=1.0
    name = utils.DATA_DIR.replace('/', '-')
    # plt.tight_layout(pad=0.6)
    utils.save_fig('predictive-at-point-' + name)
def plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=plt.cm.Greys, block=True):
    # Colormaps: jet, Greys
    cm_normalized = cm.astype(np.float32) / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm_normalized, interpolation='nearest', cmap=cmap)

    # Show confidences
    for i, cas in enumerate(cm):
        for j, c in enumerate(cas):
            if c > 0:
                plt.text(j - 0.1, i + 0.2, c, fontsize=16, fontweight='bold', color='#b70000')

    f = plt.figure(1)
    f.clf()
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(target_names))
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show(block=block)
def plot_confusion_matrix(cm, clf_target_names, title='Confusion matrix', cmap=plt.cm.jet):
    target_names = map(lambda key: key.replace('_', '-'), clf_target_names)

    for idx in range(len(cm)):
        cm[idx, :] = (cm[idx, :] * 100.0 / np.sum(cm[idx, :])).astype(np.int)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    # plt.matshow(cm)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(clf_target_names))
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)
    # plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
def plotGeneratedImages(epoch, example=100, dim=(10, 10), figsize=(10, 10)):
    noise = np.random.normal(0, 1, size=(example, randomDim))
    generatedImage = generator.predict(noise)
    generatedImage = generatedImage.reshape(example, 28, 28)

    plt.figure(figsize=figsize)
    for i in range(example):
        plt.subplot(dim[0], dim[1], i + 1)
        plt.imshow(generatedImage[i], interpolation='nearest', cmap='gray')
        '''drop the x and y axis'''
        plt.axis('off')
    plt.tight_layout()

    if not os.path.exists('generated_image'):
        os.mkdir('generated_image')
    plt.savefig('generated_image/wgan_generated_img_epoch_%d.png' % epoch)
def __init__(self, model, num_nodes, ax):
    """Initialization of visualization."""
    # line1 is the snake
    # line2 is the sample points
    # lines are the external forces on each snake-node
    self._line1 = ax.plot([], [], '-ow')[0]
    if PLOT_MORE:
        self._line2 = ax.plot([], [], 'x')[0]
        self._lines = [ax.plot([], [], 'w')[0] for _ in range(num_nodes)]
    ax.set_xlim([0, 1])
    ax.set_ylim([0, 1])
    ax.set_xlabel(model.x_label, fontsize=16)
    ax.set_ylabel(model.y_label, fontsize=16)
    ax.set_xticklabels(model.x_lim, fontsize=16)
    ax.set_yticklabels(model.y_lim, fontsize=16)
    ax.set_xticks([0, 1])
    ax.set_yticks([0, 1])
    ax.set_aspect('equal')
    plt.tight_layout()
def plot_feature_importances(feature_names, feature_importances, N=30):
    importances = list(zip(feature_names, list(feature_importances)))
    importances = pd.DataFrame(importances, columns=["Feature", "Importance"])
    importances = importances.set_index("Feature")

    # Sort by the absolute value of the importance of the feature
    importances["sort"] = abs(importances["Importance"])
    importances = importances.sort(columns="sort", ascending=False).drop("sort", axis=1)
    importances = importances[0:N]

    # Show the most important positive feature at the top of the graph
    importances = importances.sort(columns="Importance", ascending=True)

    with plt.style.context(('ggplot')):
        fig, ax = plt.subplots(figsize=(16, 12))
        ax.tick_params(labelsize=16)
        importances.plot(kind="barh", legend=False, ax=ax)
        ax.set_frame_on(False)
        ax.set_xlabel("Relative importance", fontsize=20)
        ax.set_ylabel("Feature name", fontsize=20)
    plt.tight_layout()
    plt.title("Most important features for attack", fontsize=20).set_position([.5, 0.99])
    return fig
def mfi(df):
    df['date'] = pd.to_datetime(df.date)
    fig = plt.figure(figsize=(16, 9))
    gs = GridSpec(3, 1)  # 3 rows, 1 column
    fig.suptitle(df['date'][-1:].values[0])
    fig.set_label('MFI')
    price = fig.add_subplot(gs[:2, 0])
    price.plot(df['date'], df['close'], color='blue')
    indicator = fig.add_subplot(gs[2, 0], sharex=price)
    indicator.plot(df['date'], df['mfi'], c='pink')
    indicator.plot(df['date'], [20.] * len(df['date']), c='green')
    indicator.plot(df['date'], [80.] * len(df['date']), c='orange')
    price.grid(True)
    indicator.grid(True)
    plt.tight_layout()
    plt.show()
def atr(df):
    '''
    Average True Range

    :param df:
    :return:
    '''
    df['date'] = pd.to_datetime(df.date)
    fig = plt.figure(figsize=(16, 9))
    gs = GridSpec(3, 1)  # 3 rows, 1 column
    fig.suptitle(df['date'][-1:].values[0])
    fig.set_label('ATR')
    price = fig.add_subplot(gs[:2, 0])
    price.plot(df['date'], df['close'], color='blue')
    indicator = fig.add_subplot(gs[2, 0], sharex=price)
    indicator.plot(df['date'], df['atr'], c='pink')
    # indicator.plot(df['date'], [20.]*len(df['date']), c='green')
    # indicator.plot(df['date'], [80.]*len(df['date']), c='orange')
    price.grid(True)
    indicator.grid(True)
    plt.tight_layout()
    plt.show()
def rocr(df):
    '''
    Rate of Change Ratio

    :param df:
    :return:
    '''
    df['date'] = pd.to_datetime(df.date)
    fig = plt.figure(figsize=(16, 9))
    gs = GridSpec(3, 1)  # 3 rows, 1 column
    fig.suptitle(df['date'][-1:].values[0])
    fig.set_label('ATR')
    price = fig.add_subplot(gs[:2, 0])
    price.plot(df['date'], df['close'], color='blue')
    indicator = fig.add_subplot(gs[2, 0], sharex=price)
    indicator.plot(df['date'], df['rocr'], c='pink')
    # indicator.plot(df['date'], [20.]*len(df['date']), c='green')
    # indicator.plot(df['date'], [80.]*len(df['date']), c='orange')
    price.grid(True)
    indicator.grid(True)
    plt.tight_layout()
    plt.show()
def pieGraph(data_count):
    """
    Graphs a pie chart of the data with count values;
    only includes data that appears more than once.

    Parameter:
        data_count: dict
    """
    names, count = [], []
    for val, key in data_count.items():
        if key > 1:
            names.append(val)
            count.append(key)
    fig1, ax1 = plt.subplots()
    ax1.pie(count, labels=names, autopct='%1.1f%%', shadow=True, startangle=90)
    ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
    # plt.tight_layout()
    plt.show()
def pie_graph(data_count):
    """
    Graphs a pie chart of the data with count values
    (only shows schools that appear more than once).

    Parameter:
        data_count: dict
    """
    names, count = [], []
    for val, key in data_count.items():
        if key > 1:
            names.append(val)
            count.append(key)
    fig1, ax1 = plt.subplots()
    ax1.pie(count, labels=names, autopct='%1.1f%%', shadow=True, startangle=90)
    ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
    # plt.tight_layout()
    plt.show()
def plot_confusion_matrix(cm, col, title, cmap=plt.cm.viridis):
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    for i in range(cm.shape[0]):
        plt.annotate("%.2f" % cm[i][i], xy=(i, i),
                     horizontalalignment='center',
                     verticalalignment='center')
    plt.title(title, fontsize=18)
    plt.colorbar(fraction=0.046, pad=0.04)
    tick_marks = np.arange(len(col.unique()))
    plt.xticks(tick_marks, sorted(col.unique()), rotation=90)
    plt.yticks(tick_marks, sorted(col.unique()))
    plt.tight_layout()
    plt.ylabel('True label', fontsize=18)
    plt.xlabel('Predicted label', fontsize=18)

# using flavor network to project recipes from ingredient matrix to flavor matrix
def plot_correlation_fig(data):
    """ Creates a correlation heat map for all columns in user data.

    Parameters
    ----------
    data: Pandas DataFrame
        User data file as a Pandas DataFrame

    Returns
    -------
    Matplotlib Figure object.
    """
    sns.set(context='talk', style='white')
    fig = plt.figure()
    sns.heatmap(data.corr(), vmin=-1, vmax=1)
    plt.tight_layout()
    return fig
def plot_spatial_cluster_fig(data, covar_type_tied_labels_k):
    """ Creates a 3x2 plot spatial plot using labels as the color """
    sns.set(context='talk', style='white')
    data.columns = [c.lower() for c in data.columns]
    fig = plt.figure()
    placement = {'full': {True: 1, False: 4},
                 'diag': {True: 2, False: 5},
                 'spher': {True: 3, False: 6}}

    lim_left = data['longitude'].min()
    lim_right = data['longitude'].max()
    lim_bottom = data['latitude'].min()
    lim_top = data['latitude'].max()
    for covar_type, covar_tied, labels, k in covar_type_tied_labels_k:
        plt.subplot(2, 3, placement[covar_type][covar_tied])
        plt.scatter(data['longitude'], data['latitude'], c=labels, cmap=plt.cm.rainbow, s=10)
        plt.xlim(left=lim_left, right=lim_right)
        plt.ylim(bottom=lim_bottom, top=lim_top)
        plt.xticks([])
        plt.yticks([])
        plt.xlabel('Longitude')
        plt.ylabel('Latitude')
        plt.title('{}-{}, K={}'.format(covar_type.capitalize(), ['Untied', 'Tied'][covar_tied], k))
    plt.tight_layout()
    return fig
def plot_hist(arr, title):
    '''
    Function to plot a histogram of scores for employers

    INPUT:
        arr: Array-like, scores
        title: String, title for plot
    OUTPUT:
        Histogram plot (saved in directory)
    '''
    fig = plt.figure(figsize=(6, 4))
    ax = fig.add_subplot(111)
    ax.set_title(title, fontsize=14)
    ax.set_xlabel('Overall Score', fontsize=10)
    ax.set_ylabel('Observations', fontsize=10)
    ax.hist(arr, bins=(len(arr) // 180))  # integer bin count (Python 2 "/" was integer division)
    plt.tight_layout()
    plt.savefig('images/{}.png'.format(title.replace(' ', '_').lower()))
    return
def plot_segmented_hist(arr_middle, arr_tails):
    '''
    Function to plot a histogram of scores, color coded tails to visualize
    sections of employers to be analyzed

    INPUT:
        arr_middle: Array-like, scores of employers not being analyzed (middle 90%)
        arr_tails: Array-like, scores of employers to be analyzed (>95%, <5%)
    OUTPUT:
        Histogram plot (saved in directory)
    '''
    fig = plt.figure(figsize=(6, 4))
    ax = fig.add_subplot(111)
    ax.set_title('Employers with Significant Scores', fontsize=14)
    ax.set_xlabel('Overall Score', fontsize=10)
    ax.set_ylabel('Observations', fontsize=10)
    ax.hist(arr_middle, bins=34, label='Middle 90%')
    ax.hist(arr_tails, bins=34, label='Outer 5% Tails')
    plt.legend(loc='best', fontsize=10)
    plt.tight_layout()
    plt.savefig('images/sig_scores.png')
def plot_grid(images, w=10, path="plan.png", verbose=False):
    import matplotlib.pyplot as plt
    l = 0
    images = fix_images(images)
    l = len(images)
    h = int(math.ceil(l / w))
    plt.figure(figsize=(w * 1.5, h * 1.5))
    for i, image in enumerate(images):
        ax = plt.subplot(h, w, i + 1)
        try:
            plt.imshow(image, interpolation='nearest', cmap='gray',)
        except TypeError:
            TypeError("Invalid dimensions for image data: image={}".format(np.array(image).shape))
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    print(path) if verbose else None
    plt.tight_layout()
    plt.savefig(path)
    plt.close()

# contiguous image
def plot_grid2(images, w=10, path="plan.png", verbose=False):
    import matplotlib.pyplot as plt
    images = fix_images(images)
    l = images.shape[0]
    h = int(math.ceil(l / w))
    margin = 3
    m_shape = (margin + np.array(images.shape[1:]))
    all_shape = m_shape * np.array((h, w))
    figure = np.ones(all_shape)
    print(images.shape, h, w, m_shape, figure.shape) if verbose else None
    for y in range(h):
        for x in range(w):
            begin = m_shape * np.array((y, x))
            end = (m_shape * (np.array((y, x)) + 1)) - margin
            # print(begin, end, y*w+x)
            if y * w + x < len(images):
                figure[begin[0]:end[0], begin[1]:end[1]] = images[y * w + x]
    plt.figure(figsize=all_shape[::-1] * 0.01)
    plt.imshow(figure, interpolation='nearest', cmap='gray',)
    print(path) if verbose else None
    plt.tight_layout()
    plt.savefig(path)
def display_classes(png, images, classes, ncol=4):
    """
    Draw a number of images and their predictions

    Example:
        images = data[1][:12]
        classes = model.predict_classes('classes.png', images)
    """
    fig = plt.figure()
    nrow = len(images) // ncol  # integer row count (Python 2 "/" was integer division)
    if len(images) % ncol > 0:
        nrow = nrow + 1

    def draw(i):
        plt.subplot(nrow, ncol, i + 1)  # subplot indices are 1-based
        plt.imshow(images[i].reshape(28, 28), cmap='gray', interpolation='none')
        plt.title('Predicted: %s' % classes[i])

    [draw(i) for i in range(0, len(images))]

    plt.tight_layout()
    plt.savefig(png)
def plot_entropy_distribution():
    fig = plt.figure()
    ax = fig.add_subplot(111)
    entropy = read_pickle('output/normalized_entropy.obj')
    hist, bin_edges = np.histogram(entropy, bins=10000)
    print(hist, bin_edges)
    #ax.set_yscale('log')
    #ax.set_xscale('log')
    ax.plot(bin_edges[:-1], hist, marker='o', markersize=3, markeredgecolor='none', color='#D65F5F')
    #ax.set_ylim([10**0, 10**6])
    #ax.set_xlim([10**0, 10**6])
    ax.set_xlabel('Entropy')
    ax.set_ylabel('Frequency')
    fig.tight_layout()
    fig.savefig('output/normalized_entropy_distribution.pdf', bbox_inches='tight')
def plot_entropy_hist():
    fig = plt.figure()
    ax = fig.add_subplot(111)
    entropy = read_pickle('output/normalized_entropy.obj')
    number_of_zeros = [1 if item == 0 else 0 for item in entropy]
    print(len(number_of_zeros))
    print(sum(number_of_zeros))
    n, bins, patches = ax.hist(entropy, 50)
    ax.plot(bins)
    #ax.set_ylim([-1,1])
    ax.set_xlim([0, 1])
    ax.set_yscale('log')
    ax.set_xlabel('Normalized entropy')
    ax.set_ylabel('Frequency (log)')
    fig.tight_layout()
    fig.savefig('output/normalized_entropy_hist.pdf', bbox_inches='tight')
def visualize_document_topic_probs(self, outfile):
    plots = []
    height_cumulative = numpy.zeros(self.rows)
    #fig = pyplot.figure(figsize=(21, 10), dpi=550)
    for column in range(self.columns):
        color = pyplot.cm.coolwarm(column / self.columns, 1)
        if column == 0:
            p = pyplot.bar(self.ind, self.document_topics_raw[:, column], self.barwidth, color=color)
        else:
            p = pyplot.bar(self.ind, self.document_topics_raw[:, column], self.barwidth,
                           bottom=height_cumulative, color=color)
        height_cumulative += self.document_topics_raw[:, column]
        plots.append(p)
    pyplot.ylim((0, 1))
    pyplot.ylabel('Topics')
    pyplot.title('Topic distribution of CLS papers')
    pyplot.xticks(self.ind + self.barwidth / 2, self.document_names, rotation='vertical', size=10)
    pyplot.yticks(numpy.arange(0, 1, 10))
    pyplot.legend([p[0] for p in plots], self.topic_labels, bbox_to_anchor=(1, 1))
    self.fig.tight_layout()
    pyplot.savefig(outfile)
def visualize_words_bytopic(self, outfile, num_words=25):
    topics = self.lda.show_topics(num_topics=self.columns, num_words=10000)
    word_topic = []
    probs = []
    for t in range(self.columns):
        word_topic_str = topics[t][1]
        prob_word = word_topic_str.split(' + ')
        word_prob = [(x.split('*')[1], float(x.split('*')[0])) for x in prob_word]
        word_prob_sorted = sorted(word_prob, key=lambda k: k[1], reverse=True)
        word_prob_pruned = word_prob_sorted[:num_words]
        probs.extend([x[1] for x in word_prob_pruned])
        word_topic.append(word_prob_pruned)
    fontsize_base = 100 / numpy.max(probs)  # font size for word with largest share in corpus
    for t in range(self.columns):
        self.visualize_topic_words(t, word_topic[t], fontsize_base)
    self.fig.tight_layout()
    pyplot.savefig(outfile)
def pd_show_with_entities(self, x_label, y_label, title_msg, ax, fig, plt,
                          legend_list=[], show_plot=True, debug=False):
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    ax.set_title(title_msg)
    if len(legend_list) == 0:
        ax.legend(loc="best", prop={"size": "medium"})
    else:
        ax.legend(legend_list, loc="best", prop={"size": "medium"})
    self.pd_add_footnote(fig)
    plt.tight_layout()
    if show_plot:
        plt.show()
    else:
        plt.plot()
# end of pd_show_with_entities
def tsplot(y, lags=None, figsize=(10, 8), style='bmh'):
    if not isinstance(y, pd.Series):
        y = pd.Series(y)
    with plt.style.context(style):
        fig = plt.figure(figsize=figsize)
        # mpl.rcParams['font.family'] = 'Ubuntu Mono'
        layout = (3, 2)
        ts_ax = plt.subplot2grid(layout, (0, 0), colspan=2)
        acf_ax = plt.subplot2grid(layout, (1, 0))
        pacf_ax = plt.subplot2grid(layout, (1, 1))
        qq_ax = plt.subplot2grid(layout, (2, 0))
        pp_ax = plt.subplot2grid(layout, (2, 1))

        y.plot(ax=ts_ax)
        ts_ax.set_title('Time Series Analysis Plots')
        smt.graphics.plot_acf(y, lags=lags, ax=acf_ax, alpha=0.5)
        smt.graphics.plot_pacf(y, lags=lags, ax=pacf_ax, alpha=0.5)
        sm.qqplot(y, line='s', ax=qq_ax)
        qq_ax.set_title('QQ Plot')
        scs.probplot(y, sparams=(y.mean(), y.std()), plot=pp_ax)

        plt.tight_layout()
    return
def update(self, conf_mat, classes, normalize=False):
    """This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    plt.imshow(conf_mat, interpolation='nearest', cmap=self.cmap)
    plt.title(self.title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    if normalize:
        conf_mat = conf_mat.astype('float') / conf_mat.sum(axis=1)[:, np.newaxis]

    thresh = conf_mat.max() / 2.
    for i, j in itertools.product(range(conf_mat.shape[0]), range(conf_mat.shape[1])):
        plt.text(j, i, conf_mat[i, j],
                 horizontalalignment="center",
                 color="white" if conf_mat[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.draw()
def plot_normalized_confusion_matrix_at_depth(self):
    """ Returns a normalized confusion matrix.

    :returns: normalized confusion matrix
    :rtype: matplotlib figure
    """
    cm = metrics.confusion_matrix(self.predictions['label'], self.y_pred)
    np.set_printoptions(precision=2)
    fig = plt.figure()
    cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm_normalized, interpolation='nearest', cmap=plt.cm.Blues)
    plt.title("Normalized Confusion Matrix")
    plt.colorbar()
    tick_marks = np.arange(len(self.labels))
    plt.xticks(tick_marks, self.labels, rotation=45)
    plt.yticks(tick_marks, self.labels)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    return(fig)
def plotSeasonBreakdown(data, trend, seasonal, residual, decompfreq, saveFigName=None):
    """ plots each on own subplot """
    ax1 = plt.subplot(411)
    # print(data)
    plt.plot(data)  # , label='Original')
    ax1.set_title('original')
    plt.legend(loc='best')

    ax2 = plt.subplot(412)
    plt.plot(trend)  # , label='Trend')
    ax2.set_title('trend')
    plt.legend(loc='best')

    ax3 = plt.subplot(413)
    plt.plot(seasonal, label=str(decompfreq))
    ax3.set_title('seasonality')
    plt.legend(loc='best')

    ax4 = plt.subplot(414)
    plt.plot(residual)  # , label='Residuals')
    ax4.set_title('residuals')
    plt.legend(loc='best')

    plt.tight_layout()
    if (saveFigName == None):
        plt.show()
    else:
        plt.savefig(str(saveFigName))
def visualize(config, vae):
    if(config['n_z'] != 2):
        print("Skipping visuals since n_z is not 2")
        return
    nx = ny = 20
    x_values = np.linspace(-3, 3, nx)
    y_values = np.linspace(-3, 3, ny)
    canvas = np.empty((28 * ny, 28 * nx))
    for i, yi in enumerate(x_values):
        for j, xi in enumerate(y_values):
            z_mu = np.array([[xi, yi]])
            x_mean = vae.generate(np.tile(z_mu, [config['batch_size'], 1]))
            canvas[(nx - i - 1) * 28:(nx - i) * 28, j * 28:(j + 1) * 28] = x_mean[0].reshape(28, 28)

    plt.figure(figsize=(8, 10))
    Xi, Yi = np.meshgrid(x_values, y_values)
    plt.imshow(canvas, origin="upper")
    plt.tight_layout()

    img = "samples/2d-visualization.png"
    plt.savefig(img)
    hc.io.sample(config, [{"label": "2d visualization", "image": img}])
def sample(config, vae):
    x_sample = mnist.test.next_batch(100)[0]
    x_reconstruct = vae.reconstruct(x_sample)

    plt.figure(figsize=(8, 12))
    for i in range(5):
        plt.subplot(5, 2, 2 * i + 1)
        plt.imshow(x_sample[i].reshape(28, 28), vmin=0, vmax=1)
        plt.title("Test input")
        plt.colorbar()
        plt.subplot(5, 2, 2 * i + 2)
        plt.imshow(x_reconstruct[i].reshape(28, 28), vmin=0, vmax=1)
        plt.title("Reconstruction")
        plt.colorbar()
    plt.tight_layout()

    img = "samples/reconstruction.png"
    plt.savefig(img)
    hc.io.sample(config, [{"label": "Reconstruction", "image": img}])
def show_scatter(df, xlim=(-5, 105), ylim=(-5, 105), color="black", marker="o", reg_fit=False):
    """Create a scatter plot of the data

    Args:
        df (pd.DataFrame): The data set to plot
        xlim ((float, float)): The x-axis limits
        ylim ((float, float)): The y-axis limits
        color (str): The color of the scatter points
        marker (str): The marker style for the scatter points
        reg_fit (bool): Whether to plot a linear regression on the graph
    """
    sns.regplot(
        x="x",
        y="y",
        data=df,
        ci=None,
        fit_reg=reg_fit,
        marker=marker,
        scatter_kws={"s": 50, "alpha": 0.7, "color": color},
        line_kws={"linewidth": 4, "color": "red"})

    plt.xlim(xlim)
    plt.ylim(ylim)
    plt.tight_layout()
def plot_profile(lab, element):
    plt.figure()
    plt.plot(lab.profiles[element], -lab.x, sns.xkcd_rgb["denim blue"], lw=3, label=element)
    if element == 'Temperature':
        plt.title('Temperature profile')
        plt.xlabel('Temperature, C')
    elif element == 'pH':
        plt.title('pH profile')
        plt.xlabel('pH')
    else:
        plt.title('%s concentration' % (element, ))
        plt.xlabel('Concentration')
    plt.ylabel('Depth')
    ax = plt.gca()
    ax.ticklabel_format(useOffset=False)
    ax.grid(linestyle='-', linewidth=0.2)
    plt.legend()
    plt.tight_layout()
    return ax
def saveAttention(input_sentence, attentions, outpath):
    # Set up figure with colorbar
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    import matplotlib.ticker as ticker

    fig = plt.figure(figsize=(24, 10))
    ax = fig.add_subplot(111)
    cax = ax.matshow(attentions.cpu().numpy(), cmap='bone')
    fig.colorbar(cax)

    if input_sentence:
        # Set up axes
        ax.set_yticklabels([' '] + list(input_sentence) + [' '])
        # Show label at every tick
        ax.yaxis.set_major_locator(ticker.MultipleLocator(1))

    plt.tight_layout()
    plt.savefig(outpath)
    plt.close('all')
def fit_peak(x, y, display=False):
    '''
    Fit a Gaussian (with linear trend)

    Parameters
    ----------
    x : array_like
        x values
    y : array_like
        y values
    display : bool
        Display the result of the fit

    Returns
    -------
    par
        Fit parameters: Gaussian amplitude, Gaussian mean, Gaussian stddev,
        line slope, line intercept
    '''
    # fit: Gaussian + constant
    g_init = models.Gaussian1D(amplitude=y.max(), mean=x[np.argmax(y)]) + models.Linear1D(slope=0, intercept=0)
    fitter = fitting.LevMarLSQFitter()
    fit = fitter(g_init, x, y)

    if display:
        plt.clf()
        plt.plot(x, y, color='k')
        plt.plot(x, fit(x), color='r')
        plt.tight_layout()

    return fit.parameters
def demo2(fun):
    ''' Smiled Monalisa '''
    p = np.array([
        [186, 140], [295, 135], [208, 181], [261, 181], [184, 203], [304, 202],
        [213, 225], [243, 225], [211, 244], [253, 244], [195, 254], [232, 281],
        [285, 252]
    ])
    q = np.array([
        [186, 140], [295, 135], [208, 181], [261, 181], [184, 203], [304, 202],
        [213, 225], [243, 225], [207, 238], [261, 237], [199, 253], [232, 281],
        [279, 249]
    ])
    image = plt.imread(os.path.join(sys.path[0], "monalisa.jpg"))
    plt.subplot(121)
    plt.axis('off')
    plt.imshow(image)
    transformed_image = fun(image, p, q, alpha=1, density=1)
    plt.subplot(122)
    plt.axis('off')
    plt.imshow(transformed_image)
    plt.tight_layout(w_pad=1.0, h_pad=1.0)
    plt.show()
def plot(self, ax=None, clim=[None, None], pcolorOpts=None):
    """
    plot the electrical conductivity and relative permeability

    :param matplotlib.axes ax: axis
    :param list clim: list of numpy arrays: colorbar limits
    :param dict pcolorOpts: dictionary of pcolor options
    """
    if ax is None:
        fig, ax = plt.subplots(1, 2, figsize=(12, 4))

    if not isinstance(pcolorOpts, list):
        pcolorOpts = [pcolorOpts] * 2

    self.plot_sigma(ax=ax[0], clim=clim[0], pcolorOpts=pcolorOpts[0])
    self.plot_mur(ax=ax[1], clim=clim[1], pcolorOpts=pcolorOpts[1])

    plt.tight_layout()
    return ax
def card_cue_bandit_experiment(alpha=0.1, beta=0.5):
    np.random.seed(42)
    print('Running experiment with alpha={} and beta={}'.format(alpha, beta))
    df = agent.run_single_softmax_experiment(beta, alpha)
    f = lambda x: {'reward': 0, 'punishment': 1, 'neutral': 2}[x]
    df['cue'] = df['context'].map(f)
    f = lambda x: {23: 0, 14: 1, 8: 2, 3: 3}[x]
    df['action'] = df['action'].map(f)
    ml = ML(df)
    r = ml.ml_estimation()
    print(r)
    alpha_hat, beta_hat = r.x[:2]
    fig, ax = plt.subplots(1, 1)
    ml.plot_ml(ax, alpha, beta, alpha_hat, beta_hat)
    plt.tight_layout()
    plt.savefig('likelihood.pdf')
    plt.show()
    globals().update(locals())
def plot_confusion_matrix(cm, names=None, title='Confusion Matrix', cmap=plt.cm.Blues):
    plt.figure(4)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()

    # Add labels to confusion matrix:
    if names is None:
        names = range(cm.shape[0])

    tick_marks = np.arange(len(names))
    plt.xticks(tick_marks, names, rotation=45)
    plt.yticks(tick_marks, names)

    plt.tight_layout()
    plt.ylabel('Correct label')
    plt.xlabel('Predicted label')
    plt.show()

# Generate confusion matrix for Jaffe
# results = list of tuples of (correct label, predicted label)
#   e.g. [ ('HA', 3) ]
# categories = list of category names
# Returns confusion matrix; rows are correct labels and columns are predictions
def plotstyle():
    # makes the plot look pretty :)
    ax1.set_xlabel('Distance(m)')
    ax1.set_ylabel('Velocity (km/h)')
    ax1.legend(loc='upper right', fontsize=11, fancybox=True)
    majxticks = np.arange(0, L, 500)
    minxticks = np.arange(0, L, 100)
    majyticks = np.arange(vmin, vmax, 50)
    minyticks = np.arange(vmin, vmax, 10)
    ax1.set_xticks(majxticks)
    ax1.set_xticks(minxticks, minor=True)
    ax1.set_yticks(majyticks)
    ax1.set_yticks(minyticks, minor=True)
    ax1.grid(which='major', alpha=0.5)
    ax1.grid(which='minor', alpha=0.25)
    ax1.axis([0, L, vmin, vmax])
    plt.tight_layout()
def plot(sizes, plotname):
    fig = plt.figure(figsize=(2.0, 2.0), facecolor='white')
    ax = plt.subplot(111)
    psizes = ['%1.1f%%' % (x / sum(sizes) * 100) for x in sizes]
    labels = [x + '\n' + y for x, y in zip(LABELS, psizes)]
    patches = plt.pie(sizes, colors=COLORS, labels=labels, shadow=False,
                      startangle=90, labeldistance=0.7,
                      wedgeprops={'linewidth': 4})
    for pie_wedge in patches[0]:
        pie_wedge.set_edgecolor('white')
    for t in patches[1]:
        t.set_horizontalalignment('center')
    plt.axis('equal')
    plt.tight_layout()
    plt.savefig(plotname)
    print('saved plot to {}'.format(plotname))
    plt.show()

######################################################
# Data processing
def test_face(Img, CAE, n_input):
    batch_x_test = Img[200:300, :]
    batch_x_test = np.reshape(batch_x_test, [100, n_input[0], n_input[1], 1])
    CAE.restore()
    x_re = CAE.reconstruct(batch_x_test)

    plt.figure(figsize=(8, 12))
    for i in range(5):
        plt.subplot(5, 2, 2 * i + 1)
        plt.imshow(batch_x_test[i, :, :, 0], vmin=0, vmax=255, cmap="gray")
        # plt.title("Test input")
        plt.colorbar()
        plt.subplot(5, 2, 2 * i + 2)
        plt.imshow(x_re[i, :, :, 0], vmin=0, vmax=255, cmap="gray")
        plt.title("Reconstruction")
        plt.colorbar()
    plt.tight_layout()
    plt.show()
    return
def create_qqplots(pval_df, save_path):
    """Create qq plots for oncogene, tsg, and driver p-value.

    NOTE: gene names should be the index of the dataframe
    """
    pval_cols = ['oncogene p-value', 'tsg p-value', 'driver p-value']
    #keep_cols = ['gene'] + pval_cols
    plot_df = pval_df[pval_cols].copy()
    plot_df['gene'] = pval_df.index
    plot_df = plot_df[~plot_df['gene'].isin(pval.mlfc_remove_genes)]
    fig, ax = plt.subplots(1, 3, subplot_kw={'aspect': 'equal'})
    fig.set_size_inches(9, 3)
    for i, pval_col in enumerate(pval_cols):
        qqplot(pval_df[pval_col], ax[i], title=pval_col.split(' ')[0])
        mlfc = pval.mean_log_fold_change(plot_df[pval_col], plot_df['gene'])
        ax[i].text(.05, .85, 'MLFC = {0:.2f}'.format(mlfc))
        ax[i].set_xlim((0, 1))
        ax[i].set_ylim((0, 1))
    plt.tight_layout()
    plt.savefig(save_path)
    plt.close()
def correlation_plot(x, y, save_path, title, xlabel, ylabel):
    plt.scatter(x, y)
    slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
    line_x = np.arange(x.min(), x.max())
    line_y = slope * line_x + intercept
    plt.plot(line_x, line_y,
             label='$%.2fx + %.2f$, $R^2=%.2f$' % (slope, intercept, r_value**2))
    plt.legend(loc='best')
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.tight_layout()
    plt.savefig(save_path)
    plt.clf()  # clear figure
    plt.close()