The following 28 code examples, extracted from open-source Python projects, illustrate how to use matplotlib.image.
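Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; file names are placeholders) of the two matplotlib.image calls that most of these snippets revolve around, imread and imsave:

import numpy as np
import matplotlib.image as mpimg

# Read a PNG into a NumPy array; for PNG files matplotlib returns floats in [0, 1].
img = mpimg.imread("input.png")  # placeholder path, assumed to be an RGB(A) PNG
print(img.shape, img.dtype)

# Drop the alpha channel (if any), average the RGB channels to grayscale,
# and write the result back to disk with a grayscale colormap.
gray = img[..., :3].mean(axis=2)
mpimg.imsave("gray_output.png", gray, cmap="gray")  # placeholder path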
def reconstruction(img_data, size):
    """
    Reconstruct single image from flattened array.
    IMPORTANT: overlapping patches are averaged, not replaced as in _reconstruction()
    Args:
        img_data: flattened image array
        size: size of the image (rescaled)
    Returns:
        reconstructed image
    """
    patches_per_dim = size - conf.patch_size + 1
    reconstruction = np.zeros((size, size))
    n = np.zeros((size, size))
    idx = 0
    for i in range(patches_per_dim):
        for j in range(patches_per_dim):
            reconstruction[i:(i + conf.patch_size), j:(j + conf.patch_size)] += img_data[idx, :].reshape(conf.patch_size, conf.patch_size)
            n[i:(i + conf.patch_size), j:(j + conf.patch_size)] += 1
            idx += 1
    return np.divide(reconstruction, n)
def _reconstruction(img_data, size):
    """
    Reconstruct single image from flattened array. This function replaces values
    instead of averaging, so it is useful for visualising the corruption process.
    Args:
        img_data: flattened image array
        size: size of the image (rescaled)
    Returns:
        reconstructed image
    """
    patches_per_dim = size - conf.patch_size + 1
    print("size: {}".format(size))
    print("patches_per_dim: {}".format(patches_per_dim))
    print("img_data: {}".format(img_data.shape))
    reconstruction = np.zeros((size, size))
    idx = 0
    for i in range(patches_per_dim):
        for j in range(patches_per_dim):
            reconstruction[i:(i + conf.patch_size), j:(j + conf.patch_size)] = img_data[idx, :].reshape(conf.patch_size, conf.patch_size)
            idx += 1
    return reconstruction
def print(self):
    printer = QPrinter()
    printer.setOrientation(QPrinter.Landscape)
    printer.setDuplex(QPrinter.DuplexAuto)
    printerDialog = QPrintDialog(printer)
    ret = printerDialog.exec()
    if ret == QDialog.Accepted:
        _dpi = 96
        painter = QPainter(printer)
        self.dyfig.savefig("../WorkingDir/Page1.png", dpi=_dpi)
        image = QImage("../WorkingDir/Page1.png")
        pageRect = printer.pageRect()
        imageRect = image.rect()
        xOffset = (pageRect.width() - imageRect.width()) / 2
        yOffset = (pageRect.height() - imageRect.height()) / 2 - pageRect.y() / 2  # ? to fit layout like pdf
        painter.drawImage(QPoint(xOffset, yOffset), image)
        painter.end()
        painter = QPainter(printer)
        self.dyfig2.savefig("../WorkingDir/Page2.png", dpi=_dpi)
        image = QImage("../WorkingDir/Page2.png")
        painter.drawImage(QPoint(0, 0), image)
        painter.end()
    self.signalPrintEnd.set()
def onMakeThumbnail(self):
    try:
        #t1 = "../WorkingDir/x " + time.ctime() + ".png"
        #t1x = t1.replace(':', ' ')
        self.fig.savefig('../WorkingDir/Page1.png', format='png')
        image.thumbnail('../WorkingDir/Page1.png', '../WorkingDir/ThumbNail.png', scale=0.10)
        if self.autoPrint:
            print("autoPrint", self.autoPrint)
            self.onPrint()
    except Exception as _err:
        print(_err)
        self.fig.savefig('../WorkingDir/Pagex.png', format='png', dpi=300)
        logging.exception(_err)
    _sData = []
    _sData.append(self.signals.GRAPH_THUMBNAIL_READY)
    self.Client.send(_sData)
def get_interpolations(ae, images, attributes, params):
    """
    Reconstruct images / create interpolations
    """
    assert len(images) == len(attributes)
    enc_outputs = ae.encode(images)

    # interpolation values
    alphas = np.linspace(1 - params.alpha_min, params.alpha_max, params.n_interpolations)
    alphas = [torch.FloatTensor([1 - alpha, alpha]) for alpha in alphas]

    # original image / reconstructed image / interpolations
    outputs = []
    outputs.append(images)
    outputs.append(ae.decode(enc_outputs, attributes)[-1])
    for alpha in alphas:
        alpha = Variable(alpha.unsqueeze(0).expand((len(images), 2)).cuda())
        outputs.append(ae.decode(enc_outputs, alpha)[-1])

    # return stacked images
    return torch.cat([x.unsqueeze(1) for x in outputs], 1).data.cpu()
def convert_images(image_name, folder_type, label_of_images, allImages, slice_number):
    # load the image
    epi_img = nib.load(image_name)
    # get the data from the image
    epi_img_data = epi_img.get_data()
    # getting the slices
    print("Getting the slices")
    index2 = np.array(epi_img_data[1], dtype=int)
    std_image = epi_img.get_data()[:, :, slice_number]
    slice_1 = epi_img_data[:, :, slice_number]
    # plotting the slice that we care for
    print("Plotting the slices")
    plot = plt.imshow(slice_1, cmap="gray")
    plt.axis('off')
    plot.axes.get_xaxis().set_visible(False)
    plot.axes.get_yaxis().set_visible(False)
    if allImages == "1":
        full_path = folder_type + "/" + image_name + '.png'
    else:
        full_path = folder_type + '/' + label_of_images + '/' + image_name + '.png'
    # saving it
    print("Saving the slice")
    #plt.savefig(full_path, bbox_inches='tight', pad_inches=0)
    matplotlib.image.imsave(full_path, std_image, cmap="gray")
def convert_images(image_name, folder_type, label_of_images, allImages):
    # load the image
    epi_img = nib.load(image_name)
    # get the data from the image
    epi_img_data = epi_img.get_data()
    # getting the slices
    print("Getting the slices")
    index2 = np.array(epi_img_data[1], dtype=int)
    std_image = epi_img.get_data()[:, :, 45]
    slice_1 = epi_img_data[:, :, 45]
    # plotting the slice that we care for
    print("Plotting the slices")
    plot = plt.imshow(slice_1, cmap="gray")
    plt.axis('off')
    plot.axes.get_xaxis().set_visible(False)
    plot.axes.get_yaxis().set_visible(False)
    if allImages == "1":
        full_path = image_name + '.png'
    else:
        full_path = folder_type + '/' + label_of_images + '/' + image_name + '.png'
    # saving it
    print("Saving the slice")
    #plt.savefig(full_path, bbox_inches='tight', pad_inches=0)
    matplotlib.image.imsave(full_path, std_image, cmap="gray")
def load_img(self, img_path):
    """
    Return an image object that can be immediately plotted with matplotlib
    """
    with open_file(self.uuid, img_path) as f:
        return mpimg.imread(f)
def configure_matplotlib():
    """Set Matplotlib backend to 'Agg', which is necessary on the CodaLab docker image."""
    import warnings
    import matplotlib
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        matplotlib.use('Agg')  # needed when running from server
def launch_job(job_name, cmd=None,
               code_dir=None, excludes='*.ipynb .git .ipynb_checkpoints',
               dependencies=tuple(), queue='john', image='codalab/python',
               memory='18g', debug=False, tail=False):
    """Launch a job on CodaLab (optionally upload code that the job depends on).

    Args:
        job_name: name of the job
        cmd: command to execute
        code_dir: path to code folder. If None, no code is uploaded.
        excludes: file types to exclude from the upload
        dependencies: list of other bundles that we depend on
        debug: if True, prints SSH commands, but does not execute them
        tail: show the streaming output returned by CodaLab once it launches the job
    """
    print 'Remember to set up SSH tunnel and LOG IN through the command line before calling this.'

    def execute(cmd):
        return shell(cmd, verbose=True, debug=debug)

    if code_dir:
        execute('cl up -n code -w {} {} -x {}'.format(worksheet, code_dir, excludes))

    options = '-v -n {} -w {} --request-queue {} --request-docker-image {} --request-memory {}'.format(
        job_name, worksheet, queue, image, memory)
    dep_str = ' '.join(['{0}:{0}'.format(dep) for dep in dependencies])
    cmd = "cl run {} {} '{}'".format(options, dep_str, cmd)
    if tail:
        cmd += ' -t'
    execute(cmd)
def unpack_data_file(source_file_name, target_dir, start_idx):
    print("Unpacking {} to {}".format(source_file_name, target_dir))
    data = load_file(source_file_name)
    for idx, (image_data, label_idx) in tqdm(enumerate(zip(data['data'], data['labels'])), total=len(data['data'])):
        subdir = os.path.join(target_dir, label_names[label_idx])
        name = "{}_{}.png".format(start_idx + idx, label_names[label_idx])
        os.makedirs(subdir, exist_ok=True)
        image = np.moveaxis(image_data.reshape(3, 32, 32), 0, 2)
        matplotlib.image.imsave(os.path.join(subdir, name), image)
    return len(data['data'])
def launch_job(job_name, cmd,
               dependencies=tuple(), queue='john', image='kelvinguu/gtd:1.0',
               memory=None, cpus='5', network=False,
               debug=False, tail=False):
    """Launch a job on CodaLab (optionally upload code that the job depends on).

    Args:
        job_name: name of the job
        cmd: command to execute
        dependencies: list of other bundles that we depend on
        debug: if True, prints SSH commands, but does not execute them
        tail: show the streaming output returned by CodaLab once it launches the job
    """
    print 'Remember to set up SSH tunnel and LOG IN through the command line before calling this.'
    options = '-v -n {} -w {} --request-queue {} --request-docker-image {} --request-cpus {}'.format(
        job_name, worksheet, queue, image, cpus)
    if memory:
        options += ' --request-memory {}'.format(memory)
    if network:
        options += ' --request-network'
    dep_str = ' '.join(['{0}:{0}'.format(dep) for dep in dependencies])
    full_cmd = "cl run {} {} '{}'".format(options, dep_str, cmd)
    if tail:
        full_cmd += ' -t'
    shell(full_cmd, verbose=True, debug=debug)
def load_patches_to_predict(directory_path, num_images, patch_size=conf.patch_size, phase='test'):
    """
    Loads prediction images and splits them up into patches.
    :param directory_path: The directory to load images from
    :param num_images: Number of images to load
    :param patch_size: The desired patch size. For prediction, the stride will be 1.
    :param phase: Whether the images to load are from the test or training dataset. Must be 'test' or
                  'train_cnn_output'. (This is important for the filename and resizing size.)
    :return: A tensor of patches with dimensions
             (num_images, vertical patch count, horizontal patch count, patch_size, patch_size)
    """
    patches = []
    if phase == 'test':
        base_filename = "raw_test_%d_pixels"
        resize_size = conf.test_image_resize
    elif phase == 'train_cnn_output':
        base_filename = "raw_satImage_%.3d_pixels"
        resize_size = conf.train_image_resize
    else:
        raise ValueError('Unsupported phase')

    for i in range(1, num_images + 1):
        imageid = base_filename % i
        image_filename = directory_path + imageid + ".png"
        if os.path.isfile(image_filename):
            img = mpimg.imread(image_filename)
            # Resize images s.t. one patch is represented by a single pixel
            img = resize(img, (resize_size, resize_size))
            # For prediction we always extract patches with stride 1 and then average the predictions
            patches.append(skimg.extract_patches(img, (patch_size, patch_size), extraction_step=1))

    stacked_image_patches = np.stack(patches)
    return stacked_image_patches
def reconstruct_image_from_patches(img_data, patches_per_predict_image_dim, size):
    """
    Reconstruct single image from multiple image patches. IMPORTANT: overlapping patches are averaged
    Args:
        img_data: An array with dimensions (patches_per_predict_image_dim**2, patch size, patch size)
        patches_per_predict_image_dim: Number of patches for one dimension. We assume images have the
                                       same dimension horizontally as well as vertically.
        size: Height/Width of the target image.
    Returns:
        reconstructed image: An image of (size x size) reconstructed from the patches
    """
    reconstruction = np.zeros((size, size))
    n = np.zeros((size, size))
    idx = 0
    # Loop through all the patches in 2-dim and sum up the pixel values.
    # (We split up the image with stride 1 before)
    # Also keep a count array
    for i in range(patches_per_predict_image_dim):
        for j in range(patches_per_predict_image_dim):
            reconstruction[i:(i + conf.patch_size), j:(j + conf.patch_size)] += img_data[idx, :, :, 0]
            n[i:(i + conf.patch_size), j:(j + conf.patch_size)] += 1
            idx += 1
    # Return the arithmetic average
    return np.divide(reconstruction, n)
def binarize(image):
    """
    Binarizes an image with the threshold defined in the AE config
    :param image: The image to binarize. Most likely a low-res image where each pixel represents a patch
    :return: An image where each pixel larger than the threshold is set to 1, and otherwise set to 0.
    """
    binarized = np.zeros(image.shape)
    binarized[image > conf.binarize_threshold] = 1
    return binarized
def add_rotations(image):
    """
    Rotates the provided image a couple of times to generate more training data. This should make the
    autoencoder more robust to diagonal roads, for example. The rotations keep the dimensions of the
    image intact.
    :param image: The image to rotate
    :return: A list of rotated images, including the original image
    """
    rot90img = rotate(image, 90, reshape=False, mode='reflect', order=3)
    rot45img = rotate(image, 45, reshape=False, mode='reflect', order=3)
    rot135img = rotate(image, 135, reshape=False, mode='reflect', order=3)
    return [image, rot90img, rot45img, rot135img]
def preprocessing(image):
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    return image
def reverse_preprocessing(image):
    image = tf.multiply(image, 0.5)
    image = tf.add(image, 0.5)
    return image
def draw(self, title):
    import matplotlib
    import matplotlib.pyplot as plt
    import matplotlib.image as mpimg
    from matplotlib import gridspec

    plt.style.use('ggplot')

    red = '#aa4643'
    blue = '#4572a7'
    black = '#000000'

    figsize = (18, 6)
    f = plt.figure(title, figsize=figsize)
    gs = gridspec.GridSpec(10, 8)

    font_size = 12
    value_font_size = 11
    label_height, value_height = 0.8, 0.6
    fig_data = [
        (0.00, label_height, value_height, 'Max Down', '{0:.3%}'.format(self._max_drawdown), red, black),
        (0.30, label_height, value_height, 'Sharpe', '{0:.3}'.format(self._sharpe), red, black),
        (0.60, label_height, value_height, 'Volatility', '{0:3%}'.format(self._volatility), red, black)]

    ax = plt.subplot(gs[:3, :-1])
    ax.axis('off')
    for x, y1, y2, label, value, label_color, value_color in fig_data:
        ax.text(x, y1, label, color=label_color, fontsize=font_size)
        ax.text(x, y2, value, color=value_color, fontsize=value_font_size)

    ax = plt.subplot(gs[4:, :])
    ax.get_xaxis().set_minor_locator(matplotlib.ticker.AutoMinorLocator())
    ax.get_yaxis().set_minor_locator(matplotlib.ticker.AutoMinorLocator())
    ax.grid(b=True, which='minor', linewidth=.2)
    ax.grid(b=True, which='major', linewidth=1)

    ax.plot(self._ret_df['net_worth'], label='strategy', alpha=1, linewidth=2, color=red)

    vals = ax.get_yticks()
    ax.set_yticklabels(['{:3.2f}%'.format(x * 100) for x in vals])

    leg = plt.legend(loc='upper left')
    leg.get_frame().set_alpha(0.5)

    plt.show()
def openAllGraph(self):
    self.plot_file_list = self.parent.plot_file[self.root_name]
    self.graphviewer = QtWidgets.QFrame()
    self.screen = QtWidgets.QDesktopWidget().screenGeometry()
    # Set geometry
    self.graphviewer.setGeometry(self.screen.width()/4, 100, 800, 600)
    self.graphviewer.setWindowFlags(QtCore.Qt.Popup)
    self.graphviewer.setObjectName("graphView")
    self.graphviewer.setStyleSheet("QFrame#graphView{background:#ffffff;border:0.5px solid #fa7064;} QPushButton:hover{background:#6e66cc;border:1px solid #373366;} QToolButton:hover{background:#fa7064;}")
    # self.graphviewer.setWindowModality(QtCore.Qt.WindowModal)
    # Layout
    graph_layout = QtWidgets.QVBoxLayout()
    # Title
    graph_title = SubTitleBar(self.graphviewer)
    graph_title.title_Label.setText(self.root_name)
    # Separator line
    hline = QtWidgets.QWidget()
    hline.setStyleSheet("QWidget{min-height:2px; max-height:2px; background:#399ee5;}")
    # ComboBox
    graph_control = QtWidgets.QWidget()
    graph_control_layout = QtWidgets.QHBoxLayout()
    self.graph_control_comboBox = QtWidgets.QComboBox()
    self.graph_control_comboBox.setStyleSheet("QComboBox{font-family:'Segoe UI';font-size: 10pt;border: 1px solid #c5d2d9; border-radius:5px;padding: 5px 10px 5px 10px; color: #66767c;min-width: 250px;} QComboBox:hover{border: 2px solid #2a4d69;border-radius: 5px;height: 30ps;} QComboBox::drop-down {subcontrol-origin: padding; subcontrol-position: top right;width: 40px;border-left-width: 2px;border-left-color: #c5d2d9;border-left-style: solid; border-top-right-radius: 5px; border-bottom-right-radius: 5px;padding: 1px 1px 1px 1px;image: url(:/resources/dropdown_arrow.png);} QComboBox QAbstractItemView {border: 1px solid #c5d2d9; border-bottom-left-radius: 5px; border-bottom-right-radius: 5px;selection-background-color:#4b86b4;outline: solid #2a4d69;font-family: 'Segoe UI';font-size: 10pt;color: #66767c;}")
    graph_control_layout.insertStretch(0, 4)
    graph_control_layout.addWidget(self.graph_control_comboBox)
    graph_control_layout.insertStretch(3, 4)
    graph_control.setLayout(graph_control_layout)
    # Main Content
    self.graph_content = QtWidgets.QStackedWidget()
    # Add stack
    for i in range(1, len(self.plot_file_list)):
        currentName = self.plot_file_list[i].rsplit("/", 1)[1].rsplit(".", 1)[0]
        self.graph_control_comboBox.addItem(currentName)
        graph_label = QtWidgets.QLabel()
        currentGraph = QtGui.QPixmap(self.plot_file_list[i])
        graph_label.setPixmap(currentGraph)
        graph_label.setAlignment(QtCore.Qt.AlignCenter)
        self.graph_content.addWidget(graph_label)
    # Add layout
    graph_layout.addWidget(graph_title, 1)
    graph_layout.addWidget(hline, 1)
    graph_layout.addWidget(graph_control, 1)
    graph_layout.addWidget(self.graph_content, 8)
    graph_layout.setContentsMargins(5, 10, 5, 10)
    graph_layout.setAlignment(QtCore.Qt.AlignTop)
    self.graphviewer.setLayout(graph_layout)
    self.graph_control_comboBox.currentIndexChanged.connect(self.changeGraph)
    self.graphviewer.show()
def predict_on_test_set(model, sess):
    print("Running the Convolutional Denoising Autoencoder on the predictions")
    prediction_test_dir = "../results/CNN_Output/test/high_res_raw/"
    if not os.path.isdir(prediction_test_dir):
        raise ValueError("Couldn't find directory {}".format(prediction_test_dir))

    patches_to_predict = load_patches_to_predict(prediction_test_dir, conf.train_size, conf.patch_size, 'test')
    print("Shape of patches_to_predict for training data: {}".format(patches_to_predict.shape))
    patches_per_predict_image_dim = patches_to_predict.shape[1]  # Assume square images
    patches_to_predict = patches_to_predict.reshape((-1, conf.patch_size, conf.patch_size))
    predictions = []
    runs = patches_to_predict.shape[0] // conf.batch_size
    rem = patches_to_predict.shape[0] % conf.batch_size
    for i in tqdm(range(runs)):
        batch_inputs = patches_to_predict[i * conf.batch_size:((i + 1) * conf.batch_size), ...]
        feed_dict = model.make_inputs_predict(batch_inputs)
        prediction = sess.run(model.y_pred, feed_dict)
        predictions.append(prediction)
    if rem > 0:
        batch_inputs = patches_to_predict[runs * conf.batch_size:(runs * conf.batch_size + rem), ...]
        feed_dict = model.make_inputs_predict(batch_inputs)
        prediction = sess.run(model.y_pred, feed_dict)
        predictions.append(prediction)

    print("individual training image prediction shape: {}".format(predictions[0].shape))
    predictions = np.concatenate(predictions, axis=0)
    print("Shape of training image predictions: {}".format(predictions.shape))

    output_path = "../results/CNN_Autoencoder_Output/test/"
    binarize_output_path = os.path.join(output_path, "binarized/")
    if os.path.isdir(output_path):
        shutil.rmtree(output_path)
    os.makedirs(output_path)
    os.makedirs(binarize_output_path)

    # Save outputs to disk
    for i in range(conf.test_size):
        print("Test img: " + str(i + 1))
        img_name = "cnn_ae_test_" + str(i + 1)

        prediction = reconstruct_image_from_patches(
            predictions[i * patches_per_predict_image_dim ** 2:(i + 1) * patches_per_predict_image_dim ** 2, :],
            patches_per_predict_image_dim, conf.test_image_resize)

        binarized_prediction = binarize(prediction)

        # resizing test images to 608x608 and saving to disk
        resized_greylevel_output_images = resize_img(prediction, 'test')
        scipy.misc.imsave(output_path + img_name + ".png", resized_greylevel_output_images)

        resized_binarized_output_images = resize_img(binarized_prediction, 'test')
        scipy.misc.imsave(binarize_output_path + img_name + ".png", resized_binarized_output_images)
def test():
    from io import BytesIO

    x = [datetime.datetime(2017, 4, 6, 0, 0), datetime.datetime(2017, 4, 7, 0, 0), datetime.datetime(2017, 4, 8, 0, 0),
         datetime.datetime(2017, 4, 11, 0, 0), datetime.datetime(2017, 4, 12, 0, 0), datetime.datetime(2017, 4, 13, 0, 0),
         datetime.datetime(2017, 4, 14, 0, 0), datetime.datetime(2017, 4, 16, 0, 0), datetime.datetime(2017, 4, 17, 0, 0),
         datetime.datetime(2017, 4, 18, 0, 0), datetime.datetime(2017, 4, 19, 0, 0), datetime.datetime(2017, 4, 20, 0, 0),
         datetime.datetime(2017, 4, 22, 0, 0), datetime.datetime(2017, 4, 23, 0, 0)]
    y = [[0.0, 15.0, 9.0, 0.0, 9.0, 5.0, 6.0, 0.0, 11.0, 9.0, 5.0, 6.0, 0.0, 11.0],
         [15.0, 17.0, 0.0, 20.0, 20.0, 19.0, 30.0, 32.0, 23.0, 20.0, 19.0, 30.0, 32.0, 23.0]]
    grid = [
        [{'date': datetime.datetime(2017, 4, 3, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [0., 0.]},
         {'date': datetime.datetime(2017, 4, 4, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [0., 0.]},
         {'date': datetime.datetime(2017, 4, 5, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [0., 0.]},
         {'date': datetime.datetime(2017, 4, 6, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [0., 15.]},
         {'date': datetime.datetime(2017, 4, 7, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [15., 17.]},
         {'date': datetime.datetime(2017, 4, 8, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [9., 0.]},
         {'date': datetime.datetime(2017, 4, 9, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [0., 0.]}],
        [{'date': datetime.datetime(2017, 4, 10, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [0., 0.]},
         {'date': datetime.datetime(2017, 4, 11, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [0., 20.]},
         {'date': datetime.datetime(2017, 4, 12, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [9., 20.]},
         {'date': datetime.datetime(2017, 4, 13, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [5., 19.]},
         {'date': datetime.datetime(2017, 4, 14, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [6., 30.]},
         {'date': datetime.datetime(2017, 4, 15, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [0., 0.]},
         {'date': datetime.datetime(2017, 4, 16, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [0., 32.]}],
        [{'date': datetime.datetime(2017, 4, 17, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [11., 23.]},
         {'date': datetime.datetime(2017, 4, 18, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [0., 0.]},
         {'date': datetime.datetime(2017, 4, 19, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [0., 0.]},
         {'date': datetime.datetime(2017, 4, 20, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [0., 0.]},
         {'date': datetime.datetime(2017, 4, 21, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [0., 0.]},
         {'date': datetime.datetime(2017, 4, 22, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [0., 0.]},
         {'date': datetime.datetime(2017, 4, 23, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [0., 0.]}],
        [{'date': datetime.datetime(2017, 4, 24, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [11., 23.]},
         {'date': datetime.datetime(2017, 4, 25, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [0., 0.]},
         {'date': datetime.datetime(2017, 4, 26, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [0., 0.]},
         {'date': datetime.datetime(2017, 4, 27, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [0., 0.]},
         {'date': datetime.datetime(2017, 4, 28, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [0., 0.]},
         {'date': datetime.datetime(2017, 4, 29, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [0., 0.]},
         {'date': datetime.datetime(2017, 4, 30, 0, 0, tzinfo=tzoffset(None, 10800)), 'values': [0., 0.]}]
    ]
    dashboard = {
        "summary": "Anna work-out",
        "empty_image": "../amazon-dash-private/images/old-woman.png",
        "images_folder": "../amazon-dash-private/images/"
    }
    labels = [
        {"summary": "Morning work-out", "image": "../amazon-dash-private/images/morning4.png"},
        {"summary": "Physiotherapy", "image": "../amazon-dash-private/images/evening2.png"}
    ]
    absent_labels = [
        {'image_grid': '../amazon-dash-private/images/absent_ill_grid.png',
         'image_plot': '../amazon-dash-private/images/absent_ill_plot.png',
         'summary': 'Sick'},
        {'image_grid': '../amazon-dash-private/images/absent_vacation_grid.png',
         'image_plot': '../amazon-dash-private/images/absent_vacation_plot.png',
         'summary': 'Vacation'}
    ]
    weather = {'day': [datetime.datetime(2017, 4, 22, 0, 0), datetime.datetime(2017, 4, 23, 0, 0),
                       datetime.datetime(2017, 4, 24, 0, 0), datetime.datetime(2017, 4, 25, 0, 0)],
               'icon': ['sct', 'ovc', 'hi_shwrs', 'sn'],
               'temp_max': [6.64, 6.38, 4.07, 6.91],
               'temp_min': [-0.58, -2.86, -1.87, -1.91],
               'images_folder': '../amazon-dash-private/images/'}
    t0 = datetime.datetime.now()
    image_data = draw_calendar(grid, x, y, weather, dashboard, labels, absent_labels,
                               ImageParams(dashboard='', format='gif', style='seaborn-talk', xkcd=1, rotate=0))
    t1 = datetime.datetime.now()
    print(t1 - t0)
    image_file = BytesIO(image_data)
    image = PIL.Image.open(image_file)
    image.show()
    # with open('test.png', 'wb') as png_file:
    #     png_file.write(image)
    # plt.show()
    # todo speed it up. too many rescalings as I see from profiling.
    # may be using artists (http://stackoverflow.com/questions/41453902/is-it-possible-to-patch-an-image-in-matplotlib)
    # will reduce number of rescalings?
    # now it looks like matplotlib rescales after each operation