We have extracted the following 50 code examples from open-source Python projects to illustrate how to use builtins.str().
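Before diving into the project code, here is a minimal standalone sketch of what builtins.str() does (plain interpreter behavior, not taken from any of the projects below; on Python 2 the import requires the `future` package, on Python 3 it is effectively a no-op):

from builtins import str

# str() renders arbitrary objects as text
assert str(42) == '42'
assert str(3.14) == '3.14'
assert str(None) == 'None'
assert str([1, 2]) == '[1, 2]'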
def setlocale(category, locale=None):
    """ Set the locale for the given category.  The locale can be
        a string, an iterable of two strings (language code and encoding),
        or None.

        Iterables are converted to strings using the locale aliasing
        engine.  Locale strings are passed directly to the C lib.

        category may be given as one of the LC_* values.
    """
    if locale and not isinstance(locale, _builtin_str):
        # convert to string
        locale = normalize(_build_localename(locale))
    return _setlocale(category, locale)
def _setup_token(self):
    self.logger.warn("\n\n(One Time Setup) Please create a Personal Access Token")
    self.logger.warn("https://%s/profile/personal_access_tokens" % self.origin_domain)
    self.logger.warn("Scope: API, Expires: Never\n")
    token = input("Please enter your Personal Access Token: ")

    # Make request to resource that requires us to be authenticated
    path = 'projects/%s/labels' % self._url_encoded_path()
    url = urljoin(str(self._API()), path)
    res = requests.get(url, headers={"PRIVATE-TOKEN": token})
    if res.status_code == 200:
        return (token, None)
    return (-1, "Invalid Personal Access Token")
def zoomToPressed(self):
    try:
        w3wCoord = str(self.coordBox.text()).replace(" ", "")
        QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
        json = self.w3w.forwardGeocode(w3wCoord)
        lat = float(json["geometry"]["lat"])
        lon = float(json["geometry"]["lng"])
        canvasCrs = self.canvas.mapSettings().destinationCrs()
        epsg4326 = QgsCoordinateReferenceSystem("EPSG:4326")
        transform4326 = QgsCoordinateTransform(epsg4326, canvasCrs)
        center = transform4326.transform(lon, lat)
        self.canvas.zoomByFactor(1, center)
        self.canvas.refresh()
        if self.marker is None:
            self.marker = QgsVertexMarker(self.canvas)
        self.marker.setCenter(center)
        self.marker.setIconSize(8)
        self.marker.setPenWidth(4)
        self.removeMarkerButton.setDisabled(False)
        self.coordBox.setStyleSheet("QLineEdit{background: white}")
    except Exception as e:
        self.coordBox.setStyleSheet("QLineEdit{background: yellow}")
    finally:
        QApplication.restoreOverrideCursor()
def __init__(self, verbose):
    # Attempt to find the _ampersand.json configuration file
    try:
        config = build.get_json("_ampersand.json")
        root = p.dirname(p.abspath("./_ampersand.json"))
    except OSError:
        # Ask the user where to find the _ampersand.json file
        try:
            location = input("Enter the path (from here) to the root of "
                             "your project: ")
            config = build.get_json(p.join(location, "_ampersand.json"))
            root = p.abspath(location)
        except (KeyboardInterrupt, OSError) as e:
            print(str(e))
            sys.exit()
    self.root = root
    self.config = config
    self.verbose = verbose
def plugin_remove(self, name):
    root = relative(self.root)
    try:
        # Delete the directory containing the plugin
        print("Removing plugin '%s'" % name)
        rmtree(root(self.config["modules"], name))
    except FileNotFoundError:
        pass
    except (IOError, OSError) as e:
        print(str(e))
        print("Couldn't remove plugin. You may need to delete it manually.")
    try:
        # Update _ampersand.json by removing the plugin
        self.config["plugins"].pop(name)
        with open(root("_ampersand.json"), "w", encoding="utf-8") as updated:
            updated.write(json.dumps(self.config, indent=4, ensure_ascii=False))
    except KeyError:
        print("Failed to remove plugin '%s' as it is not installed." % name)
        sys.exit()
def parse_csv(csv_text):
    csv_text = str(csv_text)
    c = csv.reader(io.StringIO(csv_text))
    rows = [row for row in c]
    keys = rows.pop(0)
    normal_keys = [k for k in keys if not k.startswith('_')]
    dict_rows = []
    for row in rows:
        d = Row()
        for i in range(0, len(keys)):
            if keys[i] in normal_keys:
                d[keys[i]] = row[i]
            else:
                d.xtras[keys[i]] = row[i]
        dict_rows.append(d)
    return dict_rows
def slugify(value, allow_unicode=False):
    """Slugify string to make it a valid filename.

    Convert to ASCII if 'allow_unicode' is False. Convert spaces to
    hyphens. Remove characters that aren't alphanumerics, underscores,
    or hyphens. Also strip leading and trailing whitespace.
    """
    import unicodedata
    value = str(value)
    if allow_unicode:
        value = unicodedata.normalize('NFKC', value)
    else:
        value = unicodedata.normalize('NFKD', value).encode(
            'ascii', 'ignore').decode('ascii')
    value = re.sub(r'[^\w\s-]', '', value).strip()
    return re.sub(r'[-\s]+', '-', value)


# Below from
# http://stackoverflow.com/questions/2333872/atomic-writing-to-file-with-python
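A quick check of what slugify() produces, following directly from the two regex substitutions above (the input string is made up for illustration):

>>> slugify("Ampersand & Sons: 2017 Report")
'Ampersand-Sons-2017-Report'
>>> slugify("  spaces   and_underscores  ")
'spaces-and_underscores'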
def save_subsequences(subsets, subsets_path):
    """
    Save the subsequences to the path: `[subsets_path]/[filename].csv`

    Parameters
    ----------
    subsets : dict
        Dict holding the dataframes of the subsequences
    subsets_path : str
        Directory to store the outputs
    """
    if not os.path.exists(subsets_path):
        os.makedirs(subsets_path)
    for fn, i in subsets:
        fn_out = str(str(i) + '_' + fn)
        dat = subsets[(fn, i)]
        dat.to_csv(os.path.join(subsets_path, fn_out),
                   date_format='%Y-%m-%dT%H:%M:%S%z')
def plot_boxplots(data, hidden_states):
    """
    Plot boxplots for all variables in the dataset, per state

    Parameters
    ----------
    data : pandas DataFrame
        Data to plot
    hidden_states : iterable
        the hidden states corresponding to the timesteps
    """
    column_names = data.columns
    figs, axes = plt.subplots(len(column_names), figsize=(15, 15))
    for j, var in enumerate(column_names):
        axes[j].set_title(var)
        vals = data[var]
        data_to_plot = []
        labels = []
        for i in set(hidden_states):
            mask = hidden_states == i
            if sum(mask) > 0:
                labels.append(str(i))
                values = np.array(vals[mask])
                data_to_plot.append(values)
        axes[j].boxplot(data_to_plot, sym='', labels=labels)
def _readPPN(self, fname, sldir):
    '''
    Private method that reads in and organizes the .ppn file

    Loads the data of the .ppn file into the variable cols.
    '''
    if sldir.endswith(os.sep):
        # Making sure fname will be formatted correctly
        fname = str(sldir) + str(fname)
    else:
        fname = str(sldir) + os.sep + str(fname)
        self.sldir += os.sep
    f = open(fname, 'r')
    lines = f.readlines()
    for i in range(len(lines)):
        lines[i] = lines[i].strip()
    cols = ['ISOTP', 'ABUNDANCE_MF']
    # These are constant, .ppn files have no header to read from
    for i in range(len(lines)):
        if not lines[i].startswith('H'):
            index = i - 1
            break
    return cols, index
def _padding_model_number(number, max_num):
    '''
    This method returns a zero-front-padded string.

    It makes out of str(45) -> '0045' if 999 < max_num < 10000. This is
    meant to work for reasonable integers (maybe less than 10^6).

    Parameters
    ----------
    number : integer
        Number that the string should represent.
    max_num : integer
        Max number of cycle list; implies how many 0s have to be padded.
    '''
    cnum = str(number)
    clen = len(cnum)
    cmax = int(log10(max_num)) + 1
    return (cmax - clen) * '0' + cnum
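A sanity check of the padding logic (values chosen for illustration; log10 is assumed to come from the math module):

>>> _padding_model_number(45, 9999)   # cmax = int(log10(9999)) + 1 = 4
'0045'
>>> _padding_model_number(7, 100)     # cmax = 3
'007'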
def fetch_data_one(self, dataitem, cycle):
    self.h5 = mrT.File(self.filename, 'r')
    try:
        data = self.h5[self.cycle_header + str(cycle)]['SE_DATASET'][dataitem]
    except ValueError:
        try:
            data = self.h5[self.cycle_header + str(cycle)].attrs.get(dataitem, None)
        except TypeError:
            data = self.h5[self.cycle_header + str(cycle)][dataitem]
    try:
        while data.shape[0] < 2:
            data = data[0]
    except (IndexError, AttributeError):
        pass
    self.h5.close()
    return data
def plot_prof_2(self, mod, species, xlim1, xlim2):
    """
    Plot one species for cycle between xlim1 and xlim2

    Parameters
    ----------
    mod : string or integer
        Model to plot, same as cycle number.
    species : list
        Which species to plot.
    xlim1, xlim2 : float
        Mass coordinate range.
    """
    mass = self.se.get(mod, 'mass')
    Xspecies = self.se.get(mod, 'yps', species)
    pyl.plot(mass, Xspecies, '-', label=str(mod) + ', ' + species)
    pyl.xlim(xlim1, xlim2)
    pyl.legend()
def isoratio_init(self, isos):
    '''
    This function returns the isotopic ratio of two isotopes specified
    as iso1 and iso2. The isotopes are given as, e.g.,
    ['Fe', 56, 'Fe', 58] or ['Fe-56', 'Fe-58'] (for compatibility) -> list.
    '''
    if len(isos) == 2:
        dumb = []
        dumb = isos[0].split('-')
        dumb.append(isos[1].split('-')[0])
        dumb.append(isos[1].split('-')[1])
        isos = dumb
    ssratio = old_div(self.habu[isos[0].ljust(2).lower() + str(int(isos[1])).rjust(3)],
                      self.habu[isos[2].ljust(2).lower() + str(int(isos[3])).rjust(3)])
    return ssratio
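Both this routine and iso_abundance() below look entries up in self.habu by a fixed-width key: the element symbol left-justified to two characters and lower-cased, followed by the mass number right-justified to three. The key construction in isolation (a standalone illustration, no class state needed):

>>> 'Fe'.ljust(2).lower() + str(int(56)).rjust(3)
'fe 56'
>>> 'C'.ljust(2).lower() + str(int(12)).rjust(3)
'c  12'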
def iso_abundance(self, isos):
    '''
    This routine returns the abundance of a specific isotope.
    Isotope given as, e.g., 'Si-28' or as list ['Si-28','Si-29','Si-30']
    '''
    if type(isos) == list:
        dumb = []
        for it in range(len(isos)):
            dumb.append(isos[it].split('-'))
        ssratio = []
        isos = dumb
        for it in range(len(isos)):
            ssratio.append(self.habu[isos[it][0].ljust(2).lower() + str(int(isos[it][1])).rjust(3)])
    else:
        isos = isos.split('-')
        ssratio = self.habu[isos[0].ljust(2).lower() + str(int(isos[1])).rjust(3)]
    return ssratio
def __str__(self):
    """
    >>> str(FloatTextRecord(float('-inf')))
    '-INF'
    >>> str(FloatTextRecord(-0.0))
    '-0'
    >>> str(FloatTextRecord(1.337))
    '1.337'
    """
    try:
        if self.value == int(self.value):
            return '%.0f' % self.value
        else:
            return str(self.value)
    except:
        return str(self.value).upper()
def __str__(self):
    """
    >>> str(DoubleTextRecord(float('-inf')))
    '-INF'
    >>> str(DoubleTextRecord(-0.0))
    '-0'
    >>> str(DoubleTextRecord(1.337))
    '1.337'
    """
    super_self = super(DoubleTextRecord, self)
    if hasattr(super_self, '__unicode__'):
        # PY3
        return super_self.__unicode__()
    else:
        # PY2
        return super_self.__str__()
def parse(cls, fp):
    """
    >>> from io import BytesIO
    >>> fp = BytesIO(b'\\x01x\\x04test\\x86')
    >>> ar = AttributeRecord.parse(fp)
    >>> str(ar.prefix)
    'x'
    >>> str(ar.name)
    'test'
    >>> ar.value
    <TrueTextRecord(type=0x86)>
    """
    prefix = Utf8String.parse(fp).value
    name = Utf8String.parse(fp).value
    type = struct.unpack(b'<B', fp.read(1))[0]
    value = Record.records[type].parse(fp)
    return cls(prefix, name, value)
def parse(cls, fp):
    """
    >>> from io import BytesIO
    >>> fp = BytesIO(b'\\x0c\\x86')
    >>> sdar = ShortDictionaryAttributeRecord.parse(fp)
    >>> sdar.index
    12
    >>> sdar.value
    <TrueTextRecord(type=0x86)>
    >>> str(sdar)
    'To="true"'
    """
    index = MultiByteInt31.parse(fp).value
    type = struct.unpack(b'<B', fp.read(1))[0]
    value = Record.records[type].parse(fp)
    return cls(index, value)
def parse(cls, fp):
    """
    >>> from io import BytesIO
    >>> fp = BytesIO(b'\\x01x\\x02\\x86')
    >>> dar = DictionaryAttributeRecord.parse(fp)
    >>> str(dar.prefix)
    'x'
    >>> dar.index
    2
    >>> str(dar.value)
    'true'
    """
    prefix = Utf8String.parse(fp).value
    index = MultiByteInt31.parse(fp).value
    type = struct.unpack(b'<B', fp.read(1))[0]
    value = Record.records[type].parse(fp)
    return cls(prefix, index, value)
def __str__(self):
    """
    >>> str(Decimal(False, 0, 1234, 3))
    '1.234'
    >>> str(Decimal(False, 0, 1234, 1))
    '123.4'
    >>> str(Decimal(True, 0, 1234, 1))
    '-123.4'
    >>> str(Decimal(False, 0, 5123456, 6))
    '5.123456'
    """
    log.warn('Possible false interpretation')
    value = str(self.high * 2**64 + self.low)
    if self.scale > 0:
        value = value[:-self.scale] + '.' + value[-self.scale:]
    if self.sign:
        value = '-%s' % value
    return value
def delete_configuration(self, EP):
    try:
        url = self.base_url + self.CORPS_EP + self.corp + self.SITES_EP + self.site + EP
        with open(self.file) as data_file:
            data = json.load(data_file)
        for config in data['data']:
            requests.delete(url + "/" + config['id'],
                            cookies=self.authn.cookies,
                            headers=self.get_headers())
        print("Delete complete!")
    except Exception as e:
        print('Error: %s ' % str(e))
        print('Query: %s ' % url)
        quit()
def cassandra_ddl_repr(data):
    """Generate a string representation of a map suitable for use in C* DDL"""
    if isinstance(data, str):
        return "'" + re.sub(r"(?<!\\)'", "\\'", data) + "'"
    elif isinstance(data, dict):
        pairs = []
        for k, v in data.items():
            if not isinstance(k, str):
                raise ValueError('DDL map keys must be strings')
            pairs.append(cassandra_ddl_repr(k) + ': ' + cassandra_ddl_repr(v))
        return '{' + ', '.join(pairs) + '}'
    # bool must be tested before int: bool is a subclass of int, so an
    # isinstance(data, int) check placed first would swallow booleans and
    # emit 'True'/'False' instead of the CQL literals 'true'/'false'
    elif isinstance(data, bool):
        if data:
            return 'true'
        else:
            return 'false'
    elif isinstance(data, int):
        return str(data)
    else:
        raise ValueError('Cannot convert data to a DDL representation')
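With booleans now tested before ints, a quick check of the output (the input dict is made up for illustration):

>>> cassandra_ddl_repr({'class': 'SimpleStrategy', 'durable': True, 'replication_factor': 1})
"{'class': 'SimpleStrategy', 'durable': true, 'replication_factor': 1}"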
def sequential(x, net, defaults={}, name='', reuse=None, var={}, layers={}):
    layers = dict(list(layers.items()) + list(predefined_layers.items()))
    y = x
    logging.info('Building Sequential Network : %s', name)
    with tf.variable_scope(name, reuse=reuse):
        for i in range(len(net)):
            ltype = net[i][0]
            lcfg = net[i][1] if len(net[i]) == 2 else {}
            lname = lcfg.get('name', ltype + str(i))
            ldefs = defaults.get(ltype, {})
            lcfg = dict(list(ldefs.items()) + list(lcfg.items()))
            for k, v in list(lcfg.items()):
                if isinstance(v, basestring) and v[0] == '$':
                    # print var, v
                    lcfg[k] = var[v[1:]]
            y = layers[ltype](y, lname, **lcfg)
            logging.info('\t %s \t %s', lname, y.get_shape().as_list())
    return y
def batch_get(self, keys):
    schema_len = len(self.schema)
    schema_names = [k.name for k in self.schema]
    dictkeys = []
    for key in keys:
        if not isinstance(key, (tuple, list)):
            key = [key]
        if schema_len != len(key):
            raise ArgumentError("key `%s` can not match "
                                "the table's schema" % str(key))
        dictkeys.append(dict(zip(schema_names, key)))
    if not dictkeys:
        return []
    results = self.table.batch_get(dictkeys)
    return self.wrap_result(results)
def process_i1k_tar_subpath(args):
    """
    Process a single subpath in an I1K tar. By process:
        optionally untar recursive tars (only on 'train')
        resize/copy images
    Returns a list of [(fname, label), ...]
    """
    target_size, toptar, img_dir, setn, label_dict, subpath = args
    name_slice = slice(None, 9) if setn == 'train' else slice(15, -5)
    label = label_dict[subpath.name[name_slice]]
    outpath = os.path.join(img_dir, str(label))
    if setn == 'train':
        tf = tarfile.open(toptar)
        subtar = tarfile.open(fileobj=tf.extractfile(subpath))
        file_list = subtar.getmembers()
        return process_files_in_tar(target_size, label, subtar, file_list, outpath)
    elif setn == 'val':
        tf = tarfile.open(toptar)
        file_list = [subpath]
        return process_files_in_tar(target_size, label, tf, file_list, outpath)
def update_instance(self, name, app_name, version, **kwargs):
    """
    Request the instance to be created with a desired state of 'running'.

    Args:
        name (str): The name of the instance to be started.
        app_name (str): The name of the application to be started.
        version (str): The version of the application to be started.
        **kwargs: Additional properties to use when starting the instance.

    Returns:
        :obj:`bigboat.instance.Instance` or `None`: The instance if it
        was started or `None` if the instance failed to start.
    """
    raise NotImplementedError('Must be implemented by subclasses')
def __prompt_for_role(account_name, role_names):
    border = ""
    spaces = ""
    for index in range(len(account_name)):
        border = "-" + border
        spaces = " " + spaces
    print('{}#------------------------------------------------{}#'.format(Colors.lblue, border))
    print('# {}You have access to the following roles in {}{}{} #'.format(Colors.white, Colors.yellow, account_name, Colors.lblue))
    print('# {}Which role would you like to assume?{}{} #'.format(Colors.white, Colors.lblue, spaces))
    print('#------------------------------------------------{}#{}'.format(border, Colors.normal))
    for index, role_name in enumerate(role_names):
        if role_name == "AccountAdministrator":
            print("\t{}{} {}{}".format(Colors.red, str(index).rjust(2), role_name, Colors.normal))
        else:
            print("\t{}{}{} {}{}".format(Colors.white, str(index).rjust(2), Colors.cyan, role_name, Colors.normal))
    while True:
        choice = input('{}Select role: {}'.format(Colors.lblue, Colors.normal))
        try:
            return role_names[int(choice)]
        except:
            maximum = len(role_names) - 1
            print('{}Please enter an integer between 0 and {}{}'.format(Colors.lred, maximum, Colors.normal))
def repl(args, quantizer):
    dataset = Dataset(args.model)
    clf = load_clf(dataset, args)
    classes = loadClasses(dataset)
    try:
        while True:
            title = input("> ")
            X = quantizer.quantize(title)
            y_hat = clf.predict(X, 'dict')[0]
            yi = islice(iter(y_hat.items()), args.max_predict)
            nvals = [[str(classes[k]), v] for k, v in yi]
            pprint.pprint(nvals)
    except KeyboardInterrupt:
        pass
def json_encode(self):
    jsonDict = self.__getstate__()

    # Turn objects into labels
    for member in ["phys_chan", "gate_chan", "trig_chan", "receiver_chan",
                   "source", "target"]:
        if member in jsonDict and not isinstance(jsonDict[member], str):
            obj = jsonDict.pop(member)
            if obj:
                jsonDict[member] = obj.label

    # We want the name of shape functions
    if "pulse_params" in jsonDict:
        pulse_params = deepcopy(jsonDict.pop("pulse_params"))
        if "shape_fun" in pulse_params:
            pulse_params["shape_fun"] = pulse_params["shape_fun"].__name__
        jsonDict["pulse_params"] = pulse_params

    return jsonDict
def clear(self, url=None, xpath=None):
    """Clear cache

    Args:
        url (str): If given, clear specific item only. Otherwise remove
            the DB file.
        xpath (str): xpath to search (may be ``None``)
    """
    if url is not None:
        query = self._query(url, xpath)
        if query.count() > 0:
            query.delete()
            self.session.commit()
        else:
            raise KeyError("Cannot clear URL, not in cache: " + str(url) +
                           " xpath:" + str(xpath))
    else:
        # remove the DB file
        self.close()
        if path.exists(self.db_path):
            remove(self.db_path)
def get_timestamp(self, url, xpath=None):
    """Get time stamp of cached query result.

    If DB has not yet been initialized or url/xpath has not been
    queried yet, return None.

    Args:
        url (str): URL whose cached response timestamp to look up.
        xpath (str): xpath to search (may be ``None``)

    Returns:
        datetime.datetime: cached response timestamp, None if not available
    """
    if not path.exists(self.db_path):
        return None
    if self._query(url, xpath).count() > 0:
        return self._query(url, xpath).one().queried_on
def serialize(self, array, dict_key):
    # TODO: select more appropriate block size
    block_size = 1024 * 1024
    for block, last in self.slice_ndarray(array, block_size):
        assert 0 <= len(block) <= block_size
        yield bridge_pb2.Item(
            item_type=self.item_type,
            dict_key=dict_key if last else None,
            numpy_array_value=bridge_pb2.Item.NumpyArray(
                last=last,
                dtype=str(array.dtype) if last else None,
                shape=array.shape if last else None,
                data=block
            )
        )
def create(self):
    try:
        app_path = self.mk_app_folder()
        cmd_generate = CmdGenerate(self.ctx, app_path + '/',
                                   self.algorithm, self.environment,
                                   copy_algorithm=False,
                                   create_config_backup=False)
        cmd_generate.create_default_config(self.environment)
        cmd_generate.apply()
        if self.environment == 'openai-gym':
            self.ctx.log('Please make sure you have OpenAI Gym installed; '
                         'see installation instruction here: https://github.com/openai/gym')
        self.ctx.log('To run application, please do: cd %s && relaax run' % self.app_name)
    except Exception as e:
        self.ctx.log('%s', str(e))
def test_socket_error_on_accept2(self, monkeypatch):
    def error(*args):
        called.times += 1
        called.args = args

    logger = Mock()
    called = MockUtils.Placeholder()
    logger.error = error
    monkeypatch.setattr('relaax.server.rlx_server.rlx_port.log', logger)
    if sys.platform != 'win32':
        monkeypatch.setattr(os, 'fork', lambda: 0)
    monkeypatch.setattr(socket, 'socket', lambda af, st: self.socket)
    self.socket.accept = lambda: MockUtils.raise_(socket.error(errno.ENOMEM, "fatal error message"))
    try:
        RLXPort.listen(('localhost', 7000))
        assert False
    except Exception as e:
        assert called.args == ('Could not accept new connection (fatal error message)',)
        assert called.times == 1
        assert str(e) == '[Errno %d] fatal error message' % errno.ENOMEM
def test_socket_busy_on_accept(self, monkeypatch):
    accept_responses = [
        lambda: MockUtils.raise_(socket.error(errno.ENOBUFS, "fatal error message")),
        lambda: MockUtils.raise_(socket.error(errno.EAGAIN, "busy, try accept again")),
        lambda: MockUtils.raise_(socket.error(errno.EPERM, "rejected, but try accept again"))
    ]
    if sys.platform != 'win32':
        monkeypatch.setattr(os, 'fork', lambda: 0)
    monkeypatch.setattr(socket, 'socket', lambda af, st: self.socket)
    self.socket.accept = lambda: accept_responses.pop()()
    try:
        RLXPort.listen(('localhost', 7000))
        assert False
    except Exception as e:
        traceback.format_exc()
        assert str(e) == '[Errno %d] fatal error message' % errno.ENOBUFS
def test_fork_error(self, monkeypatch):
    def critical(*args):
        called.times += 1
        called.args = args

    logger = Mock()
    called = MockUtils.Placeholder()
    logger.critical = critical
    accept_responses = [
        lambda: MockUtils.raise_(socket.error(errno.EMFILE, "fatal error message")),
        lambda: (MockSocket.create(), ('some-address', 7000))
    ]
    monkeypatch.setattr('relaax.server.rlx_server.rlx_port.log', logger)
    if sys.platform != 'win32':
        monkeypatch.setattr(os, 'fork', lambda: MockUtils.raise_(OSError('can\'t fork')))
    monkeypatch.setattr(socket, 'socket', lambda af, st: self.socket)
    self.socket.accept = lambda: accept_responses.pop()()
    try:
        RLXPort.listen(('localhost', 7000))
        assert False
    except Exception as e:
        if sys.platform != 'win32':
            assert called.args == ("Can't start child process ('some-address', 7000): can't fork",)
            assert called.times == 1
        assert str(e) == '[Errno %d] fatal error message' % errno.EMFILE
def __checktype(self, value):
    """Checks if the given value is a known datatype

    :param value: The value to check
    :type value: str
    :return: Data type of value, if known, else empty string
    :rtype: str
    """
    if isinstance(value, (str, unicode)):
        for r in self.regex:
            if r.get('regex').match(value):
                return r.get('type')
    return ''
def wavejson_to_wavedrom(wavejson, width=None, skin='default'):
    '''
    Create WaveDrom display from WaveJSON data.

    This code is from https://github.com/witchard/ipython-wavedrom.

    Inputs:
      width: Width of the display window in pixels. If left as None, the
             entire waveform will be squashed into the width of the page.
             To prevent this, set width to a large value. The display will
             then become scrollable.
      skin:  Selects the set of graphic elements used to draw the waveforms.
             Allowable values are 'default' and 'narrow'.
    '''

    # Set the width of the waveform display.
    style = ''
    if width != None:
        style = ' style="width: {w}px"'.format(w=str(int(width)))

    # Generate the HTML from the JSON.
    htmldata = '<div{style}><script type="WaveDrom">{json}</script></div>'.format(
        style=style, json=json.dumps(wavejson))
    DISP.display_html(DISP.HTML(htmldata))

    # Trigger the WaveDrom Javascript that creates the graphical display.
    DISP.display_javascript(
        DISP.Javascript(
            data='WaveDrom.ProcessAll();',
            lib=[
                'http://wavedrom.com/wavedrom.min.js',
                'http://wavedrom.com/skins/{skin}.js'.format(skin=skin)
            ]))

    # The following allows the display of WaveDROM in the HTML files generated by nbconvert.
    # It's disabled because it makes Github's nbconvert freak out.
    setup = '''
<script src="http://wavedrom.com/skins/{skin}.js" type="text/javascript"></script>
<script src="http://wavedrom.com/wavedrom.min.js" type="text/javascript"></script>
<body onload="WaveDrom.ProcessAll()">
'''.format(skin=skin)
    #DISP.display_html(DISP.HTML(setup))
def _API(self):
    url = 'https://%s/' % self.origin_domain
    path = 'api/v4/'
    return urljoin(str(url), str(path))
def merge_results(sol, files):
    model = get_model_type(sol)
    save_where = '/Batch results/'
    working_path = getcwd().replace("\\", "/") + "/"
    save_path = working_path + save_where
    print("\nChecking for longest csv file")
    lengths = []
    for f in files:
        to_merge_temp = working_path + "/Results/%s/INV_%s-%s_%s.csv" % (f, sol.model, model, f)
        headers_temp = np.genfromtxt(to_merge_temp, delimiter=",", dtype=str, skip_footer=1)
        lengths.append(len(headers_temp))
    to_merge_max = working_path + "/Results/%s/INV_%s-%s_%s.csv" % (
        files[lengths.index(max(lengths))], sol.model, model,
        files[lengths.index(max(lengths))])
    headers = np.genfromtxt(to_merge_max, delimiter=",", dtype=str, skip_footer=1)
    print("\nMerging csv files")
    if not path.exists(save_path):
        makedirs(save_path)
#    to_merge = working_path+"/Results/%s/INV_%s_%s.csv" %(files[0],model,files[0])
#    headers = np.genfromtxt(to_merge, delimiter=",", dtype=str, skip_footer=1)
    merged_inv_results = np.zeros((len(files), len(headers)))
    merged_inv_results.fill(np.nan)
    for i, f in enumerate(files):
        to_add = np.loadtxt(working_path + "/Results/%s/INV_%s-%s_%s.csv" % (f, sol.model, model, f),
                            delimiter=",", skiprows=1)
        merged_inv_results[i][:to_add.shape[0]] = to_add
    rows = np.array(files, dtype=str)[:, np.newaxis]
    hd = ",".join(["ID"] + list(headers))
    np.savetxt(save_path + "Merged_%s-%s_%s_TO_%s.csv" % (sol.model, model, files[0], files[-1]),
               np.hstack((rows, merged_inv_results)), delimiter=",", header=hd, fmt="%s")
    print("Batch file successfully saved in:\n", save_path)
def run_inversion(self):
    try:
        self.clear()
    except:
        pass
    self.sel_files = [str(self.open_files[i]) for i in self.text_files.curselection()]
    if len(self.sel_files) == 0:
        tkinter.messagebox.showwarning("Inversion error",
                                       "No data selected for inversion \nSelect at least one data file in the left panel",
                                       parent=self.master)
    if len(self.sel_files) >= 1:
        try:
            self.Inversion()
            stdout.flush()
        except:
            tkinter.messagebox.showerror("Inversion error",
                                         "Error\nMake sure all fields are OK\nMake sure data file is correctly formatted",
                                         parent=self.master)
            return
def preview_data(self):
    try:
        for i in self.text_files.curselection():
            sel = str(self.open_files[i])
            fn = sel.split("/")[-1].split(".")[0]
            fig_data = sel.plot_data(self.head.get(), self.units.get())
            self.plot_window(fig_data, "Data preview: " + fn)
    except:
        tkinter.messagebox.showwarning("Preview error",
                                       "Can't draw data\nImport and select at least one data file first",
                                       parent=self.master)
def plugin_add(self, url):
    root = relative(self.root)
    try:
        # Decide on what to call the plugin and its path
        plugin = p.split(url)[1]
        plugin_path = root(self.config["modules"], plugin)
        # Download the plugin via git
        print("Installing Ampersand plugin '%s'" % plugin)
        try:
            clone = subprocess.check_call(["git", "clone", url, plugin_path])
            try:
                plugins_dict = self.config["plugins"]
            except KeyError:
                self.config["plugins"] = {}
                plugins_dict = self.config["plugins"]
            # Update the _ampersand.json file by adding the plugin
            plugins_dict[p.basename(plugin)] = p.join(
                self.config["modules"], plugin)
            with open(p.join(self.root, "_ampersand.json"), "w",
                      encoding="utf-8") as updated:
                updated.write(json.dumps(self.config, indent=4, ensure_ascii=False))
        except (subprocess.CalledProcessError, KeyboardInterrupt) as e:
            print(str(e))
            sys.exit()
    except KeyError as e:
        print("Missing entry in your configuration file: %s" % str(e))
def copy_content(src, dest):
    if os.path.exists(src):
        try:
            if os.path.exists(dest):
                shutil.rmtree(dest)
            shutil.copytree(src, dest)
        except OSError as exc:
            try:
                if exc.errno == errno.ENOTDIR:
                    shutil.copy(src, dest)
                else:
                    raise
            except OSError as e:
                print("Couldn't copy: %s" % str(e))
    else:
        print("Source path doesn't exist: %s" % src)
def get_json(path):
    # Load a JSON file into a dictionary
    try:
        return json.loads(read_file(path))
    except ValueError as e:
        print("Failed to get JSON from '%s': %s" % (path, str(e)))
        sys.exit()
def string_to_enum(self, s):
    """
    Convert string to an Origins Enum object

    :param str s: The string representing the name of the origin
    """
    if s.lower() == "quickmeme":
        return self.QUICKMEME
    elif s.lower() == "memegenerator":
        return self.MEMEGENERATOR
    elif s.lower() == "redditmemes":
        return self.REDDITMEMES
    else:
        return self.NA