The following 18 code examples, extracted from open-source Python projects, illustrate how to use util.get_data().
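Note that util.get_data() is not a standard-library or PyPI function: each project ships its own version, so the signature varies (a pandas price loader taking a symbol list and a DatetimeIndex in most of the examples below, a TensorFlow batch loader or a single-ticker loader in others). As a rough, hypothetical sketch of the pandas-style call that most of the examples assume (symbols plus dates, with SPY added automatically):

import datetime as dt
import pandas as pd
from util import get_data  # project-specific helper; exact signature depends on the repo

# Hypothetical usage, assuming the ML4T-style loader used in most examples below:
# pass a list of symbols and a DatetimeIndex, get back a DataFrame of adjusted prices.
dates = pd.date_range(dt.datetime(2008, 1, 1), dt.datetime(2009, 1, 1))
prices_all = get_data(['GOOG', 'AAPL'], dates)   # many versions also add 'SPY' automatically
prices = prices_all[['GOOG', 'AAPL']]            # keep only the requested symbols
print(prices.head())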
def compute_portvals(orders_file = "./orders/orders2.csv", \
                     start_val = 1000000, leverLimit = True):
    # this is the function the autograder will call to test your code
    order = pd.read_csv(orders_file, index_col = 'Date', parse_dates = True, na_values=['nan'])

    """Create dataframe prices with symbol/dates/prices relevant to the order"""
    start_date = order.index[0]
    end_date = order.index[-1]
    dates = pd.date_range(start_date, end_date)
    symbols = list( order.ix[:,0].unique() )  # ndarray to list of symbols in order
    prices = get_data(symbols, dates)

    """Create dataframe of cash and deposits in stocks, indexed by date"""
    cashStocks = get_cash_stocks(order, prices, symbols, start_val, leverLimit)
    posVals = get_position_values(cashStocks, prices, symbols)
    portVals = posVals.sum(axis = 1)

    return portVals
def main(_):
    """Does all the things that we need to do:
        - gets data
        - sets up the graph for inference and sampling
        - gets training ops etc.
        - initialise or reload the variables.
        - train until it's time to go.
    """
    _start_msg('getting data')
    data = util.get_data(FLAGS.batch_size, FLAGS.sequence_length, FLAGS.dataset,
                         FLAGS.embedding_size)
    _end_msg('got data')
    _start_msg('getting forward model')
    rnn_model = get_forward(data)
    _end_msg('got forward model')
    _start_msg('getting train ops')
    # TODO(pfcm): be more flexible with this
    global_step = tf.Variable(0, name='global_step', trainable=False)
    saver = tf.train.Saver(tf.all_variables(), max_to_keep=1)
    loss_op, train_op = minimise_xent(
        rnn_model['inference']['logits'],
        data['placeholders']['targets'],
        global_step=global_step)
    _end_msg('got train ops')

    do_training(data, rnn_model, loss_op, train_op, saver=saver)
def compute_prices(sd = dt.datetime(2008,1,1), ed = dt.datetime(2009,12,31),
                   lookback = 21, syms = ['AAPL'], gen_plot = False):
    # Read in adjusted closing prices for given symbols, date range
    dates = pd.date_range(sd, ed)
    prices_all = get_data(syms, dates)  # automatically adds SPY
    prices = prices_all[syms]           # only portfolio symbols
    prices_SPY = prices_all['SPY']      # only SPY, for comparison later
    return prices, prices_all, prices_SPY

### calculate the stochastic indicator
def main():
    legend = []

    # p0 = Profolio('All short-only')
    # for etf in ETFs:
    #     data = util.get_data(etf, '2016/1/1', '2016/12/31')
    #     p0.profits -= data.Return
    # label, = plt.plot(range(1, 253), p0.accProfits(), 'y--', label=p0.name)
    # legend.append(label)

    # baseline1 SPY long-only
    p1 = Profolio('SPY long-only')
    data = util.get_data('SPY', '2016/1/1', '2016/12/31')
    p1.profits = data.Return * 9
    label, = plt.plot(range(1, 253), p1.accProfits(), 'b--', label=p1.name)
    legend.append(label)

    # baseline2 All long-only
    p2 = Profolio('All long-only')
    for etf in ETFs:
        data = util.get_data(etf, '2016/1/1', '2016/12/31')
        p2.profits += data.Return
    label, = plt.plot(range(1, 253), p2.accProfits(), 'g--', label=p2.name)
    legend.append(label)

    # My strategy
    my = Profolio('My strategy')
    my.profits = numpy.zeros(252)
    for etf in ETFs:
        my.profits += smart_trade(etf, 'SVM', 4)
    label, = plt.plot(range(1, 253), my.accProfits(), 'r--', label=my.name)
    legend.append(label)

    plt.legend(handles=legend)
    plt.show()
    print p1.annualSharpeRatio(), p2.annualSharpeRatio(), my.annualSharpeRatio()
def assess_portfolio(sd = dt.datetime(2008,1,1), ed = dt.datetime(2009,1,1), \
    syms = ['GOOG','AAPL','GLD','XOM'], \
    allocs = [0.1,0.2,0.3,0.4], \
    sv = 1000000, rfr = 0.0, sf = 252.0, \
    gen_plot = False):

    # Read in adjusted closing prices for given symbols, date range
    dates = pd.date_range(sd, ed)
    prices_all = get_data(syms, dates)  # automatically adds SPY
    prices = prices_all[syms]           # only portfolio symbols
    prices_SPY = prices_all['SPY']      # only SPY, for comparison later

    portVal = get_portfolio_value(prices, allocs, sv)
    cr, adr, sddr, sr = get_portfolio_stats(portVal, rfr, sf)

    # Compare daily portfolio value with SPY using a normalized plot
    if gen_plot:
        # add code to plot here
        df_temp = pd.concat([portVal, prices_SPY], keys=['Portfolio', 'SPY'], axis=1)
        plot_normalized_data(df_temp, 'Daily portfolio value and SPY', \
                             'date', 'normalized price')
        # ( df_temp / df_temp.values[0,:] ).plot()

    # Add code here to properly compute end value
    ev = portVal[-1]

    return cr, adr, sddr, sr, ev
def defineData(startDate = '01-01-2008', stopDate = '31-12-2009', symList = ['AAPL']):
    """
    @Summary: Create a Series of a single stock price
    @param startDate: starting date
    @param stopDate: end date
    @param symList: List of a single stock symbol
    @returns a Series containing the prices with the specified dates as indices
    """
    dates = pd.date_range(startDate, stopDate)
    df = get_data(symList, dates)
    data = df.ix[:,1]  # First column is SPY by default
    return data
def optimize_portfolio(sd=dt.datetime(2008,1,1), ed=dt.datetime(2009,1,1), \
    syms=['GOOG','AAPL','GLD','XOM'], gen_plot=False):

    start_val = 1000000
    daily_rf = 0
    samples_per_year = 252

    # Read in adjusted closing prices for given symbols, date range
    dates = pd.date_range(sd, ed)
    prices_all = get_data(syms, dates)  # automatically adds SPY
    prices = prices_all[syms]           # only portfolio symbols
    prices_SPY = prices_all['SPY']      # only SPY, for comparison later

    # find the allocations for the optimal portfolio
    allocGuess = np.ones(len(syms), dtype = 'float32') / len(syms)
    setBnds = tuple( [(0,1) for x,y in enumerate(allocGuess)] )  # create tuple of (0,1) tuples
    # 'constraints' below constrains allocations to sum to 1
    # and 'setBnds' forces each allocation to lie in (0,1)
    srMax = spo.minimize(cost, allocGuess, bounds = setBnds, \
        constraints = ({ 'type': 'eq', 'fun': lambda inputs: 1.0 - np.sum(inputs) }), \
        args = (prices, start_val, daily_rf, samples_per_year,), \
        method = 'SLSQP', options = {'disp': True})
    allocs = srMax.x

    portVal = get_portfolio_value(prices, allocs, start_val)
    cr, adr, sddr, sr = get_portfolio_stats(portVal, daily_rf, samples_per_year)

    # Compare daily portfolio value with SPY using a normalized plot
    if gen_plot:
        df_temp = pd.concat([portVal, prices_SPY], keys=['Portfolio', 'SPY'], axis=1)
        plot_normalized_data(df_temp, 'Optimized portfolio values', 'date', \
                             'normalized price')

    return allocs, cr, adr, sddr, sr
def __init__(self, symbols,
             start_date=dt.datetime(2008,1,1),  # CHANGE DATES TO ACTUAL RANGE!!!
             end_date=dt.datetime(2009,1,1)):

    # frame a time period as world
    self.dates_range = pd.date_range(start_date, end_date)

    # initialize cash holdings
    init_cash = 100000

    # for visualization
    self.data_out = []

    # preprocessing time series
    # stock symbol data
    stock_symbols = symbols[:]
    symbols.append('interest_rates')
    symbols.append('vix')

    # price data
    prices_all = util.get_data(symbols, self.dates_range, True)
    self.stock_A = stock_symbols[0]
    self.stock_B = stock_symbols[1]

    """
    # unemployment rate
    temp_unemp = {}
    unemployment = {}
    with open('unemployment.csv') as unemp_file:
        for line in csv.reader(unemp_file, delimiter=','):
            curr_date = dt.strptime(line[0], '%B-%y')
            temp_unemp[curr_date] = line[1]
    for d in prices_all.keys():
        temp_date = dt.datetime(d.year, d.month)
        if temp_date in temp_unemp:
            unemployment[d] = temp_unemp[temp_date]
    """

    # first trading day
    self.dateIdx = 0
    self.date = prices_all.index[0]
    self.start_date = start_date
    self.end_date = end_date

    self.prices = prices_all[stock_symbols]
    self.prices_SPY = prices_all['spy']
    self.prices_VIX = prices_all['vix']
    self.prices_interest_rate = prices_all['interest_rates']

    # keep track of portfolio value as a series
    self.portfolio = {'cash': init_cash,
                      'a_vol': [], 'a_price': [],
                      'b_vol': [], 'b_price': [],
                      'longA': 0}
    self.port_val = self.port_value_for_output()

    # hardcode enumerating of features
    """
    self.sma = SMA(self.dates_range)
    self.bbp = BBP(self.dates_range)
    self.rsi = RSI(self.dates_range)
    """
def sample(model, data, sess, seed=None):
    """Draws a sample from the model

    Args:
        model (dict): return value of one of the get_model functions.
        data (dict): return from the get_data function.
        sess (tf.Session): a session in which to run the sampling ops.
        seed (Optional): either a sequence to feed into the first few batches
            or None, in which case just the GO symbol is fed in.

    Returns:
        str: the sample.
    """
    if 'inverse_vocab' not in data:
        data['inverse_vocab'] = {b: a for a, b in data['vocab'].items()}
    if seed is not None:
        # run it a bit to get a starting state
        state, inputs = _init_nextstep_state(model, data, seed, sess)
    else:
        # otherwise start from zero
        state = sess.run(model['sampling']['initial_state'])
        inputs = np.array(
            [[data['go_symbol']] * FLAGS.batch_size] * FLAGS.sequence_length)
    seq = []
    # now just roll through
    while len(seq) < FLAGS.sample_length:
        results = sess.run(
            model['sampling']['sequence'] + model['sampling']['final_state'],
            _fill_feed(data, inputs,
                       state_var=model['sampling']['initial_state'],
                       state_val=state))
        seq.extend(results[:FLAGS.sequence_length-2])
        state = results[FLAGS.sequence_length-1:]
        inputs = np.array([seq[-1]] * FLAGS.sequence_length)
    batch_index = random.randint(0, FLAGS.batch_size)
    samp = ''.join([str(data['inverse_vocab'][symbol[batch_index]])
                    for symbol in seq])
    return samp
def compute_portvals2(orders_file="train.csv", start_val=100000):
    # this is the function the autograder will call to test your code
    # TODO: Your code here
    of = open(orders_file, 'w+')
    of.write('Date,Symbol,Order,Shares\n')
    if orders_file == "train.csv":
        of.write("2006-01-03,IBM,BUY,500\n")
        of.write("2009-12-31,IBM,SELL,500\n")
    else:
        of.write("2010-01-04,IBM,BUY,500\n")
        of.write("2010-12-31,IBM,SELL,500\n")
    of.close()
    orders_df = pd.read_csv(orders_file, index_col='Date', parse_dates=True, na_values=['nan'])
    os.remove(orders_file)

    # In the template, instead of computing the value of the portfolio, we just
    # read in the value of IBM over 6 months
    start_date = orders_df.index[0]
    end_date = orders_df.index[-1]
    orders_df.index = orders_df.index - start_date
    bad_index = datetime(2011, 6, 15) - start_date
    cos = orders_df['Symbol'].unique()
    portvals = get_data(cos, pd.date_range(start_date, end_date), addSPY=False)
    portMatrix = portvals.as_matrix()
    rows = np.isfinite(portMatrix[:, 0])
    Allocs = np.zeros((orders_df.index[-1].days + 1, len(cos)))
    Cash = np.zeros(orders_df.index[-1].days + 1)
    Cash.fill(100000)
    leverage = 0  # (sum(abs(all stock positions))) / (sum(all stock positions) + cash)
    stockVal = 0

    for order in orders_df.iterrows():
        day = order[0].days
        sym = np.where(cos == order[1][0])[0][0]
        amt = order[1][2]
        if day == bad_index.days:
            continue
        if order[1][1] == 'BUY':
            Allocs[day][sym] += amt
            Cash[day:] -= amt * portMatrix[day][sym]
        else:
            Allocs[day][sym] -= amt
            Cash[day:] += amt * portMatrix[day][sym]

    Allocs = np.cumsum(Allocs, axis=0)
    norm_vals = np.sum(np.multiply(Allocs, portMatrix), axis=1)
    norm_vals = np.add(norm_vals, Cash)
    norm_vals = pd.DataFrame(data=norm_vals[rows], index=portvals.index[rows])
    return norm_vals
def get_indicators(symbols, start_date, end_date, lookback=14):
    # Construct an appropriate DatetimeIndex object.
    dates = pd.date_range(start_date, end_date)

    # Read all the relevant price data (plus SPY) into a DataFrame.
    price = util.get_data(symbols, dates)

    # Add SPY to the symbol list for convenience.
    symbols.append('SPY')

    # Simple Moving Average
    sma = price.rolling(window=lookback, min_periods=lookback).mean()
    smaRatio = sma.copy()
    smaRatio = price / sma

    # Exponentially Moving Average
    ewma = price.ewm(ignore_na=False, span=lookback, min_periods=0, adjust=True).mean()
    ewmaRatio = ewma.copy()
    ewmaRatio = price / ewma

    # Momentum
    momentum = price.copy()
    momentum /= momentum.shift(lookback)
    momentum -= 1
    momentum = momentum.fillna(0)

    # Bollinger Bands
    rolling_std = price.rolling(window=lookback, min_periods=lookback).std()
    top_band = sma + (2 * rolling_std)
    bottom_band = sma - (2 * rolling_std)
    bbp = (price - bottom_band) / (top_band - bottom_band)

    data = pd.concat([price, sma, smaRatio, bbp, ewma, ewmaRatio, momentum],
                     keys=['Price', 'SMA', 'SMA Ratio', 'BBP', 'EMA', 'EMA Ratio', 'Momentum'],
                     axis=1)
    return data
def print_port(of, sv=1000000, output=False, lvrg=False, symbol='SPY'):
    '''
    :param of: .csv order file
    :param sv: starting value of the portfolio
    :param output: whether to output info
    :param lvrg: whether to consider leverage of the portfolio
    :param symbol: stock symbol to consider
    :return: void
    '''
    portvals, start_date, end_date = compute_portvals(orders_file=of, start_val=sv, lvrg=lvrg)
    if isinstance(portvals, pd.DataFrame):
        portvals = portvals[portvals.columns[0]]  # just get the first column
    else:
        "warning, code did not return a DataFrame"
    cum_ret, avg_daily_ret, std_daily_ret, sharpe_ratio = compute_portfolio_stats(portvals)

    # SPX data originally has a '$' in front which creates a problem as an improper name
    symb_vals = get_data([symbol], pd.date_range(start_date, end_date))
    cum_ret_SPY, avg_daily_ret_SPY, std_daily_ret_SPY, sharpe_ratio_SPY = \
        compute_portfolio_stats(symb_vals.SPY_test)

    if output:
        # Compare portfolio against $SPX
        print "Date Range: {} to {}".format(start_date, end_date)
        print
        print "Sharpe Ratio of Fund: {}".format(sharpe_ratio)
        print "Sharpe Ratio of SPY : {}".format(sharpe_ratio_SPY)
        print
        print "Cumulative Return of Fund: {}".format(cum_ret)
        print "Cumulative Return of SPY : {}".format(cum_ret_SPY)
        print
        print "Standard Deviation of Fund: {}".format(std_daily_ret)
        print "Standard Deviation of SPY : {}".format(std_daily_ret_SPY)
        print
        print "Average Daily Return of Fund: {}".format(avg_daily_ret)
        print "Average Daily Return of SPY : {}".format(avg_daily_ret_SPY)
        print
        print "Final Portfolio Value: {}".format(portvals[-1])

    norm_port = portvals / portvals[0]
    norm_SPY = symb_vals.SPY_test / symb_vals.SPY_test[0]
    plt.plot(norm_port)
    plt.plot(norm_SPY)
    plt.title('Daily portfolio value and SPY')
    plt.ylabel('Normalized price')
    plt.legend(['Portfolio', 'SPY'], loc=2)
    plt.show()
def query_model(sym=['AAPL'], sd=dt.datetime(2017,5,1), ed=dt.datetime(2017,7,1),
                holdings=None, inds_list=['bbp','ATR'],
                div_file='Dividers.csv', QT_file='Q_Table.csv'):
    data = get_data(sym, dates=pd.date_range(sd - dt.timedelta(days=35), ed))
    data = add_bband(data)
    data = add_ATR(data)

    # make a divider dictionary from saved csv file
    try:
        divider = pd.read_csv(div_file, index_col=0)
    except IOError as e:
        print(e)
    div_dict = {}
    for ind in inds_list:
        div_dict[ind] = divider[ind].tolist()

    # create a StrategyLearner just to get states
    sLeaner = sl.StrategyLearner()
    sLeaner.div_dict = div_dict
    indicators = data[inds_list].dropna(how='all')
    states = sLeaner._get_state(indicators)

    # slicing out only required date range
    # able to deal with non-trading sd
    pass_day = 0
    while sd not in indicators.index.tolist():
        sd = sd + dt.timedelta(days=1)
        pass_day += 1
        if sd > indicators.index[-1]:
            print('something wrong with the start date')
            break
    start_index = indicators.index.get_loc(sd)
    states = states[start_index:]

    if holdings is None:
        states = states + 100  # in this two indicator case, assume no holdings
    else:
        try:
            new_holdings = holdings[pass_day:]
            for i, hold in zip(range(len(states)), new_holdings):
                states[i] = states[i] + hold
        except:
            print('may have different length of holding information in this case')

    try:
        Q_table = pd.read_csv(QT_file, index_col=0)
    except IOError as e:
        print(e)
    Q_table = np.matrix(Q_table)
    qLearner = ql.QLearner(rar=0)  # no random choice
    qLearner.Q_table = Q_table

    look_up = {0: 'SELL', 1: 'NOTHING', 2: 'BUY'}
    suggestions = []
    for state in states:
        suggestions.append(look_up[qLearner.querysetstate(state)])

    effect_dates = indicators.index[start_index:]
    guide_df = pd.DataFrame(suggestions, index=effect_dates, columns=['{}'.format(sym[0])])
    return guide_df
def test_code():
    # this is a helper function you can use to test your code
    # note that during autograding this function will not be called.
    # Define input parameters
    verbose = False

    # initializing by reading data from files
    data_start = '2007-12-01'
    data_end = '2011-12-31'
    data = get_data(['IBM'], pd.date_range(data_start, data_end))
    data, data['SMA'], data['StDev'] = add_bband(data, N=20)
    data = add_mmt(data)
    data = add_MACD(data)

    # generate the daily return
    data['DRet'] = data.iloc[:,1].slice_shift(-1)/data.iloc[:,1] - 1

    # for testing of bollinger band only
    plt.plot(data.iloc[:,1][20:300])
    plt.plot(data.SMA[20:300]+2*data.StDev[20:300])
    plt.plot(data.SMA[20:300]-2*data.StDev[20:300])
    plt.plot(data.SMA[20:300])
    plt.show()

    # mmt_div = bin_divider(data.mmt)
    # bbp_div = bin_divider(data.bbp)
    # MACD_div = bin_divider(data.MACD)
    #
    # sd = dt.datetime(2008, 1, 1); ed = dt.datetime(2009, 1, 1)
    # temp = data.index[data.index > sd]
    # print temp[0]

    if verbose:
        print 'mmt_div:', mmt_div
        print 'bbp_div:', bbp_div
        print 'MACD_div:', MACD_div
        test = [data.mmt[500], data.bbp[500], data.MACD[500]]
        print test
        s = get_state(test, [mmt_div, bbp_div, MACD_div], 1)
        print s

    # # start and end date the learner concerns
    # start_date = '2007-12-31'
    # end_date = '2009-12-31'
    # # generate training data
    # trainX = data[['bband','mmt','MACD']][start_date:end_date].values
    # trainY = data.Y[start_date:end_date].values
    #
    # plt.plot(data.iloc[:,1]['2007-12-31':'2009-12-31'],'r')
    # # adjust prediction Y to scale up with price
    # enlarge = (data.iloc[:,1].max()-data.iloc[:,1].min())/(trainY.max()-trainY.min())
    # plt.plot(data.Y[start_date:end_date]*enlarge+(data.iloc[:,1].min()+data.iloc[:,1].max())/2,'g')
    # plt.show()
    #
    # orders = generate_order(ana_data)
def main():
    stock_name = 'SPY'
    delta = 4
    start = datetime.datetime(2010,1,1)
    end = datetime.datetime(2015,12,31)
    start_test = datetime.datetime(2015,1,1)

    dataset = util.get_data(stock_name, start, end)
    delta = range(1, delta)
    dataset = util.applyFeatures(dataset, delta)
    dataset = util.preprocessData(dataset)

    X_train, y_train, X_test, y_test = \
        classifier.prepareDataForClassification(dataset, start_test)

    X_train = numpy.reshape(numpy.array(X_train), (X_train.shape[0], 1, X_train.shape[1]))
    X_test = numpy.reshape(numpy.array(X_test), (X_test.shape[0], 1, X_test.shape[1]))

    # Step 2 Build Model
    model = Sequential()

    model.add(LSTM(
        128,
        input_shape=(None, X_train.shape[2]),
        return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(
        240,
        return_sequences=False))
    model.add(Dropout(0.2))

    model.add(Dense(units=1))
    model.add(Activation('sigmoid'))

    start = time.time()
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

    # Step 3 Train the model
    model.fit(
        X_train, y_train,
        batch_size=4, epochs=4, validation_split=0.1)

    print model.predict(X_train)
    print model.evaluate(X_train, y_train)
def smart_trade(etf, method, delta):
    parameters = [8, 0.0125]
    data = util.get_data(etf, '2014/1/1', '2016/12/31')
    # keep a copy for unscaled data for later gain calculation
    # TODO replace by MinMax_Scaler.inverse_transform()
    #
    # the first day of test is 2015/12/31. Using this data on this day to predict
    # Up/Down of 2016/01/04
    test = data[data.index > datetime.datetime(2015,12,30)]

    le = preprocessing.LabelEncoder()
    test['UpDown'] = (test['Close'] - test['Open']) / test['Open']
    threshold = 0.000
    test.UpDown[test.UpDown >= threshold] = 'Up'
    test.UpDown[test.UpDown < threshold] = 'Down'
    test.UpDown = le.fit(test.UpDown).transform(test.UpDown)
    test.UpDown = test.UpDown.shift(-1)  # shift 1, so the y is actually next day's up/down

    dataMod = util.applyFeatures(data, range(1, delta))
    dataMod = util.preprocessData(dataMod)
    tr = dataMod[dataMod.index <= datetime.datetime(2015,12,30)]
    #tr = dataMod[dataMod.index <= datetime.datetime(2016,06,30)]
    te = dataMod[dataMod.index > datetime.datetime(2015,12,30)]
    te = te[te.columns[0:-1]]  # remove Up/Down label from testing

    clf = classifier.buildModel(tr, method, parameters)
    if method == 'RNN':
        te = numpy.reshape(numpy.array(te), (te.shape[0], 1, te.shape[1]))
    pred = clf.predict(te)

    profits = numpy.zeros(pred.size)
    for i in range(pred.size):
        if pred[i] < 0.5:
            # predict long
            p = (test.Close[i+1] - test.Open[i+1]) / test.Open[i+1]
        else:
            # predict short
            p = -(test.Close[i+1] - test.Open[i+1]) / test.Open[i+1]
        profits[i] = p
    return profits
def testcode_marketsim(symbol = 'ML_based', base_dir = './orders/', \
                       sv = 100000, leverLimit = True, verbose = True):
    ### Use one of the order folders below ###
    # of = "./orders/benchmark.csv"
    # of = "./orders/bestPossibleStrategy.csv"
    # of = "./orders/rule_based.csv"
    # of = "./orders/ML_based.csv"
    of = symbol_to_path(symbol, base_dir)
    # sv = 100000  # starting value of portfolio, i.e. initial cash available

    # Process orders
    portVals = compute_portvals(of, sv, leverLimit)
    if isinstance(portVals, pd.DataFrame):
        portVals = portVals[portVals.columns[0]]  # just get the first column as a Series
    else:
        "warning, code did not return a DataFrame"

    start_date = portVals.index[0]
    end_date = portVals.index[-1]
    pricesSPX = get_data(['$SPX'], pd.date_range(start_date, end_date))
    pricesSPX = pricesSPX['$SPX']

    cum_ret, avg_daily_ret, std_daily_ret, sharpe_ratio = get_portfolio_stats(portVals, \
        daily_rf = 0, samples_per_year = 252)
    cum_ret_SPY, avg_daily_ret_SPY, std_daily_ret_SPY, sharpe_ratio_SPY = \
        get_portfolio_stats(pricesSPX, daily_rf = 0, samples_per_year = 252)

    # Compare portfolio against $SPX
    if verbose == True:
        dfTemp = pd.concat([portVals, pricesSPX], axis = 1, keys = ['portfolio', '$SPX'])
        plot_normalized_data(dfTemp, '', '', '')

        print "\nDate Range: {} to {}".format(start_date.date(), end_date.date())
        print
        print "Sharpe Ratio of Fund: {}".format(sharpe_ratio)
        print "Sharpe Ratio of SPY : {}".format(sharpe_ratio_SPY)
        print
        print "Cumulative Return of Fund: {}".format(cum_ret)
        print "Cumulative Return of SPY : {}".format(cum_ret_SPY)
        print
        print "Standard Deviation of Fund: {}".format(std_daily_ret)
        print "Standard Deviation of SPY : {}".format(std_daily_ret_SPY)
        print
        print "Average Daily Return of Fund: {}".format(avg_daily_ret)
        print "Average Daily Return of SPY : {}".format(avg_daily_ret_SPY)
        print
        print "Final Portfolio Value: {}".format(portVals[-1])

    return cum_ret, portVals
def test_code():
    ### Use one of the order folders below ###
    # of = "./orders/orders-leverage-3.csv"  # verify from wiki
    of = "./orders_mc2p1_spr2016/orders-12-modified.csv"  # verify from saved pdf
    sv = 1000000  # starting value of portfolio, i.e. initial cash available

    # Process orders
    portVals = compute_portvals(orders_file = of, start_val = sv)
    if isinstance(portVals, pd.DataFrame):
        portVals = portVals[portVals.columns[0]]  # just get the first column as a Series
    else:
        "warning, code did not return a DataFrame"

    start_date = portVals.index[0]
    end_date = portVals.index[-1]
    pricesSPX = get_data(['$SPX'], pd.date_range(start_date, end_date))
    pricesSPX = pricesSPX['$SPX']

    dfTemp = pd.concat([portVals, pricesSPX], axis = 1, keys = ['portfolio', '$SPX'])
    plot_normalized_data(dfTemp, '', '', '')

    cum_ret, avg_daily_ret, std_daily_ret, sharpe_ratio = get_portfolio_stats(portVals, \
        daily_rf = 0, samples_per_year = 252)
    cum_ret_SPY, avg_daily_ret_SPY, std_daily_ret_SPY, sharpe_ratio_SPY = \
        get_portfolio_stats(pricesSPX, daily_rf = 0, samples_per_year = 252)

    # Compare portfolio against $SPX
    print "\nDate Range: {} to {}".format(start_date.date(), end_date.date())
    print
    print "Sharpe Ratio of Fund: {}".format(sharpe_ratio)
    print "Sharpe Ratio of SPY : {}".format(sharpe_ratio_SPY)
    print
    print "Cumulative Return of Fund: {}".format(cum_ret)
    print "Cumulative Return of SPY : {}".format(cum_ret_SPY)
    print
    print "Standard Deviation of Fund: {}".format(std_daily_ret)
    print "Standard Deviation of SPY : {}".format(std_daily_ret_SPY)
    print
    print "Average Daily Return of Fund: {}".format(avg_daily_ret)
    print "Average Daily Return of SPY : {}".format(avg_daily_ret_SPY)
    print
    print "Final Portfolio Value: {}".format(portVals[-1])