Python pandas module: rolling_mean() example source code
The following 50 code examples, extracted from open-source Python projects, illustrate how to use pandas.rolling_mean().
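Note: pd.rolling_mean() belongs to the legacy rolling-function API that pandas deprecated in version 0.18.0 and later removed; on current pandas the same moving average is written with the .rolling() accessor. A minimal sketch of the equivalence, using made-up data:

import pandas as pd

s = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])   # illustrative values

# legacy call, as it appears in the examples below (old pandas only):
# ma = pd.rolling_mean(s, 3)

# modern equivalent:
ma = s.rolling(window=3).mean()
print(ma)

The examples below are reproduced as written, so most of them assume a pandas (and often Python 2) version that still ships rolling_mean().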
def _save(stock, conn):
    try:
        print "save ----- :", stock
        marketday = stocksinfo.at[stock, 'timeToMarket']
        startday = pd.Timestamp(str(marketday))
        # print marketday, startday, str(startday)[:10]
        # df = ts.get_h_data(code, start=str(startday)[:10], retry_count = 5)
        df = ts.get_h_data(stock, start=str(startday)[:10], retry_count=5, pause=1)
        df = df.sort_index(ascending=True)
        # ma_list = [5,10,20,60]
        # for ma in ma_list:
        #     df['MA_' + str(ma)] = pd.rolling_mean(df.close, ma)
        # print df[['open','high','close','low','volume']].head(2)
        df[['open', 'high', 'close', 'low', 'volume']].to_sql(stock, conn, if_exists='append')
    except Exception, arg:
        print "exceptions:", stock, arg
        errorlist.append(stock)
def VCI(df, n, rng = 8):
    if n > 7:
        varA = pd.rolling_max(df.high, rng) - pd.rolling_min(df.low, rng)
        varB = varA.shift(rng)
        varC = varA.shift(rng*2)
        varD = varA.shift(rng*3)
        varE = varA.shift(rng*4)
        avg_tr = (varA+varB+varC+varD+varE)/25.0
    else:
        tr = pd.concat([df.high - df.low, abs(df.close - df.close.shift(1))], join='outer', axis=1).max(1)
        avg_tr = pd.rolling_mean(tr, n) * 0.16
    avg_pr = (pd.rolling_mean(df.high, n) + pd.rolling_mean(df.low, n))/2.0
    VO = pd.Series((df.open - avg_pr)/avg_tr, name = 'VCIO')
    VH = pd.Series((df.high - avg_pr)/avg_tr, name = 'VCIH')
    VL = pd.Series((df.low - avg_pr)/avg_tr, name = 'VCIL')
    VC = pd.Series((df.close - avg_pr)/avg_tr, name = 'VCIC')
    return pd.concat([VO, VH, VL, VC], join='outer', axis=1)
def ASCTREND(df, n, risk = 3, stop_ratio = 0.5, atr_mode = 0):
    wpr = WPR(df, n)
    uplevel = 67 + risk
    dnlevel = 33 - risk
    signal = pd.Series(0, index = df.index, name = "ASCSIG_%s" % str(n))
    trend = pd.Series(index = df.index, name = "ASCTRD_%s" % str(n))
    stop = pd.Series(index = df.index, name = "ASCSTOP_%s" % str(n))
    ind = (wpr >= uplevel) & (wpr.shift(1) < uplevel)
    signal[ind] = 1
    trend[ind] = 1
    ind = (wpr <= dnlevel) & (wpr.shift(1) > dnlevel)
    signal[ind] = -1
    trend[ind] = -1
    trend = trend.fillna(method='ffill')
    if atr_mode == 0:
        atr = ATR(df, n + 1)
    else:
        atr = pd.rolling_mean(df['high'] - df['low'], n + 1)
    stop[trend > 0] = df['low'] - stop_ratio * atr
    stop[trend < 0] = df['high'] + stop_ratio * atr
    return pd.concat([signal, trend, stop], join='outer', axis=1)
def addFeatures(dataframe, adjclose, returns, n):
    """
    operates on two columns of dataframe:
    - n >= 2
    - given Return_* computes the return of day i with respect to day i-n
    - given AdjClose_* computes its moving average over n days
    """
    # print adjclose
    if adjclose[9] == "^":
        return_n = adjclose[10:] + "_Time_" + str(n)
        dataframe[return_n] = dataframe[adjclose].pct_change(n)
        # print return_n
        roll_n = returns[8:] + "_RollMean_" + str(n)
        dataframe[roll_n] = pd.rolling_mean(dataframe[returns], n)
    else:
        return_n = adjclose[9:] + "_Time_" + str(n)
        dataframe[return_n] = dataframe[adjclose].pct_change(n)
        # print return_n
        roll_n = returns[7:] + "_RollMean_" + str(n)
        dataframe[roll_n] = pd.rolling_mean(dataframe[returns], n)
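The string slicing above assumes column names of the form AdjClose_<ticker> and Return_<ticker>, with index tickers carrying a leading ^. A hedged usage sketch with hypothetical column names and made-up data (it needs a pandas version that still provides rolling_mean):

import pandas as pd

# hypothetical frame following the AdjClose_* / Return_* naming convention
df = pd.DataFrame({
    'AdjClose_AAPL': [10.0, 10.5, 10.2, 10.8, 11.0, 11.3],
    'Return_AAPL':   [0.00, 0.05, -0.03, 0.06, 0.02, 0.03],
})
addFeatures(df, 'AdjClose_AAPL', 'Return_AAPL', 2)
# adds 'AAPL_Time_2' (2-day percentage change of the adjusted close)
# and 'AAPL_RollMean_2' (2-day rolling mean of the daily returns)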
def MFI(df, n):
    PP = (df['High'] + df['Low'] + df['Close']) / 3
    i = 0
    PosMF = [0]
    while i < df.index[-1]:
        if PP[i + 1] > PP[i]:
            PosMF.append(PP[i + 1] * df.get_value(i + 1, 'Volume'))
        else:
            PosMF.append(0)
        i = i + 1
    PosMF = pd.Series(PosMF)
    TotMF = PP * df['Volume']
    MFR = pd.Series(PosMF / TotMF)
    MFI = pd.Series(pd.rolling_mean(MFR, n), name = 'MFI_' + str(n))
    df = df.join(MFI)
    return df
#On-balance Volume
def OBV(df, n):
    i = 0
    OBV = [0]
    while i < df.index[-1]:
        if df.get_value(i + 1, 'Close') - df.get_value(i, 'Close') > 0:
            OBV.append(df.get_value(i + 1, 'Volume'))
        if df.get_value(i + 1, 'Close') - df.get_value(i, 'Close') == 0:
            OBV.append(0)
        if df.get_value(i + 1, 'Close') - df.get_value(i, 'Close') < 0:
            OBV.append(-df.get_value(i + 1, 'Volume'))
        i = i + 1
    OBV = pd.Series(OBV)
    OBV_ma = pd.Series(pd.rolling_mean(OBV, n), name = 'OBV_' + str(n))
    df = df.join(OBV_ma)
    return df
#Force Index
def KST(df, r1, r2, r3, r4, n1, n2, n3, n4, s):
    M = df['Close'].diff(r1)
    N = df['Close'].shift(r1)
    ROC1 = (M/N)*100
    M = df['Close'].diff(r2)
    N = df['Close'].shift(r2)
    ROC2 = (M/N)*100
    M = df['Close'].diff(r3)
    N = df['Close'].shift(r3)
    ROC3 = (M/N)*100
    M = df['Close'].diff(r4)
    N = df['Close'].shift(r4)
    ROC4 = (M/N)*100
    KST = pd.Series(pd.rolling_mean(ROC1, n1) +
                    pd.rolling_mean(ROC2, n2) * 2 +
                    pd.rolling_mean(ROC3, n3) * 3 +
                    pd.rolling_mean(ROC4, n4) * 4, name = 'KST')
    Sig = pd.Series(pd.rolling_mean(KST, s), name='Signal')
    print KST, Sig
    df = df.join(KST)
    df = df.join(Sig)
    return df
def KST(df, r1, r2, r3, r4, n1, n2, n3, n4, s):
    M = df['Close'].diff(r1)
    N = df['Close'].shift(r1)
    ROC1 = (M / N)*100
    M = df['Close'].diff(r2)
    N = df['Close'].shift(r2)
    ROC2 = (M / N)*100
    M = df['Close'].diff(r3)
    N = df['Close'].shift(r3)
    ROC3 = (M / N)*100
    M = df['Close'].diff(r4)
    N = df['Close'].shift(r4)
    ROC4 = (M / N)*100
    KST = pd.Series(pd.rolling_mean(ROC1, n1) + pd.rolling_mean(ROC2, n2) * 2 +
                    pd.rolling_mean(ROC3, n3) * 3 + pd.rolling_mean(ROC4, n4) * 4, name = 'KST')
    Sig = pd.Series(pd.rolling_mean(KST, s), name = 'Signal')
    df = df.join(KST)
    df = df.join(Sig)
    #df = df.round(2)
    return df
# Retrieve the S&P 500 data from Yahoo finance:
def update_plot(self, episode_index):
    plot_right_edge = episode_index
    plot_left_edge = max(0, plot_right_edge - self.plot_episode_count)
    # Update point plot.
    x = range(plot_left_edge, plot_right_edge)
    y = self.lengths[plot_left_edge:plot_right_edge]
    self.point_plot.set_xdata(x)
    self.point_plot.set_ydata(y)
    self.ax.set_xlim(plot_left_edge, plot_left_edge + self.plot_episode_count)
    # Update rolling mean plot.
    mean_kernel_size = 101
    rolling_mean_data = np.concatenate((np.zeros(mean_kernel_size), self.lengths[plot_left_edge:episode_index]))
    rolling_means = pd.rolling_mean(
        rolling_mean_data,
        window=mean_kernel_size,
        min_periods=0
    )[mean_kernel_size:]
    self.mean_plot.set_xdata(range(plot_left_edge, plot_left_edge + len(rolling_means)))
    self.mean_plot.set_ydata(rolling_means)
    # Repaint the surface.
    plt.draw()
    plt.pause(0.0001)
def signal_updown(dataframe, window):
    '''
    Generate an 'up' or 'down' signal as the target for analysis.
    Parameters:
        dataframe: dataframe whose signal is to be generated
        window: number of days ahead the signal looks
    Returns: dataframe with a 'signal' column
    '''
    signal = []
    values = dataframe['Closing Price'].values
    data_mean20 = pd.rolling_mean(dataframe['Closing Price'], window=20)
    values = data_mean20
    for i in range(0, len(values)-window):
        if(values[i+window] > values[i]):
            signal.append('up')
        else:
            signal.append('down')
    for i in range(window):
        signal.append('NaN')
    dataframe['signal'] = signal
    return dataframe
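A hedged usage sketch for the function above; the 'Closing Price' column name comes from the function itself, the data is made up, and the legacy pd.rolling_mean call means it needs an old pandas release:

import pandas as pd

prices = pd.DataFrame({'Closing Price': [float(x) for x in range(1, 41)]})  # hypothetical rising series
labelled = signal_updown(prices, window=5)
# the last `window` rows carry the placeholder string 'NaN'
print(labelled['signal'].value_counts())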
def MFI(df, n):
    PP = (df['High'] + df['Low'] + df['Close']) / 3
    i = 0
    PosMF = [0]
    while i < df.index[-1]:
        if PP[i + 1] > PP[i]:
            PosMF.append(PP[i + 1] * df.get_value(i + 1, 'Volume'))
        else:
            PosMF.append(0)
        i = i + 1
    PosMF = pd.Series(PosMF)
    TotMF = PP * df['Volume']
    MFR = pd.Series(PosMF / TotMF)
    MFI = pd.Series(pd.rolling_mean(MFR, n), name = 'MFI_' + str(n))
    df = df.join(MFI)
    return df
#On-balance Volume
def OBV(df, n):
    i = 0
    OBV = [0]
    while i < df.index[-1]:
        if df.get_value(i + 1, 'Close') - df.get_value(i, 'Close') > 0:
            OBV.append(df.get_value(i + 1, 'Volume'))
        if df.get_value(i + 1, 'Close') - df.get_value(i, 'Close') == 0:
            OBV.append(0)
        if df.get_value(i + 1, 'Close') - df.get_value(i, 'Close') < 0:
            OBV.append(-df.get_value(i + 1, 'Volume'))
        i = i + 1
    OBV = pd.Series(OBV)
    OBV_ma = pd.Series(pd.rolling_mean(OBV, n), name = 'OBV_' + str(n))
    df = df.join(OBV_ma)
    return df
#Force Index
def is_strong(df_close, window=12):
    """Over a 12-day window, what fraction of days the security's ROC exceeds the SET index's ROC (as a percentage)."""
    # df_roc has been calculated by daily_returns
    df_roc = roc(df_close)
    # Empty Data Frame
    df_main = pd.DataFrame(index=df_close.index, columns=df_close.columns)
    for symbol in df_close.columns:
        if symbol == 'SET':
            continue
        df_compare = df_roc['SET'] < df_roc[symbol]
        # In Python True is 1 and False is 0, so the rolling mean is the fraction of days:
        # 1 means the security's ROC beat the SET's on every day of the window
        # 0 means it never did
        df_main[symbol] = pd.rolling_mean(df_compare[window:], window)
    df_main['SET'] = 0
    print(df_main)
def BBANDS(df_price, periods=20, mul=2):
    # Middle Band = 20-day simple moving average (SMA)
    df_middle_band = pd.rolling_mean(df_price, window=periods)
    #df_middle_band = df_price.rolling(window=periods, center=False).mean()
    # 20-day standard deviation of price
    """ Pandas uses the unbiased estimator (N-1 in the denominator),
    whereas Numpy by default does not.
    To make them behave the same, pass ddof=1 to numpy.std()."""
    df_std = pd.rolling_std(df_price, window=periods)
    #df_std = df_price.rolling(window=periods, center=False).std()
    # Upper Band = 20-day SMA + (20-day standard deviation of price x 2)
    df_upper_band = df_middle_band + (df_std * mul)
    # Lower Band = 20-day SMA - (20-day standard deviation of price x 2)
    df_lower_band = df_middle_band - (df_std * mul)
    return (df_upper_band, df_middle_band, df_lower_band)
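A quick check of the ddof remark in the function above, as a minimal sketch with made-up values:

import numpy as np
import pandas as pd

x = [1.0, 2.0, 4.0, 8.0]  # illustrative values
# pandas uses ddof=1 (N-1 denominator) by default...
assert abs(pd.Series(x).std() - np.std(x, ddof=1)) < 1e-9
# ...while numpy defaults to ddof=0 (N denominator), so the two differ unless ddof=1 is passed
assert np.std(x) != np.std(x, ddof=1)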
def MFI(df, n):
    PP = (df['High'] + df['Low'] + df['Close']) / 3
    i = 0
    PosMF = [0]
    while i < df.index[-1]:
        if PP[i + 1] > PP[i]:
            PosMF.append(PP[i + 1] * df.get_value(i + 1, 'Volume'))
        else:
            PosMF.append(0)
        i = i + 1
    PosMF = pd.Series(PosMF)
    TotMF = PP * df['Volume']
    MFR = pd.Series(PosMF / TotMF)
    MFI = pd.Series(pd.rolling_mean(MFR, n), name = 'MFI_' + str(n))
    df = df.join(MFI)
    return df
#On-balance Volume
def OBV(df, n):
    i = 0
    OBV = [0]
    while i < df.index[-1]:
        if df.get_value(i + 1, 'Close') - df.get_value(i, 'Close') > 0:
            OBV.append(df.get_value(i + 1, 'Volume'))
        if df.get_value(i + 1, 'Close') - df.get_value(i, 'Close') == 0:
            OBV.append(0)
        if df.get_value(i + 1, 'Close') - df.get_value(i, 'Close') < 0:
            OBV.append(-df.get_value(i + 1, 'Volume'))
        i = i + 1
    OBV = pd.Series(OBV)
    OBV_ma = pd.Series(pd.rolling_mean(OBV, n), name = 'OBV_' + str(n))
    df = df.join(OBV_ma)
    return df
#Force Index
def test_multiple_talib_with_args(self):
    zipline_transforms = [ta.MA(timeperiod=10),
                          ta.MA(timeperiod=25)]
    talib_fn = talib.abstract.MA
    algo = TALIBAlgorithm(talib=zipline_transforms, identifiers=[0])
    algo.run(self.source)
    # Test if computed values match those computed by pandas rolling mean.
    sid = 0
    talib_values = np.array([x[sid] for x in
                             algo.talib_results[zipline_transforms[0]]])
    np.testing.assert_array_equal(talib_values,
                                  pd.rolling_mean(self.panel[0]['price'],
                                                  10).values)
    talib_values = np.array([x[sid] for x in
                             algo.talib_results[zipline_transforms[1]]])
    np.testing.assert_array_equal(talib_values,
                                  pd.rolling_mean(self.panel[0]['price'],
                                                  25).values)
    for t in zipline_transforms:
        talib_result = np.array(algo.talib_results[t][-1])
        talib_data = dict()
        data = t.window
        # TODO: Figure out if we are clobbering the tests by this
        # protection against empty windows
        if not data:
            continue
        for key in ['open', 'high', 'low', 'volume']:
            if key in data:
                talib_data[key] = data[key][0].values
        talib_data['close'] = data['price'][0].values
        expected_result = talib_fn(talib_data, **t.call_kwargs)[-1]
        np.testing.assert_allclose(talib_result, expected_result)
def MA(self, param):
    if param[1] == 0:
        return pd.rolling_mean(param[0])
    return pd.rolling_mean(param[0], param[1])
def load_timings(path, args, y):
    logging.debug("Loading timings from {}".format(path))
    tm = numpy.load(path)
    num_steps = min(tm['step'], args.finish)
    df = pandas.DataFrame({k : tm[k] for k in [y, 'time_step']})[args.start:num_steps]
    one_step = df['time_step'].median() / 3600.0
    logging.debug("Median time for one step is {} hours".format(one_step))
    if args.hours:
        df.index = (args.start + numpy.arange(0, df.index.shape[0])) * one_step
    return pandas.rolling_mean(df, args.window).iloc[args.window:]
def load_timings(path, y="cost2_p_expl", start=0, finish=3000000, window=100, hours=False):
    logging.debug("Loading timings from {}".format(path))
    tm = numpy.load(path)
    num_steps = min(tm['step'], finish)
    df = pandas.DataFrame({k : tm[k] for k in [y, 'time_step']})[start:num_steps]
    one_step = df['time_step'][-window:].median() / 3600.0
    print "Median time for one step is {} hours".format(one_step)
    if hours:
        df.index = (start + numpy.arange(0, df.index.shape[0])) * one_step
    return pandas.rolling_mean(df, window).iloc[window:]
def determine_TESMaxSig(self, doplot=False):
    max_sig = -1000
    max_tes = -10
    # note: the first samples are removed because of steps; otherwise one may need https://github.com/thomasbkahn/step-detect.git
    for tes in range(128):
        tfromLib2 = self.timelines[tes]
        tfromLib = tfromLib2[self.minStep:]
        if not tes in self.tes_blacklist:
            mean_gliss = pandas.rolling_mean(tfromLib, 50)
            mean_gliss = mean_gliss[50:]
            delta_mean = numpy.fabs(mean_gliss.max() - mean_gliss.min())
            if delta_mean > max_sig:
                max_sig = delta_mean
                max_tes = tes
            #print delta_mean, max_sig
    print 'chosen tes=', max_tes
    self.maxTES = max_tes
    if doplot:
        data_maxtes = (self.timelines[max_tes])[self.minStep:]
        plt.figure()
        plt.plot(data_maxtes)
        plt.show()
    d, pic_array = self.tesDataObj[max_tes].compute_summedData(doplot)
    self.picArray = pic_array
    return self.maxTES, self.picArray
def noisefilter(t, signal, avgpts=30, smoothfac=1600):
    smooth = rolling_mean(signal[::-1], avgpts)[::-1]
    fspline = InterpolatedUnivariateSpline(t[:-avgpts], smooth[:-avgpts], k=4)
    fspline.set_smoothing_factor(smoothfac)
    return fspline(t)
def atr(bars, window=14, exp=False):
    tr = true_range(bars)
    if exp:
        res = rolling_weighted_mean(tr, window)
    else:
        res = rolling_mean(tr, window)
    res = pd.Series(res)
    return (res.shift(1) * (window - 1) + res) / window
# ---------------------------------------------
def rolling_mean(series, window=200, min_periods=None):
    min_periods = window if min_periods is None else min_periods
    try:
        if min_periods == window:
            # fast numpy-based path when no partial windows are needed
            return numpy_rolling_mean(series, window, True)
        else:
            try:
                return series.rolling(window=window, min_periods=min_periods).mean()
            except BaseException:
                return pd.Series(series).rolling(window=window, min_periods=min_periods).mean()
    except BaseException:
        # fall back to the legacy pandas API
        return pd.rolling_mean(series, window=window, min_periods=min_periods)
# ---------------------------------------------
def sma(series, window=200, min_periods=None):
    return rolling_mean(series, window=window, min_periods=min_periods)
# ---------------------------------------------
def bollinger_bands(series, window=20, stds=2):
    sma = rolling_mean(series, window=window)
    std = rolling_std(series, window=window)
    upper = sma + std * stds
    lower = sma - std * stds
    return pd.DataFrame(index=series.index, data={
        'upper': upper,
        'mid': sma,
        'lower': lower
    })
# ---------------------------------------------
def keltner_channel(bars, window=14, atrs=2):
    typical_mean = rolling_mean(typical_price(bars), window)
    atrval = atr(bars, window) * atrs
    upper = typical_mean + atrval
    lower = typical_mean - atrval
    return pd.DataFrame(index=bars.index, data={
        'upper': upper.values,
        'mid': typical_mean.values,
        'lower': lower.values
    })
# ---------------------------------------------
def MA(df, n, field = 'close'):
    return pd.Series(pd.rolling_mean(df[field], n), name = 'MA_' + field.upper() + '_' + str(n), index = df.index)
def SMAVAR(df, n, field = 'close'):
    ma_ts = MA(df, n, field)
    var_ts = pd.Series(pd.rolling_mean(df[field]**2, n) - ma_ts**2, name = 'SVAR_' + field.upper() + '_' + str(n))
    return pd.concat([ma_ts, var_ts], join='outer', axis=1)
def BBANDS(df, n, k = 2):
    MA = pd.Series(pd.rolling_mean(df['close'], n))
    MSD = pd.Series(pd.rolling_std(df['close'], n))
    b1 = 2 * k * MSD / MA
    B1 = pd.Series(b1, name = 'BollingerB' + str(n))
    b2 = (df['close'] - MA + k * MSD) / (2 * k * MSD)
    B2 = pd.Series(b2, name = 'Bollingerb' + str(n))
    return pd.concat([B1, B2], join='outer', axis=1)
#Pivot Points, Supports and Resistances
def OBV(df, n):
    PosVol = pd.Series(df['volume'])
    NegVol = pd.Series(-df['volume'])
    PosVol[df['close'] <= df['close'].shift(1)] = 0
    NegVol[df['close'] >= df['close'].shift(1)] = 0
    OBV = pd.Series(pd.rolling_mean(PosVol + NegVol, n), name = 'OBV' + str(n))
    return OBV
#Force Index
def EOM(df, n):
    EoM = (df['high'].diff(1) + df['low'].diff(1)) * (df['high'] - df['low']) / (2 * df['volume'])
    Eom_ma = pd.Series(pd.rolling_mean(EoM, n), name = 'EoM' + str(n))
    return Eom_ma
#Commodity Channel Index
def CCI(df, n):
    PP = (df['high'] + df['low'] + df['close']) / 3
    CCI = pd.Series((PP - pd.rolling_mean(PP, n)) / pd.rolling_std(PP, n) / 0.015, name = 'CCI' + str(n))
    return CCI
def KELCH(df, n):
    KelChM = pd.Series(pd.rolling_mean((df['high'] + df['low'] + df['close']) / 3, n), name = 'KelChM' + str(n))
    KelChU = pd.Series(pd.rolling_mean((4 * df['high'] - 2 * df['low'] + df['close']) / 3, n), name = 'KelChU' + str(n))
    KelChD = pd.Series(pd.rolling_mean((-2 * df['high'] + 4 * df['low'] + df['close']) / 3, n), name = 'KelChD' + str(n))
    return pd.concat([KelChM, KelChU, KelChD], join='outer', axis=1)
#Ultimate Oscillator
def HEIKEN_ASHI(df, period1):
    SM_O = pd.rolling_mean(df['open'], period1)
    SM_H = pd.rolling_mean(df['high'], period1)
    SM_L = pd.rolling_mean(df['low'], period1)
    SM_C = pd.rolling_mean(df['close'], period1)
    HA_C = pd.Series((SM_O + SM_H + SM_L + SM_C)/4.0, name = 'HAclose')
    HA_O = pd.Series(SM_O, name = 'HAopen')
    HA_H = pd.Series(SM_H, name = 'HAhigh')
    HA_L = pd.Series(SM_L, name = 'HAlow')
    for idx, dateidx in enumerate(HA_C.index):
        if idx >= (period1):
            HA_O[idx] = (HA_O[idx-1] + HA_C[idx-1])/2.0
            HA_H[idx] = max(SM_H[idx], HA_O[idx], HA_C[idx])
            HA_L[idx] = min(SM_L[idx], HA_O[idx], HA_C[idx])
    return pd.concat([HA_O, HA_H, HA_L, HA_C], join='outer', axis=1)
def SPBFILTER(df, n1 = 40, n2 = 60, n3 = 0, field = 'close'):
    if n3 == 0:
        n3 = int((n1 + n2)/2)
    a1 = 5.0/n1
    a2 = 5.0/n2
    B = [a1-a2, a2-a1]
    A = [1, (1-a1)+(1-a2), -(1-a1)*(1-a2)]
    PB = pd.Series(signal.lfilter(B, A, df[field]), name = 'SPB_%s_%s' % (n1, n2))
    RMS = pd.Series(pd.rolling_mean(PB*PB, n3)**0.5, name = 'SPBRMS__%s_%s' % (n1, n2))
    return pd.concat([PB, RMS], join='outer', axis=1)
def rolling_mean(self, df, window, shift=1):
    """
    :param df: input dataFrame
    :param window: rolling window size
    :param shift: number of periods to shift the result, default 1
    :return: rolling-mean dataFrame with the first 60 rows dropped
    """
    rolling_df = pd.rolling_mean(df, window=window).shift(shift)
    return rolling_df[60:]
def run(self):
    for dir_path in listdir(self.src_dir):
        src_dir = join(self.src_dir, dir_path)
        target_dir = join(self.target_dir, dir_path)
        makedirs(target_dir)
        for file in listdir(src_dir):
            df = pd.read_csv(join(src_dir, file))
            if len(df) > 0:
                df['date'] = pd.to_datetime(df['date'], format='%Y-%m-%d')
                offset = pd.Timedelta(days=60)
                date_range = pd.date_range((df['date'].min() + offset).date(),
                                           df['date'].max().date())
                averin1 = self.rolling_mean(df['aver'], 1)
                averin7 = self.rolling_mean(df['aver'], 7)
                averin15 = self.rolling_mean(df['aver'], 15)
                averin30 = self.rolling_mean(df['aver'], 30)
                averin90 = self.rolling_mean(df['aver'], 60)
                assert len(averin1) == len(averin30) == len(averin7) == len(averin15) == len(averin90) == len(
                    df['aver'][60:])
                target_df = pd.DataFrame({
                    'province': df['province'][0],
                    'market': df['market'][0],
                    'type': df['type'][0],
                    'key': df['key'][0],
                    'date': date_range,
                    'aver': df['aver'][60:],
                    'averin1': averin1,
                    'averin7': averin7,
                    'averin15': averin15,
                    'averin30': averin30,
                    'averin90': averin90
                })
                print(join(target_dir, file))
                target_df.to_csv(join(target_dir, file), header=True, index=False,
                                 columns=['province', 'market', 'type', 'key', 'date', 'averin1',
                                          'averin7', 'averin15', 'averin30', 'averin90', 'aver'])
def get_rolling_mean(values, window):
    """Return rolling mean of given values, using specified window size."""
    return values.rolling(window=window).mean()
    # return pd.rolling_mean(values, window=window)
def addFeatures(dataframe, adjclose, returns, n):
    """
    operates on two columns of dataframe:
    - n >= 2
    - given Return_* computes the return of day i with respect to day i-n
    - given AdjClose_* computes its moving average over n days
    """
    return_n = adjclose[9:] + "Time_" + str(n)
    dataframe[return_n] = dataframe[adjclose].pct_change(n)
    roll_n = returns[7:] + "RollMean_" + str(n)
    dataframe[roll_n] = pd.rolling_mean(dataframe[returns], n)
def makeMa(self):
    self.df = self.df.sort_values(by='dateline', ascending=False)
    #print self.df
    #sys.exit()
    ma_list = [5, 89]
    for ma in ma_list:
        self.df['MA_' + str(ma)] = pandas.rolling_mean(self.df['close'], ma)
    #self.df = self.df[self.df['MA_89'].notnull()]
    #print self.df
    #sys.exit()
    # dif: difference between the 5-day and 89-day moving averages (code[13])
    self.df['dif'] = self.df['MA_5'] - self.df['MA_89']
    # difma: 36-period moving average of dif (code[14])
    self.df['difma'] = pandas.rolling_mean(self.df['dif'], 36)
def MA(df, n):
    MA = pd.Series(pd.rolling_mean(df['Close'], n), name = 'MA_' + str(n))
    df = df.join(MA)
    return df
#Exponential Moving Average
def BBANDS(df, n):
    MA = pd.Series(pd.rolling_mean(df['Close'], n))
    MSD = pd.Series(pd.rolling_std(df['Close'], n))
    b1 = 4 * MSD / MA
    B1 = pd.Series(b1, name = 'BollingerB_' + str(n))
    df = df.join(B1)
    b2 = (df['Close'] - MA + 2 * MSD) / (4 * MSD)
    B2 = pd.Series(b2, name = 'Bollinger%b_' + str(n))
    df = df.join(B2)
    return df
#Pivot Points, Supports and Resistances
def EOM(df, n):
    EoM = (df['High'].diff(1) + df['Low'].diff(1)) * (df['High'] - df['Low']) / (2 * df['Volume'])
    Eom_ma = pd.Series(pd.rolling_mean(EoM, n), name = 'EoM_' + str(n))
    df = df.join(Eom_ma)
    return df
#Commodity Channel Index
def KELCH(df, n):
    KelChM = pd.Series(pd.rolling_mean((df['High'] + df['Low'] + df['Close']) / 3, n), name = 'KelChM_' + str(n))
    KelChU = pd.Series(pd.rolling_mean((4 * df['High'] - 2 * df['Low'] + df['Close']) / 3, n), name = 'KelChU_' + str(n))
    KelChD = pd.Series(pd.rolling_mean((-2 * df['High'] + 4 * df['Low'] + df['Close']) / 3, n), name = 'KelChD_' + str(n))
    df = df.join(KelChM)
    df = df.join(KelChU)
    df = df.join(KelChD)
    return df
#Ultimate Oscillator
def calculateBollingerBandsValue(df):
    simpleMovingAverage = pandas.rolling_mean(df, window=5)
    stdDeviation = pandas.rolling_std(df, window=5)
    bollingerBandsValue = (df - simpleMovingAverage)/(2*stdDeviation)
    bollingerBandsValue = bollingerBandsValue.dropna()
    return bollingerBandsValue
def calculateSimpleMovingAverage(df):
    simpleMovingAverage = pandas.rolling_mean(df, window=5)
    simpleMovingAverage = normalize(simpleMovingAverage)
    simpleMovingAverage = simpleMovingAverage.dropna()
    return simpleMovingAverage
def plot(df):
    rollingMean = pandas.rolling_mean(df['Close Price'], window=100)
    rollingStdv = pandas.rolling_std(df['Close Price'], window=100)
    plotter.plot(rollingMean)
    # plotting bollinger bands
    plotter.plot(rollingMean + rollingStdv * 2)
    plotter.plot(rollingMean - rollingStdv * 2)
    # df1 = df[['Date','Close']]
    plotter.plot(df)
    plotter.show()
def get_rolling_mean(values, window, min_periods=None):
    return pd.rolling_mean(values, window=window, min_periods=min_periods)
def apply_rolling_window(df, width):
    df = df.ix[1:, :]
    df_num = df.select_dtypes(include=[np.float, np.int])
    df_non_num = df.select_dtypes(exclude=[np.float, np.int])
    df_num_window = pd.rolling_mean(df_num, width, min_periods=1)
    df_window = pd.concat([df_non_num, df_num_window], axis=1)
    return df_window