Python numpy module: ediff1d() example source code
The following code examples were extracted from open-source Python projects to illustrate how to use numpy.ediff1d().
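All of the snippets below assume that numpy has been imported as np. As a quick orientation, here is a minimal sketch of what numpy.ediff1d() computes; the array values are the same arbitrary numbers used in the numpy docstring further down this page:

import numpy as np

x = np.array([1, 2, 4, 7, 0])

diffs = np.ediff1d(x)                            # consecutive differences: 1, 2, 3, -7
padded = np.ediff1d(x, to_begin=-99, to_end=99)  # -99, 1, 2, 3, -7, 99
print(diffs, padded)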
def eta(radii, phot):
    r"""
    eta = I(r) / \bar{I}(<r)

    radii -- 1d array of aperture photometry radii
    phot  -- 1d array of aperture photometry fluxes

    This is currently calculated quite naively and could probably be done better.
    """
    phot_area = np.pi * radii**2
    phot_area_diff = np.ediff1d(phot_area, to_begin=phot_area[0])
    I_bar = phot / phot_area
    I_delta_r = np.ediff1d(phot, to_begin=phot[0]) / phot_area_diff
    I_r = (I_delta_r[:-1] + I_delta_r[1:]) / 2  # lost the last array element here
    I_r = np.append(I_r, I_delta_r[-1])         # added it back here
    eta = I_r / I_bar
    return eta
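A minimal usage sketch for eta; the radii and enclosed fluxes below are made-up placeholder values, not data from the original project:

import numpy as np

radii = np.array([1.0, 2.0, 3.0, 4.0, 5.0])      # hypothetical aperture radii
phot = np.array([10.0, 30.0, 50.0, 65.0, 75.0])  # hypothetical enclosed fluxes

profile = eta(radii, phot)
print(profile)  # eta is ~1 for a flat surface-brightness profile, < 1 when light is centrally concentrated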
def SpaceFunc(val_x_array,val_y_array):
spa_X_array = np.ediff1d(val_x_array)
spa_Y_array = np.ediff1d(val_y_array)
return spa_X_array,spa_Y_array
# Function to convert a matrix to binary (cells with a value become 1, empty cells become 0)
def SpaceFunc(matr):
matr_shape = matr.shape
spa_X_array = np.array([])
spa_Y_array = np.array([])
val_X_matrix = np.zeros((matr_shape[0], matr_shape[1]), dtype=np.ndarray)
val_Y_matrix = np.zeros((matr_shape[0], matr_shape[1]), dtype=np.ndarray)
val_X_matrix_counter = np.zeros((matr_shape[0], matr_shape[1]), dtype=np.ndarray)
val_Y_matrix_counter=np.zeros((matr_shape[0], matr_shape[1]), dtype=np.ndarray)
counter_g1 = 0
while counter_g1 < matr_shape[1]:
counter_g2 = 0
while counter_g2 < matr_shape[0]:
matr_value = matr[counter_g2, counter_g1]
matr_value=np.asarray(matr_value)
if matr_value.size==3:
val_X_matrix[counter_g2, counter_g1] = matr_value[0]
val_Y_matrix[counter_g2, counter_g1] = matr_value[1]
val_X_matrix_counter[counter_g2, counter_g1] = 1
val_Y_matrix_counter[counter_g2, counter_g1] = 1
elif matr_value.size == 0:
val_X_matrix[counter_g2, counter_g1] = 0
val_Y_matrix[counter_g2, counter_g1] = 0
val_X_matrix_counter[counter_g2, counter_g1] = 0
val_Y_matrix_counter[counter_g2, counter_g1] = 0
counter_g2 = counter_g2 + 1
counter_g1 = counter_g1 + 1
val_X_array_counter = val_X_matrix_counter.sum(axis=0)
val_Y_array_counter = val_Y_matrix_counter.sum(axis=1)
val_X_array_acc = val_X_matrix.sum(axis=0)
val_Y_array_acc=val_Y_matrix.sum(axis=1)
val_X_array = val_X_array_acc/val_X_array_counter
val_Y_array = val_Y_array_acc / val_Y_array_counter
    spa_X_array = np.ediff1d(val_X_array)
    spa_Y_array = np.ediff1d(val_Y_array)
    return spa_X_array, spa_Y_array
def learning_rate(lr=LEARNING_RATE):
decrease_rate = 0.75
lr = lr
window = []
window_size = 5
def f(loss = float('inf')):
nonlocal window
nonlocal lr
nonlocal window_size
window.append(loss)
if len(window) == window_size:
diffs = np.ediff1d(window)
            # Large loss fluctuations: the changes exceed 5% of the previous losses
            # and at least half of the steps increased the loss -> decrease the rate.
            if np.all(abs(diffs) > np.array(window[:-1])*0.05) and np.mean(diffs > 0) >= 0.5:
                print("fluctuating", window)
                lr *= decrease_rate
                window = []
            # The loss decreases, but each step by less than 1% -> increase the rate.
            elif np.all(abs(diffs) < np.array(window[:-1])*0.01) and np.all(diffs < 0):
                print("too slow", window)
                lr *= 1/decrease_rate
                window = []
            else:
                window.pop(0)
return lr
return f
def stopping_rule():
window = []
window_size = 5
def c(val_acc):
nonlocal window
nonlocal window_size
print('acc', val_acc)
window.append(val_acc)
if len(window) == window_size:
diffs = np.ediff1d(window)
if np.all(diffs < 0):
return True
window.pop(0)
return False
return c
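Both factories return stateful closures that are meant to be called once per training epoch. A minimal usage sketch, assuming a surrounding training loop; the loss and accuracy values and the LEARNING_RATE starting value below are placeholders, not anything from the original project:

LEARNING_RATE = 0.1  # placeholder; the original constant is defined elsewhere in the project

lr_schedule = learning_rate(lr=LEARNING_RATE)
should_stop = stopping_rule()

for epoch in range(100):
    train_loss = 1.0 / (epoch + 1)      # stand-in for a real training loss
    val_acc = 1.0 - 1.0 / (epoch + 2)   # stand-in for a real validation accuracy
    lr = lr_schedule(train_loss)        # shrinks lr on fluctuation, grows it when progress stalls
    if should_stop(val_acc):            # True once accuracy has dropped across the whole window
        break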
def ediff1d(ary, to_end=None, to_begin=None):
"""
The differences between consecutive elements of an array.
Parameters
----------
ary : array_like
If necessary, will be flattened before the differences are taken.
to_end : array_like, optional
Number(s) to append at the end of the returned differences.
to_begin : array_like, optional
Number(s) to prepend at the beginning of the returned differences.
Returns
-------
ediff1d : ndarray
The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.
See Also
--------
diff, gradient
Notes
-----
When applied to masked arrays, this function drops the mask information
if the `to_begin` and/or `to_end` parameters are used.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.ediff1d(x)
array([ 1, 2, 3, -7])
>>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
array([-99, 1, 2, 3, -7, 88, 99])
The returned array is always 1D.
>>> y = [[1, 2, 4], [1, 6, 24]]
>>> np.ediff1d(y)
array([ 1, 2, -3, 5, 18])
"""
ary = np.asanyarray(ary).flat
ed = ary[1:] - ary[:-1]
arrays = [ed]
if to_begin is not None:
arrays.insert(0, to_begin)
if to_end is not None:
arrays.append(to_end)
if len(arrays) != 1:
        # We'll save ourselves a copy of a potentially large array in the
        # common case where neither to_begin nor to_end was given.
ed = np.hstack(arrays)
return ed
def setxor1d(ar1, ar2, assume_unique=False):
"""
Find the set exclusive-or of two arrays.
Return the sorted, unique values that are in only one (not both) of the
input arrays.
Parameters
----------
ar1, ar2 : array_like
Input arrays.
assume_unique : bool
If True, the input arrays are both assumed to be unique, which
can speed up the calculation. Default is False.
Returns
-------
setxor1d : ndarray
Sorted 1D array of unique values that are in only one of the input
arrays.
Examples
--------
>>> a = np.array([1, 2, 3, 2, 4])
>>> b = np.array([2, 3, 5, 7, 5])
>>> np.setxor1d(a,b)
array([1, 4, 5, 7])
"""
if not assume_unique:
ar1 = unique(ar1)
ar2 = unique(ar2)
aux = np.concatenate((ar1, ar2))
if aux.size == 0:
return aux
aux.sort()
# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0
flag = np.concatenate(([True], aux[1:] != aux[:-1], [True]))
# flag2 = ediff1d( flag ) == 0
flag2 = flag[1:] == flag[:-1]
return aux[flag2]
def _compute_snp_distances(self, df, build):
if build == 36:
hapmap = self._resources.get_hapmap_h36()
else:
hapmap = self._resources.get_hapmap_h37()
for chrom in df['chrom'].unique():
if chrom not in hapmap.keys():
continue
# create a new dataframe from the positions for the current chromosome
temp = pd.DataFrame(df.loc[(df['chrom'] == chrom)]['pos'].values, columns=['pos'])
# merge HapMap for this chrom
temp = temp.append(hapmap[chrom], ignore_index=True)
# sort based on pos
temp = temp.sort_values('pos')
# fill cM rates forward and backward
temp['rate'] = temp['rate'].fillna(method='ffill')
temp['rate'] = temp['rate'].fillna(method='bfill')
# get difference between positions
pos_diffs = np.ediff1d(temp['pos'])
# compute cMs between each pos based on probabilistic recombination rate
# https://www.biostars.org/p/123539/
cMs_match_segment = (temp['rate'] * np.r_[pos_diffs, 0] / 1e6).values
# add back into temp
temp['cMs'] = np.r_[0, cMs_match_segment][:-1]
temp = temp.reset_index()
del temp['index']
# use null `map` values to find locations of SNPs
snp_indices = temp.loc[temp['map'].isnull()].index
# use SNP indices to determine boundaries over which to sum cMs
start_snp_ix = snp_indices + 1
end_snp_ix = np.r_[snp_indices, snp_indices[-1]][1:] + 1
snp_boundaries = np.c_[start_snp_ix, end_snp_ix]
# sum cMs between SNPs to get total cM distance between SNPs
# http://stackoverflow.com/a/7471967
c = np.r_[0, temp['cMs'].cumsum()][snp_boundaries]
cM_from_prev_snp = c[:, 1] - c[:, 0]
# debug
# temp.loc[snp_indices, 'cM_from_prev_snp'] = np.r_[0, cM_from_prev_snp][:-1]
# temp.to_csv('debug.csv')
# add back into df
df.loc[(df['chrom'] == chrom), 'cM_from_prev_snp'] = np.r_[0, cM_from_prev_snp][:-1]
return hapmap, df
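The snp_boundaries step above relies on a cumulative-sum trick (the Stack Overflow link in the comments) to sum the per-interval cM values between pairs of indices without an explicit loop. A standalone sketch of that trick, using toy numbers rather than the HapMap data:

import numpy as np

cMs = np.array([0.1, 0.2, 0.3, 0.4, 0.5])   # toy per-interval centimorgan values
boundaries = np.array([[0, 2], [2, 5]])      # [start, end) index pairs to sum over

c = np.r_[0, cMs.cumsum()][boundaries]       # prepend 0 so column 0 is the cumulative sum before each start
segment_sums = c[:, 1] - c[:, 0]             # totals per segment: 0.3 and 1.2
print(segment_sums)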
def estimate_baresine(self, x_axis, data, params):
""" Bare sine estimator with a frequency and phase.
@param numpy.array x_axis: 1D axis values
@param numpy.array data: 1D data, should have the same dimension as x_axis.
@param lmfit.Parameters params: object includes parameter dictionary which
can be set
@return tuple (error, params):
Explanation of the return parameter:
int error: error code (0:OK, -1:error)
lmfit.Parameters params: derived OrderedDict object contains the initial
values for the fit.
"""
# Convert for safety:
x_axis = np.array(x_axis)
data = np.array(data)
error = self._check_1D_input(x_axis=x_axis, data=data, params=params)
# calculate dft with zeropadding to obtain nicer interpolation between the
# appearing peaks.
dft_x, dft_y = compute_ft(x_axis, data, zeropad_num=1)
stepsize = x_axis[1]-x_axis[0] # for frequency axis
frequency_max = np.abs(dft_x[np.log(dft_y).argmax()])
    # find the minimal distance to the next measurement point on the corresponding time axis
min_x_diff = np.ediff1d(x_axis).min()
# How many points are used to sample the estimated frequency with min_x_diff:
iter_steps = int(1/(frequency_max*min_x_diff))
if iter_steps < 1:
iter_steps = 1
sum_res = np.zeros(iter_steps)
    # Procedure: create sine waves with different phases and sum the absolute
    # deviation from the data. A small sum means a good fit; the largest sum
    # marks the worst-fitting phase.
for iter_s in range(iter_steps):
func_val = np.sin(2*np.pi*frequency_max*x_axis + iter_s/iter_steps *2*np.pi)
sum_res[iter_s] = np.abs(data - func_val).sum()
    # The maximum of sum_res marks the phase where the sine fits worst; the best
    # fit is exactly pi away, so subtract pi. This also keeps the estimated phase
    # in the interval [-pi, pi].
phase = sum_res.argmax()/iter_steps *2*np.pi - np.pi
params['frequency'].set(value=frequency_max, min=0.0, max=1/(stepsize)*3)
params['phase'].set(value=phase, min=-np.pi, max=np.pi)
return error, params
def sine_testing2():
""" Sinus fit testing with the direct fit method. """
x_axis = np.linspace(0, 250, 75)
x_axis1 = np.linspace(250, 500, 75)
x_axis = np.append(x_axis, x_axis1)
x_nice = np.linspace(x_axis[0],x_axis[-1], 1000)
mod, params = qudi_fitting.make_sine_model()
params['phase'].value = np.pi/2 # np.random.uniform()*2*np.pi
params['frequency'].value = 0.01
params['amplitude'].value = 1.5
params['offset'].value = 0.4
data = mod.eval(x=x_axis, params=params)
data_noisy = (mod.eval(x=x_axis, params=params)
+ 1.5* np.random.normal(size=x_axis.shape))
# sorted_indices = x_axis.argsort()
# x_axis = x_axis[sorted_indices]
# data = data[sorted_indices]
# diff_array = np.ediff1d(x_axis)
# print(diff_array)
# print(diff_array.min())
# min_x_diff = diff_array.min()
# if np.isclose(min_x_diff, 0.0):
# index = np.argmin(diff_array)
# print('index',index)
# diff_array = np.delete(diff_array, index)
# print('diff_array',diff_array)
update_dict = {}
update_dict['phase'] = {'vary': False, 'value': np.pi/2.}
result = qudi_fitting.make_sine_fit(x_axis=x_axis, data=data_noisy,
add_params=update_dict)
plt.figure()
# plt.plot(x_axis, data, 'simulate data')
plt.plot(x_axis, data_noisy, label='noisy data')
plt.plot(x_axis, result.init_fit, label='initial data')
plt.plot(x_axis, result.best_fit, label='fit data')
plt.xlabel('time')
plt.ylabel('signal')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=2, mode="expand", borderaxespad=0.)
plt.show()