We extracted the following 16 code examples from open-source Python projects to illustrate how to use theano.tensor.argmin().
def apply(self, y_hat):
    """Compute the 1-based rank of the ground-truth answer in each group.

    ``y_hat`` is a flat vector of scores; every ``self.examples_group_size``
    consecutive entries form one group, and the ground-truth example is
    assumed to sit at index 0 of its group.

    Returns a symbolic vector named ``correct_rank`` with one rank per group.
    """
    # View the flat score vector as (n_groups, group_size).
    grouped_scores = y_hat.reshape(
        (y_hat.shape[0] / self.examples_group_size, self.examples_group_size))
    # Sort each group by decreasing relevance; negating the scores makes
    # argsort (which is ascending) produce a descending order.
    descending_order = tt.argsort(-1 * grouped_scores, axis=1)
    # The ground truth has original index 0, so its rank is the position of
    # the value 0 within each row of the sorted-index array; argmin locates
    # that position (0 is the minimum possible index). Ranks are 1-based.
    correct_rank = tt.argmin(descending_order, axis=1) + 1
    correct_rank.name = "correct_rank"
    return correct_rank
def argmin(x, axis=-1):
    """Return the index of the smallest element of ``x`` along ``axis``.

    ``keepdims`` is fixed to False, so the reduced axis is dropped.
    """
    reduced = T.argmin(x, axis=axis, keepdims=False)
    return reduced
def compute_vector_distances(trainingdata, testdata):
    """Squared Euclidean distances between every test and training vector.

    Adapted from https://gist.github.com/danielvarga/d0eeacea92e65b19188c
    with lamblin's workaround at https://github.com/Theano/Theano/issues/1399

    Parameters
    ----------
    trainingdata : 2-D array, shape (n, f) — the candidate vectors.
    testdata : 2-D array, shape (m, f) — the target vectors; must have the
        same number of features ``f`` as ``trainingdata``.

    Returns
    -------
    list of two arrays:
        - the (m, n) matrix of squared pairwise distances, and
        - the (m, n) matrix whose row i holds the training indices sorted by
          increasing distance to test vector i.
    """
    n = trainingdata.shape[0]  # number of candidates
    assert testdata.shape[1] == trainingdata.shape[1]
    m = testdata.shape[0]      # number of targets

    x = T.matrix('x')  # candidates
    y = T.matrix('y')  # targets

    # Row-wise squared norms, broadcast into full (m, n) / (n, m) grids.
    xL2S = T.sum(x * x, axis=-1)    # [n]
    yL2S = T.sum(y * y, axis=-1)    # [m]
    xL2SM = T.zeros((m, n)) + xL2S  # broadcasting, [m, n]
    yL2SM = T.zeros((n, m)) + yL2S  # broadcasting, [n, m]

    # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, assembled as an [n, m] matrix.
    squaredPairwiseDistances = xL2SM.T + yL2SM - 2.0 * T.dot(x, y.T)

    # Transpose both outputs so rows correspond to test vectors.
    squaredpwdist_fn = theano.function(
        [x, y],
        [T.transpose(squaredPairwiseDistances),
         T.transpose(T.argsort(squaredPairwiseDistances, axis=0))],
        profile=False)
    return squaredpwdist_fn(trainingdata, testdata)
def argmin(self, x, axis=-1):
    """Indices of the minimum values of ``x`` along ``axis``.

    The reduced axis is always dropped (``keepdims=False``).
    """
    indices = T.argmin(x, axis=axis, keepdims=False)
    return indices
def argmin(x, axis=-1, keepdims=False):
    """Index of the minimum of ``x`` along ``axis``.

    When ``keepdims`` is True the reduced axis is retained with length 1;
    all arguments are forwarded unchanged to ``T.argmin``.
    """
    return T.argmin(x, axis=axis, keepdims=keepdims)
def init_count_window_bigrams(self, train_stories, window_size, batch_size):
    # Compile a Theano function that, given a word index and batch index,
    # extracts a (window_size, batch_size) slice of `train_stories` and
    # returns its first row (`words1`) paired with the remaining rows
    # transposed (`words2`) — the raw material for window bigram counts.
    # NOTE(review): -1 appears to be the "empty word" sentinel — confirm.
    window = T.matrix('window', dtype='int32')
    # Synthetic test value for Theano's test-value debugging mode, with a
    # few entries forced to the -1 sentinel to exercise the filtering below.
    window.tag.test_value = rng.randint(self.lexicon_size, size=(window_size, 100)).astype('int32')
    window.tag.test_value[1, 10] = -1
    window.tag.test_value[:, 0] = -1
    window.tag.test_value[-1, 1] = -1
    words1 = window[0]      # first word of each window
    words2 = window[1:].T   # the following words, one row per story
    word_index = T.scalar('word_index', dtype='int32')
    word_index.tag.test_value = 0
    batch_index = T.scalar('batch_index', dtype='int32')
    batch_index.tag.test_value = 0
    # select words in sequence and batch
    window_ = train_stories[word_index:word_index + window_size, batch_index:batch_index + batch_size]
    # filter stories with all empty words from this batch
    # (argmin over the boolean mask gives the first column whose leading
    # word is non-negative; everything before it is dropped — TODO confirm
    # this matches the intended "all empty" filtering when no -1 occurs)
    window_ = window_[:, T.argmin(window_[0] < 0):]
    self.count_window_bigrams = theano.function(inputs=[word_index, batch_index],\
                                                outputs=[words1, words2],\
                                                givens={window: window_},\
                                                on_unused_input='ignore',\
                                                allow_input_downcast=True)
def get_output_for(self, inputs):
    """Reduce the graph to its highest- and lowest-degree nodes.

    Parameters
    ----------
    inputs : sequence
        ``inputs[0]`` is the (symbolic) adjacency matrix ``A``.  Any
        additional entries (e.g. a node-feature matrix) are not used by
        this layer, so the original unused ``X = inputs[1]`` was removed.

    Returns
    -------
    Whatever ``self.reduce`` produces for ``A`` and the two node indices.
    """
    A = inputs[0]
    # Hoist the degree computation so it is built once instead of twice.
    # Column sums are node degrees (assumes A is symmetric — TODO confirm).
    degrees = A.sum(0)
    max_degree_node = T.argmax(degrees)
    min_degree_node = T.argmin(degrees)
    return self.reduce(A, [max_degree_node, min_degree_node])
def get_output_for(self, inputs):
    """Return the eigenvector of A associated with its smallest eigenvalue.

    ``inputs[0]`` is the (symbolic) matrix whose eigendecomposition is taken.

    NOTE(review): this indexes a *row* of the eigenvector matrix, while
    numpy/theano ``eig`` conventionally returns eigenvectors as columns —
    verify the row indexing is intended.
    """
    A = inputs[0]
    eigvals, eigvecs = T.nlinalg.eig(A)
    which = T.argmin(eigvals)
    return eigvecs[which]