The following 36 code examples, extracted from open-source Python projects, illustrate how to use keras.backend.any().
def call(self, x, mask=None):
    """Average the input over the time axis, ignoring masked (padded) steps."""
    # x: (batch_size, input_length, input_dim)
    if mask is None:
        return K.mean(x, axis=1)  # (batch_size, input_dim)
    else:
        # This is to remove padding from the computational graph.
        if K.ndim(mask) > K.ndim(x):
            # This is due to the bug in Bidirectional that is passing the input mask
            # instead of computing output mask.
            # TODO: Fix the implementation of Bidirectional.
            mask = K.any(mask, axis=(-2, -1))
        if K.ndim(mask) < K.ndim(x):
            mask = K.expand_dims(mask)
        # Zero out padded timesteps before averaging.
        masked_input = switch(mask, x, K.zeros_like(x))
        # NOTE(review): K.sum(mask) sums over the whole batch, not per sample —
        # confirm this normalization is intended.
        weights = K.cast(mask / (K.sum(mask) + K.epsilon()), 'float32')
        return K.sum(masked_input * weights, axis=1)  # (batch_size, input_dim)
def call(self, x, mask=None): mean = super(IntraAttention, self).call(x, mask) # x: (batch_size, input_length, input_dim) # mean: (batch_size, input_dim) ones = K.expand_dims(K.mean(K.ones_like(x), axis=(0, 2)), dim=0) # (1, input_length) # (batch_size, input_length, input_dim) tiled_mean = K.permute_dimensions(K.dot(K.expand_dims(mean), ones), (0, 2, 1)) if mask is not None: if K.ndim(mask) > K.ndim(x): # Assuming this is because of the bug in Bidirectional. Temporary fix follows. # TODO: Fix Bidirectional. mask = K.any(mask, axis=(-2, -1)) if K.ndim(mask) < K.ndim(x): mask = K.expand_dims(mask) x = switch(mask, x, K.zeros_like(x)) # (batch_size, input_length, proj_dim) projected_combination = K.tanh(K.dot(x, self.vector_projector) + K.dot(tiled_mean, self.mean_projector)) scores = K.dot(projected_combination, self.scorer) # (batch_size, input_length) weights = K.softmax(scores) # (batch_size, input_length) attended_x = K.sum(K.expand_dims(weights) * x, axis=1) # (batch_size, input_dim) return attended_x
def validate_transitions_cpu_old(transitions, **kwargs):
    """For each (pre, suc) pair, check that suc is a legal successor of pre."""
    pre = np.array(transitions[0])
    suc = np.array(transitions[1])
    base = setting['base']
    # NOTE(review): both width and height are derived from shape[1] — confirm
    # the states are square images.
    width = pre.shape[1] // base
    height = pre.shape[1] // base
    load(width, height)
    pre_validation = validate_states(pre, **kwargs)
    suc_validation = validate_states(suc, **kwargs)
    results = []
    # The loop variables deliberately shadow the arrays above.
    for pre, suc, pre_validation, suc_validation in zip(pre, suc, pre_validation, suc_validation):
        if pre_validation and suc_validation:
            c = to_configs(np.array([pre, suc]), verbose=False)
            succs = successors(c[0], width, height)
            # Valid iff the observed successor equals any generated successor.
            results.append(np.any(np.all(np.equal(succs, c[1]), axis=1)))
        else:
            # A transition with an invalid endpoint is invalid.
            results.append(False)
    return results
def validate_transitions(transitions, check_states=True, **kwargs):
    """For each (pre, suc) pair, check that suc is a legal successor of pre.

    When check_states is True, transitions whose endpoints fail state
    validation are reported as invalid without generating successors.
    """
    pre = np.array(transitions[0])
    suc = np.array(transitions[1])
    if check_states:
        pre_validation = validate_states(pre, verbose=False, **kwargs)
        suc_validation = validate_states(suc, verbose=False, **kwargs)
    pre_configs = to_configs(pre, verbose=False, **kwargs)
    suc_configs = to_configs(suc, verbose=False, **kwargs)
    results = []
    if check_states:
        for pre_c, suc_c, pre_validation, suc_validation in zip(pre_configs, suc_configs, pre_validation, suc_validation):
            if pre_validation and suc_validation:
                succs = successors(pre_c)
                # Valid iff suc_c equals any generated successor.
                results.append(np.any(np.all(np.equal(succs, suc_c), axis=1)))
            else:
                results.append(False)
    else:
        for pre_c, suc_c in zip(pre_configs, suc_configs):
            succs = successors(pre_c)
            results.append(np.any(np.all(np.equal(succs, suc_c), axis=1)))
    return results
def loss_function(self):
    """Return the loss matching this CRF's learn_mode / sparse_target settings."""
    if self.learn_mode == 'join':
        def loss(y_true, y_pred):
            # Joint training needs the CRF's own input tensor and mask,
            # so it must be connected and must be the final layer.
            assert self.inbound_nodes, 'CRF has not connected to any layer.'
            assert not self.outbound_nodes, 'When learn_model="join", CRF must be the last layer.'
            if self.sparse_target:
                # Expand integer labels to one-hot over the tag set.
                y_true = K.one_hot(K.cast(y_true[:, :, 0], 'int32'), self.units)
            X = self.inbound_nodes[0].input_tensors[0]
            mask = self.inbound_nodes[0].input_masks[0]
            nloglik = self.get_negative_log_likelihood(y_true, X, mask)
            return nloglik
        return loss
    else:
        # Marginal mode falls back to plain cross-entropy.
        if self.sparse_target:
            return sparse_categorical_crossentropy
        else:
            return categorical_crossentropy
def compute_mask(self, input, mask):
    """Collapse the incoming mask to (batch_size, num_words).

    The input mask carries extra trailing dimensions (num_hyps, num_senses)
    that the output no longer has, so they are reduced away with `any`.
    """
    if mask is None or not self.return_sequences:
        return None
    return K.any(mask, axis=(-2, -1))
def _gen_local_drops(self, count, p):
    """Sample a local drop-path mask, guaranteeing at least one live path."""
    candidate = self._random_arr(count, p)
    # If every path was dropped, substitute a mask that keeps exactly one.
    return K.switch(
        K.any(candidate),
        candidate,
        self._arr_with_one(count)
    )
def compute_mask(self, inputs, mask=None):
    """Build a mask that is True only where every non-feature axis has
    at least one entry differing from `self.mask_value`."""
    rank = K.ndim(inputs)
    # True wherever the feature vector differs from the mask value.
    nonpad = K.any(K.not_equal(inputs, self.mask_value), axis=-1)
    combined = K.any(nonpad, axis=1, keepdims=True)
    # Fold in the reduction along every remaining intermediate axis.
    for ax in range(2, rank - 1):
        combined = tf.logical_and(combined, K.any(nonpad, axis=ax, keepdims=True))
    return combined
def compute_mask(self, inputs, mask):
    """Delegate to the wrapped layer's mask; when `time_steps` is fixed,
    broadcast a single per-sample flag across that many steps."""
    inner_mask = self.layer.compute_mask(
        inputs=inputs,
        mask=mask,
    )
    if self.time_steps is None:
        return inner_mask
    # Collapse to one (always-True) flag per sample, then repeat it.
    per_sample = K.any(K.ones_like(inner_mask), axis=1, keepdims=True)
    return K.tile(per_sample, [1, self.time_steps])
def compute_mask(self, inputs, mask=None):
    """Permute the wrapped layer's mask, then reduce every axis beyond the
    first two down to a single truth value with `any`."""
    inner = self.layer.compute_mask(inputs, mask)
    rank = len(K.int_shape(inner))
    permuted = K.permute_dimensions(
        inner,
        self.permute_pattern
    )
    for _ in range(rank - 2):
        permuted = K.any(permuted, -1)
    return permuted
def call(self, x, mask=None):
    """Mean over the time axis; with a mask, divide by the unmasked count."""
    if mask is not None:
        mask = K.cast(mask, 'float32')
        # NOTE(review): `not K.any(mask)` applies Python boolean logic to a
        # symbolic tensor — confirm this behaves as intended on this backend.
        if not K.any(mask):
            return K.mean(x, axis=1)
        else:
            # NOTE(review): x is summed over all timesteps (not masked first)
            # while the denominator counts only unmasked steps — verify this
            # is the intended behavior for padded inputs.
            return K.cast(x.sum(axis=1) / mask.sum(axis=1, keepdims=True), K.floatx())
    else:
        return K.mean(x, axis=1)
def validate_transitions_cpu(transitions, check_states=True, **kwargs):
    """For each (pre, suc) pair, check that suc is a legal successor of pre."""
    pre = np.array(transitions[0])
    suc = np.array(transitions[1])
    base = setting['base']
    # NOTE(review): both width and height derive from shape[1] — confirm the
    # states are square images.
    width = pre.shape[1] // base
    height = pre.shape[1] // base
    load(width, height)
    if check_states:
        pre_validation = validate_states(pre, verbose=False, **kwargs)
        suc_validation = validate_states(suc, verbose=False, **kwargs)
    pre_configs = to_configs(pre, verbose=False, **kwargs)
    suc_configs = to_configs(suc, verbose=False, **kwargs)
    results = []
    if check_states:
        for pre_c, suc_c, pre_validation, suc_validation in zip(pre_configs, suc_configs, pre_validation, suc_validation):
            if pre_validation and suc_validation:
                succs = successors(pre_c, width, height)
                # Valid iff suc_c equals any generated successor.
                results.append(np.any(np.all(np.equal(succs, suc_c), axis=1)))
            else:
                results.append(False)
    else:
        for pre_c, suc_c in zip(pre_configs, suc_configs):
            succs = successors(pre_c, width, height)
            results.append(np.any(np.all(np.equal(succs, suc_c), axis=1)))
    return results
def validate_states(states, verbose=True, **kwargs):
    """Validate images by checking every panel matches exactly one known panel."""
    base = panels.shape[1]
    dim = states.shape[1] - pad*2
    size = dim // base
    def build():
        states = Input(shape=(dim+2*pad, dim+2*pad))
        # NOTE(review): `dim+2*pad * relative_swirl_radius` parses as
        # dim + (2*pad*relative_swirl_radius); confirm the intended radius.
        s = tensor_swirl(states, radius=dim+2*pad * relative_swirl_radius, **unswirl_args)
        error = build_errors(s, base, pad, dim, size)
        # 1 where the reconstruction error is below threshold, else 0.
        matches = 1 - K.clip(K.sign(error - threshold), 0, 1)
        num_matches = K.sum(matches, axis=3)
        panels_ok = K.all(K.equal(num_matches, 1), (1, 2))
        panels_ng = K.any(K.not_equal(num_matches, 1), (1, 2))
        panels_nomatch = K.any(K.equal(num_matches, 0), (1, 2))
        panels_ambiguous = K.any(K.greater(num_matches, 1), (1, 2))
        validity = panels_ok
        if verbose:
            return Model(states, [wrap(states, x) for x in [panels_ng, panels_nomatch, panels_ambiguous, validity]])
        else:
            return Model(states, wrap(states, validity))
    if verbose:
        panels_ng, panels_nomatch, panels_ambiguous, validity \
            = build().predict(states, **kwargs)
        print(np.count_nonzero(panels_ng), "images have some panels which match 0 or >2 panels, out of which")
        print(np.count_nonzero(panels_nomatch), "images have some panels which are unlike any panels")
        print(np.count_nonzero(panels_ambiguous),"images have some panels which match >2 panels")
        print(np.count_nonzero(validity), "images have panels (all of them) which match exactly 1 panel each")
        return validity
    else:
        validity \
            = build().predict(states, **kwargs)
        return validity
def validate_transitions(transitions, check_states=True, **kwargs):
    """For each (pre, suc) Hanoi pair, check suc is a legal successor of pre."""
    pre = np.array(transitions[0])
    suc = np.array(transitions[1])
    # Recover the disk/tower geometry from the image dimensions.
    tower_height = pre.shape[1]
    disks = tower_height // disk_height
    tower_width = disks * (2*disk_inc) + base_disk_width + border
    towers = pre.shape[2] // tower_width
    if check_states:
        pre_validation = validate_states(pre, verbose=False, **kwargs)
        suc_validation = validate_states(suc, verbose=False, **kwargs)
    pre_configs = to_configs(pre, verbose=False, **kwargs)
    suc_configs = to_configs(suc, verbose=False, **kwargs)
    results = []
    if check_states:
        for pre_c, suc_c, pre_validation, suc_validation in zip(pre_configs, suc_configs, pre_validation, suc_validation):
            if pre_validation and suc_validation:
                succs = successors(pre_c, disks, towers)
                # Valid iff suc_c equals any generated successor.
                results.append(np.any(np.all(np.equal(succs, suc_c), axis=1)))
            else:
                results.append(False)
    else:
        for pre_c, suc_c in zip(pre_configs, suc_configs):
            succs = successors(pre_c, disks, towers)
            results.append(np.any(np.all(np.equal(succs, suc_c), axis=1)))
    return results

## patterns ##############################################################
def compute_mask(self, input, mask=None):
    """Collapse a per-timestep mask to one flag per sample; pass None through."""
    if mask is None:
        return mask
    return K.any(mask, axis=1)
def MaskingHack(x):
    """Zero out timesteps whose features (all but the last two) are all zero."""
    keep = K.any(x[:, :, 0:-2], axis=-1, keepdims=True)
    return x * keep
def _cosine_distance(M, k):
    """Cosine similarity between memory rows M and key k — equation (6).

    Both operands are L2-normalized first, which keeps the computation better
    conditioned, but zero-length vectors can still produce NaNs; no error
    handling is in place yet (TODO).
    """
    key_normed = K.l2_normalize(k, axis=-1)
    mem_normed = K.l2_normalize(M, axis=-1)
    return K.batch_dot(mem_normed, key_normed)
def non_triv_stab_expanded(self, y_true, y_pred):
    "Whether the stabilizer after correction is not trivial."
    if self.p:
        # Undo the normalization/centering applied to the error vectors.
        y_pred = undo_normcentererr(y_pred, self.p)
        y_true = undo_normcentererr(y_true, self.p)
    # Residual error pattern over GF(2).
    residual = (K.round(y_pred) + y_true) % 2
    return K.any(K.dot(self.H, K.transpose(residual)) % 2, axis=0)
def logic_error_expanded(self, y_true, y_pred):
    "Whether there is a logical error after correction."
    if self.p:
        # Undo the normalization/centering applied to the error vectors.
        y_pred = undo_normcentererr(y_pred, self.p)
        y_true = undo_normcentererr(y_true, self.p)
    # Residual error pattern over GF(2).
    residual = (K.round(y_pred) + y_true) % 2
    return K.any(K.dot(self.E, K.transpose(residual)) % 2, axis=0)
def nonzeroflips(q, out_dimZ, out_dimX):
    """Sample flip patterns until one with at least one flip is drawn."""
    while True:
        flips = makeflips(q, out_dimZ, out_dimX)
        if np.any(flips):
            return flips
def masked_categorical_crossentropy(y_true, y_pred):
    """Categorical cross-entropy that ignores all-zero (padding) targets.

    A timestep is treated as padding when its one-hot target vector is all
    zeros; those positions contribute zero loss.
    """
    # 1 where the target has any nonzero entry, 0 for padding timesteps.
    mask = K.cast(K.expand_dims(K.any(y_true, -1), axis=-1), 'float32')
    y_pred *= mask
    # Set masked predictions to exactly 1 so they yield a finite loss that is
    # zeroed below.  (Bug fix: the original added `1 - mask` twice, leaving
    # masked entries at 2 instead of 1 — an apparent copy/paste duplicate.)
    y_pred += 1-mask
    # NOTE(review): argument order matches the old keras.backend signature
    # (output, target) — confirm against the Keras version in use.
    losses = K.categorical_crossentropy(y_pred, y_true)
    losses *= K.squeeze(mask, -1)
    ## Normalize by number of real segments, using a small non-zero denominator in cases of padding characters
    ## in order to avoid division by zero
    #losses /= (K.mean(mask) + (1e-10*(1-K.mean(mask))))
    return losses
def compute_mask(self, input, mask=None):
    """In 'join' mode collapse the timestep mask to one flag per sample;
    otherwise pass the mask through unchanged."""
    if mask is not None and self.learn_mode == 'join':
        return K.any(mask, axis=1)
    return mask
def get_log_normalization_constant(self, input_energy, mask, **kwargs): """Compute logarithm of the normalization constance Z, where Z = sum exp(-E) -> logZ = log sum exp(-E) =: -nlogZ """ # should have logZ[:, i] == logZ[:, j] for any i, j logZ = self.recursion(input_energy, mask, return_sequences=False, **kwargs) return logZ[:, 0]
def get_energy(self, y_true, input_energy, mask):
    """Energy = a1' y1 + u1' y1 + y1' U y2 + u2' y2 + y2' U y3 + u3' y3 + an' y3
    """
    # Unary energies selected by the true tags.
    input_energy = K.sum(input_energy * y_true, 2)  # (B, T)
    # Pairwise (transition) energies between consecutive tags.
    chain_energy = K.sum(K.dot(y_true[:, :-1, :], self.chain_kernel) * y_true[:, 1:, :], 2)  # (B, T-1)
    if mask is not None:
        mask = K.cast(mask, K.floatx())
        # (B, T-1); mask[:,:-1]*mask[:,1:] makes it work with any padding
        chain_mask = mask[:, :-1] * mask[:, 1:]
        input_energy = input_energy * mask
        chain_energy = chain_energy * chain_mask
    total_energy = K.sum(input_energy, -1) + K.sum(chain_energy, -1)  # (B, )
    return total_energy
def viterbi_decoding(self, X, mask=None):
    """Decode the most likely tag sequence with the Viterbi algorithm and
    return it one-hot encoded."""
    # Unary energies for each timestep/tag.
    input_energy = self.activation(K.dot(X, self.kernel) + self.bias)
    if self.use_boundary:
        input_energy = self.add_boundary_energy(input_energy, mask, self.left_boundary, self.right_boundary)

    # Forward pass: tables of argmin backpointers.
    argmin_tables = self.recursion(input_energy, mask, return_logZ=False)
    argmin_tables = K.cast(argmin_tables, 'int32')

    # backward to find best path, `initial_best_idx` can be any,
    # as all elements in the last argmin_table are the same
    argmin_tables = K.reverse(argmin_tables, 1)
    # matrix instead of vector is required by tf `K.rnn`
    initial_best_idx = [K.expand_dims(argmin_tables[:, 0, 0])]
    if K.backend() == 'theano':
        initial_best_idx = [K.T.unbroadcast(initial_best_idx[0], 1)]

    def gather_each_row(params, indices):
        # Gather params[i, indices[i]] for every row i, per backend.
        n = K.shape(indices)[0]
        if K.backend() == 'theano':
            return params[K.T.arange(n), indices]
        else:
            indices = K.transpose(K.stack([K.tf.range(n), indices]))
            return K.tf.gather_nd(params, indices)

    def find_path(argmin_table, best_idx):
        # One backward step: follow the backpointer of the current best tag.
        next_best_idx = gather_each_row(argmin_table, best_idx[0][:, 0])
        next_best_idx = K.expand_dims(next_best_idx)
        if K.backend() == 'theano':
            next_best_idx = K.T.unbroadcast(next_best_idx, 1)
        return next_best_idx, [next_best_idx]

    _, best_paths, _ = K.rnn(find_path, argmin_tables, initial_best_idx,
                             input_length=K.int_shape(X)[1], unroll=self.unroll)
    best_paths = K.reverse(best_paths, 1)
    best_paths = K.squeeze(best_paths, 2)
    return K.one_hot(best_paths, self.units)
def compute_mask(self, x, mask=None):
    """Reduce the mask's trailing axes with `any` until its rank equals
    ndim(x) - 2."""
    if mask is None:
        return None
    excess = K.ndim(mask) - (K.ndim(x) - 2)
    if excess:
        trailing = tuple(-axis for axis in range(1, excess + 1))
        mask = K.any(mask, trailing)
    return mask
def compute_mask(self, x, mask=None):
    """Collapse a 3-D mask to one flag per sample; 2-D masks produce None.

    Raises for any other mask rank.
    """
    if mask is None or mask.ndim == 2:
        return None
    elif mask.ndim == 3:
        # Bug fix: the original computed this reduction but never returned it,
        # so the function always fell through and implicitly returned None.
        return K.any(mask, axis=(1, 2))
    else:
        raise Exception("Unexpected situation")
def normalize_mask(x, mask):
    """Align `mask` with tensor `x`.

    Arguments: x is a data tensor; mask is a binary tensor.

    Rationale: keep the mask at the same dimensionality as x, but with a
    length-1 trailing dimension.  This guarantees broadcastability, which
    matters because inferring shapes is hard and easy to get wrong.
    """
    mask = K.cast(mask, K.floatx())
    while K.ndim(mask) != K.ndim(x):
        if K.ndim(mask) > K.ndim(x):
            # Too many axes: reduce the trailing one.
            mask = K.any(mask, axis=-1)
        else:
            # Too few axes: grow a trailing one.
            mask = K.expand_dims(mask)
    return K.any(mask, axis=-1, keepdims=True)
def compute_mask(x, mask_value=0):
    """Return 1.0 where any feature of a timestep differs from mask_value,
    0.0 otherwise."""
    nonpad = K.any(K.not_equal(x, mask_value), axis=-1, keepdims=False)
    return K.cast(nonpad, K.floatx())
def call(self, x, mask=None):
    """Score each head word as the attachment site for a preposition phrase
    and return a probability distribution over head words."""
    # x: (batch_size, input_length, input_dim) where input_length = head_size + 2
    head_encoding = x[:, :-2, :]  # (batch_size, head_size, input_dim)
    prep_encoding = x[:, -2, :]  # (batch_size, input_dim)
    child_encoding = x[:, -1, :]  # (batch_size, input_dim)
    if self.composition_type == 'HPCD':
        # TODO: The following line may not work with TF.
        # (batch_size, head_size, input_dim, 1) * (1, head_size, input_dim, proj_dim)
        head_proj_prod = K.expand_dims(head_encoding) * K.expand_dims(self.dist_proj_head, dim=0)
        head_projection = K.sum(head_proj_prod, axis=2)  # (batch_size, head_size, proj_dim)
    else:
        head_projection = K.dot(head_encoding, self.proj_head)  # (batch_size, head_size, proj_dim)
    prep_projection = K.expand_dims(K.dot(prep_encoding, self.proj_prep), dim=1)  # (batch_size, 1, proj_dim)
    child_projection = K.expand_dims(K.dot(child_encoding, self.proj_child), dim=1)  # (batch_size, 1, proj_dim)
    # (batch_size, head_size, proj_dim)
    if self.composition_type == 'HPCT':
        composed_projection = K.tanh(head_projection + prep_projection + child_projection)
    elif self.composition_type == 'HPC' or self.composition_type == "HPCD":
        prep_child_projection = K.tanh(prep_projection + child_projection)  # (batch_size, 1, proj_dim)
        composed_projection = K.tanh(head_projection + prep_child_projection)
    else:
        # Composition type in HC
        composed_projection = K.tanh(head_projection + child_projection)
    for hidden_layer in self.hidden_layers:
        composed_projection = K.tanh(K.dot(composed_projection, hidden_layer))  # (batch_size, head_size, proj_dim)
    # (batch_size, head_size)
    head_word_scores = K.squeeze(K.dot(composed_projection, self.scorer), axis=-1)
    if mask is None:
        attachment_probabilities = K.softmax(head_word_scores)  # (batch_size, head_size)
    else:
        if K.ndim(mask) > 2:
            # This means this layer came after a Bidirectional layer. Keras has this bug which
            # concatenates input masks instead of output masks.
            # TODO: Fix Bidirectional instead.
            mask = K.any(mask, axis=(-2, -1))
        # We need to do a masked softmax.
        exp_scores = K.exp(head_word_scores)  # (batch_size, head_size)
        head_mask = mask[:, :-2]  # (batch_size, head_size)
        # (batch_size, head_size)
        masked_exp_scores = switch(head_mask, exp_scores, K.zeros_like(head_encoding[:, :, 0]))
        # (batch_size, 1). Adding epsilon to avoid divison by 0. But epsilon is float64.
        exp_sum = K.cast(K.expand_dims(K.sum(masked_exp_scores, axis=1) + K.epsilon()), 'float32')
        attachment_probabilities = masked_exp_scores / exp_sum  # (batch_size, head_size)
    return attachment_probabilities
def call(self, x, mask=None):
    """Predict PP-attachment label probabilities from head, preposition and
    child encodings."""
    # x[0]: (batch_size, input_length, input_dim)
    # x[1]: (batch_size, 1) indices of prepositions
    # Optional: x[2]: (batch_size, input_length - 2)
    assert isinstance(x, list) or isinstance(x, tuple)
    encoded_sentence = x[0]
    prep_indices = K.squeeze(x[1], axis=-1)  # (batch_size,)
    batch_indices = K.arange(K.shape(encoded_sentence)[0])  # (batch_size,)
    if self.with_attachment_probs:
        # We're essentially doing K.argmax(x[2]) here, but argmax is not differentiable!
        head_probs = x[2]
        head_probs_padding = K.zeros_like(x[2])[:, :2]  # (batch_size, 2)
        # (batch_size, input_length)
        padded_head_probs = K.concatenate([head_probs, head_probs_padding])
        # (batch_size, 1)
        max_head_probs = K.expand_dims(K.max(padded_head_probs, axis=1))
        # (batch_size, input_length, 1)
        max_head_prob_indices = K.expand_dims(K.equal(padded_head_probs, max_head_probs))
        # (batch_size, input_length, input_dim)
        masked_head_encoding = K.switch(max_head_prob_indices, encoded_sentence, K.zeros_like(encoded_sentence))
        # (batch_size, input_dim)
        head_encoding = K.sum(masked_head_encoding, axis=1)
    else:
        head_indices = prep_indices - 1  # (batch_size,)
        head_encoding = encoded_sentence[batch_indices, head_indices, :]  # (batch_size, input_dim)
    prep_encoding = encoded_sentence[batch_indices, prep_indices, :]  # (batch_size, input_dim)
    child_encoding = encoded_sentence[batch_indices, prep_indices+1, :]  # (batch_size, input_dim)
    # Dead code kept for reference: mask-aware split-average alternative.
    '''
    prep_indices = x[1]
    sentence_mask = mask[0]
    if sentence_mask is not None:
        if K.ndim(sentence_mask) > 2:
            # This means this layer came after a Bidirectional layer. Keras has this bug which
            # concatenates input masks instead of output masks.
            # TODO: Fix Bidirectional instead.
            sentence_mask = K.any(sentence_mask, axis=(-2, -1))
    head_encoding, prep_encoding, child_encoding = self.get_split_averages(encoded_sentence, sentence_mask,
                                                                           prep_indices)
    '''
    head_projection = K.dot(head_encoding, self.proj_head)  # (batch_size, proj_dim)
    prep_projection = K.dot(prep_encoding, self.proj_prep)  # (batch_size, proj_dim)
    child_projection = K.dot(child_encoding, self.proj_child)  # (batch_size, proj_dim)
    # (batch_size, proj_dim)
    if self.composition_type == 'HPCT':
        composed_projection = K.tanh(head_projection + prep_projection + child_projection)
    elif self.composition_type == 'HPC':
        prep_child_projection = K.tanh(prep_projection + child_projection)  # (batch_size, proj_dim)
        composed_projection = K.tanh(head_projection + prep_child_projection)
    else:
        # Composition type in HC
        composed_projection = K.tanh(head_projection + child_projection)
    for hidden_layer in self.hidden_layers:
        composed_projection = K.tanh(K.dot(composed_projection, hidden_layer))  # (batch_size, proj_dim)
    # (batch_size, num_classes)
    class_scores = K.dot(composed_projection, self.scorer)
    label_probabilities = K.softmax(class_scores)
    return label_probabilities
def validate_states(states, verbose=True, **kwargs):
    """Validate puzzle states: every panel must match exactly one reference
    panel, and no reference panel may be matched more than once."""
    base = setting['base']
    # NOTE(review): both width and height derive from shape[1] — confirm the
    # states are square images.
    width = states.shape[1] // base
    height = states.shape[1] // base
    load(width, height)

    def build():
        states = Input(shape=(height*base, width*base))
        error = build_error(states, height, width, base)
        # 1 where the error is below threshold: a, h, w, panel
        matches = 1 - K.clip(K.sign(error - threshold), 0, 1)
        num_matches = K.sum(matches, axis=3)
        panels_ok = K.all(K.equal(num_matches, 1), (1, 2))
        panels_ng = K.any(K.not_equal(num_matches, 1), (1, 2))
        panels_nomatch = K.any(K.equal(num_matches, 0), (1, 2))
        panels_ambiguous = K.any(K.greater(num_matches, 1), (1, 2))
        # ideally, this should be [[1,1,1,1,1,1,1,1,1], ...]
        panel_coverage = K.sum(matches, axis=(1, 2))
        coverage_ok = K.all(K.less_equal(panel_coverage, 1), 1)
        coverage_ng = K.any(K.greater(panel_coverage, 1), 1)
        validity = tf.logical_and(panels_ok, coverage_ok)
        if verbose:
            return Model(states, [wrap(states, x) for x in [panels_ok, panels_ng, panels_nomatch, panels_ambiguous, coverage_ok, coverage_ng, validity]])
        else:
            return Model(states, wrap(states, validity))

    model = build()
    # model.summary()
    if verbose:
        panels_ok, panels_ng, panels_nomatch, panels_ambiguous, \
            coverage_ok, coverage_ng, validity = model.predict(states, **kwargs)
        print(np.count_nonzero(panels_ng), "images have some panels which match 0 or >2 panels, out of which")
        print(np.count_nonzero(panels_nomatch), "images have some panels which are unlike any panels")
        print(np.count_nonzero(panels_ambiguous),"images have some panels which match >2 panels")
        print(np.count_nonzero(panels_ok), "images have panels (all of them) which match exactly 1 panel each")
        print(np.count_nonzero(np.logical_and(panels_ok, coverage_ng)),"images have duplicated tiles")
        print(np.count_nonzero(np.logical_and(panels_ok, coverage_ok)),"images have no duplicated tiles")
        return validity
    else:
        validity = model.predict(states, **kwargs)
        return validity
def validate_states(states, verbose=True, **kwargs):
    """Validate Hanoi states: each disk panel must match exactly one reference
    panel, with the expected coverage counts across the image."""
    # Recover the disk/tower geometry from the image dimensions.
    tower_height = states.shape[1]
    disks = tower_height // disk_height
    tower_width = disks * (2*disk_inc) + base_disk_width + border
    towers = states.shape[2] // tower_width
    panels = get_panels(disks, tower_width)

    def build():
        states = Input(shape=(tower_height, tower_width*towers))
        error = build_error(states, disks, towers, tower_width, panels)
        # 1 where the error is below threshold.
        matches = 1 - K.clip(K.sign(error - threshold), 0, 1)
        num_matches = K.sum(matches, axis=3)
        panels_ok = K.all(K.equal(num_matches, 1), (1, 2))
        panels_ng = K.any(K.not_equal(num_matches, 1), (1, 2))
        panels_nomatch = K.any(K.equal(num_matches, 0), (1, 2))
        panels_ambiguous = K.any(K.greater(num_matches, 1), (1, 2))
        panel_coverage = K.sum(matches, axis=(1, 2))
        # ideally, this should be [[1,1,1...1,1,1,disks*tower-disk], ...]
        ideal_coverage = np.ones(disks+1)
        ideal_coverage[-1] = disks*towers-disks
        ideal_coverage = K.variable(ideal_coverage)
        coverage_ok = K.all(K.equal(panel_coverage, ideal_coverage), 1)
        coverage_ng = K.any(K.not_equal(panel_coverage, ideal_coverage), 1)
        validity = tf.logical_and(panels_ok, coverage_ok)
        if verbose:
            return Model(states, [wrap(states, x) for x in [panels_ok, panels_ng, panels_nomatch, panels_ambiguous, coverage_ok, coverage_ng, validity]])
        else:
            return Model(states, wrap(states, validity))

    model = build()
    # model.summary()
    if verbose:
        panels_ok, panels_ng, panels_nomatch, panels_ambiguous, \
            coverage_ok, coverage_ng, validity = model.predict(states, **kwargs)
        print(np.count_nonzero(panels_ng), "images have some panels which match 0 or >2 panels, out of which")
        print(np.count_nonzero(panels_nomatch), "images have some panels which are unlike any panels")
        print(np.count_nonzero(panels_ambiguous),"images have some panels which match >2 panels")
        print(np.count_nonzero(panels_ok), "images have panels (all of them) which match exactly 1 panel each")
        print(np.count_nonzero(np.logical_and(panels_ok, coverage_ng)),"images have duplicated tiles")
        print(np.count_nonzero(np.logical_and(panels_ok, coverage_ok)),"images have no duplicated tiles")
        return validity
    else:
        validity = model.predict(states, **kwargs)
        return validity