我们从 Python 开源项目中提取了以下 50 个代码示例,用于说明如何使用 util.raiseNotDefined()。
def getAction(self, gameState):
    """Return the minimax action from the current gameState, using
    self.depth and self.evaluationFunction.

    Useful gameState methods:
      gameState.getLegalActions(agentIndex) -- legal actions for an agent;
        agentIndex == 0 is Pacman, ghosts are >= 1.
      gameState.generateSuccessor(agentIndex, action) -- successor game
        state after the given agent takes the given action.
      gameState.getNumAgents() -- total number of agents in the game.
    """
    "*** YOUR CODE HERE ***"
    util.raiseNotDefined()
def backtrack(self):
    """Reconstruct the path of moves from the initial state to this node.

    Bear in mind that usually the path is reconstructed from the final
    node back to the initial one.
    """
    moves = []
    # Deep copy so no shared references back into the original node.
    node = copy.deepcopy(self)
    if node.isRootNode():
        # The initial state is the final state: empty path.
        return moves
    "**YOUR CODE HERE**"
    util.raiseNotDefined()
def depthFirstSearch(problem):
    """Search the deepest nodes in the search tree first.

    Must return a list of actions that reaches the goal; be sure to
    implement a graph-search algorithm.

    Handy probes to understand the search problem being passed in:
      print "Start:", problem.getStartState()
      print "Is the start a goal?", problem.isGoalState(problem.getStartState())
      print "Start's successors:", problem.getSuccessors(problem.getStartState())
    """
    "*** YOUR CODE HERE ***"
    util.raiseNotDefined()
def getAgent(self, index):
    """Return the agent for the provided index."""
    util.raiseNotDefined()
def chooseAction(self, gameState):
    """Override this method to make a good agent.

    Must return a legal action within the time limit; otherwise a random
    legal action will be chosen on the agent's behalf.
    """
    util.raiseNotDefined()


#######################
# Convenience Methods #
#######################
def getDistribution(self, state):
    """Return a Counter encoding a distribution over actions from the
    provided state.
    """
    util.raiseNotDefined()
def getStartState(self):
    """Return the start state for the search problem."""
    util.raiseNotDefined()
def isGoalState(self, state):
    """Return True if and only if `state` (a search state) is a valid
    goal state.
    """
    util.raiseNotDefined()
def getSuccessors(self, state):
    """Return the successors of `state` (a search state).

    Must return a list of (successor, action, stepCost) triples, where
    'successor' is a successor to the current state, 'action' is the
    action required to get there, and 'stepCost' is the incremental cost
    of expanding to that successor.
    """
    util.raiseNotDefined()
def getCostOfActions(self, actions):
    """Return the total cost of the given sequence of actions.

    The sequence must be composed of legal moves.
    """
    util.raiseNotDefined()
def getAction(self, gameState):
    """Return the minimax action, using self.depth and
    self.evaluationFunction.
    """
    "*** YOUR CODE HERE ***"
    util.raiseNotDefined()
def getAction(self, gameState):
    """Return the expectimax action, using self.depth and
    self.evaluationFunction.

    All ghosts should be modeled as choosing uniformly at random from
    their legal moves.
    """
    "*** YOUR CODE HERE ***"
    util.raiseNotDefined()
def betterEvaluationFunction(currentGameState):
    """Your extreme ghost-hunting, pellet-nabbing, food-gobbling,
    unstoppable evaluation function (question 5).

    DESCRIPTION: <write something here so we know what you did>
    """
    "*** YOUR CODE HERE ***"
    util.raiseNotDefined()


# Abbreviation
def getQValue(self, state, action):
    """Return the Q-value of the (state, action) pair after the indicated
    number of value-iteration passes.

    Value iteration does not necessarily compute this quantity directly,
    so it may have to be derived on the fly.
    """
    "*** YOUR CODE HERE ***"
    util.raiseNotDefined()
def getPolicy(self, state):
    """Return the best action in the given state according to the values
    computed by value iteration.

    Ties may be broken any way you see fit. If there are no legal
    actions (as at the terminal state), return None.
    """
    "*** YOUR CODE HERE ***"
    util.raiseNotDefined()
def getFeatures(self, state, action):
    """Return a dict mapping features to counts.

    Usually the count will just be 1.0 for indicator functions.
    """
    util.raiseNotDefined()
def getQValue(self, state, action):
    """Return Q(state, action)."""
    util.raiseNotDefined()
def getValue(self, state):
    """Return the value of this state under the best action.

    Concretely: V(s) = max_{a in actions} Q(s, a).
    """
    util.raiseNotDefined()
def getAction(self, state):
    """Choose an action and return it.

    `state` supports state.getLegalActions().
    """
    util.raiseNotDefined()
def update(self, state, action, nextState, reward):
    """Called by this class after observing a transition and reward;
    subclasses write their learning update here.
    """
    util.raiseNotDefined()


####################################
#       Read These Functions       #
####################################
def getQValue(self, state, action):
    """Return Q(state, action) = w * featureVector, where * is the
    dot-product operator.
    """
    "*** YOUR CODE HERE ***"
    util.raiseNotDefined()
def update(self, state, action, nextState, reward):
    """Update the weights based on the observed transition."""
    "*** YOUR CODE HERE ***"
    util.raiseNotDefined()
def getStartState(self):
    """Return the start state for the search problem."""
    util.raiseNotDefined()
def isGoalState(self, state):
    """Return True if and only if `state` (a search state) is a valid
    goal state.
    """
    util.raiseNotDefined()
def getSuccessors(self, state):
    """Return the successors of `state` (a search state).

    Must return a list of (successor, action, stepCost) triples, where
    'successor' is a successor to the current state, 'action' is the
    action required to get there, and 'stepCost' is the incremental cost
    of expanding to that successor.
    """
    util.raiseNotDefined()
def getCostOfActions(self, actions):
    """Return the total cost of the given sequence of actions.

    The sequence must be composed of legal moves.
    """
    util.raiseNotDefined()
def enhancedPacmanFeatures(state, action):
    """Called with each legal action for each state.

    Must return a counter of { <feature name>: <feature value>, ... }.
    """
    features = util.Counter()
    "*** YOUR CODE HERE ***"
    util.raiseNotDefined()
    return features
def uniformCostSearch(problem):
    """Search the node of least total cost first (uniform-cost search).

    Graph search over `problem` (a SearchProblem): expands states in order
    of cumulative path cost using a priority queue keyed on that cost.

    Returns the list of actions reaching the first goal state popped from
    the fringe. Falls through to util.raiseNotDefined() only if the fringe
    is exhausted without reaching a goal.
    """
    visited = set()
    fringe = util.PriorityQueue()
    # Fringe entries are (state, actions-so-far, cumulative cost).
    # Carrying the running cost lets each push be O(1) instead of
    # re-pricing the whole action sequence with
    # problem.getCostOfActions(newActions), which made every push O(n)
    # and the search accidentally quadratic in path length.
    fringe.push((problem.getStartState(), [], 0), 0)
    while not fringe.isEmpty():
        currState, actions, cost = fringe.pop()
        # Goal test on pop, so the cheapest path to a goal wins.
        if problem.isGoalState(currState):
            return actions
        if currState not in visited:
            visited.add(currState)
            for successor, action, stepCost in problem.getSuccessors(currState):
                if successor not in visited:
                    newCost = cost + stepCost
                    fringe.push((successor, actions + [action], newCost), newCost)
    util.raiseNotDefined()