dump the chump
parent
ecce81a6a8
commit
066645b63e
@@ -1,533 +0,0 @@
"""Implement Agents and Environments (Chapters 1-2).

The class hierarchies are as follows:

Object ## A physical object that can exist in an environment
    Agent
        Wumpus
        RandomAgent
        ReflexVacuumAgent
        ...
    Dirt
    Wall
    ...

Environment ## An environment holds objects, runs simulations
    XYEnvironment
        VacuumEnvironment
        WumpusEnvironment

EnvFrame ## A graphical representation of the Environment
"""

from utils import *
import random, copy

#______________________________________________________________________________

class Object:
    """This represents any physical object that can appear in an Environment.
    You subclass Object to get the objects you want.  Each object can have a
    .__name__ slot (used for output only)."""

    def __repr__(self):
        return '<%s>' % getattr(self, '__name__', self.__class__.__name__)

    def is_alive(self):
        """Objects that are 'alive' should return true."""
        return hasattr(self, 'alive') and self.alive

    def display(self, canvas, x, y, width, height):
        """Display an image of this Object on the canvas."""
        pass

class Agent(Object):
    """An Agent is a subclass of Object with one required slot,
    .program, which should hold a function that takes one argument, the
    percept, and returns an action.  (What counts as a percept or action
    will depend on the specific environment in which the agent exists.)
    Note that 'program' is a slot, not a method.  If it were a method,
    then the program could 'cheat' and look at aspects of the agent.
    It's not supposed to do that: the program can only look at the
    percepts.  An agent program that needs a model of the world (and of
    the agent itself) will have to build and maintain its own model.
    There is an optional slot, .performance, which is a number giving
    the performance measure of the agent in its environment."""

    def __init__(self):
        def program(percept):
            return raw_input('Percept=%s; action? ' % percept)
        self.program = program
        self.alive = True

def TraceAgent(agent):
    """Wrap the agent's program to print its input and output.  This will let
    you see what the agent is doing in the environment."""
    old_program = agent.program
    def new_program(percept):
        action = old_program(percept)
        print '%s perceives %s and does %s' % (agent, percept, action)
        return action
    agent.program = new_program
    return agent

#______________________________________________________________________________

class TableDrivenAgent(Agent):
    """This agent selects an action based on the percept sequence.
    It is practical only for tiny domains.
    To customize it you provide a table to the constructor. [Fig. 2.7]"""

    def __init__(self, table):
        "Supply as table a dictionary of all {percept_sequence: action} pairs."
        ## The agent program could in principle be a stateless function, but
        ## because it needs to remember the percept sequence, we make it a
        ## closure over the percepts list.
        Agent.__init__(self)
        percepts = []
        def program(percept):
            percepts.append(percept)
            action = table.get(tuple(percepts))
            return action
        self.program = program


class RandomAgent(Agent):
    "An agent that chooses an action at random, ignoring all percepts."
    def __init__(self, actions):
        Agent.__init__(self)
        self.program = lambda percept: random.choice(actions)


#______________________________________________________________________________

loc_A, loc_B = (0, 0), (1, 0) # The two locations for the Vacuum world

class ReflexVacuumAgent(Agent):
    "A reflex agent for the two-state vacuum environment. [Fig. 2.8]"

    def __init__(self):
        Agent.__init__(self)
        def program((location, status)):
            if status == 'Dirty': return 'Suck'
            elif location == loc_A: return 'Right'
            elif location == loc_B: return 'Left'
        self.program = program


def RandomVacuumAgent():
    "Randomly choose one of the actions from the vacuum environment."
    return RandomAgent(['Right', 'Left', 'Suck', 'NoOp'])


def TableDrivenVacuumAgent():
    "[Fig. 2.3]"
    table = {((loc_A, 'Clean'),): 'Right',
             ((loc_A, 'Dirty'),): 'Suck',
             ((loc_B, 'Clean'),): 'Left',
             ((loc_B, 'Dirty'),): 'Suck',
             ((loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
             ((loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
             # ...
             ((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
             ((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
             # ...
             }
    return TableDrivenAgent(table)


class ModelBasedVacuumAgent(Agent):
    "An agent that keeps track of what locations are clean or dirty."
    def __init__(self):
        Agent.__init__(self)
        model = {loc_A: None, loc_B: None}
        def program((location, status)):
            "Same as ReflexVacuumAgent, except if everything is clean, do NoOp"
            model[location] = status ## Update the model here
            if model[loc_A] == model[loc_B] == 'Clean': return 'NoOp'
            elif status == 'Dirty': return 'Suck'
            elif location == loc_A: return 'Right'
            elif location == loc_B: return 'Left'
        self.program = program

#______________________________________________________________________________

class Environment:
    """Abstract class representing an Environment.  'Real' Environment classes
    inherit from this.  Your Environment will typically need to implement:
        percept:        Define the percept that an agent sees.
        execute_action: Define the effects of executing an action.
                        Also update the agent.performance slot.
    The environment keeps a list of .objects and .agents (which is a subset
    of .objects).  Each agent has a .performance slot, initialized to 0.
    Each object has a .location slot, even though some environments may not
    need this."""

    def __init__(self):
        self.objects = []; self.agents = []

    object_classes = [] ## List of classes that can go into environment

    def percept(self, agent):
        "Return the percept that the agent sees at this point.  Override this."
        abstract

    def execute_action(self, agent, action):
        "Change the world to reflect this action.  Override this."
        abstract

    def default_location(self, object):
        "Default location to place a new object with unspecified location."
        return None

    def exogenous_change(self):
        "If there is spontaneous change in the world, override this."
        pass

    def is_done(self):
        "By default, we're done when we can't find a live agent."
        for agent in self.agents:
            if agent.is_alive(): return False
        return True

    def step(self):
        """Run the environment for one time step.  If the
        actions and exogenous changes are independent, this method will
        do.  If there are interactions between them, you'll need to
        override this method."""
        if not self.is_done():
            actions = [agent.program(self.percept(agent))
                       for agent in self.agents]
            for (agent, action) in zip(self.agents, actions):
                self.execute_action(agent, action)
            self.exogenous_change()

    def run(self, steps=1000):
        """Run the Environment for given number of time steps."""
        for step in range(steps):
            if self.is_done(): return
            self.step()

    def add_object(self, object, location=None):
        """Add an object to the environment, setting its location.  Also keep
        track of objects that are agents.  Shouldn't need to override this."""
        object.location = location or self.default_location(object)
        self.objects.append(object)
        if isinstance(object, Agent):
            object.performance = 0
            self.agents.append(object)
        return self
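
## Usage sketch (an addition, not part of the original module): the docstring
## above says a 'real' Environment normally overrides percept and
## execute_action.  This hypothetical one-percept environment rewards an agent
## for answering 'Hello', just to show the minimal contract.
class HelloEnvironment(Environment):
    "A trivial Environment subclass, used only as an illustrative example."
    def percept(self, agent):
        return 'Greeting?'              ## every agent sees the same percept
    def execute_action(self, agent, action):
        if action == 'Hello':
            agent.performance += 1      ## reward the polite action

def _hello_environment_demo():
    "Run a traced RandomAgent in the sketch environment for a few steps."
    e = HelloEnvironment()
    e.add_object(TraceAgent(RandomAgent(['Hello', 'Goodbye'])))
    e.run(3)
    return e.agents[0].performance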

class XYEnvironment(Environment):
    """This class is for environments on a 2D plane, with locations
    labelled by (x, y) points, either discrete or continuous.  Agents
    perceive objects within a radius.  Each agent in the environment
    has a .location slot which should be a location such as (0, 1),
    and a .holding slot, which should be a list of objects that are
    held."""

    def __init__(self, width=10, height=10):
        update(self, objects=[], agents=[], width=width, height=height)

    def objects_at(self, location):
        "Return all objects exactly at a given location."
        return [obj for obj in self.objects if obj.location == location]

    def objects_near(self, location, radius):
        "Return all objects within radius of location."
        radius2 = radius * radius
        return [obj for obj in self.objects
                if distance2(location, obj.location) <= radius2]

    def percept(self, agent):
        "By default, agent perceives objects within radius r."
        return [self.object_percept(obj, agent)
                for obj in self.objects_near(agent)]

    def execute_action(self, agent, action):
        if action == 'TurnRight':
            agent.heading = self.turn_heading(agent.heading, -1)
        elif action == 'TurnLeft':
            agent.heading = self.turn_heading(agent.heading, +1)
        elif action == 'Forward':
            self.move_to(agent, vector_add(agent.heading, agent.location))
        elif action == 'Grab':
            objs = [obj for obj in self.objects_at(agent.location)
                    if obj.is_grabable(agent)]
            if objs:
                agent.holding.append(objs[0])
        elif action == 'Release':
            if agent.holding:
                agent.holding.pop()
        agent.bump = False

    def object_percept(self, obj, agent): #??? Should go to object?
        "Return the percept for this object."
        return obj.__class__.__name__

    def default_location(self, object):
        return (random.randrange(self.width), random.randrange(self.height))

    def move_to(self, object, destination):
        "Move an object to a new location."
        object.location = destination

    def add_object(self, object, location=(1, 1)):
        Environment.add_object(self, object, location)
        object.holding = []
        object.held = None

    def add_walls(self):
        "Put walls around the entire perimeter of the grid."
        for x in range(self.width):
            self.add_object(Wall(), (x, 0))
            self.add_object(Wall(), (x, self.height-1))
        for y in range(self.height):
            self.add_object(Wall(), (0, y))
            self.add_object(Wall(), (self.width-1, y))

    def turn_heading(self, heading, inc,
                     headings=[(1, 0), (0, 1), (-1, 0), (0, -1)]):
        "Return the heading to the left (inc=+1) or right (inc=-1) in headings."
        return headings[(headings.index(heading) + inc) % len(headings)]

#______________________________________________________________________________
## Vacuum environment

class TrivialVacuumEnvironment(Environment):
    """This environment has two locations, A and B. Each can be Dirty or Clean.
    The agent perceives its location and the location's status. This serves as
    an example of how to implement a simple Environment."""

    def __init__(self):
        Environment.__init__(self)
        self.status = {loc_A: random.choice(['Clean', 'Dirty']),
                       loc_B: random.choice(['Clean', 'Dirty'])}

    def percept(self, agent):
        "Returns the agent's location, and the location status (Dirty/Clean)."
        return (agent.location, self.status[agent.location])

    def execute_action(self, agent, action):
        """Change agent's location and/or location's status; track performance.
        Score 10 for each dirt cleaned; -1 for each move."""
        if action == 'Right':
            agent.location = loc_B
            agent.performance -= 1
        elif action == 'Left':
            agent.location = loc_A
            agent.performance -= 1
        elif action == 'Suck':
            if self.status[agent.location] == 'Dirty':
                agent.performance += 10
            self.status[agent.location] = 'Clean'

    def default_location(self, object):
        "Agents start in either location at random."
        return random.choice([loc_A, loc_B])

class Dirt(Object): pass
class Wall(Object): pass

class VacuumEnvironment(XYEnvironment):
    """The environment of [Ex. 2.12].  Agent perceives dirty or clean,
    and bump (into obstacle) or not; 2D discrete world of unknown size;
    performance measure is 100 for each dirt cleaned, and -1 for
    each turn taken."""
    def __init__(self, width=10, height=10):
        XYEnvironment.__init__(self, width, height)
        self.add_walls()

    object_classes = [Wall, Dirt, ReflexVacuumAgent, RandomVacuumAgent,
                      TableDrivenVacuumAgent, ModelBasedVacuumAgent]

    def percept(self, agent):
        """The percept is a tuple of ('Dirty' or 'Clean', 'Bump' or 'None').
        Unlike the TrivialVacuumEnvironment, location is NOT perceived."""
        status = if_(self.find_at(Dirt, agent.location), 'Dirty', 'Clean')
        bump = if_(agent.bump, 'Bump', 'None')
        return (status, bump)

    def execute_action(self, agent, action):
        if action == 'Suck':
            if self.find_at(Dirt, agent.location):
                agent.performance += 100
        agent.performance -= 1
        XYEnvironment.execute_action(self, agent, action)

#______________________________________________________________________________

class SimpleReflexAgent(Agent):
    """This agent takes action based solely on the percept. [Fig. 2.13]"""

    def __init__(self, rules, interpret_input):
        Agent.__init__(self)
        def program(percept):
            state = interpret_input(percept)
            rule = rule_match(state, rules)
            action = rule.action
            return action
        self.program = program

class ReflexAgentWithState(Agent):
    """This agent takes action based on the percept and state. [Fig. 2.16]"""

    def __init__(self, rules, update_state):
        Agent.__init__(self)
        ## Keep state and action in a mutable cell, because the closure below
        ## cannot rebind variables of the enclosing scope in Python 2.
        memory = {'state': None, 'action': None}
        def program(percept):
            memory['state'] = update_state(memory['state'], memory['action'],
                                           percept)
            rule = rule_match(memory['state'], rules)
            memory['action'] = rule.action
            return memory['action']
        self.program = program

#______________________________________________________________________________
## The Wumpus World

class Gold(Object): pass
class Pit(Object): pass
class Arrow(Object): pass
class Wumpus(Agent): pass
class Explorer(Agent): pass

class WumpusEnvironment(XYEnvironment):
    object_classes = [Wall, Gold, Pit, Arrow, Wumpus, Explorer]
    def __init__(self, width=10, height=10):
        XYEnvironment.__init__(self, width, height)
        self.add_walls()
        ## Needs a lot of work ...

#______________________________________________________________________________

def compare_agents(EnvFactory, AgentFactories, n=10, steps=1000):
    """See how well each of several agents do in n instances of an environment.
    Pass in a factory (constructor) for environments, and several for agents.
    Create n instances of the environment, and run each agent in copies of
    each one for steps.  Return a list of (agent, average-score) tuples."""
    envs = [EnvFactory() for i in range(n)]
    return [(A, test_agent(A, steps, copy.deepcopy(envs)))
            for A in AgentFactories]

def test_agent(AgentFactory, steps, envs):
    "Return the mean score of running an agent in each of the envs, for steps."
    total = 0
    for env in envs:
        agent = AgentFactory()
        env.add_object(agent)
        env.run(steps)
        total += agent.performance
    return float(total)/len(envs)
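
## Usage sketch (an addition, not part of the original module): compare_agents
## pairs each agent factory with its average score.  Exact numbers vary from
## run to run, because the environments and some agents are randomized.
def _compare_vacuum_agents_demo():
    "Compare two vacuum agents over 20 random TrivialVacuumEnvironments."
    results = compare_agents(TrivialVacuumEnvironment,
                             [ModelBasedVacuumAgent, ReflexVacuumAgent],
                             n=20, steps=10)
    for (factory, score) in results:
        print factory.__name__, score
    return results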

#______________________________________________________________________________

_docex = """
a = ReflexVacuumAgent()
a.program
a.program((loc_A, 'Clean')) ==> 'Right'
a.program((loc_B, 'Clean')) ==> 'Left'
a.program((loc_A, 'Dirty')) ==> 'Suck'
a.program((loc_A, 'Dirty')) ==> 'Suck'

e = TrivialVacuumEnvironment()
e.add_object(TraceAgent(ModelBasedVacuumAgent()))
e.run(5)

## Environments, and some agents, are randomized, so the best we can
## give is a range of expected scores.  If this test fails, it does
## not necessarily mean something is wrong.
envs = [TrivialVacuumEnvironment() for i in range(100)]
def testv(A): return test_agent(A, 4, copy.deepcopy(envs))
testv(ModelBasedVacuumAgent)
(7 < _ < 11) ==> True
testv(ReflexVacuumAgent)
(5 < _ < 9) ==> True
testv(TableDrivenVacuumAgent)
(2 < _ < 6) ==> True
testv(RandomVacuumAgent)
(0.5 < _ < 3) ==> True
"""

#______________________________________________________________________________
# GUI - Graphical User Interface for Environments
# If you do not have Tkinter installed, either get a new installation of Python
# (Tkinter is standard in all new releases), or delete the rest of this file
# and muddle through without a GUI.

'''
import Tkinter as tk

class EnvFrame(tk.Frame):
    def __init__(self, env, title='AIMA GUI', cellwidth=50, n=10):
        update(self, cellwidth=cellwidth, running=False, delay=1.0)
        self.n = n
        self.running = 0
        self.delay = 1.0
        self.env = env
        tk.Frame.__init__(self, None, width=(cellwidth+2)*n, height=(cellwidth+2)*n)
        #self.title(title)
        # Toolbar
        toolbar = tk.Frame(self, relief='raised', bd=2)
        toolbar.pack(side='top', fill='x')
        for txt, cmd in [('Step >', self.env.step), ('Run >>', self.run),
                         ('Stop [ ]', self.stop)]:
            tk.Button(toolbar, text=txt, command=cmd).pack(side='left')
        tk.Label(toolbar, text='Delay').pack(side='left')
        scale = tk.Scale(toolbar, orient='h', from_=0.0, to=10, resolution=0.5,
                         command=lambda d: setattr(self, 'delay', d))
        scale.set(self.delay)
        scale.pack(side='left')
        # Canvas for drawing on
        self.canvas = tk.Canvas(self, width=(cellwidth+1)*n,
                                height=(cellwidth+1)*n, background="white")
        self.canvas.bind('<Button-1>', self.left) ## What should this do?
        self.canvas.bind('<Button-2>', self.edit_objects)
        self.canvas.bind('<Button-3>', self.add_object)
        if cellwidth:
            c = self.canvas
            for i in range(1, n+1):
                c.create_line(0, i*cellwidth, n*cellwidth, i*cellwidth)
                c.create_line(i*cellwidth, 0, i*cellwidth, n*cellwidth)
                c.pack(expand=1, fill='both')
        self.pack()

    def background_run(self):
        if self.running:
            self.env.step()
            ms = int(1000 * max(float(self.delay), 0.5))
            self.after(ms, self.background_run)

    def run(self):
        print 'run'
        self.running = 1
        self.background_run()

    def stop(self):
        print 'stop'
        self.running = 0

    def left(self, event):
        print 'left at ', event.x/50, event.y/50

    def edit_objects(self, event):
        """Choose an object within radius and edit its fields."""
        pass

    def add_object(self, event):
        ## This is supposed to pop up a menu of Object classes; you choose the
        ## one you want to put in this square.  Not working yet.
        menu = tk.Menu(self, title='Edit (%d, %d)' % (event.x/50, event.y/50))
        for (txt, cmd) in [('Wumpus', self.run), ('Pit', self.run)]:
            menu.add_command(label=txt, command=cmd)
        menu.tk_popup(event.x + self.winfo_rootx(),
                      event.y + self.winfo_rooty())

#image=PhotoImage(file=r"C:\Documents and Settings\pnorvig\Desktop\wumpus.gif")
#self.images = []
#self.images.append(image)
#c.create_image(200,200,anchor=NW,image=image)

#v = VacuumEnvironment(); w = EnvFrame(v);
'''
@@ -1,451 +0,0 @@
"""CSP (Constraint Satisfaction Problems) problems and solvers. (Chapter 5)."""

from __future__ import generators
from utils import *
import search
import types

class CSP(search.Problem):
    """This class describes finite-domain Constraint Satisfaction Problems.
    A CSP is specified by the following inputs:
        vars        A list of variables; each is atomic (e.g. int or string).
        domains     A dict of {var:[possible_value, ...]} entries.
        neighbors   A dict of {var:[var,...]} that for each variable lists
                    the other variables that participate in constraints.
        constraints A function f(A, a, B, b) that returns true if neighbors
                    A, B satisfy the constraint when they have values A=a, B=b
    In the textbook and in most mathematical definitions, the
    constraints are specified as explicit pairs of allowable values,
    but the formulation here is easier to express and more compact for
    most cases.  (For example, the n-Queens problem can be represented
    in O(n) space using this notation, instead of O(N^4) for the
    explicit representation.)  In terms of describing the CSP as a
    problem, that's all there is.

    However, the class also supports data structures and methods that help you
    solve CSPs by calling a search function on the CSP.  Methods and slots are
    as follows, where the argument 'a' represents an assignment, which is a
    dict of {var:val} entries:
        assign(var, val, a)     Assign a[var] = val; do other bookkeeping
        unassign(var, a)        Do del a[var], plus other bookkeeping
        nconflicts(var, val, a) Return the number of other variables that
                                conflict with var=val
        curr_domains[var]       Slot: remaining consistent values for var
                                Used by constraint propagation routines.
    The following methods are used only by graph_search and tree_search:
        succ()                  Return a list of (action, state) pairs
        goal_test(a)            Return true if all constraints satisfied
    The following are just for debugging purposes:
        nassigns                Slot: tracks the number of assignments made
        display(a)              Print a human-readable representation
    """

    def __init__(self, vars, domains, neighbors, constraints):
        "Construct a CSP problem. If vars is empty, it becomes domains.keys()."
        vars = vars or domains.keys()
        update(self, vars=vars, domains=domains,
               neighbors=neighbors, constraints=constraints,
               initial={}, curr_domains=None, pruned=None, nassigns=0)

    def assign(self, var, val, assignment):
        """Add {var: val} to assignment; discard the old value if any.
        Do bookkeeping for curr_domains and nassigns."""
        self.nassigns += 1
        assignment[var] = val
        if self.curr_domains:
            if self.fc:
                self.forward_check(var, val, assignment)
            if self.mac:
                AC3(self, [(Xk, var) for Xk in self.neighbors[var]])

    def unassign(self, var, assignment):
        """Remove {var: val} from assignment; that is backtrack.
        DO NOT call this if you are changing a variable to a new value;
        just call assign for that."""
        if var in assignment:
            # Reset the curr_domain to be the full original domain
            if self.curr_domains:
                self.curr_domains[var] = self.domains[var][:]
            del assignment[var]

    def nconflicts(self, var, val, assignment):
        "Return the number of conflicts var=val has with other variables."
        # Subclasses may implement this more efficiently
        def conflict(var2):
            val2 = assignment.get(var2, None)
            return val2 is not None and not self.constraints(var, val, var2, val2)
        return count_if(conflict, self.neighbors[var])

    def forward_check(self, var, val, assignment):
        "Do forward checking (current domain reduction) for this assignment."
        if self.curr_domains:
            # Restore prunings from previous value of var
            for (B, b) in self.pruned[var]:
                self.curr_domains[B].append(b)
            self.pruned[var] = []
            # Prune any other B=b assignment that conflicts with var=val
            for B in self.neighbors[var]:
                if B not in assignment:
                    for b in self.curr_domains[B][:]:
                        if not self.constraints(var, val, B, b):
                            self.curr_domains[B].remove(b)
                            self.pruned[var].append((B, b))

    def display(self, assignment):
        "Show a human-readable representation of the CSP."
        # Subclasses can print in a prettier way, or display with a GUI
        print 'CSP:', self, 'with assignment:', assignment

    ## These methods are for the tree and graph search interface:

    def succ(self, assignment):
        "Return a list of (action, state) pairs."
        if len(assignment) == len(self.vars):
            return []
        else:
            var = find_if(lambda v: v not in assignment, self.vars)
            result = []
            for val in self.domains[var]:
                if self.nconflicts(var, val, assignment) == 0:
                    a = assignment.copy(); a[var] = val
                    result.append(((var, val), a))
            return result

    def goal_test(self, assignment):
        "The goal is to assign all vars, with all constraints satisfied."
        return (len(assignment) == len(self.vars) and
                every(lambda var: self.nconflicts(var, assignment[var],
                                                  assignment) == 0,
                      self.vars))

    ## This is for min_conflicts search

    def conflicted_vars(self, current):
        "Return a list of variables in current assignment that are in conflict"
        return [var for var in self.vars
                if self.nconflicts(var, current[var], current) > 0]
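
## Usage sketch (an addition, not in the original file): build a CSP directly
## from the inputs described in the class docstring.  Here two variables X and
## Y must take different values from {1, 2}.  backtracking_search is defined
## later in this module, so the call only happens when the demo is invoked.
def _tiny_csp_demo():
    "Construct and solve a two-variable 'not equal' CSP."
    vars = ['X', 'Y']
    domains = {'X': [1, 2], 'Y': [1, 2]}
    neighbors = {'X': ['Y'], 'Y': ['X']}
    def not_equal(A, a, B, b):
        return a != b
    csp = CSP(vars, domains, neighbors, not_equal)
    return backtracking_search(csp)   ## one consistent assignment, e.g. {'X': 2, 'Y': 1}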

#______________________________________________________________________________
# CSP Backtracking Search

def backtracking_search(csp, mcv=False, lcv=False, fc=False, mac=False):
    """Set up to do recursive backtracking search. Allow the following options:
    mcv - If true, use Most Constrained Variable Heuristic
    lcv - If true, use Least Constraining Value Heuristic
    fc  - If true, use Forward Checking
    mac - If true, use Maintaining Arc Consistency. [Fig. 5.3]
    >>> backtracking_search(australia)
    {'WA': 'B', 'Q': 'B', 'T': 'B', 'V': 'B', 'SA': 'G', 'NT': 'R', 'NSW': 'R'}
    """
    if fc or mac:
        csp.curr_domains, csp.pruned = {}, {}
        for v in csp.vars:
            csp.curr_domains[v] = csp.domains[v][:]
            csp.pruned[v] = []
    update(csp, mcv=mcv, lcv=lcv, fc=fc, mac=mac)
    return recursive_backtracking({}, csp)

def recursive_backtracking(assignment, csp):
    """Search for a consistent assignment for the csp.
    Each recursive call chooses a variable, and considers values for it."""
    if len(assignment) == len(csp.vars):
        return assignment
    var = select_unassigned_variable(assignment, csp)
    for val in order_domain_values(var, assignment, csp):
        if csp.fc or csp.nconflicts(var, val, assignment) == 0:
            csp.assign(var, val, assignment)
            result = recursive_backtracking(assignment, csp)
            if result is not None:
                return result
    csp.unassign(var, assignment)
    return None

def select_unassigned_variable(assignment, csp):
    """Select the variable to work on next: the most constrained variable if
    mcv is specified, else the first unassigned variable."""
    if csp.mcv: # Most Constrained Variable
        unassigned = [v for v in csp.vars if v not in assignment]
        return argmin_random_tie(unassigned,
                                 lambda var: -num_legal_values(csp, var, assignment))
    else: # First unassigned variable
        for v in csp.vars:
            if v not in assignment:
                return v

def order_domain_values(var, assignment, csp):
    "Decide what order to consider the domain values."
    if csp.curr_domains:
        domain = csp.curr_domains[var]
    else:
        domain = csp.domains[var][:]
    if csp.lcv:
        # If LCV is specified, consider values with fewer conflicts first
        key = lambda val: csp.nconflicts(var, val, assignment)
        domain.sort(key=key, reverse=True) # values are yielded by popping the end
    while domain:
        yield domain.pop()

def num_legal_values(csp, var, assignment):
    if csp.curr_domains:
        return len(csp.curr_domains[var])
    else:
        return count_if(lambda val: csp.nconflicts(var, val, assignment) == 0,
                        csp.domains[var])

#______________________________________________________________________________
# Constraint Propagation with AC-3

def AC3(csp, queue=None):
    """[Fig. 5.7]"""
    if queue is None:
        queue = [(Xi, Xk) for Xi in csp.vars for Xk in csp.neighbors[Xi]]
    while queue:
        (Xi, Xj) = queue.pop()
        if remove_inconsistent_values(csp, Xi, Xj):
            for Xk in csp.neighbors[Xi]:
                queue.append((Xk, Xi))

def remove_inconsistent_values(csp, Xi, Xj):
    "Return true if we remove a value."
    removed = False
    for x in csp.curr_domains[Xi][:]:
        # If Xi=x conflicts with Xj=y for every possible y, eliminate Xi=x
        if every(lambda y: not csp.constraints(Xi, x, Xj, y),
                 csp.curr_domains[Xj]):
            csp.curr_domains[Xi].remove(x)
            removed = True
    return removed
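
## Usage sketch (an addition, not in the original file): AC3 prunes
## csp.curr_domains, so initialize it from the full domains first -- the same
## setup that backtracking_search performs when fc or mac is requested.
def _ac3_demo(csp):
    "Make csp arc-consistent in place and return its reduced domains."
    csp.curr_domains = {}
    for v in csp.vars:
        csp.curr_domains[v] = csp.domains[v][:]
    AC3(csp)
    return csp.curr_domains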

#______________________________________________________________________________
# Min-conflicts hillclimbing search for CSPs

def min_conflicts(csp, max_steps=1000000):
    """Solve a CSP by stochastic hillclimbing on the number of conflicts."""
    # Generate a complete assignment for all vars (probably with conflicts)
    current = {}; csp.current = current
    for var in csp.vars:
        val = min_conflicts_value(csp, var, current)
        csp.assign(var, val, current)
    # Now repeatedly choose a random conflicted variable and change it
    for i in range(max_steps):
        print i
        conflicted = csp.conflicted_vars(current)
        if not conflicted:
            return current
        var = random.choice(conflicted)
        val = min_conflicts_value(csp, var, current)
        csp.assign(var, val, current)
    return None

def min_conflicts_value(csp, var, current):
    """Return the value that will give var the least number of conflicts.
    If there is a tie, choose at random."""
    return argmin_random_tie(csp.domains[var],
                             lambda val: csp.nconflicts(var, val, current))

#______________________________________________________________________________
# Map-Coloring Problems

class UniversalDict:
    """A universal dict maps any key to the same value. We use it here
    as the domains dict for CSPs in which all vars have the same domain.
    >>> d = UniversalDict(42)
    >>> d['life']
    42
    """
    def __init__(self, value): self.value = value
    def __getitem__(self, key): return self.value
    def __repr__(self): return '{Any: %r}' % self.value

def different_values_constraint(A, a, B, b):
    "A constraint saying two neighboring variables must differ in value."
    return a != b

def MapColoringCSP(colors, neighbors):
    """Make a CSP for the problem of coloring a map with different colors
    for any two adjacent regions.  Arguments are a list of colors, and a
    dict of {region: [neighbor,...]} entries.  This dict may also be
    specified as a string of the form defined by parse_neighbors."""
    if isinstance(neighbors, str):
        neighbors = parse_neighbors(neighbors)
    return CSP(neighbors.keys(), UniversalDict(colors), neighbors,
               different_values_constraint)

def parse_neighbors(neighbors, vars=[]):
    """Convert a string of the form 'X: Y Z; Y: Z' into a dict mapping
    regions to neighbors.  The syntax is a region name followed by a ':'
    followed by zero or more region names, followed by ';', repeated for
    each region name.  If you say 'X: Y' you don't need 'Y: X'.
    >>> parse_neighbors('X: Y Z; Y: Z')
    {'Y': ['X', 'Z'], 'X': ['Y', 'Z'], 'Z': ['X', 'Y']}
    """
    dict = DefaultDict([])
    for var in vars:
        dict[var] = []
    specs = [spec.split(':') for spec in neighbors.split(';')]
    for (A, Aneighbors) in specs:
        A = A.strip()
        dict.setdefault(A, [])
        for B in Aneighbors.split():
            dict[A].append(B)
            dict[B].append(A)
    return dict

australia = MapColoringCSP(list('RGB'),
                           'SA: WA NT Q NSW V; NT: WA Q; NSW: Q V; T: ')

usa = MapColoringCSP(list('RGBY'),
        """WA: OR ID; OR: ID NV CA; CA: NV AZ; NV: ID UT AZ; ID: MT WY UT;
        UT: WY CO AZ; MT: ND SD WY; WY: SD NE CO; CO: NE KA OK NM; NM: OK TX;
        ND: MN SD; SD: MN IA NE; NE: IA MO KA; KA: MO OK; OK: MO AR TX;
        TX: AR LA; MN: WI IA; IA: WI IL MO; MO: IL KY TN AR; AR: MS TN LA;
        LA: MS; WI: MI IL; IL: IN; IN: KY; MS: TN AL; AL: TN GA FL; MI: OH;
        OH: PA WV KY; KY: WV VA TN; TN: VA NC GA; GA: NC SC FL;
        PA: NY NJ DE MD WV; WV: MD VA; VA: MD DC NC; NC: SC; NY: VT MA CA NJ;
        NJ: DE; DE: MD; MD: DC; VT: NH MA; MA: NH RI CT; CT: RI; ME: NH;
        HI: ; AK: """)

#______________________________________________________________________________
# n-Queens Problem

def queen_constraint(A, a, B, b):
    """Constraint is satisfied (true) if A, B are really the same variable,
    or if they are not in the same row, down diagonal, or up diagonal."""
    return A == B or (a != b and A + a != B + b and A - a != B - b)

class NQueensCSP(CSP):
    """Make a CSP for the nQueens problem for search with min_conflicts.
    Suitable for large n, it uses only data structures of size O(n).
    Think of placing queens one per column, from left to right.
    That means position (x, y) represents (var, val) in the CSP.
    The main structures are three arrays to count queens that could conflict:
        rows[i]      Number of queens in the ith row (i.e. val == i)
        downs[i]     Number of queens in the \ diagonal
                     such that their (x, y) coordinates sum to i
        ups[i]       Number of queens in the / diagonal
                     such that their (x, y) coordinates have x-y+n-1 = i
    We increment/decrement these counts each time a queen is placed/moved from
    a row/diagonal.  So moving is O(1), as is nconflicts.  But choosing
    a variable, and a best value for the variable, are each O(n).
    If you want, you can keep track of conflicted vars, then variable
    selection will also be O(1).
    >>> len(backtracking_search(NQueensCSP(8)))
    8
    >>> len(min_conflicts(NQueensCSP(8)))
    8
    """
    def __init__(self, n):
        """Initialize data structures for n Queens."""
        CSP.__init__(self, range(n), UniversalDict(range(n)),
                     UniversalDict(range(n)), queen_constraint)
        update(self, rows=[0]*n, ups=[0]*(2*n - 1), downs=[0]*(2*n - 1))

    def nconflicts(self, var, val, assignment):
        """The number of conflicts, as recorded with each assignment.
        Count conflicts in row and in up, down diagonals.  If there
        is a queen there, it can't conflict with itself, so subtract 3."""
        n = len(self.vars)
        c = self.rows[val] + self.downs[var+val] + self.ups[var-val+n-1]
        if assignment.get(var, None) == val:
            c -= 3
        return c

    def assign(self, var, val, assignment):
        "Assign var, and keep track of conflicts."
        oldval = assignment.get(var, None)
        if val != oldval:
            if oldval is not None: # Remove old val if there was one
                self.record_conflict(assignment, var, oldval, -1)
            self.record_conflict(assignment, var, val, +1)
            CSP.assign(self, var, val, assignment)

    def unassign(self, var, assignment):
        "Remove var from assignment (if it is there) and track conflicts."
        if var in assignment:
            self.record_conflict(assignment, var, assignment[var], -1)
        CSP.unassign(self, var, assignment)

    def record_conflict(self, assignment, var, val, delta):
        "Record conflicts caused by addition or deletion of a Queen."
        n = len(self.vars)
        self.rows[val] += delta
        self.downs[var + val] += delta
        self.ups[var - val + n - 1] += delta

    def display(self, assignment):
        "Print the queens and the nconflicts values (for debugging)."
        n = len(self.vars)
        for val in range(n):
            for var in range(n):
                if assignment.get(var, '') == val: ch = 'Q'
                elif (var + val) % 2 == 0: ch = '.'
                else: ch = '-'
                print ch,
            print '    ',
            for var in range(n):
                if assignment.get(var, '') == val: ch = '*'
                else: ch = ' '
                print str(self.nconflicts(var, val, assignment)) + ch,
            print
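
## Worked example (an addition, not in the original file): for n = 4, a queen
## at column var = 2, row val = 1 is counted in rows[1] (its row),
## downs[2 + 1] = downs[3] (the \ diagonal, indexed by var + val), and
## ups[2 - 1 + 4 - 1] = ups[4] (the / diagonal, indexed by var - val + n - 1),
## which is exactly what record_conflict increments with delta = +1.
def _nqueens_bookkeeping_demo():
    "Place one queen and return the three counters that record it."
    csp = NQueensCSP(4)
    assignment = {}
    csp.assign(2, 1, assignment)
    return csp.rows[1], csp.downs[3], csp.ups[4]   ## (1, 1, 1)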

#______________________________________________________________________________
# The Zebra Puzzle

def Zebra():
    "Return an instance of the Zebra Puzzle."
    Colors = 'Red Yellow Blue Green Ivory'.split()
    Pets = 'Dog Fox Snails Horse Zebra'.split()
    Drinks = 'OJ Tea Coffee Milk Water'.split()
    Countries = 'Englishman Spaniard Norwegian Ukranian Japanese'.split()
    Smokes = 'Kools Chesterfields Winston LuckyStrike Parliaments'.split()
    vars = Colors + Pets + Drinks + Countries + Smokes
    domains = {}
    for var in vars:
        domains[var] = range(1, 6)
    domains['Norwegian'] = [1]
    domains['Milk'] = [3]
    neighbors = parse_neighbors("""Englishman: Red;
                Spaniard: Dog; Kools: Yellow; Chesterfields: Fox;
                Norwegian: Blue; Winston: Snails; LuckyStrike: OJ;
                Ukranian: Tea; Japanese: Parliaments; Kools: Horse;
                Coffee: Green; Green: Ivory""", vars)
    for type in [Colors, Pets, Drinks, Countries, Smokes]:
        for A in type:
            for B in type:
                if A != B:
                    if B not in neighbors[A]: neighbors[A].append(B)
                    if A not in neighbors[B]: neighbors[B].append(A)
    def zebra_constraint(A, a, B, b, recurse=0):
        same = (a == b)
        next_to = abs(a - b) == 1
        if A == 'Englishman' and B == 'Red': return same
        if A == 'Spaniard' and B == 'Dog': return same
        if A == 'Chesterfields' and B == 'Fox': return next_to
        if A == 'Norwegian' and B == 'Blue': return next_to
        if A == 'Kools' and B == 'Yellow': return same
        if A == 'Winston' and B == 'Snails': return same
        if A == 'LuckyStrike' and B == 'OJ': return same
        if A == 'Ukranian' and B == 'Tea': return same
        if A == 'Japanese' and B == 'Parliaments': return same
        if A == 'Kools' and B == 'Horse': return next_to
        if A == 'Coffee' and B == 'Green': return same
        if A == 'Green' and B == 'Ivory': return (a - 1) == b
        if recurse == 0: return zebra_constraint(B, b, A, a, 1)
        if ((A in Colors and B in Colors) or
            (A in Pets and B in Pets) or
            (A in Drinks and B in Drinks) or
            (A in Countries and B in Countries) or
            (A in Smokes and B in Smokes)): return not same
        raise Exception('error')
    return CSP(vars, domains, neighbors, zebra_constraint)

def solve_zebra(algorithm=min_conflicts, **args):
    z = Zebra()
    ans = algorithm(z, **args)
    for h in range(1, 6):
        print 'House', h,
        for (var, val) in ans.items():
            if val == h: print var,
        print
    return ans['Zebra'], ans['Water'], z.nassigns, ans

solve_zebra()
@@ -1,8 +0,0 @@

### demo

>>> min_conflicts(australia)
{'WA': 'B', 'Q': 'B', 'T': 'G', 'V': 'B', 'SA': 'R', 'NT': 'G', 'NSW': 'G'}

>>> min_conflicts(usa)
{'WA': 'B', 'DE': 'R', 'DC': 'Y', 'WI': 'G', 'WV': 'Y', 'HI': 'R', 'FL': 'B', 'WY': 'Y', 'NH': 'R', 'NJ': 'Y', 'NM': 'Y', 'TX': 'G', 'LA': 'R', 'NC': 'B', 'ND': 'Y', 'NE': 'B', 'TN': 'G', 'NY': 'R', 'PA': 'B', 'RI': 'R', 'NV': 'Y', 'VA': 'R', 'CO': 'R', 'CA': 'B', 'AL': 'R', 'AR': 'Y', 'VT': 'Y', 'IL': 'B', 'GA': 'Y', 'IN': 'Y', 'IA': 'Y', 'OK': 'B', 'AZ': 'R', 'ID': 'G', 'CT': 'Y', 'ME': 'B', 'MD': 'G', 'KA': 'Y', 'MA': 'B', 'OH': 'R', 'UT': 'B', 'MO': 'R', 'MN': 'R', 'MI': 'B', 'AK': 'B', 'MT': 'R', 'MS': 'B', 'SC': 'G', 'KY': 'B', 'OR': 'R', 'SD': 'G'}
@@ -1,43 +0,0 @@
"""Run all doctests from modules on the command line.  For each
module, if there is a "module.txt" file, run that too.  However,
if the module.txt file contains the comment "# demo",
then the remainder of the file has its ">>>" lines executed,
but not run through doctest.  The idea is that you can use this
to demo statements that return random or otherwise variable results.

Example usage:

    python doctests.py *.py
"""

import doctest, re

def run_tests(modules, verbose=None):
    "Run tests for a list of modules; then summarize results."
    for module in modules:
        tests, demos = split_extra_tests(module.__name__ + ".txt")
        if tests:
            if '__doc__' not in dir(module):
                module.__doc__ = ''
            module.__doc__ += '\n' + tests + '\n'
        doctest.testmod(module, report=0, verbose=verbose)
        if demos:
            for stmt in re.findall(">>> (.*)", demos):
                exec stmt in module.__dict__
    doctest.master.summarize()


def split_extra_tests(filename):
    """Take a filename and, if it exists, return a 2-tuple of
    the parts before and after '# demo'."""
    try:
        contents = open(filename).read() + '# demo'
        return contents.split("# demo", 1)
    except IOError:
        return ('', '')

if __name__ == "__main__":
    import sys
    modules = [__import__(name.replace('.py', ''))
               for name in sys.argv if name != "-v"]
    run_tests(modules, ("-v" in sys.argv))
@@ -1,21 +0,0 @@
### This is an example module.txt file.
### It should contain unit tests and their expected results:

>>> 2 + 2
4

>>> '2' + '2'
'22'

### demo

### After the part that says 'demo' we have statements that
### are intended not as unit tests, but as demos of how to
### use the functions and methods in the module.  The
### statements are executed, but the results are not
### compared to the expected results.  This can be useful
### for nondeterministic functions:

>>> import random; random.choice('abc')
'c'
@@ -1,286 +0,0 @@
"""Games, or Adversarial Search. (Chapter 6)"""

from utils import *
import random

#______________________________________________________________________________
# Minimax Search

def minimax_decision(state, game):
    """Given a state in a game, calculate the best move by searching
    forward all the way to the terminal states. [Fig. 6.4]"""

    player = game.to_move(state)

    def max_value(state):
        if game.terminal_test(state):
            return game.utility(state, player)
        v = -infinity
        for (a, s) in game.successors(state):
            v = max(v, min_value(s))
        return v

    def min_value(state):
        if game.terminal_test(state):
            return game.utility(state, player)
        v = infinity
        for (a, s) in game.successors(state):
            v = min(v, max_value(s))
        return v

    # Body of minimax_decision starts here:
    action, state = argmax(game.successors(state),
                           lambda ((a, s)): min_value(s))
    return action


#______________________________________________________________________________

def alphabeta_full_search(state, game):
    """Search game to determine best action; use alpha-beta pruning.
    As in [Fig. 6.7], this version searches all the way to the leaves."""

    player = game.to_move(state)

    def max_value(state, alpha, beta):
        if game.terminal_test(state):
            return game.utility(state, player)
        v = -infinity
        for (a, s) in game.successors(state):
            v = max(v, min_value(s, alpha, beta))
            if v >= beta:
                return v
            alpha = max(alpha, v)
        return v

    def min_value(state, alpha, beta):
        if game.terminal_test(state):
            return game.utility(state, player)
        v = infinity
        for (a, s) in game.successors(state):
            v = min(v, max_value(s, alpha, beta))
            if v <= alpha:
                return v
            beta = min(beta, v)
        return v

    # Body of alphabeta_full_search starts here:
    action, state = argmax(game.successors(state),
                           lambda ((a, s)): min_value(s, -infinity, infinity))
    return action

def alphabeta_search(state, game, d=4, cutoff_test=None, eval_fn=None):
    """Search game to determine best action; use alpha-beta pruning.
    This version cuts off search and uses an evaluation function."""

    player = game.to_move(state)

    def max_value(state, alpha, beta, depth):
        if cutoff_test(state, depth):
            return eval_fn(state)
        v = -infinity
        for (a, s) in game.successors(state):
            v = max(v, min_value(s, alpha, beta, depth+1))
            if v >= beta:
                return v
            alpha = max(alpha, v)
        return v

    def min_value(state, alpha, beta, depth):
        if cutoff_test(state, depth):
            return eval_fn(state)
        v = infinity
        for (a, s) in game.successors(state):
            v = min(v, max_value(s, alpha, beta, depth+1))
            if v <= alpha:
                return v
            beta = min(beta, v)
        return v

    # Body of alphabeta_search starts here:
    # The default test cuts off at depth d or at a terminal state
    cutoff_test = (cutoff_test or
                   (lambda state, depth: depth > d or game.terminal_test(state)))
    eval_fn = eval_fn or (lambda state: game.utility(state, player))
    action, state = argmax(game.successors(state),
                           lambda ((a, s)): min_value(s, -infinity, infinity, 0))
    return action

#______________________________________________________________________________
# Players for Games

def query_player(game, state):
    "Make a move by querying standard input."
    game.display(state)
    return num_or_str(raw_input('Your move? '))

def random_player(game, state):
    "A player that chooses a legal move at random."
    return random.choice(game.legal_moves(state))

def alphabeta_player(game, state):
    return alphabeta_search(state, game)

def play_game(game, *players):
    "Play an n-person, move-alternating game."
    state = game.initial
    while True:
        for player in players:
            move = player(game, state)
            state = game.make_move(move, state)
            if game.terminal_test(state):
                return game.utility(state, players[0])
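
## Usage sketch (an addition, not in the original file): play_game drives any
## Game subclass with one move-choosing function per player.  TicTacToe is
## defined below in this module, and the outcome is random, so no particular
## result is claimed here.
def _random_tictactoe_demo():
    "Play one game of TicTacToe between two random players."
    game = TicTacToe()
    return play_game(game, random_player, random_player)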

#______________________________________________________________________________
# Some Sample Games

class Game:
    """A game is similar to a problem, but it has a utility for each
    state and a terminal test instead of a path cost and a goal
    test.  To create a game, subclass this class and implement
    legal_moves, make_move, utility, and terminal_test.  You may
    override display and successors or you can inherit their default
    methods.  You will also need to set the .initial attribute to the
    initial state; this can be done in the constructor."""

    def legal_moves(self, state):
        "Return a list of the allowable moves at this point."
        abstract

    def make_move(self, move, state):
        "Return the state that results from making a move from a state."
        abstract

    def utility(self, state, player):
        "Return the value of this final state to player."
        abstract

    def terminal_test(self, state):
        "Return True if this is a final state for the game."
        return not self.legal_moves(state)

    def to_move(self, state):
        "Return the player whose move it is in this state."
        return state.to_move

    def display(self, state):
        "Print or otherwise display the state."
        print state

    def successors(self, state):
        "Return a list of legal (move, state) pairs."
        return [(move, self.make_move(move, state))
                for move in self.legal_moves(state)]

    def __repr__(self):
        return '<%s>' % self.__class__.__name__

class Fig62Game(Game):
    """The game represented in [Fig. 6.2]. Serves as a simple test case.
    >>> g = Fig62Game()
    >>> minimax_decision('A', g)
    'a1'
    >>> alphabeta_full_search('A', g)
    'a1'
    >>> alphabeta_search('A', g)
    'a1'
    """
    succs = {'A': [('a1', 'B'), ('a2', 'C'), ('a3', 'D')],
             'B': [('b1', 'B1'), ('b2', 'B2'), ('b3', 'B3')],
             'C': [('c1', 'C1'), ('c2', 'C2'), ('c3', 'C3')],
             'D': [('d1', 'D1'), ('d2', 'D2'), ('d3', 'D3')]}
    utils = Dict(B1=3, B2=12, B3=8, C1=2, C2=4, C3=6, D1=14, D2=5, D3=2)
    initial = 'A'

    def successors(self, state):
        return self.succs.get(state, [])

    def utility(self, state, player):
        if player == 'MAX':
            return self.utils[state]
        else:
            return -self.utils[state]

    def terminal_test(self, state):
        return state not in ('A', 'B', 'C', 'D')

    def to_move(self, state):
        return if_(state in 'BCD', 'MIN', 'MAX')

class TicTacToe(Game):
    """Play TicTacToe on an h x v board, with Max (first player) playing 'X'.
    A state has the player to move, a cached utility, a list of moves in
    the form of a list of (x, y) positions, and a board, in the form of
    a dict of {(x, y): Player} entries, where Player is 'X' or 'O'."""
    def __init__(self, h=3, v=3, k=3):
        update(self, h=h, v=v, k=k)
        moves = [(x, y) for x in range(1, h+1)
                 for y in range(1, v+1)]
        self.initial = Struct(to_move='X', utility=0, board={}, moves=moves)

    def legal_moves(self, state):
        "Legal moves are any square not yet taken."
        return state.moves

    def make_move(self, move, state):
        if move not in state.moves:
            return state # Illegal move has no effect
        board = state.board.copy(); board[move] = state.to_move
        moves = list(state.moves); moves.remove(move)
        return Struct(to_move=if_(state.to_move == 'X', 'O', 'X'),
                      utility=self.compute_utility(board, move, state.to_move),
                      board=board, moves=moves)

    def utility(self, state, player):
        "Return the value to player; 1 for win, -1 for loss, 0 otherwise."
        return if_(player == 'X', state.utility, -state.utility)

    def terminal_test(self, state):
        "A state is terminal if it is won or there are no empty squares."
        return state.utility != 0 or len(state.moves) == 0

    def display(self, state):
        board = state.board
        for x in range(1, self.h+1):
            for y in range(1, self.v+1):
                print board.get((x, y), '.'),
            print

    def compute_utility(self, board, move, player):
        "If X wins with this move, return 1; if O return -1; else return 0."
        if (self.k_in_row(board, move, player, (0, 1)) or
            self.k_in_row(board, move, player, (1, 0)) or
            self.k_in_row(board, move, player, (1, -1)) or
            self.k_in_row(board, move, player, (1, 1))):
            return if_(player == 'X', +1, -1)
        else:
            return 0

    def k_in_row(self, board, move, player, (delta_x, delta_y)):
        "Return true if there is a line through move on board for player."
        x, y = move
        n = 0 # n is number of moves in row
        while board.get((x, y)) == player:
            n += 1
            x, y = x + delta_x, y + delta_y
        x, y = move
        while board.get((x, y)) == player:
            n += 1
            x, y = x - delta_x, y - delta_y
        n -= 1 # Because we counted move itself twice
        return n >= self.k
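
## Worked example (an addition, not in the original file): k_in_row counts
## outward from the square just played, in both directions along one line,
## then subtracts 1 because the played square is counted twice.
def _k_in_row_demo():
    "Three X's across the top row complete a line for k = 3."
    t = TicTacToe()
    board = {(1, 1): 'X', (1, 2): 'X', (1, 3): 'X'}
    return t.k_in_row(board, (1, 2), 'X', (0, 1))   ## True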

class ConnectFour(TicTacToe):
    """A TicTacToe-like game in which you can only make a move on the bottom
    row, or in a square directly above an occupied square.  Traditionally
    played on a 7x6 board and requiring 4 in a row."""

    def __init__(self, h=7, v=6, k=4):
        TicTacToe.__init__(self, h, v, k)

    def legal_moves(self, state):
        "Legal moves are the bottom row, or any square directly above a taken square."
        return [(x, y) for (x, y) in state.moves
                if y == 1 or (x, y-1) in state.board]
@ -1,586 +0,0 @@
|
||||
"""Learn to estimate functions from examples. (Chapters 18-20)"""
|
||||
|
||||
from utils import *
|
||||
import agents, random, operator
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
class DataSet:
|
||||
"""A data set for a machine learning problem. It has the following fields:
|
||||
|
||||
d.examples A list of examples. Each one is a list of attribute values.
|
||||
d.attrs A list of integers to index into an example, so example[attr]
|
||||
gives a value. Normally the same as range(len(d.examples)).
|
||||
d.attrnames Optional list of mnemonic names for corresponding attrs.
|
||||
d.target The attribute that a learning algorithm will try to predict.
|
||||
By default the final attribute.
|
||||
d.inputs The list of attrs without the target.
|
||||
d.values A list of lists, each sublist is the set of possible
|
||||
values for the corresponding attribute. If None, it
|
||||
is computed from the known examples by self.setproblem.
|
||||
If not None, an erroneous value raises ValueError.
|
||||
d.name Name of the data set (for output display only).
|
||||
d.source URL or other source where the data came from.
|
||||
|
||||
Normally, you call the constructor and you're done; then you just
|
||||
access fields like d.examples and d.target and d.inputs."""
|
||||
|
||||
def __init__(self, examples=None, attrs=None, target=-1, values=None,
|
||||
attrnames=None, name='', source='',
|
||||
inputs=None, exclude=(), doc=''):
|
||||
"""Accepts any of DataSet's fields. Examples can
|
||||
also be a string or file from which to parse examples using parse_csv.
|
||||
>>> DataSet(examples='1, 2, 3')
|
||||
<DataSet(): 1 examples, 3 attributes>
|
||||
"""
|
||||
update(self, name=name, source=source, values=values)
|
||||
# Initialize .examples from string or list or data directory
|
||||
if isinstance(examples, str):
|
||||
self.examples = parse_csv(examples)
|
||||
elif examples is None:
|
||||
self.examples = parse_csv(DataFile(name+'.csv').read())
|
||||
else:
|
||||
self.examples = examples
|
||||
map(self.check_example, self.examples)
|
||||
# Attrs are the indicies of examples, unless otherwise stated.
|
||||
if not attrs and self.examples:
|
||||
attrs = range(len(self.examples[0]))
|
||||
self.attrs = attrs
|
||||
# Initialize .attrnames from string, list, or by default
|
||||
if isinstance(attrnames, str):
|
||||
self.attrnames = attrnames.split()
|
||||
else:
|
||||
self.attrnames = attrnames or attrs
|
||||
self.setproblem(target, inputs=inputs, exclude=exclude)
|
||||
|
||||
def setproblem(self, target, inputs=None, exclude=()):
|
||||
"""Set (or change) the target and/or inputs.
|
||||
This way, one DataSet can be used multiple ways. inputs, if specified,
|
||||
is a list of attributes, or specify exclude as a list of attributes
|
||||
        not to use in inputs. Attributes can be -n .. n, or an attrname.
|
||||
Also computes the list of possible values, if that wasn't done yet."""
|
||||
self.target = self.attrnum(target)
|
||||
exclude = map(self.attrnum, exclude)
|
||||
if inputs:
|
||||
            self.inputs = removeall(self.target, inputs)
|
||||
else:
|
||||
self.inputs = [a for a in self.attrs
|
||||
if a is not self.target and a not in exclude]
|
||||
if not self.values:
|
||||
self.values = map(unique, zip(*self.examples))
|
||||
|
||||
def add_example(self, example):
|
||||
"""Add an example to the list of examples, checking it first."""
|
||||
self.check_example(example)
|
||||
self.examples.append(example)
|
||||
|
||||
def check_example(self, example):
|
||||
"""Raise ValueError if example has any invalid values."""
|
||||
if self.values:
|
||||
for a in self.attrs:
|
||||
if example[a] not in self.values[a]:
|
||||
raise ValueError('Bad value %s for attribute %s in %s' %
|
||||
(example[a], self.attrnames[a], example))
|
||||
|
||||
def attrnum(self, attr):
|
||||
"Returns the number used for attr, which can be a name, or -n .. n."
|
||||
if attr < 0:
|
||||
return len(self.attrs) + attr
|
||||
elif isinstance(attr, str):
|
||||
return self.attrnames.index(attr)
|
||||
else:
|
||||
return attr
|
||||
|
||||
def sanitize(self, example):
|
||||
"Return a copy of example, with non-input attributes replaced by 0."
|
||||
return [i in self.inputs and example[i] for i in range(len(example))]
|
||||
|
||||
def __repr__(self):
|
||||
return '<DataSet(%s): %d examples, %d attributes>' % (
|
||||
self.name, len(self.examples), len(self.attrs))
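
## A minimal usage sketch (added for illustration; not part of the original
## module).  It builds a DataSet from a CSV string and then re-targets it
## with setproblem; the attribute names 'a b c' are made up for the sketch.
##   >>> d = DataSet(examples='1, 2, 3 \n 4, 5, 6', attrnames='a b c')
##   >>> d.target, d.inputs
##   (2, [0, 1])
##   >>> d.setproblem(target='a')
##   >>> d.target, d.inputs
##   (0, [1, 2])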
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
def parse_csv(input, delim=','):
|
||||
r"""Input is a string consisting of lines, each line has comma-delimited
|
||||
fields. Convert this into a list of lists. Blank lines are skipped.
|
||||
Fields that look like numbers are converted to numbers.
|
||||
The delim defaults to ',' but '\t' and None are also reasonable values.
|
||||
>>> parse_csv('1, 2, 3 \n 0, 2, na')
|
||||
[[1, 2, 3], [0, 2, 'na']]
|
||||
"""
|
||||
    lines = [line for line in input.splitlines() if line.strip() != '']
|
||||
return [map(num_or_str, line.split(delim)) for line in lines]
|
||||
|
||||
def rms_error(predictions, targets):
|
||||
return math.sqrt(ms_error(predictions, targets))
|
||||
|
||||
def ms_error(predictions, targets):
|
||||
return mean([(p - t)**2 for p, t in zip(predictions, targets)])
|
||||
|
||||
def mean_error(predictions, targets):
|
||||
return mean([abs(p - t) for p, t in zip(predictions, targets)])
|
||||
|
||||
def mean_boolean_error(predictions, targets):
|
||||
return mean([(p != t) for p, t in zip(predictions, targets)])
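
## Worked check of the error measures above (illustrative comment only):
## for predictions [1, 2] against targets [1, 4],
##   ms_error           -> mean([0, 4])        = 2.0
##   rms_error          -> sqrt(2.0)           ~ 1.414
##   mean_error         -> mean([0, 2])        = 1.0
##   mean_boolean_error -> mean([False, True]) = 0.5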
|
||||
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
class Learner:
|
||||
"""A Learner, or Learning Algorithm, can be trained with a dataset,
|
||||
and then asked to predict the target attribute of an example."""
|
||||
|
||||
def train(self, dataset):
|
||||
self.dataset = dataset
|
||||
|
||||
def predict(self, example):
|
||||
abstract
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
class MajorityLearner(Learner):
|
||||
"""A very dumb algorithm: always pick the result that was most popular
|
||||
in the training data. Makes a baseline for comparison."""
|
||||
|
||||
def train(self, dataset):
|
||||
"Find the target value that appears most often."
|
||||
self.most_popular = mode([e[dataset.target] for e in dataset.examples])
|
||||
|
||||
def predict(self, example):
|
||||
"Always return same result: the most popular from the training set."
|
||||
return self.most_popular
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
class NaiveBayesLearner(Learner):
|
||||
|
||||
    def train(self, dataset):
        """Just count the target/attr/val occurrences.
        Count how many times each value of each attribute occurs.
        Store count in N[targetvalue][attr][val]. Let N[attr][None] be the
        sum over all vals."""
        self.dataset = dataset  # predict() and the loops below rely on this
        N = {}
|
||||
## Initialize to 0
|
||||
for gv in self.dataset.values[self.dataset.target]:
|
||||
N[gv] = {}
|
||||
for attr in self.dataset.attrs:
|
||||
N[gv][attr] = {}
|
||||
for val in self.dataset.values[attr]:
|
||||
N[gv][attr][val] = 0
|
||||
N[gv][attr][None] = 0
|
||||
## Go thru examples
|
||||
for example in self.dataset.examples:
|
||||
Ngv = N[example[self.dataset.target]]
|
||||
for attr in self.dataset.attrs:
|
||||
Ngv[attr][example[attr]] += 1
|
||||
Ngv[attr][None] += 1
|
||||
self._N = N
|
||||
|
||||
def N(self, targetval, attr, attrval):
|
||||
"Return the count in the training data of this combination."
|
||||
try:
|
||||
return self._N[targetval][attr][attrval]
|
||||
except KeyError:
|
||||
return 0
|
||||
|
||||
def P(self, targetval, attr, attrval):
|
||||
"""Smooth the raw counts to give a probability estimate.
|
||||
Estimate adds 1 to numerator and len(possible vals) to denominator."""
|
||||
return ((self.N(targetval, attr, attrval) + 1.0) /
|
||||
(self.N(targetval, attr, None) + len(self.dataset.values[attr])))
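    ## Worked example (added): if this target value was seen with this
    ## attribute value in 3 of its 9 training examples, and the attribute
    ## has 4 possible values, the smoothed estimate is
    ##     (3 + 1) / (9 + 4) = 4/13 ~ 0.308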
|
||||
|
||||
def predict(self, example):
|
||||
"""Predict the target value for example. Consider each possible value,
|
||||
choose the most likely, by looking at each attribute independently."""
|
||||
possible_values = self.dataset.values[self.dataset.target]
|
||||
def class_probability(targetval):
|
||||
return product([self.P(targetval, a, example[a])
|
||||
for a in self.dataset.inputs], 1)
|
||||
return argmax(possible_values, class_probability)
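
## Illustrative usage sketch (added; not in the original file).  A learner is
## trained on a DataSet and then queried example-by-example; Majority is the
## synthetic dataset generator defined near the end of this file.
##   >>> nb = NaiveBayesLearner()
##   >>> nb.train(Majority(7, 100))
##   >>> nb.predict([1, 1, 1, 1, 1, 1, 1, None]) in (0, 1, True, False)
##   True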
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
class NearestNeighborLearner(Learner):
|
||||
|
||||
def __init__(self, k=1):
|
||||
"k-NearestNeighbor: the k nearest neighbors vote."
|
||||
self.k = k
|
||||
|
||||
    def predict(self, example):
        """With k=1, find the point closest to example.
        With k>1, find the k closest, and have them vote for the best."""
        if self.k == 1:
            neighbor = argmin(self.dataset.examples,
                              lambda e: self.distance(e, example))
            return neighbor[self.dataset.target]
        else:
            ## Maintain a sorted list of the k best (distance, example) pairs.
            ## For very large k, a PriorityQueue would be better.
            best = []
            for e in self.dataset.examples:
                d = self.distance(e, example)
                if len(best) < self.k:
                    best.append((d, e))
                    best.sort()
                elif d < best[-1][0]:
                    best[-1] = (d, e)
                    best.sort()
            return mode([e[self.dataset.target] for (d, e) in best])
|
||||
|
||||
def distance(self, e1, e2):
|
||||
return mean_boolean_error(e1, e2)
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
class DecisionTree:
|
||||
"""A DecisionTree holds an attribute that is being tested, and a
|
||||
dict of {attrval: Tree} entries. If Tree here is not a DecisionTree
|
||||
then it is the final classification of the example."""
|
||||
|
||||
def __init__(self, attr, attrname=None, branches=None):
|
||||
"Initialize by saying what attribute this node tests."
|
||||
update(self, attr=attr, attrname=attrname or attr,
|
||||
branches=branches or {})
|
||||
|
||||
def predict(self, example):
|
||||
"Given an example, use the tree to classify the example."
|
||||
child = self.branches[example[self.attr]]
|
||||
if isinstance(child, DecisionTree):
|
||||
return child.predict(example)
|
||||
else:
|
||||
return child
|
||||
|
||||
def add(self, val, subtree):
|
||||
"Add a branch. If self.attr = val, go to the given subtree."
|
||||
self.branches[val] = subtree
|
||||
return self
|
||||
|
||||
def display(self, indent=0):
|
||||
name = self.attrname
|
||||
print 'Test', name
|
||||
for (val, subtree) in self.branches.items():
|
||||
print ' '*4*indent, name, '=', val, '==>',
|
||||
if isinstance(subtree, DecisionTree):
|
||||
subtree.display(indent+1)
|
||||
else:
|
||||
print 'RESULT = ', subtree
|
||||
|
||||
def __repr__(self):
|
||||
return 'DecisionTree(%r, %r, %r)' % (
|
||||
self.attr, self.attrname, self.branches)
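
## A hand-built one-node tree, as a quick illustration (added): attribute 0
## of the example is tested and the matching branch value is returned.
##   >>> t = DecisionTree(0, 'Raining', {'Yes': 'Stay', 'No': 'Go'})
##   >>> t.predict(['No'])
##   'Go'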
|
||||
|
||||
Yes, No = True, False
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
class DecisionTreeLearner(Learner):
|
||||
|
||||
def predict(self, example):
|
||||
if isinstance(self.dt, DecisionTree):
|
||||
return self.dt.predict(example)
|
||||
else:
|
||||
return self.dt
|
||||
|
||||
def train(self, dataset):
|
||||
self.dataset = dataset
|
||||
self.attrnames = dataset.attrnames
|
||||
self.dt = self.decision_tree_learning(dataset.examples, dataset.inputs)
|
||||
|
||||
def decision_tree_learning(self, examples, attrs, default=None):
|
||||
if len(examples) == 0:
|
||||
return default
|
||||
elif self.all_same_class(examples):
|
||||
return examples[0][self.dataset.target]
|
||||
elif len(attrs) == 0:
|
||||
return self.majority_value(examples)
|
||||
else:
|
||||
best = self.choose_attribute(attrs, examples)
|
||||
tree = DecisionTree(best, self.attrnames[best])
|
||||
for (v, examples_i) in self.split_by(best, examples):
|
||||
subtree = self.decision_tree_learning(examples_i,
|
||||
removeall(best, attrs), self.majority_value(examples))
|
||||
tree.add(v, subtree)
|
||||
return tree
|
||||
|
||||
def choose_attribute(self, attrs, examples):
|
||||
"Choose the attribute with the highest information gain."
|
||||
return argmax(attrs, lambda a: self.information_gain(a, examples))
|
||||
|
||||
def all_same_class(self, examples):
|
||||
"Are all these examples in the same target class?"
|
||||
target = self.dataset.target
|
||||
class0 = examples[0][target]
|
||||
for e in examples:
|
||||
if e[target] != class0: return False
|
||||
return True
|
||||
|
||||
def majority_value(self, examples):
|
||||
"""Return the most popular target value for this set of examples.
|
||||
(If target is binary, this is the majority; otherwise plurality.)"""
|
||||
g = self.dataset.target
|
||||
return argmax(self.dataset.values[g],
|
||||
lambda v: self.count(g, v, examples))
|
||||
|
||||
def count(self, attr, val, examples):
|
||||
return count_if(lambda e: e[attr] == val, examples)
|
||||
|
||||
def information_gain(self, attr, examples):
|
||||
def I(examples):
|
||||
target = self.dataset.target
|
||||
return information_content([self.count(target, v, examples)
|
||||
for v in self.dataset.values[target]])
|
||||
N = float(len(examples))
|
||||
remainder = 0
|
||||
for (v, examples_i) in self.split_by(attr, examples):
|
||||
remainder += (len(examples_i) / N) * I(examples_i)
|
||||
return I(examples) - remainder
|
||||
|
||||
def split_by(self, attr, examples=None):
|
||||
"Return a list of (val, examples) pairs for each val of attr."
|
||||
if examples == None:
|
||||
examples = self.dataset.examples
|
||||
return [(v, [e for e in examples if e[attr] == v])
|
||||
for v in self.dataset.values[attr]]
|
||||
|
||||
def information_content(values):
|
||||
"Number of bits to represent the probability distribution in values."
|
||||
# If the values do not sum to 1, normalize them to make them a Prob. Dist.
|
||||
values = removeall(0, values)
|
||||
s = float(sum(values))
|
||||
if s != 1.0: values = [v/s for v in values]
|
||||
return sum([- v * log2(v) for v in values])
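
## Worked values (added for illustration):
##   information_content([6, 6])  ->  1.0 bit  (a 50/50 split)
##   information_content([4, 2])  ->  ~0.918   (a 2/3 vs 1/3 split)
##   information_content([6, 0])  ->  0.0      (a pure node; zeros are dropped)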
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
### A decision list is implemented as a list of (test, value) pairs.
|
||||
|
||||
class DecisionListLearner(Learner):
|
||||
|
||||
def train(self, dataset):
|
||||
self.dataset = dataset
|
||||
self.attrnames = dataset.attrnames
|
||||
self.dl = self.decision_list_learning(Set(dataset.examples))
|
||||
|
||||
def decision_list_learning(self, examples):
|
||||
"""[Fig. 18.14]"""
|
||||
if not examples:
|
||||
return [(True, No)]
|
||||
t, o, examples_t = self.find_examples(examples)
|
||||
if not t:
|
||||
raise Failure
|
||||
return [(t, o)] + self.decision_list_learning(examples - examples_t)
|
||||
|
||||
def find_examples(self, examples):
|
||||
"""Find a set of examples that all have the same outcome under some test.
|
||||
Return a tuple of the test, outcome, and examples."""
|
||||
NotImplemented
|
||||
#______________________________________________________________________________
|
||||
|
||||
class NeuralNetLearner(Learner):
|
||||
"""Layered feed-forward network."""
|
||||
|
||||
def __init__(self, sizes):
|
||||
self.activations = map(lambda n: [0.0 for i in range(n)], sizes)
|
||||
self.weights = []
|
||||
|
||||
def train(self, dataset):
|
||||
NotImplemented
|
||||
|
||||
def predict(self, example):
|
||||
NotImplemented
|
||||
|
||||
class NNUnit:
|
||||
"""Unit of a neural net."""
|
||||
def __init__(self):
|
||||
NotImplemented
|
||||
|
||||
class PerceptronLearner(NeuralNetLearner):
|
||||
|
||||
def predict(self, example):
|
||||
return sum([])
|
||||
#______________________________________________________________________________
|
||||
|
||||
class Linearlearner(Learner):
|
||||
"""Fit a linear model to the data."""
|
||||
|
||||
NotImplemented
|
||||
#______________________________________________________________________________
|
||||
|
||||
class EnsembleLearner(Learner):
|
||||
"""Given a list of learning algorithms, have them vote."""
|
||||
|
||||
def __init__(self, learners=[]):
|
||||
self.learners=learners
|
||||
|
||||
def train(self, dataset):
|
||||
for learner in self.learners:
|
||||
learner.train(dataset)
|
||||
|
||||
def predict(self, example):
|
||||
return mode([learner.predict(example) for learner in self.learners])
|
||||
|
||||
#_____________________________________________________________________________
|
||||
# Functions for testing learners on examples
|
||||
|
||||
def test(learner, dataset, examples=None, verbose=0):
|
||||
"""Return the proportion of the examples that are correctly predicted.
|
||||
Assumes the learner has already been trained."""
|
||||
if examples == None: examples = dataset.examples
|
||||
if len(examples) == 0: return 0.0
|
||||
right = 0.0
|
||||
for example in examples:
|
||||
desired = example[dataset.target]
|
||||
output = learner.predict(dataset.sanitize(example))
|
||||
if output == desired:
|
||||
right += 1
|
||||
if verbose >= 2:
|
||||
print ' OK: got %s for %s' % (desired, example)
|
||||
elif verbose:
|
||||
print 'WRONG: got %s, expected %s for %s' % (
|
||||
output, desired, example)
|
||||
return right / len(examples)
|
||||
|
||||
def train_and_test(learner, dataset, start, end):
|
||||
"""Reserve dataset.examples[start:end] for test; train on the remainder.
|
||||
Return the proportion of examples correct on the test examples."""
|
||||
examples = dataset.examples
|
||||
try:
|
||||
dataset.examples = examples[:start] + examples[end:]
|
||||
learner.dataset = dataset
|
||||
learner.train(dataset)
|
||||
return test(learner, dataset, examples[start:end])
|
||||
finally:
|
||||
dataset.examples = examples
|
||||
|
||||
def cross_validation(learner, dataset, k=10, trials=1):
|
||||
"""Do k-fold cross_validate and return their mean.
|
||||
That is, keep out 1/k of the examples for testing on each of k runs.
|
||||
Shuffle the examples first; If trials>1, average over several shuffles."""
|
||||
if k == None:
|
||||
k = len(dataset.examples)
|
||||
if trials > 1:
|
||||
return mean([cross_validation(learner, dataset, k, trials=1)
|
||||
for t in range(trials)])
|
||||
else:
|
||||
n = len(dataset.examples)
|
||||
random.shuffle(dataset.examples)
|
||||
return mean([train_and_test(learner, dataset, i*(n/k), (i+1)*(n/k))
|
||||
for i in range(k)])
|
||||
|
||||
def leave1out(learner, dataset):
|
||||
"Leave one out cross-validation over the dataset."
|
||||
return cross_validation(learner, dataset, k=len(dataset.examples))
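
## Illustrative sketch (added; not in the original file): hold out a test
## slice, or cross-validate, using the baseline learner.  Majority is the
## synthetic dataset generator defined later in this file.
##   >>> m = Majority(7, 100)
##   >>> 0.0 <= train_and_test(MajorityLearner(), m, 0, 20) <= 1.0
##   True
##   >>> 0.0 <= cross_validation(MajorityLearner(), m, k=10) <= 1.0
##   True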
|
||||
|
||||
def learningcurve(learner, dataset, trials=10, sizes=None):
|
||||
if sizes == None:
|
||||
sizes = range(2, len(dataset.examples)-10, 2)
|
||||
def score(learner, size):
|
||||
random.shuffle(dataset.examples)
|
||||
return train_and_test(learner, dataset, 0, size)
|
||||
return [(size, mean([score(learner, size) for t in range(trials)]))
|
||||
for size in sizes]
|
||||
|
||||
#______________________________________________________________________________
|
||||
# The rest of this file gives Data sets for machine learning problems.
|
||||
|
||||
orings = DataSet(name='orings', target='Distressed',
|
||||
attrnames="Rings Distressed Temp Pressure Flightnum")
|
||||
|
||||
|
||||
zoo = DataSet(name='zoo', target='type', exclude=['name'],
|
||||
attrnames="name hair feathers eggs milk airborne aquatic " +
|
||||
"predator toothed backbone breathes venomous fins legs tail " +
|
||||
"domestic catsize type")
|
||||
|
||||
|
||||
iris = DataSet(name="iris", target="class",
|
||||
attrnames="sepal-len sepal-width petal-len petal-width class")
|
||||
|
||||
#______________________________________________________________________________
|
||||
# The Restaurant example from Fig. 18.2
|
||||
|
||||
def RestaurantDataSet(examples=None):
|
||||
"Build a DataSet of Restaurant waiting examples."
|
||||
return DataSet(name='restaurant', target='Wait', examples=examples,
|
||||
attrnames='Alternate Bar Fri/Sat Hungry Patrons Price '
|
||||
+ 'Raining Reservation Type WaitEstimate Wait')
|
||||
|
||||
restaurant = RestaurantDataSet()
|
||||
|
||||
def T(attrname, branches):
|
||||
return DecisionTree(restaurant.attrnum(attrname), attrname, branches)
|
||||
|
||||
Fig[18,2] = T('Patrons',
|
||||
{'None': 'No', 'Some': 'Yes', 'Full':
|
||||
T('WaitEstimate',
|
||||
{'>60': 'No', '0-10': 'Yes',
|
||||
'30-60':
|
||||
T('Alternate', {'No':
|
||||
T('Reservation', {'Yes': 'Yes', 'No':
|
||||
T('Bar', {'No':'No',
|
||||
'Yes':'Yes'})}),
|
||||
'Yes':
|
||||
T('Fri/Sat', {'No': 'No', 'Yes': 'Yes'})}),
|
||||
'10-30':
|
||||
T('Hungry', {'No': 'Yes', 'Yes':
|
||||
T('Alternate',
|
||||
{'No': 'Yes', 'Yes':
|
||||
T('Raining', {'No': 'No', 'Yes': 'Yes'})})})})})
|
||||
|
||||
def SyntheticRestaurant(n=20):
|
||||
"Generate a DataSet with n examples."
|
||||
def gen():
|
||||
example = map(random.choice, restaurant.values)
|
||||
example[restaurant.target] = Fig[18,2].predict(example)
|
||||
return example
|
||||
return RestaurantDataSet([gen() for i in range(n)])
|
||||
|
||||
#______________________________________________________________________________
|
||||
# Artificial, generated examples.
|
||||
|
||||
def Majority(k, n):
|
||||
"""Return a DataSet with n k-bit examples of the majority problem:
|
||||
k random bits followed by a 1 if more than half the bits are 1, else 0."""
|
||||
examples = []
|
||||
for i in range(n):
|
||||
bits = [random.choice([0, 1]) for i in range(k)]
|
||||
bits.append(sum(bits) > k/2)
|
||||
examples.append(bits)
|
||||
return DataSet(name="majority", examples=examples)
|
||||
|
||||
def Parity(k, n, name="parity"):
|
||||
"""Return a DataSet with n k-bit examples of the parity problem:
|
||||
k random bits followed by a 1 if an odd number of bits are 1, else 0."""
|
||||
examples = []
|
||||
for i in range(n):
|
||||
bits = [random.choice([0, 1]) for i in range(k)]
|
||||
bits.append(sum(bits) % 2)
|
||||
examples.append(bits)
|
||||
return DataSet(name=name, examples=examples)
|
||||
|
||||
def Xor(n):
|
||||
"""Return a DataSet with n examples of 2-input xor."""
|
||||
return Parity(2, n, name="xor")
|
||||
|
||||
def ContinuousXor(n):
|
||||
"2 inputs are chosen uniformly form (0.0 .. 2.0]; output is xor of ints."
|
||||
examples = []
|
||||
for i in range(n):
|
||||
x, y = [random.uniform(0.0, 2.0) for i in '12']
|
||||
examples.append([x, y, int(x) != int(y)])
|
||||
return DataSet(name="continuous xor", examples=examples)
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
def compare(algorithms=[MajorityLearner, NaiveBayesLearner,
|
||||
NearestNeighborLearner, DecisionTreeLearner],
|
||||
datasets=[iris, orings, zoo, restaurant, SyntheticRestaurant(20),
|
||||
Majority(7, 100), Parity(7, 100), Xor(100)],
|
||||
k=10, trials=1):
|
||||
"""Compare various learners on various datasets using cross-validation.
|
||||
Print results as a table."""
|
||||
print_table([[a.__name__.replace('Learner','')] +
|
||||
[cross_validation(a(), d, k, trials) for d in datasets]
|
||||
for a in algorithms],
|
||||
header=[''] + [d.name[0:7] for d in datasets], round=2)
|
||||
|
@ -1,888 +0,0 @@
|
||||
"""Representations and Inference for Logic (Chapters 7-10)
|
||||
|
||||
Covers both Propositional and First-Order Logic. First we have four
|
||||
important data types:
|
||||
|
||||
KB Abstract class holds a knowledge base of logical expressions
|
||||
KB_Agent Abstract class subclasses agents.Agent
|
||||
Expr A logical expression
|
||||
substitution Implemented as a dictionary of var:value pairs, {x:1, y:x}
|
||||
|
||||
Be careful: some functions take an Expr as argument, and some take a KB.
|
||||
Then we implement various functions for doing logical inference:
|
||||
|
||||
pl_true Evaluate a propositional logical sentence in a model
|
||||
tt_entails Say if a statement is entailed by a KB
|
||||
pl_resolution Do resolution on propositional sentences
|
||||
dpll_satisfiable See if a propositional sentence is satisfiable
|
||||
WalkSAT (not yet implemented)
|
||||
|
||||
And a few other functions:
|
||||
|
||||
to_cnf Convert to conjunctive normal form
|
||||
unify Do unification of two FOL sentences
|
||||
diff, simp Symbolic differentiation and simplification
|
||||
"""
|
||||
|
||||
from __future__ import generators
|
||||
import re
|
||||
import agents
|
||||
from utils import *
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
class KB:
|
||||
"""A Knowledge base to which you can tell and ask sentences.
|
||||
To create a KB, first subclass this class and implement
|
||||
tell, ask_generator, and retract. Why ask_generator instead of ask?
|
||||
The book is a bit vague on what ask means --
|
||||
For a Propositional Logic KB, ask(P & Q) returns True or False, but for an
|
||||
FOL KB, something like ask(Brother(x, y)) might return many substitutions
|
||||
    such as {x: Cain, y: Abel}, {x: Abel, y: Cain}, {x: George, y: Jeb}, etc.
|
||||
So ask_generator generates these one at a time, and ask either returns the
|
||||
first one or returns False."""
|
||||
|
||||
def __init__(self, sentence=None):
|
||||
abstract
|
||||
|
||||
def tell(self, sentence):
|
||||
"Add the sentence to the KB"
|
||||
abstract
|
||||
|
||||
def ask(self, query):
|
||||
"""Ask returns a substitution that makes the query true, or
|
||||
it returns False. It is implemented in terms of ask_generator."""
|
||||
try:
|
||||
return self.ask_generator(query).next()
|
||||
except StopIteration:
|
||||
return False
|
||||
|
||||
def ask_generator(self, query):
|
||||
"Yield all the substitutions that make query true."
|
||||
abstract
|
||||
|
||||
def retract(self, sentence):
|
||||
"Remove the sentence from the KB"
|
||||
abstract
|
||||
|
||||
|
||||
class PropKB(KB):
|
||||
"A KB for Propositional Logic. Inefficient, with no indexing."
|
||||
|
||||
def __init__(self, sentence=None):
|
||||
self.clauses = []
|
||||
if sentence:
|
||||
self.tell(sentence)
|
||||
|
||||
def tell(self, sentence):
|
||||
"Add the sentence's clauses to the KB"
|
||||
self.clauses.extend(conjuncts(to_cnf(sentence)))
|
||||
|
||||
def ask_generator(self, query):
|
||||
"Yield the empty substitution if KB implies query; else False"
|
||||
if not tt_entails(Expr('&', *self.clauses), query):
|
||||
return
|
||||
yield {}
|
||||
|
||||
def retract(self, sentence):
|
||||
"Remove the sentence's clauses from the KB"
|
||||
for c in conjuncts(to_cnf(sentence)):
|
||||
if c in self.clauses:
|
||||
self.clauses.remove(c)
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
class KB_Agent(agents.Agent):
|
||||
"""A generic logical knowledge-based agent. [Fig. 7.1]"""
|
||||
def __init__(self, KB):
|
||||
t = 0
|
||||
def program(percept):
|
||||
KB.tell(self.make_percept_sentence(percept, t))
|
||||
action = KB.ask(self.make_action_query(t))
|
||||
KB.tell(self.make_action_sentence(action, t))
|
||||
t = t + 1
|
||||
return action
|
||||
self.program = program
|
||||
|
||||
def make_percept_sentence(self, percept, t):
|
||||
return(Expr("Percept")(percept, t))
|
||||
|
||||
def make_action_query(self, t):
|
||||
return(expr("ShouldDo(action, %d)" % t))
|
||||
|
||||
def make_action_sentence(self, action, t):
|
||||
return(Expr("Did")(action, t))
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
class Expr:
|
||||
"""A symbolic mathematical expression. We use this class for logical
|
||||
expressions, and for terms within logical expressions. In general, an
|
||||
Expr has an op (operator) and a list of args. The op can be:
|
||||
Null-ary (no args) op:
|
||||
A number, representing the number itself. (e.g. Expr(42) => 42)
|
||||
A symbol, representing a variable or constant (e.g. Expr('F') => F)
|
||||
Unary (1 arg) op:
|
||||
'~', '-', representing NOT, negation (e.g. Expr('~', Expr('P')) => ~P)
|
||||
Binary (2 arg) op:
|
||||
'>>', '<<', representing forward and backward implication
|
||||
'+', '-', '*', '/', '**', representing arithmetic operators
|
||||
'<', '>', '>=', '<=', representing comparison operators
|
||||
'<=>', '^', representing logical equality and XOR
|
||||
N-ary (0 or more args) op:
|
||||
'&', '|', representing conjunction and disjunction
|
||||
A symbol, representing a function term or FOL proposition
|
||||
|
||||
Exprs can be constructed with operator overloading: if x and y are Exprs,
|
||||
then so are x + y and x & y, etc. Also, if F and x are Exprs, then so is
|
||||
F(x); it works by overloading the __call__ method of the Expr F. Note
|
||||
that in the Expr that is created by F(x), the op is the str 'F', not the
|
||||
Expr F. See http://www.python.org/doc/current/ref/specialnames.html
|
||||
to learn more about operator overloading in Python.
|
||||
|
||||
WARNING: x == y and x != y are NOT Exprs. The reason is that we want
|
||||
to write code that tests 'if x == y:' and if x == y were the same
|
||||
as Expr('==', x, y), then the result would always be true; not what a
|
||||
programmer would expect. But we still need to form Exprs representing
|
||||
equalities and disequalities. We concentrate on logical equality (or
|
||||
equivalence) and logical disequality (or XOR). You have 3 choices:
|
||||
(1) Expr('<=>', x, y) and Expr('^', x, y)
|
||||
        Note that ^ is bitwise XOR in Python (and Java and C++)
|
||||
(2) expr('x <=> y') and expr('x =/= y').
|
||||
See the doc string for the function expr.
|
||||
(3) (x % y) and (x ^ y).
|
||||
It is very ugly to have (x % y) mean (x <=> y), but we need
|
||||
SOME operator to make (2) work, and this seems the best choice.
|
||||
|
||||
WARNING: if x is an Expr, then so is x + 1, because the int 1 gets
|
||||
coerced to an Expr by the constructor. But 1 + x is an error, because
|
||||
1 doesn't know how to add an Expr. (Adding an __radd__ method to Expr
|
||||
wouldn't help, because int.__add__ is still called first.) Therefore,
|
||||
you should use Expr(1) + x instead, or ONE + x, or expr('1 + x').
|
||||
"""
|
||||
|
||||
def __init__(self, op, *args):
|
||||
"Op is a string or number; args are Exprs (or are coerced to Exprs)."
|
||||
assert isinstance(op, str) or (isnumber(op) and not args)
|
||||
self.op = num_or_str(op)
|
||||
self.args = map(expr, args) ## Coerce args to Exprs
|
||||
|
||||
def __call__(self, *args):
|
||||
"""Self must be a symbol with no args, such as Expr('F'). Create a new
|
||||
Expr with 'F' as op and the args as arguments."""
|
||||
assert is_symbol(self.op) and not self.args
|
||||
return Expr(self.op, *args)
|
||||
|
||||
def __repr__(self):
|
||||
"Show something like 'P' or 'P(x, y)', or '~P' or '(P | Q | R)'"
|
||||
if len(self.args) == 0: # Constant or proposition with arity 0
|
||||
return str(self.op)
|
||||
elif is_symbol(self.op): # Functional or Propositional operator
|
||||
return '%s(%s)' % (self.op, ', '.join(map(repr, self.args)))
|
||||
elif len(self.args) == 1: # Prefix operator
|
||||
return self.op + repr(self.args[0])
|
||||
else: # Infix operator
|
||||
return '(%s)' % (' '+self.op+' ').join(map(repr, self.args))
|
||||
|
||||
def __eq__(self, other):
|
||||
"""x and y are equal iff their ops and args are equal."""
|
||||
return (other is self) or (isinstance(other, Expr)
|
||||
and self.op == other.op and self.args == other.args)
|
||||
|
||||
def __hash__(self):
|
||||
"Need a hash method so Exprs can live in dicts."
|
||||
return hash(self.op) ^ hash(tuple(self.args))
|
||||
|
||||
# See http://www.python.org/doc/current/lib/module-operator.html
|
||||
# Not implemented: not, abs, pos, concat, contains, *item, *slice
|
||||
def __lt__(self, other): return Expr('<', self, other)
|
||||
def __le__(self, other): return Expr('<=', self, other)
|
||||
def __ge__(self, other): return Expr('>=', self, other)
|
||||
def __gt__(self, other): return Expr('>', self, other)
|
||||
def __add__(self, other): return Expr('+', self, other)
|
||||
def __sub__(self, other): return Expr('-', self, other)
|
||||
def __and__(self, other): return Expr('&', self, other)
|
||||
def __div__(self, other): return Expr('/', self, other)
|
||||
def __truediv__(self, other):return Expr('/', self, other)
|
||||
def __invert__(self): return Expr('~', self)
|
||||
def __lshift__(self, other): return Expr('<<', self, other)
|
||||
def __rshift__(self, other): return Expr('>>', self, other)
|
||||
def __mul__(self, other): return Expr('*', self, other)
|
||||
def __neg__(self): return Expr('-', self)
|
||||
def __or__(self, other): return Expr('|', self, other)
|
||||
def __pow__(self, other): return Expr('**', self, other)
|
||||
def __xor__(self, other): return Expr('^', self, other)
|
||||
def __mod__(self, other): return Expr('<=>', self, other) ## (x % y)
|
||||
|
||||
|
||||
|
||||
def expr(s):
|
||||
"""Create an Expr representing a logic expression by parsing the input
|
||||
string. Symbols and numbers are automatically converted to Exprs.
|
||||
In addition you can use alternative spellings of these operators:
|
||||
'x ==> y' parses as (x >> y) # Implication
|
||||
'x <== y' parses as (x << y) # Reverse implication
|
||||
'x <=> y' parses as (x % y) # Logical equivalence
|
||||
'x =/= y' parses as (x ^ y) # Logical disequality (xor)
|
||||
But BE CAREFUL; precedence of implication is wrong. expr('P & Q ==> R & S')
|
||||
is ((P & (Q >> R)) & S); so you must use expr('(P & Q) ==> (R & S)').
|
||||
>>> expr('P <=> Q(1)')
|
||||
(P <=> Q(1))
|
||||
>>> expr('P & Q | ~R(x, F(x))')
|
||||
((P & Q) | ~R(x, F(x)))
|
||||
"""
|
||||
if isinstance(s, Expr): return s
|
||||
if isnumber(s): return Expr(s)
|
||||
## Replace the alternative spellings of operators with canonical spellings
|
||||
s = s.replace('==>', '>>').replace('<==', '<<')
|
||||
s = s.replace('<=>', '%').replace('=/=', '^')
|
||||
## Replace a symbol or number, such as 'P' with 'Expr("P")'
|
||||
s = re.sub(r'([a-zA-Z0-9_.]+)', r'Expr("\1")', s)
|
||||
## Now eval the string. (A security hole; do not use with an adversary.)
|
||||
return eval(s, {'Expr':Expr})
|
||||
|
||||
def is_symbol(s):
|
||||
"A string s is a symbol if it starts with an alphabetic char."
|
||||
return isinstance(s, str) and s[0].isalpha()
|
||||
|
||||
def is_var_symbol(s):
|
||||
"A logic variable symbol is an initial-lowercase string."
|
||||
return is_symbol(s) and s[0].islower()
|
||||
|
||||
def is_prop_symbol(s):
|
||||
"""A proposition logic symbol is an initial-uppercase string other than
|
||||
TRUE or FALSE."""
|
||||
return is_symbol(s) and s[0].isupper() and s != 'TRUE' and s != 'FALSE'
|
||||
|
||||
|
||||
## Useful constant Exprs used in examples and code:
|
||||
TRUE, FALSE, ZERO, ONE, TWO = map(Expr, ['TRUE', 'FALSE', 0, 1, 2])
|
||||
A, B, C, F, G, P, Q, x, y, z = map(Expr, 'ABCFGPQxyz')
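
## A small constructed example (added): Exprs compose via the overloaded
## operators described in the Expr docstring.  R is an extra symbol defined
## here only for this sketch.
##   >>> R = Expr('R')
##   >>> P & Q | ~R
##   ((P & Q) | ~R)
##   >>> (P >> Q) == Expr('>>', P, Q)
##   True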
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
def tt_entails(kb, alpha):
|
||||
"""Use truth tables to determine if KB entails sentence alpha. [Fig. 7.10]
|
||||
>>> tt_entails(expr('P & Q'), expr('Q'))
|
||||
True
|
||||
"""
|
||||
return tt_check_all(kb, alpha, prop_symbols(kb & alpha), {})
|
||||
|
||||
def tt_check_all(kb, alpha, symbols, model):
    "Auxiliary routine to implement tt_entails."
    if not symbols:
        if pl_true(kb, model):
            result = pl_true(alpha, model)
            assert result != None
            return result
        else:
            return True
    else:
        P, rest = symbols[0], symbols[1:]
        return (tt_check_all(kb, alpha, rest, extend(model, P, True)) and
                tt_check_all(kb, alpha, rest, extend(model, P, False)))
|
||||
|
||||
def prop_symbols(x):
|
||||
"Return a list of all propositional symbols in x."
|
||||
if not isinstance(x, Expr):
|
||||
return []
|
||||
elif is_prop_symbol(x.op):
|
||||
return [x]
|
||||
else:
|
||||
s = set(())
|
||||
for arg in x.args:
|
||||
for symbol in prop_symbols(arg):
|
||||
s.add(symbol)
|
||||
return list(s)
|
||||
|
||||
def tt_true(alpha):
|
||||
"""Is the sentence alpha a tautology? (alpha will be coerced to an expr.)
|
||||
>>> tt_true(expr("(P >> Q) <=> (~P | Q)"))
|
||||
True
|
||||
"""
|
||||
return tt_entails(TRUE, expr(alpha))
|
||||
|
||||
def pl_true(exp, model={}):
|
||||
"""Return True if the propositional logic expression is true in the model,
|
||||
and False if it is false. If the model does not specify the value for
|
||||
every proposition, this may return None to indicate 'not obvious';
|
||||
this may happen even when the expression is tautological."""
|
||||
op, args = exp.op, exp.args
|
||||
if exp == TRUE:
|
||||
return True
|
||||
elif exp == FALSE:
|
||||
return False
|
||||
elif is_prop_symbol(op):
|
||||
return model.get(exp)
|
||||
elif op == '~':
|
||||
p = pl_true(args[0], model)
|
||||
if p == None: return None
|
||||
else: return not p
|
||||
elif op == '|':
|
||||
result = False
|
||||
for arg in args:
|
||||
p = pl_true(arg, model)
|
||||
if p == True: return True
|
||||
if p == None: result = None
|
||||
return result
|
||||
elif op == '&':
|
||||
result = True
|
||||
for arg in args:
|
||||
p = pl_true(arg, model)
|
||||
if p == False: return False
|
||||
if p == None: result = None
|
||||
return result
|
||||
p, q = args
|
||||
if op == '>>':
|
||||
return pl_true(~p | q, model)
|
||||
elif op == '<<':
|
||||
return pl_true(p | ~q, model)
|
||||
pt = pl_true(p, model)
|
||||
if pt == None: return None
|
||||
qt = pl_true(q, model)
|
||||
if qt == None: return None
|
||||
if op == '<=>':
|
||||
return pt == qt
|
||||
elif op == '^':
|
||||
return pt != qt
|
||||
else:
|
||||
        raise ValueError("illegal operator in logic expression: " + str(exp))
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
## Convert to Conjunctive Normal Form (CNF)
|
||||
|
||||
def to_cnf(s):
|
||||
"""Convert a propositional logical sentence s to conjunctive normal form.
|
||||
That is, of the form ((A | ~B | ...) & (B | C | ...) & ...) [p. 215]
|
||||
>>> to_cnf("~(B|C)")
|
||||
(~B & ~C)
|
||||
>>> to_cnf("B <=> (P1|P2)")
|
||||
((~P1 | B) & (~P2 | B) & (P1 | P2 | ~B))
|
||||
>>> to_cnf("a | (b & c) | d")
|
||||
((b | a | d) & (c | a | d))
|
||||
>>> to_cnf("A & (B | (D & E))")
|
||||
(A & (D | B) & (E | B))
|
||||
"""
|
||||
if isinstance(s, str): s = expr(s)
|
||||
s = eliminate_implications(s) # Steps 1, 2 from p. 215
|
||||
s = move_not_inwards(s) # Step 3
|
||||
return distribute_and_over_or(s) # Step 4
|
||||
|
||||
def eliminate_implications(s):
|
||||
"""Change >>, <<, and <=> into &, |, and ~. That is, return an Expr
|
||||
that is equivalent to s, but has only &, |, and ~ as logical operators.
|
||||
>>> eliminate_implications(A >> (~B << C))
|
||||
((~B | ~C) | ~A)
|
||||
"""
|
||||
if not s.args or is_symbol(s.op): return s ## (Atoms are unchanged.)
|
||||
args = map(eliminate_implications, s.args)
|
||||
a, b = args[0], args[-1]
|
||||
if s.op == '>>':
|
||||
return (b | ~a)
|
||||
elif s.op == '<<':
|
||||
return (a | ~b)
|
||||
elif s.op == '<=>':
|
||||
return (a | ~b) & (b | ~a)
|
||||
else:
|
||||
return Expr(s.op, *args)
|
||||
|
||||
def move_not_inwards(s):
|
||||
"""Rewrite sentence s by moving negation sign inward.
|
||||
>>> move_not_inwards(~(A | B))
|
||||
(~A & ~B)
|
||||
>>> move_not_inwards(~(A & B))
|
||||
(~A | ~B)
|
||||
>>> move_not_inwards(~(~(A | ~B) | ~~C))
|
||||
((A | ~B) & ~C)
|
||||
"""
|
||||
if s.op == '~':
|
||||
NOT = lambda b: move_not_inwards(~b)
|
||||
a = s.args[0]
|
||||
if a.op == '~': return move_not_inwards(a.args[0]) # ~~A ==> A
|
||||
if a.op =='&': return NaryExpr('|', *map(NOT, a.args))
|
||||
if a.op =='|': return NaryExpr('&', *map(NOT, a.args))
|
||||
return s
|
||||
elif is_symbol(s.op) or not s.args:
|
||||
return s
|
||||
else:
|
||||
return Expr(s.op, *map(move_not_inwards, s.args))
|
||||
|
||||
def distribute_and_over_or(s):
|
||||
"""Given a sentence s consisting of conjunctions and disjunctions
|
||||
of literals, return an equivalent sentence in CNF.
|
||||
>>> distribute_and_over_or((A & B) | C)
|
||||
((A | C) & (B | C))
|
||||
"""
|
||||
if s.op == '|':
|
||||
s = NaryExpr('|', *s.args)
|
||||
if len(s.args) == 0:
|
||||
return FALSE
|
||||
if len(s.args) == 1:
|
||||
return distribute_and_over_or(s.args[0])
|
||||
conj = find_if((lambda d: d.op == '&'), s.args)
|
||||
if not conj:
|
||||
return NaryExpr(s.op, *s.args)
|
||||
others = [a for a in s.args if a is not conj]
|
||||
if len(others) == 1:
|
||||
rest = others[0]
|
||||
else:
|
||||
rest = NaryExpr('|', *others)
|
||||
return NaryExpr('&', *map(distribute_and_over_or,
|
||||
[(c|rest) for c in conj.args]))
|
||||
elif s.op == '&':
|
||||
return NaryExpr('&', *map(distribute_and_over_or, s.args))
|
||||
else:
|
||||
return s
|
||||
|
||||
_NaryExprTable = {'&':TRUE, '|':FALSE, '+':ZERO, '*':ONE}
|
||||
|
||||
def NaryExpr(op, *args):
|
||||
"""Create an Expr, but with an nary, associative op, so we can promote
|
||||
nested instances of the same op up to the top level.
|
||||
>>> NaryExpr('&', (A&B),(B|C),(B&C))
|
||||
(A & B & (B | C) & B & C)
|
||||
"""
|
||||
arglist = []
|
||||
for arg in args:
|
||||
if arg.op == op: arglist.extend(arg.args)
|
||||
else: arglist.append(arg)
|
||||
if len(args) == 1:
|
||||
return args[0]
|
||||
elif len(args) == 0:
|
||||
return _NaryExprTable[op]
|
||||
else:
|
||||
return Expr(op, *arglist)
|
||||
|
||||
def conjuncts(s):
|
||||
"""Return a list of the conjuncts in the sentence s.
|
||||
>>> conjuncts(A & B)
|
||||
[A, B]
|
||||
>>> conjuncts(A | B)
|
||||
[(A | B)]
|
||||
"""
|
||||
if isinstance(s, Expr) and s.op == '&':
|
||||
return s.args
|
||||
else:
|
||||
return [s]
|
||||
|
||||
def disjuncts(s):
|
||||
"""Return a list of the disjuncts in the sentence s.
|
||||
>>> disjuncts(A | B)
|
||||
[A, B]
|
||||
>>> disjuncts(A & B)
|
||||
[(A & B)]
|
||||
"""
|
||||
if isinstance(s, Expr) and s.op == '|':
|
||||
return s.args
|
||||
else:
|
||||
return [s]
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
def pl_resolution(KB, alpha):
|
||||
"Propositional Logic Resolution: say if alpha follows from KB. [Fig. 7.12]"
|
||||
clauses = KB.clauses + conjuncts(to_cnf(~alpha))
|
||||
new = set()
|
||||
while True:
|
||||
n = len(clauses)
|
||||
pairs = [(clauses[i], clauses[j]) for i in range(n) for j in range(i+1, n)]
|
||||
for (ci, cj) in pairs:
|
||||
resolvents = pl_resolve(ci, cj)
|
||||
if FALSE in resolvents: return True
|
||||
            new.update(resolvents)
|
||||
if new.issubset(set(clauses)): return False
|
||||
for c in new:
|
||||
if c not in clauses: clauses.append(c)
|
||||
|
||||
def pl_resolve(ci, cj):
|
||||
"""Return all clauses that can be obtained by resolving clauses ci and cj.
|
||||
>>> pl_resolve(to_cnf(A|B|C), to_cnf(~B|~C|F))
|
||||
[(A | C | ~C | F), (A | B | ~B | F)]
|
||||
"""
|
||||
clauses = []
|
||||
for di in disjuncts(ci):
|
||||
for dj in disjuncts(cj):
|
||||
if di == ~dj or ~di == dj:
|
||||
dnew = unique(removeall(di, disjuncts(ci)) +
|
||||
removeall(dj, disjuncts(cj)))
|
||||
clauses.append(NaryExpr('|', *dnew))
|
||||
return clauses
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
class PropHornKB(PropKB):
|
||||
"A KB of Propositional Horn clauses."
|
||||
|
||||
def tell(self, sentence):
|
||||
"Add a Horn Clauses to this KB."
|
||||
op = sentence.op
|
||||
assert op == '>>' or is_prop_symbol(op), "Must be Horn Clause"
|
||||
self.clauses.append(sentence)
|
||||
|
||||
def ask_generator(self, query):
|
||||
"Yield the empty substitution if KB implies query; else False"
|
||||
if not pl_fc_entails(self.clauses, query):
|
||||
return
|
||||
yield {}
|
||||
|
||||
def retract(self, sentence):
|
||||
"Remove the sentence's clauses from the KB"
|
||||
for c in conjuncts(to_cnf(sentence)):
|
||||
if c in self.clauses:
|
||||
self.clauses.remove(c)
|
||||
|
||||
def clauses_with_premise(self, p):
|
||||
"""The list of clauses in KB that have p in the premise.
|
||||
This could be cached away for O(1) speed, but we'll recompute it."""
|
||||
return [c for c in self.clauses
|
||||
if c.op == '>>' and p in conjuncts(c.args[0])]
|
||||
|
||||
def pl_fc_entails(KB, q):
|
||||
"""Use forward chaining to see if a HornKB entails symbol q. [Fig. 7.14]
|
||||
>>> pl_fc_entails(Fig[7,15], expr('Q'))
|
||||
True
|
||||
"""
|
||||
count = dict([(c, len(conjuncts(c.args[0]))) for c in KB.clauses
|
||||
if c.op == '>>'])
|
||||
inferred = DefaultDict(False)
|
||||
agenda = [s for s in KB.clauses if is_prop_symbol(s.op)]
|
||||
if q in agenda: return True
|
||||
while agenda:
|
||||
p = agenda.pop()
|
||||
if not inferred[p]:
|
||||
inferred[p] = True
|
||||
for c in KB.clauses_with_premise(p):
|
||||
count[c] -= 1
|
||||
if count[c] == 0:
|
||||
if c.args[1] == q: return True
|
||||
agenda.append(c.args[1])
|
||||
return False
|
||||
|
||||
## Wumpus World example [Fig. 7.13]
|
||||
Fig[7,13] = expr("(B11 <=> (P12 | P21)) & ~B11")
|
||||
|
||||
## Propositional Logic Forward Chaining example [Fig. 7.15]
|
||||
Fig[7,15] = PropHornKB()
|
||||
for s in "P>>Q (L&M)>>P (B&L)>>M (A&P)>>L (A&B)>>L A B".split():
|
||||
Fig[7,15].tell(expr(s))
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
# DPLL-Satisfiable [Fig. 7.16]
|
||||
|
||||
def dpll_satisfiable(s):
|
||||
"""Check satisfiability of a propositional sentence.
|
||||
This differs from the book code in two ways: (1) it returns a model
|
||||
rather than True when it succeeds; this is more useful. (2) The
|
||||
function find_pure_symbol is passed a list of unknown clauses, rather
|
||||
than a list of all clauses and the model; this is more efficient.
|
||||
>>> dpll_satisfiable(A&~B)
|
||||
{A: True, B: False}
|
||||
>>> dpll_satisfiable(P&~P)
|
||||
False
|
||||
"""
|
||||
clauses = conjuncts(to_cnf(s))
|
||||
symbols = prop_symbols(s)
|
||||
return dpll(clauses, symbols, {})
|
||||
|
||||
def dpll(clauses, symbols, model):
|
||||
"See if the clauses are true in a partial model."
|
||||
unknown_clauses = [] ## clauses with an unknown truth value
|
||||
for c in clauses:
|
||||
val = pl_true(c, model)
|
||||
if val == False:
|
||||
return False
|
||||
if val != True:
|
||||
unknown_clauses.append(c)
|
||||
if not unknown_clauses:
|
||||
return model
|
||||
P, value = find_pure_symbol(symbols, unknown_clauses)
|
||||
if P:
|
||||
return dpll(clauses, removeall(P, symbols), extend(model, P, value))
|
||||
P, value = find_unit_clause(clauses, model)
|
||||
if P:
|
||||
return dpll(clauses, removeall(P, symbols), extend(model, P, value))
|
||||
P = symbols.pop()
|
||||
return (dpll(clauses, symbols, extend(model, P, True)) or
|
||||
dpll(clauses, symbols, extend(model, P, False)))
|
||||
|
||||
def find_pure_symbol(symbols, unknown_clauses):
|
||||
"""Find a symbol and its value if it appears only as a positive literal
|
||||
(or only as a negative) in clauses.
|
||||
>>> find_pure_symbol([A, B, C], [A|~B,~B|~C,C|A])
|
||||
(A, True)
|
||||
"""
|
||||
for s in symbols:
|
||||
found_pos, found_neg = False, False
|
||||
for c in unknown_clauses:
|
||||
if not found_pos and s in disjuncts(c): found_pos = True
|
||||
if not found_neg and ~s in disjuncts(c): found_neg = True
|
||||
if found_pos != found_neg: return s, found_pos
|
||||
return None, None
|
||||
|
||||
def find_unit_clause(clauses, model):
|
||||
"""A unit clause has only 1 variable that is not bound in the model.
|
||||
>>> find_unit_clause([A|B|C, B|~C, A|~B], {A:True})
|
||||
(B, False)
|
||||
"""
|
||||
for clause in clauses:
|
||||
num_not_in_model = 0
|
||||
for literal in disjuncts(clause):
|
||||
sym = literal_symbol(literal)
|
||||
if sym not in model:
|
||||
num_not_in_model += 1
|
||||
P, value = sym, (literal.op != '~')
|
||||
if num_not_in_model == 1:
|
||||
return P, value
|
||||
return None, None
|
||||
|
||||
|
||||
def literal_symbol(literal):
|
||||
"""The symbol in this literal (without the negation).
|
||||
>>> literal_symbol(P)
|
||||
P
|
||||
>>> literal_symbol(~P)
|
||||
P
|
||||
"""
|
||||
if literal.op == '~':
|
||||
return literal.args[0]
|
||||
else:
|
||||
return literal
|
||||
|
||||
|
||||
#______________________________________________________________________________
|
||||
# Walk-SAT [Fig. 7.17]
|
||||
|
||||
def WalkSAT(clauses, p=0.5, max_flips=10000):
|
||||
## model is a random assignment of true/false to the symbols in clauses
|
||||
## See ~/aima1e/print1/manual/knowledge+logic-answers.tex ???
|
||||
model = dict([(s, random.choice([True, False]))
|
||||
for s in prop_symbols(clauses)])
|
||||
for i in range(max_flips):
|
||||
satisfied, unsatisfied = [], []
|
||||
for clause in clauses:
|
||||
if_(pl_true(clause, model), satisfied, unsatisfied).append(clause)
|
||||
if not unsatisfied: ## if model satisfies all the clauses
|
||||
return model
|
||||
clause = random.choice(unsatisfied)
|
||||
if probability(p):
|
||||
sym = random.choice(prop_symbols(clause))
|
||||
else:
|
||||
            ## Flip the symbol in clause that maximizes the number of sat. clauses
|
||||
raise NotImplementedError
|
||||
model[sym] = not model[sym]
|
||||
|
||||
|
||||
# PL-Wumpus-Agent [Fig. 7.19]
|
||||
class PLWumpusAgent(agents.Agent):
|
||||
"An agent for the wumpus world that does logical inference. [Fig. 7.19]"""
|
||||
def __init__(self):
|
||||
KB = FOLKB()
|
||||
x, y, orientation = 1, 1, (1, 0)
|
||||
visited = set() ## squares already visited
|
||||
action = None
|
||||
plan = []
|
||||
|
||||
def program(percept):
|
||||
stench, breeze, glitter = percept
|
||||
x, y, orientation = update_position(x, y, orientation, action)
|
||||
KB.tell('%sS_%d,%d' % (if_(stench, '', '~'), x, y))
|
||||
KB.tell('%sB_%d,%d' % (if_(breeze, '', '~'), x, y))
|
||||
if glitter: action = 'Grab'
|
||||
elif plan: action = plan.pop()
|
||||
else:
|
||||
for [i, j] in fringe(visited):
|
||||
if KB.ask('~P_%d,%d & ~W_%d,%d' % (i, j, i, j)) != False:
|
||||
raise NotImplementedError
|
||||
KB.ask('~P_%d,%d | ~W_%d,%d' % (i, j, i, j)) != False
|
||||
if action == None:
|
||||
action = random.choice(['Forward', 'Right', 'Left'])
|
||||
return action
|
||||
|
||||
self.program = program
|
||||
|
||||
def update_position(x, y, orientation, action):
|
||||
if action == 'TurnRight':
|
||||
orientation = turn_right(orientation)
|
||||
elif action == 'TurnLeft':
|
||||
orientation = turn_left(orientation)
|
||||
elif action == 'Forward':
|
||||
        x, y = vector_add((x, y), orientation)
|
||||
return x, y, orientation
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
def unify(x, y, s):
|
||||
"""Unify expressions x,y with substitution s; return a substitution that
|
||||
would make x,y equal, or None if x,y can not unify. x and y can be
|
||||
variables (e.g. Expr('x')), constants, lists, or Exprs. [Fig. 9.1]
|
||||
>>> unify(x + y, y + C, {})
|
||||
{y: C, x: y}
|
||||
"""
|
||||
if s == None:
|
||||
return None
|
||||
elif x == y:
|
||||
return s
|
||||
elif is_variable(x):
|
||||
return unify_var(x, y, s)
|
||||
elif is_variable(y):
|
||||
return unify_var(y, x, s)
|
||||
elif isinstance(x, Expr) and isinstance(y, Expr):
|
||||
return unify(x.args, y.args, unify(x.op, y.op, s))
|
||||
elif isinstance(x, str) or isinstance(y, str) or not x or not y:
|
||||
return if_(x == y, s, None)
|
||||
elif issequence(x) and issequence(y) and len(x) == len(y):
|
||||
return unify(x[1:], y[1:], unify(x[0], y[0], s))
|
||||
else:
|
||||
return None
|
||||
|
||||
def is_variable(x):
|
||||
"A variable is an Expr with no args and a lowercase symbol as the op."
|
||||
return isinstance(x, Expr) and not x.args and is_var_symbol(x.op)
|
||||
|
||||
def unify_var(var, x, s):
|
||||
if var in s:
|
||||
return unify(s[var], x, s)
|
||||
elif occur_check(var, x):
|
||||
return None
|
||||
else:
|
||||
return extend(s, var, x)
|
||||
|
||||
def occur_check(var, x):
|
||||
"Return true if var occurs anywhere in x."
|
||||
if var == x:
|
||||
return True
|
||||
elif isinstance(x, Expr):
|
||||
return var.op == x.op or occur_check(var, x.args)
|
||||
elif not isinstance(x, str) and issequence(x):
|
||||
for xi in x:
|
||||
if occur_check(var, xi): return True
|
||||
return False
|
||||
|
||||
def extend(s, var, val):
|
||||
"""Copy the substitution s and extend it by setting var to val; return copy.
|
||||
>>> extend({x: 1}, y, 2)
|
||||
{y: 2, x: 1}
|
||||
"""
|
||||
s2 = s.copy()
|
||||
s2[var] = val
|
||||
return s2
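
## Two further constructed cases (added for illustration):
##   >>> unify(F(x), F(A), {})
##   {x: A}
##   >>> unify(x, F(x), {}) == None      ## rejected by the occur check
##   True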
|
||||
|
||||
def subst(s, x):
|
||||
"""Substitute the substitution s into the expression x.
|
||||
>>> subst({x: 42, y:0}, F(x) + y)
|
||||
(F(42) + 0)
|
||||
"""
|
||||
if isinstance(x, list):
|
||||
return [subst(s, xi) for xi in x]
|
||||
elif isinstance(x, tuple):
|
||||
return tuple([subst(s, xi) for xi in x])
|
||||
elif not isinstance(x, Expr):
|
||||
return x
|
||||
elif is_var_symbol(x.op):
|
||||
return s.get(x, x)
|
||||
else:
|
||||
return Expr(x.op, *[subst(s, arg) for arg in x.args])
|
||||
|
||||
def fol_fc_ask(KB, alpha):
|
||||
"""Inefficient forward chaining for first-order logic. [Fig. 9.3]
|
||||
KB is an FOLHornKB and alpha must be an atomic sentence."""
|
||||
while True:
|
||||
new = {}
|
||||
for r in KB.clauses:
|
||||
r1 = standardize_apart(r)
|
||||
ps, q = conjuncts(r.args[0]), r.args[1]
|
||||
raise NotImplementedError
|
||||
|
||||
def standardize_apart(sentence, dic=None):
    """Replace all the variables in sentence with new variables."""
    if dic is None: dic = {}
    if not isinstance(sentence, Expr):
        return sentence
    elif is_var_symbol(sentence.op):
        if sentence in dic:
            return dic[sentence]
        else:
            standardize_apart.counter += 1
            dic[sentence] = Expr('V_%d' % standardize_apart.counter)
            return dic[sentence]
    else:
        return Expr(sentence.op,
                    *[standardize_apart(a, dic) for a in sentence.args])
|
||||
|
||||
standardize_apart.counter = 0
|
||||
|
||||
def fol_bc_ask(KB, goals, theta):
|
||||
"A simple backward-chaining algorithm for first-order logic. [Fig. 9.6]"
|
||||
if not goals:
|
||||
yield theta
|
||||
q1 = subst(theta, goals[0])
|
||||
raise NotImplementedError
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
# Example application (not in the book).
|
||||
# You can use the Expr class to do symbolic differentiation. This used to be
|
||||
# a part of AI; now it is considered a separate field, Symbolic Algebra.
|
||||
|
||||
def diff(y, x):
|
||||
"""Return the symbolic derivative, dy/dx, as an Expr.
|
||||
However, you probably want to simplify the results with simp.
|
||||
>>> diff(x * x, x)
|
||||
((x * 1) + (x * 1))
|
||||
>>> simp(diff(x * x, x))
|
||||
(2 * x)
|
||||
"""
|
||||
if y == x: return ONE
|
||||
elif not y.args: return ZERO
|
||||
else:
|
||||
u, op, v = y.args[0], y.op, y.args[-1]
|
||||
if op == '+': return diff(u, x) + diff(v, x)
|
||||
        elif op == '-' and len(y.args) == 1: return -diff(u, x)
|
||||
elif op == '-': return diff(u, x) - diff(v, x)
|
||||
elif op == '*': return u * diff(v, x) + v * diff(u, x)
|
||||
elif op == '/': return (v*diff(u, x) - u*diff(v, x)) / (v * v)
|
||||
        elif op == '**' and isnumber(v.op):  # power rule when the exponent is a number
|
||||
return (v * u ** (v - 1) * diff(u, x))
|
||||
elif op == '**': return (v * u ** (v - 1) * diff(u, x)
|
||||
+ u ** v * Expr('log')(u) * diff(v, x))
|
||||
elif op == 'log': return diff(u, x) / u
|
||||
else: raise ValueError("Unknown op: %s in diff(%s, %s)" % (op, y, x))
|
||||
|
||||
def simp(x):
|
||||
if not x.args: return x
|
||||
args = map(simp, x.args)
|
||||
u, op, v = args[0], x.op, args[-1]
|
||||
if op == '+':
|
||||
if v == ZERO: return u
|
||||
if u == ZERO: return v
|
||||
if u == v: return TWO * u
|
||||
if u == -v or v == -u: return ZERO
|
||||
elif op == '-' and len(args) == 1:
|
||||
if u.op == '-' and len(u.args) == 1: return u.args[0] ## --y ==> y
|
||||
elif op == '-':
|
||||
if v == ZERO: return u
|
||||
if u == ZERO: return -v
|
||||
if u == v: return ZERO
|
||||
if u == -v or v == -u: return ZERO
|
||||
elif op == '*':
|
||||
if u == ZERO or v == ZERO: return ZERO
|
||||
if u == ONE: return v
|
||||
if v == ONE: return u
|
||||
if u == v: return u ** 2
|
||||
elif op == '/':
|
||||
if u == ZERO: return ZERO
|
||||
if v == ZERO: return Expr('Undefined')
|
||||
if u == v: return ONE
|
||||
if u == -v or v == -u: return ZERO
|
||||
elif op == '**':
|
||||
if u == ZERO: return ZERO
|
||||
if v == ZERO: return ONE
|
||||
if u == ONE: return ONE
|
||||
if v == ONE: return u
|
||||
elif op == 'log':
|
||||
if u == ONE: return ZERO
|
||||
else: raise ValueError("Unknown op: " + op)
|
||||
## If we fall through to here, we can not simplify further
|
||||
return Expr(op, *args)
|
||||
|
||||
def d(y, x):
|
||||
"Differentiate and then simplify."
|
||||
return simp(diff(y, x))
|
||||
|
@ -1,78 +0,0 @@
|
||||
### PropKB
|
||||
>>> kb = PropKB()
|
||||
>>> kb.tell(A & B)
|
||||
>>> kb.tell(B >> C)
|
||||
>>> kb.ask(C) ## The result {} means true, with no substitutions
|
||||
{}
|
||||
>>> kb.ask(P)
|
||||
False
|
||||
>>> kb.retract(B)
|
||||
>>> kb.ask(C)
|
||||
False
|
||||
|
||||
>>> pl_true(P, {})
|
||||
>>> pl_true(P | Q, {P: True})
|
||||
True
|
||||
|
||||
# Notice that the function pl_true cannot reason by cases:
|
||||
>>> pl_true(P | ~P)
|
||||
|
||||
# However, tt_true can:
|
||||
>>> tt_true(P | ~P)
|
||||
True
|
||||
|
||||
# The following are tautologies from [Fig. 7.11]:
|
||||
>>> tt_true("(A & B) <=> (B & A)")
|
||||
True
|
||||
>>> tt_true("(A | B) <=> (B | A)")
|
||||
True
|
||||
>>> tt_true("((A & B) & C) <=> (A & (B & C))")
|
||||
True
|
||||
>>> tt_true("((A | B) | C) <=> (A | (B | C))")
|
||||
True
|
||||
>>> tt_true("~~A <=> A")
|
||||
True
|
||||
>>> tt_true("(A >> B) <=> (~B >> ~A)")
|
||||
True
|
||||
>>> tt_true("(A >> B) <=> (~A | B)")
|
||||
True
|
||||
>>> tt_true("(A <=> B) <=> ((A >> B) & (B >> A))")
|
||||
True
|
||||
>>> tt_true("~(A & B) <=> (~A | ~B)")
|
||||
True
|
||||
>>> tt_true("~(A | B) <=> (~A & ~B)")
|
||||
True
|
||||
>>> tt_true("(A & (B | C)) <=> ((A & B) | (A & C))")
|
||||
True
|
||||
>>> tt_true("(A | (B & C)) <=> ((A | B) & (A | C))")
|
||||
True
|
||||
|
||||
# The following are not tautologies:
|
||||
>>> tt_true(A & ~A)
|
||||
False
|
||||
>>> tt_true(A & B)
|
||||
False
|
||||
|
||||
### [Fig. 7.13]
|
||||
>>> alpha = expr("~P12")
|
||||
>>> to_cnf(Fig[7,13] & ~alpha)
|
||||
((~P12 | B11) & (~P21 | B11) & (P12 | P21 | ~B11) & ~B11 & P12)
|
||||
>>> tt_entails(Fig[7,13], alpha)
|
||||
True
|
||||
>>> pl_resolution(PropKB(Fig[7,13]), alpha)
|
||||
True
|
||||
|
||||
### [Fig. 7.15]
|
||||
>>> pl_fc_entails(Fig[7,15], expr('SomethingSilly'))
|
||||
False
|
||||
|
||||
### Unification:
|
||||
>>> unify(x, x, {})
|
||||
{}
|
||||
>>> unify(x, 3, {})
|
||||
{x: 3}
|
||||
|
||||
|
||||
>>> to_cnf((P&Q) | (~P & ~Q))
|
||||
((~P | P) & (~Q | P) & (~P | Q) & (~Q | Q))
|
||||
|
@ -1,142 +0,0 @@
|
||||
"""Markov Decision Processes (Chapter 17)
|
||||
|
||||
First we define an MDP, and the special case of a GridMDP, in which
|
||||
states are laid out in a 2-dimensional grid. We also represent a policy
|
||||
as a dictionary of {state:action} pairs, and a Utility function as a
|
||||
dictionary of {state:number} pairs. We then define the value_iteration
|
||||
and policy_iteration algorithms."""
|
||||
|
||||
from utils import *
|
||||
|
||||
class MDP:
|
||||
"""A Markov Decision Process, defined by an initial state, transition model,
|
||||
and reward function. We also keep track of a gamma value, for use by
|
||||
algorithms. The transition model is represented somewhat differently from
|
||||
the text. Instead of T(s, a, s') being probability number for each
|
||||
state/action/state triplet, we instead have T(s, a) return a list of (p, s')
|
||||
pairs. We also keep track of the possible states, terminal states, and
|
||||
actions for each state. [page 615]"""
|
||||
|
||||
def __init__(self, init, actlist, terminals, gamma=.9):
|
||||
update(self, init=init, actlist=actlist, terminals=terminals,
|
||||
gamma=gamma, states=set(), reward={})
|
||||
|
||||
def R(self, state):
|
||||
"Return a numeric reward for this state."
|
||||
return self.reward[state]
|
||||
|
||||
    def T(self, state, action):
        """Transition model.  From a state and an action, return a list
        of (result-state, probability) pairs."""
        abstract
|
||||
|
||||
def actions(self, state):
|
||||
"""Set of actions that can be performed in this state. By default, a
|
||||
fixed list of actions, except for terminal states. Override this
|
||||
method if you need to specialize by state."""
|
||||
if state in self.terminals:
|
||||
return [None]
|
||||
else:
|
||||
return self.actlist
|
||||
|
||||
class GridMDP(MDP):
|
||||
"""A two-dimensional grid MDP, as in [Figure 17.1]. All you have to do is
|
||||
specify the grid as a list of lists of rewards; use None for an obstacle
|
||||
(unreachable state). Also, you should specify the terminal states.
|
||||
An action is an (x, y) unit vector; e.g. (1, 0) means move east."""
|
||||
def __init__(self, grid, terminals, init=(0, 0), gamma=.9):
|
||||
grid.reverse() ## because we want row 0 on bottom, not on top
|
||||
MDP.__init__(self, init, actlist=orientations,
|
||||
terminals=terminals, gamma=gamma)
|
||||
update(self, grid=grid, rows=len(grid), cols=len(grid[0]))
|
||||
for x in range(self.cols):
|
||||
for y in range(self.rows):
|
||||
self.reward[x, y] = grid[y][x]
|
||||
if grid[y][x] is not None:
|
||||
self.states.add((x, y))
|
||||
|
||||
def T(self, state, action):
|
||||
if action == None:
|
||||
return [(0.0, state)]
|
||||
else:
|
||||
return [(0.8, self.go(state, action)),
|
||||
(0.1, self.go(state, turn_right(action))),
|
||||
(0.1, self.go(state, turn_left(action)))]
|
||||
|
||||
def go(self, state, direction):
|
||||
"Return the state that results from going in this direction."
|
||||
state1 = vector_add(state, direction)
|
||||
return if_(state1 in self.states, state1, state)
|
||||
|
||||
def to_grid(self, mapping):
|
||||
"""Convert a mapping from (x, y) to v into a [[..., v, ...]] grid."""
|
||||
return list(reversed([[mapping.get((x,y), None)
|
||||
for x in range(self.cols)]
|
||||
for y in range(self.rows)]))
|
||||
|
||||
def to_arrows(self, policy):
|
||||
chars = {(1, 0):'>', (0, 1):'^', (-1, 0):'<', (0, -1):'v', None: '.'}
|
||||
return self.to_grid(dict([(s, chars[a]) for (s, a) in policy.items()]))
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
Fig[17,1] = GridMDP([[-0.04, -0.04, -0.04, +1],
|
||||
[-0.04, None, -0.04, -1],
|
||||
[-0.04, -0.04, -0.04, -0.04]],
|
||||
terminals=[(3, 2), (3, 1)])
|
||||
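## Added sketch (not in the original module): the transition model described
## above returns (probability, result-state) pairs, with 0.8 for the intended
## move and 0.1 for each right-angle slip, so the probabilities sum to 1.
def _gridmdp_transition_demo():
    "Sketch: inspect T for moving east from the start square of Fig[17,1]."
    m = Fig[17,1]
    outcomes = m.T((0, 0), (1, 0))
    print outcomes                           # three (p, s') pairs
    print sum([p for (p, s1) in outcomes])   # should print 1.0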
|
||||
#______________________________________________________________________________
|
||||
|
||||
def value_iteration(mdp, epsilon=0.001):
|
||||
"Solving an MDP by value iteration. [Fig. 17.4]"
|
||||
U1 = dict([(s, 0) for s in mdp.states])
|
||||
R, T, gamma = mdp.R, mdp.T, mdp.gamma
|
||||
while True:
|
||||
U = U1.copy()
|
||||
delta = 0
|
||||
for s in mdp.states:
|
||||
U1[s] = R(s) + gamma * max([sum([p * U[s1] for (p, s1) in T(s, a)])
|
||||
for a in mdp.actions(s)])
|
||||
delta = max(delta, abs(U1[s] - U[s]))
|
||||
if delta < epsilon * (1 - gamma) / gamma:
|
||||
return U
|
||||
|
||||
def best_policy(mdp, U):
|
||||
"""Given an MDP and a utility function U, determine the best policy,
|
||||
as a mapping from state to action. (Equation 17.4)"""
|
||||
pi = {}
|
||||
for s in mdp.states:
|
||||
pi[s] = argmax(mdp.actions(s), lambda a:expected_utility(a, s, U, mdp))
|
||||
return pi
|
||||
|
||||
def expected_utility(a, s, U, mdp):
|
||||
"The expected utility of doing a in state s, according to the MDP and U."
|
||||
return sum([p * U[s1] for (p, s1) in mdp.T(s, a)])
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
def policy_iteration(mdp):
|
||||
"Solve an MDP by policy iteration [Fig. 17.7]"
|
||||
U = dict([(s, 0) for s in mdp.states])
|
||||
pi = dict([(s, random.choice(mdp.actions(s))) for s in mdp.states])
|
||||
while True:
|
||||
U = policy_evaluation(pi, U, mdp)
|
||||
unchanged = True
|
||||
for s in mdp.states:
|
||||
a = argmax(mdp.actions(s), lambda a: expected_utility(a,s,U,mdp))
|
||||
if a != pi[s]:
|
||||
pi[s] = a
|
||||
unchanged = False
|
||||
if unchanged:
|
||||
return pi
|
||||
|
||||
def policy_evaluation(pi, U, mdp, k=20):
    """Return an updated utility mapping U from each state in the MDP to its
    utility, using an approximation (modified policy iteration)."""
    R, T, gamma = mdp.R, mdp.T, mdp.gamma
    for i in range(k):
        for s in mdp.states:
            U[s] = R(s) + gamma * sum([p * U[s1] for (p, s1) in T(s, pi[s])])
    return U
|
||||
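## Added sketch (not in the original module): policy_evaluation is only an
## approximation; raising k gives utilities closer to the true U^pi.
def _policy_evaluation_demo():
    "Sketch: evaluate the best policy of Fig[17,1] with 20 vs. 100 sweeps."
    m = Fig[17,1]
    pi = best_policy(m, value_iteration(m, .01))
    U20 = policy_evaluation(pi, dict([(s, 0) for s in m.states]), m, k=20)
    U100 = policy_evaluation(pi, dict([(s, 0) for s in m.states]), m, k=100)
    print U20[(0, 0)], U100[(0, 0)]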
@ -1,27 +0,0 @@
|
||||
### demo
|
||||
|
||||
>>> m = Fig[17,1]
|
||||
|
||||
>>> pi = best_policy(m, value_iteration(m, .01))
|
||||
|
||||
>>> pi
|
||||
{(3, 2): None, (3, 1): None, (3, 0): (-1, 0), (2, 1): (0, 1), (0, 2): (1, 0), (1, 0): (1, 0), (0, 0): (0, 1), (1, 2): (1, 0), (2, 0): (0, 1), (0, 1): (0, 1), (2, 2): (1, 0)}
|
||||
|
||||
>>> m.to_arrows(pi)
|
||||
[['>', '>', '>', '.'], ['^', None, '^', '.'], ['^', '>', '^', '<']]
|
||||
|
||||
>>> print_table(m.to_arrows(pi))
|
||||
> > > .
|
||||
^ None ^ .
|
||||
^ > ^ <
|
||||
|
||||
>>> value_iteration(m, .01)
|
||||
{(3, 2): 1.0, (3, 1): -1.0, (3, 0): 0.12958868267972745, (0, 1): 0.39810203830605462, (0, 2): 0.50928545646220924, (1, 0): 0.25348746162470537, (0, 0): 0.29543540628363629, (1, 2): 0.64958064617168676, (2, 0): 0.34461306281476806, (2, 1): 0.48643676237737926, (2, 2): 0.79536093684710951}
|
||||
|
||||
>>> policy_iteration(m)
|
||||
{(3, 2): None, (3, 1): None, (3, 0): (0, -1), (2, 1): (-1, 0), (0, 2): (1, 0), (1, 0): (1, 0), (0, 0): (1, 0), (1, 2): (1, 0), (2, 0): (1, 0), (0, 1): (1, 0), (2, 2): (1, 0)}
|
||||
|
||||
>>> print_table(m.to_arrows(policy_iteration(m)))
|
||||
> > > .
|
||||
> None < .
|
||||
> > > v
|
@ -1,170 +0,0 @@
|
||||
"""A chart parser and some grammars. (Chapter 22)"""
|
||||
|
||||
from utils import *
|
||||
|
||||
#______________________________________________________________________________
|
||||
# Grammars and Lexicons
|
||||
|
||||
def Rules(**rules):
|
||||
"""Create a dictionary mapping symbols to alternative sequences.
|
||||
>>> Rules(A = "B C | D E")
|
||||
{'A': [['B', 'C'], ['D', 'E']]}
|
||||
"""
|
||||
for (lhs, rhs) in rules.items():
|
||||
rules[lhs] = [alt.strip().split() for alt in rhs.split('|')]
|
||||
return rules
|
||||
|
||||
def Lexicon(**rules):
|
||||
"""Create a dictionary mapping symbols to alternative words.
|
||||
>>> Lexicon(Art = "the | a | an")
|
||||
{'Art': ['the', 'a', 'an']}
|
||||
"""
|
||||
for (lhs, rhs) in rules.items():
|
||||
rules[lhs] = [word.strip() for word in rhs.split('|')]
|
||||
return rules
|
||||
|
||||
class Grammar:
|
||||
def __init__(self, name, rules, lexicon):
|
||||
"A grammar has a set of rules and a lexicon."
|
||||
update(self, name=name, rules=rules, lexicon=lexicon)
|
||||
self.categories = DefaultDict([])
|
||||
for lhs in lexicon:
|
||||
for word in lexicon[lhs]:
|
||||
self.categories[word].append(lhs)
|
||||
|
||||
def rewrites_for(self, cat):
|
||||
"Return a sequence of possible rhs's that cat can be rewritten as."
|
||||
return self.rules.get(cat, ())
|
||||
|
||||
def isa(self, word, cat):
|
||||
"Return True iff word is of category cat"
|
||||
return cat in self.categories[word]
|
||||
|
||||
def __repr__(self):
|
||||
return '<Grammar %s>' % self.name
|
||||
|
||||
E0 = Grammar('E0',
|
||||
Rules( # Grammar for E_0 [Fig. 22.4]
|
||||
S = 'NP VP | S Conjunction S',
|
||||
NP = 'Pronoun | Noun | Article Noun | Digit Digit | NP PP | NP RelClause',
|
||||
VP = 'Verb | VP NP | VP Adjective | VP PP | VP Adverb',
|
||||
PP = 'Preposition NP',
|
||||
RelClause = 'That VP'),
|
||||
|
||||
Lexicon( # Lexicon for E_0 [Fig. 22.3]
|
||||
Noun = "stench | breeze | glitter | nothing | wumpus | pit | pits | gold | east",
|
||||
Verb = "is | see | smell | shoot | fell | stinks | go | grab | carry | kill | turn | feel",
|
||||
Adjective = "right | left | east | south | back | smelly",
|
||||
Adverb = "here | there | nearby | ahead | right | left | east | south | back",
|
||||
Pronoun = "me | you | I | it",
|
||||
Name = "John | Mary | Boston | Aristotle",
|
||||
Article = "the | a | an",
|
||||
Preposition = "to | in | on | near",
|
||||
Conjunction = "and | or | but",
|
||||
Digit = "0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9",
|
||||
That = "that"
|
||||
))
|
||||
|
||||
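## Added sketch (not in the original module): querying the E0 grammar defined
## above through the Grammar methods isa() and rewrites_for().
def _grammar_demo():
    "Sketch: a category test and a rule lookup on E0."
    print E0.isa('the', 'Article')     # True
    print E0.rewrites_for('PP')        # [['Preposition', 'NP']]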
E_ = Grammar('E_', # Trivial Grammar and lexicon for testing
|
||||
Rules(
|
||||
S = 'NP VP',
|
||||
NP = 'Art N | Pronoun',
|
||||
VP = 'V NP'),
|
||||
|
||||
Lexicon(
|
||||
Art = 'the | a',
|
||||
N = 'man | woman | table | shoelace | saw',
|
||||
Pronoun = 'I | you | it',
|
||||
V = 'saw | liked | feel'
|
||||
))
|
||||
|
||||
def generate_random(grammar=E_, s='S'):
|
||||
"""Replace each token in s by a random entry in grammar (recursively).
|
||||
This is useful for testing a grammar, e.g. generate_random(E_)"""
|
||||
import random
|
||||
|
||||
def rewrite(tokens, into):
|
||||
for token in tokens:
|
||||
if token in grammar.rules:
|
||||
rewrite(random.choice(grammar.rules[token]), into)
|
||||
elif token in grammar.lexicon:
|
||||
into.append(random.choice(grammar.lexicon[token]))
|
||||
else:
|
||||
into.append(token)
|
||||
return into
|
||||
|
||||
return ' '.join(rewrite(s.split(), []))
|
||||
|
||||
#______________________________________________________________________________
|
||||
# Chart Parsing
|
||||
|
||||
|
||||
class Chart:
|
||||
"""Class for parsing sentences using a chart data structure. [Fig 22.7]
|
||||
>>> chart = Chart(E0);
|
||||
>>> len(chart.parses('the stench is in 2 2'))
|
||||
1
|
||||
"""
|
||||
|
||||
def __init__(self, grammar, trace=False):
|
||||
"""A datastructure for parsing a string; and methods to do the parse.
|
||||
self.chart[i] holds the edges that end just before the i'th word.
|
||||
Edges are 5-element lists of [start, end, lhs, [found], [expects]]."""
|
||||
update(self, grammar=grammar, trace=trace)
|
||||
|
||||
def parses(self, words, S='S'):
|
||||
"""Return a list of parses; words can be a list or string."""
|
||||
if isinstance(words, str):
|
||||
words = words.split()
|
||||
self.parse(words, S)
|
||||
# Return all the parses that span the whole input
|
||||
return [[i, j, S, found, []]
|
||||
for (i, j, lhs, found, expects) in self.chart[len(words)]
|
||||
if lhs == S and expects == []]
|
||||
|
||||
def parse(self, words, S='S'):
|
||||
"""Parse a list of words; according to the grammar.
|
||||
Leave results in the chart."""
|
||||
self.chart = [[] for i in range(len(words)+1)]
|
||||
self.add_edge([0, 0, 'S_', [], [S]])
|
||||
for i in range(len(words)):
|
||||
self.scanner(i, words[i])
|
||||
return self.chart
|
||||
|
||||
def add_edge(self, edge):
|
||||
"Add edge to chart, and see if it extends or predicts another edge."
|
||||
start, end, lhs, found, expects = edge
|
||||
if edge not in self.chart[end]:
|
||||
self.chart[end].append(edge)
|
||||
if self.trace:
|
||||
print '%10s: added %s' % (caller(2), edge)
|
||||
if not expects:
|
||||
self.extender(edge)
|
||||
else:
|
||||
self.predictor(edge)
|
||||
|
||||
def scanner(self, j, word):
|
||||
"For each edge expecting a word of this category here, extend the edge."
|
||||
for (i, j, A, alpha, Bb) in self.chart[j]:
|
||||
if Bb and self.grammar.isa(word, Bb[0]):
|
||||
self.add_edge([i, j+1, A, alpha + [(Bb[0], word)], Bb[1:]])
|
||||
|
||||
def predictor(self, (i, j, A, alpha, Bb)):
|
||||
"Add to chart any rules for B that could help extend this edge."
|
||||
B = Bb[0]
|
||||
if B in self.grammar.rules:
|
||||
for rhs in self.grammar.rewrites_for(B):
|
||||
self.add_edge([j, j, B, [], rhs])
|
||||
|
||||
def extender(self, edge):
|
||||
"See what edges can be extended by this edge."
|
||||
(j, k, B, _, _) = edge
|
||||
for (i, j, A, alpha, B1b) in self.chart[j]:
|
||||
if B1b and B == B1b[0]:
|
||||
self.add_edge([i, k, A, alpha + [edge], B1b[1:]])
|
||||
|
||||
|
||||
|
||||
#### TODO:
|
||||
#### 1. Parsing with augmentations -- requires unification, etc.
|
||||
#### 2. Sequitor
|
@ -1,23 +0,0 @@
|
||||
>>> chart = Chart(E0)
|
||||
|
||||
>>> chart.parses('the wumpus that is smelly is near 2 2')
|
||||
[[0, 9, 'S', [[0, 5, 'NP', [[0, 2, 'NP', [('Article', 'the'), ('Noun', 'wumpus')], []], [2, 5, 'RelClause', [('That', 'that'), [3, 5, 'VP', [[3, 4, 'VP', [('Verb', 'is')], []], ('Adjective', 'smelly')], []]], []]], []], [5, 9, 'VP', [[5, 6, 'VP', [('Verb', 'is')], []], [6, 9, 'PP', [('Preposition', 'near'), [7, 9, 'NP', [('Digit', '2'), ('Digit', '2')], []]], []]], []]], []]]
|
||||
|
||||
### There is a built-in trace facility (compare [Fig. 22.9])
|
||||
>>> Chart(E_, trace=True).parses('I feel it')
|
||||
parse: added [0, 0, 'S_', [], ['S']]
|
||||
predictor: added [0, 0, 'S', [], ['NP', 'VP']]
|
||||
predictor: added [0, 0, 'NP', [], ['Art', 'N']]
|
||||
predictor: added [0, 0, 'NP', [], ['Pronoun']]
|
||||
scanner: added [0, 1, 'NP', [('Pronoun', 'I')], []]
|
||||
extender: added [0, 1, 'S', [[0, 1, 'NP', [('Pronoun', 'I')], []]], ['VP']]
|
||||
predictor: added [1, 1, 'VP', [], ['V', 'NP']]
|
||||
scanner: added [1, 2, 'VP', [('V', 'feel')], ['NP']]
|
||||
predictor: added [2, 2, 'NP', [], ['Art', 'N']]
|
||||
predictor: added [2, 2, 'NP', [], ['Pronoun']]
|
||||
scanner: added [2, 3, 'NP', [('Pronoun', 'it')], []]
|
||||
extender: added [1, 3, 'VP', [('V', 'feel'), [2, 3, 'NP', [('Pronoun', 'it')], []]], []]
|
||||
extender: added [0, 3, 'S', [[0, 1, 'NP', [('Pronoun', 'I')], []], [1, 3, 'VP', [('V', 'feel'), [2, 3, 'NP', [('Pronoun', 'it')], []]], []]], []]
|
||||
extender: added [0, 3, 'S_', [[0, 3, 'S', [[0, 1, 'NP', [('Pronoun', 'I')], []], [1, 3, 'VP', [('V', 'feel'), [2, 3, 'NP', [('Pronoun', 'it')], []]], []]], []]], []]
|
||||
[[0, 3, 'S', [[0, 1, 'NP', [('Pronoun', 'I')], []], [1, 3, 'VP', [('V', 'feel'), [2, 3, 'NP', [('Pronoun', 'it')], []]], []]], []]]
|
||||
|
@ -1,7 +0,0 @@
|
||||
"""Planning (Chapters 11-12)
|
||||
"""
|
||||
|
||||
from __future__ import generators
|
||||
from utils import *
|
||||
import agents
|
||||
import math, random, sys, time, bisect, string
|
@ -1,171 +0,0 @@
|
||||
"""Probability models. (Chapter 13-15)
|
||||
"""
|
||||
|
||||
from utils import *
|
||||
from logic import extend
|
||||
import agents
|
||||
import bisect, random
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
class DTAgent(agents.Agent):
|
||||
"A decision-theoretic agent. [Fig. 13.1]"
|
||||
|
||||
def __init__(self, belief_state):
|
||||
agents.Agent.__init__(self)
|
||||
|
||||
        def program(percept):
            belief_state.observe(program.action, percept)
            program.action = argmax(belief_state.actions(),
                                    belief_state.expected_outcome_utility)
            return program.action
|
||||
|
||||
program.action = None
|
||||
self.program = program
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
class ProbDist:
|
||||
"""A discrete probability distribution. You name the random variable
|
||||
in the constructor, then assign and query probability of values.
|
||||
>>> P = ProbDist('Flip'); P['H'], P['T'] = 0.5, 0.5; P['H']
|
||||
0.5
|
||||
"""
|
||||
def __init__(self, varname='?'):
|
||||
update(self, prob={}, varname=varname, values=[])
|
||||
|
||||
def __getitem__(self, val):
|
||||
"Given a value, return P(value)."
|
||||
return self.prob[val]
|
||||
|
||||
def __setitem__(self, val, p):
|
||||
"Set P(val) = p"
|
||||
if val not in self.values:
|
||||
self.values.append(val)
|
||||
self.prob[val] = p
|
||||
|
||||
def normalize(self):
|
||||
"Make sure the probabilities of all values sum to 1."
|
||||
total = sum(self.prob.values())
|
||||
if not (1.0-epsilon < total < 1.0+epsilon):
|
||||
for val in self.prob:
|
||||
self.prob[val] /= total
|
||||
return self
|
||||
|
||||
epsilon = 0.001
|
||||
|
||||
class JointProbDist(ProbDist):
|
||||
"""A discrete probability distribute over a set of variables.
|
||||
>>> P = JointProbDist(['X', 'Y']); P[1, 1] = 0.25
|
||||
>>> P[1, 1]
|
||||
0.25
|
||||
"""
|
||||
def __init__(self, variables):
|
||||
update(self, prob={}, variables=variables, vals=DefaultDict([]))
|
||||
|
||||
def __getitem__(self, values):
|
||||
"Given a tuple or dict of values, return P(values)."
|
||||
if isinstance(values, dict):
|
||||
values = tuple([values[var] for var in self.variables])
|
||||
return self.prob[values]
|
||||
|
||||
    def __setitem__(self, values, p):
        """Set P(values) = p.  Values can be a tuple or a dict; it must
        have a value for each of the variables in the joint. Also keep track
        of the values we have seen so far for each variable."""
        if isinstance(values, dict):
            values = tuple([values[var] for var in self.variables])
        self.prob[values] = p
        for var, val in zip(self.variables, values):
            if val not in self.vals[var]:
                self.vals[var].append(val)
|
||||
|
||||
def values(self, var):
|
||||
"Return the set of possible values for a variable."
|
||||
return self.vals[var]
|
||||
|
||||
def __repr__(self):
|
||||
return "P(%s)" % self.variables
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
def enumerate_joint_ask(X, e, P):
|
||||
"""Return a probability distribution over the values of the variable X,
|
||||
given the {var:val} observations e, in the JointProbDist P.
|
||||
Works for Boolean variables only. [Fig. 13.4]"""
|
||||
Q = ProbDist(X) ## A probability distribution for X, initially empty
|
||||
Y = [v for v in P.variables if v != X and v not in e]
|
||||
for xi in P.values(X):
|
||||
Q[xi] = enumerate_joint(Y, extend(e, X, xi), P)
|
||||
return Q.normalize()
|
||||
|
||||
def enumerate_joint(vars, values, P):
|
||||
"As in Fig 13.4, except x and e are already incorporated in values."
|
||||
if not vars:
|
||||
return P[values]
|
||||
Y = vars[0]; rest = vars[1:]
|
||||
return sum([enumerate_joint(rest, extend(values, Y, y), P)
|
||||
for y in P.values(Y)])
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
class BayesNet:
    def __init__(self, nodes=[]):
        update(self, nodes=[], vars=[], evidence={})
        for node in nodes:
            self.add(node)

    def add(self, node):
        self.nodes.append(node)
        self.vars.append(node.variable)

    def observe(self, var, val):
        self.evidence[var] = val
|
||||
|
||||
class BayesNode:
|
||||
def __init__(self, variable, parents, cpt):
|
||||
if isinstance(parents, str): parents = parents.split()
|
||||
update(self, variable=variable, parents=parents, cpt=cpt)
|
||||
|
||||
node = BayesNode
|
||||
|
||||
|
||||
T, F = True, False
|
||||
|
||||
burglary = BayesNet([
|
||||
node('Burglary', '', .001),
|
||||
node('Earthquake', '', .002),
|
||||
node('Alarm', 'Burglary Earthquake', {
|
||||
(T, T):.95,
|
||||
(T, F):.94,
|
||||
(F, T):.29,
|
||||
(F, F):.001}),
|
||||
node('JohnCalls', 'Alarm', {T:.90, F:.05}),
|
||||
node('MaryCalls', 'Alarm', {T:.70, F:.01})
|
||||
])
|
||||
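## Added sketch (not in the original module): reading the network defined
## above.  Each BayesNode stores its parents and a CPT keyed by parent values.
def _burglary_demo():
    "Sketch: look up P(Alarm=true | Burglary=true, Earthquake=false)."
    alarm = burglary.nodes[2]
    print alarm.variable, alarm.parents    # Alarm ['Burglary', 'Earthquake']
    print alarm.cpt[(T, F)]                # 0.94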
#______________________________________________________________________________
|
||||
|
||||
def elimination_ask(X, e, bn):
    "[Fig. 14.10]"
    ## Note: Factor and is_hidden are not defined in this module yet;
    ## this is skeleton code.
    factors = []
    for var in reversed(bn.vars):
        factors.append(Factor(var, e))
        if is_hidden(var, X, e):
            factors = sum_out(var, factors)
    return pointwise_product(factors).normalize()
|
||||
|
||||
def pointwise_product(factors):
|
||||
pass
|
||||
|
||||
def sum_out(var, factors):
|
||||
pass
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
def prior_sample(bn):
    ## Skeleton: assumes each BayesNode can be extended with a sample(event)
    ## method that draws a value given the values of its parents in event.
    x = {}
    for node in bn.nodes:
        x[node.variable] = node.sample(x)
    return x
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
@ -1,32 +0,0 @@
|
||||
## We can build up a probability distribution like this (p. 469):
|
||||
>>> P = ProbDist()
|
||||
>>> P['sunny'] = 0.7
|
||||
>>> P['rain'] = 0.2
|
||||
>>> P['cloudy'] = 0.08
|
||||
>>> P['snow'] = 0.02
|
||||
|
||||
## and query it like this:
|
||||
>>> P['rain']
|
||||
0.20000000000000001
|
||||
|
||||
## A Joint Probability Distribution is dealt with like this (p. 475):
|
||||
>>> P = JointProbDist(['Toothache', 'Cavity', 'Catch'])
|
||||
>>> T, F = True, False
|
||||
>>> P[T, T, T] = 0.108; P[T, T, F] = 0.012; P[F, T, T] = 0.072; P[F, T, F] = 0.008
|
||||
>>> P[T, F, T] = 0.016; P[T, F, F] = 0.064; P[F, F, T] = 0.144; P[F, F, F] = 0.576
|
||||
|
||||
>>> P[T, T, T]
|
||||
0.108
|
||||
|
||||
## Ask for P(Cavity|Toothache=T)
|
||||
>>> PC = enumerate_joint_ask('Cavity', {'Toothache': T}, P)
|
||||
>>> PC.prob
|
||||
{False: 0.39999999999999997, True: 0.59999999999999998}
|
||||
|
||||
>>> 0.6-epsilon < PC[T] < 0.6+epsilon
|
||||
True
|
||||
|
||||
>>> 0.4-epsilon < PC[F] < 0.4+epsilon
|
||||
True
|
||||
|
||||
|
@ -1,15 +0,0 @@
|
||||
"""Reinforcement Learning (Chapter 21)
|
||||
"""
|
||||
|
||||
from utils import *
|
||||
import agents
|
||||
|
||||
class PassiveADPAgent(agents.Agent):
|
||||
"""Passive (non-learning) agent that uses adaptive dynamic programming
|
||||
on a given MDP and policy. [Fig. 21.2]"""
|
||||
NotImplementedError
|
||||
|
||||
class PassiveTDAgent(agents.Agent):
|
||||
"""Passive (non-learning) agent that uses temporal differences to learn
|
||||
utility estimates. [Fig. 21.4]"""
|
||||
NotImplementedError
|
@ -1,736 +0,0 @@
|
||||
"""Search (Chapters 3-4)
|
||||
|
||||
The way to use this code is to subclass Problem to create a class of problems,
|
||||
then create problem instances and solve them with calls to the various search
|
||||
functions."""
|
||||
|
||||
from __future__ import generators
|
||||
from utils import *
|
||||
import agents
|
||||
import math, random, sys, time, bisect, string
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
class Problem:
|
||||
"""The abstract class for a formal problem. You should subclass this and
|
||||
implement the method successor, and possibly __init__, goal_test, and
|
||||
path_cost. Then you will create instances of your subclass and solve them
|
||||
with the various search functions."""
|
||||
|
||||
def __init__(self, initial, goal=None):
|
||||
"""The constructor specifies the initial state, and possibly a goal
|
||||
state, if there is a unique goal. Your subclass's constructor can add
|
||||
other arguments."""
|
||||
self.initial = initial; self.goal = goal
|
||||
|
||||
def successor(self, state):
|
||||
"""Given a state, return a sequence of (action, state) pairs reachable
|
||||
from this state. If there are many successors, consider an iterator
|
||||
that yields the successors one at a time, rather than building them
|
||||
all at once. Iterators will work fine within the framework."""
|
||||
abstract
|
||||
|
||||
def goal_test(self, state):
|
||||
"""Return True if the state is a goal. The default method compares the
|
||||
state to self.goal, as specified in the constructor. Implement this
|
||||
method if checking against a single self.goal is not enough."""
|
||||
return state == self.goal
|
||||
|
||||
def path_cost(self, c, state1, action, state2):
|
||||
"""Return the cost of a solution path that arrives at state2 from
|
||||
state1 via action, assuming cost c to get up to state1. If the problem
|
||||
is such that the path doesn't matter, this function will only look at
|
||||
state2. If the path does matter, it will consider c and maybe state1
|
||||
and action. The default method costs 1 for every step in the path."""
|
||||
return c + 1
|
||||
|
||||
    def value(self, state):
        """For optimization problems, each state has a value.  Hill-climbing
        and related algorithms try to maximize this value."""
        abstract
|
||||
#______________________________________________________________________________
|
||||
|
||||
class Node:
|
||||
"""A node in a search tree. Contains a pointer to the parent (the node
|
||||
that this is a successor of) and to the actual state for this node. Note
|
||||
that if a state is arrived at by two paths, then there are two nodes with
|
||||
the same state. Also includes the action that got us to this state, and
|
||||
the total path_cost (also known as g) to reach the node. Other functions
|
||||
may add an f and h value; see best_first_graph_search and astar_search for
|
||||
an explanation of how the f and h values are handled. You will not need to
|
||||
subclass this class."""
|
||||
|
||||
def __init__(self, state, parent=None, action=None, path_cost=0):
|
||||
"Create a search tree Node, derived from a parent by an action."
|
||||
update(self, state=state, parent=parent, action=action,
|
||||
path_cost=path_cost, depth=0)
|
||||
if parent:
|
||||
self.depth = parent.depth + 1
|
||||
|
||||
def __repr__(self):
|
||||
return "<Node %s>" % (self.state,)
|
||||
|
||||
def path(self):
|
||||
"Create a list of nodes from the root to this node."
|
||||
x, result = self, [self]
|
||||
while x.parent:
|
||||
result.append(x.parent)
|
||||
x = x.parent
|
||||
return result
|
||||
|
||||
def expand(self, problem):
|
||||
"Return a list of nodes reachable from this node. [Fig. 3.8]"
|
||||
return [Node(next, self, act,
|
||||
problem.path_cost(self.path_cost, self.state, act, next))
|
||||
for (act, next) in problem.successor(self.state)]
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
class SimpleProblemSolvingAgent(agents.Agent):
|
||||
"""Abstract framework for problem-solving agent. [Fig. 3.1]"""
|
||||
def __init__(self):
|
||||
Agent.__init__(self)
|
||||
state = []
|
||||
seq = []
|
||||
|
||||
def program(percept):
|
||||
state = self.update_state(state, percept)
|
||||
if not seq:
|
||||
goal = self.formulate_goal(state)
|
||||
problem = self.formulate_problem(state, goal)
|
||||
seq = self.search(problem)
|
||||
action = seq[0]
|
||||
seq[0:1] = []
|
||||
return action
|
||||
|
||||
self.program = program
|
||||
|
||||
#______________________________________________________________________________
|
||||
## Uninformed Search algorithms
|
||||
|
||||
def tree_search(problem, fringe):
|
||||
"""Search through the successors of a problem to find a goal.
|
||||
The argument fringe should be an empty queue.
|
||||
Don't worry about repeated paths to a state. [Fig. 3.8]"""
|
||||
fringe.append(Node(problem.initial))
|
||||
while fringe:
|
||||
node = fringe.pop()
|
||||
if problem.goal_test(node.state):
|
||||
return node
|
||||
fringe.extend(node.expand(problem))
|
||||
return None
|
||||
|
||||
def breadth_first_tree_search(problem):
|
||||
"Search the shallowest nodes in the search tree first. [p 74]"
|
||||
return tree_search(problem, FIFOQueue())
|
||||
|
||||
def depth_first_tree_search(problem):
|
||||
"Search the deepest nodes in the search tree first. [p 74]"
|
||||
return tree_search(problem, Stack())
|
||||
|
||||
def graph_search(problem, fringe):
|
||||
"""Search through the successors of a problem to find a goal.
|
||||
The argument fringe should be an empty queue.
|
||||
If two paths reach a state, only use the best one. [Fig. 3.18]"""
|
||||
closed = {}
|
||||
fringe.append(Node(problem.initial))
|
||||
while fringe:
|
||||
node = fringe.pop()
|
||||
if problem.goal_test(node.state):
|
||||
return node
|
||||
if node.state not in closed:
|
||||
closed[node.state] = True
|
||||
fringe.extend(node.expand(problem))
|
||||
return None
|
||||
|
||||
def breadth_first_graph_search(problem):
|
||||
"Search the shallowest nodes in the search tree first. [p 74]"
|
||||
return graph_search(problem, FIFOQueue())
|
||||
|
||||
def depth_first_graph_search(problem):
|
||||
"Search the deepest nodes in the search tree first. [p 74]"
|
||||
return graph_search(problem, Stack())
|
||||
|
||||
def depth_limited_search(problem, limit=50):
|
||||
"[Fig. 3.12]"
|
||||
def recursive_dls(node, problem, limit):
|
||||
cutoff_occurred = False
|
||||
if problem.goal_test(node.state):
|
||||
return node
|
||||
elif node.depth == limit:
|
||||
return 'cutoff'
|
||||
else:
|
||||
for successor in node.expand(problem):
|
||||
result = recursive_dls(successor, problem, limit)
|
||||
if result == 'cutoff':
|
||||
cutoff_occurred = True
|
||||
elif result != None:
|
||||
return result
|
||||
if cutoff_occurred:
|
||||
return 'cutoff'
|
||||
else:
|
||||
return None
|
||||
# Body of depth_limited_search:
|
||||
return recursive_dls(Node(problem.initial), problem, limit)
|
||||
|
||||
def iterative_deepening_search(problem):
|
||||
"[Fig. 3.13]"
|
||||
for depth in xrange(sys.maxint):
|
||||
result = depth_limited_search(problem, depth)
|
||||
        if result != 'cutoff':
|
||||
return result
|
||||
|
||||
#______________________________________________________________________________
|
||||
# Informed (Heuristic) Search
|
||||
|
||||
def best_first_graph_search(problem, f):
|
||||
"""Search the nodes with the lowest f scores first.
|
||||
You specify the function f(node) that you want to minimize; for example,
|
||||
if f is a heuristic estimate to the goal, then we have greedy best
|
||||
first search; if f is node.depth then we have depth-first search.
|
||||
There is a subtlety: the line "f = memoize(f, 'f')" means that the f
|
||||
values will be cached on the nodes as they are computed. So after doing
|
||||
a best first search you can examine the f values of the path returned."""
|
||||
f = memoize(f, 'f')
|
||||
return graph_search(problem, PriorityQueue(min, f))
|
||||
|
||||
greedy_best_first_graph_search = best_first_graph_search
|
||||
# Greedy best-first search is accomplished by specifying f(n) = h(n).
|
||||
|
||||
def astar_search(problem, h=None):
|
||||
"""A* search is best-first graph search with f(n) = g(n)+h(n).
|
||||
You need to specify the h function when you call astar_search.
|
||||
Uses the pathmax trick: f(n) = max(f(n), g(n)+h(n))."""
|
||||
h = h or problem.h
|
||||
def f(n):
|
||||
return max(getattr(n, 'f', -infinity), n.path_cost + h(n))
|
||||
return best_first_graph_search(problem, f)
|
||||
|
||||
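## Added sketch (not in the original module): the memoized f values mentioned
## above are cached on the nodes, so they can be inspected after a search.
## (Uses the romania GraphProblem defined further down in this file.)
def _astar_f_values_demo():
    "Sketch: run A* from A to B on the Romania map and show f along the path."
    prob = GraphProblem('A', 'B', romania)
    goal = astar_search(prob)
    print [(node.state, node.f) for node in goal.path()]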
#______________________________________________________________________________
|
||||
## Other search algorithms
|
||||
|
||||
def recursive_best_first_search(problem, h=None):
    "[Fig. 4.5]"
    h = h or problem.h
    def RBFS(problem, node, flimit):
        if problem.goal_test(node.state):
            return node, 0   ## the second value is immaterial at the goal
        successors = node.expand(problem)
        if len(successors) == 0:
            return None, infinity
        for s in successors:
            s.f = max(s.path_cost + h(s), node.f)
        while True:
            successors.sort(lambda x, y: cmp(x.f, y.f)) # Order by lowest f value
            best = successors[0]
            if best.f > flimit:
                return None, best.f
            if len(successors) > 1:
                alternative = successors[1].f
            else:
                alternative = infinity
            result, best.f = RBFS(problem, best, min(flimit, alternative))
            if result is not None:
                return result, best.f
    node = Node(problem.initial)
    node.f = h(node)
    result, bestf = RBFS(problem, node, infinity)
    return result
|
||||
|
||||
|
||||
def hill_climbing(problem):
    """From the initial node, keep choosing the neighbor with highest value,
    stopping when no neighbor is better. [Fig. 4.11]"""
    current = Node(problem.initial)
    while True:
        neighbors = current.expand(problem)
        if not neighbors:
            return current.state
        neighbor = argmax(neighbors, lambda node: problem.value(node.state))
        if problem.value(neighbor.state) <= problem.value(current.state):
            return current.state
        current = neighbor
|
||||
|
||||
def exp_schedule(k=20, lam=0.005, limit=100):
|
||||
"One possible schedule function for simulated annealing"
|
||||
return lambda t: if_(t < limit, k * math.exp(-lam * t), 0)
|
||||
|
||||
def simulated_annealing(problem, schedule=exp_schedule()):
    "[Fig. 4.5]"
    current = Node(problem.initial)
    for t in xrange(sys.maxint):
        T = schedule(t)
        if T == 0:
            return current
        next = random.choice(current.expand(problem))
        delta_e = next.path_cost - current.path_cost
        if delta_e > 0 or probability(math.exp(delta_e/T)):
            current = next
|
||||
|
||||
def online_dfs_agent(a):
|
||||
"[Fig. 4.12]"
|
||||
pass #### more
|
||||
|
||||
def lrta_star_agent(a):
|
||||
"[Fig. 4.12]"
|
||||
pass #### more
|
||||
|
||||
#______________________________________________________________________________
|
||||
# Genetic Algorithm
|
||||
|
||||
def genetic_search(problem, fitness_fn, ngen=1000, pmut=0.0, n=20):
|
||||
"""Call genetic_algorithm on the appropriate parts of a problem.
|
||||
This requires that the problem has a successor function that generates
|
||||
reasonable states, and that it has a path_cost function that scores states.
|
||||
We use the negative of the path_cost function, because costs are to be
|
||||
minimized, while genetic-algorithm expects a fitness_fn to be maximized."""
|
||||
    states = [s for (a, s) in problem.successor(problem.initial)[:n]]
|
||||
random.shuffle(states)
|
||||
fitness_fn = lambda s: - problem.path_cost(0, s, None, s)
|
||||
return genetic_algorithm(states, fitness_fn, ngen, pmut)
|
||||
|
||||
def genetic_algorithm(population, fitness_fn, ngen=1000, pmut=0.0):
|
||||
"""[Fig. 4.7]"""
|
||||
def reproduce(p1, p2):
|
||||
c = random.randrange(len(p1))
|
||||
return p1[:c] + p2[c:]
|
||||
|
||||
    for i in range(ngen):
        new_population = []
        for j in range(len(population)):
            p1, p2 = random_weighted_selection(population, 2, fitness_fn)
            child = reproduce(p1, p2)
            if random.uniform(0, 1) < pmut:
                child.mutate()   ## assumes individuals provide a mutate() method
            new_population.append(child)
|
||||
population = new_population
|
||||
return argmax(population, fitness_fn)
|
||||
|
||||
def random_weighted_selection(seq, n, weight_fn):
|
||||
"""Pick n elements of seq, weighted according to weight_fn.
|
||||
That is, apply weight_fn to each element of seq, add up the total.
|
||||
Then choose an element e with probability weight[e]/total.
|
||||
Repeat n times, with replacement. """
|
||||
totals = []; runningtotal = 0
|
||||
for item in seq:
|
||||
runningtotal += weight_fn(item)
|
||||
totals.append(runningtotal)
|
||||
selections = []
|
||||
for s in range(n):
|
||||
r = random.uniform(0, totals[-1])
|
||||
for i in range(len(seq)):
|
||||
if totals[i] > r:
|
||||
selections.append(seq[i])
|
||||
break
|
||||
return selections
|
||||
|
||||
|
||||
#_____________________________________________________________________________
|
||||
# The remainder of this file implements examples for the search algorithms.
|
||||
|
||||
#______________________________________________________________________________
|
||||
# Graphs and Graph Problems
|
||||
|
||||
class Graph:
    """A graph connects nodes (vertices) by edges (links).  Each edge can also
    have a length associated with it.  The constructor call is something like:
        g = Graph({'A': {'B': 1, 'C': 2}})
    this makes a graph with 3 nodes, A, B, and C, with an edge of length 1 from
    A to B, and an edge of length 2 from A to C.  You can also do:
        g = Graph({'A': {'B': 1, 'C': 2}}, directed=False)
    This makes an undirected graph, so inverse links are also added. The graph
    stays undirected; if you add more links with g.connect('B', 'C', 3), then
    the inverse link is also added.  You can use g.nodes() to get a list of nodes,
    g.get('A') to get a dict of links out of A, and g.get('A', 'B') to get the
    length of the link from A to B.  'Lengths' can actually be any object at
    all, and nodes can be any hashable object."""
|
||||
|
||||
def __init__(self, dict=None, directed=True):
|
||||
self.dict = dict or {}
|
||||
self.directed = directed
|
||||
if not directed: self.make_undirected()
|
||||
|
||||
def make_undirected(self):
|
||||
"Make a digraph into an undirected graph by adding symmetric edges."
|
||||
for a in self.dict.keys():
|
||||
for (b, distance) in self.dict[a].items():
|
||||
self.connect1(b, a, distance)
|
||||
|
||||
def connect(self, A, B, distance=1):
|
||||
"""Add a link from A and B of given distance, and also add the inverse
|
||||
link if the graph is undirected."""
|
||||
self.connect1(A, B, distance)
|
||||
if not self.directed: self.connect1(B, A, distance)
|
||||
|
||||
def connect1(self, A, B, distance):
|
||||
"Add a link from A to B of given distance, in one direction only."
|
||||
self.dict.setdefault(A,{})[B] = distance
|
||||
|
||||
def get(self, a, b=None):
|
||||
"""Return a link distance or a dict of {node: distance} entries.
|
||||
.get(a,b) returns the distance or None;
|
||||
.get(a) returns a dict of {node: distance} entries, possibly {}."""
|
||||
links = self.dict.setdefault(a, {})
|
||||
if b is None: return links
|
||||
else: return links.get(b)
|
||||
|
||||
def nodes(self):
|
||||
"Return a list of nodes in the graph."
|
||||
return self.dict.keys()
|
||||
|
||||
def UndirectedGraph(dict=None):
|
||||
"Build a Graph where every edge (including future ones) goes both ways."
|
||||
return Graph(dict=dict, directed=False)
|
||||
|
||||
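## Added sketch (not in the original module): the Graph API described in the
## class docstring, exercised on a tiny hypothetical graph.
def _graph_demo():
    "Sketch: build a small undirected graph and query it."
    g = UndirectedGraph({'A': {'B': 1, 'C': 2}})
    g.connect('B', 'C', 3)
    print g.get('A', 'B')      # 1
    print g.get('C')           # links out of C, e.g. {'A': 2, 'B': 3}
    print sorted(g.nodes())    # ['A', 'B', 'C']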
def RandomGraph(nodes=range(10), min_links=2, width=400, height=300,
|
||||
curvature=lambda: random.uniform(1.1, 1.5)):
|
||||
"""Construct a random graph, with the specified nodes, and random links.
|
||||
The nodes are laid out randomly on a (width x height) rectangle.
|
||||
Then each node is connected to the min_links nearest neighbors.
|
||||
Because inverse links are added, some nodes will have more connections.
|
||||
The distance between nodes is the hypotenuse times curvature(),
|
||||
where curvature() defaults to a random number between 1.1 and 1.5."""
|
||||
g = UndirectedGraph()
|
||||
g.locations = {}
|
||||
## Build the cities
|
||||
for node in nodes:
|
||||
g.locations[node] = (random.randrange(width), random.randrange(height))
|
||||
## Build roads from each city to at least min_links nearest neighbors.
|
||||
for i in range(min_links):
|
||||
for node in nodes:
|
||||
if len(g.get(node)) < min_links:
|
||||
here = g.locations[node]
|
||||
def distance_to_node(n):
|
||||
if n is node or g.get(node,n): return infinity
|
||||
return distance(g.locations[n], here)
|
||||
neighbor = argmin(nodes, distance_to_node)
|
||||
d = distance(g.locations[neighbor], here) * curvature()
|
||||
g.connect(node, neighbor, int(d))
|
||||
return g
|
||||
|
||||
romania = UndirectedGraph(Dict(
|
||||
A=Dict(Z=75, S=140, T=118),
|
||||
B=Dict(U=85, P=101, G=90, F=211),
|
||||
C=Dict(D=120, R=146, P=138),
|
||||
D=Dict(M=75),
|
||||
E=Dict(H=86),
|
||||
F=Dict(S=99),
|
||||
H=Dict(U=98),
|
||||
I=Dict(V=92, N=87),
|
||||
L=Dict(T=111, M=70),
|
||||
O=Dict(Z=71, S=151),
|
||||
P=Dict(R=97),
|
||||
R=Dict(S=80),
|
||||
U=Dict(V=142)))
|
||||
romania.locations = Dict(
|
||||
A=( 91, 492), B=(400, 327), C=(253, 288), D=(165, 299),
|
||||
E=(562, 293), F=(305, 449), G=(375, 270), H=(534, 350),
|
||||
I=(473, 506), L=(165, 379), M=(168, 339), N=(406, 537),
|
||||
O=(131, 571), P=(320, 368), R=(233, 410), S=(207, 457),
|
||||
T=( 94, 410), U=(456, 350), V=(509, 444), Z=(108, 531))
|
||||
|
||||
australia = UndirectedGraph(Dict(
|
||||
T=Dict(),
|
||||
SA=Dict(WA=1, NT=1, Q=1, NSW=1, V=1),
|
||||
NT=Dict(WA=1, Q=1),
|
||||
NSW=Dict(Q=1, V=1)))
|
||||
australia.locations = Dict(WA=(120, 24), NT=(135, 20), SA=(135, 30),
|
||||
Q=(145, 20), NSW=(145, 32), T=(145, 42), V=(145, 37))
|
||||
|
||||
class GraphProblem(Problem):
|
||||
"The problem of searching a graph from one node to another."
|
||||
def __init__(self, initial, goal, graph):
|
||||
Problem.__init__(self, initial, goal)
|
||||
self.graph = graph
|
||||
|
||||
def successor(self, A):
|
||||
"Return a list of (action, result) pairs."
|
||||
return [(B, B) for B in self.graph.get(A).keys()]
|
||||
|
||||
def path_cost(self, cost_so_far, A, action, B):
|
||||
return cost_so_far + (self.graph.get(A,B) or infinity)
|
||||
|
||||
def h(self, node):
|
||||
"h function is straight-line distance from a node's state to goal."
|
||||
locs = getattr(self.graph, 'locations', None)
|
||||
if locs:
|
||||
return int(distance(locs[node.state], locs[self.goal]))
|
||||
else:
|
||||
return infinity
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
#### NOTE: NQueensProblem not working properly yet.
|
||||
|
||||
class NQueensProblem(Problem):
    """The problem of placing N queens on an NxN board with none attacking
    each other.  A state is represented as an N-element array, where
    a value of r in the c-th entry means there is a queen at column c,
    row r, and a value of None means that the c-th column has not been
    filled in yet.  We fill in columns left to right."""
|
||||
def __init__(self, N):
|
||||
self.N = N
|
||||
self.initial = [None] * N
|
||||
|
||||
def successor(self, state):
|
||||
"In the leftmost empty column, try all non-conflicting rows."
|
||||
if state[-1] is not None:
|
||||
return [] ## All columns filled; no successors
|
||||
else:
|
||||
def place(col, row):
|
||||
new = state[:]
|
||||
new[col] = row
|
||||
return new
|
||||
col = state.index(None)
|
||||
return [(row, place(col, row)) for row in range(self.N)
|
||||
if not self.conflicted(state, row, col)]
|
||||
|
||||
    def conflicted(self, state, row, col):
        "Would placing a queen at (row, col) conflict with anything?"
        for c in range(col):
            if self.conflict(row, col, state[c], c):
                return True
        return False
|
||||
|
||||
def conflict(self, row1, col1, row2, col2):
|
||||
"Would putting two queens in (row1, col1) and (row2, col2) conflict?"
|
||||
return (row1 == row2 ## same row
|
||||
or col1 == col2 ## same column
|
||||
or row1-col1 == row2-col2 ## same \ diagonal
|
||||
or row1+col1 == row2+col2) ## same / diagonal
|
||||
|
||||
def goal_test(self, state):
|
||||
"Check if all columns filled, no conflicts."
|
||||
if state[-1] is None:
|
||||
return False
|
||||
for c in range(len(state)):
|
||||
if self.conflicted(state, state[c], c):
|
||||
return False
|
||||
return True
|
||||
|
||||
#______________________________________________________________________________
|
||||
## Inverse Boggle: Search for a high-scoring Boggle board. A good domain for
|
||||
## iterative-repair and related search techniques, as suggested by Justin Boyan.
|
||||
|
||||
ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
|
||||
|
||||
cubes16 = ['FORIXB', 'MOQABJ', 'GURILW', 'SETUPL',
|
||||
'CMPDAE', 'ACITAO', 'SLCRAE', 'ROMASH',
|
||||
'NODESW', 'HEFIYE', 'ONUDTK', 'TEVIGN',
|
||||
'ANEDVZ', 'PINESH', 'ABILYT', 'GKYLEU']
|
||||
|
||||
def random_boggle(n=4):
|
||||
"""Return a random Boggle board of size n x n.
|
||||
We represent a board as a linear list of letters."""
|
||||
cubes = [cubes16[i % 16] for i in range(n*n)]
|
||||
random.shuffle(cubes)
|
||||
return map(random.choice, cubes)
|
||||
|
||||
## The best 5x5 board found by Boyan, with our word list this board scores
|
||||
## 2274 words, for a score of 9837
|
||||
|
||||
boyan_best = list('RSTCSDEIAEGNLRPEATESMSSID')
|
||||
|
||||
def print_boggle(board):
|
||||
"Print the board in a 2-d array."
|
||||
n2 = len(board); n = exact_sqrt(n2)
|
||||
for i in range(n2):
|
||||
if i % n == 0: print
|
||||
if board[i] == 'Q': print 'Qu',
|
||||
else: print str(board[i]) + ' ',
|
||||
print
|
||||
|
||||
def boggle_neighbors(n2, cache={}):
|
||||
""""Return a list of lists, where the i-th element is the list of indexes
|
||||
for the neighbors of square i."""
|
||||
if cache.get(n2):
|
||||
return cache.get(n2)
|
||||
n = exact_sqrt(n2)
|
||||
neighbors = [None] * n2
|
||||
for i in range(n2):
|
||||
neighbors[i] = []
|
||||
on_top = i < n
|
||||
on_bottom = i >= n2 - n
|
||||
on_left = i % n == 0
|
||||
on_right = (i+1) % n == 0
|
||||
if not on_top:
|
||||
neighbors[i].append(i - n)
|
||||
if not on_left: neighbors[i].append(i - n - 1)
|
||||
if not on_right: neighbors[i].append(i - n + 1)
|
||||
if not on_bottom:
|
||||
neighbors[i].append(i + n)
|
||||
if not on_left: neighbors[i].append(i + n - 1)
|
||||
if not on_right: neighbors[i].append(i + n + 1)
|
||||
if not on_left: neighbors[i].append(i - 1)
|
||||
if not on_right: neighbors[i].append(i + 1)
|
||||
cache[n2] = neighbors
|
||||
return neighbors
|
||||
|
||||
def exact_sqrt(n2):
|
||||
"If n2 is a perfect square, return its square root, else raise error."
|
||||
n = int(math.sqrt(n2))
|
||||
assert n * n == n2
|
||||
return n
|
||||
|
||||
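## Added sketch (not in the original module): neighbor indexes on a 4x4 board.
def _boggle_neighbors_demo():
    "Sketch: the top-left square of a 4x4 board touches squares 1, 4 and 5."
    nbrs = boggle_neighbors(16)
    print sorted(nbrs[0])      # [1, 4, 5]
    print len(nbrs[5])         # 8; an interior square touches all 8 neighbors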
##_____________________________________________________________________________
|
||||
|
||||
class Wordlist:
|
||||
"""This class holds a list of words. You can use (word in wordlist)
|
||||
to check if a word is in the list, or wordlist.lookup(prefix)
|
||||
to see if prefix starts any of the words in the list."""
|
||||
def __init__(self, filename, min_len=3):
|
||||
lines = open(filename).read().upper().split()
|
||||
self.words = [word for word in lines if len(word) >= min_len]
|
||||
self.words.sort()
|
||||
self.bounds = {}
|
||||
for c in ALPHABET:
|
||||
c2 = chr(ord(c) + 1)
|
||||
self.bounds[c] = (bisect.bisect(self.words, c),
|
||||
bisect.bisect(self.words, c2))
|
||||
|
||||
def lookup(self, prefix, lo=0, hi=None):
|
||||
"""See if prefix is in dictionary, as a full word or as a prefix.
|
||||
Return two values: the first is the lowest i such that
|
||||
words[i].startswith(prefix), or is None; the second is
|
||||
True iff prefix itself is in the Wordlist."""
|
||||
words = self.words
|
||||
i = bisect.bisect_left(words, prefix, lo, hi)
|
||||
if i < len(words) and words[i].startswith(prefix):
|
||||
return i, (words[i] == prefix)
|
||||
else:
|
||||
return None, False
|
||||
|
||||
def __contains__(self, word):
|
||||
return self.words[bisect.bisect_left(self.words, word)] == word
|
||||
|
||||
def __len__(self):
|
||||
return len(self.words)
|
||||
|
||||
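## Added sketch (not in the original module): the two-part result of lookup().
## (Assumes a Wordlist has already been loaded from some word file.)
def _wordlist_lookup_demo(wl):
    "Sketch: lookup returns (index of first match or None, exact-word flag)."
    i, is_word = wl.lookup('QU')
    print i, is_word            # (None, False) if no word starts with 'QU'
    print 'QUEEN' in wl         # True only if the word file contains QUEEN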
##_____________________________________________________________________________
|
||||
|
||||
class BoggleFinder:
|
||||
"""A class that allows you to find all the words in a Boggle board. """
|
||||
|
||||
wordlist = None ## A class variable, holding a wordlist
|
||||
|
||||
def __init__(self, board=None):
|
||||
if BoggleFinder.wordlist is None:
|
||||
BoggleFinder.wordlist = Wordlist("../data/wordlist")
|
||||
self.found = {}
|
||||
if board:
|
||||
self.set_board(board)
|
||||
|
||||
def set_board(self, board=None):
|
||||
"Set the board, and find all the words in it."
|
||||
if board is None:
|
||||
board = random_boggle()
|
||||
self.board = board
|
||||
self.neighbors = boggle_neighbors(len(board))
|
||||
self.found = {}
|
||||
for i in range(len(board)):
|
||||
lo, hi = self.wordlist.bounds[board[i]]
|
||||
self.find(lo, hi, i, [], '')
|
||||
return self
|
||||
|
||||
def find(self, lo, hi, i, visited, prefix):
|
||||
"""Looking in square i, find the words that continue the prefix,
|
||||
considering the entries in self.wordlist.words[lo:hi], and not
|
||||
revisiting the squares in visited."""
|
||||
if i in visited:
|
||||
return
|
||||
wordpos, is_word = self.wordlist.lookup(prefix, lo, hi)
|
||||
if wordpos is not None:
|
||||
if is_word:
|
||||
self.found[prefix] = True
|
||||
visited.append(i)
|
||||
c = self.board[i]
|
||||
if c == 'Q': c = 'QU'
|
||||
prefix += c
|
||||
for j in self.neighbors[i]:
|
||||
self.find(wordpos, hi, j, visited, prefix)
|
||||
visited.pop()
|
||||
|
||||
def words(self):
|
||||
"The words found."
|
||||
return self.found.keys()
|
||||
|
||||
scores = [0, 0, 0, 0, 1, 2, 3, 5] + [11] * 100
|
||||
|
||||
def score(self):
|
||||
"The total score for the words found, according to the rules."
|
||||
return sum([self.scores[len(w)] for w in self.words()])
|
||||
|
||||
def __len__(self):
|
||||
"The number of words found."
|
||||
return len(self.found)
|
||||
|
||||
##_____________________________________________________________________________
|
||||
|
||||
def boggle_hill_climbing(board=None, ntimes=100, print_it=True):
|
||||
"""Solve inverse Boggle by hill-climbing: find a high-scoring board by
|
||||
starting with a random one and changing it."""
|
||||
finder = BoggleFinder()
|
||||
if board is None:
|
||||
board = random_boggle()
|
||||
best = len(finder.set_board(board))
|
||||
for _ in range(ntimes):
|
||||
i, oldc = mutate_boggle(board)
|
||||
new = len(finder.set_board(board))
|
||||
if new > best:
|
||||
best = new
|
||||
print best, _, board
|
||||
else:
|
||||
board[i] = oldc ## Change back
|
||||
if print_it:
|
||||
print_boggle(board)
|
||||
return board, best
|
||||
|
||||
def mutate_boggle(board):
|
||||
i = random.randrange(len(board))
|
||||
oldc = board[i]
|
||||
board[i] = random.choice(random.choice(cubes16)) ##random.choice(boyan_best)
|
||||
return i, oldc
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
## Code to compare searchers on various problems.
|
||||
|
||||
class InstrumentedProblem(Problem):
|
||||
"""Delegates to a problem, and keeps statistics."""
|
||||
|
||||
def __init__(self, problem):
|
||||
self.problem = problem
|
||||
self.succs = self.goal_tests = self.states = 0
|
||||
self.found = None
|
||||
|
||||
def successor(self, state):
|
||||
"Return a list of (action, state) pairs reachable from this state."
|
||||
result = self.problem.successor(state)
|
||||
self.succs += 1; self.states += len(result)
|
||||
return result
|
||||
|
||||
def goal_test(self, state):
|
||||
"Return true if the state is a goal."
|
||||
self.goal_tests += 1
|
||||
result = self.problem.goal_test(state)
|
||||
if result:
|
||||
self.found = state
|
||||
return result
|
||||
|
||||
def __getattr__(self, attr):
|
||||
if attr in ('succs', 'goal_tests', 'states'):
|
||||
return self.__dict__[attr]
|
||||
else:
|
||||
return getattr(self.problem, attr)
|
||||
|
||||
def __repr__(self):
|
||||
return '<%4d/%4d/%4d/%s>' % (self.succs, self.goal_tests,
|
||||
self.states, str(self.found)[0:4])
|
||||
|
||||
def compare_searchers(problems, header, searchers=[breadth_first_tree_search,
|
||||
breadth_first_graph_search, depth_first_graph_search,
|
||||
iterative_deepening_search, depth_limited_search,
|
||||
astar_search]):
|
||||
def do(searcher, problem):
|
||||
p = InstrumentedProblem(problem)
|
||||
searcher(p)
|
||||
return p
|
||||
table = [[name(s)] + [do(s, p) for p in problems] for s in searchers]
|
||||
print_table(table, header)
|
||||
|
||||
def compare_graph_searchers():
|
||||
compare_searchers(problems=[GraphProblem('A', 'B', romania),
|
||||
GraphProblem('O', 'N', romania),
|
||||
GraphProblem('Q', 'WA', australia)],
|
||||
header=['Searcher', 'Romania(A,B)', 'Romania(O, N)', 'Australia'])
|
||||
|
@ -1,68 +0,0 @@
|
||||
|
||||
>>> ab = GraphProblem('A', 'B', romania)
|
||||
>>> breadth_first_tree_search(ab).state
|
||||
'B'
|
||||
>>> breadth_first_graph_search(ab).state
|
||||
'B'
|
||||
>>> depth_first_graph_search(ab).state
|
||||
'B'
|
||||
>>> iterative_deepening_search(ab).state
|
||||
'B'
|
||||
>>> depth_limited_search(ab).state
|
||||
'B'
|
||||
>>> astar_search(ab).state
|
||||
'B'
|
||||
>>> [node.state for node in astar_search(ab).path()]
|
||||
['B', 'P', 'R', 'S', 'A']
|
||||
|
||||
|
||||
### demo
|
||||
|
||||
>>> compare_graph_searchers()
|
||||
Searcher Romania(A,B) Romania(O, N) Australia
|
||||
breadth_first_tree_search < 21/ 22/ 59/B> <1158/1159/3288/N> < 7/ 8/ 22/WA>
|
||||
breadth_first_graph_search < 10/ 19/ 26/B> < 19/ 45/ 45/N> < 5/ 8/ 16/WA>
|
||||
depth_first_graph_search < 9/ 15/ 23/B> < 16/ 27/ 39/N> < 4/ 7/ 13/WA>
|
||||
iterative_deepening_search < 11/ 33/ 31/B> < 656/1815/1812/N> < 3/ 11/ 11/WA>
|
||||
depth_limited_search < 54/ 65/ 185/B> < 387/1012/1125/N> < 50/ 54/ 200/WA>
|
||||
astar_search < 3/ 4/ 9/B> < 8/ 10/ 22/N> < 2/ 3/ 6/WA>
|
||||
|
||||
>>> board = list('SARTELNID')
|
||||
>>> print_boggle(board)
|
||||
S A R
|
||||
T E L
|
||||
N I D
|
||||
|
||||
>>> f = BoggleFinder(board)
|
||||
|
||||
>>> len(f)
|
||||
206
|
||||
|
||||
>>> ' '.join(f.words())
|
||||
'LID LARES DEAL LIE DIETS LIN LINT TIL TIN RATED ERAS LATEN DEAR TIE LINE INTER STEAL LATED LAST TAR SAL DITES RALES SAE RETS TAE RAT RAS SAT IDLE TILDES LEAST IDEAS LITE SATED TINED LEST LIT RASE RENTS TINEA EDIT EDITS NITES ALES LATE LETS RELIT TINES LEI LAT ELINT LATI SENT TARED DINE STAR SEAR NEST LITAS TIED SEAT SERAL RATE DINT DEL DEN SEAL TIER TIES NET SALINE DILATE EAST TIDES LINTER NEAR LITS ELINTS DENI RASED SERA TILE NEAT DERAT IDLEST NIDE LIEN STARED LIER LIES SETA NITS TINE DITAS ALINE SATIN TAS ASTER LEAS TSAR LAR NITE RALE LAS REAL NITER ATE RES RATEL IDEA RET IDEAL REI RATS STALE DENT RED IDES ALIEN SET TEL SER TEN TEA TED SALE TALE STILE ARES SEA TILDE SEN SEL ALINES SEI LASE DINES ILEA LINES ELD TIDE RENT DIEL STELA TAEL STALED EARL LEA TILES TILER LED ETA TALI ALE LASED TELA LET IDLER REIN ALIT ITS NIDES DIN DIE DENTS STIED LINER LASTED RATINE ERA IDLES DIT RENTAL DINER SENTI TINEAL DEIL TEAR LITER LINTS TEAL DIES EAR EAT ARLES SATE STARE DITS DELI DENTAL REST DITE DENTIL DINTS DITA DIET LENT NETS NIL NIT SETAL LATS TARE ARE SATI'
|
||||
|
||||
>>> boggle_hill_climbing(list('ABCDEFGHI'))
|
||||
30 1 ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'S', 'I']
|
||||
35 2 ['A', 'B', 'S', 'D', 'E', 'F', 'G', 'S', 'I']
|
||||
36 10 ['A', 'B', 'O', 'D', 'E', 'F', 'G', 'S', 'I']
|
||||
41 11 ['A', 'B', 'O', 'D', 'O', 'F', 'G', 'S', 'I']
|
||||
46 13 ['A', 'B', 'O', 'D', 'O', 'C', 'G', 'S', 'I']
|
||||
48 14 ['A', 'M', 'O', 'D', 'O', 'C', 'G', 'S', 'I']
|
||||
55 16 ['A', 'M', 'L', 'D', 'O', 'C', 'G', 'S', 'I']
|
||||
60 17 ['A', 'M', 'L', 'D', 'O', 'C', 'G', 'S', 'E']
|
||||
67 23 ['A', 'M', 'L', 'D', 'O', 'A', 'G', 'S', 'E']
|
||||
70 29 ['A', 'B', 'L', 'D', 'O', 'A', 'G', 'S', 'E']
|
||||
73 33 ['A', 'N', 'L', 'D', 'O', 'A', 'G', 'S', 'E']
|
||||
80 55 ['A', 'N', 'L', 'D', 'O', 'A', 'G', 'S', 'W']
|
||||
84 115 ['A', 'N', 'R', 'D', 'O', 'A', 'G', 'S', 'W']
|
||||
100 116 ['A', 'N', 'R', 'D', 'O', 'A', 'G', 'S', 'T']
|
||||
111 140 ['E', 'N', 'R', 'D', 'O', 'A', 'G', 'S', 'T']
|
||||
123 169 ['E', 'P', 'R', 'D', 'O', 'A', 'G', 'S', 'T']
|
||||
|
||||
E P R
|
||||
D O A
|
||||
G S T
|
||||
(['E', 'P', 'R', 'D', 'O', 'A', 'G', 'S', 'T'], 123)
|
||||
|
||||
>>> random_weighted_selection(range(10), 3, lambda x: x * x)
|
||||
[8, 9, 6]
|
@ -1,365 +0,0 @@
|
||||
"""Statistical Language Processing tools. (Chapter 23)
|
||||
We define Unigram and Ngram text models, use them to generate random text,
|
||||
and show the Viterbi algorithm for segmentation of letters into words.
|
||||
Then we show a very simple Information Retrieval system, and an example
|
||||
working on a tiny sample of Unix manual pages."""
|
||||
|
||||
from utils import *
|
||||
from math import log, exp
|
||||
import re, probability, string, search
|
||||
|
||||
class CountingProbDist(probability.ProbDist):
|
||||
"""A probability distribution formed by observing and counting examples.
|
||||
If P is an instance of this class and o
|
||||
is an observed value, then there are 3 main operations:
|
||||
p.add(o) increments the count for observation o by 1.
|
||||
p.sample() returns a random element from the distribution.
|
||||
p[o] returns the probability for o (as in a regular ProbDist)."""
|
||||
|
||||
def __init__(self, observations=[], default=0):
|
||||
"""Create a distribution, and optionally add in some observations.
|
||||
By default this is an unsmoothed distribution, but saying default=1,
|
||||
for example, gives you add-one smoothing."""
|
||||
update(self, dictionary=DefaultDict(default), needs_recompute=False,
|
||||
table=[], n_obs=0)
|
||||
for o in observations:
|
||||
self.add(o)
|
||||
|
||||
def add(self, o):
|
||||
"""Add an observation o to the distribution."""
|
||||
self.dictionary[o] += 1
|
||||
self.n_obs += 1
|
||||
self.needs_recompute = True
|
||||
|
||||
def sample(self):
|
||||
"""Return a random sample from the distribution."""
|
||||
if self.needs_recompute: self._recompute()
|
||||
if self.n_obs == 0:
|
||||
return None
|
||||
i = bisect.bisect_left(self.table, (1 + random.randrange(self.n_obs),))
|
||||
(count, o) = self.table[i]
|
||||
return o
|
||||
|
||||
def __getitem__(self, item):
|
||||
"""Return an estimate of the probability of item."""
|
||||
if self.needs_recompute: self._recompute()
|
||||
return self.dictionary[item] / self.n_obs
|
||||
|
||||
def __len__(self):
|
||||
if self.needs_recompute: self._recompute()
|
||||
return self.n_obs
|
||||
|
||||
def top(self, n):
|
||||
"Return (count, obs) tuples for the n most frequent observations."
|
||||
items = [(v, k) for (k, v) in self.dictionary.items()]
|
||||
items.sort(); items.reverse()
|
||||
return items[0:n]
|
||||
|
||||
def _recompute(self):
|
||||
"""Recompute the total count n_obs and the table of entries."""
|
||||
n_obs = 0
|
||||
table = []
|
||||
for (o, count) in self.dictionary.items():
|
||||
n_obs += count
|
||||
table.append((n_obs, o))
|
||||
update(self, n_obs=float(n_obs), table=table, needs_recompute=False)
|
||||
|
||||
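## Illustrative sketch (not from the original test file): count the letters of
## 'abracadabra' and ask for the most frequent observations; sample() would
## then return letters roughly in proportion to these counts.
## >>> d = CountingProbDist('abracadabra')
## >>> d.top(3)
## [(5, 'a'), (2, 'r'), (2, 'b')]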
#______________________________________________________________________________
|
||||
|
||||
class UnigramTextModel(CountingProbDist):
|
||||
"""This is a discrete probability distribution over words, so you
|
||||
can add, sample, or get P[word], just like with CountingProbDist. You can
|
||||
also generate a random text n words long with P.samples(n)"""
|
||||
|
||||
def samples(self, n):
|
||||
"Return a string of n words, random according to the model."
|
||||
return ' '.join([self.sample() for i in range(n)])
|
||||
|
||||
class NgramTextModel(CountingProbDist):
|
||||
"""This is a discrete probability distribution over n-tuples of words.
|
||||
You can add, sample or get P[(word1, ..., wordn)]. The method P.samples(n)
|
||||
builds up an n-word sequence; P.add_text and P.add_sequence add data."""
|
||||
|
||||
def __init__(self, n, observation_sequence=[]):
|
||||
## In addition to the dictionary of n-tuples, cond_prob is a
|
||||
## mapping from (w1, ..., wn-1) to P(wn | w1, ... wn-1)
|
||||
CountingProbDist.__init__(self)
|
||||
self.n = n
|
||||
self.cond_prob = DefaultDict(CountingProbDist())
|
||||
self.add_sequence(observation_sequence)
|
||||
|
||||
## sample, __len__, __getitem__ inherited from CountingProbDist
|
||||
## Note they deal with tuples, not strings, as inputs
|
||||
|
||||
def add(self, ngram):
|
||||
"""Count 1 for P[(w1, ..., wn)] and for P(wn | (w1, ..., wn-1)"""
|
||||
CountingProbDist.add(self, ngram)
|
||||
self.cond_prob[ngram[:-1]].add(ngram[-1])
|
||||
|
||||
def add_sequence(self, words):
|
||||
"""Add each of the tuple words[i:i+n], using a sliding window.
|
||||
Prefix some copies of the empty word, '', to make the start work."""
|
||||
n = self.n
|
||||
words = ['',] * (n-1) + words
|
||||
for i in range(len(words)-n):
|
||||
self.add(tuple(words[i:i+n]))
|
||||
|
||||
def samples(self, nwords):
|
||||
"""Build up a random sample of text n words long, using the"""
|
||||
n = self.n
|
||||
nminus1gram = ('',) * (n-1)
|
||||
output = []
|
||||
while len(output) < nwords:
|
||||
wn = self.cond_prob[nminus1gram].sample()
|
||||
if wn:
|
||||
output.append(wn)
|
||||
nminus1gram = nminus1gram[1:] + (wn,)
|
||||
else: ## Cannot continue, so restart.
|
||||
nminus1gram = ('',) * (n-1)
|
||||
return ' '.join(output)
|
||||
|
||||
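## Illustrative sketch (hypothetical data, not from the original test file):
## a bigram model over a short word list; cond_prob[('cat',)] then records
## what was observed to follow 'cat'.
## >>> P2 = NgramTextModel(2, ['the', 'cat', 'sat', 'on', 'the', 'mat'])
## >>> P2.cond_prob[('cat',)].dictionary
## {'sat': 1}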
#______________________________________________________________________________
|
||||
|
||||
|
||||
def viterbi_segment(text, P):
|
||||
"""Find the best segmentation of the string of characters, given the
|
||||
UnigramTextModel P."""
|
||||
# best[i] = best probability for text[0:i]
|
||||
# words[i] = best word ending at position i
|
||||
n = len(text)
|
||||
words = [''] + list(text)
|
||||
best = [1.0] + [0.0] * n
|
||||
## Fill in the vectors best, words via dynamic programming
|
||||
for i in range(n+1):
|
||||
for j in range(0, i):
|
||||
w = text[j:i]
|
||||
if P[w] * best[i - len(w)] >= best[i]:
|
||||
best[i] = P[w] * best[i - len(w)]
|
||||
words[i] = w
|
||||
## Now recover the sequence of best words
|
||||
sequence = []; i = len(words)-1
|
||||
while i > 0:
|
||||
sequence[0:0] = [words[i]]
|
||||
i = i - len(words[i])
|
||||
## Return sequence of best words and overall probability
|
||||
return sequence, best[-1]
|
||||
|
||||
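## The loop above implements the recurrence
##     best[i] = max over 0 <= j < i of P[text[j:i]] * best[j],  with best[0] = 1,
## and words[i] remembers the word text[j:i] that achieves the maximum.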
|
||||
#______________________________________________________________________________
|
||||
|
||||
|
||||
class IRSystem:
|
||||
"""A very simple Information Retrieval System, as discussed in Sect. 23.2.
|
||||
The constructor s = IRSystem('the a') builds an empty system with two
|
||||
stopwords. Next, index several documents with s.index_document(text, url).
|
||||
Then ask queries with s.query('query words', n) to retrieve the top n
|
||||
matching documents. Queries are literal words from the document,
|
||||
except that stopwords are ignored, and there is one special syntax:
|
||||
The query "learn: man cat", for example, runs "man cat" and indexes it."""
|
||||
|
||||
def __init__(self, stopwords='the a of'):
|
||||
"""Create an IR System. Optionally specify stopwords."""
|
||||
## index is a map of {word: {docid: count}}, where docid is an int,
|
||||
## indicating the index into the documents list.
|
||||
update(self, index=DefaultDict(DefaultDict(0)),
|
||||
stopwords=set(words(stopwords)), documents=[])
|
||||
|
||||
def index_collection(self, filenames):
|
||||
"Index a whole collection of files."
|
||||
for filename in filenames:
|
||||
self.index_document(open(filename).read(), filename)
|
||||
|
||||
def index_document(self, text, url):
|
||||
"Index the text of a document."
|
||||
## For now, use first line for title
|
||||
title = text[:text.index('\n')].strip()
|
||||
docwords = words(text)
|
||||
docid = len(self.documents)
|
||||
self.documents.append(Document(title, url, len(docwords)))
|
||||
for word in docwords:
|
||||
if word not in self.stopwords:
|
||||
self.index[word][docid] += 1
|
||||
|
||||
def query(self, query_text, n=10):
|
||||
"""Return a list of n (score, docid) pairs for the best matches.
|
||||
Also handle the special syntax for 'learn: command'."""
|
||||
if query_text.startswith("learn:"):
|
||||
doctext = os.popen(query_text[len("learn:"):], 'r').read()
|
||||
self.index_document(doctext, query_text)
|
||||
return []
|
||||
qwords = [w for w in words(query_text) if w not in self.stopwords]
|
||||
shortest = argmin(qwords, lambda w: len(self.index[w]))
|
||||
docs = self.index[shortest]
|
||||
results = [(sum([self.score(w, d) for w in qwords]), d) for d in docs]
|
||||
results.sort(); results.reverse()
|
||||
return results[:n]
|
||||
|
||||
def score(self, word, docid):
|
||||
"Compute a score for this word on this docid."
|
||||
## There are many options; here we take a very simple approach
|
||||
return (math.log(1 + self.index[word][docid])
|
||||
/ math.log(1 + self.documents[docid].nwords))
|
||||
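## Equivalently, score(word, doc) = log(1 + tf) / log(1 + doclen), where tf is
## the count of word in doc and doclen its length in words: a length-normalized
## term-frequency weight with no idf factor in this simple version.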
|
||||
def present(self, results):
|
||||
"Present the results as a list."
|
||||
for (score, d) in results:
|
||||
doc = self.documents[d]
|
||||
print "%5.2f|%25s | %s" % (100 * score, doc.url, doc.title[:45])
|
||||
|
||||
def present_results(self, query_text, n=10):
|
||||
"Get results for the query and present them."
|
||||
self.present(self.query(query_text, n))
|
||||
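## Illustrative sketch with two made-up documents (names and text are
## hypothetical, not from the original tests):
## >>> s = IRSystem('the a of')
## >>> s.index_document("Spam and eggs\nspam spam eggs", "doc1")
## >>> s.index_document("Green eggs and ham\nham ham", "doc2")
## >>> [docid for (score, docid) in s.query("spam")]
## [0]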
|
||||
class UnixConsultant(IRSystem):
|
||||
"""A trivial IR system over a small collection of Unix man pages."""
|
||||
def __init__(self):
|
||||
IRSystem.__init__(self, stopwords="how do i the a of")
|
||||
import os
|
||||
mandir = '../data/man/'
|
||||
man_files = [mandir + f for f in os.listdir(mandir)]
|
||||
self.index_collection(man_files)
|
||||
|
||||
class Document:
|
||||
"""Metadata for a document: title and url; maybe add others later."""
|
||||
def __init__(self, title, url, nwords):
|
||||
update(self, title=title, url=url, nwords=nwords)
|
||||
|
||||
def words(text, reg=re.compile('[a-z0-9]+')):
|
||||
"""Return a list of the words in text, ignoring punctuation and
|
||||
converting everything to lowercase (to canonicalize).
|
||||
>>> words("``EGAD!'' Edgar cried.")
|
||||
['egad', 'edgar', 'cried']
|
||||
"""
|
||||
return reg.findall(text.lower())
|
||||
|
||||
def canonicalize(text):
|
||||
"""Return a canonical text: only lowercase letters and blanks.
|
||||
>>> canonicalize("``EGAD!'' Edgar cried.")
|
||||
'egad edgar cried'
|
||||
"""
|
||||
return ' '.join(words(text))
|
||||
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
||||
## Example application (not in book): decode a cipher.
|
||||
## A cipher is a code that substitutes one character for another.
|
||||
## A shift cipher is a rotation of the letters in the alphabet,
|
||||
## such as the famous rot13, which maps A to N, B to M, etc.
|
||||
|
||||
#### Encoding
|
||||
|
||||
def shift_encode(plaintext, n):
|
||||
"""Encode text with a shift cipher that moves each letter up by n letters.
|
||||
>>> shift_encode('abc z', 1)
|
||||
'bcd a'
|
||||
"""
|
||||
return encode(plaintext, alphabet[n:] + alphabet[:n])
|
||||
|
||||
def rot13(plaintext):
|
||||
"""Encode text by rotating letters by 13 spaces in the alphabet.
|
||||
>>> rot13('hello')
|
||||
'uryyb'
|
||||
>>> rot13(rot13('hello'))
|
||||
'hello'
|
||||
"""
|
||||
return shift_encode(plaintext, 13)
|
||||
|
||||
def encode(plaintext, code):
|
||||
"Encodes text, using a code which is a permutation of the alphabet."
|
||||
from string import maketrans
|
||||
trans = maketrans(alphabet + alphabet.upper(), code + code.upper())
|
||||
return plaintext.translate(trans)
|
||||
|
||||
alphabet = 'abcdefghijklmnopqrstuvwxyz'
|
||||
|
||||
def bigrams(text):
|
||||
"""Return a list of pairs in text (a sequence of letters or words).
|
||||
>>> bigrams('this')
|
||||
['th', 'hi', 'is']
|
||||
>>> bigrams(['this', 'is', 'a', 'test'])
|
||||
[['this', 'is'], ['is', 'a'], ['a', 'test']]
|
||||
"""
|
||||
return [text[i:i+2] for i in range(len(text) - 1)]
|
||||
|
||||
#### Decoding a Shift (or Caesar) Cipher
|
||||
|
||||
class ShiftDecoder:
|
||||
"""There are only 26 possible encodings, so we can try all of them,
|
||||
and return the one with the highest probability, according to a
|
||||
bigram probability distribution."""
|
||||
def __init__(self, training_text):
|
||||
training_text = canonicalize(training_text)
|
||||
self.P2 = CountingProbDist(bigrams(training_text), default=1)
|
||||
|
||||
def score(self, plaintext):
|
||||
"Return a score for text based on how common letters pairs are."
|
||||
s = 1.0
|
||||
for bi in bigrams(plaintext):
|
||||
s = s * self.P2[bi]
|
||||
return s
|
||||
|
||||
def decode(self, ciphertext):
|
||||
"Return the shift decoding of text with the best score."
|
||||
return argmax(all_shifts(ciphertext), self.score)
|
||||
|
||||
def all_shifts(text):
|
||||
"Return a list of all 26 possible encodings of text by a shift cipher."
|
||||
return [shift_encode(text, n) for n in range(len(alphabet))]
|
||||
|
||||
#### Decoding a General Permutation Cipher
|
||||
|
||||
class PermutationDecoder:
|
||||
"""This is a much harder problem than the shift decoder. There are 26!
|
||||
permutations, so we can't try them all. Instead we have to search.
|
||||
We want to search well, but there are many things to consider:
|
||||
Unigram probabilities (E is the most common letter); Bigram probabilities
|
||||
(TH is the most common bigram); word probabilities (I and A are the most
|
||||
common one-letter words, etc.); etc.
|
||||
We could represent a search state as a permutation of the 26 letters,
|
||||
and alter the solution through hill climbing. With an initial guess
|
||||
based on unigram probabilities, this would probably fare well. However,
|
||||
I chose instead to have an incremental representation. A state is
|
||||
represented as a letter-to-letter map; for example {'z': 'e'} to
|
||||
represent that 'z' will be translated to 'e'.
|
||||
"""
|
||||
def __init__(self, training_text, ciphertext=None):
|
||||
self.Pwords = UnigramTextModel(words(training_text))
|
||||
self.P1 = UnigramTextModel(training_text) # By letter
|
||||
self.P2 = NgramTextModel(2, training_text) # By letter pair
|
||||
if ciphertext:
|
||||
return self.decode(ciphertext)
|
||||
|
||||
def decode(self, ciphertext):
|
||||
"Search for a decoding of the ciphertext."
|
||||
self.ciphertext = ciphertext
|
||||
problem = PermutationDecoderProblem(decoder=self)
|
||||
return search.best_first_tree_search(problem, self.score)
|
||||
|
||||
def score(self, ciphertext, code):
|
||||
"""Score is product of word scores, unigram scores, and bigram scores.
|
||||
This can get very small, so we use logs and exp."""
|
||||
text = decode(ciphertext, code)
|
||||
logP = (sum([log(self.Pwords[word]) for word in words(text)]) +
|
||||
sum([log(self.P1[c]) for c in text]) +
|
||||
sum([log(self.P2[b]) for b in bigrams(text)]))
|
||||
return exp(logP)
|
||||
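## That is, score = product over words of Pwords[w], times product over
## letters of P1[c], times product over letter bigrams of P2[b], computed as
## exp of a sum of logs to avoid underflow. Any zero probability (an unseen
## word, letter, or bigram) would make log() fail, so smoothed models are
## needed in practice.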
|
||||
class PermutationDecoderProblem(search.Problem):
|
||||
def __init__(self, initial=None, goal=None, decoder=None):
|
||||
self.initial = initial or {}
|
||||
self.decoder = decoder
|
||||
|
||||
def successors(self, state):
|
||||
## Find the most probable plaintext letter not yet assigned; note this method was left unfinished (see the #???? below)
|
||||
p, plainchar = max([(self.decoder.P1[c], c)
|
||||
for c in alphabet if c not in state])
|
||||
succs = [extend(state, plainchar, cipherchar)] #????
|
||||
|
||||
def goal_test(self, state):
|
||||
"We're done when we get all 26 letters assigned."
|
||||
return len(state) >= 26
|
||||
|
||||
|
||||
#______________________________________________________________________________
|
||||
|
@@ -1,122 +0,0 @@
|
||||
## Create a Unigram text model from the words in the book "Flatland".
|
||||
>>> flatland = DataFile("flat11.txt").read()
|
||||
>>> wordseq = words(flatland)
|
||||
>>> P = UnigramTextModel(wordseq)
|
||||
|
||||
## Now do segmentation, using the text model as a prior.
|
||||
>>> s, p = viterbi_segment('itiseasytoreadwordswithoutspaces', P)
|
||||
>>> s
|
||||
['it', 'is', 'easy', 'to', 'read', 'words', 'without', 'spaces']
|
||||
>>> 1e-30 < p < 1e-20
|
||||
True
|
||||
>>> s, p = viterbi_segment('wheninthecourseofhumaneventsitbecomesnecessary', P)
|
||||
>>> s
|
||||
['when', 'in', 'the', 'course', 'of', 'human', 'events', 'it', 'becomes', 'necessary']
|
||||
|
||||
## Test the decoding system
|
||||
>>> shift_encode("This is a secret message.", 17)
|
||||
'Kyzj zj r jvtivk dvjjrxv.'
|
||||
|
||||
>>> ring = ShiftDecoder(flatland)
|
||||
>>> ring.decode('Kyzj zj r jvtivk dvjjrxv.')
|
||||
'This is a secret message.'
|
||||
>>> ring.decode(rot13('Hello, world!'))
|
||||
'Hello, world!'
|
||||
|
||||
## CountingProbDist
|
||||
## Add a thousand samples of a roll of a die to D.
|
||||
>>> D = CountingProbDist()
|
||||
>>> for i in range(10000):
|
||||
... D.add(random.choice('123456'))
|
||||
>>> ps = [D[n] for n in '123456']
|
||||
>>> 1./7. <= min(ps) <= max(ps) <= 1./5.
|
||||
True
|
||||
|
||||
## demo
|
||||
|
||||
## Compare 1-, 2-, and 3-gram word models of the same text.
|
||||
>>> flatland = DataFile("flat11.txt").read()
|
||||
>>> wordseq = words(flatland)
|
||||
>>> P1 = UnigramTextModel(wordseq)
|
||||
>>> P2 = NgramTextModel(2, wordseq)
|
||||
>>> P3 = NgramTextModel(3, wordseq)
|
||||
|
||||
## Generate random text from the N-gram models
|
||||
>>> P1.samples(20)
|
||||
'you thought known but were insides of see in depend by us dodecahedrons just but i words are instead degrees'
|
||||
|
||||
>>> P2.samples(20)
|
||||
'flatland well then can anything else more into the total destruction and circles teach others confine women must be added'
|
||||
|
||||
>>> P3.samples(20)
|
||||
'flatland by edwin a abbott 1884 to the wake of a certificate from nature herself proving the equal sided triangle'
|
||||
|
||||
## The most frequent entries in each model
|
||||
>>> P1.top(10)
|
||||
[(2081, 'the'), (1479, 'of'), (1021, 'and'), (1008, 'to'), (850, 'a'), (722, 'i'), (640, 'in'), (478, 'that'), (399, 'is'), (348, 'you')]
|
||||
|
||||
>>> P2.top(10)
|
||||
[(368, ('of', 'the')), (152, ('to', 'the')), (152, ('in', 'the')), (86, ('of', 'a')), (80, ('it', 'is')), (71, ('by', 'the')), (68, ('for', 'the')), (68, ('and', 'the')), (62, ('on', 'the')), (60, ('to', 'be'))]
|
||||
|
||||
>>> P3.top(10)
|
||||
[(30, ('a', 'straight', 'line')), (19, ('of', 'three', 'dimensions')), (16, ('the', 'sense', 'of')), (13, ('by', 'the', 'sense')), (13, ('as', 'well', 'as')), (12, ('of', 'the', 'circles')), (12, ('of', 'sight', 'recognition')), (11, ('the', 'number', 'of')), (11, ('that', 'i', 'had')), (11, ('so', 'as', 'to'))]
|
||||
|
||||
## Probabilities of some common n-grams
|
||||
>>> P1['the']
|
||||
0.061139348356200607
|
||||
|
||||
>>> P2[('of', 'the')]
|
||||
0.010812081325655188
|
||||
|
||||
>>> P3[('', '', 'but')]
|
||||
0.0
|
||||
|
||||
>>> P3[('so', 'as', 'to')]
|
||||
0.00032318721353860618
|
||||
|
||||
## Distributions given the previous n-1 words
|
||||
>>> P2.cond_prob['went',].dictionary
|
||||
>>> P3.cond_prob['in', 'order'].dictionary
|
||||
{'to': 6}
|
||||
|
||||
## Build and test an IR System
|
||||
>>> uc = UnixConsultant()
|
||||
>>> uc.present_results("how do I remove a file")
|
||||
76.83| ../data/man/rm.txt | RM(1) FSF RM(1)
|
||||
67.83| ../data/man/tar.txt | TAR(1) TAR(1)
|
||||
67.79| ../data/man/cp.txt | CP(1) FSF CP(1)
|
||||
66.58| ../data/man/zip.txt | ZIP(1L) ZIP(1L)
|
||||
64.58| ../data/man/gzip.txt | GZIP(1) GZIP(1)
|
||||
63.74| ../data/man/pine.txt | pine(1) pine(1)
|
||||
62.95| ../data/man/shred.txt | SHRED(1) FSF SHRED(1)
|
||||
57.46| ../data/man/pico.txt | pico(1) pico(1)
|
||||
43.38| ../data/man/login.txt | LOGIN(1) Linux Programmer's Manual
|
||||
41.93| ../data/man/ln.txt | LN(1) FSF LN(1)
|
||||
|
||||
>>> uc.present_results("how do I delete a file")
|
||||
75.47| ../data/man/diff.txt | DIFF(1) GNU Tools DIFF(1)
|
||||
69.12| ../data/man/pine.txt | pine(1) pine(1)
|
||||
63.56| ../data/man/tar.txt | TAR(1) TAR(1)
|
||||
60.63| ../data/man/zip.txt | ZIP(1L) ZIP(1L)
|
||||
57.46| ../data/man/pico.txt | pico(1) pico(1)
|
||||
51.28| ../data/man/shred.txt | SHRED(1) FSF SHRED(1)
|
||||
26.72| ../data/man/tr.txt | TR(1) User Commands TR(1)
|
||||
|
||||
>>> uc.present_results("email")
|
||||
18.39| ../data/man/pine.txt | pine(1) pine(1)
|
||||
12.01| ../data/man/info.txt | INFO(1) FSF INFO(1)
|
||||
9.89| ../data/man/pico.txt | pico(1) pico(1)
|
||||
8.73| ../data/man/grep.txt | GREP(1) GREP(1)
|
||||
8.07| ../data/man/zip.txt | ZIP(1L) ZIP(1L)
|
||||
|
||||
>>> uc.present_results("word counts for files")
|
||||
112.38| ../data/man/grep.txt | GREP(1) GREP(1)
|
||||
101.84| ../data/man/wc.txt | WC(1) User Commands WC(1)
|
||||
82.46| ../data/man/find.txt | FIND(1L) FIND(1L)
|
||||
74.64| ../data/man/du.txt | DU(1) FSF DU(1)
|
||||
|
||||
>>> uc.present_results("learn: date")
|
||||
>>> uc.present_results("2003")
|
||||
14.58| ../data/man/pine.txt | pine(1) pine(1)
|
||||
11.62| ../data/man/jar.txt | FASTJAR(1) GNU FASTJAR(1)
|
||||
|
@@ -1,714 +0,0 @@
|
||||
"""Provide some widely useful utilities. Safe for "from utils import *".
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import generators
|
||||
import operator, math, random, copy, sys, os.path, bisect
|
||||
|
||||
#______________________________________________________________________________
|
||||
# Compatibility with Python 2.2 and 2.3
|
||||
|
||||
# The AIMA code is designed to run in Python 2.2 and up (at some point,
|
||||
# support for 2.2 may go away; 2.2 was released in 2001, and so is over
|
||||
# 3 years old). The first part of this file brings you up to 2.4
|
||||
# compatibility if you are running in Python 2.2 or 2.3:
|
||||
|
||||
try: bool, True, False ## Introduced in 2.3
|
||||
except NameError:
|
||||
class bool(int):
|
||||
"Simple implementation of Booleans, as in PEP 285"
|
||||
def __init__(self, val): self.val = val
|
||||
def __int__(self): return self.val
|
||||
def __repr__(self): return ('False', 'True')[self.val]
|
||||
|
||||
True, False = bool(1), bool(0)
|
||||
|
||||
try: sum ## Introduced in 2.3
|
||||
except NameError:
|
||||
def sum(seq, start=0):
|
||||
"""Sum the elements of seq.
|
||||
>>> sum([1, 2, 3])
|
||||
6
|
||||
"""
|
||||
return reduce(operator.add, seq, start)
|
||||
|
||||
try: enumerate ## Introduced in 2.3
|
||||
except NameError:
|
||||
def enumerate(collection):
|
||||
"""Return an iterator that enumerates pairs of (i, c[i]). PEP 279.
|
||||
>>> list(enumerate('abc'))
|
||||
[(0, 'a'), (1, 'b'), (2, 'c')]
|
||||
"""
|
||||
## Copied from PEP 279
|
||||
i = 0
|
||||
it = iter(collection)
|
||||
while 1:
|
||||
yield (i, it.next())
|
||||
i += 1
|
||||
|
||||
|
||||
try: reversed ## Introduced in 2.4
|
||||
except NameError:
|
||||
def reversed(seq):
|
||||
"""Iterate over x in reverse order.
|
||||
>>> list(reversed([1,2,3]))
|
||||
[3, 2, 1]
|
||||
"""
|
||||
if hasattr(seq, 'keys'):
|
||||
raise ValueError("mappings do not support reverse iteration")
|
||||
i = len(seq)
|
||||
while i > 0:
|
||||
i -= 1
|
||||
yield seq[i]
|
||||
|
||||
|
||||
try: sorted ## Introduced in 2.4
|
||||
except NameError:
|
||||
def sorted(seq, cmp=None, key=None, reverse=False):
|
||||
"""Copy seq and sort and return it.
|
||||
>>> sorted([3, 1, 2])
|
||||
[1, 2, 3]
|
||||
"""
|
||||
seq2 = copy.copy(seq)
|
||||
if key:
|
||||
if cmp == None:
|
||||
cmp = __builtins__.cmp
|
||||
seq2.sort(lambda x,y: cmp(key(x), key(y)))
|
||||
else:
|
||||
if cmp == None:
|
||||
seq2.sort()
|
||||
else:
|
||||
seq2.sort(cmp)
|
||||
if reverse:
|
||||
seq2.reverse()
|
||||
return seq2
|
||||
|
||||
try:
|
||||
set, frozenset ## set builtin introduced in 2.4
|
||||
except NameError:
|
||||
try:
|
||||
import sets ## sets module introduced in 2.3
|
||||
set, frozenset = sets.Set, sets.ImmutableSet
|
||||
except (NameError, ImportError):
|
||||
class BaseSet:
|
||||
"set type (see http://docs.python.org/lib/types-set.html)"
|
||||
|
||||
|
||||
def __init__(self, elements=[]):
|
||||
self.dict = {}
|
||||
for e in elements:
|
||||
self.dict[e] = 1
|
||||
|
||||
def __len__(self):
|
||||
return len(self.dict)
|
||||
|
||||
def __iter__(self):
|
||||
for e in self.dict:
|
||||
yield e
|
||||
|
||||
def __contains__(self, element):
|
||||
return element in self.dict
|
||||
|
||||
def issubset(self, other):
|
||||
for e in self.dict.keys():
|
||||
if e not in other:
|
||||
return False
|
||||
return True
|
||||
|
||||
def issuperset(self, other):
|
||||
for e in other:
|
||||
if e not in self:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def union(self, other):
|
||||
return type(self)(list(self) + list(other))
|
||||
|
||||
def intersection(self, other):
|
||||
return type(self)([e for e in self.dict if e in other])
|
||||
|
||||
def difference(self, other):
|
||||
return type(self)([e for e in self.dict if e not in other])
|
||||
|
||||
def symmetric_difference(self, other):
|
||||
return type(self)([e for e in self.dict if e not in other] +
|
||||
[e for e in other if e not in self.dict])
|
||||
|
||||
def copy(self):
|
||||
return type(self)(self.dict)
|
||||
|
||||
def __repr__(self):
|
||||
elements = ", ".join(map(str, self.dict))
|
||||
return "%s([%s])" % (type(self).__name__, elements)
|
||||
|
||||
__le__ = issubset
|
||||
__ge__ = issuperset
|
||||
__or__ = union
|
||||
__and__ = intersection
|
||||
__sub__ = difference
|
||||
__xor__ = symmetric_difference
|
||||
|
||||
class frozenset(BaseSet):
|
||||
"A frozenset is a BaseSet that has a hash value and is immutable."
|
||||
|
||||
def __init__(self, elements=[]):
|
||||
BaseSet.__init__(self, elements)
|
||||
self.hash = 0
|
||||
for e in self:
|
||||
self.hash |= hash(e)
|
||||
|
||||
def __hash__(self):
|
||||
return self.hash
|
||||
|
||||
class set(BaseSet):
|
||||
"A set is a BaseSet that does not have a hash, but is mutable."
|
||||
|
||||
def update(self, other):
|
||||
for e in other:
|
||||
self.add(e)
|
||||
return self
|
||||
|
||||
def intersection_update(self, other):
|
||||
for e in self.dict.keys():
|
||||
if e not in other:
|
||||
self.remove(e)
|
||||
return self
|
||||
|
||||
def difference_update(self, other):
|
||||
for e in self.dict.keys():
|
||||
if e in other:
|
||||
self.remove(e)
|
||||
return self
|
||||
|
||||
def symmetric_difference_update(self, other):
    ## Keep the elements that are in exactly one of the two sets:
    ## drop the intersection, then add the elements of other we lack.
    to_remove = [e for e in self.dict if e in other]
    to_add = [e for e in other if e not in self.dict]
    self.difference_update(to_remove)
    self.update(to_add)
    return self
|
||||
|
||||
def add(self, element):
|
||||
self.dict[element] = 1
|
||||
|
||||
def remove(self, element):
|
||||
del self.dict[element]
|
||||
|
||||
def discard(self, element):
|
||||
if element in self.dict:
|
||||
del self.dict[element]
|
||||
|
||||
def pop(self):
|
||||
key, val = self.dict.popitem()
|
||||
return key
|
||||
|
||||
def clear(self):
|
||||
self.dict.clear()
|
||||
|
||||
__ior__ = update
|
||||
__iand__ = intersection_update
|
||||
__isub__ = difference_update
|
||||
__ixor__ = symmetric_difference_update
|
||||
|
||||
|
||||
|
||||
|
||||
#______________________________________________________________________________
|
||||
# Simple Data Structures: infinity, Dict, Struct
|
||||
|
||||
infinity = 1.0e400
|
||||
|
||||
def Dict(**entries):
|
||||
"""Create a dict out of the argument=value arguments.
|
||||
>>> Dict(a=1, b=2, c=3)
|
||||
{'a': 1, 'c': 3, 'b': 2}
|
||||
"""
|
||||
return entries
|
||||
|
||||
class DefaultDict(dict):
|
||||
"""Dictionary with a default value for unknown keys."""
|
||||
def __init__(self, default):
|
||||
self.default = default
|
||||
|
||||
def __getitem__(self, key):
|
||||
if key in self: return self.get(key)
|
||||
return self.setdefault(key, copy.deepcopy(self.default))
|
||||
|
||||
def __copy__(self):
|
||||
copy = DefaultDict(self.default)
|
||||
copy.update(self)
|
||||
return copy
|
||||
|
||||
class Struct:
|
||||
"""Create an instance with argument=value slots.
|
||||
This is for making a lightweight object whose class doesn't matter."""
|
||||
def __init__(self, **entries):
|
||||
self.__dict__.update(entries)
|
||||
|
||||
def __cmp__(self, other):
|
||||
if isinstance(other, Struct):
|
||||
return cmp(self.__dict__, other.__dict__)
|
||||
else:
|
||||
return cmp(self.__dict__, other)
|
||||
|
||||
def __repr__(self):
|
||||
args = ['%s=%s' % (k, repr(v)) for (k, v) in vars(self).items()]
|
||||
return 'Struct(%s)' % ', '.join(args)
|
||||
|
||||
def update(x, **entries):
|
||||
"""Update a dict; or an object with slots; according to entries.
|
||||
>>> update({'a': 1}, a=10, b=20)
|
||||
{'a': 10, 'b': 20}
|
||||
>>> update(Struct(a=1), a=10, b=20)
|
||||
Struct(a=10, b=20)
|
||||
"""
|
||||
if isinstance(x, dict):
|
||||
x.update(entries)
|
||||
else:
|
||||
x.__dict__.update(entries)
|
||||
return x
|
||||
|
||||
#______________________________________________________________________________
|
||||
# Functions on Sequences (mostly inspired by Common Lisp)
|
||||
# NOTE: Sequence functions (count_if, find_if, every, some) take function
|
||||
# argument first (like reduce, filter, and map).
|
||||
|
||||
def removeall(item, seq):
|
||||
"""Return a copy of seq (or string) with all occurences of item removed.
|
||||
>>> removeall(3, [1, 2, 3, 3, 2, 1, 3])
|
||||
[1, 2, 2, 1]
|
||||
>>> removeall(4, [1, 2, 3])
|
||||
[1, 2, 3]
|
||||
"""
|
||||
if isinstance(seq, str):
|
||||
return seq.replace(item, '')
|
||||
else:
|
||||
return [x for x in seq if x != item]
|
||||
|
||||
def unique(seq):
|
||||
"""Remove duplicate elements from seq. Assumes hashable elements.
|
||||
>>> unique([1, 2, 3, 2, 1])
|
||||
[1, 2, 3]
|
||||
"""
|
||||
return list(set(seq))
|
||||
|
||||
def product(numbers):
|
||||
"""Return the product of the numbers.
|
||||
>>> product([1,2,3,4])
|
||||
24
|
||||
"""
|
||||
return reduce(operator.mul, numbers, 1)
|
||||
|
||||
def count_if(predicate, seq):
|
||||
"""Count the number of elements of seq for which the predicate is true.
|
||||
>>> count_if(callable, [42, None, max, min])
|
||||
2
|
||||
"""
|
||||
f = lambda count, x: count + (not not predicate(x))
|
||||
return reduce(f, seq, 0)
|
||||
|
||||
def find_if(predicate, seq):
|
||||
"""If there is an element of seq that satisfies predicate; return it.
|
||||
>>> find_if(callable, [3, min, max])
|
||||
<built-in function min>
|
||||
>>> find_if(callable, [1, 2, 3])
|
||||
"""
|
||||
for x in seq:
|
||||
if predicate(x): return x
|
||||
return None
|
||||
|
||||
def every(predicate, seq):
|
||||
"""True if every element of seq satisfies predicate.
|
||||
>>> every(callable, [min, max])
|
||||
1
|
||||
>>> every(callable, [min, 3])
|
||||
0
|
||||
"""
|
||||
for x in seq:
|
||||
if not predicate(x): return False
|
||||
return True
|
||||
|
||||
def some(predicate, seq):
|
||||
"""If some element x of seq satisfies predicate(x), return predicate(x).
|
||||
>>> some(callable, [min, 3])
|
||||
1
|
||||
>>> some(callable, [2, 3])
|
||||
0
|
||||
"""
|
||||
for x in seq:
|
||||
px = predicate(x)
|
||||
if px: return px
|
||||
return False
|
||||
|
||||
def isin(elt, seq):
|
||||
"""Like (elt in seq), but compares with is, not ==.
|
||||
>>> e = []; isin(e, [1, e, 3])
|
||||
True
|
||||
>>> isin(e, [1, [], 3])
|
||||
False
|
||||
"""
|
||||
for x in seq:
|
||||
if elt is x: return True
|
||||
return False
|
||||
|
||||
#______________________________________________________________________________
|
||||
# Functions on sequences of numbers
|
||||
# NOTE: these take the sequence argument first, like min and max,
|
||||
# and like standard math notation: \sigma (i = 1..n) fn(i)
|
||||
# A lot of programming is finding the best value that satisfies some condition;
|
||||
# so there are three versions of argmin/argmax, depending on what you want to
|
||||
# do with ties: return the first one, return them all, or pick at random.
|
||||
|
||||
|
||||
def argmin(seq, fn):
|
||||
"""Return an element with lowest fn(seq[i]) score; tie goes to first one.
|
||||
>>> argmin(['one', 'to', 'three'], len)
|
||||
'to'
|
||||
"""
|
||||
best = seq[0]; best_score = fn(best)
|
||||
for x in seq:
|
||||
x_score = fn(x)
|
||||
if x_score < best_score:
|
||||
best, best_score = x, x_score
|
||||
return best
|
||||
|
||||
def argmin_list(seq, fn):
|
||||
"""Return a list of elements of seq[i] with the lowest fn(seq[i]) scores.
|
||||
>>> argmin_list(['one', 'to', 'three', 'or'], len)
|
||||
['to', 'or']
|
||||
"""
|
||||
best_score, best = fn(seq[0]), []
|
||||
for x in seq:
|
||||
x_score = fn(x)
|
||||
if x_score < best_score:
|
||||
best, best_score = [x], x_score
|
||||
elif x_score == best_score:
|
||||
best.append(x)
|
||||
return best
|
||||
|
||||
def argmin_random_tie(seq, fn):
|
||||
"""Return an element with lowest fn(seq[i]) score; break ties at random.
|
||||
Thus, for all s,f: argmin_random_tie(s, f) in argmin_list(s, f)"""
|
||||
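## This is reservoir sampling of size 1 over the tied elements: the kth
## element seen with the current best score replaces the choice with
## probability 1/k, so each tied element ends up equally likely.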
best_score = fn(seq[0]); n = 0
|
||||
for x in seq:
|
||||
x_score = fn(x)
|
||||
if x_score < best_score:
|
||||
best, best_score = x, x_score; n = 1
|
||||
elif x_score == best_score:
|
||||
n += 1
|
||||
if random.randrange(n) == 0:
|
||||
best = x
|
||||
return best
|
||||
|
||||
def argmax(seq, fn):
|
||||
"""Return an element with highest fn(seq[i]) score; tie goes to first one.
|
||||
>>> argmax(['one', 'to', 'three'], len)
|
||||
'three'
|
||||
"""
|
||||
return argmin(seq, lambda x: -fn(x))
|
||||
|
||||
def argmax_list(seq, fn):
|
||||
"""Return a list of elements of seq[i] with the highest fn(seq[i]) scores.
|
||||
>>> argmax_list(['one', 'three', 'seven'], len)
|
||||
['three', 'seven']
|
||||
"""
|
||||
return argmin_list(seq, lambda x: -fn(x))
|
||||
|
||||
def argmax_random_tie(seq, fn):
|
||||
"Return an element with highest fn(seq[i]) score; break ties at random."
|
||||
return argmin_random_tie(seq, lambda x: -fn(x))
|
||||
#______________________________________________________________________________
|
||||
# Statistical and mathematical functions
|
||||
|
||||
def histogram(values, mode=0, bin_function=None):
|
||||
"""Return a list of (value, count) pairs, summarizing the input values.
|
||||
Sorted by increasing value, or if mode=1, by decreasing count.
|
||||
If bin_function is given, map it over values first."""
|
||||
if bin_function: values = map(bin_function, values)
|
||||
bins = {}
|
||||
for val in values:
|
||||
bins[val] = bins.get(val, 0) + 1
|
||||
if mode:
|
||||
return sorted(bins.items(), key=lambda v: v[1], reverse=True)
|
||||
else:
|
||||
return sorted(bins.items())
|
||||
|
||||
def log2(x):
|
||||
"""Base 2 logarithm.
|
||||
>>> log2(1024)
|
||||
10.0
|
||||
"""
|
||||
return math.log10(x) / math.log10(2)
|
||||
|
||||
def mode(values):
|
||||
"""Return the most common value in the list of values.
|
||||
>>> mode([1, 2, 3, 2])
|
||||
2
|
||||
"""
|
||||
return histogram(values, mode=1)[0][0]
|
||||
|
||||
def median(values):
|
||||
"""Return the middle value, when the values are sorted.
|
||||
If there is an even number of elements, try to average the middle two.
|
||||
If they can't be averaged (e.g. they are strings), choose one at random.
|
||||
>>> median([10, 100, 11])
|
||||
11
|
||||
>>> median([1, 2, 3, 4])
|
||||
2.5
|
||||
"""
|
||||
n = len(values)
|
||||
values = sorted(values)
|
||||
if n % 2 == 1:
|
||||
return values[n/2]
|
||||
else:
|
||||
middle2 = values[(n/2)-1:(n/2)+1]
|
||||
try:
|
||||
return mean(middle2)
|
||||
except TypeError:
|
||||
return random.choice(middle2)
|
||||
|
||||
def mean(values):
|
||||
"""Return the arithmetic average of the values."""
|
||||
return sum(values) / float(len(values))
|
||||
|
||||
def stddev(values, meanval=None):
|
||||
"""The standard deviation of a set of values.
|
||||
Pass in the mean if you already know it."""
|
||||
if meanval == None: meanval = mean(values)
|
||||
return math.sqrt(sum([(x - meanval)**2 for x in values]) / (len(values)-1))
|
||||
|
||||
def dotproduct(X, Y):
|
||||
"""Return the sum of the element-wise product of vectors x and y.
|
||||
>>> dotproduct([1, 2, 3], [1000, 100, 10])
|
||||
1230
|
||||
"""
|
||||
return sum([x * y for x, y in zip(X, Y)])
|
||||
|
||||
def vector_add(a, b):
|
||||
"""Component-wise addition of two vectors.
|
||||
>>> vector_add((0, 1), (8, 9))
|
||||
(8, 10)
|
||||
"""
|
||||
return tuple(map(operator.add, a, b))
|
||||
|
||||
def probability(p):
|
||||
"Return true with probability p."
|
||||
return p > random.uniform(0.0, 1.0)
|
||||
|
||||
def num_or_str(x):
|
||||
"""The argument is a string; convert to a number if possible, or strip it.
|
||||
>>> num_or_str('42')
|
||||
42
|
||||
>>> num_or_str(' 42x ')
|
||||
'42x'
|
||||
"""
|
||||
if isnumber(x): return x
|
||||
try:
|
||||
return int(x)
|
||||
except ValueError:
|
||||
try:
|
||||
return float(x)
|
||||
except ValueError:
|
||||
return str(x).strip()
|
||||
|
||||
def normalize(numbers, total=1.0):
|
||||
"""Multiply each number by a constant such that the sum is 1.0 (or total).
|
||||
>>> normalize([1,2,1])
|
||||
[0.25, 0.5, 0.25]
|
||||
"""
|
||||
k = total / sum(numbers)
|
||||
return [k * n for n in numbers]
|
||||
|
||||
## OK, the following are not as widely useful utilities as some of the other
|
||||
## functions here, but they do show up wherever we have 2D grids: Wumpus and
|
||||
## Vacuum worlds, TicTacToe and Checkers, and Markov Decision Processes.
|
||||
|
||||
orientations = [(1,0), (0, 1), (-1, 0), (0, -1)]
|
||||
|
||||
def turn_right(orientation):
|
||||
return orientations[orientations.index(orientation)-1]
|
||||
|
||||
def turn_left(orientation):
|
||||
return orientations[(orientations.index(orientation)+1) % len(orientations)]
|
||||
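## Illustrative sketch, reading (1, 0) as facing east with y increasing upward:
## >>> turn_left((1, 0))
## (0, 1)
## >>> turn_right((1, 0))
## (0, -1)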
|
||||
def distance((ax, ay), (bx, by)):
|
||||
"The distance between two (x, y) points."
|
||||
return math.hypot((ax - bx), (ay - by))
|
||||
|
||||
def distance2((ax, ay), (bx, by)):
|
||||
"The square of the distance between two (x, y) points."
|
||||
return (ax - bx)**2 + (ay - by)**2
|
||||
|
||||
def clip(vector, lowest, highest):
|
||||
"""Return vector, except if any element is less than the corresponding
|
||||
value of lowest or more than the corresponding value of highest, clip to
|
||||
those values.
|
||||
>>> clip((-1, 10), (0, 0), (9, 9))
|
||||
(0, 9)
|
||||
"""
|
||||
return type(vector)(map(min, map(max, vector, lowest), highest))
|
||||
#______________________________________________________________________________
|
||||
# Misc Functions
|
||||
|
||||
def printf(format, *args):
|
||||
"""Format args with the first argument as format string, and write.
|
||||
Return the last arg, or format itself if there are no args."""
|
||||
sys.stdout.write(str(format) % args)
|
||||
return if_(args, args[-1], format)
|
||||
|
||||
def caller(n=1):
|
||||
"""Return the name of the calling function n levels up in the frame stack.
|
||||
>>> caller(0)
|
||||
'caller'
|
||||
>>> def f():
|
||||
... return caller()
|
||||
>>> f()
|
||||
'f'
|
||||
"""
|
||||
import inspect
|
||||
return inspect.getouterframes(inspect.currentframe())[n][3]
|
||||
|
||||
def memoize(fn, slot=None):
|
||||
"""Memoize fn: make it remember the computed value for any argument list.
|
||||
If slot is specified, store result in that slot of first argument.
|
||||
If slot is false, store results in a dictionary."""
|
||||
if slot:
|
||||
def memoized_fn(obj, *args):
|
||||
if hasattr(obj, slot):
|
||||
return getattr(obj, slot)
|
||||
else:
|
||||
val = fn(obj, *args)
|
||||
setattr(obj, slot, val)
|
||||
return val
|
||||
else:
|
||||
def memoized_fn(*args):
|
||||
if not memoized_fn.cache.has_key(args):
|
||||
memoized_fn.cache[args] = fn(*args)
|
||||
return memoized_fn.cache[args]
|
||||
memoized_fn.cache = {}
|
||||
return memoized_fn
|
||||
|
||||
def if_(test, result, alternative):
|
||||
"""Like C++ and Java's (test ? result : alternative), except
|
||||
both result and alternative are always evaluated. However, if
|
||||
either evaluates to a function, it is applied to the empty arglist,
|
||||
so you can delay execution by putting it in a lambda.
|
||||
>>> if_(2 + 2 == 4, 'ok', lambda: expensive_computation())
|
||||
'ok'
|
||||
"""
|
||||
if test:
|
||||
if callable(result): return result()
|
||||
return result
|
||||
else:
|
||||
if callable(alternative): return alternative()
|
||||
return alternative
|
||||
|
||||
def name(object):
|
||||
"Try to find some reasonable name for the object."
|
||||
return (getattr(object, 'name', 0) or getattr(object, '__name__', 0)
|
||||
or getattr(getattr(object, '__class__', 0), '__name__', 0)
|
||||
or str(object))
|
||||
|
||||
def isnumber(x):
|
||||
"Is x a number? We say it is if it has a __int__ method."
|
||||
return hasattr(x, '__int__')
|
||||
|
||||
def issequence(x):
|
||||
"Is x a sequence? We say it is if it has a __getitem__ method."
|
||||
return hasattr(x, '__getitem__')
|
||||
|
||||
def print_table(table, header=None, sep=' ', numfmt='%g'):
|
||||
"""Print a list of lists as a table, so that columns line up nicely.
|
||||
header, if specified, will be printed as the first row.
|
||||
numfmt is the format for all numbers; you might want e.g. '%6.2f'.
|
||||
(If you want different formats in different columns, don't use print_table.)
|
||||
sep is the separator between columns."""
|
||||
justs = [if_(isnumber(x), 'rjust', 'ljust') for x in table[0]]
|
||||
if header:
|
||||
table = [header] + table
|
||||
table = [[if_(isnumber(x), lambda: numfmt % x, x) for x in row]
|
||||
for row in table]
|
||||
maxlen = lambda seq: max(map(len, seq))
|
||||
sizes = map(maxlen, zip(*[map(str, row) for row in table]))
|
||||
for row in table:
|
||||
for (j, size, x) in zip(justs, sizes, row):
|
||||
print getattr(str(x), j)(size), sep,
|
||||
print
|
||||
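## Illustrative call (exact spacing depends on the '%g' formatting and sep):
## this prints a header row, right-justifies the numeric column, and
## left-justifies the string column.
## >>> print_table([[1, 'one'], [20, 'twenty']], header=['num', 'name'])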
|
||||
def AIMAFile(components, mode='r'):
|
||||
"Open a file based at the AIMA root directory."
|
||||
import utils
|
||||
dir = os.path.dirname(utils.__file__)
|
||||
return open(apply(os.path.join, [dir] + components), mode)
|
||||
|
||||
def DataFile(name, mode='r'):
|
||||
"Return a file in the AIMA /data directory."
|
||||
return AIMAFile(['..', 'data', name], mode)
|
||||
|
||||
|
||||
#______________________________________________________________________________
|
||||
# Queues: Stack, FIFOQueue, PriorityQueue
|
||||
|
||||
class Queue:
|
||||
"""Queue is an abstract class/interface. There are three types:
|
||||
Stack(): A Last In First Out Queue.
|
||||
FIFOQueue(): A First In First Out Queue.
|
||||
PriorityQueue(lt): Queue where items are sorted by lt, (default <).
|
||||
Each type supports the following methods and functions:
|
||||
q.append(item) -- add an item to the queue
|
||||
q.extend(items) -- equivalent to: for item in items: q.append(item)
|
||||
q.pop() -- return the top item from the queue
|
||||
len(q) -- number of items in q (also q.__len__())
|
||||
Note that isinstance(Stack(), Queue) is false, because we implement stacks
|
||||
as lists. If Python ever gets interfaces, Queue will be an interface."""
|
||||
|
||||
def __init__(self):
|
||||
abstract
|
||||
|
||||
def extend(self, items):
|
||||
for item in items: self.append(item)
|
||||
|
||||
def Stack():
|
||||
"""Return an empty list, suitable as a Last-In-First-Out Queue."""
|
||||
return []
|
||||
|
||||
class FIFOQueue(Queue):
|
||||
"""A First-In-First-Out Queue."""
|
||||
def __init__(self):
|
||||
self.A = []; self.start = 0
|
||||
def append(self, item):
|
||||
self.A.append(item)
|
||||
def __len__(self):
|
||||
return len(self.A) - self.start
|
||||
def extend(self, items):
|
||||
self.A.extend(items)
|
||||
def pop(self):
|
||||
e = self.A[self.start]
|
||||
self.start += 1
|
||||
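## Once the popped prefix is longer than 5 items and more than half the
## list, copy the live tail down and reset start, so memory is reclaimed
## and pops stay O(1) amortized.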
if self.start > 5 and self.start > len(self.A)/2:
|
||||
self.A = self.A[self.start:]
|
||||
self.start = 0
|
||||
return e
|
||||
|
||||
class PriorityQueue(Queue):
|
||||
"""A queue in which the minimum (or maximum) element (as determined by f and
|
||||
order) is returned first. If order is min, the item with minimum f(x) is
|
||||
returned first; if order is max, then it is the item with maximum f(x)."""
|
||||
def __init__(self, order=min, f=lambda x: x):
|
||||
update(self, A=[], order=order, f=f)
|
||||
def append(self, item):
|
||||
bisect.insort(self.A, (self.f(item), item))
|
||||
def __len__(self):
|
||||
return len(self.A)
|
||||
def pop(self):
|
||||
if self.order == min:
|
||||
return self.A.pop(0)[1]
|
||||
else:
|
||||
return self.A.pop()[1]
|
||||
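## Illustrative sketch: order items by a key function f, popping the item
## with the smallest f-value first.
## >>> q = PriorityQueue(min, f=len)
## >>> q.extend(['three', 'to', 'seven'])
## >>> q.pop()
## 'to'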
|
||||
## Fig: The idea is we can define things like Fig[3,10] later.
|
||||
## Alas, it is Fig[3,10] not Fig[3.10], because that would be the same as Fig[3.1]
|
||||
Fig = {}
|
||||
|
||||
|
||||
|
@@ -1,169 +0,0 @@
|
||||
>>> d = DefaultDict(0)
|
||||
>>> d['x'] += 1
|
||||
>>> d['x']
|
||||
1
|
||||
|
||||
>>> d = DefaultDict([])
|
||||
>>> d['x'] += [1]
|
||||
>>> d['y'] += [2]
|
||||
>>> d['x']
|
||||
[1]
|
||||
|
||||
>>> s = Struct(a=1, b=2)
|
||||
>>> s.a
|
||||
1
|
||||
>>> s.a = 3
|
||||
>>> s
|
||||
Struct(a=3, b=2)
|
||||
|
||||
>>> def is_even(x):
|
||||
... return x % 2 == 0
|
||||
>>> sorted([1, 2, -3])
|
||||
[-3, 1, 2]
|
||||
>>> sorted(range(10), key=is_even)
|
||||
[1, 3, 5, 7, 9, 0, 2, 4, 6, 8]
|
||||
>>> sorted(range(10), lambda x,y: y-x)
|
||||
[9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
|
||||
|
||||
>>> removeall(4, [])
|
||||
[]
|
||||
>>> removeall('s', 'This is a test. Was a test.')
|
||||
'Thi i a tet. Wa a tet.'
|
||||
>>> removeall('s', 'Something')
|
||||
'Something'
|
||||
>>> removeall('s', '')
|
||||
''
|
||||
|
||||
>>> list(reversed([]))
|
||||
[]
|
||||
|
||||
>>> count_if(is_even, [1, 2, 3, 4])
|
||||
2
|
||||
>>> count_if(is_even, [])
|
||||
0
|
||||
|
||||
>>> argmax([1], lambda x: x*x)
|
||||
1
|
||||
>>> argmin([1], lambda x: x*x)
|
||||
1
|
||||
|
||||
|
||||
# Test of memoize with slots in structures
|
||||
>>> countries = [Struct(name='united states'), Struct(name='canada')]
|
||||
|
||||
# Pretend that 'gnp' was some big hairy operation:
|
||||
>>> def gnp(country):
|
||||
... print 'calculating gnp ...'
|
||||
... return len(country.name) * 1e10
|
||||
|
||||
>>> gnp = memoize(gnp, '_gnp')
|
||||
>>> map(gnp, countries)
|
||||
calculating gnp ...
|
||||
calculating gnp ...
|
||||
[130000000000.0, 60000000000.0]
|
||||
>>> countries
|
||||
[Struct(_gnp=130000000000.0, name='united states'), Struct(_gnp=60000000000.0, name='canada')]
|
||||
|
||||
# This time we avoid re-doing the calculation
|
||||
>>> map(gnp, countries)
|
||||
[130000000000.0, 60000000000.0]
|
||||
|
||||
# Test Queues:
|
||||
>>> nums = [1, 8, 2, 7, 5, 6, -99, 99, 4, 3, 0]
|
||||
>>> def qtest(q):
|
||||
... return [q.extend(nums), [q.pop() for i in range(len(q))]][1]
|
||||
|
||||
>>> qtest(Stack())
|
||||
[0, 3, 4, 99, -99, 6, 5, 7, 2, 8, 1]
|
||||
|
||||
>>> qtest(FIFOQueue())
|
||||
[1, 8, 2, 7, 5, 6, -99, 99, 4, 3, 0]
|
||||
|
||||
>>> qtest(PriorityQueue(min))
|
||||
[-99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 99]
|
||||
|
||||
>>> qtest(PriorityQueue(max))
|
||||
[99, 8, 7, 6, 5, 4, 3, 2, 1, 0, -99]
|
||||
|
||||
>>> qtest(PriorityQueue(min, abs))
|
||||
[0, 1, 2, 3, 4, 5, 6, 7, 8, -99, 99]
|
||||
|
||||
>>> qtest(PriorityQueue(max, abs))
|
||||
[99, -99, 8, 7, 6, 5, 4, 3, 2, 1, 0]
|
||||
|
||||
>>> vals = [100, 110, 160, 200, 160, 110, 200, 200, 220]
|
||||
>>> histogram(vals)
|
||||
[(100, 1), (110, 2), (160, 2), (200, 3), (220, 1)]
|
||||
>>> histogram(vals, 1)
|
||||
[(200, 3), (110, 2), (160, 2), (220, 1), (100, 1)]
|
||||
>>> histogram(vals, 1, lambda v: round(v, -2))
|
||||
[(200.0, 6), (100.0, 3)]
|
||||
|
||||
>>> log2(1.0)
|
||||
0.0
|
||||
|
||||
>>> def fib(n):
|
||||
... return (n<=1 and 1) or (fib(n-1) + fib(n-2))
|
||||
|
||||
>>> fib(9)
|
||||
55
|
||||
|
||||
# Now we make it faster:
|
||||
>>> fib = memoize(fib)
|
||||
>>> fib(9)
|
||||
55
|
||||
|
||||
>>> q = Stack()
|
||||
>>> q.append(1)
|
||||
>>> q.append(2)
|
||||
>>> q.pop(), q.pop()
|
||||
(2, 1)
|
||||
|
||||
>>> q = FIFOQueue()
|
||||
>>> q.append(1)
|
||||
>>> q.append(2)
|
||||
>>> q.pop(), q.pop()
|
||||
(1, 2)
|
||||
|
||||
|
||||
>>> abc = set('abc')
|
||||
>>> bcd = set('bcd')
|
||||
>>> 'a' in abc
|
||||
True
|
||||
>>> 'a' in bcd
|
||||
False
|
||||
>>> list(abc.intersection(bcd))
|
||||
['c', 'b']
|
||||
>>> list(abc.union(bcd))
|
||||
['a', 'c', 'b', 'd']
|
||||
|
||||
## From "What's new in Python 2.4", but I added calls to sl
|
||||
|
||||
>>> def sl(x):
|
||||
... return sorted(list(x))
|
||||
|
||||
|
||||
>>> a = set('abracadabra') # form a set from a string
|
||||
>>> 'z' in a # fast membership testing
|
||||
False
|
||||
>>> sl(a) # unique letters in a
|
||||
['a', 'b', 'c', 'd', 'r']
|
||||
|
||||
>>> b = set('alacazam') # form a second set
|
||||
>>> sl(a - b) # letters in a but not in b
|
||||
['b', 'd', 'r']
|
||||
>>> sl(a | b) # letters in either a or b
|
||||
['a', 'b', 'c', 'd', 'l', 'm', 'r', 'z']
|
||||
>>> sl(a & b) # letters in both a and b
|
||||
['a', 'c']
|
||||
>>> sl(a ^ b) # letters in a or b but not both
|
||||
['b', 'd', 'l', 'm', 'r', 'z']
|
||||
|
||||
|
||||
>>> a.add('z') # add a new element
|
||||
>>> a.update('wxy') # add multiple new elements
|
||||
>>> sl(a)
|
||||
['a', 'b', 'c', 'd', 'r', 'w', 'x', 'y', 'z']
|
||||
>>> a.remove('x') # take one element out
|
||||
>>> sl(a)
|
||||
['a', 'b', 'c', 'd', 'r', 'w', 'y', 'z']
|