minimax.py
# ------------------------------------------------------------------------------
#
# Minimax: minimax.py
#
# ------------------------------------------------------------------------------
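
# Board encoding assumed by this module (inferred from evaluate() below; it is
# not stated explicitly in the file): `board` is a flat list of 64 cells, one
# rank after another. A cell of -1 is an empty square; otherwise
# cell = piece_type * 2 + color, where piece_type indexes pos_value and
# piece_value (0 = King, 1 = Queen, 2 = Bishop, 3 = Knight, 4 = Rook,
# 5 = Pawn) and color is 0 or 1.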


class Minimax:
    # Piece-square tables: a positional weight for each of the 64 squares,
    # one table per piece type, in the same order as piece_value below.
    pos_value = [
        # King
        [2.0, 2.0, 2.0, 1.5, 1.5, 2.0, 2.0, 2.0,
         2.0, 2.0, 1.5, 1.5, 1.5, 1.5, 2.0, 2.0,
         2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0,
         1.0, 1.0, 1.0, 0.5, 0.5, 1.0, 1.0, 1.0,
         1.0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1.0,
         1.0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1.0,
         1.0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1.0,
         1.0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1.0],
        # Queen
        [0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5,
         1.0, 1.5, 2.0, 1.5, 1.5, 1.5, 1.5, 1.0,
         1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 1.5, 1.0,
         1.0, 1.5, 2.0, 2.0, 2.0, 2.0, 1.5, 1.0,
         1.0, 1.5, 2.0, 2.0, 2.0, 2.0, 1.5, 1.0,
         1.0, 1.5, 2.0, 2.0, 2.0, 2.0, 1.5, 1.0,
         1.0, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.0,
         0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5],
        # Bishops
        [0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5,
         1.0, 1.5, 1.0, 1.0, 1.0, 1.0, 1.5, 1.0,
         1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 1.0,
         1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0,
         1.0, 1.5, 1.5, 2.0, 2.0, 1.5, 1.5, 1.0,
         1.0, 1.0, 1.5, 2.0, 2.0, 1.5, 1.0, 1.0,
         1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
         0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5],
        # Knights
        [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
         0.5, 0.5, 1.0, 1.5, 1.5, 1.0, 0.5, 0.5,
         0.5, 1.5, 1.5, 2.0, 2.0, 1.5, 1.5, 0.5,
         0.5, 1.0, 2.0, 2.0, 2.0, 2.0, 1.0, 0.5,
         0.5, 1.5, 2.0, 2.0, 2.0, 2.0, 1.5, 0.5,
         0.5, 1.0, 1.5, 2.0, 2.0, 1.5, 1.0, 0.5,
         0.5, 0.5, 1.0, 1.0, 1.0, 1.0, 0.5, 0.5,
         0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
        # Rooks
        [1.0, 1.0, 1.0, 1.5, 1.5, 1.0, 1.0, 1.0,
         0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5,
         0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5,
         0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5,
         0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5,
         0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5,
         0.5, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 0.5,
         1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
        # Pawns
        [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
         1.5, 1.5, 1.5, 0.5, 0.5, 1.5, 1.5, 1.5,
         1.5, 0.5, 0.5, 1.0, 1.0, 0.5, 0.5, 1.5,
         1.0, 1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 1.0,
         0.5, 0.5, 1.5, 2.0, 2.0, 1.5, 0.5, 0.5,
         1.5, 1.5, 2.0, 2.0, 2.0, 2.0, 1.5, 1.5,
         2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
         0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    ]
    # Material value per piece type (King, Queen, Bishop, Knight, Rook, Pawn).
    piece_value = [150, 90, 30, 30, 50, 10]
    def __init__(self, player):
        self.player = player

    def evaluate(self, current):
        # Static evaluation: material value weighted by the positional table,
        # counted positively for this player's pieces and negatively for the
        # opponent's.
        value = 0.0
        for (pos, cell) in enumerate(current.board):
            if cell == -1:
                continue
            piece, color = cell // 2, cell % 2
            # Mirror the square vertically when this engine plays the other side.
            p = pos if self.player == 0 else 56 - 8 * (pos // 8) + pos % 8
            if color == self.player:
                value += self.pos_value[piece][p] * self.piece_value[piece]
            else:
                value -= self.pos_value[piece][p] * self.piece_value[piece]
        return value
    def minimax(self, depth, board, alpha, beta, player):
        # Depth-limited minimax with alpha-beta pruning; returns (value, move).
        best_move = (0, 0, 0, 0)
        if depth == 0:
            return (self.evaluate(board), best_move)
        all_moves = board.get_all_valid_moves(player)
        if player == self.player:
            # Maximizing side: the engine's own moves.
            max_val = -10000.0
            for move in all_moves:
                other = board.simulate(move)
                (val, _) = self.minimax(depth - 1, other, alpha, beta, 1 - player)
                if val > max_val:
                    max_val = val
                    best_move = move
                if val > alpha:
                    alpha = val
                if alpha >= beta:
                    # Prune: the opponent already has a better option elsewhere.
                    return (alpha, best_move)
            return (max_val, best_move)
        else:
            # Minimizing side: the opponent's replies.
            min_val = 10000.0
            for move in all_moves:
                other = board.simulate(move)
                (val, _) = self.minimax(depth - 1, other, alpha, beta, 1 - player)
                if val < min_val:
                    min_val = val
                    best_move = move
                if val < beta:
                    beta = val
                if alpha >= beta:
                    return (beta, best_move)
            return (min_val, best_move)
    def get_best_move(self, board):
        # Fixed search depth of 4 plies; prints the evaluation of the chosen line.
        depth = 4
        (value, best_move) = self.minimax(depth, board, -10000.0, 10000.0, self.player)
        print(value)
        return best_move
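

# ------------------------------------------------------------------------------
# Usage sketch (assumption): the rest of this project is expected to provide a
# Board class exposing `board` (the 64-cell list described above),
# `get_all_valid_moves(player)` and `simulate(move)`. The stub below only
# mimics that interface so this file can be run on its own; it is not the
# project's real board implementation.
# ------------------------------------------------------------------------------
if __name__ == "__main__":
    class StubBoard:
        def __init__(self):
            self.board = [-1] * 64
            self.board[4] = 0 * 2 + 0    # piece 0 (king), color 0, on square 4
            self.board[60] = 0 * 2 + 1   # piece 0 (king), color 1, on square 60

        def get_all_valid_moves(self, player):
            return []                    # a real board would generate legal moves

        def simulate(self, move):
            return self                  # a real board would apply `move` to a copy

    engine = Minimax(0)
    print(engine.evaluate(StubBoard()))  # static evaluation from player 0's side
    # With the real Board, the engine is driven through:
    #     best_move = engine.get_best_move(board)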