Rev 108 | Go to most recent revision | Details | Last modification | View Log | RSS feed
Rev | Author | Line No. | Line |
---|---|---|---|
33 | pmbaty | 1 | #include "chess.h" |
2 | #include "data.h" |
||
3 | /* last modified 05/15/14 */ |
||
4 | /* |
||
5 | ******************************************************************************* |
||
6 | * * |
||
7 | * NextEvasion() is used to select the next move from the current move list * |
||
8 | * when the king is in check. We use GenerateEvasions() (in movgen.c) to * |
||
9 | * generate a list of moves that get us out of check. The only unusual * |
||
10 | * feature is that these moves are all legal and do not need to be vetted * |
||
11 | * with the usual Check() function to test for legality. * |
||
12 | * * |
||
13 | ******************************************************************************* |
||
14 | */ |
||
int NextEvasion(TREE * RESTRICT tree, int ply, int wtm) {
  int *movep, *sortv;

/*
 Phase-driven move selector used only when the side to move is in check.
 Each call returns one move in tree->curmv[ply] together with the phase
 that produced it (HASH_MOVE or REMAINING_MOVES), or NONE when the list is
 exhausted.  The switch cases fall through deliberately so a single call
 can advance through several phases.
 */
  switch (tree->next_status[ply].phase) {
/*
 ************************************************************
 *                                                          *
 *  First try the transposition table move (which might be  *
 *  the principal variation move as we first move down the  *
 *  tree).  If it is good enough to cause a cutoff, we      *
 *  avoided the overhead of generating legal moves.         *
 *                                                          *
 ************************************************************
 */
    case HASH_MOVE:
      if (tree->hash_move[ply]) {
        tree->next_status[ply].phase = GENERATE_ALL_MOVES;
        tree->curmv[ply] = tree->hash_move[ply];
        if (ValidMove(tree, ply, wtm, tree->curmv[ply]))
          return HASH_MOVE;
#if defined(DEBUG)
        else
          Print(128, "bad move from hash table, ply=%d\n", ply);
#endif
      }
      /* fall through -- no usable hash move (or no cutoff possible yet) */
/*
 ************************************************************
 *                                                          *
 *  Now generate all legal moves by using the special       *
 *  GenerateCheckEvasions() procedure.  Then sort the moves *
 *  based on the expected gain or loss.  This is deferred   *
 *  until now to see if the hash move is good enough to     *
 *  produce a cutoff and avoid this effort.                 *
 *                                                          *
 *  Once we confirm that the move does not lose any         *
 *  material, we sort these non-losing moves into MVV/LVA   *
 *  order which appears to be a slightly faster move        *
 *  ordering idea.  Unsafe evasion moves are sorted using   *
 *  the original Swap() score to keep them last in the move *
 *  list.  Note that this move list contains both captures  *
 *  and non-captures.  We try the safe captures first due   *
 *  to the way the sort score is computed.                  *
 *                                                          *
 ************************************************************
 */
    case GENERATE_ALL_MOVES:
      tree->last[ply] =
          GenerateCheckEvasions(tree, ply, wtm, tree->last[ply - 1]);
      tree->next_status[ply].phase = REMAINING_MOVES;
      /* The hash move was already searched above, so it is zeroed out of
         the list and given the worst possible score.  Safe captures get
         128*victim - attacker so they sort in MVV/LVA order; a capture
         that Swap() says loses material keeps its negative Swap() score
         so it sorts behind everything else. */
      for (movep = tree->last[ply - 1], sortv = tree->sort_value;
          movep < tree->last[ply]; movep++, sortv++)
        if (tree->hash_move[ply] && *movep == tree->hash_move[ply]) {
          *sortv = -999999;
          *movep = 0;
        } else {
          if (pcval[Piece(*movep)] <= pcval[Captured(*movep)])
            *sortv = 128 * pcval[Captured(*movep)] - pcval[Piece(*movep)];
          else {
            *sortv = Swap(tree, *movep, wtm);
            if (*sortv >= 0)
              *sortv = 128 * pcval[Captured(*movep)] - pcval[Piece(*movep)];
          }
        }
/*
 ************************************************************
 *                                                          *
 *  This is a simple insertion sort algorithm.  It seems to *
 *  be no faster than a normal bubble sort, but using this  *
 *  eliminated a lot of explaining about "why?".  :)        *
 *                                                          *
 ************************************************************
 */
      if (tree->last[ply] > tree->last[ply - 1] + 1) {
        int temp1, temp2, *tmovep, *tsortv, *end;

        /* moves and their scores live in parallel arrays, so both are
           shifted together as each entry is inserted. */
        sortv = tree->sort_value + 1;
        end = tree->last[ply];
        for (movep = tree->last[ply - 1] + 1; movep < end; movep++, sortv++) {
          temp1 = *movep;
          temp2 = *sortv;
          tmovep = movep - 1;
          tsortv = sortv - 1;
          while (tmovep >= tree->last[ply - 1] && *tsortv < temp2) {
            *(tsortv + 1) = *tsortv;
            *(tmovep + 1) = *tmovep;
            tmovep--;
            tsortv--;
          }
          *(tmovep + 1) = temp1;
          *(tsortv + 1) = temp2;
        }
      }
      tree->next_status[ply].last = tree->last[ply - 1];
      /* fall through -- start handing out the sorted moves */
/*
 ************************************************************
 *                                                          *
 *  Now try the moves in sorted order.                      *
 *                                                          *
 ************************************************************
 */
    case REMAINING_MOVES:
      for (; tree->next_status[ply].last < tree->last[ply];
          tree->next_status[ply].last++)
        if (*tree->next_status[ply].last) {     /* skip zeroed (hash) slots */
          tree->curmv[ply] = *tree->next_status[ply].last++;
          return REMAINING_MOVES;
        }
      return NONE;
    default:
      printf("oops! next_status.phase is bad! [evasion %d]\n",
          tree->next_status[ply].phase);
  }
  return NONE;
}
||
129 | |||
130 | /* last modified 05/15/14 */ |
||
131 | /* |
||
132 | ******************************************************************************* |
||
133 | * * |
||
134 | * NextMove() is used to select the next move from the current move list. * |
||
135 | * * |
||
136 | * The "excluded move" code below simply collects any moves that were * |
||
137 | * searched without being generated (hash move and up to 4 killers). We * |
||
138 | * save them in the NEXT structure and make sure to exclude them when * |
||
139 | * searching after a move generation to avoid the duplicated effort. * |
||
140 | * * |
||
141 | ******************************************************************************* |
||
142 | */ |
||
int NextMove(TREE * RESTRICT tree, int ply, int wtm) {
  int *movep, *sortv;

/*
 Phase-driven move selector for normal (non-check) positions.  Each call
 returns one move in tree->curmv[ply] together with the phase that
 produced it, or NONE when the list is exhausted.  Moves searched before a
 move generation (hash move and up to 4 killers) are recorded in
 excluded_moves[] so Exclude() can skip them later.  The switch cases fall
 through deliberately so a single call can advance through several phases.
 */
  switch (tree->next_status[ply].phase) {
/*
 ************************************************************
 *                                                          *
 *  First, try the transposition table move (which will be  *
 *  the principal variation move as we first move down the  *
 *  tree).                                                  *
 *                                                          *
 ************************************************************
 */
    case HASH_MOVE:
      tree->next_status[ply].num_excluded = 0;
      tree->next_status[ply].phase = GENERATE_CAPTURE_MOVES;
      if (tree->hash_move[ply]) {
        tree->curmv[ply] = tree->hash_move[ply];
        /* remember it so it is not searched again after generation */
        tree->next_status[ply].excluded_moves[tree->next_status[ply].
            num_excluded++]
            = tree->curmv[ply];
        if (ValidMove(tree, ply, wtm, tree->curmv[ply]))
          return HASH_MOVE;
#if defined(DEBUG)
        else
          Print(128, "bad move from hash table, ply=%d\n", ply);
#endif
      }
      /* fall through -- no cutoff from the hash move */
/*
 ************************************************************
 *                                                          *
 *  Generate captures and sort them based on the simple     *
 *  MVV/LVA ordering where we try to capture the most       *
 *  valuable victim piece possible, using the least         *
 *  valuable attacking piece possible.  Later we will test  *
 *  to see if the capture appears to lose material and we   *
 *  will defer searching it until later.                    *
 *                                                          *
 ************************************************************
 */
    case GENERATE_CAPTURE_MOVES:
      tree->next_status[ply].phase = CAPTURE_MOVES;
      tree->last[ply] = GenerateCaptures(tree, ply, wtm, tree->last[ply - 1]);
      /* remaining counts the captures still to be tried in CAPTURE_MOVES */
      tree->next_status[ply].remaining = 0;
      for (movep = tree->last[ply - 1], sortv = tree->sort_value;
          movep < tree->last[ply]; movep++, sortv++)
        if (*movep == tree->hash_move[ply]) {
          /* the hash move was generated here:  zero it out of the list and
             drop the exclusion since it can no longer be encountered */
          *sortv = -999999;
          *movep = 0;
          tree->next_status[ply].num_excluded = 0;
        } else {
          *sortv = 128 * pcval[Captured(*movep)] - pcval[Piece(*movep)];
          tree->next_status[ply].remaining++;
        }
/*
 ************************************************************
 *                                                          *
 *  This is a simple insertion sort algorithm.  It seems to *
 *  be no faster than a normal bubble sort, but using this  *
 *  eliminated a lot of explaining about "why?".  :)        *
 *                                                          *
 ************************************************************
 */
      if (tree->last[ply] > tree->last[ply - 1] + 1) {
        int temp1, temp2, *tmovep, *tsortv, *end;

        /* moves and their scores live in parallel arrays, so both are
           shifted together as each entry is inserted. */
        sortv = tree->sort_value + 1;
        end = tree->last[ply];
        for (movep = tree->last[ply - 1] + 1; movep < end; movep++, sortv++) {
          temp1 = *movep;
          temp2 = *sortv;
          tmovep = movep - 1;
          tsortv = sortv - 1;
          while (tmovep >= tree->last[ply - 1] && *tsortv < temp2) {
            *(tsortv + 1) = *tsortv;
            *(tmovep + 1) = *tmovep;
            tmovep--;
            tsortv--;
          }
          *(tmovep + 1) = temp1;
          *(tsortv + 1) = temp2;
        }
      }
      tree->next_status[ply].last = tree->last[ply - 1];
      /* fall through -- start handing out the sorted captures */
/*
 ************************************************************
 *                                                          *
 *  Try the captures moves, which are in order based on     *
 *  MVV/LVA ordering.  If a larger-valued piece captures a  *
 *  lesser-valued piece, and Swap() says it loses material, *
 *  this capture will be deferred until later.              *
 *                                                          *
 ************************************************************
 */
    case CAPTURE_MOVES:
      while (tree->next_status[ply].remaining) {
        tree->curmv[ply] = *(tree->next_status[ply].last++);
        if (!--tree->next_status[ply].remaining)
          tree->next_status[ply].phase = KILLER_MOVE_1;
        /* a losing capture (per Swap()) is left in the list -- it will be
           picked up again during REMAINING_MOVES */
        if (pcval[Piece(tree->curmv[ply])] > pcval[Captured(tree->curmv[ply])]
            && Swap(tree, tree->curmv[ply], wtm) < 0)
          continue;
        *(tree->next_status[ply].last - 1) = 0;   /* consumed: zero the slot */
        return CAPTURE_MOVES;
      }
      /* fall through -- all safe captures searched */
/*
 ************************************************************
 *                                                          *
 *  Now, try the killer moves.  This phase tries the two    *
 *  killers for the current ply without generating moves,   *
 *  which saves time if a cutoff occurs.  After those two   *
 *  killers are searched, we try the killers from two plies *
 *  back since they have greater depth and might produce a  *
 *  cutoff if the current two do not.                       *
 *                                                          *
 ************************************************************
 */
    case KILLER_MOVE_1:
      if (!Exclude(tree, ply, tree->killers[ply].move1) &&
          ValidMove(tree, ply, wtm, tree->killers[ply].move1)) {
        tree->curmv[ply] = tree->killers[ply].move1;
        tree->next_status[ply].excluded_moves[tree->next_status[ply].
            num_excluded++]
            = tree->curmv[ply];
        tree->next_status[ply].phase = KILLER_MOVE_2;
        return KILLER_MOVE_1;
      }
      /* fall through */
    case KILLER_MOVE_2:
      if (!Exclude(tree, ply, tree->killers[ply].move2) &&
          ValidMove(tree, ply, wtm, tree->killers[ply].move2)) {
        tree->curmv[ply] = tree->killers[ply].move2;
        tree->next_status[ply].excluded_moves[tree->next_status[ply].
            num_excluded++]
            = tree->curmv[ply];
        /* at ply < 3 there are no killers two plies back to try */
        if (ply < 3) {
          tree->next_status[ply].phase = GENERATE_ALL_MOVES;
        } else
          tree->next_status[ply].phase = KILLER_MOVE_3;
        return KILLER_MOVE_2;
      }
      /* fall through */
    case KILLER_MOVE_3:
      if (!Exclude(tree, ply, tree->killers[ply - 2].move1) &&
          ValidMove(tree, ply, wtm, tree->killers[ply - 2].move1)) {
        tree->curmv[ply] = tree->killers[ply - 2].move1;
        tree->next_status[ply].excluded_moves[tree->next_status[ply].
            num_excluded++]
            = tree->curmv[ply];
        tree->next_status[ply].phase = KILLER_MOVE_4;
        return KILLER_MOVE_3;
      }
      /* fall through */
    case KILLER_MOVE_4:
      if (!Exclude(tree, ply, tree->killers[ply - 2].move2) &&
          ValidMove(tree, ply, wtm, tree->killers[ply - 2].move2)) {
        tree->curmv[ply] = tree->killers[ply - 2].move2;
        tree->next_status[ply].excluded_moves[tree->next_status[ply].
            num_excluded++]
            = tree->curmv[ply];
        tree->next_status[ply].phase = GENERATE_ALL_MOVES;
        return KILLER_MOVE_4;
      }
      /* fall through */
/*
 ************************************************************
 *                                                          *
 *  Now, generate all non-capturing moves, which get added  *
 *  to the move list behind any captures we did not search. *
 *                                                          *
 ************************************************************
 */
    case GENERATE_ALL_MOVES:
      /* noncaptures are appended after the capture list; resetting "last"
         to the list start lets deferred losing captures be retried too */
      tree->last[ply] = GenerateNoncaptures(tree, ply, wtm, tree->last[ply]);
      tree->next_status[ply].phase = REMAINING_MOVES;
      tree->next_status[ply].last = tree->last[ply - 1];
      /* fall through */
/*
 ************************************************************
 *                                                          *
 *  Then we try the rest of the set of moves, but we use    *
 *  Exclude() function to skip any moves we have already    *
 *  searched (hash or killers).                             *
 *                                                          *
 ************************************************************
 */
    case REMAINING_MOVES:
      for (; tree->next_status[ply].last < tree->last[ply];
          tree->next_status[ply].last++)
        if (*tree->next_status[ply].last) {     /* skip zeroed slots */
          if (!Exclude(tree, ply, *tree->next_status[ply].last)) {
            tree->curmv[ply] = *tree->next_status[ply].last++;
            return REMAINING_MOVES;
          }
        }
      return NONE;
    default:
      Print(4095, "oops! next_status.phase is bad! [normal %d]\n",
          tree->next_status[ply].phase);
  }
  return NONE;
}
||
340 | |||
341 | /* last modified 05/15/14 */ |
||
342 | /* |
||
343 | ******************************************************************************* |
||
344 | * * |
||
345 | * NextRootMove() is used to select the next move from the root move list. * |
||
346 | * * |
||
347 | ******************************************************************************* |
||
348 | */ |
||
349 | int NextRootMove(TREE * RESTRICT tree, TREE * RESTRICT mytree, int wtm) { |
||
350 | int which, i; |
||
351 | uint64_t total_nodes; |
||
352 | |||
353 | /* |
||
354 | ************************************************************ |
||
355 | * * |
||
356 | * First, we check to see if we only have one legal move. * |
||
357 | * If so, and we are not pondering, we stop after a short * |
||
358 | * search, saving time, but making sure we have something * |
||
359 | * to ponder. * |
||
360 | * * |
||
361 | ************************************************************ |
||
362 | */ |
||
363 | if (!annotate_mode && !pondering && !booking && n_root_moves == 1 && |
||
364 | iteration_depth > 4) { |
||
365 | abort_search = 1; |
||
366 | return NONE; |
||
367 | } |
||
368 | /* |
||
369 | ************************************************************ |
||
370 | * * |
||
371 | * For the moves at the root of the tree, the list has * |
||
372 | * already been generated and sorted. * |
||
373 | * * |
||
374 | * We simply have to find the first move that has a zero * |
||
375 | * "already searched" flag and choose that one. We do set * |
||
376 | * the "already searched" flag for this move before we * |
||
377 | * return so that it won't be searched again in another * |
||
378 | * thread. * |
||
379 | * * |
||
380 | ************************************************************ |
||
381 | */ |
||
382 | for (which = 0; which < n_root_moves; which++) |
||
383 | if (!(root_moves[which].status & 8)) { |
||
384 | if (search_move) { |
||
385 | if (root_moves[which].move != search_move) { |
||
386 | root_moves[which].status |= 8; |
||
387 | continue; |
||
388 | } |
||
389 | } |
||
390 | tree->curmv[1] = root_moves[which].move; |
||
391 | root_moves[which].status |= 8; |
||
392 | /* |
||
393 | ************************************************************ |
||
394 | * * |
||
395 | * We have found a move to search. If appropriate, we * |
||
396 | * display this move, along with the time and information * |
||
397 | * such as which move this is in the list and how many * |
||
398 | * are left to search before this iteration is done, and * |
||
399 | * a "status" character that shows the state of the * |
||
400 | * current search ("?" means we are pondering, waiting on * |
||
401 | * a move to be entered, "*" means we are searching and * |
||
402 | * our clock is running). We also display the NPS for * |
||
403 | * the search, simply for information about how fast the * |
||
404 | * machine is running. * |
||
405 | * * |
||
406 | ************************************************************ |
||
407 | */ |
||
408 | if (tree->nodes_searched > noise_level && display_options & 32) { |
||
409 | Lock(lock_io); |
||
410 | sprintf_s(mytree->remaining_moves_text, sizeof (mytree->remaining_moves_text), "%d/%d", which + 1, // Pierre-Marie Baty -- use safe version |
||
411 | n_root_moves); |
||
412 | end_time = ReadClock(); |
||
413 | if (pondering) |
||
414 | printf(" %2i %s%7s? ", iteration_depth, |
||
415 | Display2Times(end_time - start_time), |
||
416 | mytree->remaining_moves_text); |
||
417 | else |
||
418 | printf(" %2i %s%7s* ", iteration_depth, |
||
419 | Display2Times(end_time - start_time), |
||
420 | mytree->remaining_moves_text); |
||
421 | if (display_options & 32 && display_options & 64) |
||
422 | printf("%d. ", move_number); |
||
423 | if (display_options & 32 && display_options & 64 && Flip(wtm)) |
||
424 | printf("... "); |
||
425 | strcpy_s(mytree->root_move_text, sizeof (mytree->root_move_text), OutputMove(tree, tree->curmv[1], 1, // Pierre-Marie Baty -- use safe version |
||
426 | wtm)); |
||
427 | total_nodes = block[0]->nodes_searched; |
||
428 | for (i = 1; i < MAX_BLOCKS; i++) |
||
429 | if (block[i] && block[i]->used) |
||
430 | total_nodes += block[i]->nodes_searched; |
||
431 | nodes_per_second = (unsigned int) (total_nodes * 100 / Max(end_time - start_time, 1)); // Pierre-Marie Baty -- added type cast |
||
432 | i = strlen(mytree->root_move_text); |
||
433 | i = (i < 8) ? i : 8; |
||
434 | strncat_s(mytree->root_move_text, sizeof (mytree->root_move_text), " ", 8 - i); // Pierre-Marie Baty -- use safe version |
||
435 | printf("%s", mytree->root_move_text); |
||
436 | printf("(%snps) \r", DisplayKMB(nodes_per_second)); |
||
437 | fflush(stdout); |
||
438 | Unlock(lock_io); |
||
439 | } |
||
440 | /* |
||
441 | ************************************************************ |
||
442 | * * |
||
443 | * Bit of a tricky exit. If the move is not flagged as * |
||
444 | * "OK to search in parallel or reduce" then we return * |
||
445 | * "HASH_MOVE" which will prevent Search() from reducing * |
||
446 | * the move (LMR). Otherwise we return the more common * |
||
447 | * "REMAINING_MOVES" value which allows LMR to be used on * |
||
448 | * those root moves. * |
||
449 | * * |
||
450 | ************************************************************ |
||
451 | */ |
||
452 | if ((root_moves[which].status & 4) == 0) |
||
453 | return HASH_MOVE; |
||
454 | else |
||
455 | return REMAINING_MOVES; |
||
456 | } |
||
457 | return NONE; |
||
458 | } |
||
459 | |||
460 | /* last modified 05/15/14 */ |
||
461 | /* |
||
462 | ******************************************************************************* |
||
463 | * * |
||
464 | * NextRootMoveParallel() is used to determine if the next root move can be * |
||
465 | * searched in parallel. If it appears to Iterate() that one of the moves * |
||
466 | * following the first move might become the best move, the 'no parallel' * |
||
467 | * flag is set to speed up finding the new best move. This flag is set if * |
||
468 | * this root move has an "age" value > 0 which indicates this move was the * |
||
469 | * "best move" within the previous 3 search depths. We want to search such * |
||
470 | * moves as quickly as possible, prior to starting a parallel search at the * |
||
471 | * root, in case this move once again becomes the best move and provides a * |
||
472 | * better alpha bound. * |
||
473 | * * |
||
474 | ******************************************************************************* |
||
475 | */ |
||
476 | int NextRootMoveParallel(void) { |
||
477 | int which; |
||
478 | |||
479 | /* |
||
480 | ************************************************************ |
||
481 | * * |
||
482 | * Here we simply check the root_move status flag that is * |
||
483 | * set in Iterate() after each iteration is completed. A * |
||
484 | * value of "1" indicates this move has to be searched by * |
||
485 | * all processors, splitting must wait until after all * |
||
486 | * such moves have been searched individually. * |
||
487 | * * |
||
488 | ************************************************************ |
||
489 | */ |
||
490 | for (which = 0; which < n_root_moves; which++) |
||
491 | if (!(root_moves[which].status & 8)) |
||
492 | break; |
||
493 | if (which < n_root_moves && root_moves[which].status & 4) |
||
494 | return 1; |
||
495 | return 0; |
||
496 | } |
||
497 | |||
498 | /* last modified 05/15/14 */ |
||
499 | /* |
||
500 | ******************************************************************************* |
||
501 | * * |
||
502 | * Exclude() searches the list of moves searched prior to generating a move * |
||
503 | * list to exclude those that were searched via a hash table best move or * |
||
504 | * through the killer moves for the current ply and two plies back. * |
||
505 | * * |
||
506 | * The variable next_status[].num_excluded is the total number of non- * |
||
507 | * generated moves we searched. next_status[].remaining is initially set to * |
||
508 | * num_excluded, but each time an excluded move is found, the counter is * |
||
509 | * decremented. Once all excluded moves have been found, we avoid running * |
||
510 | * through the list of excluded moves on each call and simply return. * |
||
511 | * * |
||
512 | ******************************************************************************* |
||
513 | */ |
||
514 | int Exclude(TREE * RESTRICT tree, int ply, int move) { |
||
515 | int i; |
||
516 | |||
517 | if (tree->next_status[ply].num_excluded) |
||
518 | for (i = 0; i < tree->next_status[ply].num_excluded; i++) |
||
519 | if (move == tree->next_status[ply].excluded_moves[i]) { |
||
520 | tree->next_status[ply].remaining--; |
||
521 | return 1; |
||
522 | } |
||
523 | return 0; |
||
524 | } |