1"""
2A Python implementation of the W-Model benchmark problem.
4This is a tunable benchmark problem for bit-string based search spaces.
5It exhibits ruggedness, epistasis, deceptiveness, and (uniform) neutrality in
6a tunable fashion.
7In [1], a set of 19 diverse instances of the W-Model are selected for
8algorithm benchmarking. These instances are provided via
9:meth:`~moptipy.examples.bitstrings.w_model.WModel.default_instances`.
111. Thomas Weise, Yan Chen, Xinlu Li, and Zhize Wu. Selecting a diverse set of
12 benchmark instances from a tunable model problem for black-box discrete
13 optimization algorithms. *Applied Soft Computing Journal (ASOC)*, 92:106269,
14 June 2020. doi: https://doi.org/10.1016/j.asoc.2020.106269
152. Thomas Weise and Zijun Wu. Difficult Features of Combinatorial Optimization
16 Problems and the Tunable W-Model Benchmark Problem for Simulating them. In
17 *Black Box Discrete Optimization Benchmarking (BB-DOB) Workshop* of
18 *Companion Material Proceedings of the Genetic and Evolutionary Computation
19 Conference (GECCO 2018),* July 15th-19th 2018, Kyoto, Japan,
20 pages 1769-1776, ISBN 978-1-4503-5764-7. ACM.
21 doi: https://doi.org/10.1145/3205651.3208240
223. Thomas Weise, Stefan Niemczyk, Hendrik Skubch, Roland Reichle, and
23 Kurt Geihs. A Tunable Model for Multi-Objective, Epistatic, Rugged, and
24 Neutral Fitness Landscapes. In Maarten Keijzer, Giuliano Antoniol,
25 Clare Bates Congdon, Kalyanmoy Deb, Benjamin Doerr, Nikolaus Hansen,
26 John H. Holmes, Gregory S. Hornby, Daniel Howard, James Kennedy,
27 Sanjeev P. Kumar, Fernando G. Lobo, Julian Francis Miller, Jason H. Moore,
28 Frank Neumann, Martin Pelikan, Jordan B. Pollack, Kumara Sastry,
29 Kenneth Owen Stanley, Adrian Stoica, El-Ghazali, and Ingo Wegener, editors,
30 *Proceedings of the 10th Annual Conference on Genetic and Evolutionary
31 Computation (GECCO'08)*, pages 795-802, July 12-16, 2008, Atlanta, GA, USA.
32 ISBN: 978-1-60558-130-9, New York, NY, USA: ACM Press.
33 doi: https://doi.org/10.1145/1389095.1389252
344. Carola Doerr, Furong Ye, Naama Horesh, Hao Wang, Ofer M. Shir, Thomas Bäck.
35 Benchmarking Discrete Optimization Heuristics with IOHprofiler. *Applied
36 Soft Computing* 88:106027, March 2020,
37 doi: https://doi.org/10.1016/j.asoc.2019.106027.
385. Thomas Weise, Zhize Wu, Xinlu Li, Yan Chen, and Jörg Lässig. Frequency
39 Fitness Assignment: Optimization without Bias for Good Solutions can be
40 Efficient. *IEEE Transactions on Evolutionary Computation (TEVC)*.
41 27(4):980-992. August 2023.
42 doi: https://doi.org/10.1109/TEVC.2022.3191698
43"""

from array import array
from math import sqrt
from typing import Callable, Final, Iterator, cast

import numba  # type: ignore
import numpy as np
from pycommons.types import check_int_range, type_error

from moptipy.examples.bitstrings.bitstring_problem import BitStringProblem
from moptipy.utils.logger import KeyValueLogSection
from moptipy.utils.nputils import DEFAULT_BOOL
from moptipy.utils.strings import sanitize_name


@numba.njit(nogil=True, cache=True, inline="always")
def w_model_f(x: np.ndarray) -> int:
    """
    Compute the basic Hamming distance of the W-Model to the optimal string.

    The optimal string in the W-Model is `0101010101...`. Here we compute
    the objective value of a candidate solution, i.e., its Hamming distance
    to the string `010101010101...`. This is the basic objective function
    of the problem. It can be applied either directly or on top of
    transformations such as the neutrality or epistasis mappings. Its
    result can be transformed by a ruggedness permutation to introduce
    ruggedness into the problem.

    :param x: the bit string to evaluate
    :return: the Hamming distance to the string of alternating zeros and
        ones that starts with `0`

    >>> w_model_f(np.array([False]))
    0
    >>> w_model_f(np.array([True]))
    1
    >>> w_model_f(np.array([False, True]))
    0
    >>> w_model_f(np.array([False, False]))
    1
    >>> w_model_f(np.array([True, True]))
    1
    >>> w_model_f(np.array([True, False]))
    2
    >>> w_model_f(np.array([False, True, False]))
    0
    >>> w_model_f(np.array([True, False, True]))
    3
    >>> w_model_f(np.array([True, True, True]))
    2
    >>> w_model_f(np.array([True, True, False]))
    1
    >>> w_model_f(np.array([False, True, True]))
    1
    >>> w_model_f(np.array([False, True, False, True, False, True, False]))
    0
    >>> w_model_f(np.array([False, True, False, True, False, True, True]))
    1
    >>> w_model_f(np.array([True, False, True, False, True, False, True]))
    7
    """
    result = 0
    for i, xx in enumerate(x):
        if xx == ((i & 1) == 0):
            result += 1
    return result
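
# A vectorized NumPy equivalent of `w_model_f`, shown as a commented sketch
# for readers who prefer array notation (not part of the module's API; the
# Numba-compiled loop above remains the actual implementation):
#
#     def w_model_f_vectorized(x: np.ndarray) -> int:
#         # a mismatch with the optimum `0101...` occurs exactly where
#         # x[i] is True at an even index or False at an odd index
#         return int(np.sum(x == ((np.arange(len(x)) & 1) == 0)))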


@numba.njit(nogil=True, cache=True, inline="always")
def w_model_neutrality(x_in: np.ndarray, mu: int, x_out: np.ndarray) -> None:
    """
    Perform the neutrality transformation.

    The neutrality transformation is the first layer of the W-Model. It
    introduces uniform redundancy (or rather, removes it during the
    mapping) by reducing the size of a bit string by factor `mu`.

    The input array `x_in` is split into consecutive bit groups of length
    `mu`. Each such group is translated to one bit in the output string
    `x_out`. This bit becomes `1` if the majority of the bits in the input
    group are `1`. Otherwise, it becomes `0`.

    Notice that `len(x_in) >= mu * len(x_out)` must hold.

    :param x_in: the input array
    :param mu: the number of bits to merge
    :param x_out: the output array

    >>> xin = np.array([0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0,
    ...                 0, 1, 1, 1, 0, 1, 0, 0, 0, 0], bool)
    >>> xout = np.empty(len(xin) // 2, bool)
    >>> w_model_neutrality(xin, 2, xout)
    >>> print("".join('1' if xx else '0' for xx in xout))
    1011001110
    >>> xin = np.array([0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0,
    ...                 0, 1, 1, 1, 0, 1, 0, 0, 0, 0], bool)
    >>> w_model_neutrality(xin, 2, xout)
    >>> print("".join('1' if xx else '0' for xx in xout))
    1111001110
    """
    threshold_for_1: Final[int] = (mu >> 1) + (mu & 1)
    flush: int = mu
    i_in: int = 0
    i_out: int = 0
    ones: int = 0
    length_in: Final[int] = len(x_in)
    length_out: Final[int] = len(x_out)
    while (i_in < length_in) and (i_out < length_out):
        if x_in[i_in]:
            ones += 1
        i_in += 1
        if i_in >= flush:
            flush += mu
            x_out[i_out] = (ones >= threshold_for_1)
            i_out += 1
            ones = 0
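
# For intuition, a plain-Python sketch of the same majority rule (assuming
# `len(x_in) >= mu * len(x_out)`; ties in even-sized groups count as `1`,
# as in the doctest above, where the pair `01` maps to `1`):
#
#     for i in range(len(x_out)):
#         group = x_in[i * mu:(i + 1) * mu]
#         x_out[i] = (2 * int(np.sum(group))) >= mu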


@numba.njit(nogil=True, cache=True, inline="always")
def __w_model_epistasis(x_in: np.ndarray, start: int,
                        nu: int, x_out: np.ndarray) -> None:
    """
    Perform the transformation of an epistasis chunk.

    :param x_in: the input string
    :param start: the start index
    :param nu: the epistasis parameter
    :param x_out: the output string

    >>> xout = np.empty(4, bool)
    >>> __w_model_epistasis(np.array([0, 0, 0, 0], bool), 0, 4, xout)
    >>> print("".join('1' if xx else '0' for xx in xout))
    0000
    >>> __w_model_epistasis(np.array([0, 0, 0, 1], bool), 0, 4, xout)
    >>> print("".join('1' if xx else '0' for xx in xout))
    1101
    >>> __w_model_epistasis(np.array([0, 0, 1, 0], bool), 0, 4, xout)
    >>> print("".join('1' if xx else '0' for xx in xout))
    1011
    >>> __w_model_epistasis(np.array([0, 1, 0, 0], bool), 0, 4, xout)
    >>> print("".join('1' if xx else '0' for xx in xout))
    0111
    >>> __w_model_epistasis(np.array([1, 0, 0, 0], bool), 0, 4, xout)
    >>> print("".join('1' if xx else '0' for xx in xout))
    1111
    >>> __w_model_epistasis(np.array([1, 1, 1, 1], bool), 0, 4, xout)
    >>> print("".join('1' if xx else '0' for xx in xout))
    1110
    >>> __w_model_epistasis(np.array([0, 1, 1, 1], bool), 0, 4, xout)
    >>> print("".join('1' if xx else '0' for xx in xout))
    0001
    >>> __w_model_epistasis(np.array([1, 0, 1, 1], bool), 0, 4, xout)
    >>> print("".join('1' if xx else '0' for xx in xout))
    1001
    >>> __w_model_epistasis(np.array([1, 1, 0, 1], bool), 0, 4, xout)
    >>> print("".join('1' if xx else '0' for xx in xout))
    0101
    >>> __w_model_epistasis(np.array([1, 1, 1, 0], bool), 0, 4, xout)
    >>> print("".join('1' if xx else '0' for xx in xout))
    0011
    >>> __w_model_epistasis(np.array([0, 0, 1, 1], bool), 0, 4, xout)
    >>> print("".join('1' if xx else '0' for xx in xout))
    0110
    >>> __w_model_epistasis(np.array([0, 1, 0, 1], bool), 0, 4, xout)
    >>> print("".join('1' if xx else '0' for xx in xout))
    1010
    >>> __w_model_epistasis(np.array([0, 1, 1, 0], bool), 0, 4, xout)
    >>> print("".join('1' if xx else '0' for xx in xout))
    1100
    >>> __w_model_epistasis(np.array([1, 0, 0, 1], bool), 0, 4, xout)
    >>> print("".join('1' if xx else '0' for xx in xout))
    0010
    >>> __w_model_epistasis(np.array([1, 0, 1, 0], bool), 0, 4, xout)
    >>> print("".join('1' if xx else '0' for xx in xout))
    0100
    >>> __w_model_epistasis(np.array([1, 1, 0, 0], bool), 0, 4, xout)
    >>> print("".join('1' if xx else '0' for xx in xout))
    1000
    """
    end: Final[int] = (start + nu) - 1
    flip: Final[bool] = x_in[start]
    i: int = end
    skip: int = start
    while i >= start:
        result: bool = flip
        j: int = end
        while j > start:
            if j != skip:
                result ^= x_in[j]
            j -= 1
        x_out[i] = result
        skip -= 1
        if skip < start:
            skip = end
        i -= 1
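
# A commented sanity sketch: the chunk mapping above is a bijection on the
# 2**nu bit patterns of a chunk, as the nu=4 doctests already enumerate.
# This can be verified programmatically, e.g.:
#
#     seen = set()
#     for bits in range(1 << 4):
#         x = np.array([(bits >> k) & 1 for k in range(4)], bool)
#         out = np.empty(4, bool)
#         __w_model_epistasis(x, 0, 4, out)
#         seen.add(tuple(bool(b) for b in out))
#     assert len(seen) == 1 << 4  # all 16 outputs are distinct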


@numba.njit(nogil=True, cache=True, inline="always")
def w_model_epistasis(x_in: np.ndarray, nu: int, x_out: np.ndarray) -> None:
    """
    Perform the epistasis transformation.

    The input string is processed in consecutive chunks of `nu` bits, each
    of which is transformed by the internal chunk mapping. A trailing
    remainder of fewer than `nu` bits is transformed as one smaller chunk.

    :param x_in: the input string
    :param nu: the epistasis parameter
    :param x_out: the output string
    """
    total_len: Final[int] = len(x_in)
    end: Final[int] = total_len - nu
    i: int = 0
    while i <= end:
        __w_model_epistasis(x_in, i, nu, x_out)
        i += nu
    if i < total_len:
        __w_model_epistasis(x_in, i, total_len - i, x_out)


@numba.njit(nogil=True, cache=True, inline="always")
def w_model_max_gamma(nopt: int) -> int:
    """
    Compute the maximum gamma value for a given length `nopt`.

    :param nopt: the length of the optimal bit string
    :return: the maximum gamma value
    """
    return int((nopt * (nopt - 1)) >> 1)
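
# Note: the maximum gamma equals "nopt choose 2", the number of unordered
# index pairs. For example, w_model_max_gamma(6) == 15 and
# w_model_max_gamma(10) == 45.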


@numba.njit(nogil=False, cache=True, inline="always")
def w_model_create_ruggedness_permutation(
        gamma: int, nopt: int, r: array) -> None:
    """
    Create the raw ruggedness array.

    This function creates the ruggedness permutation. For increasing raw
    `gamma`, groups of rugged permutations alternate with deceptive ones.

    :param gamma: the parameter for the ruggedness
    :param nopt: the length of the optimal string
    :param r: the array to receive the permutation

    >>> r = array('i', range(11))
    >>> w_model_create_ruggedness_permutation(0, 10, r)
    >>> "".join(str(xx) for xx in r)
    '012345678910'
    >>> r = array('i', range(7))
    >>> w_model_create_ruggedness_permutation(9, 6, r)
    >>> r[3]
    3
    >>> r[6]
    5
    """
    max_gamma: Final[int] = w_model_max_gamma(nopt)
    start: int = 0 if gamma <= 0 else (
        nopt - 1 - int(0.5 + sqrt(0.25 + ((max_gamma - gamma) << 1))))
    k: int = 0
    j: int = 1
    while j <= start:
        if (j & 1) != 0:
            r[j] = nopt - k
        else:
            k += 1
            r[j] = k
        j += 1

    while j <= nopt:
        k += 1
        r[j] = (nopt - k) if ((start & 1) != 0) else k
        j += 1

    upper: Final[int] = ((gamma - max_gamma) + (
        ((nopt - start - 1) * (nopt - start)) >> 1))
    j -= 1
    i: int = 1
    while i <= upper:
        j -= 1
        r[j], r[nopt] = r[nopt], r[j]
        i += 1
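
# A worked example with the raw gamma, extending the second doctest above:
# for gamma=9 and nopt=6, the full permutation is
#
#     r = array("i", range(7))
#     w_model_create_ruggedness_permutation(9, 6, r)
#     list(r)  # -> [0, 6, 4, 3, 2, 1, 5]
#
# Index 0 is never written, so r[0] == 0 and the optimum (with basic
# objective value 0) keeps objective value 0 after the permutation.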


@numba.njit(nogil=True, cache=True, inline="always")
def w_model_ruggedness_translate(gamma: int, nopt: int) -> int:
    """
    Translate the gamma values such that deceptive follows rugged.

    If the ruggedness permutations are created from raw gamma values, then
    rugged and deceptive permutations alternate as gamma rises. This
    function translates the gamma parameter such that first all the rugged
    permutations come (with rising degree of ruggedness) and then the
    deceptive ones.

    :param gamma: the parameter for the ruggedness
    :param nopt: the length of the optimal string
    :returns: the translated gamma value

    >>> w_model_ruggedness_translate(12, 6)
    9
    >>> w_model_ruggedness_translate(34, 25)
    57
    >>> w_model_ruggedness_translate(0, 5)
    0
    >>> w_model_ruggedness_translate(1, 5)
    1
    >>> w_model_ruggedness_translate(2, 5)
    2
    >>> w_model_ruggedness_translate(3, 5)
    3
    >>> w_model_ruggedness_translate(4, 5)
    4
    >>> w_model_ruggedness_translate(5, 5)
    8
    >>> w_model_ruggedness_translate(6, 5)
    9
    >>> w_model_ruggedness_translate(7, 5)
    10
    >>> w_model_ruggedness_translate(8, 5)
    7
    >>> w_model_ruggedness_translate(9, 5)
    6
    >>> w_model_ruggedness_translate(10, 5)
    5
    """
    if gamma <= 0:
        return 0

    last_upper: int = (nopt >> 1) * ((nopt + 1) >> 1)
    if gamma <= last_upper:
        j = int(((nopt + 2) * 0.5) - sqrt(
            (((nopt * nopt) * 0.25) + 1) - gamma))
        k = ((gamma - ((nopt + 2) * j)) + (j * j) + nopt)
        return int((k + 1 + ((((nopt + 2) * j) - (j * j) - nopt - 1) << 1))
                   - (j - 1))

    j = int((((nopt % 2) + 1) * 0.5) + sqrt(
        (((1 - (nopt % 2)) * 0.25) + gamma) - 1 - last_upper))
    k = gamma - (((j - (nopt % 2)) * (j - 1)) + 1 + last_upper)
    return int(w_model_max_gamma(nopt) - k - ((2 * j * j) - j)
               - ((nopt % 2) * ((-2 * j) + 1)))
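
# Sanity note: the translation is a bijection on {0, ..., max_gamma}. For
# nopt=5 (max_gamma == 10), the doctests above map the gammas 0..10 to
# 0, 1, 2, 3, 4, 8, 9, 10, 7, 6, 5, which is a permutation of 0..10, so
# every raw ruggedness permutation stays reachable under the reordering.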


@numba.njit(nogil=True, cache=True, inline="always")
def _w_model(x_in: np.ndarray, m: int, m_out: np.ndarray | None,
             nu: int, nu_out: np.ndarray | None, r: array | None) -> int:
    """
    Compute the value of the multi-stage W-Model.

    :param x_in: the input array
    :param m: the neutrality level
    :param m_out: the temporary variable for the de-neutralized `x_in`
    :param nu: the epistasis level
    :param nu_out: the temporary variable for the epistasis-transformed
        string
    :param r: the ruggedness transform, if any
    :returns: the objective value
    """
    neutral: np.ndarray
    if m_out is None:
        neutral = x_in
    else:
        neutral = m_out
        w_model_neutrality(x_in, m, neutral)
    epi: np.ndarray
    if nu_out is None:
        epi = neutral
    else:
        epi = nu_out
        w_model_epistasis(neutral, nu, epi)
    f = w_model_f(epi)
    if r is None:
        return int(f)
    return r[f]
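
# End-to-end usage sketch (hypothetical parameter values; the buffers and
# the ruggedness table are normally managed by the `WModel` class below):
#
#     wm = WModel(nopt=10, m=2, nu=4, gamma=10)
#     x = np.zeros(20, DEFAULT_BOOL)  # n = nopt * m = 20 bits
#     y = wm.evaluate(x)              # neutrality -> epistasis -> f -> r[f]
#     assert 0 <= y <= wm.upper_bound()  # objective lies in 0..nopt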


class WModel(BitStringProblem):
    """The tunable W-Model benchmark problem."""

    def __init__(self, nopt: int, m: int = 1, nu: int = 2,
                 gamma: int = 0, name: str | None = None) -> None:
        """
        Initialize a W-Model instance.

        :param nopt: the length of the optimal string
        :param m: the neutrality parameter
        :param nu: the epistasis parameter
        :param gamma: the ruggedness parameter
        :param name: the (optional) special name of this instance
        """
        super().__init__(nopt * m)
        #: the length of the optimal string
        self.nopt: Final[int] = check_int_range(nopt, "nopt", 2)
        #: the neutrality parameter
        self.m: Final[int] = check_int_range(m, "m", 1)
        #: the internal buffer for de-neutralization
        self.__m_out: Final[np.ndarray | None] = None if m < 2 \
            else np.empty(nopt, DEFAULT_BOOL)
        #: the epistasis parameter
        self.nu: Final[int] = check_int_range(nu, "nu", 2, nopt)
        #: the normalized epistasis parameter
        self.nu1: Final[float] = (nu - 2) / (nopt - 2)
        #: the internal buffer for the epistasis transformation
        self.__nu_out: Final[np.ndarray | None] = None if nu <= 2 \
            else np.empty(nopt, DEFAULT_BOOL)
        max_gamma: Final[int] = w_model_max_gamma(nopt)
        #: the ruggedness parameter
        self.gamma: Final[int] = check_int_range(gamma, "gamma", 0, max_gamma)
        #: the normalized ruggedness parameter
        self.gamma1: Final[float] = gamma / max_gamma
        #: the translated gamma parameter
        self.gamma_prime: Final[int] = w_model_ruggedness_translate(
            gamma, nopt)
        r: array | None = None  # the ruggedness table
        if gamma > 0:
            r = array("i", range(nopt + 1))
            w_model_create_ruggedness_permutation(self.gamma_prime, nopt, r)
        #: the ruggedness table
        self.__r: Final[array | None] = r

        if name is None:
            name = f"wmodel_{self.nopt}_{self.m}_{self.nu}_{self.gamma}"
        else:
            if not isinstance(name, str):
                raise type_error(name, "name", str)
            if (len(name) <= 0) or (sanitize_name(name) != name):
                raise ValueError(f"invalid name: {name!r}")
        #: the name of this W-Model instance
        self.name: Final[str] = name

    def upper_bound(self) -> int:
        """
        Get the upper bound of the objective value.

        The worst possible objective value of the W-Model equals `nopt`,
        the length of the optimal string, not the length `n = nopt * m`
        of the bit strings themselves.

        :return: the length `nopt` of the optimal string

        >>> WModel(7, 6, 4, 4).upper_bound()
        7
        """
        return self.nopt

    def evaluate(self, x: np.ndarray) -> int:
        """
        Evaluate a solution to the W-Model.

        :param x: the bit string to evaluate
        :returns: the value of the W-Model for the string
        """
        return _w_model(x, self.m, self.__m_out, self.nu,
                        self.__nu_out, self.__r)

    def __str__(self) -> str:
        """
        Get the name of the W-Model instance.

        :returns: the name of the W-Model instance
        """
        return self.name

    def log_parameters_to(self, logger: KeyValueLogSection) -> None:
        """
        Log all parameters of this component as key-value pairs.

        :param logger: the logger for the parameters

        >>> from moptipy.utils.logger import InMemoryLogger
        >>> with InMemoryLogger() as l:
        ...     with l.key_values("C") as kv:
        ...         WModel(6, 2, 4, 7).log_parameters_to(kv)
        ...     text = l.get_log()
        >>> text[1]
        'name: wmodel_6_2_4_7'
        >>> text[3]
        'lowerBound: 0'
        >>> text[4]
        'upperBound: 6'
        >>> text[5]
        'n: 12'
        >>> text[6]
        'nopt: 6'
        >>> text[7]
        'm: 2'
        >>> text[8]
        'nu: 4'
        >>> text[9]
        'nu1: 0.5'
        >>> text[11]
        'gamma: 7'
        >>> text[12]
        'gamma1: 0.4666666666666667'
        >>> text[14]
        'gammaPrime: 11'
        >>> len(text)
        16
        """
        super().log_parameters_to(logger)
        logger.key_value("nopt", self.nopt)
        logger.key_value("m", self.m)
        logger.key_value("nu", self.nu)
        logger.key_value("nu1", self.nu1)
        logger.key_value("gamma", self.gamma)
        logger.key_value("gamma1", self.gamma1)
        logger.key_value("gammaPrime", self.gamma_prime)

    @classmethod
    def default_instances(
            cls: type, scale_min: int = 16, scale_max: int = 256) \
            -> Iterator[Callable[[], "WModel"]]:
        """
        Get the 19 default instances of the W-Model.

        :param scale_min: the minimum permitted scale
        :param scale_max: the maximum permitted scale
        :returns: an `Iterator` that provides callables constructing the
            19 default instances of the W-Model

        >>> len(list(WModel.default_instances()))
        19

        >>> [x() for x in WModel.default_instances()]
        [wmodel_1, wmodel_2, wmodel_3, wmodel_4, wmodel_5, wmodel_6, \
wmodel_7, wmodel_8, wmodel_9, wmodel_10, wmodel_11, wmodel_12, \
wmodel_13, wmodel_14, wmodel_15, wmodel_16, wmodel_17, wmodel_18, \
wmodel_19]

        >>> len(list(WModel.default_instances(scale_max=200)))
        18

        >>> len(list(WModel.default_instances(scale_min=40)))
        14
        """
        check_int_range(scale_max, "scale_max", check_int_range(
            scale_min, "scale_min", 1, 1_000_000_000) + 1, 1_000_000_000)
        return (cast("Callable[[], WModel]",
                     lambda a=iid, b=z[0], c=z[1], d=z[2], g=z[3]:
                     WModel(b, c, d, g, f"wmodel_{a + 1}"))
                for iid, z in enumerate([
                    (10, 2, 6, 10), (10, 2, 6, 18), (16, 1, 5, 72),
                    (16, 3, 9, 72), (25, 1, 23, 90), (32, 1, 2, 397),
                    (32, 4, 11, 0), (32, 4, 14, 0), (32, 4, 8, 128),
                    (50, 1, 36, 245), (50, 2, 21, 256), (50, 3, 16, 613),
                    (64, 2, 32, 256), (64, 3, 21, 16), (64, 3, 21, 256),
                    (64, 3, 21, 403), (64, 4, 52, 2), (75, 1, 60, 16),
                    (75, 2, 32, 4)]) if scale_min <= z[0] * z[1] <= scale_max)
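
# Usage sketch for the default benchmark set (a hypothetical experiment
# loop; each callable constructs one of the 19 default instances):
#
#     for factory in WModel.default_instances():
#         problem = factory()  # e.g., wmodel_1 is WModel(10, 2, 6, 10, ...)
#         print(problem, problem.upper_bound())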