diff --git a/Client/worker.py b/Client/worker.py
index 7096dd32..35a7b847 100644
--- a/Client/worker.py
+++ b/Client/worker.py
@@ -59,7 +59,7 @@

 ## Basic configuration of the Client. These timeouts can be changed at will

-CLIENT_VERSION   = 42 # Client version to send to the Server
+CLIENT_VERSION   = 43 # Client version to send to the Server
 TIMEOUT_HTTP     = 30 # Timeout in seconds for HTTP requests
 TIMEOUT_ERROR    = 10 # Timeout in seconds when any errors are thrown
 TIMEOUT_WORKLOAD = 30 # Timeout in seconds between workload requests
@@ -331,32 +331,37 @@ def report_results(config, batches):
         'crashes'    : 0, # " disconnect" or "connection stalls"
         'timelosses' : 0, # " loses on time "
         'illegals'   : 0, # " illegal move "
+
+        'spsa_delta' : '', # JSON dump of the delta vector for SPSA, otherwise empty
     }

     for batch in batches:
+        payload['trinomial'  ] = [x+y for x,y in zip(payload['trinomial'  ], batch['trinomial'  ])]
+        payload['pentanomial'] = [x+y for x,y in zip(payload['pentanomial'], batch['pentanomial'])]
+        payload['crashes'    ] += batch['crashes'   ]
+        payload['timelosses' ] += batch['timelosses']
+        payload['illegals'   ] += batch['illegals'  ]
+
+    # Collapse into a JSON friendly format for Django
+    payload['trinomial'  ] = ' '.join(map(str, payload['trinomial'  ]))
+    payload['pentanomial'] = ' '.join(map(str, payload['pentanomial']))

-        payload['trinomial'  ] = [x+y for x,y in zip(payload['trinomial'  ], batch['trinomial'  ])]
-        payload['pentanomial'] = [x+y for x,y in zip(payload['pentanomial'], batch['pentanomial'])]
+    if config.workload['test']['type'] == 'SPSA':

-        payload['crashes'   ] += batch['crashes'   ]
-        payload['timelosses'] += batch['timelosses']
-        payload['illegals'  ] += batch['illegals'  ]
+        # Server expects a delta vector, already ordered by Parameter index
+        ordered_parameters = sorted(config.workload['spsa'].items(), key=lambda kv: kv[1]['index'])
+        ordered_delta      = [0] * len(ordered_parameters)

-        if config.workload['test']['type'] == 'SPSA':
+        for batch in batches:

             # Pairs can be added one at a time, or in bulk
             result = batch['trinomial'][2] - batch['trinomial'][0]

-            # For each param compute the update step for the Server
-            for name, param in config.workload['spsa'].items():
+            for index, (name, param) in enumerate(ordered_parameters):
                 delta = param['r'] * param['c'] * result * param['flip'][batch['runner_idx']]
-                payload['spsa_%s' % (name)] = payload.get('spsa_%s' % (name), 0.0) + delta
-
-    # Collapse into a JSON friendly format for Django
-    payload['trinomial'  ] = ' '.join(map(str, payload['trinomial'  ]))
-    payload['pentanomial'] = ' '.join(map(str, payload['pentanomial']))
+                ordered_delta[index] = ordered_delta[index] + delta

-    print (payload)
+        payload['spsa_delta'] = json.dumps(ordered_delta)

     return ServerReporter.report(config, 'clientSubmitResults', payload)

diff --git a/Config/config.json b/Config/config.json
index eb2dfbf1..1e1ee9e4 100644
--- a/Config/config.json
+++ b/Config/config.json
@@ -1,5 +1,5 @@
 {
-    "client_version" : 42,
+    "client_version" : 43,

     "client_repo_url" : "https://github.com/AndyGrant/OpenBench",
     "client_repo_ref" : "master",
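# --- Illustration (not part of the patch) -------------------------------------
# A minimal sketch of the batched SPSA reporting above: every batch's per-parameter
# update is folded into one list ordered by parameter index, then JSON-dumped into
# payload['spsa_delta']. Parameter names, r/c values, flips and results below are
# made up for illustration, and trinomial is assumed to be [losses, draws, wins].

import json

spsa = {
    'KingSafety' : {'index': 0, 'c': 4.0, 'r': 0.002, 'flip': [ 1]},
    'PawnValue'  : {'index': 1, 'c': 2.0, 'r': 0.004, 'flip': [-1]},
}

batches = [
    {'trinomial': [3, 10, 7], 'runner_idx': 0},
    {'trinomial': [5,  9, 6], 'runner_idx': 0},
]

ordered_parameters = sorted(spsa.items(), key=lambda kv: kv[1]['index'])
ordered_delta      = [0.0] * len(ordered_parameters)

for batch in batches:
    result = batch['trinomial'][2] - batch['trinomial'][0] # wins minus losses for the batch
    for index, (name, param) in enumerate(ordered_parameters):
        ordered_delta[index] += param['r'] * param['c'] * result * param['flip'][batch['runner_idx']]

print(json.dumps(ordered_delta)) # approximately [0.04, -0.04] -- the value sent as spsa_delta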
diff --git a/OpenBench/migrations/0002_spsarun_spsaparameter.py b/OpenBench/migrations/0002_spsarun_spsaparameter.py
new file mode 100644
index 00000000..0aae08cf
--- /dev/null
+++ b/OpenBench/migrations/0002_spsarun_spsaparameter.py
@@ -0,0 +1,47 @@
+# Generated by Django 4.2.1 on 2025-06-10 23:37
+
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('OpenBench', '0001_initial'),
+    ]
+
+    operations = [
+        migrations.CreateModel(
+            name='SPSARun',
+            fields=[
+                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+                ('reporting_type', models.CharField(choices=[('BULK', 'BULK'), ('BATCHED', 'BATCHED')], max_length=16)),
+                ('distribution_type', models.CharField(choices=[('SINGLE', 'SINGLE'), ('MULTIPLE', 'MULTIPLE')], max_length=16)),
+                ('alpha', models.FloatField()),
+                ('gamma', models.FloatField()),
+                ('iterations', models.IntegerField()),
+                ('pairs_per', models.IntegerField()),
+                ('a_ratio', models.FloatField()),
+                ('a_value', models.FloatField()),
+                ('tune', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='spsa_run', to='OpenBench.test')),
+            ],
+        ),
+        migrations.CreateModel(
+            name='SPSAParameter',
+            fields=[
+                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+                ('name', models.CharField(max_length=64)),
+                ('value', models.FloatField()),
+                ('is_float', models.BooleanField()),
+                ('start', models.FloatField()),
+                ('min_value', models.FloatField()),
+                ('max_value', models.FloatField()),
+                ('c_end', models.FloatField()),
+                ('r_end', models.FloatField()),
+                ('c_value', models.FloatField()),
+                ('a_end', models.FloatField()),
+                ('a_value', models.FloatField()),
+                ('spsa_run', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='parameters', to='OpenBench.spsarun')),
+            ],
+        ),
+    ]
diff --git a/OpenBench/migrations/0003_alter_spsaparameter_spsa_run.py b/OpenBench/migrations/0003_alter_spsaparameter_spsa_run.py
new file mode 100644
index 00000000..6e662336
--- /dev/null
+++ b/OpenBench/migrations/0003_alter_spsaparameter_spsa_run.py
@@ -0,0 +1,19 @@
+# Generated by Django 4.2.1 on 2025-06-10 23:48
+
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('OpenBench', '0002_spsarun_spsaparameter'),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='spsaparameter',
+            name='spsa_run',
+            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='parameters', to='OpenBench.spsarun'),
+        ),
+    ]
diff --git a/OpenBench/migrations/0004_spsaparameter_index.py b/OpenBench/migrations/0004_spsaparameter_index.py
new file mode 100644
index 00000000..7f33255c
--- /dev/null
+++ b/OpenBench/migrations/0004_spsaparameter_index.py
@@ -0,0 +1,19 @@
+# Generated by Django 4.2.1 on 2025-06-11 00:09
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('OpenBench', '0003_alter_spsaparameter_spsa_run'),
+    ]
+
+    operations = [
+        migrations.AddField(
+            model_name='spsaparameter',
+            name='index',
+            field=models.IntegerField(default=0),
+            preserve_default=False,
+        ),
+    ]
diff --git a/OpenBench/migrations/0005_remove_spsaparameter_a_end_remove_spsarun_a_value.py b/OpenBench/migrations/0005_remove_spsaparameter_a_end_remove_spsarun_a_value.py
new file mode 100644
index 00000000..03b7a66a
--- /dev/null
+++ b/OpenBench/migrations/0005_remove_spsaparameter_a_end_remove_spsarun_a_value.py
@@ -0,0 +1,21 @@
+# Generated by Django 4.2.1 on 2026-01-26 05:13
+
+from django.db import migrations
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('OpenBench', '0004_spsaparameter_index'),
+    ]
+
+    operations = [
+        migrations.RemoveField(
+            model_name='spsaparameter',
+            name='a_end',
+        ),
+        migrations.RemoveField(
+            model_name='spsarun',
+            name='a_value',
+        ),
+    ]
diff --git a/OpenBench/migrations/0006_auto_20260126_0648.py b/OpenBench/migrations/0006_auto_20260126_0648.py
new file mode 100644
index 00000000..7ebbcf7d
--- /dev/null
+++ b/OpenBench/migrations/0006_auto_20260126_0648.py
@@ -0,0 +1,62 @@
+# Generated by Django 4.2.1 on 2026-01-26 06:48
+
+from django.db import migrations
+
+def forwards(apps, schema_editor):
+
+    Test          = apps.get_model('OpenBench', 'Test')
+    SPSARun       = apps.get_model('OpenBench', 'SPSARun')
+    SPSAParameter = apps.get_model('OpenBench', 'SPSAParameter')
+
+    for test in Test.objects.filter(test_mode='SPSA'):
+
+        if not test.spsa:
+            continue
+
+        try:
+            test.spsa_run
+            continue
+        except SPSARun.DoesNotExist:
+            pass # Need to create the SPSARun
+
+        spsa_run = SPSARun.objects.create(
+            tune              = test,
+            reporting_type    = test.spsa.get('reporting_type', 'BATCHED'),
+            distribution_type = test.spsa.get('distribution_type', 'SINGLE'),
+            alpha             = test.spsa.get('Alpha'),
+            gamma             = test.spsa.get('Gamma'),
+            iterations        = test.spsa.get('iterations'),
+            pairs_per         = test.spsa.get('pairs_per'),
+            a_ratio           = test.spsa.get('A_ratio'),
+        )
+
+        for name, param in test.spsa.get('parameters', {}).items():
+            SPSAParameter.objects.create(
+                spsa_run  = spsa_run,
+                name      = name,
+                index     = param.get('index'),
+                value     = param.get('value'),
+                is_float  = param.get('float'),
+                start     = param.get('start'),
+                min_value = param.get('min'),
+                max_value = param.get('max'),
+                c_end     = param.get('c_end'),
+                r_end     = param.get('r_end'),
+                c_value   = param.get('c'),
+                a_value   = param.get('a'),
+            )
+
+
+def backwards(apps, schema_editor):
+    SPSARun = apps.get_model('OpenBench', 'SPSARun')
+    SPSARun.objects.all().delete()
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('OpenBench', '0005_remove_spsaparameter_a_end_remove_spsarun_a_value'),
+    ]
+
+    operations = [
+        migrations.RunPython(forwards, backwards),
+    ]
diff --git a/OpenBench/migrations/0007_remove_test_spsa.py b/OpenBench/migrations/0007_remove_test_spsa.py
new file mode 100644
index 00000000..15b15197
--- /dev/null
+++ b/OpenBench/migrations/0007_remove_test_spsa.py
@@ -0,0 +1,17 @@
+# Generated by Django 4.2.1 on 2026-01-26 07:08
+
+from django.db import migrations
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('OpenBench', '0006_auto_20260126_0648'),
+    ]
+
+    operations = [
+        migrations.RemoveField(
+            model_name='test',
+            name='spsa',
+        ),
+    ]
diff --git a/OpenBench/models.py b/OpenBench/models.py
index b256b5e2..4539a552 100644
--- a/OpenBench/models.py
+++ b/OpenBench/models.py
@@ -145,7 +145,6 @@ class ScaleMethod(TextChoices):
     currentllr    = FloatField(default=0.0)   # SPRT
     upperllr      = FloatField(default=0.0)   # SPRT
     max_games     = IntegerField(default=0)   # GAMES or DATAGEN
-    spsa          = JSONField(default=dict, blank=True, null=True)         # SPSA
     genfens_args  = CharField(max_length=256, default='', blank=True)      # DATAGEN
     play_reverses = BooleanField(default=False)                            # DATAGEN

@@ -234,3 +233,41 @@ def __str__(self):

     def filename(self):
         return '%s.%s.%s.pgn.bz2' % (self.test_id, self.result_id, self.book_index)
+
+class SPSARun(Model):
+
+    class SPSAReportingType(TextChoices):
+        BULK    = 'BULK'   , 'BULK'
+        BATCHED = 'BATCHED', 'BATCHED'
+
+    class SPSADistributionType(TextChoices):
+        SINGLE   = 'SINGLE'  , 'SINGLE'
+        MULTIPLE = 'MULTIPLE', 'MULTIPLE'
+
+    tune = OneToOneField(Test, on_delete=CASCADE, related_name='spsa_run', null=True, blank=True)
+
+    reporting_type    = CharField(max_length=16, choices=SPSAReportingType.choices)
+    distribution_type = CharField(max_length=16, choices=SPSADistributionType.choices)
+
+    alpha      = FloatField()    # Constants
+    gamma      = FloatField()
+    iterations = IntegerField()
+    pairs_per  = IntegerField()
+    a_ratio    = FloatField()
+
+class SPSAParameter(Model):
+
+    spsa_run = ForeignKey(SPSARun, on_delete=CASCADE, related_name='parameters')
+    name     = CharField(max_length=64)
+    index    = IntegerField()
+    value    = FloatField()      # Only field that changes
+
+    is_float  = BooleanField()   # Constants
+    start     = FloatField()
+    min_value = FloatField()
+    max_value = FloatField()
+    c_end     = FloatField()
+    r_end     = FloatField()
+
+    c_value = FloatField()       # Constants pre-computed for speed
+    a_value = FloatField()
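# --- Illustration (not part of the patch) -------------------------------------
# The SPSAParameter columns c_value and a_value cache the numerators of the SPSA
# gain schedules, so a workload request only has to divide by an iteration-dependent
# term (see create_spsa_run and spsa_param_digest further down). The numbers below
# are made up; only the formulas come from the diff.

alpha, gamma, a_ratio = 0.602, 0.101, 0.1
iterations, pairs_per = 20000, 8
c_end, r_end          = 4.0, 0.002

# Precomputed once, when the tune is created
c_value = c_end * iterations ** gamma                           # numerator of c_k
a_end   = r_end * c_end ** 2
a_value = a_end * (a_ratio * iterations + iterations) ** alpha  # numerator of a_k

# Recovered at assignment time, from the games played so far
games     = 32000
iteration = 1 + games / (pairs_per * 2)
c = max(c_value / iteration ** gamma, 0.50)                     # 0.50 floor is for integer params; floats use 0.00
r = a_value / (a_ratio * iterations + iteration) ** alpha / c ** 2

print(round(c, 4), round(r, 6))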
diff --git a/OpenBench/spsa_utils.py b/OpenBench/spsa_utils.py
new file mode 100644
index 00000000..4f3775ec
--- /dev/null
+++ b/OpenBench/spsa_utils.py
@@ -0,0 +1,190 @@
+# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
+#                                                                               #
+#   OpenBench is a chess engine testing framework authored by Andrew Grant.    #
+#                                                                               #
+#                                                                               #
+#   OpenBench is free software: you can redistribute it and/or modify          #
+#   it under the terms of the GNU General Public License as published by       #
+#   the Free Software Foundation, either version 3 of the License, or          #
+#   (at your option) any later version.                                        #
+#                                                                               #
+#   OpenBench is distributed in the hope that it will be useful,               #
+#   but WITHOUT ANY WARRANTY; without even the implied warranty of             #
+#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the               #
+#   GNU General Public License for more details.                               #
+#                                                                               #
+#   You should have received a copy of the GNU General Public License          #
+#   along with this program. If not, see <http://www.gnu.org/licenses/>.       #
+#                                                                               #
+# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
+
+import numpy as np
+
+from OpenBench.models import SPSARun, SPSAParameter
+
+def spsa_param_digest_headers(workload):
+    # No real arguments are expected, but this is utilized as a template filter
+    return ['Name', 'Curr', 'Start', 'Min', 'Max', 'C', 'C_end', 'R', 'R_end']
+
+def spsa_original_input(workload):
+
+    lines = []
+    for param in workload.spsa_run.parameters.order_by('index'):
+        lines.append(', '.join([
+            param.name,
+            'float' if param.is_float else 'int',
+            str(param.start),
+            str(param.min_value),
+            str(param.max_value),
+            str(param.c_end),
+            str(param.r_end),
+        ]))
+
+    return '\n'.join(lines)
+
+def spsa_optimal_values(workload):
+    return '\n'.join([
+        '%s, %s' % (param.name, param.value if param.is_float else int(round(param.value)))
+        for param in workload.spsa_run.parameters.order_by('index')
+    ])
+
+def create_spsa_run(workload, request):
+
+    alpha      = float(request.POST['spsa_alpha'])
+    gamma      = float(request.POST['spsa_gamma'])
+    a_ratio    = float(request.POST['spsa_A_ratio'])
+    iterations = int(request.POST['spsa_iterations'])
+
+    spsa_run = SPSARun.objects.create(
+        tune              = workload,
+        reporting_type    = request.POST['spsa_reporting_type'],
+        distribution_type = request.POST['spsa_distribution_type'],
+        alpha             = alpha,
+        gamma             = gamma,
+        iterations        = iterations,
+        pairs_per         = int(request.POST['spsa_pairs_per']),
+        a_ratio           = a_ratio,
+    )
+
+    params = []
+    for index, line in enumerate(request.POST['spsa_inputs'].splitlines()):
+
+        name, dtype, value, min_value, max_value, c_end, r_end = map(str.strip, line.split(','))
+
+        c_value = float(c_end) * iterations ** gamma
+        a_end   = float(r_end) * float(c_end) ** 2
+        a_value = float(a_end) * (a_ratio * iterations + iterations) ** alpha
+
+        params.append(SPSAParameter(
+            spsa_run  = spsa_run,
+            name      = name,
+            index     = index,
+            value     = float(value),
+            is_float  = dtype == 'float',
+            start     = float(value),
+            min_value = float(min_value),
+            max_value = float(max_value),
+            c_end     = float(c_end),
+            r_end     = float(r_end),
+            c_value   = c_value,
+            a_value   = a_value,
+        ))
+
+    SPSAParameter.objects.bulk_create(params)
+    return spsa_run
+
+def spsa_param_digest(workload):
+
+    # C & R, as if we were being assigned a workload right now
+    spsa_run      = workload.spsa_run
+    iteration     = 1 + (workload.games / (spsa_run.pairs_per * 2))
+    c_compression = iteration ** spsa_run.gamma
+    r_compression = (spsa_run.a_ratio * spsa_run.iterations + iteration) ** spsa_run.alpha
+
+    digest = []
+    for param in spsa_run.parameters.order_by('index'):
+
+        # C and R if we got a workload right now
+        c    = max(param.c_value / c_compression, 0.00 if param.is_float else 0.50)
+        r    = param.a_value / r_compression / c ** 2
+        fstr = '%.4f' if param.is_float else '%d'
+
+        digest.append([
+            param.name,
+            '%.4f' % (param.value),
+            fstr   % (param.start),
+            fstr   % (param.min_value),
+            fstr   % (param.max_value),
+            '%.4f' % (c),
+            '%.4f' % (param.c_end),
+            '%.4f' % (r),
+            '%.4f' % (param.r_end),
+        ])
+
+    return digest
+
+def spsa_workload_assignment_dict(workload, runner_count):
+
+    if workload.test_mode != 'SPSA':
+        return None
+
+    params = list(workload.spsa_run.parameters.order_by('index'))
+
+    names    = [p.name for p in params]
+    values   = np.array([p.value     for p in params])
+    mins     = np.array([p.min_value for p in params])
+    maxs     = np.array([p.max_value for p in params])
+    a_values = np.array([p.a_value   for p in params])
+    c_values = np.array([p.c_value   for p in params]) # Scaled later
+    is_float = np.array([p.is_float  for p in params])
+
+    # Only use one set of parameters if distribution is SINGLE.
+    # Duplicate the params, even though they are the same, across all
+    # sockets on the machine, in the event of a singular SPSA distribution
+    is_single    = workload.spsa_run.distribution_type == 'SINGLE'
+    permutations = 1 if is_single else runner_count
+    duplicates   = 1 if not is_single else runner_count
+
+    # C & R are scaled over the course of the iterations
+    iteration     = 1 + (workload.games / (workload.spsa_run.pairs_per * 2))
+    c_compression = iteration ** workload.spsa_run.gamma
+    r_compression = (workload.spsa_run.a_ratio * workload.spsa_run.iterations + iteration) ** workload.spsa_run.alpha
+
+    # Applying scaling
+    c_values = np.maximum(c_values / c_compression, np.where(is_float, 0.0, 0.5))
+    r_values = a_values / r_compression / c_values**2
+
+    # Apply flips for each parameter, for each permutation
+    flips = np.random.choice([-1, 1], size=(len(params), permutations))
+    devs  = values[:, None] + flips * c_values[:, None]
+    bases = values[:, None] - flips * c_values[:, None]
+
+    # Identify Integers, as they require rounding conversions
+    mask_int = ~is_float[:, None]                    # shape (num_params, 1)
+    mask_int = np.broadcast_to(mask_int, devs.shape) # shape (num_params, permutations)
+
+    # Probabilistic rounding for integer parameters
+    rand_mat        = np.random.rand(len(params), permutations)
+    devs[mask_int]  = np.floor(devs[mask_int]  + rand_mat[mask_int])
+    bases[mask_int] = np.floor(bases[mask_int] + rand_mat[mask_int])
+
+    # Clip to the original min/max
+    devs  = np.clip(devs,  mins[:, None], maxs[:, None])
+    bases = np.clip(bases, mins[:, None], maxs[:, None])
+
+    # Duplicate if the client will use multiple runners
+    devs  = np.repeat(devs , duplicates, axis=1)
+    bases = np.repeat(bases, duplicates, axis=1)
+    flips = np.repeat(flips, duplicates, axis=1)
+
+    return {
+        name : {
+            'index' : i,
+            'dev'   : [float(x) if is_float[i] else int(x) for x in devs[i]],
+            'base'  : [float(x) if is_float[i] else int(x) for x in bases[i]],
+            'flip'  : flips[i].tolist(),
+            'c'     : float(c_values[i]),
+            'r'     : float(r_values[i]),
+        } for i, name in enumerate(names)
+    }
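# --- Illustration (not part of the patch) -------------------------------------
# The probabilistic rounding in spsa_workload_assignment_dict keeps integer
# parameters unbiased: a perturbed value of 12.3 becomes 13 with probability 0.3
# and 12 otherwise, so the expected value is preserved. A tiny standalone sketch
# of the same floor(x + U[0,1)) trick followed by clipping, with made-up numbers:

import numpy as np

rng    = np.random.default_rng(0)
values = np.array([12.3, 4.7, 9.0])   # perturbed integer parameters
mins   = np.array([ 0.0, 0.0, 0.0])
maxs   = np.array([16.0, 5.0, 10.0])

rounded = np.floor(values + rng.random(values.shape)) # probabilistic rounding
clipped = np.clip(rounded, mins, maxs)                # respect the tune's bounds

print(clipped) # e.g. [13. 5. 9.], depending on the random draws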
diff --git a/OpenBench/templatetags/mytags.py b/OpenBench/templatetags/mytags.py
index 8898b969..40c002ee 100644
--- a/OpenBench/templatetags/mytags.py
+++ b/OpenBench/templatetags/mytags.py
@@ -18,8 +18,14 @@
 # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #

-import re, django
-import OpenBench.config, OpenBench.utils, OpenBench.stats, OpenBench.models
+import django
+import re
+
+import OpenBench.config
+import OpenBench.models
+import OpenBench.spsa_utils
+import OpenBench.stats
+import OpenBench.utils

 def oneDigitPrecision(value):
     try:
@@ -64,10 +70,11 @@ def shortStatBlock(test):
     penta_line = 'Ptnml(0-2): %d, %d, %d, %d, %d' % test.as_penta()

     if test.test_mode == 'SPSA':
+        spsa_run  = test.spsa_run # Avoid extra database accesses
         statlines = [
-            'Tuning %d Parameters' % (len(test.spsa['parameters'].keys())),
-            '%d/%d Iterations'     % (test.games / (2 * test.spsa['pairs_per']), test.spsa['iterations']),
-            '%d/%d Games Played'   % (test.games, 2 * test.spsa['iterations'] * test.spsa['pairs_per'])]
+            'Tuning %d Parameters' % (spsa_run.parameters.count()),
+            '%d/%d Iterations'     % (test.games / (2 * spsa_run.pairs_per), spsa_run.iterations),
+            '%d/%d Games Played'   % (test.games, 2 * spsa_run.iterations * spsa_run.pairs_per)]

     elif test.test_mode == 'SPRT':
         llr_line = 'LLR: %0.2f (%0.2f, %0.2f) [%0.2f, %0.2f]' % (
@@ -219,94 +226,6 @@ def machine_name(machine_id):
 register.filter('removePrefix', removePrefix)
 register.filter('machine_name', machine_name)

-####
-
-def spsa_param_digest(workload):
-
-    digest = []
-
-    # C and R are compressed as we progress iterations
-    iteration     = 1 + (workload.games / (workload.spsa['pairs_per'] * 2))
-    c_compression = iteration ** workload.spsa['Gamma']
-    r_compression = (workload.spsa['A'] + iteration) ** workload.spsa['Alpha']
-
-    # Maintain the original order, if there was one
-    keys = sorted(
-        workload.spsa['parameters'].keys(),
-        key=lambda x: workload.spsa['parameters'][x].get('index', -1)
-    )
-
-    for name in keys:
-
-        param = workload.spsa['parameters'][name]
-
-        # C and R if we got a workload right now
-        c = max(param['c'] / c_compression, 0.00 if param['float'] else 0.50)
-        r = param['a'] / r_compression / c ** 2
-
-        fstr = '%.4f' if param['float'] else '%d'
-
-        digest.append([
-            name,
-            '%.4f' % (param['value']),
-            fstr   % (param['start']),
-            fstr   % (param['min'  ]),
-            fstr   % (param['max'  ]),
-            '%.4f' % (c),
-            '%.4f' % (param['c_end']),
-            '%.4f' % (r),
-            '%.4f' % (param['r_end']),
-        ])
-
-    return digest
-
-def spsa_param_digest_headers(workload):
-    return ['Name', 'Curr', 'Start', 'Min', 'Max', 'C', 'C_end', 'R', 'R_end']
-
-def spsa_original_input(workload):
-
-    # Maintain the original order, if there was one
-    keys = sorted(
-        workload.spsa['parameters'].keys(),
-        key=lambda x: workload.spsa['parameters'][x].get('index', -1)
-    )
-
-    lines = []
-    for name in keys:
-
-        param = workload.spsa['parameters'][name]
-        dtype = 'float' if param['float'] else 'int'
-
-        # Original 7 token Input
-        lines.append(', '.join([
-            name,
-            dtype,
-            str(param['start']),
-            str(param['min'  ]),
-            str(param['max'  ]),
-            str(param['c_end']),
-            str(param['r_end']),
-        ]))
-
-    return '\n'.join(lines)
-
-def spsa_optimal_values(workload):
-
-    # Maintain the original order, if there was one
-    keys = sorted(
-        workload.spsa['parameters'].keys(),
-        key=lambda x: workload.spsa['parameters'][x].get('index', -1)
-    )
-
-    lines = []
-    for name in keys:
-        param = workload.spsa['parameters'][name]
-        value = param['value'] if param['float'] else round(param['value'])
-        lines.append(', '.join([name, str(value)]))
-
-    return '\n'.join(lines)
-
-
 def book_download_link(workload):
     if workload.book_name in OpenBench.config.OPENBENCH_CONFIG['books']:
         return OpenBench.config.OPENBENCH_CONFIG['books'][workload.book_name]['source']
@@ -369,10 +288,10 @@ def test_is_fischer(test):
     return 'FRC' in test.book_name.upper() or '960' in test.book_name.upper()

-register.filter('spsa_param_digest',         spsa_param_digest)
-register.filter('spsa_param_digest_headers', spsa_param_digest_headers)
-register.filter('spsa_original_input',       spsa_original_input)
-register.filter('spsa_optimal_values',       spsa_optimal_values)
+register.filter('spsa_param_digest',         OpenBench.spsa_utils.spsa_param_digest)
+register.filter('spsa_param_digest_headers', OpenBench.spsa_utils.spsa_param_digest_headers)
+register.filter('spsa_original_input',       OpenBench.spsa_utils.spsa_original_input)
+register.filter('spsa_optimal_values',       OpenBench.spsa_utils.spsa_optimal_values)

 register.filter('book_download_link',    book_download_link)
 register.filter('network_download_link', network_download_link)
diff --git a/OpenBench/urls.py b/OpenBench/urls.py
index 228b479a..0efa5b9e 100644
--- a/OpenBench/urls.py
+++ b/OpenBench/urls.py
@@ -79,6 +79,7 @@
     django.urls.path(r'api/networks///delete/',                  OpenBench.views.api_network_delete),
     django.urls.path(r'api/buildinfo/',                          OpenBench.views.api_build_info),
    django.urls.path(r'api/pgns/<int:pgn_id>/',                  OpenBench.views.api_pgns),
+    django.urls.path(r'api/spsa/<int:workload_id>/',             OpenBench.views.api_spsa),
     django.urls.path(r'api/workload/<int:workload_id>/results/', OpenBench.views.api_workload_results),

     # Redirect anything else to the Index
diff --git a/OpenBench/utils.py b/OpenBench/utils.py
index 4c9133ad..e11171f2 100644
--- a/OpenBench/utils.py
+++ b/OpenBench/utils.py
@@ -419,8 +419,21 @@ def update_test(request, machine):
     # Pentanomial Implementation
     LL, LD, DD, DW, WW = map(int, request.POST['pentanomial'].split())

+    # SPSA Delta update vector; might not have this
+    raw_spsa_delta = request.POST.get('spsa_delta', '')
+    spsa_delta     = json.loads(raw_spsa_delta) if raw_spsa_delta else []
+
     with transaction.atomic():

+        # MASSIVE risk for concurrent access to the Test. select_for_update() will lock the row,
+        # which correctly ensures no other entity can modify it. HOWEVER, spsa_run and the various
+        # spsa_run.parameters are NOT locked via this query. This is okay because no other location
+        # in OpenBench would be modifying the contents of those models.
+        #
+        # ALL of the updates here, even the trivial ones to the Profile and Machine, are wrapped in
+        # the same transaction.atomic(). The sole purpose and utility of that is to ensure either
+        # EVERYTHING gets updated as per this function, or NOTHING gets updated.
+
+        test = Test.objects.select_for_update().get(id=test_id)

         if test.finished or test.deleted:
@@ -465,12 +478,15 @@ def update_test(request, machine):

         elif test.test_mode == 'SPSA':

-            # Update each parameter, as determined by the Worker
-            for name, param in test.spsa['parameters'].items():
-                x = param['value'] + float(request.POST['spsa_%s' % (name)])
-                param['value'] = max(param['min'], min(param['max'], x))
+            # Apply updates to every Parameter, ensuring clipping
+            parameters = list(test.spsa_run.parameters.order_by('index'))
+            for delta, param in zip(spsa_delta, parameters):
+                param.value = max(param.min_value, min(param.max_value, param.value + delta))
+
+            # Bulk update to fire off all the .save()s
+            SPSAParameter.objects.bulk_update(parameters, ['value'])

-            test.finished = test.games >= 2 * test.spsa['pairs_per'] * test.spsa['iterations']
+            test.finished = test.games >= 2 * test.spsa_run.pairs_per * test.spsa_run.iterations

         elif test.test_mode == 'DATAGEN':

@@ -479,31 +495,31 @@ def update_test(request, machine):

         test.save()

-    # Update Result object; No risk from concurrent access
-    Result.objects.filter(id=result_id).update(
-        games    = F('games'   ) + games,
-        losses   = F('losses'  ) + losses,
-        draws    = F('draws'   ) + draws,
-        wins     = F('wins'    ) + wins,
-        LL       = F('LL'      ) + LL,
-        LD       = F('LD'      ) + LD,
-        DD       = F('DD'      ) + DD,
-        DW       = F('DW'      ) + DW,
-        WW       = F('WW'      ) + WW,
-        crashes  = F('crashes' ) + crashes,
-        timeloss = F('timeloss') + timelosses,
-        updated  = timezone.now()
-    )
-
-    # Update Profile object; No risk from concurrent access
-    Profile.objects.filter(user=Machine.objects.get(id=machine_id).user).update(
-        games=F('games') + games,
-        updated=timezone.now()
-    )
-
-    # Update Machine object; No risk from concurrent access
-    Machine.objects.filter(id=machine_id).update(
-        updated=timezone.now()
-    )
+        # Update Result object; No risk from concurrent access
+        Result.objects.filter(id=result_id).update(
+            games    = F('games'   ) + games,
+            losses   = F('losses'  ) + losses,
+            draws    = F('draws'   ) + draws,
+            wins     = F('wins'    ) + wins,
+            LL       = F('LL'      ) + LL,
+            LD       = F('LD'      ) + LD,
+            DD       = F('DD'      ) + DD,
+            DW       = F('DW'      ) + DW,
+            WW       = F('WW'      ) + WW,
+            crashes  = F('crashes' ) + crashes,
+            timeloss = F('timeloss') + timelosses,
+            updated  = timezone.now()
+        )
+
+        # Update Profile object; Some risk from concurrent access
+        Profile.objects.filter(user=Machine.objects.select_for_update().get(id=machine_id).user).update(
+            games=F('games') + games,
+            updated=timezone.now()
+        )
+
+        # Update Machine object; No meaningful risk from concurrent access
+        Machine.objects.filter(id=machine_id).update(
+            updated=timezone.now()
+        )

     return [{}, { 'stop' : True }][test.finished]
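# --- Illustration (not part of the patch) -------------------------------------
# The locking pattern the comment above relies on, reduced to its skeleton:
# select_for_update() must run inside transaction.atomic(), and the row lock is
# held until the transaction commits or rolls back. The Test model and its
# games field are the OpenBench ones; the helper itself is illustrative only.

from django.db import transaction
from OpenBench.models import Test

def bump_games(test_id, games):
    with transaction.atomic():
        # Concurrent submissions calling select_for_update() on this row block here
        test = Test.objects.select_for_update().get(id=test_id)
        test.games += games
        test.save()
    # Lock is released once the atomic block commits (or rolls back)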
diff --git a/OpenBench/views.py b/OpenBench/views.py
index c19b5ea8..7c5c4372 100644
--- a/OpenBench/views.py
+++ b/OpenBench/views.py
@@ -18,15 +18,16 @@
 # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #

-import os, hashlib, datetime, json, secrets, sys, re
+import csv, io, os, json, secrets

 import django.http
 import django.shortcuts
 import django.contrib.auth

 import OpenBench.config
-import OpenBench.utils
 import OpenBench.model_utils
+import OpenBench.spsa_utils
+import OpenBench.utils

 from OpenBench.workloads.create_workload import create_workload
 from OpenBench.workloads.get_workload import get_workload
@@ -468,7 +469,7 @@ def workload(request, workload_type, pk, action=None):
     if action != None:
         return modify_workload(request, pk, action)

-    if not (workload := Test.objects.filter(id=int(pk)).first()):
+    if not (workload := Test.objects.select_related('spsa_run').filter(id=int(pk)).first()):
         return redirect(request, '/index/', error='No such Workload exists')

     # Trying to view a Tune as a Test, for example
@@ -946,6 +947,27 @@ def api_pgns(request, pgn_id):
     response['Content-Disposition'] = 'attachment; filename=%d.pgn.tar' % (pgn_id)
     return response

+@csrf_exempt
+def api_spsa(request, workload_id):
+
+    # 0. Make sure the request has the correct permissions
+    if not api_authenticate(request):
+        return api_response({ 'error' : 'API requires authentication for this server' })
+
+    # 1. Make sure the workload actually exists for the requested SPSA session
+    try: workload = Test.objects.get(pk=workload_id)
+    except: return api_response({ 'error' : 'Requested Workload Id does not exist' })
+
+    output = io.StringIO()
+    writer = csv.writer(output)
+
+    writer.writerow(OpenBench.spsa_utils.spsa_param_digest_headers(workload))
+    writer.writerows(OpenBench.spsa_utils.spsa_param_digest(workload))
+
+    response = HttpResponse(output.getvalue(), content_type='text/plain')
+    response.charset = 'utf-8'
+    return response
+
 @csrf_exempt
 def api_workload_results(request, workload_id):
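# --- Illustration (not part of the patch) -------------------------------------
# Rough client-side usage of the new api/spsa/<id>/ endpoint, which returns the
# parameter digest as CSV text. How credentials are supplied depends on
# api_authenticate(), which is not part of this diff; the server URL, workload id
# and the username/password fields below are placeholder assumptions.

import csv, io, requests

SERVER   = 'https://example-openbench-instance.invalid'
WORKLOAD = 1234  # hypothetical tune id

resp = requests.post('%s/api/spsa/%d/' % (SERVER, WORKLOAD),
                     data={'username': 'user', 'password': 'secret'})  # placeholder auth

for row in csv.reader(io.StringIO(resp.text)):
    print(row)  # header row, then one row per parameter in index order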
diff --git a/OpenBench/workloads/create_workload.py b/OpenBench/workloads/create_workload.py
index 5cd48309..9d534e25 100644
--- a/OpenBench/workloads/create_workload.py
+++ b/OpenBench/workloads/create_workload.py
@@ -31,6 +31,9 @@

 import math

+from django.db import transaction
+
+import OpenBench.spsa_utils
 import OpenBench.utils
 import OpenBench.views

@@ -200,11 +203,10 @@ def create_new_tune(request):
     test.win_adj      = request.POST['win_adj']
     test.draw_adj     = request.POST['draw_adj']

-    test.scale_method = request.POST['scale_method']
-    test.scale_nps    = int(request.POST['scale_nps'])
+    test.scale_method = request.POST['scale_method']
+    test.scale_nps    = int(request.POST['scale_nps'])

     test.test_mode    = 'SPSA'
-    test.spsa         = extract_spas_params(request)

     test.awaiting     = not dev_has_all
@@ -212,7 +214,9 @@
         name = Network.objects.get(engine=test.dev_engine, sha256=test.dev_network).name
         test.dev_netname = test.base_netname = name

-    test.save()
+    with transaction.atomic():
+        test.save()
+        OpenBench.spsa_utils.create_spsa_run(test, request).save()

     profile = Profile.objects.get(user=request.user)
     profile.tests += 1
@@ -285,51 +289,6 @@ def create_new_datagen(request):

     return test, None

-def extract_spas_params(request):
-
-    spsa = {} # SPSA Hyperparams
-    spsa['Alpha'  ] = float(request.POST['spsa_alpha'])
-    spsa['Gamma'  ] = float(request.POST['spsa_gamma'])
-    spsa['A_ratio'] = float(request.POST['spsa_A_ratio'])
-
-    # Tuning durations
-    spsa['iterations'] = int(request.POST['spsa_iterations'])
-    spsa['pairs_per' ] = int(request.POST['spsa_pairs_per'])
-    spsa['A'         ] = spsa['A_ratio'] * spsa['iterations']
-
-    # Tuning Methodologies
-    spsa['reporting_type'   ] = request.POST['spsa_reporting_type']
-    spsa['distribution_type'] = request.POST['spsa_distribution_type']
-
-    # Each individual tuning parameter
-    spsa['parameters'] = {}
-    for index, line in enumerate(request.POST['spsa_inputs'].split('\n')):
-
-        # Comma-seperated values, already verified in verify_workload()
-        name, data_type, value, minimum, maximum, c_end, r_end = line.split(',')
-
-        # Recall the original order of inputs
-        param = {}
-        param['index'] = index
-
-        # Raw extraction
-        param['float'] = data_type.strip() == 'float'
-        param['start'] = float(value)
-        param['value'] = float(value)
-        param['min'  ] = float(minimum)
-        param['max'  ] = float(maximum)
-        param['c_end'] = float(c_end)
-        param['r_end'] = float(r_end)
-
-        # Verbatim Fishtest logic for computing these
-        param['c'    ] = param['c_end'] * spsa['iterations'] ** spsa['Gamma']
-        param['a_end'] = param['r_end'] * param['c_end'] ** 2
-        param['a'    ] = param['a_end'] * (spsa['A'] + spsa['iterations']) ** spsa['Alpha']
-
-        spsa['parameters'][name] = param
-
-    return spsa
-
 def get_engine(source, name, sha, bench):

     engine = Engine.objects.filter(name=name, source=source, sha=sha, bench=bench)
diff --git a/OpenBench/workloads/get_workload.py b/OpenBench/workloads/get_workload.py
index ea7ce98b..e7ce03db 100644
--- a/OpenBench/workloads/get_workload.py
+++ b/OpenBench/workloads/get_workload.py
@@ -32,6 +32,7 @@

 from OpenBench.config import OPENBENCH_CONFIG
 from OpenBench.models import Result, Test
+from OpenBench.spsa_utils import spsa_workload_assignment_dict

 from django.db import transaction

@@ -183,11 +184,6 @@ def compute_resource_distribution(workloads, machine, has_focus):

 def workload_to_dictionary(test, result, machine):

-    # HACK: Remove this after a while, to avoid a complex DB migration
-    if test.scale_nps == 0:
-        test.scale_nps = OPENBENCH_CONFIG['engines'][test.base_engine]['nps']
-        test.save()
-
     workload = {}

     workload['result'] = {
@@ -246,8 +242,8 @@
     }

     workload['distribution']   = game_distribution(test, machine)
-    workload['spsa']           = spsa_to_dictionary(test, workload)
-    workload['reporting_type'] = test.spsa.get('reporting_type', 'BATCHED')
+    workload['spsa']           = spsa_workload_assignment_dict(test, workload['distribution']['runner-count'])
+    workload['reporting_type'] = test.spsa_run.reporting_type

     with transaction.atomic():

@@ -269,67 +265,6 @@

     return workload

-def spsa_to_dictionary(test, workload):
-
-    if test.test_mode != 'SPSA':
-        return None
-
-    # Only use one set of parameters if distribution is SINGLE.
-    # Duplicate the params, even though they are the same, across all
-    # Sockets on the machine, in the event of a singular SPSA distribution
-    is_single    = test.spsa['distribution_type'] == 'SINGLE'
-    permutations = 1 if is_single else workload['distribution']['runner-count']
-    duplicates   = 1 if not is_single else workload['distribution']['runner-count']
-
-    # C & R are scaled over the course of the iterations
-    iteration     = 1 + (test.games / (test.spsa['pairs_per'] * 2))
-    c_compression = iteration ** test.spsa['Gamma']
-    r_compression = (test.spsa['A'] + iteration) ** test.spsa['Alpha']
-
-    spsa = {}
-    for name, param in test.spsa['parameters'].items():
-
-        spsa[name] = {
-            'dev'  : [], # One for each Permutation the Worker will run
-            'base' : [], # One for each Permutation the Worker will run
-            'flip' : [], # One for each Permutation the Worker will run
-        }
-
-        # C & R are constants for a particular assignment, for all Permutations
-        spsa[name]['c'] = max(param['c'] / c_compression, 0.00 if param['float'] else 0.50)
-        spsa[name]['r'] = param['a'] / r_compression / spsa[name]['c'] ** 2
-
-        for f in range(permutations):
-
-            # Adjust current best by +- C
-            flip = 1 if random.getrandbits(1) else -1
-            dev  = param['value'] + flip * spsa[name]['c']
-            base = param['value'] - flip * spsa[name]['c']
-
-            # Probabilistic rounding for Integer types
-            if not param['float']:
-                r    = random.uniform(0, 1)
-                dev  = math.floor(dev  + r)
-                base = math.floor(base + r)
-
-            # Clip within [Min, Max]
-            dev  = max(param['min'], min(param['max'], dev ))
-            base = max(param['min'], min(param['max'], base))
-
-            # Round integer values down
-            if not param['float']:
-                dev  = int(dev )
-                base = int(base)
-
-            # Append each permutation
-            for g in range(duplicates):
-                spsa[name]['dev' ].append(dev)
-                spsa[name]['base'].append(base)
-                spsa[name]['flip'].append(flip)
-
-
-    return spsa
-
 def extract_option(options, option):

     if (match := re.search('(?<=%s=")[^"]*' % (option), options)):
@@ -364,7 +299,7 @@ def game_distribution(test, machine):
     spsa_count = (worker_threads // max(dev_threads, base_threads)) // 2

     # SPSA is treated specially, if we are distributing many parameter sets at once
-    is_multiple_spsa = test.test_mode == 'SPSA' and test.spsa['distribution_type'] == 'MULTIPLE'
+    is_multiple_spsa = test.test_mode == 'SPSA' and test.spsa_run.distribution_type == 'MULTIPLE'

     return {
         'runner-count' : spsa_count if is_multiple_spsa else worker_sockets,
diff --git a/Templates/OpenBench/workload.html b/Templates/OpenBench/workload.html
index 327aa6e8..69929822 100644
--- a/Templates/OpenBench/workload.html
+++ b/Templates/OpenBench/workload.html
@@ -59,11 +59,11 @@
     Scaling{{workload.scale_method}} {{workload.scale_nps}} NPS
     {% if type == "TUNE" %}
-        Alpha{{workload.spsa.Alpha}}
-        Gamma{{workload.spsa.Gamma}}
-        A-Ratio{{workload.spsa.A_ratio}}
-        Reporting{{workload.spsa.reporting_type}}
-        Distribution{{workload.spsa.distribution_type}}
+        Alpha{{workload.spsa_run.alpha}}
+        Gamma{{workload.spsa_run.gamma}}
+        A-Ratio{{workload.spsa_run.a_ratio}}
+        Reporting{{workload.spsa_run.reporting_type}}
+        Distribution{{workload.spsa_run.distribution_type}}
     Pairs Per Point{{workload.workload_size}}
     {% elif type == "DATAGEN" %}
     Genfens Args{{workload.genfens_args}}