#!/usr/bin/env python
# coding: utf-8

# In[17]:


import numpy as np
import numbers
import math
from scipy import signal as sig
import scipy.fftpack
import random
import struct
import scipy.interpolate
import ipywidgets
import itertools
import datetime
import multiprocessing
import traceback
from collections import defaultdict
import json

from tqdm import tqdm
import colorednoise

np.set_printoptions(linewidth=240)



sampling_rate = 10  # samples per second


# In[4]:


# From https://github.com/mubeta06/python/blob/master/signal_processing/sp/gold.py
preferred_pairs = {5:[[2],[1,2,3]], 6:[[5],[1,4,5]], 7:[[4],[4,5,6]],
                        8:[[1,2,3,6,7],[1,2,7]], 9:[[5],[3,5,6]], 
                        10:[[2,5,9],[3,4,6,8,9]], 11:[[9],[3,6,9]]}
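
# A Gold family for n bits has 2**n + 1 sequences of length 2**n - 1: the two
# preferred-pair m-sequences themselves plus all 2**n - 1 cyclic-shift XOR
# combinations of the two. E.g. len(gold(5)) == 33, each with 31 chips.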

def gen_gold(seq1, seq2):
    gold = [seq1, seq2]
    for shift in range(len(seq1)):
        gold.append(seq1 ^ np.roll(seq2, -shift))
    return gold

def gold(n):
    n = int(n)
    if n not in preferred_pairs:
        raise KeyError('preferred pairs for %s bits unknown' % str(n))
    t0, t1 = preferred_pairs[n]
    (seq0, _st0), (seq1, _st1) = sig.max_len_seq(n, taps=t0), sig.max_len_seq(n, taps=t1)
    return gen_gold(seq0, seq1)


# In[5]:


def modulate(data, nbits=5):
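    # Each symbol carries nbits+1 bits: the upper bits (data >> 1) select one
    # of 2**nbits Gold codes, the LSB sets the spreading polarity (BPSK).
    # The chip stream is mapped back to {0, 1} and zero-padded on both ends.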
    # 0, 1 -> -1, 1
    mask = np.array(gold(nbits))*2 - 1
    
    sel = mask[data>>1]
    data_lsb_centered = ((data&1)*2 - 1)

    signal = (np.multiply(sel, np.tile(data_lsb_centered, (2**nbits-1, 1)).T).flatten() + 1) // 2
    return np.hstack([ np.zeros(len(mask)), signal, np.zeros(len(mask)) ])


# In[6]:


def correlate(sequence, nbits=5, decimation=1, mask_filter=lambda x: x):
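    # Correlate the received samples against every Gold code, with each chip
    # repeated `decimation` times to match the transmitted chip duration.
    # Note: the mean removal below modifies `sequence` in place.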
    mask = np.tile(np.array(gold(nbits))[:,:,np.newaxis]*2 - 1, (1, 1, decimation)).reshape((2**nbits + 1, (2**nbits-1) * decimation))

    sequence -= np.mean(sequence)
    
    return np.array([np.correlate(sequence, row, mode='full') for row in mask])


# In[7]:


def load_noise_meas_params(capture_file):
    with open(capture_file, 'rb') as f:
        meas_data = np.copy(np.frombuffer(f.read(), dtype='float32'))
        print('mean:', np.mean(meas_data), 'len:', len(meas_data))
        meas_data -= np.mean(meas_data)
        return {'meas_data': meas_data}

def mains_noise_measured(n, meas_data):
    last_valid = len(meas_data) - n
    start = np.random.randint(last_valid)
    return meas_data[start:start+n]

def load_noise_synth_params(specfile):
    with open(specfile) as f:
        d = json.load(f)
        return {'spl_x': np.linspace(*d['x_spec']),
                'spl_N': d['x_spec'][2],
                'psd_spl': (d['t'], d['c'], d['k']) }

def mains_noise_synthetic(n, psd_spl, spl_N, spl_x):
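    # Shapes white Gaussian noise with a spline model of the measured
    # grid-frequency PSD, applied in the frequency domain. The first 10000
    # samples are discarded, presumably to avoid filter edge artifacts; note
    # the ifft output may retain a small imaginary part.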
    noise = np.random.normal(size=spl_N) * 2
    spec = scipy.fftpack.fft(noise) **2

    spec *= np.exp(scipy.interpolate.splev(spl_x, psd_spl))

    spec **= 1/2
    
    renoise = scipy.fftpack.ifft(spec)
    return renoise[10000:][:n]

# In[8]:


def generate_test_signal(duration, nbits=6, signal_amplitude=2.0e-3, decimation=10, seed=0):
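    # Produce `duration` random (nbits+1)-bit symbols, spread them with
    # modulate(), repeat each chip `decimation` times, scale the result to
    # `signal_amplitude` and add mains noise from the selected source.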
    test_data = np.random.RandomState(seed=seed).randint(0, 2 * (2**nbits), duration)
    
    signal = np.repeat(modulate(test_data, nbits) * 2.0 - 1, decimation) * signal_amplitude
    noise = mains_noise(len(signal))
    
    return test_data, signal + noise


# In[9]:


nonlinear_distance = lambda x: 100**(2*np.abs(0.5-x%1)) / (np.abs(x)+3)**2 * (np.clip(np.abs(x), 0, 0.5) * 2)**5
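# nonlinear_distance(x) scores the spacing between successive peaks for the
# MLE decoder below: it is maximal when x is close to a whole number of bit
# periods (x % 1 near 0 or 1), decays like 1/(|x|+3)**2 for long gaps, and is
# strongly suppressed for spacings below half a period.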

# In[11]:


noprint = lambda *args, **kwargs: None


# In[12]:


def run_ser_test(sample_duration=128, nbits=6, signal_amplitude=2.0e-3, decimation=10, threshold_factor=4.0, power_avg_width=2.5, max_lookahead=6.5, pol_score_factor=1.0, seed=0, ax=None, print=print, ser_maxshift=3, debug_range=None):
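    # Pipeline: correlate against all Gold codes -> CWT (ricker) filtering ->
    # adaptive power threshold -> peak grouping -> MLE chain decoding with
    # hole patching -> shift-tolerant symbol error rate (SER) estimate.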

    test_data, signal = generate_test_signal(sample_duration, nbits, signal_amplitude, decimation, seed)
    cor_an = correlate(signal, nbits=nbits, decimation=decimation)

    power_avg_width = int(power_avg_width * (2**nbits - 1) * decimation)

    bit_period = (2**nbits) * decimation
    peak_group_threshold = 0.05 * bit_period
    hole_patching_threshold = 0.01 * bit_period
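
    # Matched filter with a ricker wavelet of width ~0.73x the chip duration.
    # Note: scipy.signal.cwt/ricker were deprecated in SciPy 1.12 and removed
    # in 1.15; this script assumes an older SciPy.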
    
    cwt_res = np.array([ sig.cwt(row, sig.ricker, [0.73 * decimation]).flatten() for row in cor_an ])
    if ax:
        ax.grid()
        ax.plot(cwt_res.T)
        
    th = np.array([ np.convolve(np.abs(row), np.ones((power_avg_width,))/power_avg_width, mode='same') for row in cwt_res ])
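
    # A sample is "hot" when any correlator's CWT magnitude exceeds its local
    # average power (th) by threshold_factor; itertools.groupby then collects
    # consecutive hot samples into candidate peak groups.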

    def compare_th(elem):
        idx, (th, val) = elem
        #print('compare_th:', th.shape, val.shape)
        return np.any(np.abs(val) > th*threshold_factor)

    peaks = [ list(group) for val, group in itertools.groupby(enumerate(zip(th.T, cwt_res.T)), compare_th) if val ]
    peaks_processed = []
    peak_group = []
    for group in peaks:
        pos = np.mean([idx for idx, _val in group])
        #pol = np.mean([max(val.min(), val.max(), key=abs) for _idx, (_th, val) in group])
        pol = max([max(val.min(), val.max(), key=abs) for _idx, (_th, val) in group], key=abs)
        pol_idx = np.argmax(np.bincount([ np.argmax(np.abs(val)) for _idx, (_th, val) in group ]))
        peaks_processed.append((pos, pol, pol_idx))
        #print(f'group', pos, pol, pol_idx)
        #for pol, (_idx, (_th, val)) in zip([max(val.min(), val.max(), key=abs) for _idx, (_th, val) in group], group):
        #    print('    ', pol, val)
        #if ax:
        #    ax.axvline(pos, color='cyan', alpha=0.3)
        msg = f'peak at {pos} = {pol} idx {pol_idx}: '

        if peak_group:
            msg += f'continuing previous group: {peak_group[-1]},'
            group_start, last_pos, last_pol, peak_pos, last_pol_idx = peak_group[-1]

            if abs(pol) > abs(last_pol):
                msg += 'larger, '
                if ax:
                    ax.axvline(pos, color='magenta', alpha=0.5)
                peak_group[-1] = (group_start, pos, pol, pos, pol_idx)
                
            else:
                msg += 'smaller, '
                if ax:
                    ax.axvline(pos, color='blue', alpha=0.5)
                peak_group[-1] = (group_start, pos, last_pol, peak_pos, last_pol_idx)
        else:
            last_pos = None
        
        if not peak_group or pos - last_pos > peak_group_threshold:
            msg += 'terminating, '
            if peak_group:
                msg += f'previous group: {peak_group[-1]},'
                peak_pos = peak_group[-1][3]
                if ax:
                    ax.axvline(peak_pos, color='red', alpha=0.6)
                #ax3.text(peak_pos-20, 2.0, f'{0 if pol < 0 else 1}', horizontalalignment='right', verticalalignment='center', color='black')

            msg += f'new group: {(pos, pos, pol, pos, pol_idx)} '
            peak_group.append((pos, pos, pol, pos, pol_idx))
            if ax:
                ax.axvline(pos, color='cyan', alpha=0.5)
        
        if debug_range:
            low, high = debug_range
            if low < pos < high:
                print(msg)
                print(group)

    avg_peak = np.mean(np.abs(np.array([last_pol for _1, _2, last_pol, _3, _4 in peak_group])))
    print('avg_peak', avg_peak)

    def mle_decode(peak_groups, print=print):
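        # Beam search over peak chains: each chain is extended by up to three
        # plausible successors within max_lookahead bit periods; scores blend
        # normalized peak amplitude with nonlinear_distance() spacing
        # plausibility, and only the ten best chains survive each round.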
        peak_groups = [ (pos, pol, idx) for _1, _2, pol, pos, idx in peak_groups ]
        candidates = [ (abs(pol)/avg_peak, [(pos, pol, idx)]) for pos, pol, idx in peak_groups if pos < bit_period*2.5 ]

        while candidates:
            chain_candidates = []
            for chain_score, chain in candidates:
                pos, ampl, _idx = chain[-1]
                score_fun = lambda pos, npos, npol: pol_score_factor*abs(npol)/avg_peak + nonlinear_distance((npos-pos)/bit_period)
                next_candidates = sorted([ (score_fun(pos, npos, npol), npos, npol, nidx) for npos, npol, nidx in peak_groups if pos < npos < pos + bit_period*max_lookahead ], reverse=True)

                print(f'    candidates for {pos}, {ampl}:')
                for score, npos, npol, nidx in next_candidates:
                    print(f'        {score:.4f} {npos:.2f} {npol:.2f} {nidx:.2f}')

                nch, cor_len = cor_an.shape
                if cor_len - pos < 1.5*bit_period or not next_candidates:
                    score = sum(score_fun(opos, npos, npol) for (opos, _opol, _oidx), (npos, npol, _nidx) in zip(chain[:-1], chain[1:])) / len(chain)
                    yield score, chain

                else:
                    print('extending')
                    for score, npos, npol, nidx in next_candidates[:3]:
                        if score > 0.5:
                            new_chain_score = chain_score * 0.9 + score * 0.1
                            chain_candidates.append((new_chain_score, chain + [(npos, npol, nidx)]))
            print('chain candidates:')
            for score, chain in sorted(chain_candidates, reverse=True):
                print('    ', [(score, [(f'{pos:.2f}', f'{pol:.2f}') for pos, pol, _idx in chain])])
            candidates = [ (chain_score, chain) for chain_score, chain in sorted(chain_candidates, reverse=True)[:10] ]

    res = sorted(mle_decode(peak_group, print=noprint), reverse=True)
    #for i, (score, chain) in enumerate(res):
    #    print(f'Chain {i}@{score:.4f}: {chain}')
    (_score, chain), *_ = res

    def viz(chain, peaks):
        last_pos = None
        for pos, pol, nidx in chain:
            if last_pos is not None:
                delta = int(round((pos - last_pos) / bit_period))
                if delta > 1:
                    print(f'skipped {delta-1} symbols at {pos}/{last_pos}')
                    
                    # Hole patching routine
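                    # The winning chain skipped symbols: look for weaker peaks
                    # near the interpolated positions and decode those;
                    # unrecoverable holes yield None.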
                    for i in range(1, delta):
                        est_pos = last_pos + (pos - last_pos) / delta * i

                        icandidates = [ (ipos, ipol, iidx) for ipos, ipol, iidx in peaks if abs(est_pos - ipos) < hole_patching_threshold ]
                        if not icandidates:
                            yield None
                            continue

                        ipos, ipol, iidx = max(icandidates, key = lambda e: abs(e[1]))

                        decoded = iidx*2 + (0 if ipol < 0 else 1)
                        print(f'interpolating, last_pos={last_pos}, delta={delta}, pos={pos}, est={est_pos} dec={decoded}')
                        yield decoded
            
            decoded = nidx*2 + (0 if pol < 0 else 1)
            yield decoded
            if ax:
                ax.axvline(pos, color='blue', alpha=0.5)
                ax.text(pos-20, 0.0, f'{decoded}', horizontalalignment='right', verticalalignment='center', color='black')

            last_pos = pos

    decoded = list(viz(chain, peaks_processed))
    print('decoding [ref|dec]:')
    match_result = []
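    # Align the decoded stream with the reference at several shifts and keep
    # the alignment with the fewest symbol errors.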
    for shift in range(-ser_maxshift, ser_maxshift + 1):
        msg = f'=== shift = {shift} ===\n'
        failures = -shift if shift < 0 else 0 # the first |shift| reference symbols are skipped and counted as failures
        a = test_data if shift > 0 else test_data[-shift:]
        b = decoded if shift < 0 else decoded[shift:]
        for i, (ref, found) in enumerate(itertools.zip_longest(a, b)):
            if ref is None: # end of signal
                break
            msg += f'{ref if ref is not None else -1:>3d}|{found if found is not None else -1:>3d} {"✔" if ref==found else "✘" if found is not None else " "}   '
            if ref != found:
                failures += 1
            if i%8 == 7:
                msg += '\n'
        match_result.append((failures, msg))
    failures, msg = min(match_result, key=lambda e: e[0])
    print(msg)
    ser = failures/len(test_data)
    print(f'Symbol error rate e={ser}: {failures}/{len(test_data)}')
    br = sampling_rate / decimation / (2**nbits) * nbits * (1 - ser) * 3600
    print(f'maximum bitrate r={br} b/h')
    return ser, br


# In[13]:


default_params = dict(
        power_avg_width=2.5,
        max_lookahead=6.5)

def calculate_ser(reps, v, seed, nbits, thf, duration, decimation):
    params = dict(default_params)
    params['signal_amplitude'] = v
    params['decimation'] = decimation
    params['nbits'] = nbits
    params['threshold_factor'] = thf

    out = []
    for _rep in range(reps):
        try:
            ser, br = run_ser_test(**params, sample_duration=duration, print=noprint, seed=seed)
            out.append((ser, br, None))
        except Exception as e:
            #traceback.print_exc()
            out.append((None, None, f'got {e} seed {seed:08x} params {params}'))
    return out

import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--index', type=int, default=0)
parser.add_argument('-b', '--batches', type=int, default=1)
parser.add_argument('-z', '--chunksize', type=int, default=50)
parser.add_argument('-r', '--run-id', type=str, default='default')
group = parser.add_mutually_exclusive_group()
group.add_argument('-s', '--synthetic-noise', nargs='?', const='grid_freq_psd_spl_108pt.json', type=str)
group.add_argument('-m', '--measured-noise', nargs='?', default='fmeas_export_ocxo_2day.bin', const='fmeas_export_ocxo_2day.bin', type=str)
args = parser.parse_args()
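
# Example: run batch 0 of 4 against the measured-noise capture ('run1' is an
# arbitrary run-id):
#   python dsss_experiments-ber.py -i 0 -b 4 -m fmeas_export_ocxo_2day.bin -r run1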

# scheduled = [
#         (5,  1.5, 50, 50, 128),
#         (6,  2.0, 50, 50, 128),
#         (6,  2.5, 50, 50, 128),
#         (6,  3.0, 50, 50, 128),
#         (6,  3.5, 50, 50, 128),
#         (6,  4.0, 50, 50, 128),
#         (6,  4.5, 50, 50, 128),
#         (6,  5.0, 50, 50, 128),
#         (6,  5.5, 50, 50, 128),
#         (6,  6.0, 50, 50, 128),
#         (6,  6.5, 50, 50, 128),
#         (6,  7.0, 50, 50, 128),
#         (6,  7.5, 50, 50, 128),
#         (6,  8.0, 50, 50, 128),
#         (6,  8.5, 50, 50, 128),
#         (6,  9.0, 50, 50, 128),
#         (6,  9.5, 50, 50, 128),
#         (6, 10.0, 50, 50, 128),
#        ]
#scheduled = [ (5,  4.0, 50, 70, 100, dec) for dec in list(range(19)) + [int(round(20 * 10**dec)) for dec in np.linspace(0, 0.7, 20)] ] #int(round(1 * 10**dec))) for dec in np.linspace(0, 2, 30) ]

#param_name = 'par101'
#scheduled = [ (nbits,  thf, 100, 30, duration, 10) for thf in [1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0] for nbits, duration in [(5, 128), (6, 128), (7, 64), (8, 64)] ]# int(round(3 * 10**dec)) for dec in np.linspace(0, 2, 30) ]

#param_name = 'par102'
#scheduled = [ (nbits,  thf, 1000, 50, duration, int(round(3 * 10**dec))) for thf in [1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0] for nbits, duration in [(5, 128), (6, 128), (7, 64), (8, 64)] for dec in np.linspace(0, 2, 30) ]

#param_name = 'par103'
#scheduled = [ (nbits,  thf, 1000, 50, duration, 10) for thf in [1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0] for nbits, duration in [(5, 128), (6, 128), (7, 64), (8, 64)] ]

#param_name = 'par105'
#scheduled = [ (nbits,  thf, 1000, 30, duration, int(round(3 * 10**dec))) for thf in [2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0] for nbits, duration in [(5, 128), (6, 128)] for dec in np.linspace(0, 2, 30) ]

#param_name = 'dbg106'
#scheduled = [ (nbits,  thf, 20, 15, duration, 10) for thf in [1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0] for nbits, duration in [(5, 128), (6, 128)] ]

#param_name = 'par107'
#scheduled = [ (nbits,  thf, 100, 30, duration, 10) for thf in [1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0] for nbits, duration in [(5, 128), (6, 128), (7, 64), (8, 64)] ]

#param_name = 'par108'
#scheduled = [ (nbits,  thf, 100, 30, duration, int(round(3 * 10**dec))) for thf in [2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0] for nbits, duration in [(5, 128), (6, 128)] for dec in np.linspace(0, 2, 30) ]

#param_name = 'par110'
#scheduled = [ (nbits,  thf, 100, 30, duration, int(round(1 * 10**dec))) for thf in [3.5, 4.0, 4.5, 5.0] for nbits, duration in [(5, 128), (6, 128)] for dec in np.linspace(0.15, 2, 15) ]

#param_name = 'par111'
#scheduled = [ (nbits,  thf, 100, 30, duration, dec) for thf in [3.5, 4.0, 4.5, 5.0] for nbits, duration in [(5, 128), (6, 128)] for dec in [1, 2, 3, 4, 5, 6, 9, 12, 16, 22, 30, 40, 50] ]

# Faster chip duration graph
#param_name = 'par112'
#scheduled = [ (nbits,  thf, 50, 30, duration, dec) for thf in [4.0] for nbits, duration in [(5, 100)] for dec in [1, 2, 3, 4, 5, 6, 9, 12, 16, 22, 30, 40, 50] ]

#param_name = 'par113'
#scheduled = [ (nbits,  thf, 100, 30, duration, dec) for thf in [4.0, 4.5, 5.0, 5.5] for nbits, duration in [(5, 128), (6, 128)] for dec in [1, 2, 3, 4, 5, 6, 9, 12, 16, 22, 30, 40, 50] ]

#param_name = 'par114'
#scheduled = [ (nbits,  thf, 100, 30, duration, dec) for nbits, thf, duration in [(5, 4.0, 128), (6, 5.0, 128)] for dec in [1, 2, 3, 4, 5, 6, 9, 12, 16, 22, 30, 40, 50] ]

# Extra amplitude points for the 5- and 6-bit graphs. NOTE: param_name 'par114' was accidentally reused; the earlier run used the set above, the later run used this one.
#param_name = 'par114'
param_name = 'par115'
scheduled = [ (nbits,  thf, 100, 0.05e-3 * 10 ** np.linspace(0, 3.5, 50), duration, 10) for thf in [1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0] for nbits, duration in [(5, 128), (6, 128)] ]

random.Random(0).shuffle(scheduled)
batch = scheduled[args.index::args.batches]
print('Parameter set name', param_name)
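
# Noise source selection: -s colors white noise with a PSD spline fitted to
# measured grid frequency data; -m (the default) slices random windows
# directly out of a recorded trace.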

if args.synthetic_noise is not None:
    noise_params = load_noise_synth_params(args.synthetic_noise)
    mains_noise = lambda n: mains_noise_synthetic(n, **noise_params)
    noise_source = 'synth'
else:
    noise_params = load_noise_meas_params(args.measured_noise)
    mains_noise = lambda n: mains_noise_measured(n, **noise_params)
    noise_source = 'meas'

def chunked(n, size):
    for start in range(0, n, size):
        end = min(n, start + size)
        yield start, end

results = {}
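
# The worker pool inherits mains_noise and friends via fork(); this assumes a
# POSIX 'fork' start method, since no __main__ guard is present.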
with tqdm(total = 0) as tq:
    with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
        #for nbits, thf, reps, points, duration in [(5, 4.0, 50, 25, 128), (6, 4.0, 50, 25, 128), (7, 5.0, 25, 25, 128), (8, 6.0, 25, 25, 64)]:
        #for nbits, thf, reps, points, duration in [(6, 4.5, 5, 25, 128), (6, 5.0, 5, 25, 128)]:
        for nbits, thf, reps, points, duration, decimation in batch:
            st = np.random.RandomState(0)

            if isinstance(points, numbers.Number):
                vs = 0.05e-3 * 10 ** np.linspace(0, 2.0, points)
                points_id = points
            else:
                vs = points
                points_id = len(vs)
            tq.write('scheduled {}reps x {}pt {}b @ th={} t={}sym dec={}sp'.format(reps, points_id, nbits, thf, duration, decimation))
            results[(nbits, thf, reps, points_id, duration, decimation)] = { v:
                    [ pool.apply_async(calculate_ser, (end - start, v, st.randint(0xffffffff), nbits, thf, duration, decimation), callback=lambda _res, n=end - start: tq.update(n)) for start, end in chunked(reps, args.chunksize) ]
                    for v in vs }
            tq.total += len(vs) * reps
            tq.refresh()
        
        tq.write(f'scheduled {tq.total} tasks. waiting...')
        pool.close()
        pool.join()
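
        # Collect and flatten the per-chunk AsyncResults into plain
        # (params, [(amplitude, outcomes)]) pairs for JSON serialization.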

        results = [ (key, [ (v, [ x for res in chunks for x in res.get() ]) for v, chunks in series.items() ]) for key, series in results.items() ]
        tq.write('done')

        #sers, brs = np.array(sers), np.array(brs)
        #ser, std = np.mean(sers), np.std(sers)
        #print(f'signal_amplitude={v:<.5f}: ser={ser:<.5f} ±{std:<.5f}, br={np.mean(brs):<.5f}')
        #return ser, std

with open(f'dsss_experiments_res-{param_name}-{noise_source}-{args.run_id}-{args.index}-{datetime.datetime.now():%Y-%m-%d-%H-%M-%S}.json', 'w') as f:
    json.dump(results, f)
    print(f'Wrote results to {f.name}')