vidjil_utils.py 24.9 KB
Newer Older
1
# -*- coding: utf-8 -*-
2
import math
3
import re
4
import defs
5
import json
6
import datetime
7
from gluon import current
8
from datetime import date
9

10
def format_size(n, unit='B'):
    '''
    Takes an integer n, representing a filesize and returns a string
    where the size is formatted with the correct SI prefix and
    with a constant number of significant digits.

    Example:
    >>> format_size(42)
    '42 B'
    >>> format_size(123456)
    '123 kB'
    >>> format_size(1000*1000)
    '1.00 MB'
    >>> format_size(1024*1024*1024)
    '1.07 GB'
    >>> format_size(42*(2**40))
    '46.2 TB'
    '''
    if n == 0:
        return '0'

    PREFIXES = ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']

    # Divide by 1000 until the value fits under the next prefix
    # (or we run out of prefixes).
    value = float(n)
    chosen = PREFIXES[-1]
    for p in PREFIXES:
        if value < 1000:
            chosen = p
            break
        value /= 1000

    # Keep roughly three significant digits; no decimals without a prefix.
    if value > 100 or not chosen:
        fmt = '%.0f'
    elif value > 10:
        fmt = '%.1f'
    else:
        fmt = '%.2f'

    return fmt % value + ' ' + chosen + unit

50

51
52
53
54


def age_years_months(birth, months_below_year=4):
    '''Get the age in years, and possibly months.

    Ages below `months_below_year` years also carry the month count;
    anything that is not a date yields '-/-'.
    '''
    if not isinstance(birth, datetime.date):
        return '-/-'

    now = date.today()

    # Completed years: one less if this year's birthday has not occurred yet
    full_years = now.year - birth.year
    if (now.month, now.day) < (birth.month, birth.day):
        full_years -= 1
    result = '%dy' % full_years

    if full_years >= months_below_year:
        return result

    # Completed months since the last birthday
    extra_months = now.month - birth.month
    if now.day < birth.day:
        extra_months -= 1
    if extra_months < 0:
        extra_months += 12

    return result + ' %dm' % extra_months

73
74
75
76
77
78
def anon_birth(patient_id, user_id):
    '''Anonymize birth date. Only the 'anon' access see the full birth date.'''
    db = current.db
    auth = current.auth

    birth = db.patient[patient_id].birth
    if birth is None:
        return ""

    age = age_years_months(birth)

    # The full birth date is shown only with the 'anon' permission;
    # everybody else just gets the (coarser) age.
    if auth.get_permission("anon", "patient", patient_id, user_id):
        return "%s (%s)" % (birth, age)
    return age

90
def anon_ids(patient_id):
    '''Anonymize patient name. Only the 'anon' access see the full patient name.

    Thin wrapper: loads the patient record and delegates to display_names().
    '''
    db = current.db
    # (removed an unused `auth = current.auth` local; display_names
    # fetches the auth object itself)
    patient = db.patient[patient_id]

    return display_names(patient.sample_set_id, patient.first_name, patient.last_name)
98

99
def anon_names(sample_set_id, first_name, last_name, can_view=None):
    '''
    Anonymize the given names of the patient whose ID is patient_id.
    This function performs at most one db call (to know if we can see
    the patient's personal informations). None is performed if can_view
    is provided (to tell if one can view the patient's personal informations)
    '''
    auth = current.auth

    # Ask the db only when the caller did not decide already
    if can_view is None:
        allowed = auth.can_view_info('sample_set', sample_set_id)
    else:
        allowed = can_view

    if allowed:
        return last_name + " " + first_name
    # Anonymized: only the first three letters of the last name
    return unicode(safe_decoding(last_name)[:3])

117
def display_names(sample_set_id, first_name, last_name, can_view=None):
    '''
    Return the name as displayed to a user or admin of a patient
    whose ID is patient_id.
    It makes use of anon_names which will return an anonymised version
    of the patient name if the user doesn't have permission to see the real name.
    Admins will also see the patient id.
    '''
    auth = current.auth

    shown = anon_names(sample_set_id, first_name, last_name, can_view)

    # Admins also see the patient id
    if auth.is_admin():
        shown = shown + ' (%s)' % sample_set_id

    return shown
134

135
136
137
138
139
140
141
142
143
def safe_decoding(string):
    '''
    Get a unicode (text) string. If the string was already text we just
    return it; byte strings are decoded from UTF-8.

    Raises UnicodeDecodeError on byte strings that are not valid UTF-8.
    '''
    # `unicode` does not exist on Python 3, where `str` is already text;
    # this shim keeps the function working on both versions.
    try:
        text_type = unicode
    except NameError:
        text_type = str
    if isinstance(string, text_type):
        return string
    else:
        return string.decode('utf-8')

144
145
146
147
148
149
def safe_encoding(string):
    '''
    Try to encode the string in UTF-8 but if it fails just
    returns the string.

    On Python 3 (no `unicode` builtin) the value is returned unchanged:
    str is already text and needs no re-encoding.
    '''
    try:
        return unicode(string).encode('utf-8')
    except NameError:
        # Python 3: keep the value as-is
        return string
    except UnicodeDecodeError:
        return string

Mikaël Salson's avatar
Mikaël Salson committed
154
155
156
157
158
159
160
161
162
163
164
165
166
def prevent_none(value, replacement_value):
    '''
    Return value if it is not None otherwise
    replacement_value

    >>> prevent_none(None, 2)
    2
    >>> prevent_none('toto', 2)
    'toto'
    '''
    return replacement_value if value is None else value
167

168
def advanced_filter(list_searched, filter_str):
    '''
    Check a list of strings against a space-separated filter string.

    Each word of `filter_str` must be found (case-insensitively, as a
    substring) in at least one of the searched strings; a word prefixed
    with '-' must be found in none of them.  Empty words (eg. from a
    double space, or a lone '-') are ignored.

    Fixed: '-word' previously behaved exactly like 'word' (the '-' was
    stripped but the word was still *required*), contradicting the
    documented "find (or not)" intent.

    >>> advanced_filter(["abc def"], "abc")
    True
    >>> advanced_filter(["abc def"], "xyz")
    False
    >>> advanced_filter(["abc def"], "-xyz")
    True
    >>> advanced_filter(["abc def"], "-abc")
    False
    '''
    # Build a real list (not a lazy map()): it is scanned once per word
    haystack = [s.lower() for s in list_searched]

    for word in filter_str.split(" "):
        negated = word.startswith("-")
        pattern = (word[1:] if negated else word).lower()
        if not pattern:
            # nothing to check for an empty word
            continue
        found = any(pattern in item for item in haystack)
        # a positive word must be found, a negated word must not be
        if found == negated:
            return False
    return True
183
184


185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
def put_properties_in_dict(src_dict, dest_dict, properties):
    '''
    Put the values of src_dict in dest_dict.
    Only keys that are keys in properties are copied to dest_dict.
    The key in dest_dict is determined by properties[key]

    >>> put_properties_in_dict({'toto': [1, 2], 'tutu': 'A'}, {'toto': 3, 'machin': 2}, {'toto': 'toto', 'titi': 'titi', 'tutu': 'truc'}) == {'toto': [1, 2], 'truc': 'A', 'machin': 2}
    True
    '''
    # Plain dict iteration instead of iterkeys(): works on both
    # Python 2 and Python 3 (iterkeys() was removed in Python 3).
    for key in properties:
        if key in src_dict:
            dest_dict[properties[key]] = src_dict[key]
    return dest_dict


200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
#### Utilities on regex
def search_first_regex_in_file(regex, filename, max_nb_line=None):
    '''
    Search each compiled regex of `regex` in `filename` and collect the
    named groups of the first matching line of each regex.

    Note: despite its name, `max_nb_line` is passed to readlines() as a
    size *hint* (approximate number of bytes to read), not a line count.
    Backslashes are stripped from the matched values.  An unreadable
    file yields an empty dict.
    '''
    try:
        # `with` guarantees the handle is closed (it used to leak)
        with open(filename) as source:
            if max_nb_line is None:
                results = source.readlines()
            else:
                results = source.readlines(max_nb_line)
    except IOError:
        results = []

    matched_keys = {}
    for r in regex:
        for line in results:
            m = r.search(line)
            if m:
                for (key, val) in m.groupdict().items():
                    matched_keys[key] = val.replace('\\', '')
                break
    return matched_keys

220
221
222

#### Utilities on JSON

223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
def cleanup_json_sample(json_string):
    '''
    Takes a JSON sample and close the ) ] " ' so that
    the string can be parsed by a JSON parser.
    >>> cleanup_json_sample('"toto": [ [ 1 ], "t')
    '"toto": [ [ 1 ], "t"]'
    >>> cleanup_json_sample('"toto": [ [ 1 ], ')
    '"toto": [ [ 1 ]]'
    >>> cleanup_json_sample('{"germlines": {"custom": {"3": [')
    '{"germlines": {"custom": {"3": []}}}'
    >>> cleanup_json_sample('{"germlines": {"custom": {"3":')
    '{"germlines": {"custom": {}}}'
    >>> cleanup_json_sample('{"germlines": {"custom": {"3')
    '{"germlines": {"custom": {}}}'
    >>> cleanup_json_sample('{"germlines": {"custom": {"3": [2], "2')
    '{"germlines": {"custom": {"3": [2]}}}'
    >>> cleanup_json_sample('{"germlines": {"custom": {"3": [2], "2": "truc"')
    '{"germlines": {"custom": {"3": [2], "2": "truc"}}}'

    '''
    start_delimiters = ['{', '[', '"', "'"]
    end_delimiters = ['}', ']', '"', "'"]

    delimiter_stack = []       # currently-open delimiters, innermost last
    pos_isolated_comma = None  # position of a trailing comma not yet followed by a value

    for i, char in enumerate(json_string):
        if char in start_delimiters or char in end_delimiters:
            # Which opening delimiter would `char` close?
            # (quotes open and close themselves; '{' and '[' close nothing)
            try:
                corresponding_delimiter = start_delimiters[end_delimiters.index(char)]
            except ValueError:
                corresponding_delimiter = None
            if len(delimiter_stack) == 0 or delimiter_stack[-1][0] != corresponding_delimiter:
                delimiter_stack.append(char)
            else:
                delimiter_stack.pop()
            pos_isolated_comma = None
        elif char == ',':
            pos_isolated_comma = i

    # A trailing ", ..." with no value: drop everything from the comma on
    if pos_isolated_comma is not None:
        json_string = json_string[:pos_isolated_comma]
    json_string = json_string.strip()

    # Closing delimiters to append, in closing order
    delimiter_stack.reverse()
    # List comprehension instead of map(): the len()/pop(0) calls below
    # require a real list (map() returns an iterator on Python 3)
    end_delimiter_stack = [end_delimiters[start_delimiters.index(c)] for c in delimiter_stack]

    if (len(end_delimiter_stack) > 0 and end_delimiter_stack[0] == '}')\
       or (len(end_delimiter_stack) > 1 and end_delimiter_stack[0] in ['"', "'"] and end_delimiter_stack[1] == '}'):
        # We didn't close a dict. Are we in the middle of a property (eg. "toto": )
        last_colon = json_string.rfind(':')
        last_bracket = json_string.rfind('{')
        last_comma = json_string.rfind(',')-1
        property_start = max(last_comma, last_bracket)
        if last_colon == len(json_string)-1\
           or property_start > last_colon:
            # Drop the dangling property...
            json_string = json_string[:property_start+1]
            # ...and the quote that opened its (now removed) key
            if len(end_delimiter_stack) > 1 and end_delimiter_stack[0] != '}':
                end_delimiter_stack.pop(0)


    return json_string + ''.join(end_delimiter_stack)



288
289
290
291
292
293
def extract_value_from_json_path(json_path, json):
    '''
    Highly inspired from http://stackoverflow.com/a/7320664/1192742

    Takes a path (for instance field1/field2/field3) and returns
    the value at that path.
    The path also support indexed opeations (such as field1/field2[3]/field4)

    If the value doesn't exist None will be returned.
    '''
    elem = json
    try:
        for x in json_path.strip("/").split("/"):
            # 'field[3]' means: get 'field' then take item 3 of the list
            list_pos = re.search(r'[[]\d+[]]', x)
            if list_pos is not None:
                span = list_pos.span()
                index = int(x[span[0]+1:span[1]-1])
                x = x[:span[0]]
                elem = elem.get(x)[index]
            else:
                elem = elem.get(x)
    except (AttributeError, TypeError, IndexError, KeyError):
        # Broken path (missing key, wrong type, out-of-range index):
        # honour the documented contract and return None instead of
        # the partially-traversed element that used to leak out here.
        return None

    return elem

314
def extract_fields_from_json(json_fields, pos_in_list, filename, max_bytes = None):
    '''
    Takes a map of JSON fields (the key is a common name
    and the value is a path) and return a similar map
    where the values are the values from the JSON filename.

    If the value retrieved from a JSON is an array, we will
    get only the item at position <pos_in_list> (if None, will
    get all of them)

    When `max_bytes` is given, only that many bytes are read and the
    (probably truncated) JSON is repaired with cleanup_json_sample().
    Unreadable or unparsable files yield an empty result.
    '''
    # `basestring` does not exist on Python 3; fall back to str there
    try:
        string_types = basestring
    except NameError:
        string_types = str

    try:
        with open(filename) as json_file:
            if max_bytes is None:
                json_dict = json.loads(json_file.read())
            else:
                json_dict = json.loads(cleanup_json_sample(json_file.read(max_bytes)))
    except IOError:
        current.log.debug('JSON loading failed')
        json_dict = {}
    except ValueError as e:
        current.log.debug(str(e))
        # Fixed: json_dict used to be left undefined here, raising a
        # NameError further down whenever the JSON did not parse.
        json_dict = {}

    matched_keys = {}
    for field in json_fields:
        value = extract_value_from_json_path(json_fields[field], json_dict)
        if value is not None:
            # For list values, keep only the requested sample position
            # (strings are sequences too, hence the explicit exclusion)
            if  not isinstance(value, string_types) and pos_in_list is not None\
                and len(value) > pos_in_list:
                matched_keys[field] = value[pos_in_list]
            else:
                matched_keys[field] = value

    return matched_keys

346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461

####


STATS_READLINES = 1000 # approx. size in which the stats are searched
STATS_MAXBYTES = 500000 # approx. size in which the stats are searched


def stats(samples):
    '''
    Collect statistics for a list of samples.

    `samples` iterates over tuples (metadata, f_result, f_fused, pos_in_fused):
    f_result is the path of the program output (log + JSON), f_fused the
    path of the fused file (may be falsy when absent), pos_in_fused the
    position of the sample inside that fused file.

    Returns a list of row dicts (one per sample) mixing values parsed
    from the result log (regexes), the result JSON and the fused JSON.
    '''

    stats_regex = [
        # found 771265 40-windows in 2620561 segments (85.4%) inside 3068713 sequences # before 1f501e13 (-> 2015.05)
        r'in (?P<seg>\d+) segments \((?P<seg_ratio>.*?)\) inside (?P<reads>\d+) sequences',

        # found 10750 50-windows in 13139 reads (99.9% of 13153 reads)
        r'windows in (?P<seg>\d+) reads \((?P<seg_ratio>.*?) of (?P<reads>\d+) reads\)',

        # segmentation causes
        r'log.* SEG_[+].*?-> (?P<SEG_plus>.*?).n',
        r'log.* SEG_[-].*?-> (?P<SEG_minus>.*?).n',
    ]

    # stats by locus
    for locus in defs.LOCUS:
        locus_regex = locus.replace('+', '[+]')
        locus_group = locus.replace('+', 'p')
        stats_regex += [ r'log.* %(locus)s.*?->\s*?(?P<%(locus_g)s_reads>\d+)\s+(?P<%(locus_g)s_av_len>[0-9.]+)\s+(?P<%(locus_g)s_clones>\d+)\s+(?P<%(locus_g)s_av_reads>[0-9.]+)\s*.n'
                         % { 'locus': locus_regex, 'locus_g': locus_group } ]

    # JSON paths extracted from the result file and from the fused file
    json_paths = {
        'result_file': {
            'main_clone': '/clones[0]/name',
            'main_clone_reads': '/clones[0]/reads[0]'
        },
        'fused_file': {
                  'reads distribution [>= 10%]': 'reads/distribution/0.1',
                  'reads distribution [>= 1% < 10%]': 'reads/distribution/0.01',
                  'reads distribution [>= .01% < 1%]': 'reads/distribution/0.001',
                  'reads distribution [>= .001% < .01%]': 'reads/distribution/0.0001',
                  'reads distribution [>= .0001% < .001%]': 'reads/distribution/0.00001',
                  'producer': 'samples/producer'
        }
    }

    keys_patient = [ 'info' ]
    keys_file = [ 'sampling_date', 'size_file' ]

    keys = []
    keys += keys_file
    keys += keys_patient

    # compile the regexes once, and register their group names as columns
    regex = []
    for sr in stats_regex:
        r = re.compile(sr)
        regex += [r]
        keys += r.groupindex.keys()

    # list() around .keys() so the concatenation also works on Python 3
    keys += sorted(list(json_paths['result_file'].keys()) + list(json_paths['fused_file'].keys()))

    tab = []
    found = {}

    for (metadata, f_result, f_fused, pos_in_fused) in samples:
        row = {}
        row_result = search_first_regex_in_file(regex, f_result, STATS_READLINES)
        row['result'] = row_result # TMP, for debug
        try:
            # Fixed: this used to reference an undefined `results_f`,
            # so the extraction always failed silently. We now use
            # `f_result`, assumed to be a full path as in the
            # search_first_regex_in_file() call above — TODO confirm
            # no defs.DIR_RESULTS prefix is needed here.
            row_result_json = extract_fields_from_json(json_paths['result_file'], None, f_result, STATS_MAXBYTES)
        except Exception:
            row_result_json = []

        if f_fused:
            try:
                row_fused = extract_fields_from_json(json_paths['fused_file'], pos_in_fused, f_fused, STATS_MAXBYTES)
            except ValueError:
                row_fused = []
        else:
            row_fused = {}
        results_list = [row_result, row_result_json, row_fused]

        # merge every parsed source into the row, remembering which
        # columns were actually seen at least once
        for key in keys:
            for map_result in results_list:
                if key in map_result:
                    row[key] = map_result[key]
                    found[key] = True
            if key not in found:
                if key in keys_patient:
                    row[key] = "TODO" + key  # metadata['patient'][key]
                    found[key] = True
                elif key in keys_file:
                    row[key] = "TODO" + key  # metadata['sequence_file'][key]
                    found[key] = True
                else:
                    row[key] = ''

        tab += [row]

    # Re-process some data
    keys += ['IGH_av_clones']
    for row in tab:
        row['IGH_av_clones'] = ''
        if 'IGH_av_reads' in row:
            try:
                # average clone size is the inverse of the average read count
                row['IGH_av_clones'] = '%.4f' % (1.0 / float(row['IGH_av_reads']))
                found['IGH_av_clones'] = True
            except (ValueError, TypeError, ZeroDivisionError):
                pass

    # Keep only non-empty columns
    res = []
    for key in keys:
        if key in found:
            res += [key]

    return tab # res # TODO

462
463
####

464
465
466
467
468
# Template of a link to a given line of a source file on the public repository
SOURCES = "https://github.com/vidjil/vidjil/blob/master/server/web2py/applications/vidjil/%s#L%s"
# Directory (inside the web2py application) of each known source file;
# any file not listed here is assumed to live in SOURCES_DIR_DEFAULT
SOURCES_DIR_DEFAULT = 'controllers/'
SOURCES_DIR = {
    'task.py': 'models/',
    'db.py': 'models/',
    'sequence_file.py': 'models/',
    'vidjil_utils.py': 'modules/',
}


# Patterns recognized inside log lines (used by log_links below)
log_patient = re.compile('\((\d+)\)')   # "(123)"     -> patient id
log_config = re.compile(' c(\d+)')      # " c11"      -> config id
log_task = re.compile('\[(\d+)\]')      # "[1234]"    -> task id
log_py = re.compile('(.*[.]py):(\d+)')  # "file.py:42" -> source file and line
478
479
480
481
482
483

def log_links(s):
    '''Add HTML links to a log string

    At most one link is produced per string; the task link wins over
    the patient link, and a .py source reference wins over both.

    >>> log_links("abcdef")
    'abcdef'
    >>> log_links("[1234]abcdef")
    '[<a class="loglink pointer" onclick="db.call(\\'admin/showlog\\', {\\'file\\': \\'../..//mnt/result/tmp/out-001234/001234.vidjil.log\\', \\'format\\': \\'raw\\'})">1234</a>]abcdef'
    >>> log_links("abcdef(234)")
    'abcdef(<a class="loglink pointer" onclick="db.call(\\'patient/info\\', {\\'id\\': \\'234\\'})">234</a>)'
    >>> log_links("abcdef(234)abcdef c11")
    'abcdef(234)abcdef <a class="loglink pointer" href="?patient=234&config=11">c11</a>'
    '''

    ### Parses the input string

    # "(123)" -> patient id
    m_patient = log_patient.search(s)
    patient = m_patient.group(1) if m_patient else None

    # " c11" -> config id
    m_config = log_config.search(s)
    config = m_config.group(1) if m_config else None

    # "[1234]" -> task id
    m_task = log_task.search(s)
    task = int(m_task.group(1)) if m_task else None

    # "file.py:42" -> source file; resolve its directory inside the app
    m_py = log_py.search(s)
    if m_py:
        source = m_py.group(1)
        if source in SOURCES_DIR:
            source = SOURCES_DIR[source] + source
        else:
            source = SOURCES_DIR_DEFAULT + source

    ### Rules
    # Later rules override earlier ones; (start, end) delimit the part
    # of `s` that becomes the link text.

    url = ''  # href link
    call = '' # call to db

    if patient and config:
        url = "?patient=%s&config=%s" % (patient, config)
        (start, end) = m_config.span()
        start += 1  # skip the leading space of " c11"

    elif patient:
        call = "patient/info"
        args = {'id': patient}
        (start, end) = m_patient.span()
        # link only the digits, not the surrounding parentheses
        start += 1
        end -= 1

    if task:
        call = "admin/showlog"
        args = {'file': '../../' + defs.DIR_OUT_VIDJIL_ID % task + defs.BASENAME_OUT_VIDJIL_ID % task + '.vidjil.log', 'format': 'raw'}
        (start, end) = m_task.span()
        # link only the digits, not the surrounding brackets
        start += 1
        end -= 1

    if m_py:
        (start, end) = m_py.span(2)
        url = SOURCES % (source, m_py.group(2))

    ### Build final string

    # A db call takes precedence over a plain href when both are set.
    # `args` is always defined whenever `call` is non-empty.
    link = ''
    if url:
        link = 'href="%s"' % url
    if call:
        link = '''onclick="db.call('%s', %s)"''' % (call, str(args))

    if link:
        s = '%s<a class="loglink pointer" %s>%s</a>%s' % (s[:start], link, s[start:end], s[end:])

    return s
551
552
553
554
555
556
557
558

def check_enough_space(directory):
    '''
    Return True when the filesystem holding `directory` has at least
    FS_LOCK_THRESHHOLD percent of its capacity still available
    (as reported by `df`).
    '''
    import subprocess
    # universal_newlines=True: df output comes back as text on both
    # Python 2 and 3 (on Python 3 the default would be bytes)
    df = subprocess.Popen(["df", directory], stdout=subprocess.PIPE,
                          universal_newlines=True)
    output = df.communicate()[0]
    device, size, used, available, percent, mountpoint = output.split("\n")[1].split()
    available = int(available)
    size = int(size)
    # Float division is essential: on Python 2, FS_LOCK_THRESHHOLD/100
    # with an integer threshold truncated to 0, so the check always passed.
    result = available >= (size * (defs.FS_LOCK_THRESHHOLD / 100.0))
    return result
Ryan Herbert's avatar
Ryan Herbert committed
561
562
563
564
565

def get_found_types(data):
    '''Return the keys of `data` that are known sample-set types.'''
    known = {defs.SET_TYPE_PATIENT, defs.SET_TYPE_RUN, defs.SET_TYPE_GENERIC}
    return known.intersection(data.keys())
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587

def reset_db(db):
    '''
    Truncate every existing table of the database.
    MySQL refuses to truncate tables referenced by foreign keys, so the
    checks are disabled around the operation and restored afterwards.
    '''
    is_mysql = db._uri[:5] == "mysql"
    # if using mysql disable foreign keys to be able to truncate
    if is_mysql:
        db.executesql('SET FOREIGN_KEY_CHECKS = 0;')
    try:
        for table in db:
            existing = None
            try:
                # check if table exists (db can contain tables that don't exist, like auth_cas)
                existing = db(table.id > 0).select(limitby=(0, 1))
            except:
                pass
            if existing is not None:
                table.truncate()
    finally:
        # lets not forget to renable foreign keys
        if is_mysql:
            db.executesql('SET FOREIGN_KEY_CHECKS = 1;')
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690

def init_db_helper(db, auth, force=False, admin_email="plop@plop.com", admin_password="1234"):
    from gluon.main import save_password
    from permission_enum import PermissionEnum
    if (force) or (db(db.auth_user.id > 0).count() == 0) : 
        if force:
            reset_db(db)

        id_first_user=""

        ## création du premier user
        id_first_user=db.auth_user.insert(
            password = db.auth_user.password.validate(admin_password)[0],
            email = admin_email,
            first_name = 'System',
            last_name = 'Administrator'
        )

        # set web2py administration interface password to the same as vidjil admin password
        save_password(admin_password, 443)

        ## création des groupes de base
        id_admin_group=db.auth_group.insert(role='admin')
        id_sa_group=db.auth_group.insert(role='user_1')
        id_public_group=db.auth_group.insert(role="public")

        db.auth_membership.insert(user_id=id_first_user, group_id=id_admin_group)
        db.auth_membership.insert(user_id=id_first_user, group_id=id_sa_group)
        db.auth_membership.insert(user_id=id_first_user, group_id=id_public_group)

        ## base Vidjil configs

        db.config.insert(
            name = 'default + extract reads',
            program = 'vidjil',
            command = '-c clones -3 -z 100 -r 1 -g germline/homo-sapiens.g -e 1 -2 -d -w 50 -U ',
            fuse_command = '-t 100',
            info = 'Same as the default "multi+inc+xxx" (multi-locus, with some incomplete/unusual/unexpected recombinations), and extract analyzed reads in the "out" temporary directory.'
        )
        db.config.insert(
            name = 'multi+inc+xxx',
            program = 'vidjil',
            command = '-c clones -3 -z 100 -r 1 -g germline/homo-sapiens.g -e 1 -2 -d -w 50 ',
            fuse_command = '-t 100',
            info = 'multi-locus, with some incomplete/unusual/unexpected recombinations'
        )
        db.config.insert(
            name = 'multi+inc',
            program = 'vidjil',
            command = '-c clones -3 -z 100 -r 1 -g germline/homo-sapiens.g -e 1 -w 50 ',
            fuse_command = '-t 100',
            info = 'multi-locus, with some incomplete/unusual recombinations'
        )
        db.config.insert(
            name = 'multi',
            program = 'vidjil',
            command = '-c clones -3 -z 100 -r 1 -g germline/homo-sapiens.g:IGH,IGK,IGL,TRA,TRB,TRG,TRD -i -e 1 -d -w 50 ',
            fuse_command = '-t 100',
            info = 'multi-locus, only complete recombinations'
        )
        db.config.insert(
            name = 'TRG',
            program = 'vidjil',
            command = '-c clones -3 -z 100 -r 1 -g germline/homo-sapiens.g:TRG ',
            fuse_command = '-t 100',
            info = 'TRG, VgJg'
        )
        db.config.insert(
            name = 'IGH',
            program = 'vidjil',
            command = '-c clones -w 60 -d -3 -z 100 -r 1 -g germline/homo-sapiens.g:IGH ',
            fuse_command = '-t 100',
            info = 'IGH, Vh(Dh)Jh'
        )

        ## permission
        ## system admin have admin/read/create rights on all patients, groups and configs
        auth.add_permission(id_admin_group, PermissionEnum.access.value, db.sample_set, 0)
        auth.add_permission(id_admin_group, PermissionEnum.access.value, db.patient, 0)
        auth.add_permission(id_admin_group, PermissionEnum.access.value, db.run, 0)
        auth.add_permission(id_admin_group, PermissionEnum.access.value, db.generic, 0)
        auth.add_permission(id_admin_group, PermissionEnum.access.value, db.config, 0)
        auth.add_permission(id_admin_group, PermissionEnum.access.value, db.pre_process, 0)
        auth.add_permission(id_admin_group, PermissionEnum.access.value, db.auth_group, 0)
        auth.add_permission(id_admin_group, PermissionEnum.admin.value, db.sample_set, 0)
        auth.add_permission(id_admin_group, PermissionEnum.admin.value, db.patient, 0)
        auth.add_permission(id_admin_group, PermissionEnum.admin.value, db.generic, 0)
        auth.add_permission(id_admin_group, PermissionEnum.admin.value, db.run, 0)
        auth.add_permission(id_admin_group, PermissionEnum.admin_group.value, db.auth_group, 0)
        auth.add_permission(id_admin_group, PermissionEnum.admin_config.value, db.config, 0)
        auth.add_permission(id_admin_group, PermissionEnum.admin_pre_process.value, db.pre_process, 0)
        auth.add_permission(id_admin_group, PermissionEnum.read.value, db.sample_set, 0)
        auth.add_permission(id_admin_group, PermissionEnum.read.value, db.patient, 0)
        auth.add_permission(id_admin_group, PermissionEnum.read.value, db.run, 0)
        auth.add_permission(id_admin_group, PermissionEnum.read.value, db.generic, 0)
        auth.add_permission(id_admin_group, PermissionEnum.read_group.value, db.auth_group, 0)
        auth.add_permission(id_admin_group, PermissionEnum.read_config.value, db.config, 0)
        auth.add_permission(id_admin_group, PermissionEnum.read_pre_process.value, db.pre_process, 0)
        auth.add_permission(id_admin_group, PermissionEnum.create.value, db.sample_set, 0)
        auth.add_permission(id_admin_group, PermissionEnum.create_group.value, db.auth_group, 0)
        auth.add_permission(id_admin_group, PermissionEnum.create_config.value, db.config, 0)
        auth.add_permission(id_admin_group, PermissionEnum.create_pre_process.value, db.pre_process, 0)
        auth.add_permission(id_admin_group, 'impersonate', db.auth_user, 0)
691
692
693
694
695
696
697
698

        auth.add_permission(id_public_group, PermissionEnum.read_config.value, db.config, 0)
        for config in db(db.config.id > 0).select():
            auth.add_permission(id_public_group, PermissionEnum.access.value, db.config, config.id)

        auth.add_permission(id_public_group, PermissionEnum.read_pre_process.value, db.pre_process, 0)
        for pre_process in db(db.pre_process.id > 0).select():
            auth.add_permission(id_public_group, PermissionEnum.access.value, db.pre_process, pre_process.id)
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723

        tags = ['ALL', 'T-ALL',  'B-ALL',
                'pre-B-ALL','pro-B-ALL', 'mature-B-ALL',
                'CML', 'HCL', 'MZL', 'T-PLL',
                'CLL', 'LGL',
                'lymphoma',
                'MCL', 'NHL', 'HL', 'FL', 'DLBCL',
                'WM', 'MAG',
                'MM',
                'diagnosis', 'MRD', 'relapse', 'CR', 'deceased',
                'pre-BMT', 'post-BMT', 'pre-SCT', 'post-SCT',
                'dilution', 'standard',
                'QC', 'EuroMRD',
                'marrow', 'blood',
                'repertoire',
                'TIL', 'CAR-T', 'scFv',
                'FR1', 'FR2', 'FR3',
                'TRA', 'TRB', 'TRG', 'TRD',
                'IGH', 'IGK', 'KDE', 'IGL',
                'IKAROS',
                'BCR-ABL', 'TEL-AML1', 'E2A-PBX',
                'BCL2',
                'PAX5']
        for tag in tags:
            tid  = db.tag.insert(name=tag)
724
            db.group_tag.insert(group_id=id_public_group, tag_id=tid)