#!/usr/bin/python3


import asyncio
import collections
from   concurrent.futures   import ThreadPoolExecutor
import contextlib
import datetime
import enum
import itertools
import json
import logging
import re
import os
import shlex
import signal
import socket
import sys
import time
import threading
import traceback

import docker
import MySQLdb
from   sqlalchemy import desc
import sqlalchemy.orm.scoping
import yaml

import config_reader
from database import *
from shared_swarm import SharedSwarmClient, ShuttingDown

HOST_PATH="/vol/host/"

##################################################
# default number of concurrent tasks

# pushing images from the sandbox to the registry
NB_PUSH_SANDBOX = 2

# pulling images from the registry to the sandbox
NB_PULL_SANDBOX = 2

# pulling images from the registry to the swarm
NB_PULL_SWARM   = 2

# sandbox actions (start, stop, commit, ...)
NB_SANDBOX_TASKS = 4

# jobs
NB_JOB_TASKS     = 4


# default thread pool (used by most of the tasks)
default_executor = ThreadPoolExecutor(10)

##################################################

log = logging.getLogger("controller")

assert MySQLdb.threadsafety >= 1


class Error(Exception):
    pass

def docker_check_error(func, *k, **kw):
    """Wrapper for docker-py methods that produce a stream

    Methods producing a stream (e.g. push, build) do not report all errors
    by raising exceptions. Some errors are reported later in the stream.

    This function parses the stream and raises Error() if needed.
    """
    for elem in func(*k, stream=True, **kw):
        js = json.loads(elem.decode())
        if "error" in js:
            raise Error("push error: " + js["error"])

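# Illustrative usage sketch (not part of the original controller); the client
# and repository name below are hypothetical:
#
#   client = docker.Client()
#   docker_check_error(client.push, "registry.example.org/app/foo", tag="1.0")
#
# Any {"error": ...} element found in the stream raises Error() instead of
# being silently dropped.
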
@contextlib.contextmanager
def docker_warning(msg, *k, ignore=None):
    """Catch docker errors and issue a warning instead"""
    try:
        yield
    except docker.errors.APIError as e:
        if ignore is None or not isinstance(e, ignore):
            k += e,
            log.warning(msg + " (%s)", *k)

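# Illustrative usage sketch (not part of the original controller): best-effort
# cleanup where a missing container is not worth a warning. The client and
# container name below are hypothetical.
#
#   with docker_warning("cleanup error: unable to remove container %r",
#           "some-container", ignore=docker.errors.NotFound):
#       client.remove_container("some-container", force=True)
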
@contextlib.contextmanager
def report_error(fmt, *k):
    """Context manager for logging exceptions

    This function logs exceptions (when leaving the context) with log.error()
    (if the exception inherits from Error) or log.exception() otherwise.

    The log message is prepended with the string generated by: fmt % k
    """
    try:
        yield
    except Exception as e:
        msg = fmt % k
        log_func = log.error if isinstance(e, Error) else log.exception
        log_func("%s (%s)", msg,
                traceback.format_exception_only(type(e), e)[-1].strip())
        raise

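# Illustrative usage sketch (not part of the original controller); the job id
# below is hypothetical:
#
#   with report_error("job %d", 42):
#       do_something_risky()    # any exception is logged with the "job 42"
#                               # prefix and then re-raised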

def auto_create_task(func):
    """Decorator for forcing the creation of a task when a coroutine function is called

    Return a wrapper that calls the function, creates the task on the fly and
    returns it. It also installs a no-op callback to avoid warnings in case the
    result is not used.
    """
    assert asyncio.iscoroutinefunction(func)

    def wrapper(*k, **kw):
        tsk = asyncio.async(func(*k, **kw))
        # ignore warning about result not used
        tsk.add_done_callback(lambda f: f.exception())
        return tsk
    return wrapper

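# Illustrative usage sketch (not part of the original controller): calling a
# decorated coroutine schedules it immediately as a task, so the caller may
# ignore the returned value. The coroutine below is hypothetical.
#
#   @auto_create_task
#   @asyncio.coroutine
#   def notify(msg):
#       yield from asyncio.sleep(1)
#
#   notify("hello")     # already scheduled as a task, result may be ignored
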
def cascade_future(src, dst):
    """propagate the result of a future to another future
    
    This function installs a callback on the future `src` that propagates its
    result to the future `dst`.
    """
    def callback(fut):
        ex = fut.exception()
        if ex is None:
            dst.set_result(fut.result())
        else:
            dst.set_exception(ex)

    if src.done():
        callback(src)
    else:
        src.add_done_callback(callback)

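# Illustrative usage sketch (not part of the original controller):
#
#   src, dst = asyncio.Future(), asyncio.Future()
#   cascade_future(src, dst)
#   src.set_result(42)  # dst receives 42 once the loop runs the callback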

class Manager:
    """A class for scheduling asynchronous jobs on a collection of keys

    (the one-line summary is not very helpful, sorry)
    

    Job submission

    The job is scheduled on key KEY by calling .process(KEY), the function
    returns an asyncio.Future object (which will receive the result)

    If .process(KEY) is called while a job is already running for KEY, then the
    manager arranges for the job to be run a second time in a row for the same key


    Job implementation

    This is an abstract class. The job shall be implemented as the coroutine
    named ._process() in the inherited class.

    The manager guarantees that ._process() cannot be called multiple times
    concurrently for the same key

    In case a shutdown is requested (see .shutdown()):
     - all jobs that are not yet started are cancelled
     - running jobs continue until they return or until they try to acquire the
       internal semaphore (which raises ShuttingDown())


    Concurrency

    The job tasks are all started immediately (regardless of the number of
    existing jobs)

    It is possible to limit the concurrency to a maximum number of parallel
    tasks. Manager provides an internal semaphore (the number of tokens is set
    in .__init__())

    The semaphore can be locked in two ways:
     - by locking the manager:

            with (yield from self): ... concurrency limited to `nb_tokens`
            tasks ...

     - by calling .run_in_executor (this is for running non-async code):

            yield from self.run_in_executor(func, args...)

    Shutdown

    When .shutdown() is called the Manager ensures that all jobs are properly
    terminated
     - it cancels all jobs that are not yet started
     - it prevents starting new jobs
     - it lets pending jobs keep running, but interrupts them when they try to
       acquire the internal semaphore

    All cancelled/interrupted tasks have their future raise ShuttingDown().

    .shutdown() returns after all pending jobs are terminated.


    Thread safety

    - Manager is *not* thread safe, all public methods must be called from the
      same thread
    """

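    # Illustrative sketch (not part of the original controller): a minimal
    # subclass following the contract described above. The class and key
    # names are hypothetical.
    #
    #   class EchoManager(Manager):
    #       @asyncio.coroutine
    #       def _process(self, key, reset):
    #           # blocking work in a thread, limited to `nb_tokens` at once
    #           return (yield from self.run_in_executor(print, "processing", key))
    #
    #   mgr = EchoManager(nb_tokens=2)
    #   fut = mgr.process("some-key")       # asyncio.Future with the result
    #   ...
    #   yield from mgr.shutdown()           # graceful shutdown (from a coroutine)
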
    class _Handle:
        __slots__ = "key", "cur", "nxt"

    def __init__(self, nb_tokens=1, *, executor = default_executor, interruptible = False):
        # {key: _Handle}
        self._handles = {}
        self._semaphore = asyncio.Semaphore(nb_tokens)
        self._shutdown = asyncio.Future()
        self._executor = executor
        self._interruptible = interruptible

    def _create_task(self, hnd):
        assert hnd.nxt is None
        def reset():
            assert not hnd.cur.done()
            nxt = hnd.nxt
            if nxt is not None:
                cascade_future(hnd.cur, nxt)
                hnd.nxt = None

        hnd.cur = asyncio.async(self._process(hnd.key, reset))
        hnd.cur.add_done_callback(lambda fut: self._done(hnd))
        log.debug("task scheduled %r %r", self, hnd.key)
        return hnd.cur

    def process(self, key):
        """Schedule the job to be run on key `key`

        returns an asyncio.Future (that will provide the result of ._process())
        """
        if self._shutdown.done():
            return self._shutdown

        hnd = self._handles.get(key)
        if hnd is None:
            # create handle
            self._handles[key] = hnd = self._Handle()
            hnd.key = key
            hnd.cur = None
            hnd.nxt = None

        if hnd.cur is None:
            # create task
            return self._create_task(hnd)
        else:
            # reschedule task
            if hnd.nxt is None:
                hnd.nxt = asyncio.Future()
            return hnd.nxt

    def _done(self, hnd):

        assert hnd is self._handles.get(hnd.key)
        assert hnd.cur.done()

        try:
            hnd.cur.result()
        except ShuttingDown:
            pass
        except Exception:
            log.exception("task %r %r unhandled exception", self, hnd.key)

        nxt     = hnd.nxt
        hnd.cur = hnd.nxt = None
        if nxt is None:
            del self._handles[hnd.key]
        else:
            cascade_future(self._create_task(hnd), nxt)


    @asyncio.coroutine
    def __iter__(self):
        """Coroutine for locking the internal semaphore
        
        Usage:
            with (yield from manager):
              ...

        Warning:
            after a shutdown is initiated, this function will always raise
            ShuttingDown()
        """
        ctx = yield from iter(self._semaphore)
        if self._shutdown.done():
            with ctx:
                raise ShuttingDown()
        return ctx


    @asyncio.coroutine
    def run_in_executor(self, *k, lock=True):
        """Run a function in a separate thread (with limited concurrency)

        This function locks the internal semaphore and runs the provided
        function call in a separate thread (using the executor)
        """

        def run():
            coro = asyncio.get_event_loop().run_in_executor(self._executor, *k)
            if self._interruptible:
                return next(asyncio.as_completed((coro, self._shutdown)))
            else:
                return coro
        
        if lock:
            with (yield from self):
                return (yield from run())
        else:
            return (yield from run())


    @asyncio.coroutine
    def _process(self, key, reset):
        """Actual implementation of the job (to be reimplemented in inherited classes)

        The Manager class guarantees that this function cannot be called
        multiple times concurrently on the same key (in case the same key is
        submitted multiple times, the Manager will call this function a second
        time after it has terminated).

        `reset` is a function that may be called to reset the 'dirty' state of
        this key (this is to avoid calling ._process() a second time if not
        necessary)
        """
        raise NotImplementedError()

    @asyncio.coroutine
    def shutdown(self):
        """Initiate a graceful shutdown

        This coroutine terminates once all tasks are properly terminated.
        """
        exc = ShuttingDown()
        self._shutdown.set_exception(exc)
        self._shutdown.exception()  # to avoid asyncio warnings
        # cancel all 'next' tasks
        for hnd in self._handles.values():
            if hnd.nxt is not None:
                hnd.nxt.set_exception(exc)
                hnd.nxt = None

        if not self._interruptible:
            yield from asyncio.gather(
                    *(h.cur for h in self._handles.values() if h.cur is not None),
                    return_exceptions=True)


class SandboxManager(Manager):
    """Manager for sandbox operation

    This manager handles all sandbox operations (start, stop, commit).

    Operations are requested asynchronously in the database:
     - Webapp.sandbox_state=starting (for starting a sandbox)
     - Webapp.sandbox_state=stopping (for stopping a sandbox)
     - WebappVersion with state=sandbox (for committing a new image)

    All state changes on Webapp.sandbox_state are atomic (e.g. if the user
    requests a stop while the sandbox is starting, then the manager finishes
    starting the sandbox but does not update the state, and it runs
    immediately again to stop the sandbox)

    Whatever is the value of Webapp.sandbox_state, the manager first examines
    commit requests and makes the commit if requested.

    If the container already exists before starting the webapp (not possible in
    normal operations), then a recovery image is committed first.

    When a commit is successful, the image manager is notified to push the
    image to the registry and (if it is not a recovery image) to pull it to the
    swarm (because this image will very likely be used soon).

    State changes:

     - sandbox start:
            starting->running     (normal case)
            starting->start_error (error case)

     - sandbox stop:
            stopping->idle        (normal case)
            stopping->stop_error  (error case)

     - image commit:
            sandbox->committed  (normal case)
            sandbox->error      (error case)
            (none)->committed   (recovery version) 
    """

    def __init__(self, ctrl, nb_threads = NB_SANDBOX_TASKS):
        super().__init__(nb_threads)
        self.ctrl = ctrl


    def inspect_sandbox(self, webapp):
        try:
            return self.ctrl.sandbox.inspect_container(
                    self.ctrl.gen_sandbox_name(webapp))
        except docker.errors.NotFound:
            return None


    @staticmethod
    def filter_commit_version(query, webapp_id):
        """Narrow a WebappVersion query to select the candidate versions to be committed"""
        return (query.
                filter_by(webapp_id=webapp_id,
                         state = int(VersionState.SANDBOX))
                    )

    def _start(self, webapp, version):
        """Start a webapp sandbox

        (to be executed in a thread pool)
        """

        ctrl = self.ctrl
        ses  = ctrl.session

        # prepare sandbox parameters

        # docker image
        if version is None:
            image = "%s:%s" % (ctrl.gen_factory_name(webapp.docker_os),
                    webapp.docker_os.version)
        else:
            image = "%s:%s" % (webapp.image_name, version.number)

        log.debug("sandbox %r: using image %r", webapp.docker_name, image)

        # safety checks
        # (because docker_name is used in the paths of the external volumes)
        if ("/" in webapp.docker_name) or (webapp.docker_name in ("", ".", "..")):
            raise Error("malformatted docker_name")

        uid = webapp.id + 2000
        if uid < 2000:
            # just for safety
            raise Error("bad webapp id")

        # remove stale container (if any)
        if self.inspect_sandbox(webapp) is not None:
            self._stop(webapp)

        container = webapp.sandbox_name
        try:
            # prepare the sandbox
            # (create ssh keys)
            ctrl.check_host_path("isdir", ctrl.toolbox_path)
            ctrl.check_host_path("isdir", ctrl.sandbox_path)

            ctrl.sandbox.create_container("busybox:latest", name=container,
                    command = ["/bin/sh", "-c", """
set -ex

export PATH="$PATH:/.toolbox/bin"

# clean sandbox dir
rm -rf {sbx}

# create dirs
for dir in {sbx} {etc} {run}
do
    mkdir -p            ${{dir}}
    chown {uid}:65534   ${{dir}}
    chmod 0700          ${{dir}}
done

# xauth file
touch               {run}/XAuthority
chown {uid}:65534   {run}/XAuthority
chmod 0600          {run}/XAuthority

# generate ssh keys
(for type in ecdsa ed25519 rsa
do
    key={etc}/ssh_host_${{type}}_key
    [ -f $key ] || ssh-keygen -N '' -f $key -t $type >&2
    
    echo -n '{hostname}. ' | cat - ${{key}}.pub
done) > {etc}/ssh_known_hosts

# known_host file for allgo-shell
ssh-keygen -H -f {etc}/ssh_known_hosts
chmod 0644       {etc}/ssh_known_hosts
rm -f            {etc}/ssh_known_hosts.old

# authentication key for allgo-shell
rm -f               {etc}/identity
ssh-keygen -N '' -f {etc}/identity
chown {uid}:65534   {etc}/identity

# forced shell for the sshd config 
cat > {etc}/shell <<EOF
#!/bin/sh

export PATH="\$PATH:/.toolbox/bin"

uid=\`id -u\`
shell="\`getent passwd \$uid 2>/dev/null| cut -d : -f 7\`"
if [ -z "\$shell" ] ; then
    shell=/bin/sh
fi

if [ -n "\$SSH_ORIGINAL_COMMAND" ] ; then
    exec "\$shell" -c "\$SSH_ORIGINAL_COMMAND"
else
    exec "\$shell"
fi

EOF
chmod 755 {etc}/shell

# sshd config
cat > {etc}/sshd_config <<EOF
Port 22
Protocol 2

# turned off because it requires creating a 'sshd' user inside the sandbox
UsePrivilegeSeparation no

StrictModes no

ForceCommand /.sandbox/etc/ssh/shell

PermitRootLogin without-password

PubkeyAuthentication yes
AuthorizedKeysFile  /.sandbox/etc/ssh/identity.pub .ssh/authorized_keys .ssh/authorized_keys2

ChallengeResponseAuthentication no
PasswordAuthentication          no

X11Forwarding yes
X11DisplayOffset 10
PrintMotd no
PrintLastLog no
TCPKeepAlive yes

# Allow client to pass locale environment variables
AcceptEnv LANG LC_*

Subsystem sftp internal-sftp

UsePAM no
EOF
                    """.format(uid=uid,
                        hostname = "%s-sandbox-%s" % (ctrl.env, webapp.docker_name),
                        sbx = "/mnt/%s"         % webapp.docker_name,
                        etc = "/mnt/%s/etc/ssh" % webapp.docker_name,
                        run = "/mnt/%s/run"     % webapp.docker_name,
                        )],
                    host_config = ctrl.sandbox.create_host_config(
                        binds   = {
                            ctrl.sandbox_path: {"bind": "/mnt"},
                            ctrl.toolbox_path: {"bind": "/.toolbox", "mode": "ro"},
                    }))
            ctrl.sandbox.start(container)
            if ctrl.sandbox.wait(container):
                log.debug("sandbox %s output:\n%s", webapp.docker_name,
                        ctrl.sandbox.logs(container).decode(errors="replace"))
                raise Error("sandbox preparation failed")
            ctrl.sandbox.remove_container(container)

            # create and start the sandbox

            etc_dir = os.path.join(ctrl.sandbox_path, webapp.docker_name, "etc")
            run_dir = os.path.join(ctrl.sandbox_path, webapp.docker_name, "run")
            ctrl.check_host_path("isdir", etc_dir)
            ctrl.check_host_path("isdir", run_dir)

            if version is None and webapp.entrypoint:
                # prepend instructions to initialise a dummy entrypoint
                dn, bn = os.path.split(webapp.entrypoint)
                # FIXME: do nothing if entrypoint already exists
                prepare = """
                    {mkdir}
                    test -f {entrypoint} || cat > {entrypoint} <<EOF
#!/bin/sh
echo
echo "This is app '{name}' called with parameters '\$@'"
echo
echo "The workdir contains:"
ls -l
EOF
                    chmod 0755 -- {entrypoint}

                """.format( entrypoint  = shlex.quote(webapp.entrypoint),
                            name        = webapp.docker_name,
                            mkdir       = (("mkdir -p -- %s" % shlex.quote(dn)) if dn else ""))
            else:
                prepare = ""

            command = ["/bin/sh", "-c", """
set -x
export PATH="$PATH:/.toolbox/bin"

{prepare}

# xauth file (needed for X11 forwarding)
touch       /root/.Xauthority
chmod 600   /root/.Xauthority

exec /.toolbox/bin/sshd -D
            """.format(prepare=prepare)]

            ctrl.sandbox.create_container(image, name=container, hostname=container,
                    command = command,
                    host_config = ctrl.sandbox.create_host_config(
                        binds = {
                            etc_dir: {"bind": "/.sandbox/etc", "mode": "ro"},
                            run_dir: {"bind": "/.sandbox/run", "mode": "rw"},
                            ctrl.toolbox_path: {"bind": "/.toolbox", "mode": "ro"},
                            },
                        # TODO: maybe drop other caps
                        cap_drop = ["NET_RAW"],
                        restart_policy = {"Name": "unless-stopped"},
                        network_mode = ctrl.sandbox_network,
                        ))

            ctrl.sandbox.start(container)

        except:
            with docker_warning("cleanup error: unable to remove container %r",
                    container, ignore=docker.errors.NotFound):
                ctrl.sandbox.remove_container(container, force=True)
            raise

    def _commit(self, webapp, versions):
        """Commit a webapp sandbox

        (to be executed in a thread pool)

        The image version is looked up in webapp_versions (where state==sandbox).
        
        In case of any error, a recovery version is committed instead (to avoid
        losing the work done inside the sandbox) and the candidates are put in
        error state.
        """
        ctrl = self.ctrl
        ses  = ctrl.session

        # pre-commit checks
        # - ensure that there is exactly one candidate webapp_version with
        #   state=sandbox
        # - otherwise:
        #   - put all candidates in error state
        #   - create a recovery version

        # version to be committed
        version = None
        
        # error msg (if any)
        error = None

        # version ids to be recovered
        recover = ()

        if len(versions) == 1:
            # normal case (sandbox commit)
            version = versions[0]
            if not version.number:
                error   = "empty version number"
                recover = version.id,

        elif not versions:
            # sandbox rollback (when user drops a sandbox without committing a new image)
            error     = "dangling sandbox"

        else:
            # multiple candidates (should never happen)
            error    = "multiple candidate versions (%s)" % (
                ", ".join(map(repr, sorted(v.number for v in versions))))
            recover = tuple(v.id for v in versions)

        # TODO: make 'sandbox' a reserved name

        if error:
            changelog = "pre-commit error: " + error
            log.error("sandbox %r version id %r: %s", webapp.docker_name, recover, changelog)

            with ses.begin():
                # put all candidates in 'error state'
                if recover:
                    ses.execute('''UPDATE webapp_versions
                            SET changelog=CONCAT(changelog, " [", :changelog, "]"), state=:state
                            WHERE id IN :ids''', dict(changelog=changelog, ids=recover,
                                state=int(VersionState.ERROR)))

                # create a recovery version
                version = WebappVersion(
                        webapp_id = webapp.id,
                        number    = time.strftime("recovery-%Y%m%d-%H%M%S"),
                        changelog = changelog,
                        published = False,
                        state     = int(VersionState.SANDBOX))
                ses.add(version)
            ses.refresh(version)
            ses.expunge(version)

        assert version is not None

        # commit the docker image

        log.debug("dicts %r %r", webapp.__dict__, version.__dict__)
        log.info("commit sandbox %r version %r", webapp.docker_name, version.number)

        container = webapp.sandbox_name
        next_state = image_size = None
        try:
            # stop the container (if stopping or if creating a new sandbox)
            if webapp.sandbox_state in (SandboxState.STOPPING, SandboxState.STARTING):
               ctrl.sandbox.stop(container)
               ctrl.sandbox.wait(container)

            # commit
            cid = ctrl.sandbox.commit(container, webapp.image_name, version.number)
            next_state = VersionState.COMMITTED
            image_size = ctrl.sandbox.inspect_image(cid)["Size"]

            return version, error

        except docker.errors.NotFound:
            error = "commit error: container not found %r" % container
            log.error("%s", error)
            next_state = VersionState.ERROR
            image_size = 0
            ses.execute('''UPDATE webapp_versions
                    SET changelog=CONCAT(changelog, " [commit error: sandbox is down]")
                    WHERE id=%d''' % version.id)

            # here we do not propagate the error to allow starting/stopping the
            # sandbox immediately (without going through sandbox_state=:error)
            return None, error

        except Exception as e:
            log.exception("sandbox %r version %r: unexpected commit error (sandbox may still be recovered)",
                    webapp.docker_name, version.number)
            raise

        finally:
            # NOTE: if anything unexpected happens, the version is
            # left in state 'sandbox' and we propagate the exception to
            # ensure the work done inside the sandbox is not dropped
            # and the sandbox is put in 'error' state
            #
            # The error will not be reported to the user. If this is an issue,
            # then the solution would be to create another error state (to be
            # used when the sandbox is still there).
            # 
            if next_state is not None:
                with ses.begin():
                    ses.execute("UPDATE webapp_versions SET state=%d, docker_image_size=%d WHERE id=%d"
                            % (next_state, image_size, version.id))


    @asyncio.coroutine
    def _manage_commit(self, webapp, versions, *, force=False):
        """Manage sandbox commit (if needed) and notify the image manager

        The commit is performed if one of these conditions is fulfilled:
        - a commit was requested (there is at least one WebappVersion entry
          with state=sandbox for this app)
        - a docker container exists (for this sandbox) and force is true
        """

        if not (versions or (force and self.inspect_sandbox(webapp) is not None)):
            return
        
        # do the commit
        version, error = yield from self.run_in_executor(self._commit, webapp, versions)

        # trigger push/pull operations (but do not wait)
        if version is not None:

            # push to the registry
            self.ctrl.image_manager.push(version.id)

            if not error:
                # preemptive pull to the swarm
                # (the image may be needed soon)
                self.ctrl.image_manager.pull(version.id, swarm=True)


    def _stop(self, webapp):
        """Stop a webapp sandbox

        (to be executed in a thread pool)
        """
        try:
            # FIXME: remove volumes (v=True) too ?
            self.ctrl.sandbox.remove_container(webapp.sandbox_name, force=True)
        except docker.errors.NotFound:
            pass


    @asyncio.coroutine
    def _process(self, webapp_id, reset):
        ctrl = self.ctrl

        log.debug("process sandbox %d", webapp_id)

        ses = ctrl.session
        with ses.begin():
            # current state of the sandbox + load docker os
            webapp = ses.query(Webapp).filter_by(id=webapp_id).one()
            webapp.docker_os

            # version to be started
            sandbox_version = webapp.sandbox_version

            # requested commits
            commit_versions = self.filter_commit_version(ses.query(WebappVersion), webapp_id).all()

            ses.expunge_all()

        # docker name of the sandbox & image
        webapp.sandbox_name = ctrl.gen_sandbox_name(webapp)
        webapp.image_name   = ctrl.gen_image_name(webapp)

        phase = "inspect"
        next_state = fail_state = None
        try:
            if webapp.sandbox_state == SandboxState.STARTING:
                # start the sandbox
                phase = "start"
                next_state = SandboxState.RUNNING
                fail_state = SandboxState.START_ERROR

                # commit (if a sandbox exists)
                yield from self._manage_commit(webapp, commit_versions, force=True)

                if sandbox_version is not None:
                    # ensure version belongs to this application
                    if sandbox_version.webapp_id != webapp.id:
                        raise Error("invalid version id %d (belongs to webapp %d)" % (
                            sandbox_version.id, sandbox_version.webapp_id))

                    # pull requested image
                    yield from ctrl.image_manager.pull(sandbox_version.id)
                else:
                    # pull image
                    yield from ctrl.image_manager.sandbox_pull_manager.process((
                        ctrl.gen_factory_name(webapp.docker_os),
                        webapp.docker_os.version))

                # start sandbox
                yield from self.run_in_executor(self._start, webapp, sandbox_version)

            elif webapp.sandbox_state == SandboxState.STOPPING:
                # stop the sandbox
                phase = "stop"
                next_state = SandboxState.IDLE
                fail_state = SandboxState.STOP_ERROR

                # commit (if requested)
                yield from self._manage_commit(webapp, commit_versions)

                yield from self.run_in_executor(self._stop, webapp)

            else:
                # commit (if requested)
                phase = "commit"
                yield from self._manage_commit(webapp, commit_versions)

        except ShuttingDown:
            next_state = None
            log.info("sandbox %r %s aborted (controller shutdown)", webapp.docker_name, phase)

        except BaseException as e:
            next_state = fail_state

            log_func = log.error if isinstance(e, (docker.errors.APIError, Error)) else log.exception
            log_func ("sandbox %r %s error (%s)", webapp.docker_name, phase,
                    traceback.format_exception_only(type(e), e)[-1].strip())

        finally:
            if next_state is not None:
                # atomically update the sandbox state in the db
                # (in case another action is requested during the process, eg: the user
                #  stops the sandbox while it is not fully started)
                log.info("sandbox %r is now in state %r", webapp.docker_name, next_state.name)
                with ses.begin():
                    ses.execute("UPDATE webapps SET sandbox_state=%d WHERE id=%d AND sandbox_state=%d" %
                            (next_state, webapp_id, webapp.sandbox_state))

            log.debug("done    sandbox %d", webapp_id)


class JobManager(Manager):
    class JobInfo:
        __slots__ = "job_id", "ver_id", "ctr_id", "version", "ctr_name", "client", "cpu", "mem", "node_id"

    def __init__(self, ctrl, bigmem_apps=()):
        super().__init__(0)
        self.ctrl = ctrl

        self.bigmem_apps = bigmem_apps


    @asyncio.coroutine
    def __iter__(self):
        raise NotImplementedError()


    def _create_job(self, info):
        ctrl = self.ctrl
        ses  = ctrl.session
        tmp_img = None

        assert info.ctr_id is None

        try:
            with ses.begin():
                job = ses.query(Job).filter_by(id=info.job_id).one()
                webapp = job.webapp

                log.info("start job %d (%s:%s)",
                        info.job_id, webapp.docker_name, info.version)

                job.state = int(JobState.RUNNING)       # pragma: nobranch (TODO: remove (coverage bug))

            
            repo = ctrl.gen_image_name(webapp)
            image = "%s:%s" % (repo, info.version)

            job_path = ctrl.gen_job_path(job)
            log.debug("job.path: %r", job_path)

            if info.ver_id is None:
                assert info.version == "sandbox"
                image = tmp_img = info.client.commit(ctrl.gen_sandbox_name(webapp), repo, info.version)["Id"]
            
            # TODO use another workdir
            # TODO use another uid

            ctrl.check_host_path("isdir", job_path)
            hc = ctrl.sandbox.create_host_config(
                        binds = {job_path: {"bind": "/tmp"}},
                        cap_drop = ["all"],
                        # FIXME: CAP_DAC_OVERRIDE needed because all nfs files have uid,gid=1000,1000
                        cap_add = ["dac_override"],
                        cpu_quota   = (None if info.cpu is None else (info.cpu * 1024)),
                        cpu_period  = (None if info.cpu is None else 1024),
#                        cpu_shares = info.cpu,
#                        mem_reservation = ctrl.mem_soft_limit,
                        mem_limit = info.mem,
                    )
            if ctrl.mem_soft_limit:
                # TODO: upgrade docker-py (and use create_host_config)
                hc["MemoryReservation"] = ctrl.mem_soft_limit
            # NOTE: cpu_shares has a different meaning in docker swarm and docker engine
            #  - swarm:  nb of cpus
            #  - engine: 1/1024 share of the total cpu resources of the machine
            # engine requires cpu_shares > 1
            if ctrl.cpu_shares:
                # TODO: upgrade docker-py (and use create_host_config)
                hc["CpuShares"] = info.cpu
            log.debug("host_config %r", hc)
            info.ctr_id = info.client.create_container(image, name=info.ctr_name,
                    working_dir = "/tmp",
                    # NOTE: the command line is a little complex, but this is
                    #   to ensure that (TODO write tests for this):
                    #   - no output is lost (we go through a pipe in case the
                    #     app has multiple processes writing to stdout/stderr
                    #     concurrently)
                    #   - we get the exit code of the app (not the exit code of
                    #     cat)
                    #   - we are failsafe (if fifo creation fails then the app
                    #     is run anyway, with the exit code of cat)
                    #   - we have no unusual dependencies (only sh, cat and
                    #     mkfifo)
                    command = ["/bin/sh", "-c", """
                                fifo=/.allgo.fifo.{job_id}
                                if mkfifo "$fifo"
                                then
                                    exec cat <"$fifo" >allgo.log &
                                    exec "$@" >"$fifo" 2>&1 &
                                    wait %1
                                    wait %2
                                    rm "$fifo"
                                else
                                    "$@" 2>&1 | cat >allgo.log
                                fi
                                failcnt="`cat /sys/fs/cgroup/memory/memory.failcnt`"
                                if [ "$failcnt" -ne 0 ] ; then
                                    echo "WARNING: out of memory (memory.failcnt=$failcnt)" >>allgo.log
                                fi
                        """.format(job_id=job.id),
                        "job%d" % job.id, webapp.entrypoint] + shlex.split(job.param),

                    labels = {"allgo.tmp_img": tmp_img or ""},
                    environment=["constraint:node==" + info.node_id],
                    host_config = hc)["Id"]
            info.client.start(info.ctr_id)

            with ses.begin():
                # save the container_id into the db
                job.container_id = info.ctr_id

        except:
            #TODO introduce a state JobState.ERROR
            self._remove_job(info, tmp_img=tmp_img)
            raise


    def _remove_job(self, info, *, tmp_img=None):
        ses = self.ctrl.session

        # TODO: report launch errors to the user
        # TODO: report exit code to the user
        # TODO: use another uid

        def parse_docker_timestamp(value):
            return datetime.datetime.strptime(
                    # limit the precision to the microsecond
                    # (otherwise strptime fails)
                    re.sub(r"(\.\d{,6})\d*Z$", r"\1Z", value),
                    # iso8601 format
                    "%Y-%m-%dT%H:%M:%S.%fZ")

        with ses.begin():
            job = ses.query(Job).filter_by(id=info.job_id).one()

            exec_time = 0.0
            if info.ctr_id is not None:
                try:
                    js = info.client.inspect_container(info.ctr_id)
                except docker.errors.NotFound:
                    pass
                else:
                    started_at  = js["State"].get("StartedAt", "0001-")
                    finished_at = js["State"].get("FinishedAt")

                    # default docker date is '0001-01-01T00:00:00Z'
                    if not started_at.startswith("0001-"):
                        try:
                            exec_time =( parse_docker_timestamp(finished_at)
                                        - parse_docker_timestamp(started_at)
                                        ).total_seconds()
                        except Exception: # pragma: nocover
                            log.exception("job %d: unable to compute exec time", info.job_id)

                    if tmp_img is None:
                        tmp_img = js["Config"]["Labels"].get("allgo.tmp_img") or None

                with docker_warning("job %d: cleanup error: unable to remove container", info.job_id):
                    info.client.remove_container(info.ctr_id)

            if tmp_img is not None:
                with docker_warning("job %d: cleanup error: unable to remove tmp image", info.job_id):
                    info.client.remove_image(tmp_img)

            job.exec_time = exec_time
            job.state     = int(JobState.DONE)
            job.container_id = None

        log.info("stop  job %d (duration %fs)", info.job_id, exec_time)


    @asyncio.coroutine
    def _finish_job(self, info):
        # wait for container to terminate
        if info.ctr_id is not None:
            yield from info.client.wait_async(info.ctr_id)

        # remove container
        yield from self.run_in_executor(self._remove_job, info, lock=False)

    @asyncio.coroutine
    def _process(self, job_id, reset):
        ctrl = self.ctrl
        ses  = ctrl.session
        log.debug("process job id %d", job_id)

        with ses.begin():
            # query db
            job = ses.query(Job).filter_by(id=job_id).first()
            if job is None:     # pragma: nocover
                # unknown job
                log.warning("unknown job id %d", job_id)
                return

            state = JobState(job.state)

            if job.webapp is None:
                log.error("job %d: webapp id %r not found", job_id, job.webapp_id)
                if state == JobState.WAITING:       # pragma: nobranch
                    job.state = int(JobState.DONE)
                    job.exec_time = 0
                # TODO report error to the user ?
                return

            docker_name = job.webapp.docker_name
            
            info = self.JobInfo()
            info.job_id     = job_id
            info.ctr_id     = None
            info.node_id    = None
            info.version    = job.version
            info.ctr_name   = ctrl.gen_job_name(job)

            # NOTE: .cpu .mem are the amount of cpu/mem requested when creating
            # a new job. They do not apply to already created jobs (by a
            # previous instance of the controller)
            info.cpu        = ctrl.cpu_shares
            info.mem        = ctrl.bigmem_hard_limit if docker_name in self.bigmem_apps else ctrl.mem_hard_limit


            if job.version == "sandbox":
                info.client  = ctrl.sandbox
            else:
                info.client  = ctrl.swarm


            if state == JobState.WAITING:
                # job is not yet started
                if job.container_id is not None:
                    log.warning("job %d is in state WAITING but already has a container id: %r (will be ignored)",
                            job.id, job.container_id)

                if job.version == "sandbox":
                    # to be run in the sandbox
                    info.ver_id = None
                else:
                    # to be run in the swarm

                    # Find the wanted WebappVersion
                    #TODO: replace version_id with webapp_version_id
                    ver = ses.query(WebappVersion).filter_by(
                            webapp_id = job.webapp_id,
                            number    = job.version).filter(
                                WebappVersion.state.in_((
                                    int(VersionState.COMMITTED),
                                    int(VersionState.READY)))
                            ).order_by(
                                    WebappVersion.state.desc(),
                                    WebappVersion.id.desc()).first()
                    if ver is None:
                        log.error("job %d: webapp %r version %r not found",
                                job_id, job.webapp.docker_name, job.version)

                        job.state = int(JobState.DONE)
                        # TODO report error to the user
                        return
                    info.ver_id = ver.id

            elif state == JobState.RUNNING:     # pragma: nobranch
                # job is already started

                # we do not care about the actual version_id *but* we need to
                # know whether we are in the swarm or in the sandbox
                info.ver_id = None if job.version == "sandbox" else -1

                if job.container_id is None:
                    # look up container id (if it is not yet in the db)
                    # FIXME: to be removed after migration (the container id is now stored in the db at creation time)
                    try:
                        info.ctr_id = job.container_id = info.client.inspect_container(
                                "%s-job-%d-%s" % (ctrl.env, job.id, job.webapp.docker_name))["Id"]
                    except docker.errors.NotFound:
                        pass
                else:
                    # check the presence of the container and validates its id against its name
                    info.ctr_id = ctrl.check_job_container(info.client, job)
                    
            else:
                # unexpected state
                if state != JobState.DONE:
                    log.warning("job id %d is in unexpected state %s", job_id, state.name)
                return


        if state == JobState.WAITING:
            # job is not yet started

            # pull the image to the swarm
            if info.ver_id is not None:
                # NOTE: race condition: will fail if ver.state==sandbox
                #  jobs must be submitted after the image is committed
                yield from ctrl.image_manager.pull(info.ver_id, swarm=True)

            # request a slot from the shared swarm
            with info.client.request_slot(info.ctr_name, info.cpu or 0, info.mem or 0):
                info.node_id = yield from info.client.wait_slot(info.ctr_name)
                yield from self.run_in_executor(self._create_job, info, lock=False)
            yield from self._finish_job(info)

        elif state == JobState.RUNNING: # pragma: nobranch
            # the job is already running
            # -> wait for its termination
            yield from self._finish_job(info)

# NOTE: for the push/pull managers, interruptible=True guarantees that the
#   managers terminate immediately, however it cannot guarantee that the
#   process will terminate immediately because the ThreadPoolExecutor installs
#   an atexit handler that joins all the background threads.
#
#   Anyway this is not a big issue since all pending push/pull raise
#   ShuttingDown immediately, thus we won't end up with a sandbox/job
#   in an inconsistent state when SIGKILL arrives.
#

class PullManager(Manager):
    def __init__(self, nb_threads, client, name):
        super().__init__(nb_threads, interruptible=True)
        self.client    = client
        self.name      = name

    @asyncio.coroutine
    def _process(self, img, reset):
        image, version = img
        log.info("pull to the %-10s %s:%s", self.name, image, version)
        return self.run_in_executor(self.client.pull, image, version)


class PushManager(Manager):
    def __init__(self, nb_threads, ctrl):
        super().__init__(nb_threads, interruptible=True)