diff --git a/.gitignore b/.gitignore index 8e3f39a69..f2762cffc 100644 --- a/.gitignore +++ b/.gitignore @@ -1,8 +1,6 @@ MANIFEST build dist -_build -docs/gh-pages *.py[co] __pycache__ *.egg-info @@ -16,3 +14,16 @@ __pycache__ .coverage .cache absolute.json +htmlcov/ + +# Sphinx documentation +_build +docs/_build/ +docs/gh-pages + +# PyBuilder +target/ + +# PyCharm +.idea/ +*.iml diff --git a/.travis.yml b/.travis.yml index 0a3a96915..0b6833799 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,15 +1,16 @@ +dist: xenial language: python python: - "nightly" - - '3.6-dev' - - 3.5 - - 3.4 - - 3.3 - - 2.7 -sudo: false + - "3.8-dev" + - "3.7" + - "3.6" + - "3.5" + - "2.7" install: - pip install --upgrade setuptools pip - - pip install --upgrade --pre -e .[test] pytest-cov pytest-warnings codecov + - pip install --upgrade --upgrade-strategy eager --pre -e .[test] pytest-cov codecov 'coverage<5' + - pip freeze script: - py.test --cov jupyter_client jupyter_client after_success: @@ -17,3 +18,6 @@ after_success: matrix: allow_failures: - python: nightly +branches: + except: + - /^auto-backport-of-pr-[0-9]+$/ diff --git a/MANIFEST.in b/MANIFEST.in index 42edd273d..994648d70 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -5,6 +5,7 @@ include README.md # Documentation graft docs exclude docs/\#* +exclude docs/_* # Examples graft examples diff --git a/docs/changelog.rst b/docs/changelog.rst index 35e21b5c6..11cbd9f1a 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -4,6 +4,82 @@ Changes in Jupyter Client ========================= +5.2.3 +===== + +`5.2.3 on GitHub `__ + +- Fix hang on close in :class:`.ThreadedKernelClient` (used in QtConsole) + when using tornado with asyncio + (default behavior of tornado 5, see :ghpull:`352`). +- Fix errors when using deprecated :attr:`.KernelManager.kernel_cmd` + (:ghpull:`343`, :ghpull:`344`). + +5.2.2 +===== + +`5.2.2 on GitHub `__ + +- Fix :meth:`.KernelSpecManager.get_all_specs` method in subclasses + that only override :meth:`.KernelSpecManager.find_kernel_specs` + and :meth:`.KernelSpecManager.get_kernel_spec`. + See :ghissue:`338` and :ghpull:`339`. + +- Eliminate occasional error messages during process exit (:ghpull:`336`). +- Improve error message when attempting to bind on invalid address (:ghpull:`330`). +- Add missing direct dependency on tornado (:ghpull:`323`). + + +5.2.1 +===== + +`5.2.1 on GitHub `__ + +- Add parentheses to the conditional pytest requirement to work around a bug in the + ``wheel`` package that generates a ``.whl`` which otherwise always depends on + ``pytest``; see :ghissue:`324` and :ghpull:`325`. + +5.2 +=== + +`5.2 on GitHub `__ + +- Define Jupyter protocol version 5.3: + + - Kernels can now opt to be interrupted by a message sent on the control channel + instead of a system signal. See :ref:`kernelspecs` and :ref:`msging_interrupt` + (:ghpull:`294`). + +- New ``jupyter kernel`` command to launch an installed kernel by name + (:ghpull:`240`). +- Kernelspecs where the command starts with e.g. ``python3`` or + ``python3.6``—matching the version ``jupyter_client`` is running on—are now + launched with the same Python executable as the launching process (:ghpull:`306`). + This extends the special handling of ``python`` added in 5.0. +- Command line arguments specified by a kernelspec can now include + ``{resource_dir}``, which will be substituted with the kernelspec resource + directory path when the kernel is launched (:ghpull:`289`).
+- Kernelspecs now have an optional ``metadata`` field to hold arbitrary metadata + about kernels—see :ref:`kernelspecs` (:ghpull:`274`). +- Make the ``KernelRestarter`` class used by a ``KernelManager`` configurable + (:ghpull:`290`). +- When killing a kernel on Unix, kill its process group (:ghpull:`314`). +- If a kernel dies soon after starting, reassign random ports before restarting + it, in case one of the previously chosen ports has been bound by another + process (:ghpull:`279`). +- Avoid unnecessary filesystem operations when finding a kernelspec with + :meth:`.KernelSpecManager.get_kernel_spec` (:ghpull:`311`). +- :meth:`.KernelSpecManager.get_all_specs` will no longer raise an exception on + encountering an invalid ``kernel.json`` file. It will raise a warning and + continue (:ghpull:`310`). +- Check for non-contiguous buffers before trying to send them through ZMQ + (:ghpull:`258`). +- Compatibility with upcoming Tornado version 5.0 (:ghpull:`304`). +- Simplify setup code by always using setuptools (:ghpull:`284`). +- Soften warnings when setting the sticky bit on runtime files fails + (:ghpull:`286`). +- Various corrections and improvements to documentation. + + 5.1 === diff --git a/docs/conf.py b/docs/conf.py index 849d7a56e..c3de08efd 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -34,6 +34,7 @@ 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.napoleon', + 'sphinxcontrib_github_alt', ] # Add any paths that contain templates here, relative to this directory. @@ -55,6 +56,8 @@ copyright = '2015, Jupyter Development Team' author = 'Jupyter Development Team' +github_project_url = "https://github.com/jupyter/jupyter_client" + # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. diff --git a/docs/environment.yml b/docs/environment.yml index 3690c73b7..b7ed943c1 100644 --- a/docs/environment.yml +++ b/docs/environment.yml @@ -3,8 +3,10 @@ channels: - conda-forge dependencies: - pyzmq -- python==3.5 +- python==3.7 - traitlets>=4.1 - jupyter_core - sphinx>=1.3.6 - sphinx_rtd_theme +- pip: + - sphinxcontrib_github_alt diff --git a/docs/index.rst b/docs/index.rst index a0b8855cc..41e218ccc 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -23,6 +23,7 @@ with Jupyter kernels. kernels wrapperkernels + kernel_providers .. toctree:: :maxdepth: 2 diff --git a/docs/kernel_providers.rst b/docs/kernel_providers.rst new file mode 100644 index 000000000..2e1b7e295 --- /dev/null +++ b/docs/kernel_providers.rst @@ -0,0 +1,156 @@ +================ +Kernel providers +================ + +.. note:: + This is a new interface under development, and may still change. + Not all Jupyter applications use this yet. + See :ref:`kernelspecs` for the established way of discovering kernel types. + +Creating a kernel provider +========================== + +By writing a kernel provider, you can extend how Jupyter applications discover +and start kernels. For example, you could find kernels in an environment system +like conda, or kernels on remote systems which you can access. + +To write a kernel provider, subclass +:class:`jupyter_client.discovery.KernelProviderBase`, giving your provider an ID +and overriding two methods. + +.. class:: MyKernelProvider + + .. attribute:: id + + A short string identifying this provider. Cannot contain forward slash + (``/``). + + .. method:: find_kernels() + + Get the available kernel types this provider knows about. 
+ Return an iterable of 2-tuples: (name, attributes). + *name* is a short string identifying the kernel type. + *attributes* is a dictionary with information to allow selecting a kernel. + + .. method:: make_manager(name) + + Prepare and return a :class:`~jupyter_client.KernelManager` instance + ready to start a new kernel instance of the type identified by *name*. + The input will be one of the names given by :meth:`find_kernels`. + +For example, imagine we want to tell Jupyter about kernels for a new language +called *oblong*:: + + # oblong_provider.py + from jupyter_client.discovery import KernelProviderBase + from jupyter_client import KernelManager + from shutil import which + + class OblongKernelProvider(KernelProviderBase): + id = 'oblong' + + def find_kernels(self): + if not which('oblong-kernel'): + return # Check it's available + + # Two variants - for a real kernel, these could be something like + # different conda environments. + yield 'standard', { + 'display_name': 'Oblong (standard)', + 'language': {'name': 'oblong'}, + 'argv': ['oblong-kernel'], + } + yield 'rounded', { + 'display_name': 'Oblong (rounded)', + 'language': {'name': 'oblong'}, + 'argv': ['oblong-kernel'], + } + + def make_manager(self, name): + if name == 'standard': + return KernelManager(kernel_cmd=['oblong-kernel'], + extra_env={'ROUNDED': '0'}) + elif name == 'rounded': + return KernelManager(kernel_cmd=['oblong-kernel'], + extra_env={'ROUNDED': '1'}) + else: + raise ValueError("Unknown kernel %s" % name) + +You would then register this with an *entry point*. In your ``setup.py``, put +something like this:: + + setup(... + entry_points = { + 'jupyter_client.kernel_providers' : [ + # The name before the '=' should match the id attribute + 'oblong = oblong_provider:OblongKernelProvider', + ] + }) + +Finding kernel types +==================== + +To find and start kernels in client code, use +:class:`jupyter_client.discovery.KernelFinder`. This uses multiple kernel +providers to find available kernels. Like a kernel provider, it has methods +``find_kernels`` and ``make_manager``. The kernel names it works +with have the provider ID as a prefix, e.g. ``oblong/rounded`` (from the example +above). + +:: + + from jupyter_client.discovery import KernelFinder + kf = KernelFinder.from_entrypoints() + + ## Find available kernel types + for name, attributes in kf.find_kernels(): + print(name, ':', attributes['display_name']) + # oblong/standard : Oblong (standard) + # oblong/rounded : Oblong (rounded) + # ... + + ## Start a kernel by name + manager = kf.make_manager('oblong/standard') + manager.start_kernel() + +.. module:: jupyter_client.discovery + +.. autoclass:: KernelFinder + + .. automethod:: from_entrypoints + + .. automethod:: find_kernels + + .. automethod:: make_manager + +Kernel providers included in ``jupyter_client`` +=============================================== + +``jupyter_client`` includes two kernel providers: + +.. autoclass:: KernelSpecProvider + + .. seealso:: :ref:`kernelspecs` + +.. autoclass:: IPykernelProvider + +Glossary +======== + +Kernel instance + A running kernel, a process which can accept ZMQ connections from frontends. + Its state includes a namespace and an execution counter. + +Kernel type + The software to run a kernel instance, along with the context in which a + kernel starts. One kernel type allows starting multiple, initially similar + kernel instances. For instance, one kernel type may be associated with one + conda environment containing ``ipykernel``.
The same kernel software in + another environment would be a different kernel type. Another software package + for a kernel, such as ``IRkernel``, would also be a different kernel type. + +Kernel provider + A Python class to discover kernel types and allow a client to start instances + of those kernel types. For instance, one kernel provider might find conda + environments containing ``ipykernel`` and allow starting kernel instances in + these environments. diff --git a/docs/kernels.rst b/docs/kernels.rst index 3319dda31..f3c06d1e1 100644 --- a/docs/kernels.rst +++ b/docs/kernels.rst @@ -6,12 +6,12 @@ Making kernels for Jupyter A 'kernel' is a program that runs and introspects the user's code. IPython includes a kernel for Python code, and people have written kernels for -`several other languages `_. +`several other languages `_. -When Jupyter starts a kernel, it passes it a connection file. This specifies +At kernel startup, Jupyter passes the kernel a connection file. This specifies how to set up communications with the frontend. -There are two options for writing a kernel: +There are three options for writing a kernel: 1. You can reuse the IPython kernel machinery to handle the communications, and just describe how to execute your code. This is much simpler if the target @@ -19,6 +19,17 @@ There are two options for writing a kernel: 2. You can implement the kernel machinery in your target language. This is more work initially, but the people using your kernel might be more likely to contribute to it if it's in the language they know. +3. You can use the `xeus `_ library, which is + a C++ implementation of the Jupyter kernel protocol. Kernel authors only need to + implement the language-specific logic in their kernel + (code execution, auto-completion...). This is the simplest + solution if your target language can be driven from C or C++: e.g. if it has + a C API, like most scripting languages. Check out the + `xeus documentation `_ for more details. + Examples of kernels based on xeus include: + - `xeus-cling `_ + - `xeus-python `_ + - `JuniperKernel `_ Connection files ================ @@ -132,11 +143,21 @@ JSON serialised dictionary containing the following keys and values: is found, a kernel with a matching `language` will be used. This allows a notebook written on any Python or Julia kernel to be properly associated with the user's Python or Julia kernel, even if they aren't listed under the same name as the author's. +- **interrupt_mode** (optional): May be either ``signal`` or ``message`` and + specifies how a client is supposed to interrupt cell execution on this kernel, + either by sending an interrupt ``signal`` via the operating system's + signalling facilities (e.g. `SIGINT` on POSIX systems), or by sending an + ``interrupt_request`` message on the control channel (see + :ref:`msging_interrupt`). If this is not specified, + the client will default to ``signal`` mode. - **env** (optional): A dictionary of environment variables to set for the kernel. These will be added to the current environment variables before the kernel is - started. + started. Existing environment variables can be referenced using ``${}`` and + will be substituted with the corresponding value. Administrators should note that use + of ``${}`` can expose sensitive variables, so it should be used only in controlled + circumstances. - **metadata** (optional): A dictionary of additional attributes about this - kernel; used by clients to aid clients in kernel selection.
Metadata added + kernel; used by clients to aid in kernel selection. Metadata added here should be namespaced for the tool reading and writing that metadata. For example, the kernel.json file for IPython looks like this:: diff --git a/docs/messaging.rst b/docs/messaging.rst index 776dda681..3dfa5e2db 100644 --- a/docs/messaging.rst +++ b/docs/messaging.rst @@ -21,7 +21,7 @@ Versioning The Jupyter message specification is versioned independently of the packages that use it. -The current version of the specification is 5.2. +The current version of the specification is 5.3. .. note:: *New in* and *Changed in* messages in this document refer to versions of the @@ -48,15 +48,15 @@ kernel has dedicated sockets for the following functions: each frontend and the kernel. 2. **IOPub**: this socket is the 'broadcast channel' where the kernel publishes all - side effects (stdout, stderr, etc.) as well as the requests coming from any - client over the shell socket and its own requests on the stdin socket. There - are a number of actions in Python which generate side effects: :func:`print` - writes to ``sys.stdout``, errors generate tracebacks, etc. Additionally, in - a multi-client scenario, we want all frontends to be able to know what each - other has sent to the kernel (this can be useful in collaborative scenarios, - for example). This socket allows both side effects and the information - about communications taking place with one client over the shell channel - to be made available to all clients in a uniform manner. + side effects (stdout, stderr, debugging events etc.) as well as the requests + coming from any client over the shell socket and its own requests on the + stdin socket. There are a number of actions in Python which generate side + effects: :func:`print` writes to ``sys.stdout``, errors generate tracebacks, + etc. Additionally, in a multi-client scenario, we want all frontends to be + able to know what each other has sent to the kernel (this can be useful in + collaborative scenarios, for example). This socket allows both side effects + and the information about communications taking place with one client over + the shell channel to be made available to all clients in a uniform manner. 3. **stdin**: this ROUTER socket is connected to all frontends, and it allows the kernel to request input from the active frontend when :func:`raw_input` is called. @@ -72,8 +72,9 @@ kernel has dedicated sockets for the following functions: which ones are from other clients, so they can display each type appropriately. -4. **Control**: This channel is identical to Shell, but operates on a separate socket, - to allow important messages to avoid queueing behind execution requests (e.g. shutdown or abort). +4. **Control**: This channel is identical to Shell, but operates on a separate + socket to avoid queueing behind execution requests. The control channel is + used for shutdown and restart messages, as well as for debugging messages. 5. **Heartbeat**: This socket allows for simple bytestring messages to be sent between the frontend and the kernel to ensure that they are still connected. @@ -122,9 +123,28 @@ A message is defined by the following four-dictionary structure:: 'buffers': list, } +.. note:: + + The ``session`` id in a message header identifies a unique entity with state, + such as a kernel process or client process. + + A client session id, in message headers from a client, should be unique among + all clients connected to a kernel. 
When a client reconnects to a kernel, it + should use the same client session id in its message headers. When a client + restarts, it should generate a new client session id. + + A kernel session id, in message headers from a kernel, should identify a + particular kernel process. If a kernel is restarted, the kernel session id + should be regenerated. + + The session id in a message header can be used to identify the sending entity. + For example, if a client disconnects and reconnects to a kernel, and messages + from the kernel have a different kernel session id than prior to the disconnect, + the client should assume that the kernel was restarted. + .. versionchanged:: 5.0 - ``version`` key added to the header. + ``version`` key added to the header. .. versionchanged:: 5.1 @@ -139,8 +159,9 @@ Compatibility ============= Kernels must implement the :ref:`execute ` and :ref:`kernel info -` messages in order to be usable. All other message types -are optional, although we recommend implementing :ref:`completion +` messages, along with the associated busy and idle +:ref:`status` messages. All other message types are +optional, although we recommend implementing :ref:`completion ` if possible. Kernels do not need to send any reply for messages they don't handle, and frontends should provide sensible behaviour if no reply arrives (except for the required execution and kernel info messages). @@ -305,6 +326,9 @@ All reply messages have a ``'status'`` field, which will have one of the followi but with no information about the error. No fields should be present other that `status`. +As a special case, ``execute_reply`` messages (see :ref:`execution_results`) +have an ``execution_count`` field regardless of their status. + .. versionchanged:: 5.1 ``status='abort'`` has not proved useful, and is considered deprecated. @@ -351,9 +375,9 @@ Message type: ``execute_request``:: # should not send these messages. 'allow_stdin' : True, - # A boolean flag, which, if True, does not abort the execution queue, if an exception is encountered. - # This allows the queued execution of multiple execute_requests, even if they generate exceptions. - 'stop_on_error' : False, + # A boolean flag, which, if True, aborts the execution queue if an exception is encountered. + # If False, queued execute_requests will execute even if this request generates an exception. + 'stop_on_error' : True, } .. versionchanged:: 5.0 @@ -415,7 +439,7 @@ Execution results Message type: ``execute_reply``:: content = { - # One of: 'ok' OR 'error' OR 'abort' + # One of: 'ok' OR 'error' OR 'aborted' 'status' : str, # The global kernel counter that increases by one with each request that @@ -614,6 +638,11 @@ Message type: ``complete_request``:: Message type: ``complete_reply``:: content = { + # status should be 'ok' unless an exception was raised during the request, + # in which case it should be 'error', along with the usual error message content + # in other messages. + 'status' : 'ok', + # The list of all matches to the completion request, such as + # ['a.isalnum', 'a.isalpha'] for the above example. 'matches' : list, @@ -625,11 +654,6 @@ Message type: ``complete_reply``:: # Information that frontend plugins might use for extra display information about completions. 'metadata' : dict, - - # status should be 'ok' unless an exception was raised during the request, - # in which case it should be 'error', along with the usual error message content - # in other messages. - 'status' : 'ok' } ..
versionchanged:: 5.0 @@ -814,8 +838,6 @@ Message type: ``comm_info_reply``:: .. versionadded:: 5.1 - - ``comm_info`` is a proposed addition for msgspec v5.1. - .. _msging_kernel_info: Kernel info =========== @@ -835,6 +857,9 @@ Message type: ``kernel_info_request``:: Message type: ``kernel_info_reply``:: content = { + # 'ok' if the request succeeded or 'error', with error information as in all other replies. + 'status' : 'ok', + # Version of messaging protocol. # The first integer indicates major version. It is incremented when # there is any backward incompatible change. @@ -917,6 +942,9 @@ and `codemirror modes `_ for those fields ``language`` moved to ``language_info.name`` +Messages on the Control (ROUTER/DEALER) channel +=============================================== + .. _msging_shutdown: Kernel shutdown --------------- @@ -934,8 +962,7 @@ multiple cases: The client sends a shutdown request to the kernel, and once it receives the reply message (which is otherwise empty), it can assume that the kernel has -completed shutdown safely. The request can be sent on either the `control` or -`shell` channels. +completed shutdown safely. The request is sent on the `control` channel. Upon their own shutdown, client applications will typically execute a last minute sanity check and forcefully terminate any kernel that is still alive, to @@ -959,11 +986,82 @@ Message type: ``shutdown_reply``:: socket, they simply send a forceful process termination signal, since a dead process is unlikely to respond in any useful way to messages. +.. versionchanged:: 5.4 -Messages on the IOPub (PUB/SUB) channel -======================================= + Sending a ``shutdown_request`` message on the ``shell`` channel is deprecated. + +.. _msging_interrupt: + +Kernel interrupt +---------------- +In case a kernel cannot catch operating system interrupt signals (e.g. the +runtime it uses handles signals and does not allow a user program to define a callback), +a kernel can choose to be notified using a message instead. For this to work, +the kernel's kernelspec must set `interrupt_mode` to ``message``. An interruption +will then result in the following message on the `control` channel: +Message type: ``interrupt_request``:: + + content = {} + +Message type: ``interrupt_reply``:: + + content = {} + +.. versionadded:: 5.3 + +Debug request +------------- + +This message type is used with debugging kernels to request specific actions +to be performed by the debugger, such as adding a breakpoint or stepping +into code. + +The contents of debug requests and replies respectively follow the schemas of the +``Request`` and ``Response`` messages of the *Debug Adapter Protocol* (DAP_). + +Message type: ``debug_request``:: + + content = { + # The type of debug message + 'type': 'request', + # A unique sequence number + 'seq': int, + # The command to execute + 'command': str, + # Optional: arguments for the command + 'arguments': {} + } + +Message type: ``debug_reply``:: + + content = { + # The type of debug message + 'type': 'response', + # Sequence number of the corresponding request + 'request_seq': int, + # Outcome of the request. + # - True if the request was successful. Then the 'body' attribute may contain the result of the request. + # - False if the request failed. Then the 'message' attribute contains the short-form error, + # and 'body' may contain additional information. + 'success': bool, + # Optional: short-form error in case of failure + 'message': str, + # Optional: request result in case of success, and further failure information otherwise.
+ 'body': any + } + +The ``content`` dict can be any JSON information used by debugging frontends +and kernels. + +Debug requests and replies are sent over the `control` channel to prevent queuing +behind execution requests. + +.. versionadded:: 5.5 + +Messages on the IOPub (PUB/SUB) channel +======================================= Streams (stdout, stderr, etc) ------------------------------ @@ -1163,13 +1261,15 @@ Message type: ``error``:: content = { # Similar content to the execute_reply messages for the 'error' case, - # except the 'status' field is omitted. + # except the 'status' and 'execution_count' fields are omitted. } .. versionchanged:: 5.0 ``pyerr`` renamed to ``error`` +.. _status: + Kernel status ------------- @@ -1208,14 +1308,6 @@ between the busy and idle status messages associated with a given request. Busy and idle messages should be sent before/after handling every request, not just execution. -.. note:: - - Extra status messages are added between the notebook webserver and websocket clients - that are not sent by the kernel. These are: - - - restarting (kernel has died, but will be automatically restarted) - - dead (kernel has died, restarting has failed) - Clear output ------------ @@ -1238,6 +1330,34 @@ Message type: ``clear_output``:: The selective clearing keys are ignored in v4 and the default behavior remains the same, so v4 clear_output messages will be safely handled by a v4.1 frontend. +.. _debug_event: + +Debug event +----------- + +This message type is used by debugging kernels to send debugging events to the +frontend. + +The content of the debug events follows the schema of the ``Event`` message of +the *Debug Adapter Protocol* (DAP_). + +Message type: ``debug_event``:: + + content = { + # The type of debug message + 'type': 'event', + # A unique sequence number + 'seq': int, + # Type of event + 'event': str, + # Optional: event-specific information + 'body': {} + } + +The ``content`` dict can be any JSON information used by debugging frontends. + +.. versionadded:: 5.5 + .. _stdin_messages: Messages on the stdin (ROUTER/DEALER) channel @@ -1309,7 +1429,6 @@ Heartbeat for kernels Clients send ping messages on a REQ socket, which are echoed right back from the Kernel's REP socket. These are simple bytestrings, not full JSON messages described above. - Custom Messages =============== @@ -1321,8 +1440,7 @@ To do this, IPython adds a notion of a ``Comm``, which exists on both sides, and can communicate in either direction. These messages are fully symmetrical - both the Kernel and the Frontend can send each message, -and no messages expect a reply. -The Kernel listens for these messages on the Shell channel, +and no messages expect a reply. The Kernel listens for these messages on the Shell channel, and the Frontend listens for them on the IOPub channel. Opening a Comm @@ -1411,17 +1529,18 @@ Frontends claiming to implement protocol 5.2 **MUST** identify cursor_pos as the Kernels may choose to expect the UTF-16 offset from requests implementing protocol 5.1 and earlier, in order to behave correctly with the most popular frontends. But they should know that doing so *introduces* the inverse bug for the frontends that do not have this bug. +As an example, use a python3 kernel and evaluate ``𨭎𨭎𨭎𨭎𨭎 = 10``. Then type ``𨭎𨭎`` followed by the tab key and see if it properly completes. 
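A minimal sketch of the offset mismatch described above (illustrative only, not part of the spec or of this diff; the helper name is invented, and Python 3 is assumed). It converts a ``cursor_pos`` counted in unicode codepoints, as protocol 5.2 requires, into the UTF-16 units that the affected frontends send::

    text = u'\U00028b4e\U00028b4e = 10'  # two astral-plane characters, as in the example above

    def codepoints_to_utf16(text, cursor_pos):
        # Each character above U+FFFF occupies two UTF-16 code units,
        # so the two conventions diverge once such a character precedes
        # the cursor.
        return cursor_pos + sum(1 for ch in text[:cursor_pos] if ord(ch) > 0xFFFF)

    print(codepoints_to_utf16(text, 2))  # -> 4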
+ Known affected frontends (as of 2017-06): - Jupyter Notebook < 5.1 - JupyterLab < 0.24 - nteract < 0.2.0 -- CoCalc - Jupyter Console and QtConsole with Python 2 on macOS and Windows Known *not* affected frontends: -- QtConsole, Jupyter Console with Python 3 or Python 2 on Linux +- QtConsole, Jupyter Console with Python 3 or Python 2 on Linux, CoCalc .. seealso:: @@ -1430,3 +1549,4 @@ Known *not* affected frontends: .. _ZeroMQ: http://zeromq.org .. _nteract: https://nteract.io +.. _DAP: https://microsoft.github.io/debug-adapter-protocol/specification diff --git a/docs/wrapperkernels.rst b/docs/wrapperkernels.rst index bc9a8a55b..5ebb5aac4 100644 --- a/docs/wrapperkernels.rst +++ b/docs/wrapperkernels.rst @@ -2,9 +2,9 @@ Making simple Python wrapper kernels ==================================== You can re-use IPython's kernel machinery to easily make new kernels. -This is useful for languages that have Python bindings, such as `Octave -`_ (via -`Oct2Py `_), or languages +This is useful for languages that have Python bindings, such as `Hy +`_ (see +`Calysto Hy `_), or languages where the REPL can be controlled in a tty using `pexpect `_, such as bash. @@ -13,6 +13,11 @@ such as bash. `bash_kernel `_ A simple kernel for bash, written using this machinery +The `Metakernel `_ library makes it easier to +write a wrapper kernel that includes a base set of line and cell magics. It also has a ``ProcessKernel`` subclass that makes it easy to write kernels that use ``pexpect``. +See `Octave Kernel `_ as an example. + + Required steps -------------- @@ -24,7 +29,7 @@ following methods and attributes: .. attribute:: implementation implementation_version banner - + Information for :ref:`msging_kernel_info` replies. 'Implementation' refers to the kernel (e.g. IPython), rather than the language (e.g. Python). The 'banner' is displayed to the user in console @@ -43,9 +48,9 @@ following methods and attributes: Other keys may be added to this later. .. method:: do_execute(code, silent, store_history=True, user_expressions=None, allow_stdin=False) - + Execute user code. - + :param str code: The code to be executed. :param bool silent: Whether to display output. :param bool store_history: Whether to record this code in history and @@ -55,7 +60,7 @@ following methods and attributes: after the code has run. You can ignore this if you need to. :param bool allow_stdin: Whether the frontend can provide input on request (e.g. for Python's :func:`raw_input`). - + Your method should return a dict containing the fields described in :ref:`execution_results`. To display output, it can send messages using :meth:`~ipykernel.kernelbase.Kernel.send_response`. @@ -131,25 +136,25 @@ relevant section of the :doc:`messaging spec `. .. method:: do_complete(code, cursor_pos) Code completion - + :param str code: The code already present :param int cursor_pos: The position in the code where completion is requested - + .. seealso:: - + :ref:`msging_completion` messages .. method:: do_inspect(code, cursor_pos, detail_level=0) Object introspection - + :param str code: The code :param int cursor_pos: The position in the code where introspection is requested :param int detail_level: 0 or 1 for more or less detail. In IPython, 1 gets the source code. - + .. seealso:: - + :ref:`msging_inspection` messages .. method:: do_history(hist_access_type, output, raw, session=None, start=None, stop=None, n=None, pattern=None, unique=False) @@ -159,27 +164,27 @@ relevant section of the :doc:`messaging spec `. 
for all the arguments shown with defaults here. .. seealso:: - + :ref:`msging_history` messages .. method:: do_is_complete(code) - + Is code entered in a console-like interface complete and ready to execute, or should a continuation prompt be shown? - + :param str code: The code entered so far - possibly multiple lines - + .. seealso:: - + :ref:`msging_is_complete` messages .. method:: do_shutdown(restart) Shutdown the kernel. You only need to handle your own clean up - the kernel machinery will take care of cleaning up its own things before stopping. - + :param bool restart: Whether the kernel will be started again afterwards - + .. seealso:: - + :ref:`msging_shutdown` messages diff --git a/jupyter_client/_version.py b/jupyter_client/_version.py index 90dd2e93e..53bf21fbd 100644 --- a/jupyter_client/_version.py +++ b/jupyter_client/_version.py @@ -1,5 +1,5 @@ -version_info = (5, 1, 0) +version_info = (6, 0, 0, 'dev') __version__ = '.'.join(map(str, version_info)) -protocol_version_info = (5, 2) +protocol_version_info = (5, 3) protocol_version = "%i.%i" % protocol_version_info diff --git a/jupyter_client/blocking/client.py b/jupyter_client/blocking/client.py index c0196ba36..87e0e769e 100644 --- a/jupyter_client/blocking/client.py +++ b/jupyter_client/blocking/client.py @@ -36,7 +36,7 @@ TimeoutError = RuntimeError -def reqrep(meth): +def reqrep(meth, channel='shell'): def wrapped(self, *args, **kwargs): reply = kwargs.pop('reply', False) timeout = kwargs.pop('timeout', None) @@ -44,7 +44,7 @@ def wrapped(self, *args, **kwargs): if not reply: return msg_id - return self._recv_reply(msg_id, timeout=timeout) + return self._recv_reply(msg_id, timeout=timeout, channel=channel) if not meth.__doc__: # python -OO removes docstrings, @@ -135,9 +135,10 @@ def wait_for_ready(self, timeout=None): iopub_channel_class = Type(ZMQSocketChannel) stdin_channel_class = Type(ZMQSocketChannel) hb_channel_class = Type(HBChannel) + control_channel_class = Type(ZMQSocketChannel) - def _recv_reply(self, msg_id, timeout=None): + def _recv_reply(self, msg_id, timeout=None, channel='shell'): """Receive and return the reply for a given request""" if timeout is not None: deadline = monotonic() + timeout @@ -145,7 +146,10 @@ def _recv_reply(self, msg_id, timeout=None): if timeout is not None: timeout = max(0, deadline - monotonic()) try: - reply = self.get_shell_msg(timeout=timeout) + if channel == 'control': + reply = self.get_control_msg(timeout=timeout) + else: + reply = self.get_shell_msg(timeout=timeout) except Empty: raise TimeoutError("Timeout waiting for reply") if reply['parent_header'].get('msg_id') != msg_id: @@ -154,13 +158,16 @@ def _recv_reply(self, msg_id, timeout=None): return reply + # replies come on the shell channel execute = reqrep(KernelClient.execute) history = reqrep(KernelClient.history) complete = reqrep(KernelClient.complete) inspect = reqrep(KernelClient.inspect) kernel_info = reqrep(KernelClient.kernel_info) comm_info = reqrep(KernelClient.comm_info) - shutdown = reqrep(KernelClient.shutdown) + + # replies come on the control channel + shutdown = reqrep(KernelClient.shutdown, channel='control') def _stdin_hook_default(self, msg): diff --git a/jupyter_client/channels.py b/jupyter_client/channels.py index dd9906723..51d4d11a8 100644 --- a/jupyter_client/channels.py +++ b/jupyter_client/channels.py @@ -7,7 +7,7 @@ import atexit import errno -from threading import Thread +from threading import Thread, Event import time import zmq @@ -73,6 +73,7 @@ def __init__(self, context=None, session=None, 
address=None): # running is False until `.start()` is called self._running = False + self._exit = Event() # don't start paused self._pause = False self.poller = zmq.Poller() @@ -80,7 +81,10 @@ def __init__(self, context=None, session=None, address=None): @staticmethod @atexit.register def _notice_exit(): - HBChannel._exiting = True + # Class definitions can be torn down during interpreter shutdown. + # We only need to set _exiting flag if this hasn't happened. + if HBChannel is not None: + HBChannel._exiting = True def _create_socket(self): if self.socket is not None: @@ -135,11 +139,10 @@ def run(self): while self._running: if self._pause: # just sleep, and skip the rest of the loop - time.sleep(self.time_to_dead) + self._exit.wait(self.time_to_dead) continue since_last_heartbeat = 0.0 - # io.rprint('Ping from HB channel') # dbg # no need to catch EFSM here, because the previous event was # either a recv or connect, which cannot be followed by EFSM self.socket.send(b'ping') @@ -152,7 +155,7 @@ def run(self): # sleep the remainder of the cycle remainder = self.time_to_dead - (time.time() - request_time) if remainder > 0: - time.sleep(remainder) + self._exit.wait(remainder) continue else: # nothing was received within the time limit, signal heart failure @@ -181,6 +184,7 @@ def is_beating(self): def stop(self): """Stop the channel's event loop and join its thread.""" self._running = False + self._exit.set() self.join() self.close() diff --git a/jupyter_client/client.py b/jupyter_client/client.py index 763af85a7..de1b189d9 100644 --- a/jupyter_client/client.py +++ b/jupyter_client/client.py @@ -35,12 +35,13 @@ def validate_string_dict(dct): class KernelClient(ConnectionFileMixin): """Communicates with a single kernel on any host via zmq channels. - There are four channels associated with each kernel: + There are five channels associated with each kernel: * shell: for request/reply calls to the kernel. * iopub: for the kernel to publish results to frontends. * hb: for monitoring the kernel's heartbeat. * stdin: for frontends to reply to raw_input calls in the kernel. + * control: for kernel management calls to the kernel. The messages that can be sent on these channels are exposed as methods of the client (KernelClient.execute, complete, history, etc.). These methods only @@ -51,19 +52,21 @@ class KernelClient(ConnectionFileMixin): # The PyZMQ Context to use for communication with the kernel. 
context = Instance(zmq.Context) def _context_default(self): - return zmq.Context.instance() + return zmq.Context() # The classes to use for the various channels shell_channel_class = Type(ChannelABC) iopub_channel_class = Type(ChannelABC) stdin_channel_class = Type(ChannelABC) hb_channel_class = Type(HBChannelABC) + control_channel_class = Type(ChannelABC) # Protected traits _shell_channel = Any() _iopub_channel = Any() _stdin_channel = Any() _hb_channel = Any() + _control_channel = Any() # flag for whether execute requests should be allowed to call raw_input: allow_stdin = True @@ -84,11 +87,15 @@ def get_stdin_msg(self, *args, **kwargs): """Get a message from the stdin channel""" return self.stdin_channel.get_msg(*args, **kwargs) + def get_control_msg(self, *args, **kwargs): + """Get a message from the control channel""" + return self.control_channel.get_msg(*args, **kwargs) + #-------------------------------------------------------------------------- # Channel management methods #-------------------------------------------------------------------------- - def start_channels(self, shell=True, iopub=True, stdin=True, hb=True): + def start_channels(self, shell=True, iopub=True, stdin=True, hb=True, control=True): """Starts the channels for this kernel. This will create the channels if they do not exist and then start @@ -109,6 +116,8 @@ def start_channels(self, shell=True, iopub=True, stdin=True, hb=True): self.allow_stdin = False if hb: self.hb_channel.start() + if control: + self.control_channel.start() def stop_channels(self): """Stops all the running channels for this kernel. @@ -123,12 +132,15 @@ def stop_channels(self): self.stdin_channel.stop() if self.hb_channel.is_alive(): self.hb_channel.stop() + if self.control_channel.is_alive(): + self.control_channel.stop() @property def channels_running(self): """Are any of the channels created and running?""" return (self.shell_channel.is_alive() or self.iopub_channel.is_alive() or - self.stdin_channel.is_alive() or self.hb_channel.is_alive()) + self.stdin_channel.is_alive() or self.hb_channel.is_alive() or + self.control_channel.is_alive()) ioloop = None # Overridden in subclasses that use pyzmq event loop @@ -179,6 +191,18 @@ def hb_channel(self): ) return self._hb_channel + @property + def control_channel(self): + """Get the control channel object for this kernel.""" + if self._control_channel is None: + url = self._make_url('control') + self.log.debug("connecting control channel to %s", url) + socket = self.connect_control(identity=self.session.bsession) + self._control_channel = self.control_channel_class( + socket, self.session, self.ioloop + ) + return self._control_channel + def is_alive(self): """Is the kernel process still running?""" from .manager import KernelManager @@ -383,8 +407,24 @@ def _handle_kernel_info_reply(self, msg): if adapt_version != major_protocol_version: self.session.adapt_version = adapt_version + def is_complete(self, code): + """Ask the kernel whether some code is complete and ready to execute.""" + msg = self.session.msg('is_complete_request', {'code': code}) + self.shell_channel.send(msg) + return msg['header']['msg_id'] + + def input(self, string): + """Send a string of raw input to the kernel. + + This should only be called in response to the kernel sending an + ``input_request`` message on the stdin channel. + """ + content = dict(value=string) + msg = self.session.msg('input_reply', content) + self.stdin_channel.send(msg) + def shutdown(self, restart=False): - """Request an immediate kernel shutdown. 
+ """Request an immediate kernel shutdown on the control channel. Upon receipt of the (empty) reply, client code can safely assume that the kernel has shut down and it's safe to forcefully terminate it if @@ -401,24 +441,7 @@ def shutdown(self, restart=False): # Send quit message to kernel. Once we implement kernel-side setattr, # this should probably be done that way, but for now this will do. msg = self.session.msg('shutdown_request', {'restart':restart}) - self.shell_channel.send(msg) - return msg['header']['msg_id'] - - def is_complete(self, code): - """Ask the kernel whether some code is complete and ready to execute.""" - msg = self.session.msg('is_complete_request', {'code': code}) - self.shell_channel.send(msg) + self.control_channel.send(msg) return msg['header']['msg_id'] - def input(self, string): - """Send a string of raw input to the kernel. - - This should only be called in response to the kernel sending an - ``input_request`` message on the stdin channel. - """ - content = dict(value=string) - msg = self.session.msg('input_reply', content) - self.stdin_channel.send(msg) - - KernelClientABC.register(KernelClient) diff --git a/jupyter_client/clientabc.py b/jupyter_client/clientabc.py index 7a718284a..9a47d2fcb 100644 --- a/jupyter_client/clientabc.py +++ b/jupyter_client/clientabc.py @@ -47,12 +47,16 @@ def hb_channel_class(self): def stdin_channel_class(self): pass + @abc.abstractproperty + def control_channel_class(self): + pass + #-------------------------------------------------------------------------- # Channel management methods #-------------------------------------------------------------------------- @abc.abstractmethod - def start_channels(self, shell=True, iopub=True, stdin=True, hb=True): + def start_channels(self, shell=True, iopub=True, stdin=True, hb=True, control=True): pass @abc.abstractmethod @@ -78,3 +82,7 @@ def stdin_channel(self): @abc.abstractproperty def hb_channel(self): pass + + @abc.abstractproperty + def control_channel(self): + pass diff --git a/jupyter_client/connect.py b/jupyter_client/connect.py index 91efbc461..972d3158b 100644 --- a/jupyter_client/connect.py +++ b/jupyter_client/connect.py @@ -29,15 +29,15 @@ bytes_to_str, cast_bytes, cast_bytes_py2, string_types, ) from traitlets import ( - Bool, Integer, Unicode, CaselessStrEnum, Instance, Type, + Bool, Integer, Unicode, CaselessStrEnum, Instance, Type, observe ) from jupyter_core.paths import jupyter_data_dir, jupyter_runtime_dir def write_connection_file(fname=None, shell_port=0, iopub_port=0, stdin_port=0, hb_port=0, - control_port=0, ip='', key=b'', transport='tcp', - signature_scheme='hmac-sha256', kernel_name='' - ): + control_port=0, ip='', key=b'', transport='tcp', + signature_scheme='hmac-sha256', kernel_name='' + ): """Generates a JSON config file, including the selection of random ports. 
Parameters @@ -193,7 +193,7 @@ def find_connection_file(filename='kernel-*.json', path=None, profile=None): path = ['.', jupyter_runtime_dir()] if isinstance(path, string_types): path = [path] - + try: # first, try explicit name return filefind(filename, path) @@ -208,11 +208,11 @@ def find_connection_file(filename='kernel-*.json', path=None, profile=None): else: # accept any substring match pat = '*%s*' % filename - + matches = [] for p in path: matches.extend(glob.glob(os.path.join(p, pat))) - + matches = [ os.path.abspath(m) for m in matches ] if not matches: raise IOError("Could not find %r in %r" % (filename, path)) @@ -226,7 +226,7 @@ def find_connection_file(filename='kernel-*.json', path=None, profile=None): def tunnel_to_kernel(connection_info, sshserver, sshkey=None): """tunnel connections to a kernel via ssh - This will open four SSH tunnels from localhost on this machine to the + This will open five SSH tunnels from localhost on this machine to the ports associated with the kernel. They can be either direct localhost-localhost tunnels, or if an intermediate server is necessary, the kernel must be listening on a public IP. @@ -246,10 +246,10 @@ def tunnel_to_kernel(connection_info, sshserver, sshkey=None): Returns ------- - (shell, iopub, stdin, hb) : ints - The four ports on localhost that have been forwarded to the kernel. + (shell, iopub, stdin, hb, control) : ints + The five ports on localhost that have been forwarded to the kernel. """ - from zmq.ssh import tunnel + from .ssh import tunnel if isinstance(connection_info, string_types): # it's a path, unpack it with open(connection_info) as f: @@ -257,8 +257,8 @@ def tunnel_to_kernel(connection_info, sshserver, sshkey=None): cf = connection_info - lports = tunnel.select_random_ports(4) - rports = cf['shell_port'], cf['iopub_port'], cf['stdin_port'], cf['hb_port'] + lports = tunnel.select_random_ports(5) + rports = cf['shell_port'], cf['iopub_port'], cf['stdin_port'], cf['hb_port'], cf['control_port'] remote_ip = cf['ip'] @@ -289,11 +289,11 @@ def tunnel_to_kernel(connection_info, sshserver, sshkey=None): class ConnectionFileMixin(LoggingConfigurable): """Mixin for configurable classes that work with connection files""" - + data_dir = Unicode() def _data_dir_default(self): return jupyter_data_dir() - + # The addresses for the communication channels connection_file = Unicode('', config=True, help="""JSON file in which to store connection info [default: kernel-.json] @@ -323,8 +323,9 @@ def _ip_default(self): else: return localhost() - def _ip_changed(self, name, old, new): - if new == '*': + @observe('ip') + def _ip_changed(self, change): + if change['new'] == '*': self.ip = '0.0.0.0' # protected traits @@ -480,7 +481,7 @@ def write_connection_file(self): def load_connection_file(self, connection_file=None): """Load connection info from JSON dict in self.connection_file. - + Parameters ---------- connection_file: unicode, optional @@ -496,10 +497,10 @@ def load_connection_file(self, connection_file=None): def load_connection_info(self, info): """Load connection info from a dict containing connection info. - + Typically this data comes from a connection file and is called by load_connection_file. 
- + Parameters ---------- info: dict diff --git a/jupyter_client/consoleapp.py b/jupyter_client/consoleapp.py index ce2ead429..753c91ab7 100644 --- a/jupyter_client/consoleapp.py +++ b/jupyter_client/consoleapp.py @@ -18,7 +18,7 @@ from traitlets.config.application import boolean_flag from ipython_genutils.path import filefind from traitlets import ( - Dict, List, Unicode, CUnicode, CBool, Any + Dict, List, Unicode, CUnicode, CBool, Any, Type ) from jupyter_core.application import base_flags, base_aliases @@ -72,6 +72,7 @@ shell = 'JupyterConsoleApp.shell_port', iopub = 'JupyterConsoleApp.iopub_port', stdin = 'JupyterConsoleApp.stdin_port', + control = 'JupyterConsoleApp.control_port', existing = 'JupyterConsoleApp.existing', f = 'JupyterConsoleApp.connection_file', @@ -110,7 +111,11 @@ class JupyterConsoleApp(ConnectionFileMixin): classes = classes flags = Dict(flags) aliases = Dict(aliases) - kernel_manager_class = KernelManager + kernel_manager_class = Type( + default_value=KernelManager, + config=True, + help='The kernel manager class to use.' + ) kernel_client_class = BlockingKernelClient kernel_argv = List(Unicode()) @@ -218,7 +223,8 @@ def init_ssh(self): shell_port=self.shell_port, iopub_port=self.iopub_port, stdin_port=self.stdin_port, - hb_port=self.hb_port + hb_port=self.hb_port, + control_port=self.control_port ) self.log.info("Forwarding connections to %s via %s"%(ip, self.sshserver)) @@ -232,7 +238,7 @@ def init_ssh(self): self.log.error("Could not setup tunnels", exc_info=True) self.exit(1) - self.shell_port, self.iopub_port, self.stdin_port, self.hb_port = newports + self.shell_port, self.iopub_port, self.stdin_port, self.hb_port, self.control_port = newports cf = self.connection_file root, ext = os.path.splitext(cf) @@ -271,6 +277,7 @@ def init_kernel_manager(self): iopub_port=self.iopub_port, stdin_port=self.stdin_port, hb_port=self.hb_port, + control_port=self.control_port, connection_file=self.connection_file, kernel_name=self.kernel_name, parent=self, @@ -281,10 +288,8 @@ def init_kernel_manager(self): self.exit(1) self.kernel_manager.client_factory = self.kernel_client_class - # FIXME: remove special treatment of IPython kernels kwargs = {} - if self.kernel_manager.ipykernel: - kwargs['extra_arguments'] = self.kernel_argv + kwargs['extra_arguments'] = self.kernel_argv self.kernel_manager.start_kernel(**kwargs) atexit.register(self.kernel_manager.cleanup_ipc_files) @@ -298,6 +303,7 @@ def init_kernel_manager(self): self.iopub_port=km.iopub_port self.stdin_port=km.stdin_port self.hb_port=km.hb_port + self.control_port=km.control_port self.connection_file = km.connection_file atexit.register(self.kernel_manager.cleanup_connection_file) @@ -314,6 +320,7 @@ def init_kernel_client(self): iopub_port=self.iopub_port, stdin_port=self.stdin_port, hb_port=self.hb_port, + control_port=self.control_port, connection_file=self.connection_file, parent=self, ) diff --git a/jupyter_client/discovery.py b/jupyter_client/discovery.py new file mode 100644 index 000000000..2bfe92b2a --- /dev/null +++ b/jupyter_client/discovery.py @@ -0,0 +1,131 @@ +from abc import ABCMeta, abstractmethod +import entrypoints +import logging +import six + +from .kernelspec import KernelSpecManager +from .manager import KernelManager + +log = logging.getLogger(__name__) + +class KernelProviderBase(six.with_metaclass(ABCMeta, object)): + id = None # Should be a short string identifying the provider class. 
+ + @abstractmethod + def find_kernels(self): + """Return an iterator of (kernel_name, kernel_info_dict) tuples.""" + pass + + @abstractmethod + def make_manager(self, name): + """Make and return a KernelManager instance to start a specified kernel + + name will be one of the kernel names produced by find_kernels() + """ + pass + +class KernelSpecProvider(KernelProviderBase): + """Offers kernel types from installed kernelspec directories. + """ + id = 'spec' + + def __init__(self): + self.ksm = KernelSpecManager() + + def find_kernels(self): + for name, resdir in self.ksm.find_kernel_specs().items(): + spec = self.ksm._get_kernel_spec_by_name(name, resdir) + yield name, { + # TODO: get full language info + 'language': {'name': spec.language}, + 'display_name': spec.display_name, + 'argv': spec.argv, + } + + def make_manager(self, name): + spec = self.ksm.get_kernel_spec(name) + return KernelManager(kernel_cmd=spec.argv, extra_env=spec.env) + + +class IPykernelProvider(KernelProviderBase): + """Offers a kernel type using the Python interpreter it's running in. + + This checks if ipykernel is importable first. + """ + id = 'pyimport' + + def _check_for_kernel(self): + try: + from ipykernel.kernelspec import RESOURCES, get_kernel_dict + from ipykernel.ipkernel import IPythonKernel + except ImportError: + return None + else: + return { + 'spec': get_kernel_dict(), + 'language_info': IPythonKernel.language_info, + 'resources_dir': RESOURCES, + } + + def find_kernels(self): + info = self._check_for_kernel() + + if info: + yield 'kernel', { + 'language': info['language_info'], + 'display_name': info['spec']['display_name'], + 'argv': info['spec']['argv'], + } + + def make_manager(self, name): + info = self._check_for_kernel() + if info is None: + raise Exception("ipykernel is not importable") + return KernelManager(kernel_cmd=info['spec']['argv']) + + +class KernelFinder(object): + """Manages a collection of kernel providers to find available kernel types + + *providers* should be a list of kernel provider instances. + """ + def __init__(self, providers): + self.providers = providers + + @classmethod + def from_entrypoints(cls): + """Load all kernel providers advertised by entry points. + + Kernel providers should use the "jupyter_client.kernel_providers" + entry point group. + + Returns an instance of KernelFinder. + """ + providers = [] + for ep in entrypoints.get_group_all('jupyter_client.kernel_providers'): + try: + provider = ep.load()() # Load and instantiate + except Exception: + log.error('Error loading kernel provider', exc_info=True) + else: + providers.append(provider) + + return cls(providers) + + def find_kernels(self): + """Iterate over available kernel types. + + Yields 2-tuples of (prefixed_name, attributes) + """ + for provider in self.providers: + for kid, attributes in provider.find_kernels(): + id = provider.id + '/' + kid + yield id, attributes + + def make_manager(self, name): + """Make a KernelManager instance for a given kernel type. 
+ """ + provider_id, kernel_id = name.split('/', 1) + for provider in self.providers: + if provider_id == provider.id: + return provider.make_manager(kernel_id) diff --git a/jupyter_client/ioloop/manager.py b/jupyter_client/ioloop/manager.py index 511a73f55..a8c83141a 100644 --- a/jupyter_client/ioloop/manager.py +++ b/jupyter_client/ioloop/manager.py @@ -1,19 +1,11 @@ """A kernel manager with a tornado IOLoop""" -#----------------------------------------------------------------------------- -# Copyright (c) The Jupyter Development Team -# -# Distributed under the terms of the BSD License. The full license is in -# the file COPYING, distributed as part of this software. -#----------------------------------------------------------------------------- - -#----------------------------------------------------------------------------- -# Imports -#----------------------------------------------------------------------------- +# Copyright (c) Jupyter Development Team. +# Distributed under the terms of the Modified BSD License. from __future__ import absolute_import -from zmq.eventloop import ioloop +from tornado import ioloop from zmq.eventloop.zmqstream import ZMQStream from traitlets import ( @@ -24,10 +16,6 @@ from jupyter_client.manager import KernelManager from .restarter import IOLoopKernelRestarter -#----------------------------------------------------------------------------- -# Code -#----------------------------------------------------------------------------- - def as_zmqstream(f): def wrapped(self, *args, **kwargs): @@ -37,9 +25,9 @@ def wrapped(self, *args, **kwargs): class IOLoopKernelManager(KernelManager): - loop = Instance('zmq.eventloop.ioloop.IOLoop') + loop = Instance('tornado.ioloop.IOLoop') def _loop_default(self): - return ioloop.IOLoop.instance() + return ioloop.IOLoop.current() restarter_class = Type( default_value=IOLoopKernelRestarter, @@ -66,8 +54,10 @@ def stop_restarter(self): if self.autorestart: if self._restarter is not None: self._restarter.stop() + self._restarter = None connect_shell = as_zmqstream(KernelManager.connect_shell) + connect_control = as_zmqstream(KernelManager.connect_control) connect_iopub = as_zmqstream(KernelManager.connect_iopub) connect_stdin = as_zmqstream(KernelManager.connect_stdin) connect_hb = as_zmqstream(KernelManager.connect_hb) diff --git a/jupyter_client/ioloop/restarter.py b/jupyter_client/ioloop/restarter.py index 6f531744c..69079eecf 100644 --- a/jupyter_client/ioloop/restarter.py +++ b/jupyter_client/ioloop/restarter.py @@ -4,37 +4,28 @@ restarts the kernel if it dies. """ -#----------------------------------------------------------------------------- -# Copyright (c) The Jupyter Development Team -# -# Distributed under the terms of the BSD License. The full license is in -# the file COPYING, distributed as part of this software. -#----------------------------------------------------------------------------- - -#----------------------------------------------------------------------------- -# Imports -#----------------------------------------------------------------------------- +# Copyright (c) Jupyter Development Team. +# Distributed under the terms of the Modified BSD License. 
from __future__ import absolute_import +import warnings from zmq.eventloop import ioloop - from jupyter_client.restarter import KernelRestarter from traitlets import ( Instance, ) -#----------------------------------------------------------------------------- -# Code -#----------------------------------------------------------------------------- - class IOLoopKernelRestarter(KernelRestarter): """Monitor and autorestart a kernel.""" - loop = Instance('zmq.eventloop.ioloop.IOLoop') + loop = Instance('tornado.ioloop.IOLoop') def _loop_default(self): - return ioloop.IOLoop.instance() + warnings.warn("IOLoopKernelRestarter.loop is deprecated in jupyter-client 5.2", + DeprecationWarning, stacklevel=4, + ) + return ioloop.IOLoop.current() _pcallback = None @@ -42,7 +33,7 @@ def start(self): """Start the polling of the kernel.""" if self._pcallback is None: self._pcallback = ioloop.PeriodicCallback( - self.poll, 1000*self.time_to_dead, self.loop + self.poll, 1000*self.time_to_dead, ) self._pcallback.start() diff --git a/jupyter_client/kernelapp.py b/jupyter_client/kernelapp.py new file mode 100644 index 000000000..2d50a95d3 --- /dev/null +++ b/jupyter_client/kernelapp.py @@ -0,0 +1,83 @@ +import os +import signal +import uuid + +from jupyter_core.application import JupyterApp, base_flags +from tornado.ioloop import IOLoop +from traitlets import Unicode + +from . import __version__ +from .kernelspec import KernelSpecManager, NATIVE_KERNEL_NAME +from .manager import KernelManager + +class KernelApp(JupyterApp): + """Launch a kernel by name in a local subprocess. + """ + version = __version__ + description = "Run a kernel locally in a subprocess" + + classes = [KernelManager, KernelSpecManager] + + aliases = { + 'kernel': 'KernelApp.kernel_name', + 'ip': 'KernelManager.ip', + } + flags = {'debug': base_flags['debug']} + + kernel_name = Unicode(NATIVE_KERNEL_NAME, + help = 'The name of a kernel type to start' + ).tag(config=True) + + def initialize(self, argv=None): + super(KernelApp, self).initialize(argv) + + cf_basename = 'kernel-%s.json' % uuid.uuid4() + self.config.setdefault('KernelManager', {}).setdefault('connection_file', os.path.join(self.runtime_dir, cf_basename)) + self.km = KernelManager(kernel_name=self.kernel_name, + config=self.config) + + self.loop = IOLoop.current() + self.loop.add_callback(self._record_started) + + def setup_signals(self): + """Shutdown on SIGTERM or SIGINT (Ctrl-C)""" + if os.name == 'nt': + return + + def shutdown_handler(signo, frame): + self.loop.add_callback_from_signal(self.shutdown, signo) + for sig in [signal.SIGTERM, signal.SIGINT]: + signal.signal(sig, shutdown_handler) + + def shutdown(self, signo): + self.log.info('Shutting down on signal %d' % signo) + self.km.shutdown_kernel() + self.loop.stop() + + def log_connection_info(self): + cf = self.km.connection_file + self.log.info('Connection file: %s', cf) + self.log.info("To connect a client: --existing %s", os.path.basename(cf)) + + def _record_started(self): + """For tests, create a file to indicate that we've started + + Do not rely on this except in our own tests! 
+ """ + fn = os.environ.get('JUPYTER_CLIENT_TEST_RECORD_STARTUP_PRIVATE') + if fn is not None: + with open(fn, 'wb'): + pass + + def start(self): + self.log.info('Starting kernel %r', self.kernel_name) + try: + self.km.start_kernel() + self.log_connection_info() + self.setup_signals() + self.loop.start() + finally: + self.km.cleanup() + + +main = KernelApp.launch_instance diff --git a/jupyter_client/kernelspec.py b/jupyter_client/kernelspec.py index 3465ac7a4..78a5b564c 100644 --- a/jupyter_client/kernelspec.py +++ b/jupyter_client/kernelspec.py @@ -3,6 +3,7 @@ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. +import errno import io import json import os @@ -13,7 +14,9 @@ pjoin = os.path.join from ipython_genutils.py3compat import PY3 -from traitlets import HasTraits, List, Unicode, Dict, Set, Bool, Type +from traitlets import ( + HasTraits, List, Unicode, Dict, Set, Bool, Type, CaselessStrEnum +) from traitlets.config import LoggingConfigurable from jupyter_core.paths import jupyter_data_dir, jupyter_path, SYSTEM_JUPYTER_PATH @@ -28,6 +31,9 @@ class KernelSpec(HasTraits): language = Unicode() env = Dict() resource_dir = Unicode() + interrupt_mode = CaselessStrEnum( + ['message', 'signal'], default_value='signal' + ) metadata = Dict() @classmethod @@ -46,6 +52,7 @@ def to_dict(self): env=self.env, display_name=self.display_name, language=self.language, + interrupt_mode=self.interrupt_mode, metadata=self.metadata, ) @@ -193,15 +200,39 @@ def _get_kernel_spec_by_name(self, kernel_name, resource_dir): return self.kernel_spec_class.from_resource_dir(resource_dir) + def _find_spec_directory(self, kernel_name): + """Find the resource directory of a named kernel spec""" + for kernel_dir in self.kernel_dirs: + try: + files = os.listdir(kernel_dir) + except OSError as e: + if e.errno in (errno.ENOTDIR, errno.ENOENT): + continue + raise + for f in files: + path = pjoin(kernel_dir, f) + if f.lower() == kernel_name and _is_kernel_dir(path): + return path + + if kernel_name == NATIVE_KERNEL_NAME: + try: + from ipykernel.kernelspec import RESOURCES + except ImportError: + pass + else: + return RESOURCES + def get_kernel_spec(self, kernel_name): """Returns a :class:`KernelSpec` instance for the given kernel_name. Raises :exc:`NoSuchKernel` if the given kernel name is not found. 
""" - d = self.find_kernel_specs() - try: - resource_dir = d[kernel_name.lower()] - except KeyError: + if not _is_valid_kernel_name(kernel_name): + self.log.warning("Kernelspec name %r is invalid: %s", kernel_name, + _kernel_name_description) + + resource_dir = self._find_spec_directory(kernel_name.lower()) + if resource_dir is None: raise NoSuchKernel(kernel_name) return self._get_kernel_spec_by_name(kernel_name, resource_dir) @@ -220,14 +251,28 @@ def get_all_specs(self): } """ d = self.find_kernel_specs() - return {kname: { - "resource_dir": d[kname], - "spec": self._get_kernel_spec_by_name(kname, d[kname]).to_dict() - } for kname in d} + res = {} + for kname, resource_dir in d.items(): + try: + if self.__class__ is KernelSpecManager: + spec = self._get_kernel_spec_by_name(kname, resource_dir) + else: + # avoid calling private methods in subclasses, + # which may have overridden find_kernel_specs + # and get_kernel_spec, but not the newer get_all_specs + spec = self.get_kernel_spec(kname) + + res[kname] = { + "resource_dir": resource_dir, + "spec": spec.to_dict() + } + except Exception: + self.log.warning("Error loading kernelspec %r", kname, exc_info=True) + return res def remove_kernel_spec(self, name): """Remove a kernel spec directory by name. - + Returns the path that was deleted. """ save_native = self.ensure_native_kernel @@ -263,7 +308,7 @@ def install_kernel_spec(self, source_dir, kernel_name=None, user=False, If ``user`` is False, it will attempt to install into the systemwide kernel registry. If the process does not have appropriate permissions, an :exc:`OSError` will be raised. - + If ``prefix`` is given, the kernelspec will be installed to PREFIX/share/jupyter/kernels/KERNEL_NAME. This can be sys.prefix for installation inside virtual or conda envs. @@ -284,16 +329,16 @@ def install_kernel_spec(self, source_dir, kernel_name=None, user=False, DeprecationWarning, stacklevel=2, ) - + destination = self._get_destination_dir(kernel_name, user=user, prefix=prefix) self.log.debug('Installing kernelspec in %s', destination) - + kernel_dir = os.path.dirname(destination) if kernel_dir not in self.kernel_dirs: self.log.warning("Installing to %s, which is not in %s. The kernelspec may not be found.", kernel_dir, self.kernel_dirs, ) - + if os.path.isdir(destination): self.log.info('Removing existing kernelspec in %s', destination) shutil.rmtree(destination) diff --git a/jupyter_client/launcher.py b/jupyter_client/launcher.py index 285778a68..1ba206269 100644 --- a/jupyter_client/launcher.py +++ b/jupyter_client/launcher.py @@ -101,7 +101,8 @@ def launch_kernel(cmd, stdin=None, stdout=None, stderr=None, env=None, except: from _subprocess import DuplicateHandle, GetCurrentProcess, \ DUPLICATE_SAME_ACCESS, CREATE_NEW_PROCESS_GROUP - # Launch the kernel process + + # create a handle on the parent to be inherited if independent: kwargs['creationflags'] = CREATE_NEW_PROCESS_GROUP else: @@ -111,6 +112,15 @@ def launch_kernel(cmd, stdin=None, stdout=None, stderr=None, env=None, DUPLICATE_SAME_ACCESS) env['JPY_PARENT_PID'] = str(int(handle)) + # Prevent creating new console window on pythonw + if redirect_out: + kwargs['creationflags'] = kwargs.setdefault('creationflags', 0) | 0x08000000 # CREATE_NO_WINDOW + + # Avoid closing the above parent and interrupt handles. + # close_fds is True by default on Python >=3.7 + # or when no stream is captured on Python <3.7 + # (we always capture stdin, so this is already False by default on <3.7) + kwargs['close_fds'] = False else: # Create a new session. 
# This makes it easier to interrupt the kernel, diff --git a/jupyter_client/manager.py b/jupyter_client/manager.py index d50a5fbb8..24de82d0f 100644 --- a/jupyter_client/manager.py +++ b/jupyter_client/manager.py @@ -11,25 +11,20 @@ import signal import sys import time -import warnings -try: - from queue import Empty # Py 3 -except ImportError: - from Queue import Empty # Py 2 import zmq from ipython_genutils.importstring import import_item from .localinterfaces import is_local_ip, local_ips from traitlets import ( - Any, Float, Instance, Unicode, List, Bool, Type, DottedObjectName + Any, Float, Instance, Unicode, List, Bool, Type, DottedObjectName, Dict, + default, observe ) from jupyter_client import ( launch_kernel, kernelspec, ) from .connect import ConnectionFileMixin -from .session import Session from .managerabc import ( KernelManagerABC ) @@ -44,7 +39,7 @@ class KernelManager(ConnectionFileMixin): # The PyZMQ Context to use for communication with the kernel. context = Instance(zmq.Context) def _context_default(self): - return zmq.Context.instance() + return zmq.Context() # the class to create with our `client` method client_class = DottedObjectName('jupyter_client.blocking.BlockingKernelClient') @@ -52,8 +47,9 @@ def _context_default(self): def _client_factory_default(self): return import_item(self.client_class) - def _client_class_changed(self, name, old, new): - self.client_factory = import_item(str(new)) + @observe('client_class') + def _client_class_changed(self, change): + self.client_factory = import_item(str(change['new'])) # The kernel process with which the KernelManager is communicating. # generally a Popen instance @@ -74,9 +70,10 @@ def _kernel_spec_manager_changed(self): kernel_name = Unicode(kernelspec.NATIVE_KERNEL_NAME) - def _kernel_name_changed(self, name, old, new): + @observe('kernel_name') + def _kernel_name_changed(self, change): self._kernel_spec = None - if new == 'python': + if change['new'] == 'python': self.kernel_name = kernelspec.NATIVE_KERNEL_NAME _kernel_spec = None @@ -87,23 +84,19 @@ def kernel_spec(self): self._kernel_spec = self.kernel_spec_manager.get_kernel_spec(self.kernel_name) return self._kernel_spec - kernel_cmd = List(Unicode(), config=True, - help="""DEPRECATED: Use kernel_name instead. - - The Popen Command to launch the kernel. - Override this if you have a custom kernel. - If kernel_cmd is specified in a configuration file, - Jupyter does not pass any arguments to the kernel, - because it cannot make any assumptions about the - arguments that the kernel understands. In particular, - this means that the kernel does not receive the - option --debug if it given on the Jupyter command line. - """ + kernel_cmd = List(Unicode(), + help="""The Popen Command to launch the kernel.""" + ) + + extra_env = Dict( + help="""Extra environment variables to be set for the kernel.""" ) - def _kernel_cmd_changed(self, name, old, new): - warnings.warn("Setting kernel_cmd is deprecated, use kernel_spec to " - "start different kernels.") + cache_ports = Bool(help='True if the MultiKernelManager should cache ports for this KernelManager instance') + + @default('cache_ports') + def _default_cache_ports(self): + return self.transport == 'tcp' @property def ipykernel(self): @@ -174,8 +167,10 @@ def format_kernel_cmd(self, extra_arguments=None): else: cmd = self.kernel_spec.argv + extra_arguments - if cmd and cmd[0] == 'python': - # executable is 'python', use sys.executable. 
+        if cmd and cmd[0] in {'python',
+                              'python%i' % sys.version_info[0],
+                              'python%i.%i' % sys.version_info[:2]}:
+            # executable is 'python' or 'python3', use sys.executable.
             # These will typically be the same,
             # but if the current process is in an env
             # and has been launched by abspath without
@@ -210,7 +205,7 @@ def _launch_kernel(self, kernel_cmd, **kw):
 
     def _connect_control_socket(self):
         if self._control_socket is None:
-            self._control_socket = self.connect_control()
+            self._control_socket = self._create_connected_socket('control')
             self._control_socket.linger = 100
 
     def _close_control_socket(self):
@@ -233,9 +228,10 @@ def start_kernel(self, **kw):
         """
         if self.transport == 'tcp' and not is_local_ip(self.ip):
             raise RuntimeError("Can only launch a kernel on a local interface. "
+                               "This one is not: %s. "
                                "Make sure that the '*_address' attributes are "
                                "configured properly. "
-                               "Currently valid addresses are: %s" % local_ips()
+                               "Currently valid addresses are: %s" % (self.ip, local_ips())
                                )
 
         # write connection file / get default ports
@@ -251,17 +247,34 @@ def start_kernel(self, **kw):
         # If set, it can bork all the things.
         env.pop('PYTHONEXECUTABLE', None)
         if not self.kernel_cmd:
-            # If kernel_cmd has been set manually, don't refer to a kernel spec
-            # Environment variables from kernel spec are added to os.environ
-            env.update(self.kernel_spec.env or {})
-
+            # If kernel_cmd has been set manually, don't refer to a kernel spec.
+            # Environment variables from kernel spec are added to os.environ.
+            env.update(self._get_env_substitutions(self.kernel_spec.env, env))
+        elif self.extra_env:
+            env.update(self._get_env_substitutions(self.extra_env, env))
+
         # launch the kernel subprocess
         self.log.debug("Starting kernel: %s", kernel_cmd)
-        self.kernel = self._launch_kernel(kernel_cmd, env=env,
-                                          **kw)
+        self.kernel = self._launch_kernel(kernel_cmd, env=env, **kw)
         self.start_restarter()
         self._connect_control_socket()
 
+    def _get_env_substitutions(self, templated_env, substitution_values):
+        """ Walks env entries in templated_env and applies possible substitutions from current env
+            (represented by substitution_values).
+            Returns the substituted dict of env entries.
+        """
+        substituted_env = {}
+        if templated_env:
+            from string import Template
+
+            # For each templated env entry, fill any templated references
+            # matching names of env variables with those values and build
+            # new dict with substitutions.
+            for k, v in templated_env.items():
+                substituted_env.update({k: Template(v).safe_substitute(substitution_values)})
+        return substituted_env
+
     def request_shutdown(self, restart=False):
         """Send a shutdown request via control channel
         """
@@ -283,6 +296,10 @@ def finish_shutdown(self, waittime=None, pollinterval=0.1):
             if self.is_alive():
                 time.sleep(pollinterval)
             else:
+                # If there's still a proc, wait and clear
+                if self.has_kernel:
+                    self.kernel.wait()
+                    self.kernel = None
                 break
         else:
             # OK, we've waited long enough.
@@ -297,13 +314,14 @@ def cleanup(self, connection_file=True):
             self.cleanup_ipc_files()
 
         self._close_control_socket()
+        self.session.parent = None
 
     def shutdown_kernel(self, now=False, restart=False):
         """Attempts to stop the kernel process cleanly.
 
         This attempts to shutdown the kernels cleanly by:
 
-        1. Sending it a shutdown message over the shell channel.
+        1. Sending it a shutdown message over the control channel.
         2. If that fails, the kernel is shutdown forcibly by sending it
            a signal.
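
# Note: `_get_env_substitutions` above expands `${VAR}` references in kernelspec
# `env` / `extra_env` values against the parent environment, via
# `string.Template.safe_substitute`. A standalone sketch of the same mechanism
# (variable names are illustrative, taken from the tests later in this diff):
#
#     from string import Template
#
#     templated_env = {'TEST_VARS': '${TEST_VARS}:test_var_2'}
#     parent_env = {'TEST_VARS': 'test_var_1'}
#
#     substituted = {k: Template(v).safe_substitute(parent_env)
#                    for k, v in templated_env.items()}
#     assert substituted == {'TEST_VARS': 'test_var_1:test_var_2'}
#     # safe_substitute leaves unknown ${...} references in place instead of raising.
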
@@ -384,7 +402,10 @@ def _kill_kernel(self): # Signal the kernel to terminate (sends SIGKILL on Unix and calls # TerminateProcess() on Win32). try: - self.kernel.kill() + if hasattr(signal, 'SIGKILL'): + self.signal_kernel(signal.SIGKILL) + else: + self.kernel.kill() except OSError as e: # In Windows, we will get an Access Denied error if the process # has already terminated. Ignore it. @@ -411,11 +432,18 @@ def interrupt_kernel(self): platforms. """ if self.has_kernel: - if sys.platform == 'win32': - from .win_interrupt import send_interrupt - send_interrupt(self.kernel.win32_interrupt_event) - else: - self.signal_kernel(signal.SIGINT) + interrupt_mode = self.kernel_spec.interrupt_mode + if interrupt_mode == 'signal': + if sys.platform == 'win32': + from .win_interrupt import send_interrupt + send_interrupt(self.kernel.win32_interrupt_event) + else: + self.signal_kernel(signal.SIGINT) + + elif interrupt_mode == 'message': + msg = self.session.msg("interrupt_request", content={}) + self._connect_control_socket() + self.session.send(self._control_socket, msg) else: raise RuntimeError("Cannot interrupt kernel. No kernel is running!") diff --git a/jupyter_client/multikernelmanager.py b/jupyter_client/multikernelmanager.py index a83be953c..522c44947 100644 --- a/jupyter_client/multikernelmanager.py +++ b/jupyter_client/multikernelmanager.py @@ -7,13 +7,14 @@ import os import uuid +import socket import zmq from traitlets.config.configurable import LoggingConfigurable from ipython_genutils.importstring import import_item from traitlets import ( - Instance, Dict, List, Unicode, Any, DottedObjectName + Any, Bool, Dict, DottedObjectName, Instance, Unicode, default, observe ) from ipython_genutils.py3compat import unicode_type @@ -47,23 +48,65 @@ class MultiKernelManager(LoggingConfigurable): ) kernel_spec_manager = Instance(KernelSpecManager, allow_none=True) - + kernel_manager_class = DottedObjectName( "jupyter_client.ioloop.IOLoopKernelManager", config=True, help="""The kernel manager class. This is configurable to allow subclassing of the KernelManager for customized behavior. 
""" ) - def _kernel_manager_class_changed(self, name, old, new): - self.kernel_manager_factory = import_item(new) + + def __init__(self, *args, **kwargs): + super(MultiKernelManager, self).__init__(*args, **kwargs) + + # Cache all the currently used ports + self.currently_used_ports = set() + + @observe('kernel_manager_class') + def _kernel_manager_class_changed(self, change): + self.kernel_manager_factory = self._create_kernel_manager_factory() kernel_manager_factory = Any(help="this is kernel_manager_class after import") + + @default('kernel_manager_factory') def _kernel_manager_factory_default(self): - return import_item(self.kernel_manager_class) + return self._create_kernel_manager_factory() + + def _create_kernel_manager_factory(self): + kernel_manager_ctor = import_item(self.kernel_manager_class) + + def create_kernel_manager(*args, **kwargs): + km = kernel_manager_ctor(*args, **kwargs) + + if km.cache_ports: + km.shell_port = self._find_available_port(km.ip) + km.iopub_port = self._find_available_port(km.ip) + km.stdin_port = self._find_available_port(km.ip) + km.hb_port = self._find_available_port(km.ip) + km.control_port = self._find_available_port(km.ip) + + return km + + return create_kernel_manager + + def _find_available_port(self, ip): + while True: + tmp_sock = socket.socket() + tmp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, b'\0' * 8) + tmp_sock.bind((ip, 0)) + port = tmp_sock.getsockname()[1] + tmp_sock.close() + + # This is a workaround for https://github.com/jupyter/jupyter_client/issues/487 + # We prevent two kernels to have the same ports. + if port not in self.currently_used_ports: + self.currently_used_ports.add(port) + + return port context = Instance('zmq.Context') def _context_default(self): - return zmq.Context.instance() + return zmq.Context() connection_dir = Unicode('') @@ -86,11 +129,11 @@ def start_kernel(self, kernel_name=None, **kwargs): """Start a new kernel. The caller can pick a kernel_id by passing one in as a keyword arg, - otherwise one will be picked using a uuid. + otherwise one will be generated using new_kernel_id(). The kernel ID for the newly started kernel is returned. """ - kernel_id = kwargs.pop('kernel_id', unicode_type(uuid.uuid4())) + kernel_id = kwargs.pop('kernel_id', self.new_kernel_id(**kwargs)) if kernel_id in self: raise DuplicateKernelError('Kernel already exists: %s' % kernel_id) @@ -111,7 +154,6 @@ def start_kernel(self, kernel_name=None, **kwargs): self._kernels[kernel_id] = km return kernel_id - @kernel_method def shutdown_kernel(self, kernel_id, now=False, restart=False): """Shutdown a kernel by its kernel uuid. @@ -125,8 +167,21 @@ def shutdown_kernel(self, kernel_id, now=False, restart=False): Will the kernel be restarted? """ self.log.info("Kernel shutdown: %s" % kernel_id) + + km = self.get_kernel(kernel_id) + + ports = ( + km.shell_port, km.iopub_port, km.stdin_port, + km.hb_port, km.control_port + ) + + km.shutdown_kernel(now=now, restart=restart) self.remove_kernel(kernel_id) + if km.cache_ports and not restart: + for port in ports: + self.currently_used_ports.remove(port) + @kernel_method def request_shutdown(self, kernel_id, restart=False): """Ask a kernel to shut down by its kernel uuid""" @@ -284,6 +339,22 @@ def connect_shell(self, kernel_id, identity=None): stream : zmq Socket or ZMQStream """ + @kernel_method + def connect_control(self, kernel_id, identity=None): + """Return a zmq Socket connected to the control channel. 
+ + Parameters + ========== + kernel_id : uuid + The id of the kernel + identity : bytes (optional) + The zmq identity of the socket + + Returns + ======= + stream : zmq Socket or ZMQStream + """ + @kernel_method def connect_stdin(self, kernel_id, identity=None): """Return a zmq Socket connected to the stdin channel. @@ -315,3 +386,12 @@ def connect_hb(self, kernel_id, identity=None): ======= stream : zmq Socket or ZMQStream """ + + def new_kernel_id(self, **kwargs): + """ + Returns the id to associate with the kernel for this request. Subclasses may override + this method to substitute other sources of kernel ids. + :param kwargs: + :return: string-ized version 4 uuid + """ + return unicode_type(uuid.uuid4()) diff --git a/jupyter_client/session.py b/jupyter_client/session.py index af60ac259..9491b5d77 100644 --- a/jupyter_client/session.py +++ b/jupyter_client/session.py @@ -61,9 +61,9 @@ def compare_digest(a,b): return a == b from jupyter_client.jsonutil import extract_dates, squash_dates, date_default from ipython_genutils.py3compat import (str_to_bytes, str_to_unicode, unicode_type, iteritems) -from traitlets import (CBytes, Unicode, Bool, Any, Instance, Set, - DottedObjectName, CUnicode, Dict, Integer, - TraitError, +from traitlets import ( + CBytes, Unicode, Bool, Any, Instance, Set, DottedObjectName, CUnicode, + Dict, Integer, TraitError, observe ) from jupyter_client import protocol_version from jupyter_client.adapter import adapt @@ -180,20 +180,22 @@ class SessionFactory(LoggingConfigurable): """ logname = Unicode('') - def _logname_changed(self, name, old, new): - self.log = logging.getLogger(new) + + @observe('logname') + def _logname_changed(self, change): + self.log = logging.getLogger(change['new']) # not configurable: context = Instance('zmq.Context') def _context_default(self): - return zmq.Context.instance() + return zmq.Context() session = Instance('jupyter_client.session.Session', allow_none=True) - loop = Instance('zmq.eventloop.ioloop.IOLoop') + loop = Instance('tornado.ioloop.IOLoop') def _loop_default(self): - return IOLoop.instance() + return IOLoop.current() def __init__(self, **kwargs): super(SessionFactory, self).__init__(**kwargs) @@ -300,10 +302,10 @@ class Session(Configurable): """ debug = Bool(False, config=True, help="""Debug output in the Session""") - + check_pid = Bool(True, config=True, help="""Whether to check PID to protect against calls after fork. - + This check can be disabled if fork-safety is handled elsewhere. """) @@ -311,7 +313,10 @@ class Session(Configurable): help="""The name of the packer for serializing messages. Should be one of 'json', 'pickle', or an import name for a custom callable serializer.""") - def _packer_changed(self, name, old, new): + + @observe('packer') + def _packer_changed(self, change): + new = change['new'] if new.lower() == 'json': self.pack = json_packer self.unpack = json_unpacker @@ -326,7 +331,10 @@ def _packer_changed(self, name, old, new): unpacker = DottedObjectName('json', config=True, help="""The name of the unpacker for unserializing messages. 
Only used with custom functions for `packer`.""") - def _unpacker_changed(self, name, old, new): + + @observe('unpacker') + def _unpacker_changed(self, change): + new = change['new'] if new.lower() == 'json': self.pack = json_packer self.unpack = json_unpacker @@ -345,7 +353,8 @@ def _session_default(self): self.bsession = u.encode('ascii') return u - def _session_changed(self, name, old, new): + @observe('session') + def _session_changed(self, change): self.bsession = self.session.encode('ascii') # bsession is the session as bytes @@ -368,13 +377,17 @@ def _session_changed(self, name, old, new): def _key_default(self): return new_id_bytes() - def _key_changed(self): + @observe('key') + def _key_changed(self, change): self._new_auth() signature_scheme = Unicode('hmac-sha256', config=True, help="""The digest scheme used to construct the message signatures. Must have the form 'hmac-HASH'.""") - def _signature_scheme_changed(self, name, old, new): + + @observe('signature_scheme') + def _signature_scheme_changed(self, change): + new = change['new'] if not new.startswith('hmac-'): raise TraitError("signature_scheme must start with 'hmac-', got %r" % new) hash_name = new.split('-', 1)[1] @@ -387,9 +400,9 @@ def _signature_scheme_changed(self, name, old, new): digest_mod = Any() def _digest_mod_default(self): return hashlib.sha256 - + auth = Instance(hmac.HMAC, allow_none=True) - + def _new_auth(self): if self.key: self.auth = hmac.HMAC(self.key, digestmod=self.digest_mod) @@ -406,8 +419,10 @@ def _new_auth(self): keyfile = Unicode('', config=True, help="""path to file containing execution key.""") - def _keyfile_changed(self, name, old, new): - with open(new, 'rb') as f: + + @observe('keyfile') + def _keyfile_changed(self, change): + with open(change['new'], 'rb') as f: self.key = f.read().strip() # for protecting against sends from forks @@ -416,13 +431,19 @@ def _keyfile_changed(self, name, old, new): # serialization traits: pack = Any(default_packer) # the actual packer function - def _pack_changed(self, name, old, new): + + @observe('pack') + def _pack_changed(self, change): + new = change['new'] if not callable(new): raise TypeError("packer must be callable, not %s"%type(new)) unpack = Any(default_unpacker) # the actual packer function - def _unpack_changed(self, name, old, new): + + @observe('unpack') + def _unpack_changed(self, change): # unpacker is not checked - it is assumed to be + new = change['new'] if not callable(new): raise TypeError("unpacker must be callable, not %s"%type(new)) @@ -506,10 +527,12 @@ def clone(self): new_session.digest_history.update(self.digest_history) return new_session + message_count = 0 @property def msg_id(self): - """always return new uuid""" - return new_id() + message_number = self.message_count + self.message_count += 1 + return '{}_{}'.format(self.session, message_number) def _check_packers(self): """check packers for datetime support.""" @@ -779,7 +802,8 @@ def send_raw(self, stream, msg_list, flags=0, copy=True, ident=None): to_send.extend(ident) to_send.append(DELIM) - to_send.append(self.sign(msg_list)) + # Don't include buffers in signature (per spec). 
+ to_send.append(self.sign(msg_list[0:4])) to_send.extend(msg_list) stream.send_multipart(to_send, flags, copy=copy) diff --git a/jupyter_client/ssh/__init__.py b/jupyter_client/ssh/__init__.py new file mode 100644 index 000000000..d7bc9d566 --- /dev/null +++ b/jupyter_client/ssh/__init__.py @@ -0,0 +1 @@ +from jupyter_client.ssh.tunnel import * diff --git a/jupyter_client/ssh/forward.py b/jupyter_client/ssh/forward.py new file mode 100644 index 000000000..a44c11769 --- /dev/null +++ b/jupyter_client/ssh/forward.py @@ -0,0 +1,92 @@ +# +# This file is adapted from a paramiko demo, and thus licensed under LGPL 2.1. +# Original Copyright (C) 2003-2007 Robey Pointer +# Edits Copyright (C) 2010 The IPython Team +# +# Paramiko is free software; you can redistribute it and/or modify it under the +# terms of the GNU Lesser General Public License as published by the Free +# Software Foundation; either version 2.1 of the License, or (at your option) +# any later version. +# +# Paramiko is distrubuted in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR +# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more +# details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with Paramiko; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA. + +""" +Sample script showing how to do local port forwarding over paramiko. + +This script connects to the requested SSH server and sets up local port +forwarding (the openssh -L option) from a local port through a tunneled +connection to a destination reachable from the SSH server machine. +""" + +from __future__ import print_function + +import logging +import select +try: # Python 3 + import socketserver +except ImportError: # Python 2 + import SocketServer as socketserver + +logger = logging.getLogger('ssh') + + +class ForwardServer (socketserver.ThreadingTCPServer): + daemon_threads = True + allow_reuse_address = True + + +class Handler (socketserver.BaseRequestHandler): + + def handle(self): + try: + chan = self.ssh_transport.open_channel('direct-tcpip', + (self.chain_host, self.chain_port), + self.request.getpeername()) + except Exception as e: + logger.debug('Incoming request to %s:%d failed: %s' % (self.chain_host, + self.chain_port, + repr(e))) + return + if chan is None: + logger.debug('Incoming request to %s:%d was rejected by the SSH server.' % + (self.chain_host, self.chain_port)) + return + + logger.debug('Connected! Tunnel open %r -> %r -> %r' % (self.request.getpeername(), + chan.getpeername(), (self.chain_host, self.chain_port))) + while True: + r, w, x = select.select([self.request, chan], [], []) + if self.request in r: + data = self.request.recv(1024) + if len(data) == 0: + break + chan.send(data) + if chan in r: + data = chan.recv(1024) + if len(data) == 0: + break + self.request.send(data) + chan.close() + self.request.close() + logger.debug('Tunnel closed ') + + +def forward_tunnel(local_port, remote_host, remote_port, transport): + # this is a little convoluted, but lets me configure things for the Handler + # object. (SocketServer doesn't give Handlers any way to access the outer + # server normally.) 
+ class SubHander (Handler): + chain_host = remote_host + chain_port = remote_port + ssh_transport = transport + ForwardServer(('127.0.0.1', local_port), SubHander).serve_forever() + + +__all__ = ['forward_tunnel'] diff --git a/jupyter_client/ssh/tunnel.py b/jupyter_client/ssh/tunnel.py new file mode 100644 index 000000000..e1cd08027 --- /dev/null +++ b/jupyter_client/ssh/tunnel.py @@ -0,0 +1,375 @@ +"""Basic ssh tunnel utilities, and convenience functions for tunneling +zeromq connections. +""" + +# Copyright (C) 2010-2011 IPython Development Team +# Copyright (C) 2011- PyZMQ Developers +# +# Redistributed from IPython under the terms of the BSD License. + + +from __future__ import print_function + +import atexit +import os +import re +import signal +import socket +import sys +import warnings +from getpass import getpass, getuser +from multiprocessing import Process + +try: + with warnings.catch_warnings(): + warnings.simplefilter('ignore', DeprecationWarning) + import paramiko + SSHException = paramiko.ssh_exception.SSHException +except ImportError: + paramiko = None + class SSHException(Exception): + pass +else: + from .forward import forward_tunnel + +try: + import pexpect +except ImportError: + pexpect = None + +from zmq.utils.strtypes import b + + +def select_random_ports(n): + """Select and return n random ports that are available.""" + ports = [] + sockets = [] + for i in range(n): + sock = socket.socket() + sock.bind(('', 0)) + ports.append(sock.getsockname()[1]) + sockets.append(sock) + for sock in sockets: + sock.close() + return ports + + +#----------------------------------------------------------------------------- +# Check for passwordless login +#----------------------------------------------------------------------------- +_password_pat = re.compile(b(r'pass(word|phrase):'), re.IGNORECASE) + + +def try_passwordless_ssh(server, keyfile, paramiko=None): + """Attempt to make an ssh connection without a password. + This is mainly used for requiring password input only once + when many tunnels may be connected to the same server. + + If paramiko is None, the default for the platform is chosen. + """ + if paramiko is None: + paramiko = sys.platform == 'win32' + if not paramiko: + f = _try_passwordless_openssh + else: + f = _try_passwordless_paramiko + return f(server, keyfile) + + +def _try_passwordless_openssh(server, keyfile): + """Try passwordless login with shell ssh command.""" + if pexpect is None: + raise ImportError("pexpect unavailable, use paramiko") + cmd = 'ssh -f ' + server + if keyfile: + cmd += ' -i ' + keyfile + cmd += ' exit' + + # pop SSH_ASKPASS from env + env = os.environ.copy() + env.pop('SSH_ASKPASS', None) + + ssh_newkey = 'Are you sure you want to continue connecting' + p = pexpect.spawn(cmd, env=env) + while True: + try: + i = p.expect([ssh_newkey, _password_pat], timeout=.1) + if i == 0: + raise SSHException('The authenticity of the host can\'t be established.') + except pexpect.TIMEOUT: + continue + except pexpect.EOF: + return True + else: + return False + + +def _try_passwordless_paramiko(server, keyfile): + """Try passwordless login with paramiko.""" + if paramiko is None: + msg = "Paramiko unavailable, " + if sys.platform == 'win32': + msg += "Paramiko is required for ssh tunneled connections on Windows." + else: + msg += "use OpenSSH." 
+ raise ImportError(msg) + username, server, port = _split_server(server) + client = paramiko.SSHClient() + client.load_system_host_keys() + client.set_missing_host_key_policy(paramiko.WarningPolicy()) + try: + client.connect(server, port, username=username, key_filename=keyfile, + look_for_keys=True) + except paramiko.AuthenticationException: + return False + else: + client.close() + return True + + +def tunnel_connection(socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60): + """Connect a socket to an address via an ssh tunnel. + + This is a wrapper for socket.connect(addr), when addr is not accessible + from the local machine. It simply creates an ssh tunnel using the remaining args, + and calls socket.connect('tcp://localhost:lport') where lport is the randomly + selected local port of the tunnel. + + """ + new_url, tunnel = open_tunnel(addr, server, keyfile=keyfile, password=password, paramiko=paramiko, timeout=timeout) + socket.connect(new_url) + return tunnel + + +def open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60): + """Open a tunneled connection from a 0MQ url. + + For use inside tunnel_connection. + + Returns + ------- + + (url, tunnel) : (str, object) + The 0MQ url that has been forwarded, and the tunnel object + """ + + lport = select_random_ports(1)[0] + transport, addr = addr.split('://') + ip, rport = addr.split(':') + rport = int(rport) + if paramiko is None: + paramiko = sys.platform == 'win32' + if paramiko: + tunnelf = paramiko_tunnel + else: + tunnelf = openssh_tunnel + + tunnel = tunnelf(lport, rport, server, remoteip=ip, keyfile=keyfile, password=password, timeout=timeout) + return 'tcp://127.0.0.1:%i' % lport, tunnel + + +def openssh_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60): + """Create an ssh tunnel using command-line ssh that connects port lport + on this machine to localhost:rport on server. The tunnel + will automatically close when not in use, remaining open + for a minimum of timeout seconds for an initial connection. + + This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`, + as seen from `server`. + + keyfile and password may be specified, but ssh config is checked for defaults. + + Parameters + ---------- + + lport : int + local port for connecting to the tunnel from this machine. + rport : int + port on the remote machine to connect to. + server : str + The ssh server to connect to. The full ssh server string will be parsed. + user@server:port + remoteip : str [Default: 127.0.0.1] + The remote ip, specifying the destination of the tunnel. + Default is localhost, which means that the tunnel would redirect + localhost:lport on this machine to localhost:rport on the *server*. + + keyfile : str; path to public key file + This specifies a key to be used in ssh login, default None. + Regular default ssh keys will be used without specifying this argument. + password : str; + Your ssh password to the ssh server. Note that if this is left None, + you will be prompted for it if passwordless key based login is unavailable. + timeout : int [default: 60] + The time (in seconds) after which no activity will result in the tunnel + closing. This prevents orphaned tunnels from running forever. 
+ """ + if pexpect is None: + raise ImportError("pexpect unavailable, use paramiko_tunnel") + ssh = "ssh " + if keyfile: + ssh += "-i " + keyfile + + if ':' in server: + server, port = server.split(':') + ssh += " -p %s" % port + + cmd = "%s -O check %s" % (ssh, server) + (output, exitstatus) = pexpect.run(cmd, withexitstatus=True) + if not exitstatus: + pid = int(output[output.find(b"(pid=")+5:output.find(b")")]) + cmd = "%s -O forward -L 127.0.0.1:%i:%s:%i %s" % ( + ssh, lport, remoteip, rport, server) + (output, exitstatus) = pexpect.run(cmd, withexitstatus=True) + if not exitstatus: + atexit.register(_stop_tunnel, cmd.replace("-O forward", "-O cancel", 1)) + return pid + cmd = "%s -f -S none -L 127.0.0.1:%i:%s:%i %s sleep %i" % ( + ssh, lport, remoteip, rport, server, timeout) + + # pop SSH_ASKPASS from env + env = os.environ.copy() + env.pop('SSH_ASKPASS', None) + + ssh_newkey = 'Are you sure you want to continue connecting' + tunnel = pexpect.spawn(cmd, env=env) + failed = False + while True: + try: + i = tunnel.expect([ssh_newkey, _password_pat], timeout=.1) + if i == 0: + raise SSHException('The authenticity of the host can\'t be established.') + except pexpect.TIMEOUT: + continue + except pexpect.EOF: + if tunnel.exitstatus: + print(tunnel.exitstatus) + print(tunnel.before) + print(tunnel.after) + raise RuntimeError("tunnel '%s' failed to start" % (cmd)) + else: + return tunnel.pid + else: + if failed: + print("Password rejected, try again") + password = None + if password is None: + password = getpass("%s's password: " % (server)) + tunnel.sendline(password) + failed = True + + +def _stop_tunnel(cmd): + pexpect.run(cmd) + + +def _split_server(server): + if '@' in server: + username, server = server.split('@', 1) + else: + username = getuser() + if ':' in server: + server, port = server.split(':') + port = int(port) + else: + port = 22 + return username, server, port + + +def paramiko_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60): + """launch a tunner with paramiko in a subprocess. This should only be used + when shell ssh is unavailable (e.g. Windows). + + This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`, + as seen from `server`. + + If you are familiar with ssh tunnels, this creates the tunnel: + + ssh server -L localhost:lport:remoteip:rport + + keyfile and password may be specified, but ssh config is checked for defaults. + + + Parameters + ---------- + + lport : int + local port for connecting to the tunnel from this machine. + rport : int + port on the remote machine to connect to. + server : str + The ssh server to connect to. The full ssh server string will be parsed. + user@server:port + remoteip : str [Default: 127.0.0.1] + The remote ip, specifying the destination of the tunnel. + Default is localhost, which means that the tunnel would redirect + localhost:lport on this machine to localhost:rport on the *server*. + + keyfile : str; path to public key file + This specifies a key to be used in ssh login, default None. + Regular default ssh keys will be used without specifying this argument. + password : str; + Your ssh password to the ssh server. Note that if this is left None, + you will be prompted for it if passwordless key based login is unavailable. + timeout : int [default: 60] + The time (in seconds) after which no activity will result in the tunnel + closing. This prevents orphaned tunnels from running forever. 
+ + """ + if paramiko is None: + raise ImportError("Paramiko not available") + + if password is None: + if not _try_passwordless_paramiko(server, keyfile): + password = getpass("%s's password: " % (server)) + + p = Process(target=_paramiko_tunnel, + args=(lport, rport, server, remoteip), + kwargs=dict(keyfile=keyfile, password=password)) + p.daemon = True + p.start() + return p + + +def _paramiko_tunnel(lport, rport, server, remoteip, keyfile=None, password=None): + """Function for actually starting a paramiko tunnel, to be passed + to multiprocessing.Process(target=this), and not called directly. + """ + username, server, port = _split_server(server) + client = paramiko.SSHClient() + client.load_system_host_keys() + client.set_missing_host_key_policy(paramiko.WarningPolicy()) + + try: + client.connect(server, port, username=username, key_filename=keyfile, + look_for_keys=True, password=password) +# except paramiko.AuthenticationException: +# if password is None: +# password = getpass("%s@%s's password: "%(username, server)) +# client.connect(server, port, username=username, password=password) +# else: +# raise + except Exception as e: + print('*** Failed to connect to %s:%d: %r' % (server, port, e)) + sys.exit(1) + + # Don't let SIGINT kill the tunnel subprocess + signal.signal(signal.SIGINT, signal.SIG_IGN) + + try: + forward_tunnel(lport, remoteip, rport, client.get_transport()) + except KeyboardInterrupt: + print('SIGINT: Port forwarding stopped cleanly') + sys.exit(0) + except Exception as e: + print("Port forwarding stopped uncleanly: %s" % e) + sys.exit(255) + + +if sys.platform == 'win32': + ssh_tunnel = paramiko_tunnel +else: + ssh_tunnel = openssh_tunnel + + +__all__ = ['tunnel_connection', 'ssh_tunnel', 'openssh_tunnel', 'paramiko_tunnel', 'try_passwordless_ssh'] diff --git a/jupyter_client/tests/signalkernel.py b/jupyter_client/tests/signalkernel.py index e56ea44b8..44a53eef6 100644 --- a/jupyter_client/tests/signalkernel.py +++ b/jupyter_client/tests/signalkernel.py @@ -4,6 +4,7 @@ # Distributed under the terms of the Modified BSD License. 
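
# Note: the signalkernel hunk below adds an 'env' command so the test kernel can
# report substituted environment variables back to the test suite. A sketch of
# driving it from a client (assumes the 'signaltest' kernelspec that the tests
# install; values are illustrative):
#
#     from jupyter_client.manager import start_new_kernel
#
#     km, kc = start_new_kernel(kernel_name='signaltest')
#     try:
#         kc.execute('env')
#         reply = kc.get_shell_msg(timeout=30)
#         print(reply['content']['user_expressions']['env'])  # e.g. 'test_var_1:test_var_2'
#     finally:
#         kc.stop_channels()
#         km.shutdown_kernel()
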
from __future__ import print_function +import os from subprocess import Popen, PIPE import sys @@ -38,6 +39,8 @@ def do_execute(self, code, silent, store_history=True, user_expressions=None, reply['user_expressions']['pid'] = self.children[-1].pid elif code == 'check': reply['user_expressions']['poll'] = [ child.poll() for child in self.children ] + elif code == 'env': + reply['user_expressions']['env'] = os.getenv("TEST_VARS", "") elif code == 'sleep': try: time.sleep(10) diff --git a/jupyter_client/tests/test_discovery.py b/jupyter_client/tests/test_discovery.py new file mode 100644 index 000000000..9d7833ba3 --- /dev/null +++ b/jupyter_client/tests/test_discovery.py @@ -0,0 +1,32 @@ +import sys + +from jupyter_client import KernelManager +from jupyter_client import discovery + +def test_ipykernel_provider(): + import ipykernel # Fail clearly if ipykernel not installed + ikf = discovery.IPykernelProvider() + + res = list(ikf.find_kernels()) + assert len(res) == 1, res + id, info = res[0] + assert id == 'kernel' + assert info['argv'][0] == sys.executable + +class DummyKernelProvider(discovery.KernelProviderBase): + """A dummy kernel provider for testing KernelFinder""" + id = 'dummy' + + def find_kernels(self): + yield 'sample', {'argv': ['dummy_kernel']} + + def make_manager(self, name): + return KernelManager(kernel_cmd=['dummy_kernel']) + +def test_meta_kernel_finder(): + kf = discovery.KernelFinder(providers=[DummyKernelProvider()]) + assert list(kf.find_kernels()) == \ + [('dummy/sample', {'argv': ['dummy_kernel']})] + + manager = kf.make_manager('dummy/sample') + assert manager.kernel_cmd == ['dummy_kernel'] diff --git a/jupyter_client/tests/test_jsonutil.py b/jupyter_client/tests/test_jsonutil.py index 9583a22c8..737ec4add 100644 --- a/jupyter_client/tests/test_jsonutil.py +++ b/jupyter_client/tests/test_jsonutil.py @@ -14,14 +14,33 @@ # py2 import mock +import pytest from dateutil.tz import tzlocal, tzoffset from jupyter_client import jsonutil from jupyter_client.session import utcnow +REFERENCE_DATETIME = datetime.datetime( + 2013, 7, 3, 16, 34, 52, 249482, tzlocal() +) + + +def test_extract_date_from_naive(): + ref = REFERENCE_DATETIME + timestamp = '2013-07-03T16:34:52.249482' + + with pytest.deprecated_call(match='Interpreting naive datetime as local'): + extracted = jsonutil.extract_dates(timestamp) + + assert isinstance(extracted, datetime.datetime) + assert extracted.tzinfo is not None + assert extracted.tzinfo.utcoffset(ref) == tzlocal().utcoffset(ref) + assert extracted == ref + + def test_extract_dates(): + ref = REFERENCE_DATETIME timestamps = [ - '2013-07-03T16:34:52.249482', '2013-07-03T16:34:52.249482Z', '2013-07-03T16:34:52.249482-0800', '2013-07-03T16:34:52.249482+0800', @@ -29,41 +48,40 @@ def test_extract_dates(): '2013-07-03T16:34:52.249482+08:00', ] extracted = jsonutil.extract_dates(timestamps) - ref = extracted[0] for dt in extracted: assert isinstance(dt, datetime.datetime) - assert dt.tzinfo != None + assert dt.tzinfo is not None + + assert extracted[0].tzinfo.utcoffset(ref) == timedelta(0) + assert extracted[1].tzinfo.utcoffset(ref) == timedelta(hours=-8) + assert extracted[2].tzinfo.utcoffset(ref) == timedelta(hours=8) + assert extracted[3].tzinfo.utcoffset(ref) == timedelta(hours=-8) + assert extracted[4].tzinfo.utcoffset(ref) == timedelta(hours=8) - assert extracted[0].tzinfo.utcoffset(ref) == tzlocal().utcoffset(ref) - assert extracted[1].tzinfo.utcoffset(ref) == timedelta(0) - assert extracted[2].tzinfo.utcoffset(ref) == timedelta(hours=-8) - assert 
extracted[3].tzinfo.utcoffset(ref) == timedelta(hours=8) - assert extracted[4].tzinfo.utcoffset(ref) == timedelta(hours=-8) - assert extracted[5].tzinfo.utcoffset(ref) == timedelta(hours=8) def test_parse_ms_precision(): base = '2013-07-03T16:34:52' digits = '1234567890' - parsed = jsonutil.parse_date(base) + parsed = jsonutil.parse_date(base+'Z') assert isinstance(parsed, datetime.datetime) for i in range(len(digits)): ts = base + '.' + digits[:i] - parsed = jsonutil.parse_date(ts) + parsed = jsonutil.parse_date(ts+'Z') if i >= 1 and i <= 6: assert isinstance(parsed, datetime.datetime) else: assert isinstance(parsed, str) - def test_date_default(): naive = datetime.datetime.now() local = tzoffset('Local', -8 * 3600) other = tzoffset('Other', 2 * 3600) data = dict(naive=naive, utc=utcnow(), withtz=naive.replace(tzinfo=other)) with mock.patch.object(jsonutil, 'tzlocal', lambda : local): - jsondata = json.dumps(data, default=jsonutil.date_default) + with pytest.deprecated_call(match='Please add timezone info'): + jsondata = json.dumps(data, default=jsonutil.date_default) assert "Z" in jsondata assert jsondata.count("Z") == 1 extracted = jsonutil.extract_dates(json.loads(jsondata)) diff --git a/jupyter_client/tests/test_kernelapp.py b/jupyter_client/tests/test_kernelapp.py new file mode 100644 index 000000000..2533472d4 --- /dev/null +++ b/jupyter_client/tests/test_kernelapp.py @@ -0,0 +1,67 @@ +from __future__ import division + +import os +import shutil +from subprocess import Popen, PIPE +import sys +from tempfile import mkdtemp +import time + +PY3 = sys.version_info[0] >= 3 + +def _launch(extra_env): + env = os.environ.copy() + env.update(extra_env) + return Popen([sys.executable, '-c', + 'from jupyter_client.kernelapp import main; main()'], + env=env, stderr=(PIPE if PY3 else None)) + +WAIT_TIME = 10 +POLL_FREQ = 10 + +def hacky_wait(p): + """Python 2 subprocess doesn't have timeouts :-(""" + for _ in range(WAIT_TIME * POLL_FREQ): + if p.poll() is not None: + return p.returncode + time.sleep(1 / POLL_FREQ) + else: + raise AssertionError("Process didn't exit in {} seconds" + .format(WAIT_TIME)) + +def test_kernelapp_lifecycle(): + # Check that 'jupyter kernel' starts and terminates OK. 
+    runtime_dir = mkdtemp()
+    startup_dir = mkdtemp()
+    started = os.path.join(startup_dir, 'started')
+    try:
+        p = _launch({'JUPYTER_RUNTIME_DIR': runtime_dir,
+                     'JUPYTER_CLIENT_TEST_RECORD_STARTUP_PRIVATE': started,
+                    })
+        # Wait for start
+        for _ in range(WAIT_TIME * POLL_FREQ):
+            if os.path.isfile(started):
+                break
+            time.sleep(1 / POLL_FREQ)
+        else:
+            raise AssertionError("No started file created in {} seconds"
+                                 .format(WAIT_TIME))
+
+        # Connection file should be there by now
+        files = os.listdir(runtime_dir)
+        assert len(files) == 1
+        cf = files[0]
+        assert cf.startswith('kernel')
+        assert cf.endswith('.json')
+
+        # Send SIGTERM to shut down
+        p.terminate()
+        if PY3:
+            _, stderr = p.communicate(timeout=WAIT_TIME)
+            assert cf in stderr.decode('utf-8', 'replace')
+        else:
+            hacky_wait(p)
+    finally:
+        shutil.rmtree(runtime_dir)
+        shutil.rmtree(startup_dir)
+
diff --git a/jupyter_client/tests/test_kernelmanager.py b/jupyter_client/tests/test_kernelmanager.py
index a23b33fa6..58010bcfb 100644
--- a/jupyter_client/tests/test_kernelmanager.py
+++ b/jupyter_client/tests/test_kernelmanager.py
@@ -11,6 +11,9 @@
 from subprocess import PIPE
 import sys
 import time
+import threading
+import multiprocessing as mp
+import pytest
 
 from unittest import TestCase
 from traitlets.config.loader import Config
@@ -28,7 +31,7 @@ def setUp(self):
 
     def tearDown(self):
         self.env_patch.stop()
-
+
     def _install_test_kernel(self):
         kernel_dir = pjoin(paths.jupyter_data_dir(), 'kernels', 'signaltest')
         os.makedirs(kernel_dir)
@@ -38,6 +41,7 @@ def _install_test_kernel(self):
                      '-m', 'jupyter_client.tests.signalkernel',
                      '-f', '{connection_file}'],
             'display_name': "Signal Test Kernel",
+            'env': {'TEST_VARS': '${TEST_VARS}:test_var_2'},
         }))
 
     def _get_tcp_km(self):
@@ -127,3 +131,175 @@ def test_start_new_kernel(self):
 
         self.assertTrue(km.is_alive())
         self.assertTrue(kc.is_alive())
+
+    def _env_test_body(self, kc):
+
+        def execute(cmd):
+            kc.execute(cmd)
+            reply = kc.get_shell_msg(TIMEOUT)
+            content = reply['content']
+            self.assertEqual(content['status'], 'ok')
+            return content
+
+        reply = execute('env')
+        self.assertIsNotNone(reply)
+        self.assertEqual(reply['user_expressions']['env'], 'test_var_1:test_var_2')
+
+    def test_templated_kspec_env(self):
+        self._install_test_kernel()
+        km, kc = start_new_kernel(kernel_name='signaltest')
+        self.addCleanup(kc.stop_channels)
+        self.addCleanup(km.shutdown_kernel)
+
+        self.assertTrue(km.is_alive())
+        self.assertTrue(kc.is_alive())
+
+        self._env_test_body(kc)
+
+    def _start_kernel_with_cmd(self, kernel_cmd, extra_env, **kwargs):
+        """Start a new kernel, and return its Manager and Client"""
+        km = KernelManager(kernel_name='signaltest')
+        km.kernel_cmd = kernel_cmd
+        km.extra_env = extra_env
+        km.start_kernel(**kwargs)
+        kc = km.client()
+        kc.start_channels()
+        try:
+            kc.wait_for_ready(timeout=60)
+        except RuntimeError:
+            kc.stop_channels()
+            km.shutdown_kernel()
+            raise
+
+        return km, kc
+
+    def test_templated_extra_env(self):
+        self._install_test_kernel()
+        kernel_cmd = [sys.executable,
+                      '-m', 'jupyter_client.tests.signalkernel',
+                      '-f', '{connection_file}']
+        extra_env = {'TEST_VARS': '${TEST_VARS}:test_var_2'}
+
+        km, kc = self._start_kernel_with_cmd(kernel_cmd, extra_env)
+        self.addCleanup(kc.stop_channels)
+        self.addCleanup(km.shutdown_kernel)
+
+        self.assertTrue(km.is_alive())
+        self.assertTrue(kc.is_alive())
+
+        self._env_test_body(kc)
+
+
+class TestParallel:
+
+    @pytest.fixture(autouse=True)
+    def env(self):
+        env_patch = test_env()
+        env_patch.start()
+        yield
+        env_patch.stop()
+
+    
@pytest.fixture(params=['tcp', 'ipc']) + def transport(self, request): + return request.param + + @pytest.fixture + def config(self, transport): + c = Config() + c.transport = transport + if transport == 'ipc': + c.ip = 'test' + return c + + def _install_test_kernel(self): + kernel_dir = pjoin(paths.jupyter_data_dir(), 'kernels', 'signaltest') + os.makedirs(kernel_dir) + with open(pjoin(kernel_dir, 'kernel.json'), 'w') as f: + f.write(json.dumps({ + 'argv': [sys.executable, + '-m', 'jupyter_client.tests.signalkernel', + '-f', '{connection_file}'], + 'display_name': "Signal Test Kernel", + })) + + def test_start_sequence_kernels(self, config): + """Ensure that a sequence of kernel startups doesn't break anything.""" + + self._install_test_kernel() + self._run_signaltest_lifecycle(config) + self._run_signaltest_lifecycle(config) + self._run_signaltest_lifecycle(config) + + def test_start_parallel_thread_kernels(self, config): + self._install_test_kernel() + self._run_signaltest_lifecycle(config) + + thread = threading.Thread(target=self._run_signaltest_lifecycle, args=(config,)) + thread2 = threading.Thread(target=self._run_signaltest_lifecycle, args=(config,)) + try: + thread.start() + thread2.start() + finally: + thread.join() + thread2.join() + + def test_start_parallel_process_kernels(self, config): + self._install_test_kernel() + + self._run_signaltest_lifecycle(config) + thread = threading.Thread(target=self._run_signaltest_lifecycle, args=(config,)) + proc = mp.Process(target=self._run_signaltest_lifecycle, args=(config,)) + try: + thread.start() + proc.start() + finally: + thread.join() + proc.join() + + assert proc.exitcode == 0 + + def test_start_sequence_process_kernels(self, config): + self._install_test_kernel() + self._run_signaltest_lifecycle(config) + proc = mp.Process(target=self._run_signaltest_lifecycle, args=(config,)) + try: + proc.start() + finally: + proc.join() + + assert proc.exitcode == 0 + + def _prepare_kernel(self, km, startup_timeout=TIMEOUT, **kwargs): + km.start_kernel(**kwargs) + kc = km.client() + kc.start_channels() + try: + kc.wait_for_ready(timeout=startup_timeout) + except RuntimeError: + kc.stop_channels() + km.shutdown_kernel() + raise + + return kc + + def _run_signaltest_lifecycle(self, config=None): + km = KernelManager(config=config, kernel_name='signaltest') + kc = self._prepare_kernel(km, stdout=PIPE, stderr=PIPE) + + def execute(cmd): + kc.execute(cmd) + reply = kc.get_shell_msg(TIMEOUT) + content = reply['content'] + assert content['status'] == 'ok' + return content + + execute("start") + assert km.is_alive() + execute('check') + assert km.is_alive() + + km.restart_kernel(now=True) + assert km.is_alive() + execute('check') + + km.shutdown_kernel() diff --git a/jupyter_client/tests/test_kernelspec.py b/jupyter_client/tests/test_kernelspec.py index b2ec4195c..2919923ef 100644 --- a/jupyter_client/tests/test_kernelspec.py +++ b/jupyter_client/tests/test_kernelspec.py @@ -4,6 +4,7 @@ # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. 
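
# Note: the test added below exercises the KernelSpecManager.get_all_specs
# fallback for subclasses that only override find_kernel_specs and
# get_kernel_spec (see the kernelspec.py hunk earlier in this diff). For
# reference, the shape of the API under test (a sketch; the listed kernels
# depend on what is installed):
#
#     from jupyter_client.kernelspec import KernelSpecManager
#
#     ksm = KernelSpecManager()
#     for name, info in ksm.get_all_specs().items():
#         # Broken kernel.json files are now logged and skipped, so this
#         # loop no longer aborts on the first invalid spec.
#         print(name, info['resource_dir'], info['spec']['display_name'])
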
+import copy
 import io
 import json
 from logging import StreamHandler
@@ -11,6 +12,7 @@
 from os.path import join as pjoin
 from subprocess import Popen, PIPE, STDOUT
 import sys
+import tempfile
 import unittest
 
 import pytest
@@ -156,7 +158,7 @@ def test_validate_kernel_name(self):
             'Haskell-1-2-3',
         ]:
             assert kernelspec._is_valid_kernel_name(good)
-
+
         for bad in [
             'has space',
             u'ünicode',
@@ -165,4 +167,33 @@ def test_validate_kernel_name(self):
         ]:
             assert not kernelspec._is_valid_kernel_name(bad)
 
-
+    def test_subclass(self):
+        """Test get_all_specs in subclasses that override find_kernel_specs"""
+        ksm = self.ksm
+        resource_dir = tempfile.gettempdir()
+        native_name = kernelspec.NATIVE_KERNEL_NAME
+        native_kernel = ksm.get_kernel_spec(native_name)
+
+        class MyKSM(kernelspec.KernelSpecManager):
+            def get_kernel_spec(self, name):
+                spec = copy.copy(native_kernel)
+                if name == 'fake':
+                    spec.name = name
+                    spec.resource_dir = resource_dir
+                elif name == native_name:
+                    pass
+                else:
+                    raise KeyError(name)
+                return spec
+
+            def find_kernel_specs(self):
+                return {
+                    'fake': resource_dir,
+                    native_name: native_kernel.resource_dir,
+                }
+
+        # ensure that get_all_specs doesn't raise if only
+        # find_kernel_specs and get_kernel_spec are defined
+        myksm = MyKSM()
+        specs = myksm.get_all_specs()
+        assert sorted(specs) == ['fake', native_name]
diff --git a/jupyter_client/tests/test_multikernelmanager.py b/jupyter_client/tests/test_multikernelmanager.py
index 2ca2ea451..08002fef7 100644
--- a/jupyter_client/tests/test_multikernelmanager.py
+++ b/jupyter_client/tests/test_multikernelmanager.py
@@ -1,14 +1,21 @@
 """Tests for the notebook kernel and session manager."""
 
-from subprocess import PIPE
+import os
 import time
-from unittest import TestCase
+import threading
+import multiprocessing as mp
 
+from subprocess import PIPE
+from unittest import TestCase
 from traitlets.config.loader import Config
-from ..localinterfaces import localhost
 from jupyter_client import KernelManager
 from jupyter_client.multikernelmanager import MultiKernelManager
+
 from .utils import skip_win32
+from ..localinterfaces import localhost
+
+TIMEOUT = 30
+
 
 class TestKernelManager(TestCase):
 
@@ -83,3 +90,43 @@ def test_ipc_lifecycle(self):
     def test_ipc_cinfo(self):
         km = self._get_ipc_km()
         self._run_cinfo(km, 'ipc', 'test')
+
+    def test_start_sequence_tcp_kernels(self):
+        """Ensure that a sequence of kernel startups doesn't break anything."""
+        self._run_lifecycle(self._get_tcp_km())
+        self._run_lifecycle(self._get_tcp_km())
+        self._run_lifecycle(self._get_tcp_km())
+
+
+    def test_start_sequence_ipc_kernels(self):
+        """Ensure that a sequence of kernel startups doesn't break anything."""
+        self._run_lifecycle(self._get_ipc_km())
+        self._run_lifecycle(self._get_ipc_km())
+        self._run_lifecycle(self._get_ipc_km())
+
+    def test_start_parallel_thread_kernels(self):
+        self.test_tcp_lifecycle()
+
+        thread = threading.Thread(target=self.test_tcp_lifecycle)
+        thread2 = threading.Thread(target=self.test_tcp_lifecycle)
+        try:
+            thread.start()
+            thread2.start()
+        finally:
+            thread.join()
+            thread2.join()
+
+    def test_start_parallel_process_kernels(self):
+        self.test_tcp_lifecycle()
+
+        thread = threading.Thread(target=self.test_tcp_lifecycle)
+        proc = mp.Process(target=self.test_tcp_lifecycle)
+
+        try:
+            thread.start()
+            proc.start()
+        finally:
+            thread.join()
+            proc.join()
+
+        assert proc.exitcode == 0
diff --git a/jupyter_client/tests/test_session.py b/jupyter_client/tests/test_session.py
index 43819a898..82d63df4c 100644
--- 
a/jupyter_client/tests/test_session.py +++ b/jupyter_client/tests/test_session.py @@ -8,6 +8,10 @@ import sys import uuid from datetime import datetime +try: + from unittest import mock +except ImportError: + import mock import pytest @@ -34,6 +38,14 @@ def setUp(self): self.session = ss.Session() +@pytest.fixture +def no_copy_threshold(): + """Disable zero-copy optimizations in pyzmq >= 17""" + with mock.patch.object(zmq, 'COPY_THRESHOLD', 1, create=True): + yield + + +@pytest.mark.usefixtures('no_copy_threshold') class TestSession(SessionTestCase): def test_msg(self): @@ -71,7 +83,7 @@ def test_default_secure(self): self.assertIsInstance(self.session.auth, hmac.HMAC) def test_send(self): - ctx = zmq.Context.instance() + ctx = zmq.Context() A = ctx.socket(zmq.PAIR) B = ctx.socket(zmq.PAIR) A.bind("inproc://test") @@ -129,7 +141,7 @@ def test_send(self): # buffers must be contiguous buf = memoryview(os.urandom(16)) - if sys.version_info >= (3,3): + if sys.version_info >= (3,4): with self.assertRaises(ValueError): self.session.send(A, msg, ident=b'foo', buffers=[buf[::2]]) @@ -304,7 +316,7 @@ def test_datetimes_msgpack(self): self._datetime_test(session) def test_send_raw(self): - ctx = zmq.Context.instance() + ctx = zmq.Context() A = ctx.socket(zmq.PAIR) B = ctx.socket(zmq.PAIR) A.bind("inproc://test") @@ -327,7 +339,7 @@ def test_send_raw(self): A.close() B.close() ctx.term() - + def test_clone(self): s = self.session s._add_digest('initial') diff --git a/jupyter_client/tests/test_ssh.py b/jupyter_client/tests/test_ssh.py new file mode 100644 index 000000000..e1673f9f4 --- /dev/null +++ b/jupyter_client/tests/test_ssh.py @@ -0,0 +1,8 @@ +from jupyter_client.ssh.tunnel import select_random_ports + +def test_random_ports(): + for i in range(4096): + ports = select_random_ports(10) + assert len(ports) == 10 + for p in ports: + assert ports.count(p) == 1 diff --git a/jupyter_client/tests/utils.py b/jupyter_client/tests/utils.py index 505084a30..64ce45dc4 100644 --- a/jupyter_client/tests/utils.py +++ b/jupyter_client/tests/utils.py @@ -29,6 +29,7 @@ def start(self): 'JUPYTER_DATA_DIR': pjoin(td.name, 'jupyter_data'), 'JUPYTER_RUNTIME_DIR': pjoin(td.name, 'jupyter_runtime'), 'IPYTHONDIR': pjoin(td.name, 'ipython'), + 'TEST_VARS': 'test_var_1', }) self.env_patch.start() diff --git a/jupyter_client/threaded.py b/jupyter_client/threaded.py index f437aa58b..801ac7acc 100644 --- a/jupyter_client/threaded.py +++ b/jupyter_client/threaded.py @@ -3,7 +3,8 @@ from __future__ import absolute_import import atexit import errno -from threading import Thread +import sys +from threading import Thread, Event import time # import ZMQError in top-level namespace, to avoid ugly attribute-error messages @@ -41,9 +42,15 @@ def __init__(self, socket, session, loop): self.socket = socket self.session = session self.ioloop = loop + evt = Event() - self.stream = zmqstream.ZMQStream(self.socket, self.ioloop) - self.stream.on_recv(self._handle_recv) + def setup_stream(): + self.stream = zmqstream.ZMQStream(self.socket, self.ioloop) + self.stream.on_recv(self._handle_recv) + evt.set() + + self.ioloop.add_callback(setup_stream) + evt.wait() _is_alive = False def is_alive(self): @@ -142,19 +149,40 @@ class IOLoopThread(Thread): """Run a pyzmq ioloop in a thread to send and receive messages """ _exiting = False + ioloop = None - def __init__(self, loop): + def __init__(self): super(IOLoopThread, self).__init__() self.daemon = True - self.ioloop = loop or ioloop.IOLoop() @staticmethod @atexit.register def _notice_exit(): - 
diff --git a/jupyter_client/threaded.py b/jupyter_client/threaded.py
index f437aa58b..801ac7acc 100644
--- a/jupyter_client/threaded.py
+++ b/jupyter_client/threaded.py
@@ -3,7 +3,8 @@
 from __future__ import absolute_import
 import atexit
 import errno
-from threading import Thread
+import sys
+from threading import Thread, Event
 import time

 # import ZMQError in top-level namespace, to avoid ugly attribute-error messages
@@ -41,9 +42,15 @@ def __init__(self, socket, session, loop):
         self.socket = socket
         self.session = session
         self.ioloop = loop
+        evt = Event()

-        self.stream = zmqstream.ZMQStream(self.socket, self.ioloop)
-        self.stream.on_recv(self._handle_recv)
+        def setup_stream():
+            self.stream = zmqstream.ZMQStream(self.socket, self.ioloop)
+            self.stream.on_recv(self._handle_recv)
+            evt.set()
+
+        self.ioloop.add_callback(setup_stream)
+        evt.wait()

     _is_alive = False
     def is_alive(self):
@@ -142,19 +149,40 @@ class IOLoopThread(Thread):
     """Run a pyzmq ioloop in a thread to send and receive messages
     """
     _exiting = False
+    ioloop = None

-    def __init__(self, loop):
+    def __init__(self):
         super(IOLoopThread, self).__init__()
         self.daemon = True
-        self.ioloop = loop or ioloop.IOLoop()

     @staticmethod
     @atexit.register
     def _notice_exit():
-        IOLoopThread._exiting = True
+        # Class definitions can be torn down during interpreter shutdown.
+        # We only need to set _exiting flag if this hasn't happened.
+        if IOLoopThread is not None:
+            IOLoopThread._exiting = True
+
+    def start(self):
+        """Start the IOLoop thread
+
+        Don't return until self.ioloop is defined,
+        which is created in the thread
+        """
+        self._start_event = Event()
+        Thread.start(self)
+        self._start_event.wait()

     def run(self):
         """Run my loop, ignoring EINTR events in the poller"""
+        if 'asyncio' in sys.modules:
+            # tornado may be using asyncio,
+            # ensure an eventloop exists for this thread
+            import asyncio
+            asyncio.set_event_loop(asyncio.new_event_loop())
+        self.ioloop = ioloop.IOLoop()
+        # signal that self.ioloop is defined
+        self._start_event.set()
         while True:
             try:
                 self.ioloop.start()
@@ -179,9 +207,10 @@ def stop(self):
         :meth:`~threading.Thread.start` is called again.
         """
         if self.ioloop is not None:
-            self.ioloop.stop()
+            self.ioloop.add_callback(self.ioloop.stop)
         self.join()
         self.close()
+        self.ioloop = None

     def close(self):
         if self.ioloop is not None:
@@ -195,23 +224,20 @@ class ThreadedKernelClient(KernelClient):
     """ A KernelClient that provides thread-safe sockets with async callbacks on message
     replies.
     """
-    _ioloop = None

     @property
     def ioloop(self):
-        if self._ioloop is None:
-            self._ioloop = ioloop.IOLoop()
-        return self._ioloop
+        return self.ioloop_thread.ioloop

     ioloop_thread = Instance(IOLoopThread, allow_none=True)

-    def start_channels(self, shell=True, iopub=True, stdin=True, hb=True):
+    def start_channels(self, shell=True, iopub=True, stdin=True, hb=True, control=True):
+        self.ioloop_thread = IOLoopThread()
+        self.ioloop_thread.start()
+
         if shell:
             self.shell_channel._inspect = self._check_kernel_info_reply

-        self.ioloop_thread = IOLoopThread(self.ioloop)
-        self.ioloop_thread.start()
-
-        super(ThreadedKernelClient, self).start_channels(shell, iopub, stdin, hb)
+        super(ThreadedKernelClient, self).start_channels(shell, iopub, stdin, hb, control)

     def _check_kernel_info_reply(self, msg):
         """This is run in the ioloop thread when the kernel info reply is received
@@ -229,3 +255,4 @@ def stop_channels(self):
     shell_channel_class = Type(ThreadedZMQSocketChannel)
     stdin_channel_class = Type(ThreadedZMQSocketChannel)
     hb_channel_class = Type(HBChannel)
+    control_channel_class = Type(ThreadedZMQSocketChannel)
diff --git a/scripts/jupyter-kernel b/scripts/jupyter-kernel
new file mode 100755
index 000000000..31144d405
--- /dev/null
+++ b/scripts/jupyter-kernel
@@ -0,0 +1,5 @@
+#!/usr/bin/env python
+from jupyter_client.kernelapp import main
+
+if __name__ == '__main__':
+    main()
diff --git a/setup.cfg b/setup.cfg
index e0ca7a784..a2327e90f 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,5 +1,8 @@
 [bdist_wheel]
 universal=1

+[metadata]
+license_file = COPYING.md
+
 [nosetests]
 warningfilters=default
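Note: the threaded.py changes above use an Event handshake so that objects owned by the loop thread (the IOLoop itself, the ZMQStream) are created only on that thread, while callers block until they exist. A minimal sketch of the pattern with a plain asyncio loop standing in for tornado's IOLoop (Python 3 only; illustrative, not code from this patch):

    import asyncio
    import threading

    class LoopThread(threading.Thread):
        def __init__(self):
            super(LoopThread, self).__init__()
            self.daemon = True
            self._ready = threading.Event()
            self.loop = None

        def run(self):
            # the loop must be created on the thread that will run it
            self.loop = asyncio.new_event_loop()
            asyncio.set_event_loop(self.loop)
            self._ready.set()       # unblock start()
            self.loop.run_forever()

        def start(self):
            threading.Thread.start(self)
            self._ready.wait()      # don't return until self.loop exists

    t = LoopThread()
    t.start()
    # stop from another thread via a callback, mirroring stop() above
    t.loop.call_soon_threadsafe(t.loop.stop)
    t.join()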
diff --git a/setup.py b/setup.py
index 341af7fb2..d865eeef7 100644
--- a/setup.py
+++ b/setup.py
@@ -16,8 +16,8 @@
 import sys

 v = sys.version_info
-if v[:2] < (2,7) or (v[0] >= 3 and v[:2] < (3,3)):
-    error = "ERROR: %s requires Python version 2.7 or 3.3 or above." % name
+if v[:2] < (2, 7) or (v[0] >= 3 and v[:2] < (3, 5)):
+    error = "ERROR: %s requires Python version 2.7 or 3.5 or above." % name
     print(error, file=sys.stderr)
     sys.exit(1)
@@ -60,29 +60,45 @@ def run(self):
     name = name,
     version = version_ns['__version__'],
     packages = packages,
-    description = "Jupyter protocol implementation and client libraries",
+    description = 'Jupyter protocol implementation and client libraries',
+    long_description=open('README.md').read(),
+    long_description_content_type='text/markdown',
     author = 'Jupyter Development Team',
     author_email = 'jupyter@googlegroups.com',
     url = 'https://jupyter.org',
     license = 'BSD',
     platforms = "Linux, Mac OS X, Windows",
     keywords = ['Interactive', 'Interpreter', 'Shell', 'Web'],
+    project_urls = {
+        'Documentation': 'https://jupyter-client.readthedocs.io',
+        'Source': 'https://github.com/jupyter/jupyter_client/',
+        'Tracker': 'https://github.com/jupyter/jupyter_client/issues',
+    },
     classifiers = [
+        'Framework :: Jupyter',
         'Intended Audience :: Developers',
+        'Intended Audience :: Education',
         'Intended Audience :: System Administrators',
         'Intended Audience :: Science/Research',
         'License :: OSI Approved :: BSD License',
+        'Operating System :: OS Independent',
         'Programming Language :: Python',
+        'Programming Language :: Python :: 2',
         'Programming Language :: Python :: 2.7',
         'Programming Language :: Python :: 3',
-        'Programming Language :: Python :: 3.3',
+        'Programming Language :: Python :: 3.5',
+        'Programming Language :: Python :: 3.6',
+        'Programming Language :: Python :: 3.7',
     ],
     install_requires = [
         'traitlets',
         'jupyter_core',
         'pyzmq>=13',
         'python-dateutil>=2.1',
+        'entrypoints',
+        'tornado>=4.1',
     ],
+    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
     extras_require = {
         'test': ['ipykernel', 'ipython', 'mock', 'pytest'],
     },
@@ -93,6 +109,11 @@ def run(self):
         'console_scripts': [
             'jupyter-kernelspec = jupyter_client.kernelspecapp:KernelSpecApp.launch_instance',
             'jupyter-run = jupyter_client.runapp:RunApp.launch_instance',
+            'jupyter-kernel = jupyter_client.kernelapp:main',
+        ],
+        'jupyter_client.kernel_providers' : [
+            'spec = jupyter_client.discovery:KernelSpecProvider',
+            'pyimport = jupyter_client.discovery:IPykernelProvider',
         ]
     },
 )
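Note: the new jupyter_client.kernel_providers entry point group is how third-party kernel providers advertise themselves, and the entrypoints dependency added above is what reads it. A sketch of discovery from the consuming side (assumes providers follow the find_kernels() protocol described in docs/kernel_providers.rst):

    import entrypoints

    for ep in entrypoints.get_group_all('jupyter_client.kernel_providers'):
        provider = ep.load()()      # each entry point names a class; instantiate it
        print(ep.name, [name for name, _ in provider.find_kernels()])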