diff --git a/webpages/.gitignore b/webpages/.gitignore
new file mode 100644
index 0000000..7615a9d
--- /dev/null
+++ b/webpages/.gitignore
@@ -0,0 +1,4 @@
+_site
+Gemfile.lock
+.bundle
+.jekyll-cache
\ No newline at end of file
diff --git a/webpages/Gemfile b/webpages/Gemfile
new file mode 100644
index 0000000..ecbbb7d
--- /dev/null
+++ b/webpages/Gemfile
@@ -0,0 +1,5 @@
+source 'https://rubygems.org'
+# gem 'github-pages', group: :jekyll_plugins
+gem "jekyll", "~> 4.0"
+gem "jekyll-seo-tag"
+gem 'webrick', '~> 1.3', '>= 1.3.1'
diff --git a/webpages/_config.yml b/webpages/_config.yml
new file mode 100644
index 0000000..39ca339
--- /dev/null
+++ b/webpages/_config.yml
@@ -0,0 +1,10 @@
+plugins:
+  - jekyll-seo-tag
+
+author: Michael N. Lipp
+
+logo: VM-Operator.svg
+
+tagline: VM-Operator by mnlipp
+
+description: A Kubernetes operator for running virtual machines (notably Qemu VMs) as pods.
diff --git a/webpages/_includes/matomo.html b/webpages/_includes/matomo.html
new file mode 100644
index 0000000..3a93186
--- /dev/null
+++ b/webpages/_includes/matomo.html
@@ -0,0 +1,23 @@
+<!-- Matomo tracking snippet; the site-specific script and tracker URL are not reproduced here -->
diff --git a/webpages/_includes/toc.html b/webpages/_includes/toc.html
new file mode 100644
index 0000000..56ac8e4
--- /dev/null
+++ b/webpages/_includes/toc.html
@@ -0,0 +1,96 @@
+{% capture tocWorkspace %}
+    {% comment %}
+        Version 1.0.10
+        https://github.com/allejo/jekyll-toc
+
+        "...like all things liquid - where there's a will, and ~36 hours to spare, there's usually a/some way" ~jaybe
+
+        Usage:
+            {% include toc.html html=content sanitize=true class="inline_toc" id="my_toc" h_min=2 h_max=3 %}
+
+        Parameters:
+            * html (string) - the HTML of compiled markdown generated by kramdown in Jekyll
+
+        Optional Parameters:
+            * sanitize (bool) : false - when set to true, the headers will be stripped of any HTML in the TOC
+            * class (string) : '' - a CSS class assigned to the TOC
+            * id (string) : '' - an ID to assigned to the TOC
+            * h_min (int) : 1 - the minimum TOC header level to use; any header lower than this value will be ignored
+            * h_max (int) : 6 - the maximum TOC header level to use; any header greater than this value will be ignored
+            * ordered (bool) : false - when set to true, an ordered list will be outputted instead of an unordered list
+            * item_class (string) : '' - add custom class(es) for each list item; has support for '%level%' placeholder, which is the current heading level
+            * baseurl (string) : '' - add a base url to the TOC links for when your TOC is on another page than the actual content
+            * anchor_class (string) : '' - add custom class(es) for each anchor element
+
+        Output:
+            An ordered or unordered list representing the table of contents of a markdown block. This snippet will only
+            generate the table of contents and will NOT output the markdown given to it
+    {% endcomment %}
+
+    {% capture my_toc %}{% endcapture %}
+    {% assign orderedList = include.ordered | default: false %}
+    {% assign minHeader = include.h_min | default: 1 %}
+    {% assign maxHeader = include.h_max | default: 6 %}
+    {% assign nodes = include.html | split: '<h' %}
+    {% assign firstHeader = true %}
+
+    {% capture listModifier %}{% if orderedList %}1.{% else %}-{% endif %}{% endcapture %}
+
+    {% for node in nodes %}
+        {% if node == "" %}
+            {% continue %}
+        {% endif %}
+
+        {% assign headerLevel = node | replace: '"', '' | slice: 0, 1 | times: 1 %}
+
+        {% if headerLevel < minHeader or headerLevel > maxHeader %}
+            {% continue %}
+        {% endif %}
+
+        {% if firstHeader %}
+            {% assign firstHeader = false %}
+            {% assign minHeader = headerLevel %}
+        {% endif %}
+
+        {% assign indentAmount = headerLevel | minus: minHeader %}
+        {% assign _workspace = node | split: '</h' %}
+
+        {% assign _idWorkspace = _workspace[0] | split: 'id="' %}
+        {% assign _idWorkspace = _idWorkspace[1] | split: '"' %}
+        {% assign html_id = _idWorkspace[0] %}
+
+        {% capture _hAttrToStrip %}{{ _workspace[0] | split: '>' | first }}>{% endcapture %}
+        {% assign header = _workspace[0] | replace: _hAttrToStrip, '' %}
+
+        {% assign space = '' %}
+        {% for i in (1..indentAmount) %}
+            {% assign space = space | prepend: '    ' %}
+        {% endfor %}
+
+        {% if include.item_class and include.item_class != blank %}
+            {% capture listItemClass %}{:.{{ include.item_class | replace: '%level%', headerLevel }}}{% endcapture %}
+        {% endif %}
+
+        {% capture heading_body %}{% if include.sanitize %}{{ header | strip_html }}{% else %}{{ header }}{% endif %}{% endcapture %}
+        {% capture my_toc %}{{ my_toc }}
+{{ space }}{{ listModifier }} {{ listItemClass }} [{{ heading_body | replace: "|", "\|" }}]({% if include.baseurl %}{{ include.baseurl }}{% endif %}#{{ html_id }}){% if include.anchor_class %}{:.{{ include.anchor_class }}}{% endif %}{% endcapture %}
+    {% endfor %}
+
+    {% if include.class and include.class != blank %}
+        {% capture my_toc %}{:.{{ include.class }}}
+{{ my_toc | lstrip }}{% endcapture %}
+    {% endif %}
+
+    {% if include.id %}
+        {% capture my_toc %}{: #{{ include.id }}}
+{{ my_toc | lstrip }}{% endcapture %}
+    {% endif %}
+{% endcapture %}{% assign tocWorkspace = '' %}{{ my_toc | markdownify | strip }}
diff --git a/webpages/_layouts/vm-operator.html b/webpages/_layouts/vm-operator.html
new file mode 100644
index 0000000..dfe4220
--- /dev/null
+++ b/webpages/_layouts/vm-operator.html
@@ -0,0 +1,71 @@
+<!DOCTYPE html>
+<html lang="en">
+  <head>
+    <meta charset="utf-8">
+    <meta http-equiv="X-UA-Compatible" content="IE=edge">
+    <meta name="viewport" content="width=device-width, initial-scale=1">
+    <link rel="icon" type="image/svg+xml" href="favicon.svg">
+    <link rel="stylesheet" href="{{ '/stylesheets/styles.css' | relative_url }}">
+    <link rel="stylesheet" href="{{ '/stylesheets/pygment_trac.css' | relative_url }}">
+    {% seo %}
+  </head>
+
+  <body>
+    <div class="wrapper">
+      <header>
+        <div>
+          <div>
+            <h1>VM-Operator</h1>
+            <p>By Michael N. Lipp</p>
+            <p><a href="…"><img alt="Mastodon Follow" src="…"></a></p>
+          </div>
+          <div>
+            <img src="VM-Operator.svg" alt="VM-Operator Logo">
+          </div>
+        </div>
+
+        <p class="view"><a href="https://github.com/mnlipp/VM-Operator">View GitHub Project</a></p>
+
+        <ul class="no-bullets">
+          <li><a href="index.html">Overview</a></li>
+          <li><a href="runner.html">The Runner</a></li>
+          <li><a href="manager.html">The Manager</a></li>
+          <li><a href="webgui.html">The Web-GUI</a></li>
+        </ul>
+      </header>
+
+      <section>
+        {% if page.tocTitle %}
+        <h2>{{ page.tocTitle }}</h2>
+        {% include toc.html html=content %}
+        {% endif %}
+
+        {{ content }}
+      </section>
+    </div>
+
+    {% include matomo.html %}
+  </body>
+</html>
diff --git a/webpages/stylesheets/pygment_trac.css b/webpages/stylesheets/pygment_trac.css
new file mode 100644
index 0000000..c6a6452
--- /dev/null
+++ b/webpages/stylesheets/pygment_trac.css
@@ -0,0 +1,69 @@
+.highlight { background: #ffffff; }
+.highlight .c { color: #999988; font-style: italic } /* Comment */
+.highlight .err { color: #a61717; background-color: #e3d2d2 } /* Error */
+.highlight .k { font-weight: bold } /* Keyword */
+.highlight .o { font-weight: bold } /* Operator */
+.highlight .cm { color: #999988; font-style: italic } /* Comment.Multiline */
+.highlight .cp { color: #999999; font-weight: bold } /* Comment.Preproc */
+.highlight .c1 { color: #999988; font-style: italic } /* Comment.Single */
+.highlight .cs { color: #999999; font-weight: bold; font-style: italic } /* Comment.Special */
+.highlight .gd { color: #000000; background-color: #ffdddd } /* Generic.Deleted */
+.highlight .gd .x { color: #000000; background-color: #ffaaaa } /* Generic.Deleted.Specific */
+.highlight .ge { font-style: italic } /* Generic.Emph */
+.highlight .gr { color: #aa0000 } /* Generic.Error */
+.highlight .gh { color: #999999 } /* Generic.Heading */
+.highlight .gi { color: #000000; background-color: #ddffdd } /* Generic.Inserted */
+.highlight .gi .x { color: #000000; background-color: #aaffaa } /* Generic.Inserted.Specific */
+.highlight .go { color: #888888 } /* Generic.Output */
+.highlight .gp { color: #555555 } /* Generic.Prompt */
+.highlight .gs { font-weight: bold } /* Generic.Strong */
+.highlight .gu { color: #800080; font-weight: bold; } /* Generic.Subheading */
+.highlight .gt { color: #aa0000 } /* Generic.Traceback */
+.highlight .kc { font-weight: bold } /* Keyword.Constant */
+.highlight .kd { font-weight: bold } /* Keyword.Declaration */
+.highlight .kn { font-weight: bold } /* Keyword.Namespace */
+.highlight .kp { font-weight: bold } /* Keyword.Pseudo */
+.highlight .kr { font-weight: bold } /* Keyword.Reserved */
+.highlight .kt { color: #445588; font-weight: bold } /* Keyword.Type */
+.highlight .m { color: #009999 } /* Literal.Number */
+.highlight .s { color: #d14 } /* Literal.String */
+.highlight .na { color: #008080 } /* Name.Attribute */
+.highlight .nb { color: #0086B3 } /* Name.Builtin */
+.highlight .nc { color: #445588; font-weight: bold } /* Name.Class */
+.highlight .no { color: #008080 } /* Name.Constant */
+.highlight .ni { color: #800080 } /* Name.Entity */
+.highlight .ne { color: #990000; font-weight: bold } /* Name.Exception */
+.highlight .nf { color: #990000; font-weight: bold } /* Name.Function */
+.highlight .nn { color: #555555 } /* Name.Namespace */
+.highlight .nt { color: #000080 } /* Name.Tag */
+.highlight .nv { color: #008080 } /* Name.Variable */
+.highlight .ow { font-weight: bold } /* Operator.Word */
+.highlight .w { color: #bbbbbb } /* Text.Whitespace */
+.highlight .mf { color: #009999 } /* Literal.Number.Float */
+.highlight .mh { color: #009999 } /* Literal.Number.Hex */
+.highlight .mi { color: #009999 } /* Literal.Number.Integer */
+.highlight .mo { color: #009999 } /* Literal.Number.Oct */
+.highlight .sb { color: #d14 } /* Literal.String.Backtick */
+.highlight .sc { color: #d14 } /* Literal.String.Char */
+.highlight .sd { color: #d14 } /* Literal.String.Doc */
+.highlight .s2 { color: #d14 } /* Literal.String.Double */
+.highlight .se { color: #d14 } /* Literal.String.Escape */
+.highlight .sh { color: #d14 } /* Literal.String.Heredoc */
+.highlight .si { color: #d14 } /*
Literal.String.Interpol */ +.highlight .sx { color: #d14 } /* Literal.String.Other */ +.highlight .sr { color: #009926 } /* Literal.String.Regex */ +.highlight .s1 { color: #d14 } /* Literal.String.Single */ +.highlight .ss { color: #990073 } /* Literal.String.Symbol */ +.highlight .bp { color: #999999 } /* Name.Builtin.Pseudo */ +.highlight .vc { color: #008080 } /* Name.Variable.Class */ +.highlight .vg { color: #008080 } /* Name.Variable.Global */ +.highlight .vi { color: #008080 } /* Name.Variable.Instance */ +.highlight .il { color: #009999 } /* Literal.Number.Integer.Long */ + +.type-csharp .highlight .k { color: #0000FF } +.type-csharp .highlight .kt { color: #0000FF } +.type-csharp .highlight .nf { color: #000000; font-weight: normal } +.type-csharp .highlight .nc { color: #2B91AF } +.type-csharp .highlight .nn { color: #000000 } +.type-csharp .highlight .s { color: #A31515 } +.type-csharp .highlight .sc { color: #A31515 } diff --git a/webpages/stylesheets/styles.css b/webpages/stylesheets/styles.css new file mode 100644 index 0000000..748ffcb --- /dev/null +++ b/webpages/stylesheets/styles.css @@ -0,0 +1,244 @@ +body { + background-color: #fff; + padding:50px; + font: normal 16px/1.5 Verdana, Arial, Helvetica, sans-serif; + color:#595959; +} + +h1, h2, h3, h4, h5, h6 { + color:#222; + margin:0 0 20px; +} + +p, ul, ol, table, pre, dl { + margin:0 0 20px; +} + +h1, h2, h3 { + line-height:1.1; +} + +h1 { + font-size:28px; + font-weight: 500; +} + +h2 { + color:#393939; + font-weight: 500; +} + +h3, h4, h5, h6 { + color:#494949; + font-weight: 500; +} + +a { + color:#39c; + text-decoration:none; +} + +a:hover { + color:#069; +} + +a small { + font-size:11px; + color:#777; + margin-top:-0.3em; + display:block; +} + +a:hover small { + color:#777; +} + +.wrapper { + /* width:860px; */ + width: 100%; + margin:0 auto; +} + +blockquote { + border-left:1px solid #e5e5e5; + margin:0; + padding:0 0 0 20px; + font-style:italic; +} + +code, pre { + font-family:Monaco, Bitstream Vera Sans Mono, Lucida Console, Terminal, Consolas, Liberation Mono, DejaVu Sans Mono, Courier New, monospace; + color:#333; +} + +pre { + font-size: 15px; + padding:8px 15px; + background: #f8f8f8; + border-radius:5px; + border:1px solid #e5e5e5; + overflow-x: auto; +} + +a code { + color: inherit; +} + +table { + width:100%; + border-collapse:collapse; +} + +th, td { + text-align:left; + padding:5px 10px; + border-bottom:1px solid #e5e5e5; +} + +dt { + color:#444; + font-weight:500; +} + +th { + color:#444; +} + +img { + max-width:100%; +} + +header { + /* width:270px; */ + width:calc(29% - 50px); + height:calc(100% - 160px); + overflow: auto; + float:left; + position:fixed; + -webkit-font-smoothing:subpixel-antialiased; +} + +header li { + list-style-type: disc; +} + +header ul { + padding-left: 1rem; +} + +header ul > li { + margin-left: 1rem; +} + +ul.no-bullets { + padding-left: 0; +} + +ul.no-bullets > li { + list-style: none; +} + +strong { + color:#222; + font-weight:500; +} + +section { + width:70%; + max-width:54em; + float:right; + padding-bottom:50px; +} + +small { + font-size:11px; +} + +hr { + border:0; + background:#e5e5e5; + height:1px; + margin:0 0 20px; +} + +footer { + /* width:270px; */ + width:calc(24% - 50px); + height:40px; + float:left; + position:fixed; + padding:30px 0; + bottom:0px; + background-color:white; + -webkit-font-smoothing:subpixel-antialiased; +} + +.post-date { + float: right; +} + +.part-list-title { + margin-bottom:5px; +} + +.part-entry { + margin-bottom:5px; +} + +@media print, 
screen and (max-width: 960px) {
+
+  div.wrapper {
+    width:auto;
+    margin:0;
+  }
+
+  header, section, footer {
+    float:none;
+    position:static;
+    width:auto;
+  }
+
+  header {
+    padding-right:320px;
+  }
+
+  section {
+    border:1px solid #e5e5e5;
+    border-width:1px 0;
+    padding:20px 0;
+    margin:0 0 20px;
+  }
+
+  header a small {
+    display:inline;
+  }
+}
+
+@media print, screen and (max-width: 720px) {
+  body {
+    word-wrap:break-word;
+  }
+
+  header {
+    padding:0;
+  }
+
+  pre, code {
+    word-wrap:normal;
+  }
+}
+
+@media print, screen and (max-width: 480px) {
+  body {
+    padding:15px;
+  }
+}
+
+@media print {
+  body {
+    padding:0.4in;
+    font-size:12pt;
+    color:#444;
+  }
+}
diff --git a/webpages/vm-operator/02_2_operator.png b/webpages/vm-operator/02_2_operator.png
new file mode 100644
index 0000000..d3909d4
Binary files /dev/null and b/webpages/vm-operator/02_2_operator.png differ
diff --git a/webpages/vm-operator/VM-Operator-GUI-preview.png b/webpages/vm-operator/VM-Operator-GUI-preview.png
new file mode 100644
index 0000000..b5293d7
Binary files /dev/null and b/webpages/vm-operator/VM-Operator-GUI-preview.png differ
diff --git a/webpages/vm-operator/VM-Operator-GUI-view.png b/webpages/vm-operator/VM-Operator-GUI-view.png
new file mode 100644
index 0000000..0463cc5
Binary files /dev/null and b/webpages/vm-operator/VM-Operator-GUI-view.png differ
diff --git a/webpages/vm-operator/VM-Operator-with-font.svg b/webpages/vm-operator/VM-Operator-with-font.svg
new file mode 100644
index 0000000..6240969
--- /dev/null
+++ b/webpages/vm-operator/VM-Operator-with-font.svg
@@ -0,0 +1,173 @@
+<!-- SVG logo with the text "VM"; markup not reproduced here -->
diff --git a/webpages/vm-operator/VM-Operator.svg b/webpages/vm-operator/VM-Operator.svg
new file mode 100644
index 0000000..c8616d5
--- /dev/null
+++ b/webpages/vm-operator/VM-Operator.svg
@@ -0,0 +1,184 @@
+<!-- SVG logo; markup not reproduced here -->
diff --git a/webpages/vm-operator/VmViewer-preview.png b/webpages/vm-operator/VmViewer-preview.png
new file mode 100644
index 0000000..d13387f
Binary files /dev/null and b/webpages/vm-operator/VmViewer-preview.png differ
diff --git a/webpages/vm-operator/admin-gui.md b/webpages/vm-operator/admin-gui.md
new file mode 100644
index 0000000..15a6dec
--- /dev/null
+++ b/webpages/vm-operator/admin-gui.md
@@ -0,0 +1,18 @@
+---
+title: VM-Operator Web-GUI for Admins
+layout: vm-operator
+---
+
+# Administrator view
+
+An overview display shows the current CPU and RAM usage and a graph
+with recent changes.
+
+![VM-Operator GUI](VM-Operator-GUI-preview.png)
+
+The detail display lists all VMs. From here you can start and stop
+the VMs and adjust their CPU and RAM usage (this modifies the definition
+in Kubernetes).
+
+![VM-Operator GUI](VM-Operator-GUI-view.png)
+
diff --git a/webpages/vm-operator/controller.md b/webpages/vm-operator/controller.md
new file mode 100644
index 0000000..2a00b16
--- /dev/null
+++ b/webpages/vm-operator/controller.md
@@ -0,0 +1,226 @@
+---
+title: VM-Operator Controller
+layout: vm-operator
+---
+
+# The Controller
+
+The controller component (which is part of the manager) monitors
+custom resources of kind `VirtualMachine`. It creates or modifies
+other resources in the cluster as required to get the VM defined
+by the CR up and running.
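+
+Assuming the plural resource name `virtualmachines` defined by the CRD
+and the `vmop-demo` namespace used below, the CRs can be inspected like
+any other Kubernetes resource:
+
+```sh
+# List the VirtualMachine CRs that the controller acts upon
+kubectl -n vmop-demo get virtualmachines
+
+# Show the complete definition (including status) of a single VM
+kubectl -n vmop-demo get virtualmachine test-vm -o yaml
+```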
+
+Here is the sample definition of a VM from the
+["local-path" example](https://github.com/mnlipp/VM-Operator/tree/main/example/local-path):
+
+```yaml
+apiVersion: "vmoperator.jdrupes.org/v1"
+kind: VirtualMachine
+metadata:
+  namespace: vmop-demo
+  name: test-vm
+spec:
+  guestShutdownStops: false
+
+  vm:
+    state: Running
+    maximumCpus: 4
+    currentCpus: 2
+    maximumRam: 8Gi
+    currentRam: 4Gi
+
+    networks:
+    - user: {}
+
+    disks:
+    - volumeClaimTemplate:
+        metadata:
+          name: system
+        spec:
+          storageClassName: ""
+          selector:
+            matchLabels:
+              app.kubernetes.io/name: vmrunner
+              app.kubernetes.io/instance: test-vm
+              vmrunner.jdrupes.org/disk: system
+          resources:
+            requests:
+              storage: 40Gi
+    - cdrom:
+        image: ""
+        # image: https://download.fedoraproject.org/pub/fedora/linux/releases/38/Workstation/x86_64/iso/Fedora-Workstation-Live-x86_64-38-1.6.iso
+        # image: "Fedora-Workstation-Live-x86_64-38-1.6.iso"
+
+    display:
+      spice:
+        port: 5910
+        # Since 3.0.0:
+        # generateSecret: false
+```
+
+## Pod management
+
+The central resource created by the controller is a
+[stateful set](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/)
+with the same name as the VM (`metadata.name`). Its number of replicas is
+set to 1 if `spec.vm.state` is "Running" (the default is "Stopped", which
+sets the replicas to 0).
+
+The property `spec.guestShutdownStops` (since 2.2.0) controls the effect of a
+shutdown initiated by the guest. If set to `false` (the default), a new pod
+is automatically created by the stateful set controller and the VM is thus
+restarted. If set to `true`, the runner sets `spec.vm.state` to "Stopped"
+before terminating and thus prevents the creation of a new pod.
+
+## Defining the basics
+
+How to define the number of CPUs and the size of the RAM of the VM
+should be obvious from the example. Note that changes to the current
+number of CPUs and the current RAM size are propagated to
+running VMs.
+
+## Defining disks
+
+Perhaps the most interesting part is the definition of the VM's disks.
+This is done by adding one or more `volumeClaimTemplate`s to the
+list of disks. As its name suggests, such a template is used by the
+controller to generate a PVC.
+
+The example template does not provision any storage. Rather, it references
+a PV that you must have created first. This may be your first approach
+if you have existing storage from running the VM outside Kubernetes
+(e.g. with libvirtd).
+
+If you have Ceph or some other full-fledged storage provider installed
+and create a new VM, provisioning a disk can happen automatically,
+as shown in this example:
+
+```yaml
+    disks:
+    - volumeClaimTemplate:
+        metadata:
+          name: system
+        spec:
+          storageClassName: rook-ceph-block
+          resources:
+            requests:
+              storage: 40Gi
+```
+
+The disk will be available as "/dev/*name*-disk" in the VM,
+using the string from `.volumeClaimTemplate.metadata.name` as *name*.
+If no name is defined in the metadata, "/dev/disk-*n*"
+is used instead, with *n* being the index of the disk
+definition in the list of disks.
+
+Apart from appending "-disk" to the name (or generating the name), the
+`volumeClaimTemplate` is simply copied into the stateful set definition
+for the VM (with some additional labels, see below). The controller
+for stateful sets appends the started pod's name to the name of the
+volume claim templates when it creates the PVCs. Therefore you'll
+eventually find the PVCs as "*name*-disk-*vmName*-0"
+(or "disk-*n*-*vmName*-0").
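+
+For the example above, this naming scheme can be verified by listing the
+claims after the VM has been started (assuming the `vmop-demo` namespace);
+the template named `system` should show up as `system-disk-test-vm-0`:
+
+```sh
+# List the PVCs that the stateful set controller created for the VM
+kubectl -n vmop-demo get pvc
+```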
+
+PVCs generated from stateful set definitions are considered "precious"
+and are never removed automatically. This behavior is a perfect fit for
+VMs: usually, you do not want the disks to be removed automatically when
+you (maybe accidentally) remove the CR for the VM. To simplify the lookup
+for an eventual (manual) removal, all PVCs are labeled with
+"app.kubernetes.io/name: vm-runner", "app.kubernetes.io/instance: *vmName*",
+and "app.kubernetes.io/managed-by: vm-operator".
+
+## Choosing an image for the runner
+
+The image used for the runner can be configured with
+[`spec.image`](https://github.com/mnlipp/VM-Operator/blob/7e094e720b7b59a5e50f4a9a4ad29a6000ec76e6/deploy/crds/vms-crd.yaml#L19).
+This is a mapping with either a single key `source` or a detailed
+configuration using the keys `repository`, `path` etc.
+
+Currently, two runner images are maintained: one based on
+Arch Linux (`ghcr.io/mnlipp/org.jdrupes.vmoperator.runner.qemu-arch`) and a
+second one based on Alpine (`ghcr.io/mnlipp/org.jdrupes.vmoperator.runner.qemu-alpine`).
+
+Starting with release 1.0, all versions of runner images and managers
+that have the same major release number are guaranteed to be compatible.
+
+## Generating cloud-init data
+
+*Since: 2.2.0*
+
+The optional object `.spec.cloudInit` with sub-objects `.cloudInit.metaData`,
+`.cloudInit.userData` and `.cloudInit.networkConfig` can be used to provide
+data for
+[cloud-init](https://cloudinit.readthedocs.io/en/latest/index.html).
+The data from the CR will be made available to the VM by the runner
+as a vfat-formatted disk (see the description of
+[NoCloud](https://cloudinit.readthedocs.io/en/latest/reference/datasources/nocloud.html)).
+
+If `.metaData.instance-id` is not defined, the controller automatically
+generates it from the CR's `resourceVersion`. If `.metaData.local-hostname`
+is not defined, the controller adds this property using the value from
+`metadata.name`.
+
+Note that there are no schema definitions available for `.userData`
+and `.networkConfig`. Whatever is defined in the CR is copied to
+the corresponding cloud-init file without any checks. (The introductory
+comment `#cloud-config` required at the beginning of `.userData` is
+generated automatically by the runner.)
+
+## Display secret/password
+
+*Since: 2.3.0*
+
+You can define a display password using a Kubernetes secret.
+When you start a VM, the controller checks if there is a secret
+with the labels "app.kubernetes.io/name: vm-runner",
+"app.kubernetes.io/component: display-secret" and
+"app.kubernetes.io/instance: *vmname*" in the namespace of the
+VM definition. The name of the secret can be chosen freely.
+
+```yaml
+kind: Secret
+apiVersion: v1
+metadata:
+  name: test-vm-display-secret
+  namespace: vmop-demo
+  labels:
+    app.kubernetes.io/name: vm-runner
+    app.kubernetes.io/instance: test-vm
+    app.kubernetes.io/component: display-secret
+type: Opaque
+data:
+  display-password: dGVzdC12bQ==
+  # Since 3.0.0:
+  # password-expiry: bmV2ZXI=
+```
+
+If such a secret is found for the VM, the VM is configured to use
+the specified display password. The display password in the secret
+can be updated while the VM runs[^delay]. Activating or deactivating
+the display password while a VM runs is not supported by Qemu and
+therefore requires stopping the VM, adding/removing the secret and
+restarting the VM.
+
+[^delay]: Be aware of the possible delay, see e.g.
+    [here](https://web.archive.org/web/20240223073838/https://ahmet.im/blog/kubernetes-secret-volumes-delay/).
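+
+Updating the password of a running VM is thus an ordinary secret update.
+As a sketch, using the secret from the example above:
+
+```sh
+# Base64 encode the new display password
+NEW_PW=$(echo -n 'new-password' | base64)
+# Patch the secret; the runner sees the change only after the kubelet
+# has refreshed the mounted secret (see the note on the delay above)
+kubectl -n vmop-demo patch secret test-vm-display-secret \
+  --type=merge -p "{\"data\":{\"display-password\":\"$NEW_PW\"}}"
+```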
+
+*Since: 3.0.0*
+
+The secret's `data` can have an additional property `password-expiry` which
+specifies a (base64 encoded) expiry date for the password. Supported
+values are those defined by Qemu (`+n` seconds from now, `n` as a Unix
+timestamp, `never` and `now`).
+
+Unless `spec.vm.display.spice.generateSecret` is set to `false` in the VM
+definition, the controller creates a secret for the display
+password automatically if none is found. The secret is created
+with a random password that expires immediately, which makes the
+display effectively inaccessible until the secret is modified.
+Note that a password set manually may be overwritten by components
+of the manager unless the password-expiry is set to "never" or to
+some time in the future.
+
+## Further reading
+
+For a detailed description of the available configuration options see the
+[CRD](https://github.com/mnlipp/VM-Operator/blob/main/deploy/crds/vms-crd.yaml).
diff --git a/webpages/vm-operator/favicon.svg b/webpages/vm-operator/favicon.svg
new file mode 100644
index 0000000..e216c44
--- /dev/null
+++ b/webpages/vm-operator/favicon.svg
@@ -0,0 +1,88 @@
+<!-- SVG favicon with the letters "M" and "L"; markup not reproduced here -->
diff --git a/webpages/vm-operator/index.md b/webpages/vm-operator/index.md
new file mode 100644
index 0000000..04134d5
--- /dev/null
+++ b/webpages/vm-operator/index.md
@@ -0,0 +1,60 @@
+---
+title: VM-Operator by mnlipp
+description: A Kubernetes operator for running virtual machines (notably Qemu VMs) in pods on Kubernetes
+layout: vm-operator
+---
+
+# Welcome to VM-Operator
+
+The goal of this project is to provide the means for running Qemu-based
+VMs in Kubernetes pods.
+
+The image used for the VM pods combines Qemu and a control program
+for starting and managing the Qemu process. This application is called
+"[the runner](runner.md)".
+
+While you can deploy a runner manually (or with the help of some
+helm templates), the preferred way is to deploy "[the manager](manager.md)"
+application, which acts as a Kubernetes operator for runners
+and thus the VMs.
+
+If you just want to try out things, you can skip the remainder of this
+page and proceed to "[the manager](manager.md)".
+
+## Motivation
+
+The project was triggered by a remark in the discussion about RedHat
+[dropping SPICE support](https://bugzilla.redhat.com/show_bug.cgi?id=2030592)
+from the RHEL packages. This means that you have to run Qemu in a
+container on RHEL and its derivatives if you want to continue using SPICE.
+So KubeVirt comes to mind, but
+[one comment](https://bugzilla.redhat.com/show_bug.cgi?id=2030592#c4)
+mentioned that the [KubeVirt](https://kubevirt.io/) project isn't
+interested in supporting SPICE either.
+
+Time to have a look at alternatives. Libvirt has become a common
+tool to configure and run Qemu. But some of its functionality, notably
+the management of storage and networking for the VMs, is already provided
+by Kubernetes. Therefore this project takes a fresh approach of
+running Qemu in a pod using a simple, lightweight manager called "runner".
+Providing resources to the VM is left to Kubernetes mechanisms as
+much as possible.
+
+## VMs and Pods
+
+VMs are not the typical workload managed by Kubernetes. You can neither
+have replicas nor can the containers simply be restarted without a major
+impact on the "application". So there are many features for managing
+pods that we cannot make use of. Qemu in its container can only be
+deployed as a pod or using a stateful set with one replica, which is
+rather close to simply deploying the pod (you get the restart and some
+PVC management "for free").
+
+A second look, however, reveals that Kubernetes has more to offer.
+* It has a well-defined API for managing resources.
+* It provides access to different kinds of managed storage for the VMs.
+* Its managing features *are* useful for running the component that
+  manages the pods with the VMs.
+
+And if you use Kubernetes anyway, running the VMs within Kubernetes
+provides you with a unified view of all (or most of) your workloads,
+which simplifies the maintenance of your platform.
diff --git a/webpages/vm-operator/manager.md b/webpages/vm-operator/manager.md
new file mode 100644
index 0000000..4613201
--- /dev/null
+++ b/webpages/vm-operator/manager.md
@@ -0,0 +1,150 @@
+---
+title: VM-Operator Manager
+layout: vm-operator
+---
+
+# The Manager
+
+The Manager is the program that provides the controller from the
+[operator pattern](https://github.com/cncf/tag-app-delivery/blob/eece8f7307f2970f46f100f51932db106db46968/operator-wg/whitepaper/Operator-WhitePaper_v1-0.md#operator-components-in-kubernetes)
+together with a Web-GUI. It should be run in a container in the cluster.
+
+## Installation
+
+A manager instance manages the VMs in its own namespace. The only
+common (and therefore cluster-scoped) resource used by all instances
+is the CRD. It is available
+[here](https://github.com/mnlipp/VM-Operator/raw/main/deploy/crds/vms-crd.yaml)
+and must be created first.
+
+```sh
+kubectl apply -f https://github.com/mnlipp/VM-Operator/raw/main/deploy/crds/vms-crd.yaml
+```
+
+The example above uses the CRD from the main branch. This is okay if
+you apply it once. If you want to preserve the link for automatic
+upgrades, you should use a link that points to one of the release branches.
+
+The next step is to create a namespace for the manager and the VMs, e.g.
+`vmop-demo`.
+
+```sh
+kubectl create namespace vmop-demo
+```
+
+Finally, you have to create an account, the role, the binding, etc. The
+default files for creating these resources using the default namespace
+can be found in the
+[deploy](https://github.com/mnlipp/VM-Operator/tree/main/deploy)
+directory. I recommend using
+[kustomize](https://kubernetes.io/docs/tasks/manage-kubernetes-objects/kustomization/)
+to create your own configuration.
+
+## Initial Configuration
+
+Use one of the `kustomize.yaml` files from the
+[example](https://github.com/mnlipp/VM-Operator/tree/main/example) directory
+as a starting point. The directory contains two examples. Here's the file
+from subdirectory `local-path`:
+
+```yaml
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+# Again, I recommend using the deploy directory from a
+# release branch for anything but test environments.
+- https://github.com/mnlipp/VM-Operator/deploy
+
+namespace: vmop-demo
+
+patches:
+- patch: |-
+    kind: PersistentVolumeClaim
+    apiVersion: v1
+    metadata:
+      name: vmop-image-repository
+    spec:
+      # Default is ReadOnlyMany
+      accessModes:
+      - ReadWriteOnce
+      resources:
+        requests:
+          # Default is 100Gi
+          storage: 10Gi
+      # Default is to use the default storage class
+      storageClassName: local-path
+
+- patch: |-
+    kind: ConfigMap
+    apiVersion: v1
+    metadata:
+      name: vm-operator
+    data:
+      config.yaml: |
+        "/Manager":
+          # "/GuiHttpServer":
+          #   See section about the GUI
+          "/Controller":
+            "/Reconciler":
+              runnerDataPvc:
+                # Default is to use the default storage class
+                storageClassName: local-path
+```
+
+The sample file adds a namespace (`vmop-demo`) to all resource
+definitions and patches the PVC `vmop-image-repository`. This is a volume
+that is mounted into all pods that run a VM. The volume is intended
+to be used as a common repository for CDROM images. The PVC must exist,
+and it must be bound, before any pods can run.
+
+The second patch affects the small volume that is created for each
+runner and contains the VM's configuration data such as the EFI vars.
+By default, the manager creates the PVC for this volume
+without a storage class (which means that the default storage
+class is used). The patch provides a new configuration file for
+the manager that makes the reconciler use `local-path` as the storage
+class for this PVC. Details about the manager configuration can be
+found in the next section.
+
+Note that you need none of the patches if you are fine with using your
+cluster's default storage class and that class supports the ReadOnlyMany
+access mode.
+
+Check that the pod with the manager is running:
+
+```sh
+kubectl -n vmop-demo get pods -l app.kubernetes.io/name=vm-operator
+```
+
+Proceed to the description of [the controller](controller.html)
+for creating your first VM.
+
+## Configuration Details
+
+The [config map](https://github.com/mnlipp/VM-Operator/blob/main/deploy/vmop-config-map.yaml)
+for the manager may provide a configuration file (`config.yaml`) and
+a file with logging properties (`logging.properties`). Both files are mounted
+into the container that runs the manager and are evaluated by the manager
+on startup. If no files are provided, the manager uses built-in defaults.
+
+The configuration file for the Manager follows the conventions of
+the [JGrapes](https://jgrapes.org/) component framework.
+The keys that start with a slash select a component within the
+application's component hierarchy. The mapping associated with the
+selected component configures this component's properties.
+
+The available configuration options for the components can be found
+in their respective JavaDocs (e.g.
+[here](latest-release/javadoc/org/jdrupes/vmoperator/manager/Reconciler.html)
+for the Reconciler).
+
+## Development Configuration
+
+The [dev-example](https://github.com/mnlipp/VM-Operator/tree/main/dev-example)
+directory contains a `kustomize.yaml` that uses the development namespace
+`vmop-dev` and creates a deployment for the manager with 0 replicas.
+
+This environment can be used for running the manager in the IDE. As the
+namespace to manage cannot be detected from the environment, you must pass
+`-c ../dev-example/config.yaml` as an argument when starting the manager.
+This configures it to use the namespace `vmop-dev`.
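+
+Because the deployment is created with 0 replicas, no manager pod runs
+in the cluster in this setup, which can be verified with:
+
+```sh
+# The dev-example deployment deliberately has no running replicas;
+# the manager is started in the IDE instead
+kubectl -n vmop-dev get deployments
+```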
diff --git a/webpages/vm-operator/runner.md b/webpages/vm-operator/runner.md
new file mode 100644
index 0000000..d580530
--- /dev/null
+++ b/webpages/vm-operator/runner.md
@@ -0,0 +1,108 @@
+---
+title: VM-Operator Runner
+layout: vm-operator
+---
+
+# The Runner
+
+For most use cases, Qemu needs to be started and controlled by another
+program that manages the Qemu process. This program is called the
+runner in this context.
+
+The most prominent reason for this second program is that it allows
+a VM to be shut down cleanly in response to a TERM signal. Qemu handles
+the TERM signal by flushing all buffers and stopping, leaving the disks in
+a [crash-consistent state](https://gitlab.com/qemu-project/qemu/-/issues/148).
+For a graceful shutdown, a parent process must handle the TERM signal, send
+the `system_powerdown` command to the Qemu process and wait for its completion.
+
+Another reason for having the runner is that an additional process needs
+to be started before Qemu if the VM is supposed to include a (software) TPM.
+
+Finally, we want some kind of higher-level interface for applying runtime
+changes to the VM, such as changing the CD or configuring the number of
+CPUs and the memory.
+
+The runner takes care of all these issues. Although it is intended to
+run in a container (which runs in a Kubernetes pod), it does not require
+a container. You can start and use it as an ordinary program on any
+system, provided that you have the required commands (qemu, swtpm)
+installed.
+
+## Stand-alone Configuration
+
+Upon startup, the runner reads its main configuration file,
+which defaults to `/etc/opt/vmrunner/config.yaml` and may be changed
+using the `-c` (or `--config`) command line option.
+
+A sample configuration file with annotated options can be found
+[here](https://github.com/mnlipp/VM-Operator/blob/main/org.jdrupes.vmoperator.runner.qemu/config-sample.yaml).
+As the runner implementation uses the
+[JGrapes](https://jgrapes.org/) framework, the file
+follows the framework's
+[conventions](https://jgrapes.org/latest-release/javadoc/org/jgrapes/util/YamlConfigurationStore.html). The top-level key "`/Runner`" selects
+the component to be configured. Nested within is the information
+to be applied to the component.
+
+The main entries in the configuration file are the "template" and
+the "vm" information. The runner processes the
+[FreeMarker template](https://freemarker.apache.org/), using the
+"vm" information to derive the Qemu command. The idea is that
+the "vm" section provides high-level information such as the boot
+mode, the number of CPUs, the RAM size and the disks. The template
+defines a particular VM type, i.e. it contains the "nasty details"
+that do not need to be modified for some given set of VM instances.
+
+The templates provided with the runner can be found
+[here](https://github.com/mnlipp/VM-Operator/tree/main/org.jdrupes.vmoperator.runner.qemu/templates). When details
+of the VM configuration need modification, a new VM type
+(i.e. a new template) has to be defined. Authoring a new
+template requires some knowledge about the
+[qemu invocation](https://www.qemu.org/docs/master/system/invocation.html).
+Despite the many "warnings" that you find on the web, configuring the
+invocation arguments of Qemu is only a bit (but not much) more
+challenging than editing libvirt's XML.
+
+## Running in a Pod
+
+The real purpose of the runner is to run a VM on Kubernetes in a pod.
+When running in a Kubernetes pod, `/etc/opt/vmrunner/config.yaml` should be
+provided by a
+[ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/).
+
+If additional templates are required, a ReadOnlyMany PV should
+be mounted at `/opt/vmrunner/templates`. The PV should contain copies
+of the standard templates as well as the additional templates. Of course,
+a ConfigMap can again be used for this purpose.
+
+Networking options are rather limited. The assumption is that in general
+the VM wants full network connectivity. To achieve this, the pod must
+run with host networking, and the host's networking must provide a
+bridge that the VM can attach to. The only currently supported
+alternative is the less performant
+"[user networking](https://wiki.qemu.org/Documentation/Networking#User_Networking_(SLIRP))",
+which may be used in a stand-alone development configuration.
+
+## Runtime changes
+
+The runner supports adaptation to changes of the RAM size (using the
+balloon device) and to changes of the number of CPUs. Note that
+in order to bring new CPUs online on Linux guests, you need a
+[udev rule](https://docs.kernel.org/core-api/cpu_hotplug.html#user-space-notification) which is not installed by default[^simplest].
+
+The runner also changes the images loaded in CDROM drives. If the
+drive is locked, i.e. if it doesn't respond to the "open tray" command,
+the change is deferred until the VM opens the tray.
+
+Finally, `powerdownTimeout` can be changed while the qemu process runs.
+
+[^simplest]: The simplest form of the rule is probably:
+    ```
+    ACTION=="add", SUBSYSTEM=="cpu", ATTR{online}="1"
+    ```
+
+## Testing with Helm
+
+There is a
+[Helm Chart](https://github.com/mnlipp/VM-Operator/tree/main/org.jdrupes.vmoperator.runner.qemu/helm-test)
+for testing the runner.
diff --git a/webpages/vm-operator/upgrading.md b/webpages/vm-operator/upgrading.md
new file mode 100644
index 0000000..41d04ad
--- /dev/null
+++ b/webpages/vm-operator/upgrading.md
@@ -0,0 +1,29 @@
+---
+title: Upgrading
+layout: vm-operator
+---
+
+# Upgrading
+
+## To version 3.0.0
+
+All configuration files are backward compatible with version 2.3.0.
+Note that in order to make use of the new viewer component,
+[permissions](https://mnlipp.github.io/VM-Operator/user-gui.html#control-access-to-vms)
+must be configured in the CR definition. Also note that
+[display secrets](https://mnlipp.github.io/VM-Operator/user-gui.html#securing-access)
+are automatically created unless explicitly disabled.
+
+## To version 2.3.0
+
+Starting with version 2.3.0, the web GUI uses a login conlet that
+supports OIDC providers. This affects the configuration of the
+web GUI components.
+
+## To version 2.2.0
+
+Version 2.2.0 sets the stateful set's `.spec.updateStrategy.type` to
+"OnDelete". This fails for no apparent reason if a definition of
+the stateful set with the default value "RollingUpdate" already exists.
+In order to fix this, either the stateful set or the complete VM definition
+must be deleted and the manager must be restarted.
diff --git a/webpages/vm-operator/user-gui.md b/webpages/vm-operator/user-gui.md
new file mode 100644
index 0000000..394c28f
--- /dev/null
+++ b/webpages/vm-operator/user-gui.md
@@ -0,0 +1,143 @@
+---
+title: VM-Operator Web-GUI for Users
+layout: vm-operator
+---
+
+# User view
+
+*Since: 3.0.0*
+
+The idea of the user view is to provide an intuitive widget that
+allows users to access their own VMs and, optionally, to start
+and stop them.
+
+![VM-Viewer](VmViewer-preview.png)
+
+The configuration options resulting from this seemingly simple
+requirement are unexpectedly complex.
+
+## Control access to VMs
+
+First of all, we have to define which VMs a user can access. This
+is done using the optional property `spec.permissions` of the
+VM definition.
+
+```yaml
+spec:
+  permissions:
+  - role: admin
+    may:
+    - "*"
+  - user: test
+    may:
+    - start
+    - stop
+    - accessConsole
+```
+
+Permissions can be granted to individual users or to roles. There
+is a permission for each possible action; "*" grants them all.
+
+## Simple usage vs. expert usage
+
+Next, there are two ways to create the VM widgets (preview conlets
+in the framework's terms). They can be created on demand or
+automatically for each VM that a logged-in user has permission to
+access. The former is the preferred way for an administrator who
+has access to all VMs and only needs to open a particular VM's
+console for troubleshooting. The latter is the preferred way
+for a regular user who has access to a limited number of VMs.
+In this case, creating the widgets automatically has the additional
+benefit that regular users don't need to know how to create and
+configure the widgets using the menu and the properties dialog.
+
+Automatic synchronization of widgets and accessible VMs is controlled
+by the property `syncPreviewsFor` of the VM viewer. It's an array of
+objects that each specify either a role or a user.
+
+```yaml
+"/Manager":
+  # This configures the GUI
+  "/GuiHttpServer":
+    "/ConsoleWeblet":
+      "/WebConsole":
+        "/ComponentCollector":
+          "/VmViewer":
+            syncPreviewsFor:
+            - role: user
+            - user: test
+            displayResource:
+              preferredIpVersion: ipv4
+```
+
+## Console access
+
+Access to the VM's console is implemented by generating a
+[connection file](https://manpages.debian.org/testing/virt-viewer/remote-viewer.1.en.html#CONNECTION_FILE) for virt-viewer when the user clicks on
+the console icon. If the browser is configured to open this kind of
+file automatically, the console opens without further user action.
+
+The file contains all required and optional information to start the
+remote viewer.
+
+ * The "host" is by default the IP address of the node that the
+   VM's pod is running on (remember that the runner uses host
+   networking).
+ * The "port" is simply taken from the VM definition.
+
+In more complex scenarios, an administrator may have set up a load
+balancer that hides the worker nodes' IP addresses, or the worker
+nodes use an internal network and can only be accessed through a
+proxy. For both cases, the values to include in the connection file
+can be specified as properties of `spec.vm.display.spice` in the
+VM definition.
+
+```yaml
+spec:
+  vm:
+    display:
+      spice:
+        port: 5930
+        server: 192.168.19.32
+        proxyUrl: http://lgpe-spice.some.host:1234
+        generateSecret: true
+```
+
+The value of `server` is used as the value for the key "host" in the
+connection file, thus overriding the default value. The value of
+`proxyUrl` is used as the value for the key "proxy".
+
+## Securing access
+
+As described [previously](./controller.html#display-secretpassword),
+access to a VM's display can be secured with a password. If a secret
+with a password exists for a VM, the password is
+included in the connection file.
+
+While this approach is very convenient for the user, it is not
+secure, because it leaves the password in plain text in a file on
+the user's computer (the downloaded connection file). To work around
+this, the display secret is updated with a random password with
+limited validity, unless the display secret defines a `password-expiry`
+in the future or with the value "never", or doesn't define a
+`password-expiry` at all.
+
+The automatically generated password is the base64 encoded value
+of 16 (strong) random bytes (128 random bits). It is valid for
+10 seconds only. This may be challenging on a slower computer
+or if users do not enable automatic opening of connection files
+in the browser. The validity can therefore be adjusted in the
+configuration.
+
+```yaml
+"/Manager":
+  "/Controller":
+    "/DisplaySecretMonitor":
+      # Validity of the generated password in seconds
+      passwordValidity: 10
+```
+
+Taking into account that the controller generates a display
+secret automatically by default, this approach to securing
+console access should be sufficient in all cases. (Feedback is
+appreciated if something has been missed.)
diff --git a/webpages/vm-operator/webgui.md b/webpages/vm-operator/webgui.md
new file mode 100644
index 0000000..38b9faa
--- /dev/null
+++ b/webpages/vm-operator/webgui.md
@@ -0,0 +1,117 @@
+---
+title: VM-Operator Web-GUI
+layout: vm-operator
+---
+
+# The Web-GUI
+
+The manager component provides a GUI via a web server. The web GUI is
+implemented using components from the
+[JGrapes WebConsole](https://jgrapes.org/WebConsole.html)
+project. Configuration of the GUI therefore follows the conventions
+of that framework.
+
+The structure of the configuration information should be easy to
+understand from the examples provided. In general, configuration values
+are applied to the individual components that make up an application.
+The hierarchy of the components is reflected in the configuration
+information because components are "addressed" by their position in
+that hierarchy. (See
+[the package description](latest-release/javadoc/org/jdrupes/vmoperator/manager/package-summary.html)
+for information about the complete component structure.)
+
+## Network access
+
+By default, the service is made available at port 8080 of the manager
+pod. Of course, a Kubernetes service and an ingress configuration must
+be added as required by the environment. (See the
+[definition](https://github.com/mnlipp/VM-Operator/blob/main/deploy/vmop-service.yaml)
+from the
+[sample deployment](https://github.com/mnlipp/VM-Operator/tree/main/deploy).)
+
+## User Access
+
+Access to the web GUI is controlled by the login conlet. The framework
+does not include sophisticated components for user management. Rather,
+it assumes that an OIDC provider is responsible for user authentication
+and role management.
+
+```yaml
+"/Manager":
+  # "/GuiSocketServer":
+  #   port: 8080
+  "/GuiHttpServer":
+    # This configures the GUI
+    "/ConsoleWeblet":
+      "/WebConsole":
+        "/LoginConlet":
+          # Starting with version 2.3.0 the preferred approach is to
+          # configure an OIDC provider for user management and
+          # authorization. See the text for details.
+          oidcProviders: {}
+
+          # Support for "local" users is provided as a fallback mechanism.
+          # Note that up to version 2.2.x "users" was an object with user
+          # names as its properties. Starting with 2.3.0 it is a list as
+          # shown.
+          users:
+          - name: admin
+            fullName: Administrator
+            password: "Generate hash with bcrypt"
+          - name: test
+            fullName: Test Account
+            password: "Generate hash with bcrypt"
+
+        # Required for using OIDC, see the text for details.
+        "/OidcClient":
+          redirectUri: "https://my.server.here/oauth/callback"
+
+        # May be used for assigning roles to both local users and users from
+        # the OIDC provider. Not needed if roles are managed by the OIDC
+        # provider.
+        "/RoleConfigurator":
+          rolesByUser:
+            # User admin has role admin
+            admin:
+            - admin
+            # Non-privileged users are users
+            test:
+            - user
+            # All users have role other
+            "*":
+            - other
+          replace: false
+
+        # Manages the permissions for the roles.
+        "/RoleConletFilter":
+          conletTypesByRole:
+            # Admins can use all conlets
+            admin:
+            - "*"
+            # Users can use the viewer conlet
+            user:
+            - org.jdrupes.vmoperator.vmviewer.VmViewer
+            # Others cannot use any conlet (except the login conlet, to log out)
+            other:
+            # Up to version 2.2.x
+            # - org.jgrapes.webconlet.locallogin.LoginConlet
+            # Starting with version 2.3.0
+            - org.jgrapes.webconlet.oidclogin.LoginConlet
+```
+
+How local users can be configured should be obvious from the example.
+The configuration of OIDC providers for user authentication (and
+optionally for role assignment) is explained in the documentation of the
+[login conlet](https://jgrapes.org/javadoc-webconsole/org/jgrapes/webconlet/oidclogin/LoginConlet.html).
+Details about the `RoleConfigurator` and `RoleConletFilter` can also be found
+in the documentation of the
+[JGrapes WebConsole](https://jgrapes.org/WebConsole.html)
+project.
+
+The configuration above allows all users with the role "admin" to use all
+GUI components and users with the role "user" to use only the viewer conlet,
+i.e. the [User view](user-gui.html). The fallback role "other" allows
+all users to use the login conlet to log out.
+
+## Views
+
+The configuration of the components that provide the administrator and
+user views is explained in the respective sections.