pacemaker  1.1.18-1a4ef7d180
Scalable High-Availability cluster resource manager
container.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
3  *
4  * This library is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * This library is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with this library; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #include <crm_internal.h>
20 
21 #include <ctype.h>
22 
23 #include <crm/pengine/rules.h>
24 #include <crm/pengine/status.h>
25 #include <crm/pengine/internal.h>
26 #include <unpack.h>
27 #include <crm/msg_xml.h>
28 
29 #define VARIANT_CONTAINER 1
30 #include "./variant.h"
31 
32 void tuple_free(container_grouping_t *tuple);
33 
/*!
 * \internal
 * \brief Calculate the next available IPv4 address in a bundle's range
 *
 * \param[in] last_ip  Most recently allocated address, as a string
 *
 * \return Newly allocated string containing the next address, or NULL if
 *         \p last_ip is not a valid IPv4 address or the range is exhausted
 *
 * \note The caller is responsible for freeing the result.
 */
static char *
next_ip(const char *last_ip)
{
    unsigned int oct1 = 0;
    unsigned int oct2 = 0;
    unsigned int oct3 = 0;
    unsigned int oct4 = 0;
    int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4);

    if (rc != 4) {
        /*@ TODO check for IPv6 */
        return NULL;

    } else if ((oct1 > 255) || (oct2 > 255) || (oct3 > 253)) {
        /* sscanf("%u") happily accepts out-of-range octets such as "300",
         * so reject anything that is not a usable IPv4 address; oct3 is
         * additionally capped so that a rollover below stays valid.
         */
        return NULL;

    } else if (oct4 > 253) {
        // Roll over into the next /24, skipping .0 and .255 host parts
        ++oct3;
        oct4 = 1;

    } else {
        ++oct4;
    }

    return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4);
}
60 
61 static int
62 allocate_ip(container_variant_data_t *data, container_grouping_t *tuple, char *buffer, int max)
63 {
64  if(data->ip_range_start == NULL) {
65  return 0;
66 
67  } else if(data->ip_last) {
68  tuple->ipaddr = next_ip(data->ip_last);
69 
70  } else {
71  tuple->ipaddr = strdup(data->ip_range_start);
72  }
73 
74  data->ip_last = tuple->ipaddr;
75 #if 0
76  return snprintf(buffer, max, " --add-host=%s-%d:%s --link %s-docker-%d:%s-link-%d",
77  data->prefix, tuple->offset, tuple->ipaddr,
78  data->prefix, tuple->offset, data->prefix, tuple->offset);
79 #else
80  if (data->type == PE_CONTAINER_TYPE_DOCKER) {
81  return snprintf(buffer, max, " --add-host=%s-%d:%s",
82  data->prefix, tuple->offset, tuple->ipaddr);
83  } else if (data->type == PE_CONTAINER_TYPE_RKT) {
84  return snprintf(buffer, max, " --hosts-entry=%s=%s-%d",
85  tuple->ipaddr, data->prefix, tuple->offset);
86  } else {
87  return 0;
88  }
89 #endif
90 }
91 
92 static xmlNode *
93 create_resource(const char *name, const char *provider, const char *kind)
94 {
95  xmlNode *rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);
96 
97  crm_xml_add(rsc, XML_ATTR_ID, name);
99  crm_xml_add(rsc, XML_AGENT_ATTR_PROVIDER, provider);
100  crm_xml_add(rsc, XML_ATTR_TYPE, kind);
101 
102  return rsc;
103 }
104 
117 static bool
118 valid_network(container_variant_data_t *data)
119 {
120  if(data->ip_range_start) {
121  return TRUE;
122  }
123  if(data->control_port) {
124  if(data->replicas_per_host > 1) {
125  pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix);
126  data->replicas_per_host = 1;
127  /* @TODO to be sure: clear_bit(rsc->flags, pe_rsc_unique); */
128  }
129  return TRUE;
130  }
131  return FALSE;
132 }
133 
134 static bool
135 create_ip_resource(
136  resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
137  pe_working_set_t * data_set)
138 {
139  if(data->ip_range_start) {
140  char *id = NULL;
141  xmlNode *xml_ip = NULL;
142  xmlNode *xml_obj = NULL;
143 
144  id = crm_strdup_printf("%s-ip-%s", data->prefix, tuple->ipaddr);
146  xml_ip = create_resource(id, "heartbeat", "IPaddr2");
147  free(id);
148 
149  xml_obj = create_xml_node(xml_ip, XML_TAG_ATTR_SETS);
150  crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset);
151 
152  crm_create_nvpair_xml(xml_obj, NULL, "ip", tuple->ipaddr);
153  if(data->host_network) {
154  crm_create_nvpair_xml(xml_obj, NULL, "nic", data->host_network);
155  }
156 
157  if(data->host_netmask) {
158  crm_create_nvpair_xml(xml_obj, NULL,
159  "cidr_netmask", data->host_netmask);
160 
161  } else {
162  crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", "32");
163  }
164 
165  xml_obj = create_xml_node(xml_ip, "operations");
166  crm_create_op_xml(xml_obj, ID(xml_ip), "monitor", "60s", NULL);
167 
168  // TODO: Other ops? Timeouts and intervals from underlying resource?
169 
170  crm_log_xml_trace(xml_ip, "Container-ip");
171  if (common_unpack(xml_ip, &tuple->ip, parent, data_set) == false) {
172  return FALSE;
173  }
174 
175  parent->children = g_list_append(parent->children, tuple->ip);
176  }
177  return TRUE;
178 }
179 
/*!
 * \internal
 * \brief Create the implicit docker resource that runs one bundle replica
 *
 * Builds XML for an ocf:heartbeat:docker primitive (image, run options,
 * mount points, and the command that runs inside the container), unpacks it
 * as a child of the bundle, and stores it in tuple->docker.
 *
 * \return TRUE on success, FALSE if the generated XML failed to unpack
 *
 * NOTE(review): the calloc() results below are used without a NULL check —
 * TODO confirm the project's out-of-memory policy.
 * NOTE(review): `offset` can reach `max` if user options are very long, after
 * which `max-offset` could go negative and be converted to a huge size_t by
 * snprintf — worth bounds-checking; verify inputs are bounded upstream.
 * NOTE(review): doxygen line 196 (between the id creation and
 * create_resource()) appears to have been dropped by the extraction —
 * presumably an id-sanitizing call; confirm against upstream.
 */
180 static bool
181 create_docker_resource(
182  resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
183  pe_working_set_t * data_set)
184 {
185  int offset = 0, max = 4096;
186  char *buffer = calloc(1, max+1);
187 
188  int doffset = 0, dmax = 1024;
189  char *dbuffer = calloc(1, dmax+1);
190 
191  char *id = NULL;
192  xmlNode *xml_docker = NULL;
193  xmlNode *xml_obj = NULL;
194 
195  id = crm_strdup_printf("%s-docker-%d", data->prefix, tuple->offset);
197  xml_docker = create_resource(id, "heartbeat", "docker");
198  free(id);
199 
/* Instance attributes for the docker agent */
200  xml_obj = create_xml_node(xml_docker, XML_TAG_ATTR_SETS);
201  crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset);
202 
203  crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
204  crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
205  crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
206  crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);
207 
/* Accumulate the full `docker run` option string in buffer (run_opts), and
 * the comma-separated list of source directories in dbuffer (mount_points).
 */
208  offset += snprintf(buffer+offset, max-offset, " --restart=no");
209 
210  /* Set a container hostname only if we have an IP to map it to.
211  * The user can set -h or --uts=host themselves if they want a nicer
212  * name for logs, but this makes applications happy who need their
213  * hostname to match the IP they bind to.
214  */
215  if (data->ip_range_start != NULL) {
216  offset += snprintf(buffer+offset, max-offset, " -h %s-%d",
217  data->prefix, tuple->offset);
218  }
219 
220  offset += snprintf(buffer+offset, max-offset, " -e PCMK_stderr=1");
221 
222  if(data->docker_network) {
223 // offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s", tuple->ipaddr);
224  offset += snprintf(buffer+offset, max-offset, " --net=%s", data->docker_network);
225  }
226 
/* Tell pacemaker_remoted inside the container which port to listen on */
227  if(data->control_port) {
228  offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port);
229  } else {
230  offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
231  }
232 
/* Map each configured storage mount; a nonzero flags value means the host
 * side is a per-replica subdirectory that must also be listed in
 * mount_points so the agent can create it.
 */
233  for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
234  container_mount_t *mount = pIter->data;
235 
236  if(mount->flags) {
237  char *source = crm_strdup_printf(
238  "%s/%s-%d", mount->source, data->prefix, tuple->offset);
239 
240  if(doffset > 0) {
241  doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
242  }
243  doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
244  offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target);
245  free(source);
246 
247  } else {
248  offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target);
249  }
250  if(mount->options) {
251  offset += snprintf(buffer+offset, max-offset, ":%s", mount->options);
252  }
253  }
254 
/* Publish each configured port, binding to the replica's IP when we have one */
255  for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
256  container_port_t *port = pIter->data;
257 
258  if(tuple->ipaddr) {
259  offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s",
260  tuple->ipaddr, port->source, port->target);
261  } else if(safe_str_neq(data->docker_network, "host")) {
262  // No need to do port mapping if net=host
263  offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target);
264  }
265  }
266 
267  if(data->docker_run_options) {
268  offset += snprintf(buffer+offset, max-offset, " %s", data->docker_run_options);
269  }
270 
271  if(data->docker_host_options) {
272  offset += snprintf(buffer+offset, max-offset, " %s", data->docker_host_options);
273  }
274 
275  crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
276  free(buffer);
277 
278  crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
279  free(dbuffer);
280 
/* With a child resource, run pacemaker_remoted inside the container (unless
 * overridden); without one, just run whatever the image or user specifies.
 */
281  if(tuple->child) {
282  if(data->docker_run_command) {
283  crm_create_nvpair_xml(xml_obj, NULL,
284  "run_cmd", data->docker_run_command);
285  } else {
286  crm_create_nvpair_xml(xml_obj, NULL,
287  "run_cmd", SBIN_DIR "/pacemaker_remoted");
288  }
289 
290  /* TODO: Allow users to specify their own?
291  *
292  * We just want to know if the container is alive, we'll
293  * monitor the child independently
294  */
295  crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
296  /* } else if(child && data->untrusted) {
297  * Support this use-case?
298  *
299  * The ability to have resources started/stopped by us, but
300  * unable to set attributes, etc.
301  *
302  * Arguably better to control API access this with ACLs like
303  * "normal" remote nodes
304  *
305  * crm_create_nvpair_xml(xml_obj, NULL,
306  * "run_cmd", "/usr/libexec/pacemaker/lrmd");
307  * crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
308  * "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
309  */
310  } else {
311  if(data->docker_run_command) {
312  crm_create_nvpair_xml(xml_obj, NULL,
313  "run_cmd", data->docker_run_command);
314  }
315 
316  /* TODO: Allow users to specify their own?
317  *
318  * We don't know what's in the container, so we just want
319  * to know if it is alive
320  */
321  crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
322  }
323 
324 
325  xml_obj = create_xml_node(xml_docker, "operations");
326  crm_create_op_xml(xml_obj, ID(xml_docker), "monitor", "60s", NULL);
327 
328  // TODO: Other ops? Timeouts and intervals from underlying resource?
329  crm_log_xml_trace(xml_docker, "Container-docker");
330  if (common_unpack(xml_docker, &tuple->docker, parent, data_set) == FALSE) {
331  return FALSE;
332  }
333  parent->children = g_list_append(parent->children, tuple->docker);
334  return TRUE;
335 }
336 
/*!
 * \internal
 * \brief Create the implicit rkt resource that runs one bundle replica
 *
 * Mirror of create_docker_resource() for the rkt container runtime: builds
 * XML for an ocf:heartbeat:rkt primitive (image, run options, mount points,
 * in-container command), unpacks it as a child of the bundle, and stores it
 * in tuple->docker (the same field is reused for either runtime).
 *
 * \return TRUE on success, FALSE if the generated XML failed to unpack
 *
 * NOTE(review): the calloc() results below are used without a NULL check,
 * and `max-offset` could go negative if options are very long (see the same
 * note on create_docker_resource()) — TODO confirm.
 * NOTE(review): doxygen line 355 (between the id creation and
 * create_resource()) appears to have been dropped by the extraction —
 * presumably an id-sanitizing call; confirm against upstream.
 */
337 static bool
338 create_rkt_resource(
339  resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
340  pe_working_set_t * data_set)
341 {
342  int offset = 0, max = 4096;
343  char *buffer = calloc(1, max+1);
344 
345  int doffset = 0, dmax = 1024;
346  char *dbuffer = calloc(1, dmax+1);
347 
348  char *id = NULL;
349  xmlNode *xml_docker = NULL;
350  xmlNode *xml_obj = NULL;
351 
/* rkt names volumes explicitly, so each mount gets a sequential volume id */
352  int volid = 0;
353 
354  id = crm_strdup_printf("%s-rkt-%d", data->prefix, tuple->offset);
356  xml_docker = create_resource(id, "heartbeat", "rkt");
357  free(id);
358 
359  xml_obj = create_xml_node(xml_docker, XML_TAG_ATTR_SETS);
360  crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset);
361 
362  crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
363  crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", "true");
364  crm_create_nvpair_xml(xml_obj, NULL, "force_kill", "false");
365  crm_create_nvpair_xml(xml_obj, NULL, "reuse", "false");
366 
367  /* Set a container hostname only if we have an IP to map it to.
368  * The user can set -h or --uts=host themselves if they want a nicer
369  * name for logs, but this makes applications happy who need their
370  * hostname to match the IP they bind to.
371  */
372  if (data->ip_range_start != NULL) {
373  offset += snprintf(buffer+offset, max-offset, " --hostname=%s-%d",
374  data->prefix, tuple->offset);
375  }
376 
377  if(data->docker_network) {
378 // offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s", tuple->ipaddr);
379  offset += snprintf(buffer+offset, max-offset, " --net=%s", data->docker_network);
380  }
381 
/* Tell pacemaker_remoted inside the container which port to listen on */
382  if(data->control_port) {
383  offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%s", data->control_port);
384  } else {
385  offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
386  }
387 
/* Map each configured storage mount; nonzero flags means the host side is a
 * per-replica subdirectory that is also listed in mount_points so the agent
 * can create it.
 */
388  for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
389  container_mount_t *mount = pIter->data;
390 
391  if(mount->flags) {
392  char *source = crm_strdup_printf(
393  "%s/%s-%d", mount->source, data->prefix, tuple->offset);
394 
395  if(doffset > 0) {
396  doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
397  }
398  doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
399  offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, source);
400  if(mount->options) {
401  offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
402  }
403  offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
404  free(source);
405 
406  } else {
407  offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, mount->source);
408  if(mount->options) {
409  offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
410  }
411  offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
412  }
413  volid++;
414  }
415 
/* Publish each configured port, binding to the replica's IP when we have one */
416  for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
417  container_port_t *port = pIter->data;
418 
419  if(tuple->ipaddr) {
420  offset += snprintf(buffer+offset, max-offset, " --port=%s:%s:%s",
421  port->target, tuple->ipaddr, port->source);
422  } else {
423  offset += snprintf(buffer+offset, max-offset, " --port=%s:%s", port->target, port->source);
424  }
425  }
426 
427  if(data->docker_run_options) {
428  offset += snprintf(buffer+offset, max-offset, " %s", data->docker_run_options);
429  }
430 
431  if(data->docker_host_options) {
432  offset += snprintf(buffer+offset, max-offset, " %s", data->docker_host_options);
433  }
434 
435  crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
436  free(buffer);
437 
438  crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
439  free(dbuffer);
440 
/* With a child resource, run pacemaker_remoted inside the container (unless
 * overridden); without one, just run whatever the image or user specifies.
 */
441  if(tuple->child) {
442  if(data->docker_run_command) {
443  crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", data->docker_run_command);
444  } else {
445  crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", SBIN_DIR"/pacemaker_remoted");
446  }
447 
448  /* TODO: Allow users to specify their own?
449  *
450  * We just want to know if the container is alive, we'll
451  * monitor the child independently
452  */
453  crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
454  /* } else if(child && data->untrusted) {
455  * Support this use-case?
456  *
457  * The ability to have resources started/stopped by us, but
458  * unable to set attributes, etc.
459  *
460  * Arguably better to control API access this with ACLs like
461  * "normal" remote nodes
462  *
463  * crm_create_nvpair_xml(xml_obj, NULL,
464  * "run_cmd", "/usr/libexec/pacemaker/lrmd");
465  * crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
466  * "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
467  */
468  } else {
469  if(data->docker_run_command) {
470  crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
471  data->docker_run_command);
472  }
473 
474  /* TODO: Allow users to specify their own?
475  *
476  * We don't know what's in the container, so we just want
477  * to know if it is alive
478  */
479  crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
480  }
481 
482 
483  xml_obj = create_xml_node(xml_docker, "operations");
484  crm_create_op_xml(xml_obj, ID(xml_docker), "monitor", "60s", NULL);
485 
486  // TODO: Other ops? Timeouts and intervals from underlying resource?
487 
488  crm_log_xml_trace(xml_docker, "Container-rkt");
489  if (common_unpack(xml_docker, &tuple->docker, parent, data_set) == FALSE) {
490  return FALSE;
491  }
492  parent->children = g_list_append(parent->children, tuple->docker);
493  return TRUE;
494 }
495 
502 static void
503 disallow_node(resource_t *rsc, const char *uname)
504 {
505  gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname);
506 
507  if (match) {
508  ((pe_node_t *) match)->weight = -INFINITY;
509  ((pe_node_t *) match)->rsc_discover_mode = pe_discover_never;
510  }
511  if (rsc->children) {
512  GListPtr child;
513 
514  for (child = rsc->children; child != NULL; child = child->next) {
515  disallow_node((resource_t *) (child->data), uname);
516  }
517  }
518 }
519 
/*!
 * \internal
 * \brief Create the implicit remote connection resource for a bundle replica
 *
 * When the replica has a child resource and usable networking, this creates
 * an ocf:pacemaker:remote connection to the container, creates (or reuses)
 * the corresponding guest node entry, bans every other resource from that
 * node, and appends the connection to the bundle's children.
 *
 * \return TRUE on success (or nothing to do), FALSE on unpack failure
 *
 * NOTE(review): doxygen line 583 (immediately after the pe_find_node()
 * if/else) appears to have been dropped by the extraction — presumably a
 * statement applied to the node in both branches; confirm against upstream.
 */
520 static bool
521 create_remote_resource(
522  resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
523  pe_working_set_t * data_set)
524 {
525  if (tuple->child && valid_network(data)) {
526  GHashTableIter gIter;
527  GListPtr rsc_iter = NULL;
528  node_t *node = NULL;
529  xmlNode *xml_remote = NULL;
530  char *id = crm_strdup_printf("%s-%d", data->prefix, tuple->offset);
531  char *port_s = NULL;
532  const char *uname = NULL;
533  const char *connect_name = NULL;
534 
/* The preferred connection id could already be taken by another remote node;
 * fall back to a name that embeds the child's id to make it unique.
 */
535  if (remote_id_conflict(id, data_set)) {
536  free(id);
537  // The biggest hammer we have
538  id = crm_strdup_printf("pcmk-internal-%s-remote-%d", tuple->child->id, tuple->offset);
539  CRM_ASSERT(remote_id_conflict(id, data_set) == FALSE);
540  }
541 
542  /* REMOTE_CONTAINER_HACK: Using "#uname" as the server name when the
543  * connection does not have its own IP is a magic string that we use to
544  * support nested remotes (i.e. a bundle running on a remote node).
545  */
546  connect_name = (tuple->ipaddr? tuple->ipaddr : "#uname");
547 
548  if (data->control_port == NULL) {
549  port_s = crm_itoa(DEFAULT_REMOTE_PORT);
550  }
551 
552  /* This sets tuple->docker as tuple->remote's container, which is
553  * similar to what happens with guest nodes. This is how the PE knows
554  * that the bundle node is fenced by recovering docker, and that
555  * remote should be ordered relative to docker.
556  */
557  xml_remote = pe_create_remote_xml(NULL, id, tuple->docker->id,
558  XML_BOOLEAN_FALSE, NULL, "60s", NULL,
559  NULL, connect_name,
560  (data->control_port?
561  data->control_port : port_s));
562  free(port_s);
563 
564  /* Abandon our created ID, and pull the copy from the XML, because we
565  * need something that will get freed during data set cleanup to use as
566  * the node ID and uname.
567  */
568  free(id);
569  id = NULL;
570  uname = ID(xml_remote);
571 
572  /* Ensure a node has been created for the guest (it may have already
573  * been, if it has a permanent node attribute), and ensure its weight is
574  * -INFINITY so no other resources can run on it.
575  */
576  node = pe_find_node(data_set->nodes, uname);
577  if (node == NULL) {
578  node = pe_create_node(uname, uname, "remote", "-INFINITY",
579  data_set);
580  } else {
581  node->weight = -INFINITY;
582  }
584 
585  /* unpack_remote_nodes() ensures that each remote node and guest node
586  * has a pe_node_t entry. Ideally, it would do the same for bundle nodes.
587  * Unfortunately, a bundle has to be mostly unpacked before it's obvious
588  * what nodes will be needed, so we do it just above.
589  *
590  * Worse, that means that the node may have been utilized while
591  * unpacking other resources, without our weight correction. The most
592  * likely place for this to happen is when common_unpack() calls
593  * resource_location() to set a default score in symmetric clusters.
594  * This adds a node *copy* to each resource's allowed nodes, and these
595  * copies will have the wrong weight.
596  *
597  * As a hacky workaround, fix those copies here.
598  *
599  * @TODO Possible alternative: ensure bundles are unpacked before other
600  * resources, so the weight is correct before any copies are made.
601  */
602  for (rsc_iter = data_set->resources; rsc_iter; rsc_iter = rsc_iter->next) {
603  disallow_node((resource_t *) (rsc_iter->data), uname);
604  }
605 
/* The replica itself prefers its own bundle node (weight 500) and is only
 * probed there, never elsewhere.
 */
606  tuple->node = node_copy(node);
607  tuple->node->weight = 500;
608  tuple->node->rsc_discover_mode = pe_discover_exclusive;
609 
610  /* Ensure the node shows up as allowed and with the correct discovery set */
611  g_hash_table_destroy(tuple->child->allowed_nodes);
612  tuple->child->allowed_nodes = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, g_hash_destroy_str);
613  g_hash_table_insert(tuple->child->allowed_nodes, (gpointer) tuple->node->details->id, node_copy(tuple->node));
614 
/* The child's parent (the implicit clone/master), however, must never be
 * allocated to the bundle node directly.
 */
615  {
616  node_t *copy = node_copy(tuple->node);
617  copy->weight = -INFINITY;
618  g_hash_table_insert(tuple->child->parent->allowed_nodes, (gpointer) tuple->node->details->id, copy);
619  }
620  crm_log_xml_trace(xml_remote, "Container-remote");
621  if (common_unpack(xml_remote, &tuple->remote, parent, data_set) == FALSE) {
622  return FALSE;
623  }
624 
625  g_hash_table_iter_init(&gIter, tuple->remote->allowed_nodes);
626  while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) {
627  if(is_remote_node(node)) {
628  /* Remote resources can only run on 'normal' cluster node */
629  node->weight = -INFINITY;
630  }
631  }
632 
633  tuple->node->details->remote_rsc = tuple->remote;
634  tuple->remote->container = tuple->docker; // Ensures is_container_remote_node() functions correctly immediately
635 
636  /* A bundle's #kind is closer to "container" (guest node) than the
637  * "remote" set by pe_create_node().
638  */
639  g_hash_table_insert(tuple->node->details->attrs,
640  strdup(CRM_ATTR_KIND), strdup("container"));
641 
642  /* One effect of this is that setup_container() will add
643  * tuple->remote to tuple->docker's fillers, which will make
644  * rsc_contains_remote_node() true for tuple->docker.
645  *
646  * tuple->child does NOT get added to tuple->docker's fillers.
647  * The only noticeable effect if it did would be for its fail count to
648  * be taken into account when checking tuple->docker's migration
649  * threshold.
650  */
651  parent->children = g_list_append(parent->children, tuple->remote);
652  }
653  return TRUE;
654 }
655 
656 static bool
657 create_container(
658  resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
659  pe_working_set_t * data_set)
660 {
661 
662  if (data->type == PE_CONTAINER_TYPE_DOCKER &&
663  create_docker_resource(parent, data, tuple, data_set) == FALSE) {
664  return FALSE;
665  }
666  if (data->type == PE_CONTAINER_TYPE_RKT &&
667  create_rkt_resource(parent, data, tuple, data_set) == FALSE) {
668  return FALSE;
669  }
670 
671  if(create_ip_resource(parent, data, tuple, data_set) == FALSE) {
672  return FALSE;
673  }
674  if(create_remote_resource(parent, data, tuple, data_set) == FALSE) {
675  return FALSE;
676  }
677  if(tuple->child && tuple->ipaddr) {
678  add_hash_param(tuple->child->meta, "external-ip", tuple->ipaddr);
679  }
680 
681  if(tuple->remote) {
682  /*
683  * Allow the remote connection resource to be allocated to a
684  * different node than the one on which the docker container
685  * is active.
686  *
687  * Makes it possible to have remote nodes, running docker
688  * containers with pacemaker_remoted inside in order to start
689  * services inside those containers.
690  */
691  set_bit(tuple->remote->flags, pe_rsc_allow_remote_remotes);
692  }
693 
694  return TRUE;
695 }
696 
697 static void
698 mount_add(container_variant_data_t *container_data, const char *source,
699  const char *target, const char *options, int flags)
700 {
701  container_mount_t *mount = calloc(1, sizeof(container_mount_t));
702 
703  mount->source = strdup(source);
704  mount->target = strdup(target);
705  if (options) {
706  mount->options = strdup(options);
707  }
708  mount->flags = flags;
709  container_data->mounts = g_list_append(container_data->mounts, mount);
710 }
711 
712 static void mount_free(container_mount_t *mount)
713 {
714  free(mount->source);
715  free(mount->target);
716  free(mount->options);
717  free(mount);
718 }
719 
720 static void port_free(container_port_t *port)
721 {
722  free(port->source);
723  free(port->target);
724  free(port);
725 }
726 
727 static container_grouping_t *
728 tuple_for_remote(resource_t *remote)
729 {
730  resource_t *top = remote;
731  container_variant_data_t *container_data = NULL;
732 
733  if (top == NULL) {
734  return NULL;
735  }
736 
737  while (top->parent != NULL) {
738  top = top->parent;
739  }
740 
741  get_container_variant_data(container_data, top);
742  for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
743  container_grouping_t *tuple = (container_grouping_t *)gIter->data;
744  if(tuple->remote == remote) {
745  return tuple;
746  }
747  }
748  CRM_LOG_ASSERT(FALSE);
749  return NULL;
750 }
751 
/*!
 * \internal
 * \brief Check whether a remote connection resource needs its address fixed
 *
 * Part of REMOTE_CONTAINER_HACK: returns TRUE only when the resource's
 * "addr" parameter is the magic "#uname" value and its XML attributes match
 * the expected values in attr_list/value_list, meaning the real address must
 * be substituted from the node hosting the container (see
 * container_fix_remote_addr_in()).
 *
 * NOTE(review): doxygen source lines 753 (the function signature taking the
 * resource to check) and 758-760 / 764 (the attr_list entries and one
 * value_list entry) appear to have been dropped by the extraction — restore
 * them from upstream before relying on this listing.
 */
752 bool
754 {
755  const char *name;
756  const char *value;
757  const char *attr_list[] = {
761  };
762  const char *value_list[] = {
763  "remote",
765  "pacemaker"
766  };
767 
768  if(rsc == NULL) {
769  return FALSE;
770  }
771 
/* Only the magic "#uname" placeholder address is subject to fixing */
772  name = "addr";
773  value = g_hash_table_lookup(rsc->parameters, name);
774  if (safe_str_eq(value, "#uname") == FALSE) {
775  return FALSE;
776  }
777 
/* Every expected XML attribute must match for this to be a bundle remote */
778  for (int lpc = 0; lpc < DIMOF(attr_list); lpc++) {
779  name = attr_list[lpc];
780  value = crm_element_value(rsc->xml, attr_list[lpc]);
781  if (safe_str_eq(value, value_list[lpc]) == FALSE) {
782  return FALSE;
783  }
784  }
785  return TRUE;
786 }
787 
788 const char *
789 container_fix_remote_addr_in(resource_t *rsc, xmlNode *xml, const char *field)
790 {
791  // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside
792 
793  pe_node_t *node = NULL;
794  container_grouping_t *tuple = NULL;
795 
796  if(container_fix_remote_addr(rsc) == FALSE) {
797  return NULL;
798  }
799 
800  tuple = tuple_for_remote(rsc);
801  if(tuple == NULL) {
802  return NULL;
803  }
804 
805  node = tuple->docker->allocated_to;
806  if(node == NULL && tuple->docker->running_on) {
807  /* If it won't be running anywhere after the
808  * transition, go with where it's running now.
809  */
810  node = tuple->docker->running_on->data;
811  }
812 
813  if(node == NULL) {
814  crm_trace("Cannot fix address for %s", tuple->remote->id);
815  return NULL;
816  }
817 
818  crm_trace("Fixing addr for %s on %s", rsc->id, node->details->uname);
819  if(xml != NULL && field != NULL) {
820  crm_xml_add(xml, field, node->details->uname);
821  }
822 
823  return node->details->uname;
824 }
825 
826 gboolean
828 {
829  const char *value = NULL;
830  xmlNode *xml_obj = NULL;
831  xmlNode *xml_resource = NULL;
832  container_variant_data_t *container_data = NULL;
833 
834  CRM_ASSERT(rsc != NULL);
835  pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
836 
837  container_data = calloc(1, sizeof(container_variant_data_t));
838  rsc->variant_opaque = container_data;
839  container_data->prefix = strdup(rsc->id);
840 
841  xml_obj = first_named_child(rsc->xml, "docker");
842  if (xml_obj != NULL) {
843  container_data->type = PE_CONTAINER_TYPE_DOCKER;
844  } else {
845  xml_obj = first_named_child(rsc->xml, "rkt");
846  if (xml_obj != NULL) {
847  container_data->type = PE_CONTAINER_TYPE_RKT;
848  } else {
849  return FALSE;
850  }
851  }
852 
853  value = crm_element_value(xml_obj, "masters");
854  container_data->masters = crm_parse_int(value, "0");
855  if (container_data->masters < 0) {
856  pe_err("'masters' for %s must be nonnegative integer, using 0",
857  rsc->id);
858  container_data->masters = 0;
859  }
860 
861  value = crm_element_value(xml_obj, "replicas");
862  if ((value == NULL) && (container_data->masters > 0)) {
863  container_data->replicas = container_data->masters;
864  } else {
865  container_data->replicas = crm_parse_int(value, "1");
866  }
867  if (container_data->replicas < 1) {
868  pe_err("'replicas' for %s must be positive integer, using 1", rsc->id);
869  container_data->replicas = 1;
870  }
871 
872  /*
873  * Communication between containers on the same host via the
874  * floating IPs only works if docker is started with:
875  * --userland-proxy=false --ip-masq=false
876  */
877  value = crm_element_value(xml_obj, "replicas-per-host");
878  container_data->replicas_per_host = crm_parse_int(value, "1");
879  if (container_data->replicas_per_host < 1) {
880  pe_err("'replicas-per-host' for %s must be positive integer, using 1",
881  rsc->id);
882  container_data->replicas_per_host = 1;
883  }
884  if (container_data->replicas_per_host == 1) {
886  }
887 
888  container_data->docker_run_command = crm_element_value_copy(xml_obj, "run-command");
889  container_data->docker_run_options = crm_element_value_copy(xml_obj, "options");
890  container_data->image = crm_element_value_copy(xml_obj, "image");
891  container_data->docker_network = crm_element_value_copy(xml_obj, "network");
892 
893  xml_obj = first_named_child(rsc->xml, "network");
894  if(xml_obj) {
895 
896  container_data->ip_range_start = crm_element_value_copy(xml_obj, "ip-range-start");
897  container_data->host_netmask = crm_element_value_copy(xml_obj, "host-netmask");
898  container_data->host_network = crm_element_value_copy(xml_obj, "host-interface");
899  container_data->control_port = crm_element_value_copy(xml_obj, "control-port");
900 
901  for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL;
902  xml_child = __xml_next_element(xml_child)) {
903 
904  container_port_t *port = calloc(1, sizeof(container_port_t));
905  port->source = crm_element_value_copy(xml_child, "port");
906 
907  if(port->source == NULL) {
908  port->source = crm_element_value_copy(xml_child, "range");
909  } else {
910  port->target = crm_element_value_copy(xml_child, "internal-port");
911  }
912 
913  if(port->source != NULL && strlen(port->source) > 0) {
914  if(port->target == NULL) {
915  port->target = strdup(port->source);
916  }
917  container_data->ports = g_list_append(container_data->ports, port);
918 
919  } else {
920  pe_err("Invalid port directive %s", ID(xml_child));
921  port_free(port);
922  }
923  }
924  }
925 
926  xml_obj = first_named_child(rsc->xml, "storage");
927  for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL;
928  xml_child = __xml_next_element(xml_child)) {
929 
930  const char *source = crm_element_value(xml_child, "source-dir");
931  const char *target = crm_element_value(xml_child, "target-dir");
932  const char *options = crm_element_value(xml_child, "options");
933  int flags = 0;
934 
935  if (source == NULL) {
936  source = crm_element_value(xml_child, "source-dir-root");
937  flags = 1;
938  }
939 
940  if (source && target) {
941  mount_add(container_data, source, target, options, flags);
942  } else {
943  pe_err("Invalid mount directive %s", ID(xml_child));
944  }
945  }
946 
947  xml_obj = first_named_child(rsc->xml, "primitive");
948  if (xml_obj && valid_network(container_data)) {
949  char *value = NULL;
950  xmlNode *xml_set = NULL;
951 
952  if(container_data->masters > 0) {
953  xml_resource = create_xml_node(NULL, XML_CIB_TAG_MASTER);
954 
955  } else {
956  xml_resource = create_xml_node(NULL, XML_CIB_TAG_INCARNATION);
957  }
958 
959  crm_xml_set_id(xml_resource, "%s-%s", container_data->prefix, xml_resource->name);
960 
961  xml_set = create_xml_node(xml_resource, XML_TAG_META_SETS);
962  crm_xml_set_id(xml_set, "%s-%s-meta", container_data->prefix, xml_resource->name);
963 
964  crm_create_nvpair_xml(xml_set, NULL,
966 
967  value = crm_itoa(container_data->replicas);
968  crm_create_nvpair_xml(xml_set, NULL,
970  free(value);
971 
972  value = crm_itoa(container_data->replicas_per_host);
973  crm_create_nvpair_xml(xml_set, NULL,
975  free(value);
976 
978  (container_data->replicas_per_host > 1)?
980 
981  if(container_data->masters) {
982  value = crm_itoa(container_data->masters);
983  crm_create_nvpair_xml(xml_set, NULL,
984  XML_RSC_ATTR_MASTER_MAX, value);
985  free(value);
986  }
987 
988  //crm_xml_add(xml_obj, XML_ATTR_ID, container_data->prefix);
989  add_node_copy(xml_resource, xml_obj);
990 
991  } else if(xml_obj) {
992  pe_err("Cannot control %s inside %s without either ip-range-start or control-port",
993  rsc->id, ID(xml_obj));
994  return FALSE;
995  }
996 
997  if(xml_resource) {
998  int lpc = 0;
999  GListPtr childIter = NULL;
1000  resource_t *new_rsc = NULL;
1001  container_port_t *port = NULL;
1002  const char *key_loc = NULL;
1003 
1004  int offset = 0, max = 1024;
1005  char *buffer = NULL;
1006 
1007  if (common_unpack(xml_resource, &new_rsc, rsc, data_set) == FALSE) {
1008  pe_err("Failed unpacking resource %s", ID(rsc->xml));
1009  if (new_rsc != NULL && new_rsc->fns != NULL) {
1010  new_rsc->fns->free(new_rsc);
1011  }
1012  return FALSE;
1013  }
1014 
1015  container_data->child = new_rsc;
1016 
1017  /* We map the remote authentication key (likely) used on the DC to the
1018  * default key location inside the container. This is only the likely
1019  * location because an actual connection will do some validity checking
1020  * on the file before using it.
1021  *
1022  * Mapping to the default location inside the container avoids having to
1023  * pass another environment variable to the container.
1024  *
1025  * This makes several assumptions:
1026  * - if PCMK_authkey_location is set, it has the same value on all nodes
1027  * - the container technology does not propagate host environment
1028  * variables to the container
1029  * - the user does not set this environment variable via their container
1030  * image
1031  *
1032  * @TODO A convoluted but possible way around the first limitation would
1033  * be to allow a resource parameter to include environment
1034  * variable references in its value, and resolve them on the
1035  * executing node's crmd before sending the command to the lrmd.
1036  */
1037  key_loc = getenv("PCMK_authkey_location");
1038  if (key_loc == NULL) {
1039  key_loc = DEFAULT_REMOTE_KEY_LOCATION;
1040  }
1041  mount_add(container_data, key_loc, DEFAULT_REMOTE_KEY_LOCATION, NULL,
1042  0);
1043 
1044  mount_add(container_data, CRM_LOG_DIR "/bundles", "/var/log", NULL, 1);
1045 
1046  port = calloc(1, sizeof(container_port_t));
1047  if(container_data->control_port) {
1048  port->source = strdup(container_data->control_port);
1049  } else {
1050  /* If we wanted to respect PCMK_remote_port, we could use
1051  * crm_default_remote_port() here and elsewhere in this file instead
1052  * of DEFAULT_REMOTE_PORT.
1053  *
1054  * However, it gains nothing, since we control both the container
1055  * environment and the connection resource parameters, and the user
1056  * can use a different port if desired by setting control-port.
1057  */
1058  port->source = crm_itoa(DEFAULT_REMOTE_PORT);
1059  }
1060  port->target = strdup(port->source);
1061  container_data->ports = g_list_append(container_data->ports, port);
1062 
1063  buffer = calloc(1, max+1);
1064  for(childIter = container_data->child->children; childIter != NULL; childIter = childIter->next) {
1065  container_grouping_t *tuple = calloc(1, sizeof(container_grouping_t));
1066  tuple->child = childIter->data;
1067  tuple->child->exclusive_discover = TRUE;
1068  tuple->offset = lpc++;
1069 
1070  // Ensure the child's notify gets set based on the underlying primitive's value
1071  if(is_set(tuple->child->flags, pe_rsc_notify)) {
1072  set_bit(container_data->child->flags, pe_rsc_notify);
1073  }
1074 
1075  offset += allocate_ip(container_data, tuple, buffer+offset, max-offset);
1076  container_data->tuples = g_list_append(container_data->tuples, tuple);
1077  container_data->attribute_target = g_hash_table_lookup(tuple->child->meta, XML_RSC_ATTR_TARGET);
1078  }
1079  container_data->docker_host_options = buffer;
1080  if(container_data->attribute_target) {
1081  g_hash_table_replace(rsc->meta, strdup(XML_RSC_ATTR_TARGET), strdup(container_data->attribute_target));
1082  g_hash_table_replace(container_data->child->meta, strdup(XML_RSC_ATTR_TARGET), strdup(container_data->attribute_target));
1083  }
1084 
1085  } else {
1086  // Just a naked container, no pacemaker-remote
1087  int offset = 0, max = 1024;
1088  char *buffer = calloc(1, max+1);
1089 
1090  for(int lpc = 0; lpc < container_data->replicas; lpc++) {
1091  container_grouping_t *tuple = calloc(1, sizeof(container_grouping_t));
1092  tuple->offset = lpc;
1093  offset += allocate_ip(container_data, tuple, buffer+offset, max-offset);
1094  container_data->tuples = g_list_append(container_data->tuples, tuple);
1095  }
1096 
1097  container_data->docker_host_options = buffer;
1098  }
1099 
1100  for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
1101  container_grouping_t *tuple = (container_grouping_t *)gIter->data;
1102  if (create_container(rsc, container_data, tuple, data_set) == FALSE) {
1103  pe_err("Failed unpacking resource %s", rsc->id);
1104  rsc->fns->free(rsc);
1105  return FALSE;
1106  }
1107  }
1108 
1109  if(container_data->child) {
1110  rsc->children = g_list_append(rsc->children, container_data->child);
1111  }
1112  return TRUE;
1113 }
1114 
1115 static int
1116 tuple_rsc_active(resource_t *rsc, gboolean all)
1117 {
1118  if (rsc) {
1119  gboolean child_active = rsc->fns->active(rsc, all);
1120 
1121  if (child_active && !all) {
1122  return TRUE;
1123  } else if (!child_active && all) {
1124  return FALSE;
1125  }
1126  }
1127  return -1;
1128 }
1129 
1130 gboolean
1131 container_active(resource_t * rsc, gboolean all)
1132 {
1133  container_variant_data_t *container_data = NULL;
1134  GListPtr iter = NULL;
1135 
1136  get_container_variant_data(container_data, rsc);
1137  for (iter = container_data->tuples; iter != NULL; iter = iter->next) {
1138  container_grouping_t *tuple = (container_grouping_t *)(iter->data);
1139  int rsc_active;
1140 
1141  rsc_active = tuple_rsc_active(tuple->ip, all);
1142  if (rsc_active >= 0) {
1143  return (gboolean) rsc_active;
1144  }
1145 
1146  rsc_active = tuple_rsc_active(tuple->child, all);
1147  if (rsc_active >= 0) {
1148  return (gboolean) rsc_active;
1149  }
1150 
1151  rsc_active = tuple_rsc_active(tuple->docker, all);
1152  if (rsc_active >= 0) {
1153  return (gboolean) rsc_active;
1154  }
1155 
1156  rsc_active = tuple_rsc_active(tuple->remote, all);
1157  if (rsc_active >= 0) {
1158  return (gboolean) rsc_active;
1159  }
1160  }
1161 
1162  /* If "all" is TRUE, we've already checked that no resources were inactive,
1163  * so return TRUE; if "all" is FALSE, we didn't find any active resources,
1164  * so return FALSE.
1165  */
1166  return all;
1167 }
1168 
1169 resource_t *
1170 find_container_child(const char *stem, resource_t * rsc, node_t *node)
1171 {
1172  container_variant_data_t *container_data = NULL;
1173  resource_t *parent = uber_parent(rsc);
1174  CRM_ASSERT(parent->parent);
1175 
1176  parent = parent->parent;
1177  get_container_variant_data(container_data, parent);
1178 
1179  if (is_not_set(rsc->flags, pe_rsc_unique)) {
1180  for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
1181  container_grouping_t *tuple = (container_grouping_t *)gIter->data;
1182 
1183  CRM_ASSERT(tuple);
1184  if(tuple->node->details == node->details) {
1185  rsc = tuple->child;
1186  break;
1187  }
1188  }
1189  }
1190 
1191  if (rsc && safe_str_neq(stem, rsc->id)) {
1192  free(rsc->clone_name);
1193  rsc->clone_name = strdup(stem);
1194  }
1195 
1196  return rsc;
1197 }
1198 
1199 static void
1200 print_rsc_in_list(resource_t *rsc, const char *pre_text, long options,
1201  void *print_data)
1202 {
1203  if (rsc != NULL) {
1204  if (options & pe_print_html) {
1205  status_print("<li>");
1206  }
1207  rsc->fns->print(rsc, pre_text, options, print_data);
1208  if (options & pe_print_html) {
1209  status_print("</li>\n");
1210  }
1211  }
1212 }
1213 
1214 static const char*
1215 container_type_as_string(enum container_type t)
1216 {
1217  if (t == PE_CONTAINER_TYPE_DOCKER) {
1218  return PE_CONTAINER_TYPE_DOCKER_S;
1219  } else if (t == PE_CONTAINER_TYPE_RKT) {
1220  return PE_CONTAINER_TYPE_RKT_S;
1221  } else {
1222  return PE_CONTAINER_TYPE_UNKNOWN_S;
1223  }
1224 }
1225 
1226 static void
1227 container_print_xml(resource_t * rsc, const char *pre_text, long options, void *print_data)
1228 {
1229  container_variant_data_t *container_data = NULL;
1230  char *child_text = NULL;
1231  CRM_CHECK(rsc != NULL, return);
1232 
1233  if (pre_text == NULL) {
1234  pre_text = "";
1235  }
1236  child_text = crm_concat(pre_text, " ", ' ');
1237 
1238  get_container_variant_data(container_data, rsc);
1239 
1240  status_print("%s<bundle ", pre_text);
1241  status_print("id=\"%s\" ", rsc->id);
1242 
1243  // Always lowercase the container technology type for use as XML value
1244  status_print("type=\"");
1245  for (const char *c = container_type_as_string(container_data->type);
1246  *c; ++c) {
1247  status_print("%c", tolower(*c));
1248  }
1249  status_print("\" ");
1250 
1251  status_print("image=\"%s\" ", container_data->image);
1252  status_print("unique=\"%s\" ", is_set(rsc->flags, pe_rsc_unique)? "true" : "false");
1253  status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false");
1254  status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false");
1255  status_print(">\n");
1256 
1257  for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
1258  container_grouping_t *tuple = (container_grouping_t *)gIter->data;
1259 
1260  CRM_ASSERT(tuple);
1261  status_print("%s <replica id=\"%d\">\n", pre_text, tuple->offset);
1262  print_rsc_in_list(tuple->ip, child_text, options, print_data);
1263  print_rsc_in_list(tuple->child, child_text, options, print_data);
1264  print_rsc_in_list(tuple->docker, child_text, options, print_data);
1265  print_rsc_in_list(tuple->remote, child_text, options, print_data);
1266  status_print("%s </replica>\n", pre_text);
1267  }
1268  status_print("%s</bundle>\n", pre_text);
1269  free(child_text);
1270 }
1271 
1272 static void
1273 tuple_print(container_grouping_t * tuple, const char *pre_text, long options, void *print_data)
1274 {
1275  node_t *node = NULL;
1276  resource_t *rsc = tuple->child;
1277 
1278  int offset = 0;
1279  char buffer[LINE_MAX];
1280 
1281  if(rsc == NULL) {
1282  rsc = tuple->docker;
1283  }
1284 
1285  if(tuple->remote) {
1286  offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(tuple->remote));
1287  } else {
1288  offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(tuple->docker));
1289  }
1290  if(tuple->ipaddr) {
1291  offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", tuple->ipaddr);
1292  }
1293 
1294  if (tuple->docker->running_on) {
1295  node = tuple->docker->running_on->data;
1296  }
1297  common_print(rsc, pre_text, buffer, node, options, print_data);
1298 }
1299 
/* Print a bundle resource for status display.
 *
 * XML output is delegated to container_print_xml(); otherwise a one-line
 * bundle header is printed, followed by each replica either in detail
 * (pe_print_clone_details: each member resource on its own line) or as a
 * one-line summary via tuple_print(). HTML list markup is interleaved when
 * pe_print_html is set.
 */
void
container_print(resource_t * rsc, const char *pre_text, long options, void *print_data)
{
    container_variant_data_t *container_data = NULL;
    char *child_text = NULL;
    CRM_CHECK(rsc != NULL, return);

    // XML output has a dedicated formatter
    if (options & pe_print_xml) {
        container_print_xml(rsc, pre_text, options, print_data);
        return;
    }

    get_container_variant_data(container_data, rsc);

    if (pre_text == NULL) {
        pre_text = " ";
    }

    // e.g. "docker container set: my-bundle [image] (unique)"
    status_print("%s%s container%s: %s [%s]%s%s\n",
                 pre_text, container_type_as_string(container_data->type),
                 container_data->replicas>1?" set":"", rsc->id, container_data->image,
                 is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                 is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
    if (options & pe_print_html) {
        status_print("<br />\n<ul>\n");
    }


    for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
        container_grouping_t *tuple = (container_grouping_t *)gIter->data;

        CRM_ASSERT(tuple);
        if (options & pe_print_html) {
            status_print("<li>");
        }

        if(is_set(options, pe_print_clone_details)) {
            // Detailed: print each member resource separately
            child_text = crm_strdup_printf("     %s", pre_text);
            if(g_list_length(container_data->tuples) > 1) {
                // Only label replicas when there is more than one
                status_print("  %sReplica[%d]\n", pre_text, tuple->offset);
            }
            if (options & pe_print_html) {
                status_print("<br />\n<ul>\n");
            }
            print_rsc_in_list(tuple->ip, child_text, options, print_data);
            print_rsc_in_list(tuple->docker, child_text, options, print_data);
            print_rsc_in_list(tuple->remote, child_text, options, print_data);
            print_rsc_in_list(tuple->child, child_text, options, print_data);
            if (options & pe_print_html) {
                status_print("</ul>\n");
            }
        } else {
            // Summary: one line per replica
            child_text = crm_strdup_printf("%s  ", pre_text);
            tuple_print(tuple, child_text, options, print_data);
        }
        free(child_text);

        if (options & pe_print_html) {
            status_print("</li>\n");
        }
    }
    if (options & pe_print_html) {
        status_print("</ul>\n");
    }
}
1365 
1366 void
1367 tuple_free(container_grouping_t *tuple)
1368 {
1369  if(tuple == NULL) {
1370  return;
1371  }
1372 
1373  if(tuple->node) {
1374  free(tuple->node);
1375  tuple->node = NULL;
1376  }
1377 
1378  if(tuple->ip) {
1379  free_xml(tuple->ip->xml);
1380  tuple->ip->xml = NULL;
1381  tuple->ip->fns->free(tuple->ip);
1382  tuple->ip = NULL;
1383  }
1384  if(tuple->docker) {
1385  free_xml(tuple->docker->xml);
1386  tuple->docker->xml = NULL;
1387  tuple->docker->fns->free(tuple->docker);
1388  tuple->docker = NULL;
1389  }
1390  if(tuple->remote) {
1391  free_xml(tuple->remote->xml);
1392  tuple->remote->xml = NULL;
1393  tuple->remote->fns->free(tuple->remote);
1394  tuple->remote = NULL;
1395  }
1396  free(tuple->ipaddr);
1397  free(tuple);
1398 }
1399 
1400 void
1402 {
1403  container_variant_data_t *container_data = NULL;
1404  CRM_CHECK(rsc != NULL, return);
1405 
1406  get_container_variant_data(container_data, rsc);
1407  pe_rsc_trace(rsc, "Freeing %s", rsc->id);
1408 
1409  free(container_data->prefix);
1410  free(container_data->image);
1411  free(container_data->control_port);
1412  free(container_data->host_network);
1413  free(container_data->host_netmask);
1414  free(container_data->ip_range_start);
1415  free(container_data->docker_network);
1416  free(container_data->docker_run_options);
1417  free(container_data->docker_run_command);
1418  free(container_data->docker_host_options);
1419 
1420  g_list_free_full(container_data->tuples, (GDestroyNotify)tuple_free);
1421  g_list_free_full(container_data->mounts, (GDestroyNotify)mount_free);
1422  g_list_free_full(container_data->ports, (GDestroyNotify)port_free);
1423  g_list_free(rsc->children);
1424 
1425  if(container_data->child) {
1426  free_xml(container_data->child->xml);
1427  container_data->child->xml = NULL;
1428  container_data->child->fns->free(container_data->child);
1429  }
1430  common_free(rsc);
1431 }
1432 
1433 enum rsc_role_e
1434 container_resource_state(const resource_t * rsc, gboolean current)
1435 {
1436  enum rsc_role_e container_role = RSC_ROLE_UNKNOWN;
1437  return container_role;
1438 }
1439 
1447 int
1449 {
1450  if ((rsc == NULL) || (rsc->variant != pe_container)) {
1451  return 0;
1452  } else {
1453  container_variant_data_t *container_data = NULL;
1454 
1455  get_container_variant_data(container_data, rsc);
1456  return container_data->replicas;
1457  }
1458 }
bool remote_id_conflict(const char *remote_name, pe_working_set_t *data)
Definition: unpack.c:418
#define CRM_CHECK(expr, failure_action)
Definition: logging.h:164
GListPtr nodes
Definition: status.h:106
const char * uname
Definition: status.h:138
xmlNode * xml
Definition: status.h:258
gboolean safe_str_neq(const char *a, const char *b)
Definition: strings.c:150
#define INFINITY
Definition: crm.h:83
int pe_bundle_replicas(const resource_t *rsc)
Get the number of configured replicas in a bundle.
Definition: container.c:1448
#define CRM_ATTR_KIND
Definition: crm.h:100
node_t * node_copy(const node_t *this_node)
Definition: utils.c:127
int weight
Definition: status.h:174
node_t * pe_create_node(const char *id, const char *uname, const char *type, const char *score, pe_working_set_t *data_set)
Definition: unpack.c:356
#define XML_ATTR_TYPE
Definition: msg_xml.h:105
void(* free)(resource_t *)
Definition: complex.h:51
#define XML_BOOLEAN_FALSE
Definition: msg_xml.h:118
gboolean common_unpack(xmlNode *xml_obj, resource_t **rsc, resource_t *parent, pe_working_set_t *data_set)
Definition: complex.c:465
enum pe_obj_types variant
Definition: status.h:264
void common_free(resource_t *rsc)
Definition: complex.c:911
#define status_print(fmt, args...)
Definition: unpack.h:79
int crm_parse_int(const char *text, const char *default_text)
Definition: strings.c:125
char * crm_element_value_copy(xmlNode *data, const char *name)
Definition: xml.c:3869
GListPtr resources
Definition: status.h:107
node_t * pe_find_node(GListPtr node_list, const char *uname)
Definition: status.c:301
#define CRM_LOG_ASSERT(expr)
Definition: logging.h:150
char * clone_name
Definition: status.h:257
resource_t * uber_parent(resource_t *rsc)
Definition: complex.c:897
#define clear_bit(word, bit)
Definition: crm_internal.h:191
#define XML_RSC_ATTR_INCARNATION_MAX
Definition: msg_xml.h:212
GListPtr children
Definition: status.h:300
#define XML_RSC_ATTR_TARGET
Definition: msg_xml.h:203
#define pe_rsc_allow_remote_remotes
Definition: status.h:199
void crm_xml_sanitize_id(char *id)
Sanitize a string so it is usable as an XML ID.
Definition: xml.c:3021
char * id
Definition: status.h:256
GHashTable * parameters
Definition: status.h:297
#define DEFAULT_REMOTE_PORT
Definition: lrmd.h:54
#define DEFAULT_REMOTE_KEY_LOCATION
Definition: lrmd.h:52
#define CRM_LOG_DIR
Definition: config.h:59
#define XML_TAG_ATTR_SETS
Definition: msg_xml.h:184
char uname[MAX_NAME]
Definition: internal.h:53
gboolean is_remote_node(node_t *node)
Definition: remote.c:62
struct node_shared_s * details
Definition: status.h:177
#define set_bit(word, bit)
Definition: crm_internal.h:190
#define PCMK_RESOURCE_CLASS_OCF
Definition: services.h:57
xmlNode * pe_create_remote_xml(xmlNode *parent, const char *uname, const char *container_id, const char *migrateable, const char *is_managed, const char *interval, const char *monitor_timeout, const char *start_timeout, const char *server, const char *port)
Definition: remote.c:158
#define XML_ATTR_ID
Definition: msg_xml.h:102
#define XML_CIB_TAG_RESOURCE
Definition: msg_xml.h:195
#define XML_BOOLEAN_TRUE
Definition: msg_xml.h:117
#define pe_rsc_failed
Definition: status.h:201
resource_object_functions_t * fns
Definition: status.h:265
GHashTable * allowed_nodes
Definition: status.h:291
void * variant_opaque
Definition: status.h:263
#define crm_trace(fmt, args...)
Definition: logging.h:254
xmlNode * add_node_copy(xmlNode *new_parent, xmlNode *xml_node)
Definition: xml.c:2405
xmlNode * crm_create_op_xml(xmlNode *parent, const char *prefix, const char *task, const char *interval, const char *timeout)
Create a CIB XML element for an operation.
Definition: operations.c:439
#define XML_AGENT_ATTR_PROVIDER
Definition: msg_xml.h:254
#define XML_RSC_ATTR_ORDERED
Definition: msg_xml.h:209
#define XML_TAG_META_SETS
Definition: msg_xml.h:185
xmlNode * create_xml_node(xmlNode *parent, const char *name)
Definition: xml.c:2588
const char * crm_element_value(xmlNode *data, const char *name)
Definition: xml.c:5165
unsigned long long flags
Definition: status.h:280
#define XML_RSC_ATTR_INCARNATION_NODEMAX
Definition: msg_xml.h:214
resource_t * parent
Definition: status.h:262
void free_xml(xmlNode *child)
Definition: xml.c:2706
bool container_fix_remote_addr(resource_t *rsc)
Definition: container.c:753
#define XML_RSC_ATTR_UNIQUE
Definition: msg_xml.h:220
gboolean(* active)(resource_t *, gboolean)
Definition: complex.h:48
void common_print(resource_t *rsc, const char *pre_text, const char *name, node_t *node, long options, void *print_data)
Definition: native.c:473
const char * crm_xml_add(xmlNode *node, const char *name, const char *value)
Definition: xml.c:2490
#define XML_RSC_ATTR_MASTER_MAX
Definition: msg_xml.h:215
void(* print)(resource_t *, const char *, long, void *)
Definition: complex.h:47
#define pe_rsc_unique
Definition: status.h:189
gboolean container_unpack(resource_t *rsc, pe_working_set_t *data_set)
Definition: container.c:827
#define SBIN_DIR
Definition: config.h:688
GHashTable * meta
Definition: status.h:296
enum rsc_role_e container_resource_state(const resource_t *rsc, gboolean current)
Definition: container.c:1434
void tuple_free(container_grouping_t *tuple)
Definition: container.c:1367
#define XML_CIB_TAG_INCARNATION
Definition: msg_xml.h:197
void add_hash_param(GHashTable *hash, const char *name, const char *value)
Definition: common.c:423
void crm_xml_set_id(xmlNode *xml, const char *format,...) __attribute__((__format__(__printf__
gboolean container_active(resource_t *rsc, gboolean all)
Definition: container.c:1131
xmlNode * crm_create_nvpair_xml(xmlNode *parent, const char *id, const char *name, const char *value)
Create an XML name/value pair.
Definition: xml.c:4831
#define DIMOF(a)
Definition: crm.h:39
#define pe_rsc_managed
Definition: status.h:184
#define crm_str_hash
Definition: util.h:73
#define CRM_ASSERT(expr)
Definition: error.h:35
char data[0]
Definition: internal.h:58
rsc_role_e
Definition: common.h:81
#define XML_CIB_TAG_MASTER
Definition: msg_xml.h:198
int rsc_discover_mode
Definition: status.h:178
xmlNode * first_named_child(xmlNode *parent, const char *name)
Definition: xml.c:5053
#define crm_log_xml_trace(xml, text)
Definition: logging.h:262
Definition: status.h:173
#define pe_rsc_trace(rsc, fmt, args...)
Definition: internal.h:25
char * crm_concat(const char *prefix, const char *suffix, char join)
Definition: strings.c:32
#define ID(x)
Definition: msg_xml.h:446
#define pe_err(fmt...)
Definition: internal.h:27
char * crm_itoa(int an_int)
Definition: strings.c:60
#define safe_str_eq(a, b)
Definition: util.h:72
char * crm_strdup_printf(char const *format,...) __attribute__((__format__(__printf__
const char * container_fix_remote_addr_in(resource_t *rsc, xmlNode *xml, const char *field)
Definition: container.c:789
void container_print(resource_t *rsc, const char *pre_text, long options, void *print_data)
Definition: container.c:1301
GList * GListPtr
Definition: crm.h:218
#define pe_rsc_notify
Definition: status.h:188
resource_t * find_container_child(const char *stem, resource_t *rsc, node_t *node)
Definition: container.c:1170
void g_hash_destroy_str(gpointer data)
Definition: strings.c:74
const char * rsc_printable_id(resource_t *rsc)
Definition: utils.c:2082
uint64_t flags
Definition: remote.c:156
#define XML_AGENT_ATTR_CLASS
Definition: msg_xml.h:253
void container_free(resource_t *rsc)
Definition: container.c:1401