pacemaker  1.1.18-36d2962a86
Scalable High-Availability cluster resource manager
container.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
3  *
4  * This library is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * This library is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with this library; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #include <crm_internal.h>
20 
21 #include <ctype.h>
22 
23 #include <crm/pengine/rules.h>
24 #include <crm/pengine/status.h>
25 #include <crm/pengine/internal.h>
26 #include <unpack.h>
27 #include <crm/msg_xml.h>
28 
29 #define VARIANT_CONTAINER 1
30 #include "./variant.h"
31 
32 void tuple_free(container_grouping_t *tuple);
33 
static char *
next_ip(const char *last_ip)
{
    /* Return a newly allocated string holding the IPv4 address after
     * last_ip, or NULL if last_ip is not a dotted quad or the usable
     * range (last two octets capped at 254) is exhausted.
     */
    unsigned int quad[4] = { 0, 0, 0, 0 };

    if (sscanf(last_ip, "%u.%u.%u.%u",
               &quad[0], &quad[1], &quad[2], &quad[3]) != 4) {
        /* @TODO check for IPv6 */
        return NULL;
    }

    if (quad[2] > 253) {
        /* Third octet exhausted: no more addresses to hand out */
        return NULL;
    }

    if (quad[3] > 253) {
        /* Roll the fourth octet over into the third */
        ++quad[2];
        quad[3] = 1;
    } else {
        ++quad[3];
    }

    return crm_strdup_printf("%u.%u.%u.%u", quad[0], quad[1], quad[2], quad[3]);
}
60 
61 static int
62 allocate_ip(container_variant_data_t *data, container_grouping_t *tuple, char *buffer, int max)
63 {
64  if(data->ip_range_start == NULL) {
65  return 0;
66 
67  } else if(data->ip_last) {
68  tuple->ipaddr = next_ip(data->ip_last);
69 
70  } else {
71  tuple->ipaddr = strdup(data->ip_range_start);
72  }
73 
74  data->ip_last = tuple->ipaddr;
75 #if 0
76  return snprintf(buffer, max, " --add-host=%s-%d:%s --link %s-docker-%d:%s-link-%d",
77  data->prefix, tuple->offset, tuple->ipaddr,
78  data->prefix, tuple->offset, data->prefix, tuple->offset);
79 #else
80  if (data->type == PE_CONTAINER_TYPE_DOCKER) {
81  return snprintf(buffer, max, " --add-host=%s-%d:%s",
82  data->prefix, tuple->offset, tuple->ipaddr);
83  } else if (data->type == PE_CONTAINER_TYPE_RKT) {
84  return snprintf(buffer, max, " --hosts-entry=%s=%s-%d",
85  tuple->ipaddr, data->prefix, tuple->offset);
86  } else {
87  return 0;
88  }
89 #endif
90 }
91 
92 static xmlNode *
93 create_resource(const char *name, const char *provider, const char *kind)
94 {
95  xmlNode *rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);
96 
97  crm_xml_add(rsc, XML_ATTR_ID, name);
99  crm_xml_add(rsc, XML_AGENT_ATTR_PROVIDER, provider);
100  crm_xml_add(rsc, XML_ATTR_TYPE, kind);
101 
102  return rsc;
103 }
104 
117 static bool
118 valid_network(container_variant_data_t *data)
119 {
120  if(data->ip_range_start) {
121  return TRUE;
122  }
123  if(data->control_port) {
124  if(data->replicas_per_host > 1) {
125  pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix);
126  data->replicas_per_host = 1;
127  /* @TODO to be sure: clear_bit(rsc->flags, pe_rsc_unique); */
128  }
129  return TRUE;
130  }
131  return FALSE;
132 }
133 
/*!
 * \internal
 * \brief Create the implicit IPaddr2 resource for one bundle replica
 *
 * No-op unless ip-range-start is configured; in that case tuple->ipaddr
 * must already have been assigned (see allocate_ip()). The new resource
 * is unpacked as a child of \p parent.
 *
 * \return TRUE on success (or nothing to do), FALSE if unpacking failed
 */
static bool
create_ip_resource(
    resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
    pe_working_set_t * data_set)
{
    if(data->ip_range_start) {
        char *id = NULL;
        xmlNode *xml_ip = NULL;
        xmlNode *xml_obj = NULL;

        id = crm_strdup_printf("%s-ip-%s", data->prefix, tuple->ipaddr);
        xml_ip = create_resource(id, "heartbeat", "IPaddr2");
        free(id);

        xml_obj = create_xml_node(xml_ip, XML_TAG_ATTR_SETS);
        crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset);

        crm_create_nvpair_xml(xml_obj, NULL, "ip", tuple->ipaddr);
        if(data->host_network) {
            // Bind the address to the requested host interface
            crm_create_nvpair_xml(xml_obj, NULL, "nic", data->host_network);
        }

        if(data->host_netmask) {
            crm_create_nvpair_xml(xml_obj, NULL,
                                  "cidr_netmask", data->host_netmask);

        } else {
            // Default to a /32 host address when no netmask was given
            crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", "32");
        }

        xml_obj = create_xml_node(xml_ip, "operations");
        crm_create_op_xml(xml_obj, ID(xml_ip), "monitor", "60s", NULL);

        // TODO: Other ops? Timeouts and intervals from underlying resource?

        if (common_unpack(xml_ip, &tuple->ip, parent, data_set) == false) {
            return FALSE;
        }

        parent->children = g_list_append(parent->children, tuple->ip);
    }
    return TRUE;
}
178 
/*!
 * \internal
 * \brief Create the implicit ocf:heartbeat:docker resource for one replica
 *
 * Builds the primitive XML whose run_opts string encodes the hostname,
 * network, remote port, mounts and port mappings derived from the bundle
 * configuration, then unpacks it as a child of \p parent (into
 * tuple->docker).
 *
 * \return TRUE on success, FALSE if unpacking the result failed
 */
static bool
create_docker_resource(
    resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
    pe_working_set_t * data_set)
{
    // Accumulates the docker run options
    int offset = 0, max = 4096;
    char *buffer = calloc(1, max+1);

    /* Accumulates the comma-separated list of per-replica source
     * directories for the agent's mount_points parameter */
    int doffset = 0, dmax = 1024;
    char *dbuffer = calloc(1, dmax+1);

    char *id = NULL;
    xmlNode *xml_docker = NULL;
    xmlNode *xml_obj = NULL;

    id = crm_strdup_printf("%s-docker-%d", data->prefix, tuple->offset);
    xml_docker = create_resource(id, "heartbeat", "docker");
    free(id);

    xml_obj = create_xml_node(xml_docker, XML_TAG_ATTR_SETS);
    crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset);

    crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
    crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
    crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
    crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);

    // The cluster, not docker, is responsible for restarting the container
    offset += snprintf(buffer+offset, max-offset, " --restart=no");

    /* Set a container hostname only if we have an IP to map it to.
     * The user can set -h or --uts=host themselves if they want a nicer
     * name for logs, but this makes applications happy who need their
     * hostname to match the IP they bind to.
     */
    if (data->ip_range_start != NULL) {
        offset += snprintf(buffer+offset, max-offset, " -h %s-%d",
                           data->prefix, tuple->offset);
    }

    if(data->docker_network) {
//        offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s", tuple->ipaddr);
        offset += snprintf(buffer+offset, max-offset, " --net=%s", data->docker_network);
    }

    // Tell pacemaker_remoted inside the container which port to listen on
    if(data->control_port) {
        offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port);
    } else {
        offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
    }

    for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
        container_mount_t *mount = pIter->data;

        if(mount->flags) {
            /* Nonzero flags means source-dir-root: each replica mounts its
             * own subdirectory, which the agent must create (mount_points)
             */
            char *source = crm_strdup_printf(
                "%s/%s-%d", mount->source, data->prefix, tuple->offset);

            if(doffset > 0) {
                doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
            }
            doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
            offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target);
            free(source);

        } else {
            offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target);
        }
        if(mount->options) {
            offset += snprintf(buffer+offset, max-offset, ":%s", mount->options);
        }
    }

    for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
        container_port_t *port = pIter->data;

        if(tuple->ipaddr) {
            // Publish the port on this replica's floating IP only
            offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s",
                               tuple->ipaddr, port->source, port->target);
        } else {
            offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target);
        }
    }

    // User-specified extra options come last so they can override ours
    if(data->docker_run_options) {
        offset += snprintf(buffer+offset, max-offset, " %s", data->docker_run_options);
    }

    if(data->docker_host_options) {
        offset += snprintf(buffer+offset, max-offset, " %s", data->docker_host_options);
    }

    crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
    free(buffer);

    crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
    free(dbuffer);

    if(tuple->child) {
        // The bundle hosts a primitive: run pacemaker_remoted inside
        if(data->docker_run_command) {
            crm_create_nvpair_xml(xml_obj, NULL,
                                  "run_cmd", data->docker_run_command);
        } else {
            crm_create_nvpair_xml(xml_obj, NULL,
                                  "run_cmd", SBIN_DIR "/pacemaker_remoted");
        }

        /* TODO: Allow users to specify their own?
         *
         * We just want to know if the container is alive, we'll
         * monitor the child independently
         */
        crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
        /* } else if(child && data->untrusted) {
         * Support this use-case?
         *
         * The ability to have resources started/stopped by us, but
         * unable to set attributes, etc.
         *
         * Arguably better to control API access this with ACLs like
         * "normal" remote nodes
         *
         *     crm_create_nvpair_xml(xml_obj, NULL,
         *                           "run_cmd", "/usr/libexec/pacemaker/lrmd");
         *     crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
         *         "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
         */
    } else {
        if(data->docker_run_command) {
            crm_create_nvpair_xml(xml_obj, NULL,
                                  "run_cmd", data->docker_run_command);
        }

        /* TODO: Allow users to specify their own?
         *
         * We don't know what's in the container, so we just want
         * to know if it is alive
         */
        crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
    }


    xml_obj = create_xml_node(xml_docker, "operations");
    crm_create_op_xml(xml_obj, ID(xml_docker), "monitor", "60s", NULL);

    // TODO: Other ops? Timeouts and intervals from underlying resource?

    if (common_unpack(xml_docker, &tuple->docker, parent, data_set) == FALSE) {
        return FALSE;
    }
    parent->children = g_list_append(parent->children, tuple->docker);
    return TRUE;
}
332 
/*!
 * \internal
 * \brief Create the implicit ocf:heartbeat:rkt resource for one replica
 *
 * Mirrors create_docker_resource() using rkt's command-line syntax:
 * each mount becomes a numbered --volume/--mount pair, and ports use
 * --port. The result is unpacked as a child of \p parent (into
 * tuple->docker).
 *
 * \return TRUE on success, FALSE if unpacking the result failed
 */
static bool
create_rkt_resource(
    resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
    pe_working_set_t * data_set)
{
    // Accumulates the rkt run options
    int offset = 0, max = 4096;
    char *buffer = calloc(1, max+1);

    /* Accumulates the comma-separated list of per-replica source
     * directories for the agent's mount_points parameter */
    int doffset = 0, dmax = 1024;
    char *dbuffer = calloc(1, dmax+1);

    char *id = NULL;
    // Named xml_docker only for symmetry with create_docker_resource()
    xmlNode *xml_docker = NULL;
    xmlNode *xml_obj = NULL;

    // rkt volumes/mounts are referenced by a numeric id we assign here
    int volid = 0;

    id = crm_strdup_printf("%s-rkt-%d", data->prefix, tuple->offset);
    xml_docker = create_resource(id, "heartbeat", "rkt");
    free(id);

    xml_obj = create_xml_node(xml_docker, XML_TAG_ATTR_SETS);
    crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset);

    crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
    crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", "true");
    crm_create_nvpair_xml(xml_obj, NULL, "force_kill", "false");
    crm_create_nvpair_xml(xml_obj, NULL, "reuse", "false");

    /* Set a container hostname only if we have an IP to map it to.
     * The user can set -h or --uts=host themselves if they want a nicer
     * name for logs, but this makes applications happy who need their
     * hostname to match the IP they bind to.
     */
    if (data->ip_range_start != NULL) {
        offset += snprintf(buffer+offset, max-offset, " --hostname=%s-%d",
                           data->prefix, tuple->offset);
    }

    if(data->docker_network) {
//        offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s", tuple->ipaddr);
        offset += snprintf(buffer+offset, max-offset, " --net=%s", data->docker_network);
    }

    // Tell pacemaker_remoted inside the container which port to listen on
    if(data->control_port) {
        offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%s", data->control_port);
    } else {
        offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
    }

    for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
        container_mount_t *mount = pIter->data;

        if(mount->flags) {
            /* Nonzero flags means source-dir-root: each replica mounts its
             * own subdirectory, which the agent must create (mount_points)
             */
            char *source = crm_strdup_printf(
                "%s/%s-%d", mount->source, data->prefix, tuple->offset);

            if(doffset > 0) {
                doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
            }
            doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
            offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, source);
            if(mount->options) {
                offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
            }
            offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
            free(source);

        } else {
            offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, mount->source);
            if(mount->options) {
                offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
            }
            offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
        }
        volid++;
    }

    for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
        container_port_t *port = pIter->data;

        if(tuple->ipaddr) {
            // Publish the port on this replica's floating IP only
            offset += snprintf(buffer+offset, max-offset, " --port=%s:%s:%s",
                               port->target, tuple->ipaddr, port->source);
        } else {
            offset += snprintf(buffer+offset, max-offset, " --port=%s:%s", port->target, port->source);
        }
    }

    // User-specified extra options come last so they can override ours
    if(data->docker_run_options) {
        offset += snprintf(buffer+offset, max-offset, " %s", data->docker_run_options);
    }

    if(data->docker_host_options) {
        offset += snprintf(buffer+offset, max-offset, " %s", data->docker_host_options);
    }

    crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
    free(buffer);

    crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
    free(dbuffer);

    if(tuple->child) {
        // The bundle hosts a primitive: run pacemaker_remoted inside
        if(data->docker_run_command) {
            crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", data->docker_run_command);
        } else {
            crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", SBIN_DIR"/pacemaker_remoted");
        }

        /* TODO: Allow users to specify their own?
         *
         * We just want to know if the container is alive, we'll
         * monitor the child independently
         */
        crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
        /* } else if(child && data->untrusted) {
         * Support this use-case?
         *
         * The ability to have resources started/stopped by us, but
         * unable to set attributes, etc.
         *
         * Arguably better to control API access this with ACLs like
         * "normal" remote nodes
         *
         *     crm_create_nvpair_xml(xml_obj, NULL,
         *                           "run_cmd", "/usr/libexec/pacemaker/lrmd");
         *     crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
         *         "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
         */
    } else {
        if(data->docker_run_command) {
            crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                  data->docker_run_command);
        }

        /* TODO: Allow users to specify their own?
         *
         * We don't know what's in the container, so we just want
         * to know if it is alive
         */
        crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
    }


    xml_obj = create_xml_node(xml_docker, "operations");
    crm_create_op_xml(xml_obj, ID(xml_docker), "monitor", "60s", NULL);

    // TODO: Other ops? Timeouts and intervals from underlying resource?

    if (common_unpack(xml_docker, &tuple->docker, parent, data_set) == FALSE) {
        return FALSE;
    }
    parent->children = g_list_append(parent->children, tuple->docker);
    return TRUE;
}
490 
497 static void
498 disallow_node(resource_t *rsc, const char *uname)
499 {
500  gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname);
501 
502  if (match) {
503  ((pe_node_t *) match)->weight = -INFINITY;
504  ((pe_node_t *) match)->rsc_discover_mode = pe_discover_never;
505  }
506  if (rsc->children) {
507  GListPtr child;
508 
509  for (child = rsc->children; child != NULL; child = child->next) {
510  disallow_node((resource_t *) (child->data), uname);
511  }
512  }
513 }
514 
/*!
 * \internal
 * \brief Create the implicit remote connection resource for one replica
 *
 * No-op unless the bundle hosts a child primitive and its network
 * configuration is usable (see valid_network()). Also creates the guest
 * node entry for the replica and bans all other resources from it.
 *
 * \return TRUE on success (or nothing to do), FALSE if unpacking failed
 */
static bool
create_remote_resource(
    resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
    pe_working_set_t * data_set)
{
    if (tuple->child && valid_network(data)) {
        GHashTableIter gIter;
        GListPtr rsc_iter = NULL;
        node_t *node = NULL;
        xmlNode *xml_remote = NULL;
        char *id = crm_strdup_printf("%s-%d", data->prefix, tuple->offset);
        char *port_s = NULL;
        const char *uname = NULL;
        const char *connect_name = NULL;

        if (remote_id_conflict(id, data_set)) {
            free(id);
            // The biggest hammer we have
            id = crm_strdup_printf("pcmk-internal-%s-remote-%d", tuple->child->id, tuple->offset);
            CRM_ASSERT(remote_id_conflict(id, data_set) == FALSE);
        }

        /* REMOTE_CONTAINER_HACK: Using "#uname" as the server name when the
         * connection does not have its own IP is a magic string that we use to
         * support nested remotes (i.e. a bundle running on a remote node).
         */
        connect_name = (tuple->ipaddr? tuple->ipaddr : "#uname");

        if (data->control_port == NULL) {
            // Fall back to the default pacemaker_remoted port
            port_s = crm_itoa(DEFAULT_REMOTE_PORT);
        }

        /* This sets tuple->docker as tuple->remote's container, which is
         * similar to what happens with guest nodes. This is how the PE knows
         * that the bundle node is fenced by recovering docker, and that
         * remote should be ordered relative to docker.
         */
        xml_remote = pe_create_remote_xml(NULL, id, tuple->docker->id,
                                          XML_BOOLEAN_FALSE, NULL, "60s", NULL,
                                          NULL, connect_name,
                                          (data->control_port?
                                           data->control_port : port_s));
        free(port_s);

        /* Abandon our created ID, and pull the copy from the XML, because we
         * need something that will get freed during data set cleanup to use as
         * the node ID and uname.
         */
        free(id);
        id = NULL;
        uname = ID(xml_remote);

        /* Ensure a node has been created for the guest (it may have already
         * been, if it has a permanent node attribute), and ensure its weight is
         * -INFINITY so no other resources can run on it.
         */
        node = pe_find_node(data_set->nodes, uname);
        if (node == NULL) {
            node = pe_create_node(uname, uname, "remote", "-INFINITY",
                                  data_set);
        } else {
            node->weight = -INFINITY;
        }

        /* unpack_remote_nodes() ensures that each remote node and guest node
         * has a pe_node_t entry. Ideally, it would do the same for bundle nodes.
         * Unfortunately, a bundle has to be mostly unpacked before it's obvious
         * what nodes will be needed, so we do it just above.
         *
         * Worse, that means that the node may have been utilized while
         * unpacking other resources, without our weight correction. The most
         * likely place for this to happen is when common_unpack() calls
         * resource_location() to set a default score in symmetric clusters.
         * This adds a node *copy* to each resource's allowed nodes, and these
         * copies will have the wrong weight.
         *
         * As a hacky workaround, fix those copies here.
         *
         * @TODO Possible alternative: ensure bundles are unpacked before other
         * resources, so the weight is correct before any copies are made.
         */
        for (rsc_iter = data_set->resources; rsc_iter; rsc_iter = rsc_iter->next) {
            disallow_node((resource_t *) (rsc_iter->data), uname);
        }

        // The replica itself prefers (weight 500) its own guest node
        tuple->node = node_copy(node);
        tuple->node->weight = 500;
        tuple->node->rsc_discover_mode = pe_discover_exclusive;

        /* Ensure the node shows up as allowed and with the correct discovery set */
        g_hash_table_insert(tuple->child->allowed_nodes, (gpointer) tuple->node->details->id, node_copy(tuple->node));

        if (common_unpack(xml_remote, &tuple->remote, parent, data_set) == FALSE) {
            return FALSE;
        }

        g_hash_table_iter_init(&gIter, tuple->remote->allowed_nodes);
        while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) {
            if(is_remote_node(node)) {
                /* Remote resources can only run on 'normal' cluster node */
                node->weight = -INFINITY;
            }
        }

        tuple->node->details->remote_rsc = tuple->remote;

        /* A bundle's #kind is closer to "container" (guest node) than the
         * "remote" set by pe_create_node().
         */
        g_hash_table_insert(tuple->node->details->attrs,
                            strdup(CRM_ATTR_KIND), strdup("container"));

        /* One effect of this is that setup_container() will add
         * tuple->remote to tuple->docker's fillers, which will make
         * rsc_contains_remote_node() true for tuple->docker.
         *
         * tuple->child does NOT get added to tuple->docker's fillers.
         * The only noticeable effect if it did would be for its fail count to
         * be taken into account when checking tuple->docker's migration
         * threshold.
         */
        parent->children = g_list_append(parent->children, tuple->remote);
    }
    return TRUE;
}
641 
642 static bool
643 create_container(
644  resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
645  pe_working_set_t * data_set)
646 {
647 
648  if (data->type == PE_CONTAINER_TYPE_DOCKER &&
649  create_docker_resource(parent, data, tuple, data_set) == FALSE) {
650  return FALSE;
651  }
652  if (data->type == PE_CONTAINER_TYPE_RKT &&
653  create_rkt_resource(parent, data, tuple, data_set) == FALSE) {
654  return FALSE;
655  }
656 
657  if(create_ip_resource(parent, data, tuple, data_set) == FALSE) {
658  return FALSE;
659  }
660  if(create_remote_resource(parent, data, tuple, data_set) == FALSE) {
661  return FALSE;
662  }
663  if(tuple->child && tuple->ipaddr) {
664  add_hash_param(tuple->child->meta, "external-ip", tuple->ipaddr);
665  }
666 
667  if(tuple->remote) {
668  /*
669  * Allow the remote connection resource to be allocated to a
670  * different node than the one on which the docker container
671  * is active.
672  *
673  * Makes it possible to have remote nodes, running docker
674  * containers with pacemaker_remoted inside in order to start
675  * services inside those containers.
676  */
677  set_bit(tuple->remote->flags, pe_rsc_allow_remote_remotes);
678  }
679 
680  return TRUE;
681 }
682 
683 static void
684 mount_add(container_variant_data_t *container_data, const char *source,
685  const char *target, const char *options, int flags)
686 {
687  container_mount_t *mount = calloc(1, sizeof(container_mount_t));
688 
689  mount->source = strdup(source);
690  mount->target = strdup(target);
691  if (options) {
692  mount->options = strdup(options);
693  }
694  mount->flags = flags;
695  container_data->mounts = g_list_append(container_data->mounts, mount);
696 }
697 
// Free a mount entry created by mount_add() (options may be NULL)
static void mount_free(container_mount_t *mount)
{
    free(mount->source);
    free(mount->target);
    free(mount->options);
    free(mount);
}
705 
// Free a port-mapping entry and its source/target strings
static void port_free(container_port_t *port)
{
    free(port->source);
    free(port->target);
    free(port);
}
712 
713 gboolean
715 {
716  const char *value = NULL;
717  xmlNode *xml_obj = NULL;
718  xmlNode *xml_resource = NULL;
719  container_variant_data_t *container_data = NULL;
720 
721  CRM_ASSERT(rsc != NULL);
722  pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
723 
724  container_data = calloc(1, sizeof(container_variant_data_t));
725  rsc->variant_opaque = container_data;
726  container_data->prefix = strdup(rsc->id);
727 
728  xml_obj = first_named_child(rsc->xml, "docker");
729  if (xml_obj != NULL) {
730  container_data->type = PE_CONTAINER_TYPE_DOCKER;
731  } else {
732  xml_obj = first_named_child(rsc->xml, "rkt");
733  if (xml_obj != NULL) {
734  container_data->type = PE_CONTAINER_TYPE_RKT;
735  } else {
736  return FALSE;
737  }
738  }
739 
740  value = crm_element_value(xml_obj, "masters");
741  container_data->masters = crm_parse_int(value, "0");
742  if (container_data->masters < 0) {
743  pe_err("'masters' for %s must be nonnegative integer, using 0",
744  rsc->id);
745  container_data->masters = 0;
746  }
747 
748  value = crm_element_value(xml_obj, "replicas");
749  if ((value == NULL) && (container_data->masters > 0)) {
750  container_data->replicas = container_data->masters;
751  } else {
752  container_data->replicas = crm_parse_int(value, "1");
753  }
754  if (container_data->replicas < 1) {
755  pe_err("'replicas' for %s must be positive integer, using 1", rsc->id);
756  container_data->replicas = 1;
757  }
758 
759  /*
760  * Communication between containers on the same host via the
761  * floating IPs only works if docker is started with:
762  * --userland-proxy=false --ip-masq=false
763  */
764  value = crm_element_value(xml_obj, "replicas-per-host");
765  container_data->replicas_per_host = crm_parse_int(value, "1");
766  if (container_data->replicas_per_host < 1) {
767  pe_err("'replicas-per-host' for %s must be positive integer, using 1",
768  rsc->id);
769  container_data->replicas_per_host = 1;
770  }
771  if (container_data->replicas_per_host == 1) {
773  }
774 
775  container_data->docker_run_command = crm_element_value_copy(xml_obj, "run-command");
776  container_data->docker_run_options = crm_element_value_copy(xml_obj, "options");
777  container_data->image = crm_element_value_copy(xml_obj, "image");
778  container_data->docker_network = crm_element_value_copy(xml_obj, "network");
779 
780  xml_obj = first_named_child(rsc->xml, "network");
781  if(xml_obj) {
782 
783  container_data->ip_range_start = crm_element_value_copy(xml_obj, "ip-range-start");
784  container_data->host_netmask = crm_element_value_copy(xml_obj, "host-netmask");
785  container_data->host_network = crm_element_value_copy(xml_obj, "host-interface");
786  container_data->control_port = crm_element_value_copy(xml_obj, "control-port");
787 
788  for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL;
789  xml_child = __xml_next_element(xml_child)) {
790 
791  container_port_t *port = calloc(1, sizeof(container_port_t));
792  port->source = crm_element_value_copy(xml_child, "port");
793 
794  if(port->source == NULL) {
795  port->source = crm_element_value_copy(xml_child, "range");
796  } else {
797  port->target = crm_element_value_copy(xml_child, "internal-port");
798  }
799 
800  if(port->source != NULL && strlen(port->source) > 0) {
801  if(port->target == NULL) {
802  port->target = strdup(port->source);
803  }
804  container_data->ports = g_list_append(container_data->ports, port);
805 
806  } else {
807  pe_err("Invalid port directive %s", ID(xml_child));
808  port_free(port);
809  }
810  }
811  }
812 
813  xml_obj = first_named_child(rsc->xml, "storage");
814  for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL;
815  xml_child = __xml_next_element(xml_child)) {
816 
817  const char *source = crm_element_value(xml_child, "source-dir");
818  const char *target = crm_element_value(xml_child, "target-dir");
819  const char *options = crm_element_value(xml_child, "options");
820  int flags = 0;
821 
822  if (source == NULL) {
823  source = crm_element_value(xml_child, "source-dir-root");
824  flags = 1;
825  }
826 
827  if (source && target) {
828  mount_add(container_data, source, target, options, flags);
829  } else {
830  pe_err("Invalid mount directive %s", ID(xml_child));
831  }
832  }
833 
834  xml_obj = first_named_child(rsc->xml, "primitive");
835  if (xml_obj && valid_network(container_data)) {
836  char *value = NULL;
837  xmlNode *xml_set = NULL;
838 
839  if(container_data->masters > 0) {
840  xml_resource = create_xml_node(NULL, XML_CIB_TAG_MASTER);
841 
842  } else {
843  xml_resource = create_xml_node(NULL, XML_CIB_TAG_INCARNATION);
844  }
845 
846  crm_xml_set_id(xml_resource, "%s-%s", container_data->prefix, xml_resource->name);
847 
848  xml_set = create_xml_node(xml_resource, XML_TAG_META_SETS);
849  crm_xml_set_id(xml_set, "%s-%s-meta", container_data->prefix, xml_resource->name);
850 
851  crm_create_nvpair_xml(xml_set, NULL,
853 
854  value = crm_itoa(container_data->replicas);
855  crm_create_nvpair_xml(xml_set, NULL,
857  free(value);
858 
859  value = crm_itoa(container_data->replicas_per_host);
860  crm_create_nvpair_xml(xml_set, NULL,
862  free(value);
863 
865  (container_data->replicas_per_host > 1)?
867 
868  if(container_data->masters) {
869  value = crm_itoa(container_data->masters);
870  crm_create_nvpair_xml(xml_set, NULL,
871  XML_RSC_ATTR_MASTER_MAX, value);
872  free(value);
873  }
874 
875  //crm_xml_add(xml_obj, XML_ATTR_ID, container_data->prefix);
876  add_node_copy(xml_resource, xml_obj);
877 
878  } else if(xml_obj) {
879  pe_err("Cannot control %s inside %s without either ip-range-start or control-port",
880  rsc->id, ID(xml_obj));
881  return FALSE;
882  }
883 
884  if(xml_resource) {
885  int lpc = 0;
886  GListPtr childIter = NULL;
887  resource_t *new_rsc = NULL;
888  container_port_t *port = NULL;
889  const char *key_loc = NULL;
890 
891  int offset = 0, max = 1024;
892  char *buffer = NULL;
893 
894  if (common_unpack(xml_resource, &new_rsc, rsc, data_set) == FALSE) {
895  pe_err("Failed unpacking resource %s", ID(rsc->xml));
896  if (new_rsc != NULL && new_rsc->fns != NULL) {
897  new_rsc->fns->free(new_rsc);
898  }
899  return FALSE;
900  }
901 
902  container_data->child = new_rsc;
903 
904  /* We map the remote authentication key (likely) used on the DC to the
905  * default key location inside the container. This is only the likely
906  * location because an actual connection will do some validity checking
907  * on the file before using it.
908  *
909  * Mapping to the default location inside the container avoids having to
910  * pass another environment variable to the container.
911  *
912  * This makes several assumptions:
913  * - if PCMK_authkey_location is set, it has the same value on all nodes
914  * - the container technology does not propagate host environment
915  * variables to the container
916  * - the user does not set this environment variable via their container
917  * image
918  *
919  * @TODO A convoluted but possible way around the first limitation would
920  * be to allow a resource parameter to include environment
921  * variable references in its value, and resolve them on the
922  * executing node's crmd before sending the command to the lrmd.
923  */
924  key_loc = getenv("PCMK_authkey_location");
925  if (key_loc == NULL) {
926  key_loc = DEFAULT_REMOTE_KEY_LOCATION;
927  }
928  mount_add(container_data, key_loc, DEFAULT_REMOTE_KEY_LOCATION, NULL,
929  0);
930 
931  mount_add(container_data, CRM_LOG_DIR "/bundles", "/var/log", NULL, 1);
932 
933  port = calloc(1, sizeof(container_port_t));
934  if(container_data->control_port) {
935  port->source = strdup(container_data->control_port);
936  } else {
937  /* If we wanted to respect PCMK_remote_port, we could use
938  * crm_default_remote_port() here and elsewhere in this file instead
939  * of DEFAULT_REMOTE_PORT.
940  *
941  * However, it gains nothing, since we control both the container
942  * environment and the connection resource parameters, and the user
943  * can use a different port if desired by setting control-port.
944  */
945  port->source = crm_itoa(DEFAULT_REMOTE_PORT);
946  }
947  port->target = strdup(port->source);
948  container_data->ports = g_list_append(container_data->ports, port);
949 
950  buffer = calloc(1, max+1);
951  for(childIter = container_data->child->children; childIter != NULL; childIter = childIter->next) {
952  container_grouping_t *tuple = calloc(1, sizeof(container_grouping_t));
953  tuple->child = childIter->data;
954  tuple->offset = lpc++;
955 
956  // Ensure the child's notify gets set based on the underlying primitive's value
957  if(is_set(tuple->child->flags, pe_rsc_notify)) {
958  set_bit(container_data->child->flags, pe_rsc_notify);
959  }
960 
961  offset += allocate_ip(container_data, tuple, buffer+offset, max-offset);
962  container_data->tuples = g_list_append(container_data->tuples, tuple);
963  container_data->attribute_target = g_hash_table_lookup(tuple->child->meta, XML_RSC_ATTR_TARGET);
964  }
965  container_data->docker_host_options = buffer;
966  if(container_data->attribute_target) {
967  g_hash_table_replace(rsc->meta, strdup(XML_RSC_ATTR_TARGET), strdup(container_data->attribute_target));
968  g_hash_table_replace(container_data->child->meta, strdup(XML_RSC_ATTR_TARGET), strdup(container_data->attribute_target));
969  }
970 
971  } else {
972  // Just a naked container, no pacemaker-remote
973  int offset = 0, max = 1024;
974  char *buffer = calloc(1, max+1);
975 
976  for(int lpc = 0; lpc < container_data->replicas; lpc++) {
977  container_grouping_t *tuple = calloc(1, sizeof(container_grouping_t));
978  tuple->offset = lpc;
979  offset += allocate_ip(container_data, tuple, buffer+offset, max-offset);
980  container_data->tuples = g_list_append(container_data->tuples, tuple);
981  }
982 
983  container_data->docker_host_options = buffer;
984  }
985 
986  for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
987  container_grouping_t *tuple = (container_grouping_t *)gIter->data;
988  if (create_container(rsc, container_data, tuple, data_set) == FALSE) {
989  pe_err("Failed unpacking resource %s", rsc->id);
990  rsc->fns->free(rsc);
991  return FALSE;
992  }
993  }
994 
995  if(container_data->child) {
996  rsc->children = g_list_append(rsc->children, container_data->child);
997  }
998  return TRUE;
999 }
1000 
1001 static int
1002 tuple_rsc_active(resource_t *rsc, gboolean all)
1003 {
1004  if (rsc) {
1005  gboolean child_active = rsc->fns->active(rsc, all);
1006 
1007  if (child_active && !all) {
1008  return TRUE;
1009  } else if (!child_active && all) {
1010  return FALSE;
1011  }
1012  }
1013  return -1;
1014 }
1015 
1016 gboolean
1017 container_active(resource_t * rsc, gboolean all)
1018 {
1019  container_variant_data_t *container_data = NULL;
1020  GListPtr iter = NULL;
1021 
1022  get_container_variant_data(container_data, rsc);
1023  for (iter = container_data->tuples; iter != NULL; iter = iter->next) {
1024  container_grouping_t *tuple = (container_grouping_t *)(iter->data);
1025  int rsc_active;
1026 
1027  rsc_active = tuple_rsc_active(tuple->ip, all);
1028  if (rsc_active >= 0) {
1029  return (gboolean) rsc_active;
1030  }
1031 
1032  rsc_active = tuple_rsc_active(tuple->child, all);
1033  if (rsc_active >= 0) {
1034  return (gboolean) rsc_active;
1035  }
1036 
1037  rsc_active = tuple_rsc_active(tuple->docker, all);
1038  if (rsc_active >= 0) {
1039  return (gboolean) rsc_active;
1040  }
1041 
1042  rsc_active = tuple_rsc_active(tuple->remote, all);
1043  if (rsc_active >= 0) {
1044  return (gboolean) rsc_active;
1045  }
1046  }
1047 
1048  /* If "all" is TRUE, we've already checked that no resources were inactive,
1049  * so return TRUE; if "all" is FALSE, we didn't find any active resources,
1050  * so return FALSE.
1051  */
1052  return all;
1053 }
1054 
1055 resource_t *
1056 find_container_child(const char *stem, resource_t * rsc, node_t *node)
1057 {
1058  container_variant_data_t *container_data = NULL;
1059  resource_t *parent = uber_parent(rsc);
1060  CRM_ASSERT(parent->parent);
1061 
1062  parent = parent->parent;
1063  get_container_variant_data(container_data, parent);
1064 
1065  if (is_not_set(rsc->flags, pe_rsc_unique)) {
1066  for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
1067  container_grouping_t *tuple = (container_grouping_t *)gIter->data;
1068 
1069  CRM_ASSERT(tuple);
1070  if(tuple->node->details == node->details) {
1071  rsc = tuple->child;
1072  break;
1073  }
1074  }
1075  }
1076 
1077  if (rsc && safe_str_neq(stem, rsc->id)) {
1078  free(rsc->clone_name);
1079  rsc->clone_name = strdup(stem);
1080  }
1081 
1082  return rsc;
1083 }
1084 
1085 static void
1086 print_rsc_in_list(resource_t *rsc, const char *pre_text, long options,
1087  void *print_data)
1088 {
1089  if (rsc != NULL) {
1090  if (options & pe_print_html) {
1091  status_print("<li>");
1092  }
1093  rsc->fns->print(rsc, pre_text, options, print_data);
1094  if (options & pe_print_html) {
1095  status_print("</li>\n");
1096  }
1097  }
1098 }
1099 
1100 static const char*
1101 container_type_as_string(enum container_type t)
1102 {
1103  if (t == PE_CONTAINER_TYPE_DOCKER) {
1104  return PE_CONTAINER_TYPE_DOCKER_S;
1105  } else if (t == PE_CONTAINER_TYPE_RKT) {
1106  return PE_CONTAINER_TYPE_RKT_S;
1107  } else {
1108  return PE_CONTAINER_TYPE_UNKNOWN_S;
1109  }
1110 }
1111 
1112 static void
1113 container_print_xml(resource_t * rsc, const char *pre_text, long options, void *print_data)
1114 {
1115  container_variant_data_t *container_data = NULL;
1116  char *child_text = NULL;
1117  CRM_CHECK(rsc != NULL, return);
1118 
1119  if (pre_text == NULL) {
1120  pre_text = "";
1121  }
1122  child_text = crm_concat(pre_text, " ", ' ');
1123 
1124  get_container_variant_data(container_data, rsc);
1125 
1126  status_print("%s<bundle ", pre_text);
1127  status_print("id=\"%s\" ", rsc->id);
1128 
1129  // Always lowercase the container technology type for use as XML value
1130  status_print("type=\"");
1131  for (const char *c = container_type_as_string(container_data->type);
1132  *c; ++c) {
1133  status_print("%c", tolower(*c));
1134  }
1135  status_print("\" ");
1136 
1137  status_print("image=\"%s\" ", container_data->image);
1138  status_print("unique=\"%s\" ", is_set(rsc->flags, pe_rsc_unique)? "true" : "false");
1139  status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false");
1140  status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false");
1141  status_print(">\n");
1142 
1143  for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
1144  container_grouping_t *tuple = (container_grouping_t *)gIter->data;
1145 
1146  CRM_ASSERT(tuple);
1147  status_print("%s <replica id=\"%d\">\n", pre_text, tuple->offset);
1148  print_rsc_in_list(tuple->ip, child_text, options, print_data);
1149  print_rsc_in_list(tuple->child, child_text, options, print_data);
1150  print_rsc_in_list(tuple->docker, child_text, options, print_data);
1151  print_rsc_in_list(tuple->remote, child_text, options, print_data);
1152  status_print("%s </replica>\n", pre_text);
1153  }
1154  status_print("%s</bundle>\n", pre_text);
1155  free(child_text);
1156 }
1157 
1158 static void
1159 tuple_print(container_grouping_t * tuple, const char *pre_text, long options, void *print_data)
1160 {
1161  node_t *node = NULL;
1162  resource_t *rsc = tuple->child;
1163 
1164  int offset = 0;
1165  char buffer[LINE_MAX];
1166 
1167  if(rsc == NULL) {
1168  rsc = tuple->docker;
1169  }
1170 
1171  if(tuple->remote) {
1172  offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(tuple->remote));
1173  } else {
1174  offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(tuple->docker));
1175  }
1176  if(tuple->ipaddr) {
1177  offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", tuple->ipaddr);
1178  }
1179 
1180  if (tuple->docker->running_on) {
1181  node = tuple->docker->running_on->data;
1182  }
1183  common_print(rsc, pre_text, buffer, node, options, print_data);
1184 }
1185 
1186 void
1187 container_print(resource_t * rsc, const char *pre_text, long options, void *print_data)
1188 {
1189  container_variant_data_t *container_data = NULL;
1190  char *child_text = NULL;
1191  CRM_CHECK(rsc != NULL, return);
1192 
1193  if (options & pe_print_xml) {
1194  container_print_xml(rsc, pre_text, options, print_data);
1195  return;
1196  }
1197 
1198  get_container_variant_data(container_data, rsc);
1199 
1200  if (pre_text == NULL) {
1201  pre_text = " ";
1202  }
1203 
1204  status_print("%s%s container%s: %s [%s]%s%s\n",
1205  pre_text, container_type_as_string(container_data->type),
1206  container_data->replicas>1?" set":"", rsc->id, container_data->image,
1207  is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
1208  is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
1209  if (options & pe_print_html) {
1210  status_print("<br />\n<ul>\n");
1211  }
1212 
1213 
1214  for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
1215  container_grouping_t *tuple = (container_grouping_t *)gIter->data;
1216 
1217  CRM_ASSERT(tuple);
1218  if (options & pe_print_html) {
1219  status_print("<li>");
1220  }
1221 
1222  if(is_set(options, pe_print_clone_details)) {
1223  child_text = crm_strdup_printf(" %s", pre_text);
1224  if(g_list_length(container_data->tuples) > 1) {
1225  status_print(" %sReplica[%d]\n", pre_text, tuple->offset);
1226  }
1227  if (options & pe_print_html) {
1228  status_print("<br />\n<ul>\n");
1229  }
1230  print_rsc_in_list(tuple->ip, child_text, options, print_data);
1231  print_rsc_in_list(tuple->docker, child_text, options, print_data);
1232  print_rsc_in_list(tuple->remote, child_text, options, print_data);
1233  print_rsc_in_list(tuple->child, child_text, options, print_data);
1234  if (options & pe_print_html) {
1235  status_print("</ul>\n");
1236  }
1237  } else {
1238  child_text = crm_strdup_printf("%s ", pre_text);
1239  tuple_print(tuple, child_text, options, print_data);
1240  }
1241  free(child_text);
1242 
1243  if (options & pe_print_html) {
1244  status_print("</li>\n");
1245  }
1246  }
1247  if (options & pe_print_html) {
1248  status_print("</ul>\n");
1249  }
1250 }
1251 
1252 void
1253 tuple_free(container_grouping_t *tuple)
1254 {
1255  if(tuple == NULL) {
1256  return;
1257  }
1258 
1259  if(tuple->node) {
1260  free(tuple->node);
1261  tuple->node = NULL;
1262  }
1263 
1264  if(tuple->ip) {
1265  free_xml(tuple->ip->xml);
1266  tuple->ip->xml = NULL;
1267  tuple->ip->fns->free(tuple->ip);
1268  tuple->ip = NULL;
1269  }
1270  if(tuple->docker) {
1271  free_xml(tuple->docker->xml);
1272  tuple->docker->xml = NULL;
1273  tuple->docker->fns->free(tuple->docker);
1274  tuple->docker = NULL;
1275  }
1276  if(tuple->remote) {
1277  free_xml(tuple->remote->xml);
1278  tuple->remote->xml = NULL;
1279  tuple->remote->fns->free(tuple->remote);
1280  tuple->remote = NULL;
1281  }
1282  free(tuple->ipaddr);
1283  free(tuple);
1284 }
1285 
1286 void
1288 {
1289  container_variant_data_t *container_data = NULL;
1290  CRM_CHECK(rsc != NULL, return);
1291 
1292  get_container_variant_data(container_data, rsc);
1293  pe_rsc_trace(rsc, "Freeing %s", rsc->id);
1294 
1295  free(container_data->prefix);
1296  free(container_data->image);
1297  free(container_data->control_port);
1298  free(container_data->host_network);
1299  free(container_data->host_netmask);
1300  free(container_data->ip_range_start);
1301  free(container_data->docker_network);
1302  free(container_data->docker_run_options);
1303  free(container_data->docker_run_command);
1304  free(container_data->docker_host_options);
1305 
1306  g_list_free_full(container_data->tuples, (GDestroyNotify)tuple_free);
1307  g_list_free_full(container_data->mounts, (GDestroyNotify)mount_free);
1308  g_list_free_full(container_data->ports, (GDestroyNotify)port_free);
1309  g_list_free(rsc->children);
1310 
1311  if(container_data->child) {
1312  free_xml(container_data->child->xml);
1313  container_data->child->xml = NULL;
1314  container_data->child->fns->free(container_data->child);
1315  }
1316  common_free(rsc);
1317 }
1318 
1319 enum rsc_role_e
1320 container_resource_state(const resource_t * rsc, gboolean current)
1321 {
1322  enum rsc_role_e container_role = RSC_ROLE_UNKNOWN;
1323  return container_role;
1324 }
1325 
1333 int
1335 {
1336  if ((rsc == NULL) || (rsc->variant != pe_container)) {
1337  return 0;
1338  } else {
1339  container_variant_data_t *container_data = NULL;
1340 
1341  get_container_variant_data(container_data, rsc);
1342  return container_data->replicas;
1343  }
1344 }
bool remote_id_conflict(const char *remote_name, pe_working_set_t *data)
Definition: unpack.c:418
#define CRM_CHECK(expr, failure_action)
Definition: logging.h:164
GListPtr nodes
Definition: status.h:105
xmlNode * xml
Definition: status.h:256
gboolean safe_str_neq(const char *a, const char *b)
Definition: strings.c:150
#define INFINITY
Definition: crm.h:83
int pe_bundle_replicas(const resource_t *rsc)
Get the number of configured replicas in a bundle.
Definition: container.c:1334
#define CRM_ATTR_KIND
Definition: crm.h:100
node_t * node_copy(const node_t *this_node)
Definition: utils.c:127
int weight
Definition: status.h:173
node_t * pe_create_node(const char *id, const char *uname, const char *type, const char *score, pe_working_set_t *data_set)
Definition: unpack.c:356
#define XML_ATTR_TYPE
Definition: msg_xml.h:105
void(* free)(resource_t *)
Definition: complex.h:51
#define XML_BOOLEAN_FALSE
Definition: msg_xml.h:118
gboolean common_unpack(xmlNode *xml_obj, resource_t **rsc, resource_t *parent, pe_working_set_t *data_set)
Definition: complex.c:465
enum pe_obj_types variant
Definition: status.h:262
void common_free(resource_t *rsc)
Definition: complex.c:910
#define status_print(fmt, args...)
Definition: unpack.h:79
int crm_parse_int(const char *text, const char *default_text)
Definition: strings.c:125
char * crm_element_value_copy(xmlNode *data, const char *name)
Definition: xml.c:3869
GListPtr resources
Definition: status.h:106
node_t * pe_find_node(GListPtr node_list, const char *uname)
Definition: status.c:295
char * clone_name
Definition: status.h:255
resource_t * uber_parent(resource_t *rsc)
Definition: complex.c:896
#define clear_bit(word, bit)
Definition: crm_internal.h:191
#define XML_RSC_ATTR_INCARNATION_MAX
Definition: msg_xml.h:212
GListPtr children
Definition: status.h:298
#define XML_RSC_ATTR_TARGET
Definition: msg_xml.h:203
#define pe_rsc_allow_remote_remotes
Definition: status.h:198
void crm_xml_sanitize_id(char *id)
Sanitize a string so it is usable as an XML ID.
Definition: xml.c:3021
char * id
Definition: status.h:254
#define DEFAULT_REMOTE_PORT
Definition: lrmd.h:54
#define DEFAULT_REMOTE_KEY_LOCATION
Definition: lrmd.h:52
#define CRM_LOG_DIR
Definition: config.h:59
#define XML_TAG_ATTR_SETS
Definition: msg_xml.h:184
char uname[MAX_NAME]
Definition: internal.h:53
gboolean is_remote_node(node_t *node)
Definition: remote.c:62
struct node_shared_s * details
Definition: status.h:176
#define set_bit(word, bit)
Definition: crm_internal.h:190
#define PCMK_RESOURCE_CLASS_OCF
Definition: services.h:57
xmlNode * pe_create_remote_xml(xmlNode *parent, const char *uname, const char *container_id, const char *migrateable, const char *is_managed, const char *interval, const char *monitor_timeout, const char *start_timeout, const char *server, const char *port)
Definition: remote.c:158
#define XML_ATTR_ID
Definition: msg_xml.h:102
#define XML_CIB_TAG_RESOURCE
Definition: msg_xml.h:195
#define XML_BOOLEAN_TRUE
Definition: msg_xml.h:117
#define pe_rsc_failed
Definition: status.h:200
resource_object_functions_t * fns
Definition: status.h:263
GHashTable * allowed_nodes
Definition: status.h:289
void * variant_opaque
Definition: status.h:261
xmlNode * add_node_copy(xmlNode *new_parent, xmlNode *xml_node)
Definition: xml.c:2405
xmlNode * crm_create_op_xml(xmlNode *parent, const char *prefix, const char *task, const char *interval, const char *timeout)
Create a CIB XML element for an operation.
Definition: operations.c:439
#define XML_AGENT_ATTR_PROVIDER
Definition: msg_xml.h:254
#define XML_RSC_ATTR_ORDERED
Definition: msg_xml.h:209
#define XML_TAG_META_SETS
Definition: msg_xml.h:185
xmlNode * create_xml_node(xmlNode *parent, const char *name)
Definition: xml.c:2588
const char * crm_element_value(xmlNode *data, const char *name)
Definition: xml.c:5165
unsigned long long flags
Definition: status.h:278
#define XML_RSC_ATTR_INCARNATION_NODEMAX
Definition: msg_xml.h:214
resource_t * parent
Definition: status.h:260
void free_xml(xmlNode *child)
Definition: xml.c:2706
#define XML_RSC_ATTR_UNIQUE
Definition: msg_xml.h:220
gboolean(* active)(resource_t *, gboolean)
Definition: complex.h:48
void common_print(resource_t *rsc, const char *pre_text, const char *name, node_t *node, long options, void *print_data)
Definition: native.c:468
const char * crm_xml_add(xmlNode *node, const char *name, const char *value)
Definition: xml.c:2490
#define XML_RSC_ATTR_MASTER_MAX
Definition: msg_xml.h:215
void(* print)(resource_t *, const char *, long, void *)
Definition: complex.h:47
#define pe_rsc_unique
Definition: status.h:188
gboolean container_unpack(resource_t *rsc, pe_working_set_t *data_set)
Definition: container.c:714
#define SBIN_DIR
Definition: config.h:688
GHashTable * meta
Definition: status.h:294
enum rsc_role_e container_resource_state(const resource_t *rsc, gboolean current)
Definition: container.c:1320
void tuple_free(container_grouping_t *tuple)
Definition: container.c:1253
#define XML_CIB_TAG_INCARNATION
Definition: msg_xml.h:197
void add_hash_param(GHashTable *hash, const char *name, const char *value)
Definition: common.c:423
void crm_xml_set_id(xmlNode *xml, const char *format,...) __attribute__((__format__(__printf__
gboolean container_active(resource_t *rsc, gboolean all)
Definition: container.c:1017
xmlNode * crm_create_nvpair_xml(xmlNode *parent, const char *id, const char *name, const char *value)
Create an XML name/value pair.
Definition: xml.c:4831
#define pe_rsc_managed
Definition: status.h:183
#define CRM_ASSERT(expr)
Definition: error.h:35
char data[0]
Definition: internal.h:58
rsc_role_e
Definition: common.h:81
#define XML_CIB_TAG_MASTER
Definition: msg_xml.h:198
int rsc_discover_mode
Definition: status.h:177
xmlNode * first_named_child(xmlNode *parent, const char *name)
Definition: xml.c:5053
Definition: status.h:172
#define pe_rsc_trace(rsc, fmt, args...)
Definition: internal.h:25
char * crm_concat(const char *prefix, const char *suffix, char join)
Definition: strings.c:32
#define ID(x)
Definition: msg_xml.h:446
#define pe_err(fmt...)
Definition: internal.h:27
char * crm_itoa(int an_int)
Definition: strings.c:60
char * crm_strdup_printf(char const *format,...) __attribute__((__format__(__printf__
void container_print(resource_t *rsc, const char *pre_text, long options, void *print_data)
Definition: container.c:1187
GList * GListPtr
Definition: crm.h:218
#define pe_rsc_notify
Definition: status.h:187
resource_t * find_container_child(const char *stem, resource_t *rsc, node_t *node)
Definition: container.c:1056
const char * rsc_printable_id(resource_t *rsc)
Definition: utils.c:2062
uint64_t flags
Definition: remote.c:156
#define XML_AGENT_ATTR_CLASS
Definition: msg_xml.h:253
void container_free(resource_t *rsc)
Definition: container.c:1287