| author | ewolinetz <ewolinet@redhat.com> | 2016-09-28 10:52:07 -0500 |
|---|---|---|
| committer | ewolinetz <ewolinet@redhat.com> | 2016-12-14 15:38:10 -0600 |
| commit | b579a4acfa64f85119ffbcbb8f6701972ef0dbb6 | |
| tree | 6b65a25017defdca2fafe8655a858436c34db679 | roles/openshift_logging/files |
| parent | 43f52e292afac7bde5e588377e56d9c49574806c | |
Creating openshift_logging role for deploying Aggregated Logging without a deployer image
Diffstat (limited to 'roles/openshift_logging/files')

| Mode | Path | Lines added |
|---|---|---|
| -rw-r--r-- | roles/openshift_logging/files/curator.yml | 18 |
| -rw-r--r-- | roles/openshift_logging/files/elasticsearch-logging.yml | 72 |
| -rw-r--r-- | roles/openshift_logging/files/elasticsearch.yml | 74 |
| -rw-r--r-- | roles/openshift_logging/files/es_migration.sh | 81 |
| -rw-r--r-- | roles/openshift_logging/files/fluent.conf | 34 |
| -rw-r--r-- | roles/openshift_logging/files/fluentd-throttle-config.yaml | 7 |
| -rw-r--r-- | roles/openshift_logging/files/generate-jks.sh | 71 |
| -rw-r--r-- | roles/openshift_logging/files/logging-deployer-sa.yaml | 6 |
| -rw-r--r-- | roles/openshift_logging/files/secure-forward.conf | 24 |
| -rw-r--r-- | roles/openshift_logging/files/server-tls.json | 5 |
| -rw-r--r-- | roles/openshift_logging/files/signing.conf | 103 |
| -rw-r--r-- | roles/openshift_logging/files/util.sh | 192 |

12 files changed, 687 insertions, 0 deletions
```diff
diff --git a/roles/openshift_logging/files/curator.yml b/roles/openshift_logging/files/curator.yml
new file mode 100644
index 000000000..8d62d8e7d
--- /dev/null
+++ b/roles/openshift_logging/files/curator.yml
@@ -0,0 +1,18 @@
+# Logging example curator config file
+
+# uncomment and use this to override the defaults from env vars
+#.defaults:
+#  delete:
+#    days: 30
+#  runhour: 0
+#  runminute: 0
+
+# to keep ops logs for a different duration:
+#.operations:
+#  delete:
+#    weeks: 8
+
+# example for a normal project
+#myapp:
+#  delete:
+#    weeks: 1
```
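The file ships fully commented out, so the env-var defaults win until an operator overrides them. As a hedged illustration (the retention values and the `myapp` project name are made up, mirroring the commented examples above), an uncommented variant could be written out from a shell step like this:

```bash
# Hypothetical uncommented curator.yml: delete application logs after 30 days,
# operations logs after 8 weeks, and the (illustrative) "myapp" project after 1 week.
cat > curator.yml <<'EOF'
.defaults:
  delete:
    days: 30
.operations:
  delete:
    weeks: 8
myapp:
  delete:
    weeks: 1
EOF
```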
```diff
diff --git a/roles/openshift_logging/files/elasticsearch-logging.yml b/roles/openshift_logging/files/elasticsearch-logging.yml
new file mode 100644
index 000000000..377abe21f
--- /dev/null
+++ b/roles/openshift_logging/files/elasticsearch-logging.yml
@@ -0,0 +1,72 @@
+# you can override this by setting a system property, for example -Des.logger.level=DEBUG
+es.logger.level: INFO
+rootLogger: ${es.logger.level}, console, file
+logger:
+  # log action execution errors for easier debugging
+  action: WARN
+  # reduce the logging for aws, too much is logged under the default INFO
+  com.amazonaws: WARN
+  io.fabric8.elasticsearch: ${PLUGIN_LOGLEVEL}
+  io.fabric8.kubernetes: ${PLUGIN_LOGLEVEL}
+
+  # gateway
+  #gateway: DEBUG
+  #index.gateway: DEBUG
+
+  # peer shard recovery
+  #indices.recovery: DEBUG
+
+  # discovery
+  #discovery: TRACE
+
+  index.search.slowlog: TRACE, index_search_slow_log_file
+  index.indexing.slowlog: TRACE, index_indexing_slow_log_file
+
+  # search-guard
+  com.floragunn.searchguard: WARN
+
+additivity:
+  index.search.slowlog: false
+  index.indexing.slowlog: false
+
+appender:
+  console:
+    type: console
+    layout:
+      type: consolePattern
+      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+  file:
+    type: dailyRollingFile
+    file: ${path.logs}/${cluster.name}.log
+    datePattern: "'.'yyyy-MM-dd"
+    layout:
+      type: pattern
+      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+  # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files.
+  # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html
+  #file:
+    #type: extrasRollingFile
+    #file: ${path.logs}/${cluster.name}.log
+    #rollingPolicy: timeBased
+    #rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz
+    #layout:
+      #type: pattern
+      #conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+  index_search_slow_log_file:
+    type: dailyRollingFile
+    file: ${path.logs}/${cluster.name}_index_search_slowlog.log
+    datePattern: "'.'yyyy-MM-dd"
+    layout:
+      type: pattern
+      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+  index_indexing_slow_log_file:
+    type: dailyRollingFile
+    file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log
+    datePattern: "'.'yyyy-MM-dd"
+    layout:
+      type: pattern
+      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
```

```diff
diff --git a/roles/openshift_logging/files/elasticsearch.yml b/roles/openshift_logging/files/elasticsearch.yml
new file mode 100644
index 000000000..4eff30e61
--- /dev/null
+++ b/roles/openshift_logging/files/elasticsearch.yml
@@ -0,0 +1,74 @@
+cluster:
+  name: ${CLUSTER_NAME}
+
+script:
+  inline: on
+  indexed: on
+
+index:
+  number_of_shards: 1
+  number_of_replicas: 0
+  auto_expand_replicas: 0-3
+  unassigned.node_left.delayed_timeout: 2m
+  translog:
+    flush_threshold_size: 256mb
+    flush_threshold_period: 5m
+
+node:
+  master: true
+  data: true
+
+network:
+  host: 0.0.0.0
+
+cloud:
+  kubernetes:
+    service: ${SERVICE_DNS}
+    namespace: ${NAMESPACE}
+
+discovery:
+  type: kubernetes
+  zen.ping.multicast.enabled: false
+
+gateway:
+  expected_master_nodes: ${NODE_QUORUM}
+  recover_after_nodes: ${RECOVER_AFTER_NODES}
+  expected_nodes: ${RECOVER_EXPECTED_NODES}
+  recover_after_time: ${RECOVER_AFTER_TIME}
+
+io.fabric8.elasticsearch.authentication.users: ["system.logging.kibana", "system.logging.fluentd", "system.logging.curator", "system.admin"]
+
+openshift.searchguard:
+  keystore.path: /etc/elasticsearch/secret/admin.jks
+  truststore.path: /etc/elasticsearch/secret/searchguard.truststore
+
+
+path:
+  data: /elasticsearch/persistent/${CLUSTER_NAME}/data
+  logs: /elasticsearch/${CLUSTER_NAME}/logs
+  work: /elasticsearch/${CLUSTER_NAME}/work
+  scripts: /elasticsearch/${CLUSTER_NAME}/scripts
+
+searchguard:
+  authcz.admin_dn:
+  - CN=system.admin,OU=OpenShift,O=Logging
+  config_index_name: ".searchguard.${HOSTNAME}"
+  ssl:
+    transport:
+      enabled: true
+      enforce_hostname_verification: false
+      keystore_type: JKS
+      keystore_filepath: /etc/elasticsearch/secret/searchguard.key
+      keystore_password: kspass
+      truststore_type: JKS
+      truststore_filepath: /etc/elasticsearch/secret/searchguard.truststore
+      truststore_password: tspass
+    http:
+      enabled: true
+      keystore_type: JKS
+      keystore_filepath: /etc/elasticsearch/secret/key
+      keystore_password: kspass
+      clientauth_mode: OPTIONAL
+      truststore_type: JKS
+      truststore_filepath: /etc/elasticsearch/secret/truststore
+      truststore_password: tspass
```
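The `${...}` placeholders in elasticsearch.yml are resolved from the container environment at startup. A minimal sketch of supplying them for a three-node cluster, assuming the usual floor(n/2)+1 quorum convention (the concrete values and the `logging-es-cluster` service name are illustrative, not fixed by this commit):

```bash
# Illustrative environment for the placeholders referenced above.
export CLUSTER_NAME=logging-es
export SERVICE_DNS=logging-es-cluster      # assumed cluster-discovery service name
export NAMESPACE=logging
export NODE_QUORUM=$(( 3 / 2 + 1 ))        # 2: a majority of 3 master-eligible nodes
export RECOVER_AFTER_NODES=2
export RECOVER_EXPECTED_NODES=3
export RECOVER_AFTER_TIME=5m
```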
```diff
diff --git a/roles/openshift_logging/files/es_migration.sh b/roles/openshift_logging/files/es_migration.sh
new file mode 100644
index 000000000..cca283bae
--- /dev/null
+++ b/roles/openshift_logging/files/es_migration.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+
+CA=${1:-/etc/openshift/logging/ca.crt}
+KEY=${2:-/etc/openshift/logging/system.admin.key}
+CERT=${3:-/etc/openshift/logging/system.admin.crt}
+openshift_logging_es_host=${4:-logging-es}
+openshift_logging_es_port=${5:-9200}
+namespace=${6:-logging}
+
+# for each index in _cat/indices
+# skip indices that begin with . - .kibana, .operations, etc.
+# skip indices that contain a uuid
+# get a list of unique project names
+# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
+# we are interested in - the awk will strip that part off
+function get_list_of_indices() {
+    curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \
+        awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \
+        '$3 !~ "^[.]" && $3 !~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \
+    sort -u
+}
+
+# for each index in _cat/indices
+# skip indices that begin with . - .kibana, .operations, etc.
+# get a list of unique project.uuid
+# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
+# we are interested in - the awk will strip that part off
+function get_list_of_proj_uuid_indices() {
+    curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \
+        awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \
+            '$3 !~ "^[.]" && $3 ~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \
+        sort -u
+}
+
+if [[ -z "$(oc get pods -l component=es -o jsonpath='{.items[?(@.status.phase == "Running")].metadata.name}')" ]]; then
+  echo "No Elasticsearch pods found running. Cannot update common data model."
+  exit 1
+fi
+
+count=$(get_list_of_indices | wc -l)
+if [ $count -eq 0 ]; then
+  echo No matching indices found - skipping update_for_uuid
+else
+  echo Creating aliases for $count index patterns . . .
+  {
+    echo '{"actions":['
+    get_list_of_indices | \
+      while IFS=. read proj ; do
+        # e.g. make test.uuid.* an alias of test.* so we can search for
+        # /test.uuid.*/_search and get both the test.uuid.* and
+        # the test.* indices
+        uid=$(oc get project "$proj" -o jsonpath='{.metadata.uid}' 2>/dev/null)
+        [ -n "$uid" ] && echo "{\"add\":{\"index\":\"$proj.*\",\"alias\":\"$proj.$uid.*\"}}"
+      done
+    echo ']}'
+  } | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases"
+fi
+
+count=$(get_list_of_proj_uuid_indices | wc -l)
+if [ $count -eq 0 ] ; then
+    echo No matching indices found - skipping update_for_common_data_model
+    exit 0
+fi
+
+echo Creating aliases for $count index patterns . . .
+# for each index in _cat/indices
+# skip indices that begin with . - .kibana, .operations, etc.
+# get a list of unique project.uuid
+# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
+# we are interested in - the awk will strip that part off
+{
+  echo '{"actions":['
+  get_list_of_proj_uuid_indices | \
+    while IFS=. read proj uuid ; do
+      # e.g. make project.test.uuid.* an alias of test.uuid.* so we can search for
+      # /project.test.uuid.*/_search and get both the test.uuid.* and
+      # the project.test.uuid.* indices
+      echo "{\"add\":{\"index\":\"$proj.$uuid.*\",\"alias\":\"${PROJ_PREFIX}$proj.$uuid.*\"}}"
+    done
+  echo ']}'
+} | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases"
```
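The script takes six positional arguments, with the defaults shown at its top. A hypothetical invocation against those defaults (it also requires an authenticated `oc` session, since it shells out to `oc get pods` and `oc get project`):

```bash
# Run the migration against the default cert paths and the logging-es service.
bash es_migration.sh \
  /etc/openshift/logging/ca.crt \
  /etc/openshift/logging/system.admin.key \
  /etc/openshift/logging/system.admin.crt \
  logging-es 9200 logging
```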
```diff
diff --git a/roles/openshift_logging/files/fluent.conf b/roles/openshift_logging/files/fluent.conf
new file mode 100644
index 000000000..aa843e983
--- /dev/null
+++ b/roles/openshift_logging/files/fluent.conf
@@ -0,0 +1,34 @@
+# This file is the fluentd configuration entrypoint. Edit with care.
+
+@include configs.d/openshift/system.conf
+
+# In each section below, pre- and post- includes don't include anything initially;
+# they exist to enable future additions to openshift conf as needed.
+
+## sources
+## ordered so that syslog always runs last...
+@include configs.d/openshift/input-pre-*.conf
+@include configs.d/dynamic/input-docker-*.conf
+@include configs.d/dynamic/input-syslog-*.conf
+@include configs.d/openshift/input-post-*.conf
+##
+
+<label @INGRESS>
+## filters
+  @include configs.d/openshift/filter-pre-*.conf
+  @include configs.d/openshift/filter-retag-journal.conf
+  @include configs.d/openshift/filter-k8s-meta.conf
+  @include configs.d/openshift/filter-kibana-transform.conf
+  @include configs.d/openshift/filter-k8s-flatten-hash.conf
+  @include configs.d/openshift/filter-k8s-record-transform.conf
+  @include configs.d/openshift/filter-syslog-record-transform.conf
+  @include configs.d/openshift/filter-post-*.conf
+##
+
+## matches
+  @include configs.d/openshift/output-pre-*.conf
+  @include configs.d/openshift/output-operations.conf
+  @include configs.d/openshift/output-applications.conf
+  # no post - applications.conf matches everything left
+##
+</label>
```

```diff
diff --git a/roles/openshift_logging/files/fluentd-throttle-config.yaml b/roles/openshift_logging/files/fluentd-throttle-config.yaml
new file mode 100644
index 000000000..375621ff1
--- /dev/null
+++ b/roles/openshift_logging/files/fluentd-throttle-config.yaml
@@ -0,0 +1,7 @@
+# Logging example fluentd throttling config file
+
+#example-project:
+#  read_lines_limit: 10
+#
+#.operations:
+#  read_lines_limit: 100
```
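fluent.conf assembles its whole pipeline from `@include` globs, so an empty glob is silently skipped rather than failing. A quick sanity check from outside a running fluentd pod; the `component=fluentd` label selector and the `/etc/fluent` config root are assumptions about the image, not guaranteed by this commit:

```bash
# List which include targets actually exist inside the first running fluentd pod.
pod=$(oc get pods -l component=fluentd -o name | head -1 | cut -d/ -f2)
oc exec "$pod" -- ls /etc/fluent/configs.d/openshift /etc/fluent/configs.d/dynamic
```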
```diff
diff --git a/roles/openshift_logging/files/generate-jks.sh b/roles/openshift_logging/files/generate-jks.sh
new file mode 100644
index 000000000..8760f37fe
--- /dev/null
+++ b/roles/openshift_logging/files/generate-jks.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+set -ex
+
+function importPKCS() {
+  dir=${SCRATCH_DIR:-_output}
+  NODE_NAME=$1
+  ks_pass=${KS_PASS:-kspass}
+  ts_pass=${TS_PASS:-tspass}
+  rm -rf $NODE_NAME
+
+  keytool \
+    -importkeystore \
+    -srckeystore $NODE_NAME.pkcs12 \
+    -srcstoretype PKCS12 \
+    -srcstorepass pass \
+    -deststorepass $ks_pass \
+    -destkeypass $ks_pass \
+    -destkeystore $dir/keystore.jks \
+    -alias 1 \
+    -destalias $NODE_NAME
+
+  echo "Import back to keystore (including CA chain)"
+
+  keytool  \
+    -import \
+    -file $dir/ca.crt  \
+    -keystore $dir/keystore.jks   \
+    -storepass $ks_pass  \
+    -noprompt -alias sig-ca
+
+  echo All done for $NODE_NAME
+}
+
+function createTruststore() {
+  ts_pass=${TS_PASS:-tspass}
+  echo "Import CA to truststore for validating client certs"
+
+  keytool  \
+    -import \
+    -file $dir/ca.crt  \
+    -keystore $dir/truststore.jks   \
+    -storepass $ts_pass  \
+    -noprompt -alias sig-ca
+}
+
+dir="/opt/deploy/"
+SCRATCH_DIR=$dir
+
+admin_user='system.admin'
+
+if [[ ! -f $dir/system.admin.jks || -z "$(keytool -list -keystore $dir/system.admin.jks -storepass kspass | grep sig-ca)" ]]; then
+  importPKCS "system.admin"
+  mv $dir/keystore.jks $dir/system.admin.jks
+fi
+
+if [[ ! -f $dir/searchguard_node_key || -z "$(keytool -list -keystore $dir/searchguard_node_key -storepass kspass | grep sig-ca)" ]]; then
+  importPKCS "elasticsearch"
+  mv $dir/keystore.jks $dir/searchguard_node_key
+fi
+
+
+if [[ ! -f $dir/keystore.jks || -z "$(keytool -list -keystore $dir/keystore.jks -storepass kspass | grep sig-ca)" ]]; then
+  importPKCS "logging-es"
+fi
+
+[ ! -f $dir/truststore.jks ] && createTruststore
+
+[ ! -f $dir/searchguard_node_truststore ] && cp $dir/truststore.jks $dir/searchguard_node_truststore
+
+# necessary so that the job knows it completed successfully
+exit 0
```

```diff
diff --git a/roles/openshift_logging/files/logging-deployer-sa.yaml b/roles/openshift_logging/files/logging-deployer-sa.yaml
new file mode 100644
index 000000000..334c9402b
--- /dev/null
+++ b/roles/openshift_logging/files/logging-deployer-sa.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: logging-deployer
+secrets:
+- name: logging-deployer
```

```diff
diff --git a/roles/openshift_logging/files/secure-forward.conf b/roles/openshift_logging/files/secure-forward.conf
new file mode 100644
index 000000000..f4483df79
--- /dev/null
+++ b/roles/openshift_logging/files/secure-forward.conf
@@ -0,0 +1,24 @@
+# @type secure_forward
+
+# self_hostname ${HOSTNAME}
+# shared_key <SECRET_STRING>
+
+# secure yes
+# enable_strict_verification yes
+
+# ca_cert_path /etc/fluent/keys/your_ca_cert
+# ca_private_key_path /etc/fluent/keys/your_private_key
+  # for private CA secret key
+# ca_private_key_passphrase passphrase
+
+# <server>
+  # or IP
+#   host server.fqdn.example.com
+#   port 24284
+# </server>
+# <server>
+  # ip address to connect
+#   host 203.0.113.8
+  # specify hostlabel for FQDN verification if an IP address is used for host
+#   hostlabel server.fqdn.example.com
+# </server>
```
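Every directive in secure-forward.conf is commented out, so external forwarding is off by default. A hypothetical filled-in version targeting a single external collector; the hostname and shared key are placeholders, following the template's own commented values:

```bash
# Write a minimal enabled secure-forward config (illustrative values only).
# ${HOSTNAME} stays literal here, matching the template, and is expanded by fluentd.
cat > secure-forward.conf <<'EOF'
@type secure_forward
self_hostname ${HOSTNAME}
shared_key example_shared_key
secure yes
ca_cert_path /etc/fluent/keys/your_ca_cert
<server>
  host server.fqdn.example.com
  port 24284
</server>
EOF
```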
```diff
diff --git a/roles/openshift_logging/files/server-tls.json b/roles/openshift_logging/files/server-tls.json
new file mode 100644
index 000000000..86deb23e3
--- /dev/null
+++ b/roles/openshift_logging/files/server-tls.json
@@ -0,0 +1,5 @@
+// See https://nodejs.org/api/tls.html#tls_tls_createserver_options_secureconnectionlistener for available options
+tls_options = {
+	ciphers: 'kEECDH:+kEECDH+SHA:kEDH:+kEDH+SHA:+kEDH+CAMELLIA:kECDH:+kECDH+SHA:kRSA:+kRSA+SHA:+kRSA+CAMELLIA:!aNULL:!eNULL:!SSLv2:!RC4:!DES:!EXP:!SEED:!IDEA:+3DES',
+	honorCipherOrder: true
+}
```

```diff
diff --git a/roles/openshift_logging/files/signing.conf b/roles/openshift_logging/files/signing.conf
new file mode 100644
index 000000000..810a057d9
--- /dev/null
+++ b/roles/openshift_logging/files/signing.conf
@@ -0,0 +1,103 @@
+# Simple Signing CA
+
+# The [default] section contains global constants that can be referred to from
+# the entire configuration file. It may also hold settings pertaining to more
+# than one openssl command.
+
+[ default ]
+#dir                     = _output               # Top dir
+
+# The next part of the configuration file is used by the openssl req command.
+# It defines the CA's key pair, its DN, and the desired extensions for the CA
+# certificate.
+
+[ req ]
+default_bits            = 2048                  # RSA key size
+encrypt_key             = yes                   # Protect private key
+default_md              = sha1                  # MD to use
+utf8                    = yes                   # Input is UTF-8
+string_mask             = utf8only              # Emit UTF-8 strings
+prompt                  = no                    # Don't prompt for DN
+distinguished_name      = ca_dn                 # DN section
+req_extensions          = ca_reqext             # Desired extensions
+
+[ ca_dn ]
+0.domainComponent       = "io"
+1.domainComponent       = "openshift"
+organizationName        = "OpenShift Origin"
+organizationalUnitName  = "Logging Signing CA"
+commonName              = "Logging Signing CA"
+
+[ ca_reqext ]
+keyUsage                = critical,keyCertSign,cRLSign
+basicConstraints        = critical,CA:true,pathlen:0
+subjectKeyIdentifier    = hash
+
+# The remainder of the configuration file is used by the openssl ca command.
+# The CA section defines the locations of CA assets, as well as the policies
+# applying to the CA.
+
+[ ca ]
+default_ca              = signing_ca            # The default CA section
+
+[ signing_ca ]
+certificate             = $dir/ca.crt           # The CA cert
+private_key             = $dir/ca.key           # CA private key
+new_certs_dir           = $dir/                 # Certificate archive
+serial                  = $dir/ca.serial.txt    # Serial number file
+crlnumber               = $dir/ca.crl.srl       # CRL number file
+database                = $dir/ca.db            # Index file
+unique_subject          = no                    # Require unique subject
+default_days            = 730                   # How long to certify for
+default_md              = sha1                  # MD to use
+policy                  = any_pol               # Default naming policy
+email_in_dn             = no                    # Add email to cert DN
+preserve                = no                    # Keep passed DN ordering
+name_opt                = ca_default            # Subject DN display options
+cert_opt                = ca_default            # Certificate display options
+copy_extensions         = copy                  # Copy extensions from CSR
+x509_extensions         = client_ext            # Default cert extensions
+default_crl_days        = 7                     # How long before next CRL
+crl_extensions          = crl_ext               # CRL extensions
+
+# Naming policies control which parts of a DN end up in the certificate and
+# under what circumstances certification should be denied.
+
+[ match_pol ]
+domainComponent         = match                 # Must match the CA's DC
+organizationName        = match                 # Must match the CA's O
+organizationalUnitName  = optional              # Included if present
+commonName              = supplied              # Must be present
+
+[ any_pol ]
+domainComponent         = optional
+countryName             = optional
+stateOrProvinceName     = optional
+localityName            = optional
+organizationName        = optional
+organizationalUnitName  = optional
+commonName              = optional
+emailAddress            = optional
+
+# Certificate extensions define what types of certificates the CA is able to
+# create.
+
+[ client_ext ]
+keyUsage                = critical,digitalSignature,keyEncipherment
+basicConstraints        = CA:false
+extendedKeyUsage        = clientAuth
+subjectKeyIdentifier    = hash
+authorityKeyIdentifier  = keyid
+
+[ server_ext ]
+keyUsage                = critical,digitalSignature,keyEncipherment
+basicConstraints        = CA:false
+extendedKeyUsage        = serverAuth,clientAuth
+subjectKeyIdentifier    = hash
+authorityKeyIdentifier  = keyid
+
+# CRL extensions exist solely to point to the CA certificate that has issued
+# the CRL.
+
+[ crl_ext ]
+authorityKeyIdentifier  = keyid
```
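signing.conf is the CA config consumed by the `openssl ca` calls in util.sh below. A sketch of a standalone signing run, assuming `$dir` holds the CA material named in `[ signing_ca ]` (`ca.crt`, `ca.key`, `ca.db`, `ca.serial.txt`) plus a CSR to sign; the `logging-es` file names are illustrative:

```bash
dir=_output   # must match the (commented-out) top dir this config expects
openssl ca \
  -config $dir/signing.conf \
  -extensions server_ext \
  -in  $dir/logging-es.csr \
  -out $dir/logging-es.crt \
  -notext -batch
```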
```diff
diff --git a/roles/openshift_logging/files/util.sh b/roles/openshift_logging/files/util.sh
new file mode 100644
index 000000000..5752a0fcd
--- /dev/null
+++ b/roles/openshift_logging/files/util.sh
@@ -0,0 +1,192 @@
+#!/bin/bash
+keytool=${keytool:-keytool}  # assumed default; sourcing scripts may override
+function generate_JKS_chain() {
+  dir=${SCRATCH_DIR:-_output}
+  ADD_OID=$1
+  NODE_NAME=$2
+  CERT_NAMES=${3:-$NODE_NAME}
+  ks_pass=${KS_PASS:-kspass}
+  ts_pass=${TS_PASS:-tspass}
+  rm -rf $NODE_NAME
+
+  extension_names=""
+  for name in ${CERT_NAMES//,/ }; do
+    extension_names="${extension_names},dns:${name}"
+  done
+
+  if [ "$ADD_OID" = true ]; then
+    extension_names="${extension_names},oid:1.2.3.4.5.5"
+  fi
+
+  echo Generating keystore and certificate for node $NODE_NAME
+
+  "$keytool" -genkey \
+        -alias     $NODE_NAME \
+        -keystore  $dir/keystore.jks \
+        -keypass   $ks_pass \
+        -storepass $ks_pass \
+        -keyalg    RSA \
+        -keysize   2048 \
+        -validity  712 \
+        -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging" \
+        -ext san=dns:localhost,ip:127.0.0.1"${extension_names}"
+
+  echo Generating certificate signing request for node $NODE_NAME
+
+  "$keytool" -certreq \
+        -alias      $NODE_NAME \
+        -keystore   $dir/keystore.jks \
+        -storepass  $ks_pass \
+        -file       $dir/$NODE_NAME.csr \
+        -keyalg     rsa \
+        -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging" \
+        -ext san=dns:localhost,ip:127.0.0.1"${extension_names}"
+
+  echo Sign certificate request with CA
+
+  openssl ca \
+    -in $dir/$NODE_NAME.csr \
+    -notext \
+    -out $dir/$NODE_NAME.crt \
+    -config $dir/signing.conf \
+    -extensions v3_req \
+    -batch \
+    -extensions server_ext
+
+  echo "Import back to keystore (including CA chain)"
+
+  "$keytool"  \
+    -import \
+    -file $dir/ca.crt  \
+    -keystore $dir/keystore.jks   \
+    -storepass $ks_pass  \
+    -noprompt -alias sig-ca
+
+  "$keytool" \
+    -import \
+    -file $dir/$NODE_NAME.crt \
+    -keystore $dir/keystore.jks \
+    -storepass $ks_pass \
+    -noprompt \
+    -alias $NODE_NAME
+
+  echo "Import CA to truststore for validating client certs"
+
+  "$keytool"  \
+    -import \
+    -file $dir/ca.crt  \
+    -keystore $dir/truststore.jks   \
+    -storepass $ts_pass  \
+    -noprompt -alias sig-ca
+
+  echo All done for $NODE_NAME
+}
+
+function generate_PEM_cert() {
+  NODE_NAME="$1"
+  dir=${SCRATCH_DIR:-_output}  # for writing files to bundle into secrets
+
+  echo Generating keystore and certificate for node ${NODE_NAME}
+
+  openssl req -out "$dir/$NODE_NAME.csr" -new -newkey rsa:2048 -keyout "$dir/$NODE_NAME.key" -subj "/CN=$NODE_NAME/OU=OpenShift/O=Logging" -days 712 -nodes
+
+  echo Sign certificate request with CA
+  openssl ca \
+    -in "$dir/$NODE_NAME.csr" \
+    -notext \
+    -out "$dir/$NODE_NAME.crt" \
+    -config $dir/signing.conf \
+    -extensions v3_req \
+    -batch \
+    -extensions server_ext
+}
+
+function generate_JKS_client_cert() {
+  NODE_NAME="$1"
+  ks_pass=${KS_PASS:-kspass}
+  ts_pass=${TS_PASS:-tspass}
+  dir=${SCRATCH_DIR:-_output}  # for writing files to bundle into secrets
+
+  echo Generating keystore and certificate for node ${NODE_NAME}
+
+  "$keytool" -genkey \
+        -alias     $NODE_NAME \
+        -keystore  $dir/$NODE_NAME.jks \
+        -keyalg    RSA \
+        -keysize   2048 \
+        -validity  712 \
+        -keypass $ks_pass \
+        -storepass $ks_pass \
+        -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging"
+
+  echo Generating certificate signing request for node $NODE_NAME
+
+  "$keytool" -certreq \
+          -alias      $NODE_NAME \
+          -keystore   $dir/$NODE_NAME.jks \
+          -file       $dir/$NODE_NAME.csr \
+          -keyalg     rsa \
+          -keypass $ks_pass \
+          -storepass $ks_pass \
+          -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging"
+
+  echo Sign certificate request with CA
+  openssl ca \
+    -in "$dir/$NODE_NAME.csr" \
+    -notext \
+    -out "$dir/$NODE_NAME.crt" \
+    -config $dir/signing.conf \
+    -extensions v3_req \
+    -batch \
+    -extensions server_ext
+
+  echo "Import back to keystore (including CA chain)"
+
+  "$keytool"  \
+    -import \
+    -file $dir/ca.crt  \
+    -keystore $dir/$NODE_NAME.jks   \
+    -storepass $ks_pass  \
+    -noprompt -alias sig-ca
+
+  "$keytool" \
+    -import \
+    -file $dir/$NODE_NAME.crt \
+    -keystore $dir/$NODE_NAME.jks \
+    -storepass $ks_pass \
+    -noprompt \
+    -alias $NODE_NAME
+
+  echo All done for $NODE_NAME
+}
+
+function join { local IFS="$1"; shift; echo "$*"; }
+
+function get_es_dcs() {
+  oc get dc --selector logging-infra=elasticsearch -o name
+}
+
+function get_curator_dcs() {
+  oc get dc --selector logging-infra=curator -o name
+}
+
+function extract_nodeselector() {
+  local inputstring="${1//\"/}"  # remove any errant double quotes in the inputs
+  local selectors=()
+
+  for keyvalstr in ${inputstring//\,/ }; do
+
+    keyval=( ${keyvalstr//=/ } )
+
+    if [[ -n "${keyval[0]}" && -n "${keyval[1]}" ]]; then
+      selectors+=( "\"${keyval[0]}\": \"${keyval[1]}\"" )
+    else
+      echo "Could not make a node selector label from '${keyval[*]}'"
+      exit 255
+    fi
+  done
+
+  if [[ "${#selectors[*]}" -gt 0 ]]; then
+    echo nodeSelector: "{" $(join , "${selectors[@]}") "}"
+  fi
+}
```
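util.sh is a helper library meant to be sourced rather than executed. A hedged usage sketch; `SCRATCH_DIR` and the signing.conf CA material must already exist, and `logging-kibana` plus the node-selector labels are illustrative:

```bash
source util.sh

# Build a nodeSelector snippet from a comma-separated label list:
extract_nodeselector 'region=infra,zone=east'
# prints: nodeSelector: { "region": "infra","zone": "east" }

# Issue a PEM key/cert pair signed by the CA configured in $SCRATCH_DIR/signing.conf:
SCRATCH_DIR=_output generate_PEM_cert logging-kibana
```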
